mirror of https://github.com/fosrl/pangolin.git
synced 2026-01-31 07:10:43 +00:00

Compare commits: 1.13.0-rc. … policies (7 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 7fa44981cf |  |
|  | 1bacad7854 |  |
|  | 75cec731e8 |  |
|  | 16a88281bb |  |
|  | 1574cbc5df |  |
|  | 2cb2a115b0 |  |
|  | 9dce7b2cde |  |
@@ -29,6 +29,4 @@ CONTRIBUTING.md
dist
.git
migrations/
config/
build.ts
tsconfig.json
config/
125 .github/workflows/cicd.yml (vendored)
@@ -1,62 +1,34 @@
name: CI/CD Pipeline

# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.

permissions:
contents: read
packages: write # for GHCR push
id-token: write # for Cosign Keyless (OIDC) Signing

# Required secrets:
# - DOCKER_HUB_USERNAME / DOCKER_HUB_ACCESS_TOKEN: push to Docker Hub
# - GITHUB_TOKEN: used for GHCR login and OIDC keyless signing
# - COSIGN_PRIVATE_KEY / COSIGN_PASSWORD / COSIGN_PUBLIC_KEY: for key-based signing

on:
push:
tags:
- "[0-9]+.[0-9]+.[0-9]+"
- "[0-9]+.[0-9]+.[0-9]+.rc.[0-9]+"

concurrency:
group: ${{ github.ref }}
cancel-in-progress: true

jobs:
release:
name: Build and Release
runs-on: [self-hosted, linux, x64]
# Job-level timeout to avoid runaway or stuck runs
timeout-minutes: 120
env:
# Target images
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
runs-on: ubuntu-latest

steps:
- name: Checkout code
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0

- name: Set up QEMU
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
uses: actions/checkout@v5

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
uses: docker/setup-buildx-action@v3

- name: Log in to Docker Hub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
uses: docker/login-action@v3
with:
registry: docker.io
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

- name: Extract tag name
id: get-tag
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
shell: bash

- name: Install Go
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
uses: actions/setup-go@v6
with:
go-version: 1.24

@@ -65,21 +37,18 @@ jobs:
TAG=${{ env.TAG }}
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
cat server/lib/consts.ts
shell: bash

- name: Pull latest Gerbil version
id: get-gerbil-tag
run: |
LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name')
echo "LATEST_GERBIL_TAG=$LATEST_TAG" >> $GITHUB_ENV
shell: bash

- name: Pull latest Badger version
id: get-badger-tag
run: |
LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name')
echo "LATEST_BADGER_TAG=$LATEST_TAG" >> $GITHUB_ENV
shell: bash

- name: Update install/main.go
run: |
@@ -91,7 +60,6 @@ jobs:
sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$BADGER_VERSION\"/" install/main.go
echo "Updated install/main.go with Pangolin version $PANGOLIN_VERSION, Gerbil version $GERBIL_VERSION, and Badger version $BADGER_VERSION"
cat install/main.go
shell: bash

- name: Build installer
working-directory: install
@@ -99,89 +67,12 @@ jobs:
make go-build-release

- name: Upload artifacts from /install/bin
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
uses: actions/upload-artifact@v4
with:
name: install-bin
path: install/bin/

- name: Build and push Docker images (Docker Hub)
- name: Build and push Docker images
run: |
TAG=${{ env.TAG }}
make build-release tag=$TAG
echo "Built & pushed to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
shell: bash

- name: Install skopeo + jq
# skopeo: copy/inspect images between registries
# jq: JSON parsing tool used to extract digest values
run: |
sudo apt-get update -y
sudo apt-get install -y skopeo jq
skopeo --version
shell: bash

- name: Login to GHCR
run: |
skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
shell: bash

- name: Copy tag from Docker Hub to GHCR
# Mirror the already-built image (all architectures) to GHCR so we can sign it
run: |
set -euo pipefail
TAG=${{ env.TAG }}
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
skopeo copy --all --retry-times 3 \
docker://$DOCKERHUB_IMAGE:$TAG \
docker://$GHCR_IMAGE:$TAG
shell: bash

- name: Login to GitHub Container Registry (for cosign)
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Install cosign
# cosign is used to sign and verify container images (key and keyless)
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0

- name: Dual-sign and verify (GHCR & Docker Hub)
# Sign each image by digest using keyless (OIDC) and key-based signing,
# then verify both the public key signature and the keyless OIDC signature.
env:
TAG: ${{ env.TAG }}
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
COSIGN_YES: "true"
run: |
set -euo pipefail

issuer="https://token.actions.githubusercontent.com"
id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)

for IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
echo "Processing ${IMAGE}:${TAG}"

DIGEST="$(skopeo inspect --retry-times 3 docker://${IMAGE}:${TAG} | jq -r '.Digest')"
REF="${IMAGE}@${DIGEST}"
echo "Resolved digest: ${REF}"

echo "==> cosign sign (keyless) --recursive ${REF}"
cosign sign --recursive "${REF}"

echo "==> cosign sign (key) --recursive ${REF}"
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"

echo "==> cosign verify (public key) ${REF}"
cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text

echo "==> cosign verify (keyless policy) ${REF}"
cosign verify \
--certificate-oidc-issuer "${issuer}" \
--certificate-identity-regexp "${id_regex}" \
"${REF}" -o text
done
shell: bash
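For reference, a consumer of the published images can replay the same keyless verification locally. A minimal sketch, assuming cosign is installed; the tag value below is illustrative only, and the issuer/identity values mirror the ones the workflow above checks:

```bash
#!/usr/bin/env bash
# Hypothetical tag for illustration; substitute a real release tag.
TAG=1.13.0
IMAGE=ghcr.io/fosrl/pangolin

# Verify the keyless (OIDC) signature created by the release workflow.
cosign verify \
  --certificate-oidc-issuer "https://token.actions.githubusercontent.com" \
  --certificate-identity-regexp "^https://github.com/fosrl/pangolin/.+" \
  "${IMAGE}:${TAG}" -o text
```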
9 .github/workflows/linting.yml (vendored)
@@ -1,8 +1,5 @@
name: ESLint

permissions:
contents: read

on:
pull_request:
paths:
@@ -21,10 +18,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@v5

- name: Set up Node.js
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
uses: actions/setup-node@v5
with:
node-version: '22'

@@ -35,4 +32,4 @@ jobs:
run: npm run set:oss

- name: Run ESLint
run: npx eslint . --ext .js,.jsx,.ts,.tsx
run: npx eslint . --ext .js,.jsx,.ts,.tsx
132 .github/workflows/mirror.yaml (vendored)
@@ -1,132 +0,0 @@
name: Mirror & Sign (Docker Hub to GHCR)

on:
workflow_dispatch: {}

permissions:
contents: read
packages: write
id-token: write # for keyless OIDC

env:
SOURCE_IMAGE: docker.io/fosrl/pangolin
DEST_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}

jobs:
mirror-and-dual-sign:
runs-on: amd64-runner
steps:
- name: Install skopeo + jq
run: |
sudo apt-get update -y
sudo apt-get install -y skopeo jq
skopeo --version

- name: Install cosign
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0

- name: Input check
run: |
test -n "${SOURCE_IMAGE}" || (echo "SOURCE_IMAGE is empty" && exit 1)
echo "Source : ${SOURCE_IMAGE}"
echo "Target : ${DEST_IMAGE}"

# Auth for skopeo (containers-auth)
- name: Skopeo login to GHCR
run: |
skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"

# Auth for cosign (docker-config)
- name: Docker login to GHCR (for cosign)
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u "${{ github.actor }}" --password-stdin

- name: List source tags
run: |
set -euo pipefail
skopeo list-tags --retry-times 3 docker://"${SOURCE_IMAGE}" \
| jq -r '.Tags[]' | sort -u > src-tags.txt
echo "Found source tags: $(wc -l < src-tags.txt)"
head -n 20 src-tags.txt || true

- name: List destination tags (skip existing)
run: |
set -euo pipefail
if skopeo list-tags --retry-times 3 docker://"${DEST_IMAGE}" >/tmp/dst.json 2>/dev/null; then
jq -r '.Tags[]' /tmp/dst.json | sort -u > dst-tags.txt
else
: > dst-tags.txt
fi
echo "Existing destination tags: $(wc -l < dst-tags.txt)"

- name: Mirror, dual-sign, and verify
env:
# keyless
COSIGN_YES: "true"
# key-based
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
# verify
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
run: |
set -euo pipefail
copied=0; skipped=0; v_ok=0; errs=0

issuer="https://token.actions.githubusercontent.com"
id_regex="^https://github.com/${{ github.repository }}/.+"

while read -r tag; do
[ -z "$tag" ] && continue

if grep -Fxq "$tag" dst-tags.txt; then
echo "::notice ::Skip (exists) ${DEST_IMAGE}:${tag}"
skipped=$((skipped+1))
continue
fi

echo "==> Copy ${SOURCE_IMAGE}:${tag} → ${DEST_IMAGE}:${tag}"
if ! skopeo copy --all --retry-times 3 \
docker://"${SOURCE_IMAGE}:${tag}" docker://"${DEST_IMAGE}:${tag}"; then
echo "::warning title=Copy failed::${SOURCE_IMAGE}:${tag}"
errs=$((errs+1)); continue
fi
copied=$((copied+1))

digest="$(skopeo inspect --retry-times 3 docker://"${DEST_IMAGE}:${tag}" | jq -r '.Digest')"
ref="${DEST_IMAGE}@${digest}"

echo "==> cosign sign (keyless) --recursive ${ref}"
if ! cosign sign --recursive "${ref}"; then
echo "::warning title=Keyless sign failed::${ref}"
errs=$((errs+1))
fi

echo "==> cosign sign (key) --recursive ${ref}"
if ! cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${ref}"; then
echo "::warning title=Key sign failed::${ref}"
errs=$((errs+1))
fi

echo "==> cosign verify (public key) ${ref}"
if ! cosign verify --key env://COSIGN_PUBLIC_KEY "${ref}" -o text; then
echo "::warning title=Verify(pubkey) failed::${ref}"
errs=$((errs+1))
fi

echo "==> cosign verify (keyless policy) ${ref}"
if ! cosign verify \
--certificate-oidc-issuer "${issuer}" \
--certificate-identity-regexp "${id_regex}" \
"${ref}" -o text; then
echo "::warning title=Verify(keyless) failed::${ref}"
errs=$((errs+1))
else
v_ok=$((v_ok+1))
fi
done < src-tags.txt

echo "---- Summary ----"
echo "Copied      : $copied"
echo "Skipped     : $skipped"
echo "Verified OK : $v_ok"
echo "Errors      : $errs"
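The core of this removed workflow is a tag-by-tag copy followed by a digest lookup, since signing should target the immutable digest rather than a movable tag. A minimal standalone sketch of that pattern, assuming skopeo and jq are installed and you are already logged in to both registries; the destination image and tag are illustrative:

```bash
#!/usr/bin/env bash
set -euo pipefail
SRC=docker.io/fosrl/pangolin
DST=ghcr.io/fosrl/pangolin   # illustrative destination
TAG=latest

# Copy every architecture in the manifest list, with retries.
skopeo copy --all --retry-times 3 "docker://${SRC}:${TAG}" "docker://${DST}:${TAG}"

# Resolve the digest at the destination; sign/verify against this reference.
DIGEST=$(skopeo inspect "docker://${DST}:${TAG}" | jq -r '.Digest')
echo "Sign and verify against: ${DST}@${DIGEST}"
```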
4 .github/workflows/stale-bot.yml (vendored)
@@ -14,7 +14,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
- uses: actions/stale@v10
with:
days-before-stale: 14
days-before-close: 14
@@ -34,4 +34,4 @@ jobs:
operations-per-run: 100
remove-stale-when-updated: true
delete-branch: false
enable-statistics: true
enable-statistics: true
7 .github/workflows/test.yml (vendored)
@@ -1,8 +1,5 @@
name: Run Tests

permissions:
contents: read

on:
pull_request:
branches:
@@ -14,9 +11,9 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: actions/checkout@v5

- uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
- uses: actions/setup-node@v5
with:
node-version: '22'
5 .gitignore (vendored)
@@ -47,7 +47,4 @@ server/db/index.ts
server/build.ts
postgres/
dynamic/
*.mmdb
scratch/
tsconfig.json
hydrateSaas.ts
*.mmdb
CONTRIBUTING.md
@@ -4,7 +4,7 @@ Contributions are welcome!

Please see the contribution and local development guide on the docs page before getting started:

https://docs.pangolin.net/development/contributing
https://docs.digpangolin.com/development/contributing

### Licensing Considerations
36 Dockerfile
|
||||
FROM node:24-alpine AS builder
|
||||
FROM node:22-alpine AS builder
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
ARG BUILD=oss
|
||||
ARG DATABASE=sqlite
|
||||
|
||||
RUN apk add --no-cache curl tzdata python3 make g++
|
||||
|
||||
# COPY package.json package-lock.json ./
|
||||
COPY package*.json ./
|
||||
RUN npm ci
|
||||
@@ -14,42 +12,20 @@ RUN npm ci
|
||||
COPY . .
|
||||
|
||||
RUN echo "export * from \"./$DATABASE\";" > server/db/index.ts
|
||||
RUN echo "export const driver: \"pg\" | \"sqlite\" = \"$DATABASE\";" >> server/db/index.ts
|
||||
|
||||
RUN echo "export const build = \"$BUILD\" as \"saas\" | \"enterprise\" | \"oss\";" > server/build.ts
|
||||
RUN echo "export const build = \"$BUILD\" as any;" > server/build.ts
|
||||
|
||||
# Copy the appropriate TypeScript configuration based on build type
|
||||
RUN if [ "$BUILD" = "oss" ]; then cp tsconfig.oss.json tsconfig.json; \
|
||||
elif [ "$BUILD" = "saas" ]; then cp tsconfig.saas.json tsconfig.json; \
|
||||
elif [ "$BUILD" = "enterprise" ]; then cp tsconfig.enterprise.json tsconfig.json; \
|
||||
fi
|
||||
|
||||
# if the build is oss then remove the server/private directory
|
||||
RUN if [ "$BUILD" = "oss" ]; then rm -rf server/private; fi
|
||||
|
||||
RUN if [ "$DATABASE" = "pg" ]; then npx drizzle-kit generate --dialect postgresql --schema ./server/db/pg/schema --out init; else npx drizzle-kit generate --dialect $DATABASE --schema ./server/db/$DATABASE/schema --out init; fi
|
||||
|
||||
RUN mkdir -p dist
|
||||
RUN npm run next:build
|
||||
RUN node esbuild.mjs -e server/index.ts -o dist/server.mjs -b $BUILD
|
||||
RUN if [ "$DATABASE" = "pg" ]; then \
|
||||
node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs; \
|
||||
else \
|
||||
node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs; \
|
||||
fi
|
||||
|
||||
# test to make sure the build output is there and error if not
|
||||
RUN test -f dist/server.mjs
|
||||
RUN if [ "$DATABASE" = "pg" ]; then npx drizzle-kit generate --dialect postgresql --schema ./server/db/pg/schema.ts --out init; else npx drizzle-kit generate --dialect $DATABASE --schema ./server/db/$DATABASE/schema.ts --out init; fi
|
||||
|
||||
RUN npm run build:$DATABASE
|
||||
RUN npm run build:cli
|
||||
|
||||
FROM node:24-alpine AS runner
|
||||
FROM node:22-alpine AS runner
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Curl used for the health checks
|
||||
# Python and build tools needed for better-sqlite3 native compilation
|
||||
RUN apk add --no-cache curl tzdata python3 make g++
|
||||
RUN apk add --no-cache curl tzdata
|
||||
|
||||
# COPY package.json package-lock.json ./
|
||||
COPY package*.json ./
|
||||
|
||||
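As a usage sketch, the two build arguments select one cell of the image matrix this Dockerfile supports. For example, a local single-platform build of the OSS/SQLite variant (the image name is illustrative):

```bash
# BUILD selects oss | saas | enterprise; DATABASE selects sqlite | pg.
docker build \
  --build-arg BUILD=oss \
  --build-arg DATABASE=sqlite \
  -t pangolin:dev-oss-sqlite .
```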
50 Makefile
|
||||
exit 1; \
|
||||
fi
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:latest \
|
||||
@@ -17,7 +16,6 @@ build-release:
|
||||
--tag fosrl/pangolin:$(tag) \
|
||||
--push .
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=pg \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:postgresql-latest \
|
||||
@@ -25,54 +23,6 @@ build-release:
|
||||
--tag fosrl/pangolin:postgresql-$(minor_tag) \
|
||||
--tag fosrl/pangolin:postgresql-$(tag) \
|
||||
--push .
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:ee-latest \
|
||||
--tag fosrl/pangolin:ee-$(major_tag) \
|
||||
--tag fosrl/pangolin:ee-$(minor_tag) \
|
||||
--tag fosrl/pangolin:ee-$(tag) \
|
||||
--push .
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=pg \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-latest \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(major_tag) \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(minor_tag) \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(tag) \
|
||||
--push .
|
||||
|
||||
build-rc:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:$(tag) \
|
||||
--push .
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=pg \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:postgresql-$(tag) \
|
||||
--push .
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:ee-$(tag) \
|
||||
--push .
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=pg \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(tag) \
|
||||
--push .
|
||||
|
||||
build-arm:
|
||||
docker buildx build --platform linux/arm64 -t fosrl/pangolin:latest .
|
||||
|
||||
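Both targets require an explicit `tag` variable and fail fast without it. Invocation looks like this; the tag values are illustrative (the rc form follows the `[0-9]+.[0-9]+.[0-9]+.rc.[0-9]+` pattern the CI workflow matches):

```bash
# Full release: pushes latest/major/minor/exact tags for all four image variants.
make build-release tag=1.13.0

# Release candidate: pushes only the exact tag per variant.
make build-rc tag=1.13.0.rc.1
```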
156 README.md
@@ -1,95 +1,157 @@
<div align="center">
<h2>
<a href="https://pangolin.net/">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="public/logo/word_mark_white.png">
<img alt="Pangolin Logo" src="public/logo/word_mark_black.png" width="350">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="public/logo/word_mark_white.png">
<img alt="Pangolin Logo" src="public/logo/word_mark_black.png" width="250">
</picture>
</a>
</h2>
</div>

<h4 align="center">Secure gateway to your private networks</h4>
<div align="center">

_Pangolin tunnels your services to the internet so you can access anything from anywhere._

</div>

<div align="center">
<h5>
<a href="https://pangolin.net/">
<a href="https://digpangolin.com">
Website
</a>
<span> | </span>
<a href="https://docs.pangolin.net/">
Documentation
<a href="https://docs.digpangolin.com/self-host/quick-install-managed">
Quick Install Guide
</a>
<span> | </span>
<a href="mailto:contact@pangolin.net">
<a href="mailto:contact@fossorial.io">
Contact Us
</a>
<span> | </span>
<a href="https://digpangolin.com/slack">
Slack
</a>
<span> | </span>
<a href="https://discord.gg/HCJR8Xhme4">
Discord
</a>
</h5>
</div>

<div align="center">

[](https://discord.gg/HCJR8Xhme4)
[](https://pangolin.net/slack)
[](https://digpangolin.com/slack)
[](https://hub.docker.com/r/fosrl/pangolin)

[](https://discord.gg/HCJR8Xhme4)
[](https://www.youtube.com/@fossorial-app)

</div>

<p align="center">
<strong>
Start testing Pangolin at <a href="https://app.pangolin.net/auth/signup">app.pangolin.net</a>
Start testing Pangolin at <a href="https://pangolin.fossorial.io/auth/signup">pangolin.fossorial.io</a>
</strong>
</p>

Pangolin is a self-hosted tunneled reverse proxy server with identity and context aware access control, designed to easily expose and protect applications running anywhere. Pangolin acts as a central hub and connects isolated networks — even those behind restrictive firewalls — through encrypted tunnels, enabling easy access to remote services without opening ports or requiring a VPN.
Pangolin is a self-hosted tunneled reverse proxy server with identity and access control, designed to securely expose private resources on distributed networks. Acting as a central hub, it connects isolated networks — even those behind restrictive firewalls — through encrypted tunnels, enabling easy access to remote services without opening ports.

## Installation
<img src="public/screenshots/hero.png" alt="Preview"/>

- Check out the [quick install guide](https://docs.pangolin.net/self-host/quick-install) for how to install and set up Pangolin.
- Install from the [DigitalOcean marketplace](https://marketplace.digitalocean.com/apps/pangolin-ce-1?refcode=edf0480eeb81) for a one-click pre-configured installer.

<img src="public/screenshots/hero.png" />

## Deployment Options

| <img width=500 /> | Description |
|---|---|
| **Self-Host: Community Edition** | Free, open source, and licensed under AGPL-3. |
| **Self-Host: Enterprise Edition** | Licensed under Fossorial Commercial License. Free for personal and hobbyist use, and for businesses earning under \$100K USD annually. |
| **Pangolin Cloud** | Fully managed service with instant setup and pay-as-you-go pricing — no infrastructure required. Or, self-host your own [remote node](https://docs.pangolin.net/manage/remote-node/nodes) and connect to our control plane. |

## Key Features

Pangolin packages everything you need for seamless application access and exposure into one cohesive platform.
### Reverse Proxy Through WireGuard Tunnel

| <img width=500 /> | <img width=500 /> |
|---|---|
| **Manage applications in one place**<br /><br /> Pangolin provides a unified dashboard where you can monitor, configure, and secure all of your services regardless of where they are hosted. | <img src="public/screenshots/hero.png" width=500 /><tr></tr> |
| **Reverse proxy across networks anywhere**<br /><br />Route traffic via tunnels to any private network. Pangolin works like a reverse proxy that spans multiple networks and handles routing, load balancing, health checking, and more to the right services on the other end. | <img src="public/screenshots/sites.png" width=500 /><tr></tr> |
| **Enforce identity and context aware rules**<br /><br />Protect your applications with identity and context aware rules such as SSO, OIDC, PIN, password, temporary share links, geolocation, IP, and more. | <img src="public/auth-diagram1.png" width=500 /><tr></tr> |
| **Quickly connect Pangolin sites**<br /><br />Pangolin's lightweight [Newt](https://github.com/fosrl/newt) client runs in userspace and can run anywhere. Use it as a site connector to route traffic to backends across all of your environments. | <img src="public/clip.gif" width=500 /><tr></tr> |
- Expose private resources on your network **without opening ports** (firewall punching).
- Secure and easy to configure private connectivity via a custom **user space WireGuard client**, [Newt](https://github.com/fosrl/newt).
- Built-in support for any WireGuard client.
- Automated **SSL certificates** (https) via [LetsEncrypt](https://letsencrypt.org/).
- Support for HTTP/HTTPS and **raw TCP/UDP services**.
- Load balancing.
- Extend functionality with existing [Traefik](https://github.com/traefik/traefik) plugins, such as [CrowdSec](https://plugins.traefik.io/plugins/6335346ca4caa9ddeffda116/crowdsec-bouncer-traefik-plugin) and [Geoblock](https://github.com/PascalMinder/geoblock).
- **Automatically install and configure Crowdsec via Pangolin's installer script.**
- Attach as many sites to the central server as you wish.

## Get Started
### Identity & Access Management

### Check out the docs
- Centralized authentication system using platform SSO. **Users will only have to manage one login.**
- **Define access control rules for IPs, IP ranges, and URL paths per resource.**
- TOTP with backup codes for two-factor authentication.
- Create organizations, each with multiple sites, users, and roles.
- **Role-based access control** to manage resource access permissions.
- Additional authentication options include:
- Email whitelisting with **one-time passcodes.**
- **Temporary, self-destructing share links.**
- Resource specific pin codes.
- Resource specific passwords.
- Passkeys
- External identity provider (IdP) support with OAuth2/OIDC, such as Authentik, Keycloak, Okta, and others.
- Auto-provision users and roles from your IdP.

We encourage everyone to read the full documentation first, which is
available at [docs.pangolin.net](https://docs.pangolin.net). This README provides only a very brief subset of
the docs to illustrate some basic ideas.
<img src="public/auth-diagram1.png" alt="Auth and diagram"/>

### Sign up and try now
## Use Cases

For Pangolin's managed service, you will first need to create an account at
[app.pangolin.net](https://app.pangolin.net). We have a generous free tier to get started.
### Manage Access to Internal Apps

- Grant users access to your apps from anywhere using just a web browser. No client software required.

### Developers and DevOps

- Expose and test internal tools and dashboards like **Grafana**. Bring localhost or private IPs online for easy access.

### Secure API Gateway

- One application load balancer across multiple clouds and on-premises.

### IoT and Edge Devices

- Easily expose **IoT devices**, **edge servers**, or **Raspberry Pi** to the internet for field equipment monitoring.

<img src="public/screenshots/sites.png" alt="Sites"/>

## Deployment Options

### Fully Self Hosted

Host the full application on your own server or on the cloud with a VPS. Take a look at the [documentation](https://docs.digpangolin.com/self-host/quick-install) to get started.

> Many of our users have had a great experience with [RackNerd](https://my.racknerd.com/aff.php?aff=13788). Depending on promotions, you can get a [**VPS with 1 vCPU, 1GB RAM, and ~20GB SSD for just around $12/year**](https://my.racknerd.com/aff.php?aff=13788&pid=912). That's a great deal!

### Pangolin Cloud

Easy to use with simple [pay as you go pricing](https://digpangolin.com/pricing). [Check it out here](https://pangolin.fossorial.io/auth/signup).

- Everything you get with self hosted Pangolin, but fully managed for you.

### Managed & High Availability

Managed control plane, your infrastructure

- We manage database and control plane.
- You self-host lightweight exit-node.
- Traffic flows through your infra.
- We coordinate failover between your nodes or to Cloud when things go bad.

Try it out using [Pangolin Cloud](https://pangolin.fossorial.io)

### Full Enterprise On-Premises

[Contact us](mailto:numbat@fossorial.io) for a full distributed and enterprise deployments on your infrastructure controlled by your team.

## Project Development / Roadmap

We want to hear your feature requests! Add them to the [discussion board](https://github.com/orgs/fosrl/discussions/categories/feature-requests).

## Licensing

Pangolin is dual licensed under the AGPL-3 and the [Fossorial Commercial License](https://pangolin.net/fcl.html). For inquiries about commercial licensing, please contact us at [contact@pangolin.net](mailto:contact@pangolin.net).
Pangolin is dual licensed under the AGPL-3 and the Fossorial Commercial license. For inquiries about commercial licensing, please contact us at [numbat@fossorial.io](mailto:numbat@fossorial.io).

## Contributions

Looking for something to contribute? Take a look at issues marked with [help wanted](https://github.com/fosrl/pangolin/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22help%20wanted%22). Also look through the feature requests in Discussions; many are available to pick up, and some are marked as a good first issue.
|
||||
Please see [CONTRIBUTING](./CONTRIBUTING.md) in the repository for guidelines and best practices.
|
||||
|
||||
---
|
||||
Please post bug reports and other functional issues in the [Issues](https://github.com/fosrl/pangolin/issues) section of the repository.
|
||||
|
||||
WireGuard® is a registered trademark of Jason A. Donenfeld.
|
||||
If you are looking to help with translations, please contribute [on Crowdin](https://crowdin.com/project/fossorial-pangolin) or open a PR with changes to the translations files found in `messages/`.
|
||||
SECURITY.md
@@ -3,7 +3,7 @@
If you discover a security vulnerability, please follow the steps below to responsibly disclose it to us:

1. **Do not create a public GitHub issue or discussion post.** This could put the security of other users at risk.
2. Send a detailed report to [security@pangolin.net](mailto:security@pangolin.net) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:
2. Send a detailed report to [security@fossorial.io](mailto:security@fossorial.io) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:

- Description and location of the vulnerability.
- Potential impact of the vulnerability.
@@ -8,7 +8,7 @@ import base64
YAML_FILE_PATH = 'blueprint.yaml'

# The API endpoint and headers from the curl request
API_URL = 'http://api.pangolin.net/v1/org/test/blueprint'
API_URL = 'http://api.pangolin.fossorial.io/v1/org/test/blueprint'
HEADERS = {
'accept': '*/*',
'Authorization': 'Bearer <your_token_here>',
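The comment above says the endpoint and headers were lifted from a curl request. A hedged equivalent of that call, assuming the request is a POST that uploads the YAML file as the body (the HTTP method and body encoding are assumptions; the token is a placeholder from the script):

```bash
# Assumed method/body; mirrors the endpoint and headers in the script above.
curl -X POST 'http://api.pangolin.fossorial.io/v1/org/test/blueprint' \
  -H 'accept: */*' \
  -H 'Authorization: Bearer <your_token_here>' \
  --data-binary @blueprint.yaml
```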
@@ -28,10 +28,9 @@ proxy-resources:
# sso-roles:
#   - Member
# sso-users:
#   - owen@pangolin.net
#   - owen@fossorial.io
# whitelist-users:
#   - owen@pangolin.net
# auto-login-idp: 1
#   - owen@fossorial.io
headers:
- name: X-Example-Header
value: example-value
@@ -5,14 +5,14 @@ meta {
}

post {
url: http://localhost:3000/api/v1/auth/login
url: http://localhost:4000/api/v1/auth/login
body: json
auth: none
}

body:json {
{
"email": "admin@fosrl.io",
"email": "owen@fossorial.io",
"password": "Password123!"
}
}
@@ -12,6 +12,6 @@ post {

body:json {
{
"email": "milo@pangolin.net"
"email": "milo@fossorial.io"
}
}
@@ -12,7 +12,7 @@ put {

body:json {
{
"email": "numbat@pangolin.net",
"email": "numbat@fossorial.io",
"password": "Password123!"
}
}
@@ -1,15 +0,0 @@
meta {
name: createOlm
type: http
seq: 1
}

put {
url: http://localhost:3000/api/v1/olm
body: none
auth: inherit
}

settings {
encodeUrl: true
}
@@ -1,8 +0,0 @@
meta {
name: Olm
seq: 15
}

auth {
mode: inherit
}
@@ -1,6 +1,6 @@
{
"version": "1",
"name": "Pangolin",
"name": "Pangolin Saas",
"type": "collection",
"ignore": [
"node_modules",
@@ -90,8 +90,7 @@ export const setAdminCredentials: CommandModule<{}, SetAdminCredentialsArgs> = {
passwordHash,
dateCreated: moment().toISOString(),
serverAdmin: true,
emailVerified: true,
lastPasswordChange: new Date().getTime()
emailVerified: true
});

console.log("Server admin created");
@@ -1,5 +1,5 @@
# To see all available options, please visit the docs:
# https://docs.pangolin.net/self-host/advanced/config-file
# https://docs.digpangolin.com/self-host/advanced/config-file

app:
dashboard_url: http://localhost:3002
@@ -25,3 +25,4 @@ flags:
disable_user_create_org: true
allow_raw_resources: true
enable_integration_api: true
enable_clients: true
@@ -1,15 +0,0 @@
services:
drizzle-gateway:
image: ghcr.io/drizzle-team/gateway:latest
ports:
- "4984:4983"
depends_on:
- db
environment:
- STORE_PATH=/app
- DATABASE_URL=postgresql://postgres:password@db:5432/postgres
volumes:
- drizzle-gateway-data:/app

volumes:
drizzle-gateway-data:
@@ -20,7 +20,7 @@ services:
pangolin:
condition: service_healthy
command:
- --reachableAt=http://gerbil:3004
- --reachableAt=http://gerbil:3003
- --generateAndSaveKeyTo=/var/config/key
- --remoteConfig=http://pangolin:3001/api/v1/
volumes:
@@ -35,7 +35,7 @@ services:
- 80:80 # Port for traefik because of the network_mode

traefik:
image: traefik:v3.6
image: traefik:v3.5
container_name: traefik
restart: unless-stopped
network_mode: service:gerbil # Ports appear on the gerbil service
@@ -52,4 +52,4 @@ networks:
default:
driver: bridge
name: pangolin
enable_ipv6: true
enable_ipv6: true
@@ -11,7 +11,7 @@ services:
- ./config/postgres:/var/lib/postgresql/data
ports:
- "5432:5432" # Map host port 5432 to container port 5432
restart: no
restart: no

redis:
image: redis:latest # Use the latest Redis image
@@ -1,9 +1,16 @@
import { defineConfig } from "drizzle-kit";
import path from "path";
import { build } from "@server/build";

const schema = [
path.join("server", "db", "pg", "schema"),
];
let schema;
if (build === "oss") {
schema = [path.join("server", "db", "pg", "schema.ts")];
} else {
schema = [
path.join("server", "db", "pg", "schema.ts"),
path.join("server", "db", "pg", "privateSchema.ts")
];
}

export default defineConfig({
dialect: "postgresql",
@@ -1,10 +1,17 @@
import { build } from "@server/build";
import { APP_PATH } from "@server/lib/consts";
import { defineConfig } from "drizzle-kit";
import path from "path";

const schema = [
path.join("server", "db", "sqlite", "schema"),
];
let schema;
if (build === "oss") {
schema = [path.join("server", "db", "sqlite", "schema.ts")];
} else {
schema = [
path.join("server", "db", "sqlite", "schema.ts"),
path.join("server", "db", "sqlite", "privateSchema.ts")
];
}

export default defineConfig({
dialect: "sqlite",
210 esbuild.mjs
@@ -2,9 +2,8 @@ import esbuild from "esbuild";
|
||||
import yargs from "yargs";
|
||||
import { hideBin } from "yargs/helpers";
|
||||
import { nodeExternalsPlugin } from "esbuild-node-externals";
|
||||
import path from "path";
|
||||
import fs from "fs";
|
||||
// import { glob } from "glob";
|
||||
// import path from "path";
|
||||
|
||||
const banner = `
|
||||
// patch __dirname
|
||||
@@ -19,7 +18,7 @@ const require = topLevelCreateRequire(import.meta.url);
|
||||
`;
|
||||
|
||||
const argv = yargs(hideBin(process.argv))
|
||||
.usage("Usage: $0 -entry [string] -out [string] -build [string]")
|
||||
.usage("Usage: $0 -entry [string] -out [string]")
|
||||
.option("entry", {
|
||||
alias: "e",
|
||||
describe: "Entry point file",
|
||||
@@ -32,13 +31,6 @@ const argv = yargs(hideBin(process.argv))
|
||||
type: "string",
|
||||
demandOption: true,
|
||||
})
|
||||
.option("build", {
|
||||
alias: "b",
|
||||
describe: "Build type (oss, saas, enterprise)",
|
||||
type: "string",
|
||||
choices: ["oss", "saas", "enterprise"],
|
||||
default: "oss",
|
||||
})
|
||||
.help()
|
||||
.alias("help", "h").argv;
|
||||
|
||||
@@ -54,179 +46,6 @@ function getPackagePaths() {
|
||||
return ["package.json"];
|
||||
}
|
||||
|
||||
// Plugin to guard against bad imports from #private
|
||||
function privateImportGuardPlugin() {
|
||||
return {
|
||||
name: "private-import-guard",
|
||||
setup(build) {
|
||||
const violations = [];
|
||||
|
||||
build.onResolve({ filter: /^#private\// }, (args) => {
|
||||
const importingFile = args.importer;
|
||||
|
||||
// Check if the importing file is NOT in server/private
|
||||
const normalizedImporter = path.normalize(importingFile);
|
||||
const isInServerPrivate = normalizedImporter.includes(path.normalize("server/private"));
|
||||
|
||||
if (!isInServerPrivate) {
|
||||
const violation = {
|
||||
file: importingFile,
|
||||
importPath: args.path,
|
||||
resolveDir: args.resolveDir
|
||||
};
|
||||
violations.push(violation);
|
||||
|
||||
console.log(`PRIVATE IMPORT VIOLATION:`);
|
||||
console.log(` File: ${importingFile}`);
|
||||
console.log(` Import: ${args.path}`);
|
||||
console.log(` Resolve dir: ${args.resolveDir || 'N/A'}`);
|
||||
console.log('');
|
||||
}
|
||||
|
||||
// Return null to let the default resolver handle it
|
||||
return null;
|
||||
});
|
||||
|
||||
build.onEnd((result) => {
|
||||
if (violations.length > 0) {
|
||||
console.log(`\nSUMMARY: Found ${violations.length} private import violation(s):`);
|
||||
violations.forEach((v, i) => {
|
||||
console.log(` ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`);
|
||||
});
|
||||
console.log('');
|
||||
|
||||
result.errors.push({
|
||||
text: `Private import violations detected: ${violations.length} violation(s) found`,
|
||||
location: null,
|
||||
notes: violations.map(v => ({
|
||||
text: `${path.relative(process.cwd(), v.file)} imports ${v.importPath}`,
|
||||
location: null
|
||||
}))
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Plugin to guard against bad imports from #private
|
||||
function dynamicImportGuardPlugin() {
|
||||
return {
|
||||
name: "dynamic-import-guard",
|
||||
setup(build) {
|
||||
const violations = [];
|
||||
|
||||
build.onResolve({ filter: /^#dynamic\// }, (args) => {
|
||||
const importingFile = args.importer;
|
||||
|
||||
// Check if the importing file is NOT in server/private
|
||||
const normalizedImporter = path.normalize(importingFile);
|
||||
const isInServerPrivate = normalizedImporter.includes(path.normalize("server/private"));
|
||||
|
||||
if (isInServerPrivate) {
|
||||
const violation = {
|
||||
file: importingFile,
|
||||
importPath: args.path,
|
||||
resolveDir: args.resolveDir
|
||||
};
|
||||
violations.push(violation);
|
||||
|
||||
console.log(`DYNAMIC IMPORT VIOLATION:`);
|
||||
console.log(` File: ${importingFile}`);
|
||||
console.log(` Import: ${args.path}`);
|
||||
console.log(` Resolve dir: ${args.resolveDir || 'N/A'}`);
|
||||
console.log('');
|
||||
}
|
||||
|
||||
// Return null to let the default resolver handle it
|
||||
return null;
|
||||
});
|
||||
|
||||
build.onEnd((result) => {
|
||||
if (violations.length > 0) {
|
||||
console.log(`\nSUMMARY: Found ${violations.length} dynamic import violation(s):`);
|
||||
violations.forEach((v, i) => {
|
||||
console.log(` ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`);
|
||||
});
|
||||
console.log('');
|
||||
|
||||
result.errors.push({
|
||||
text: `Dynamic import violations detected: ${violations.length} violation(s) found`,
|
||||
location: null,
|
||||
notes: violations.map(v => ({
|
||||
text: `${path.relative(process.cwd(), v.file)} imports ${v.importPath}`,
|
||||
location: null
|
||||
}))
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Plugin to dynamically switch imports based on build type
|
||||
function dynamicImportSwitcherPlugin(buildValue) {
|
||||
return {
|
||||
name: "dynamic-import-switcher",
|
||||
setup(build) {
|
||||
const switches = [];
|
||||
|
||||
build.onStart(() => {
|
||||
console.log(`Dynamic import switcher using build type: ${buildValue}`);
|
||||
});
|
||||
|
||||
build.onResolve({ filter: /^#dynamic\// }, (args) => {
|
||||
// Extract the path after #dynamic/
|
||||
const dynamicPath = args.path.replace(/^#dynamic\//, '');
|
||||
|
||||
// Determine the replacement based on build type
|
||||
let replacement;
|
||||
if (buildValue === "oss") {
|
||||
replacement = `#open/${dynamicPath}`;
|
||||
} else if (buildValue === "saas" || buildValue === "enterprise") {
|
||||
replacement = `#closed/${dynamicPath}`; // We use #closed here so that the route guards dont complain after its been changed but this is the same as #private
|
||||
} else {
|
||||
console.warn(`Unknown build type '${buildValue}', defaulting to #open/`);
|
||||
replacement = `#open/${dynamicPath}`;
|
||||
}
|
||||
|
||||
const switchInfo = {
|
||||
file: args.importer,
|
||||
originalPath: args.path,
|
||||
replacementPath: replacement,
|
||||
buildType: buildValue
|
||||
};
|
||||
switches.push(switchInfo);
|
||||
|
||||
console.log(`DYNAMIC IMPORT SWITCH:`);
|
||||
console.log(` File: ${args.importer}`);
|
||||
console.log(` Original: ${args.path}`);
|
||||
console.log(` Switched to: ${replacement} (build: ${buildValue})`);
|
||||
console.log('');
|
||||
|
||||
// Rewrite the import path and let the normal resolution continue
|
||||
return build.resolve(replacement, {
|
||||
importer: args.importer,
|
||||
namespace: args.namespace,
|
||||
resolveDir: args.resolveDir,
|
||||
kind: args.kind
|
||||
});
|
||||
});
|
||||
|
||||
build.onEnd((result) => {
|
||||
if (switches.length > 0) {
|
||||
console.log(`\nDYNAMIC IMPORT SUMMARY: Switched ${switches.length} import(s) for build type '${buildValue}':`);
|
||||
switches.forEach((s, i) => {
|
||||
console.log(` ${i + 1}. ${path.relative(process.cwd(), s.file)}`);
|
||||
console.log(` ${s.originalPath} → ${s.replacementPath}`);
|
||||
});
|
||||
console.log('');
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
esbuild
|
||||
.build({
|
||||
entryPoints: [argv.entry],
|
||||
@@ -240,9 +59,6 @@ esbuild
|
||||
platform: "node",
|
||||
external: ["body-parser"],
|
||||
plugins: [
|
||||
privateImportGuardPlugin(),
|
||||
dynamicImportGuardPlugin(),
|
||||
dynamicImportSwitcherPlugin(argv.build),
|
||||
nodeExternalsPlugin({
|
||||
packagePath: getPackagePaths(),
|
||||
}),
|
||||
@@ -250,27 +66,7 @@ esbuild
|
||||
sourcemap: "inline",
|
||||
target: "node22",
|
||||
})
|
||||
.then((result) => {
|
||||
// Check if there were any errors in the build result
|
||||
if (result.errors && result.errors.length > 0) {
|
||||
console.error(`Build failed with ${result.errors.length} error(s):`);
|
||||
result.errors.forEach((error, i) => {
|
||||
console.error(`${i + 1}. ${error.text}`);
|
||||
if (error.notes) {
|
||||
error.notes.forEach(note => {
|
||||
console.error(` - ${note.text}`);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// remove the output file if it was created
|
||||
if (fs.existsSync(argv.out)) {
|
||||
fs.unlinkSync(argv.out);
|
||||
}
|
||||
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
.then(() => {
|
||||
console.log("Build completed successfully");
|
||||
})
|
||||
.catch((error) => {
|
||||
|
||||
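The Dockerfile earlier in this compare shows the intended invocation of this script. For a local OSS server bundle the call is:

```bash
# Bundle the server entry point; -e/-o map to the entry/out options,
# and -b selects the build type (oss | saas | enterprise).
node esbuild.mjs -e server/index.ts -o dist/server.mjs -b oss
```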
@@ -18,11 +18,7 @@ put-back:
mv main.go.bak main.go

dev-update-versions:
if [ -z "$(tag)" ]; then \
PANGOLIN_VERSION=$$(curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name'); \
else \
PANGOLIN_VERSION=$(tag); \
fi && \
PANGOLIN_VERSION=$$(curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name') && \
GERBIL_VERSION=$$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name') && \
BADGER_VERSION=$$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name') && \
echo "Latest versions - Pangolin: $$PANGOLIN_VERSION, Gerbil: $$GERBIL_VERSION, Badger: $$BADGER_VERSION" && \
@@ -1,10 +1,15 @@
# To see all available options, please visit the docs:
# https://docs.pangolin.net/
# https://docs.digpangolin.com/self-host/advanced/config-file

gerbil:
start_port: 51820
base_endpoint: "{{.DashboardDomain}}"

{{if .HybridMode}}
managed:
id: "{{.HybridId}}"
secret: "{{.HybridSecret}}"

{{else}}
app:
dashboard_url: "https://{{.DashboardDomain}}"
log_level: "info"
@@ -14,6 +19,7 @@ app:
domains:
domain1:
base_domain: "{{.BaseDomain}}"
cert_resolver: "letsencrypt"

server:
secret: "{{.Secret}}"
@@ -22,7 +28,6 @@ server:
methods: ["GET", "POST", "PUT", "DELETE", "PATCH"]
allowed_headers: ["X-CSRF-Token", "Content-Type"]
credentials: false
{{if .EnableGeoblocking}}maxmind_db_path: "./config/GeoLite2-Country.mmdb"{{end}}
{{if .EnableEmail}}
email:
smtp_host: "{{.EmailSMTPHost}}"
@@ -36,3 +41,4 @@ flags:
disable_signup_without_invite: true
disable_user_create_org: false
allow_raw_resources: true
{{end}}
@@ -6,6 +6,8 @@ services:
restart: unless-stopped
volumes:
- ./config:/app/config
- pangolin-data:/var/certificates
- pangolin-data:/var/dynamic
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3001/api/v1/"]
interval: "10s"
@@ -20,7 +22,7 @@ services:
pangolin:
condition: service_healthy
command:
- --reachableAt=http://gerbil:3004
- --reachableAt=http://gerbil:3003
- --generateAndSaveKeyTo=/var/config/key
- --remoteConfig=http://pangolin:3001/api/v1/
volumes:
@@ -31,11 +33,11 @@ services:
ports:
- 51820:51820/udp
- 21820:21820/udp
- 443:443
- 443:{{if .HybridMode}}8443{{else}}443{{end}}
- 80:80
{{end}}
traefik:
image: docker.io/traefik:v3.6
image: docker.io/traefik:v3.5
container_name: traefik
restart: unless-stopped
{{if .InstallGerbil}}
@@ -54,9 +56,15 @@ services:
- ./config/traefik:/etc/traefik:ro # Volume to store the Traefik configuration
- ./config/letsencrypt:/letsencrypt # Volume to store the Let's Encrypt certificates
- ./config/traefik/logs:/var/log/traefik # Volume to store Traefik logs
# Shared volume for certificates and dynamic config in file mode
- pangolin-data:/var/certificates:ro
- pangolin-data:/var/dynamic:ro

networks:
default:
driver: bridge
name: pangolin
{{if .EnableIPv6}} enable_ipv6: true{{end}}

volumes:
pangolin-data:
@@ -51,12 +51,3 @@ http:
loadBalancer:
servers:
- url: "http://pangolin:3000" # API/WebSocket server

tcp:
serversTransports:
pp-transport-v1:
proxyProtocol:
version: 1
pp-transport-v2:
proxyProtocol:
version: 2
@@ -3,12 +3,17 @@ api:
dashboard: true

providers:
{{if not .HybridMode}}
http:
endpoint: "http://pangolin:3001/api/v1/traefik-config"
pollInterval: "5s"
file:
filename: "/etc/traefik/dynamic_config.yml"

{{else}}
file:
directory: "/var/dynamic"
watch: true
{{end}}
experimental:
plugins:
badger:
@@ -22,7 +27,7 @@ log:
maxBackups: 3
maxAge: 3
compress: true

{{if not .HybridMode}}
certificatesResolvers:
letsencrypt:
acme:
@@ -31,18 +36,22 @@ certificatesResolvers:
email: "{{.LetsEncryptEmail}}"
storage: "/letsencrypt/acme.json"
caServer: "https://acme-v02.api.letsencrypt.org/directory"

{{end}}
entryPoints:
web:
address: ":80"
websecure:
address: ":443"
{{if .HybridMode}} proxyProtocol:
trustedIPs:
- 0.0.0.0/0
- ::1/128{{end}}
transport:
respondingTimeouts:
readTimeout: "30m"
http:
{{if not .HybridMode}} http:
tls:
certResolver: "letsencrypt"
certResolver: "letsencrypt"{{end}}

serversTransport:
insecureSkipVerify: true
@@ -73,7 +73,7 @@ func installDocker() error {
case strings.Contains(osRelease, "ID=ubuntu"):
installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
apt-get update &&
apt-get install -y apt-transport-https ca-certificates curl &&
apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
apt-get update &&
@@ -82,7 +82,7 @@ func installDocker() error {
case strings.Contains(osRelease, "ID=debian"):
installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
apt-get update &&
apt-get install -y apt-transport-https ca-certificates curl &&
apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
apt-get update &&
@@ -1,180 +0,0 @@
#!/bin/bash

# Get installer - Cross-platform installation script
# Usage: curl -fsSL https://raw.githubusercontent.com/fosrl/installer/refs/heads/main/get-installer.sh | bash

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# GitHub repository info
REPO="fosrl/pangolin"
GITHUB_API_URL="https://api.github.com/repos/${REPO}/releases/latest"

# Function to print colored output
print_status() {
echo -e "${GREEN}[INFO]${NC} $1"
}

print_warning() {
echo -e "${YELLOW}[WARN]${NC} $1"
}

print_error() {
echo -e "${RED}[ERROR]${NC} $1"
}

# Function to get latest version from GitHub API
get_latest_version() {
local latest_info

if command -v curl >/dev/null 2>&1; then
latest_info=$(curl -fsSL "$GITHUB_API_URL" 2>/dev/null)
elif command -v wget >/dev/null 2>&1; then
latest_info=$(wget -qO- "$GITHUB_API_URL" 2>/dev/null)
else
print_error "Neither curl nor wget is available. Please install one of them." >&2
exit 1
fi

if [ -z "$latest_info" ]; then
print_error "Failed to fetch latest version information" >&2
exit 1
fi

# Extract version from JSON response (works without jq)
local version=$(echo "$latest_info" | grep '"tag_name"' | head -1 | sed 's/.*"tag_name": *"\([^"]*\)".*/\1/')

if [ -z "$version" ]; then
print_error "Could not parse version from GitHub API response" >&2
exit 1
fi

# Remove 'v' prefix if present
version=$(echo "$version" | sed 's/^v//')

echo "$version"
}

# Detect OS and architecture
detect_platform() {
local os arch

# Detect OS - only support Linux
case "$(uname -s)" in
Linux*) os="linux" ;;
*)
print_error "Unsupported operating system: $(uname -s). Only Linux is supported."
exit 1
;;
esac

# Detect architecture - only support amd64 and arm64
case "$(uname -m)" in
x86_64|amd64) arch="amd64" ;;
arm64|aarch64) arch="arm64" ;;
*)
print_error "Unsupported architecture: $(uname -m). Only amd64 and arm64 are supported on Linux."
exit 1
;;
esac

echo "${os}_${arch}"
}

# Get installation directory
get_install_dir() {
# Install to the current directory
local install_dir="$(pwd)"
if [ ! -d "$install_dir" ]; then
print_error "Installation directory does not exist: $install_dir"
exit 1
fi
echo "$install_dir"
}

# Download and install installer
install_installer() {
local platform="$1"
local install_dir="$2"
local binary_name="installer_${platform}"

local download_url="${BASE_URL}/${binary_name}"
local temp_file="/tmp/installer"
local final_path="${install_dir}/installer"

print_status "Downloading installer from ${download_url}"

# Download the binary
if command -v curl >/dev/null 2>&1; then
curl -fsSL "$download_url" -o "$temp_file"
elif command -v wget >/dev/null 2>&1; then
wget -q "$download_url" -O "$temp_file"
else
print_error "Neither curl nor wget is available. Please install one of them."
exit 1
fi

# Create install directory if it doesn't exist
mkdir -p "$install_dir"

# Move binary to install directory
mv "$temp_file" "$final_path"

# Make executable
chmod +x "$final_path"

print_status "Installer downloaded to ${final_path}"
}

# Verify installation
verify_installation() {
local install_dir="$1"
local installer_path="${install_dir}/installer"

if [ -f "$installer_path" ] && [ -x "$installer_path" ]; then
print_status "Installation successful!"
return 0
else
print_error "Installation failed. Binary not found or not executable."
return 1
fi
}

# Main installation process
main() {
print_status "Installing latest version of installer..."

# Get latest version
print_status "Fetching latest version from GitHub..."
VERSION=$(get_latest_version)
print_status "Latest version: v${VERSION}"

# Set base URL with the fetched version
BASE_URL="https://github.com/${REPO}/releases/download/${VERSION}"

# Detect platform
PLATFORM=$(detect_platform)
print_status "Detected platform: ${PLATFORM}"

# Get install directory
INSTALL_DIR=$(get_install_dir)
print_status "Install directory: ${INSTALL_DIR}"

# Install installer
install_installer "$PLATFORM" "$INSTALL_DIR"

# Verify installation
if verify_installation "$INSTALL_DIR"; then
print_status "Installer is ready to use!"
else
exit 1
fi
}

# Run main function
main "$@"
@@ -3,8 +3,8 @@ module installer
go 1.24.0

require (
    golang.org/x/term v0.37.0
    golang.org/x/term v0.35.0
    gopkg.in/yaml.v3 v3.0.1
)

require golang.org/x/sys v0.38.0 // indirect
require golang.org/x/sys v0.36.0 // indirect

@@ -1,7 +1,7 @@
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=

204 install/main.go
@@ -2,6 +2,7 @@ package main

import (
    "bufio"
    "bytes"
    "embed"
    "fmt"
    "io"
@@ -47,15 +48,17 @@ type Config struct {
    InstallGerbil bool
    TraefikBouncerKey string
    DoCrowdsecInstall bool
    EnableGeoblocking bool
    Secret string
    HybridMode bool
    HybridId string
    HybridSecret string
}

type SupportedContainer string

const (
    Docker SupportedContainer = "docker"
    Podman SupportedContainer = "podman"
    Docker    SupportedContainer = "docker"
    Podman    SupportedContainer = "podman"
    Undefined SupportedContainer = "undefined"
)

@@ -95,6 +98,24 @@ func main() {

    fmt.Println("\n=== Generating Configuration Files ===")

    // If the secret and id are not generated then generate them
    if config.HybridMode && (config.HybridId == "" || config.HybridSecret == "") {
        // fmt.Println("Requesting hybrid credentials from cloud...")
        credentials, err := requestHybridCredentials()
        if err != nil {
            fmt.Printf("Error requesting hybrid credentials: %v\n", err)
            fmt.Println("Please obtain credentials manually from the dashboard and run the installer again.")
            os.Exit(1)
        }
        config.HybridId = credentials.RemoteExitNodeId
        config.HybridSecret = credentials.Secret
        fmt.Printf("Your managed credentials have been obtained successfully.\n")
        fmt.Printf(" ID: %s\n", config.HybridId)
        fmt.Printf(" Secret: %s\n", config.HybridSecret)
        fmt.Println("Take these to the Pangolin dashboard https://pangolin.fossorial.io to adopt your node.")
        readBool(reader, "Have you adopted your node?", true)
    }

    if err := createConfigFiles(config); err != nil {
        fmt.Printf("Error creating config files: %v\n", err)
        os.Exit(1)
@@ -104,15 +125,6 @@ func main() {

    fmt.Println("\nConfiguration files created successfully!")

    // Download MaxMind database if requested
    if config.EnableGeoblocking {
        fmt.Println("\n=== Downloading MaxMind Database ===")
        if err := downloadMaxMindDatabase(); err != nil {
            fmt.Printf("Error downloading MaxMind database: %v\n", err)
            fmt.Println("You can download it manually later if needed.")
        }
    }

    fmt.Println("\n=== Starting installation ===")

    if readBool(reader, "Would you like to install and start the containers?", true) {
@@ -160,34 +172,9 @@ func main() {
    } else {
        alreadyInstalled = true
        fmt.Println("Looks like you already installed Pangolin!")

        // Check if MaxMind database exists and offer to update it
        fmt.Println("\n=== MaxMind Database Update ===")
        if _, err := os.Stat("config/GeoLite2-Country.mmdb"); err == nil {
            fmt.Println("MaxMind GeoLite2 Country database found.")
            if readBool(reader, "Would you like to update the MaxMind database to the latest version?", false) {
                if err := downloadMaxMindDatabase(); err != nil {
                    fmt.Printf("Error updating MaxMind database: %v\n", err)
                    fmt.Println("You can try updating it manually later if needed.")
                }
            }
        } else {
            fmt.Println("MaxMind GeoLite2 Country database not found.")
            if readBool(reader, "Would you like to download the MaxMind GeoLite2 database for geoblocking functionality?", false) {
                if err := downloadMaxMindDatabase(); err != nil {
                    fmt.Printf("Error downloading MaxMind database: %v\n", err)
                    fmt.Println("You can try downloading it manually later if needed.")
                }
                // Now you need to update your config file accordingly to enable geoblocking
                fmt.Println("Please remember to update your config/config.yml file to enable geoblocking! \n")
                // add maxmind_db_path: "./config/GeoLite2-Country.mmdb" under server
                fmt.Println("Add the following line under the 'server' section:")
                fmt.Println(" maxmind_db_path: \"./config/GeoLite2-Country.mmdb\"")
            }
        }
    }

    if !checkIsCrowdsecInstalledInCompose() {
    if !checkIsCrowdsecInstalledInCompose() && !checkIsPangolinInstalledWithHybrid() {
        fmt.Println("\n=== CrowdSec Install ===")
        // check if crowdsec is installed
        if readBool(reader, "Would you like to install CrowdSec?", false) {
@@ -209,8 +196,8 @@ func main() {

    parsedURL, err := url.Parse(appConfig.DashboardURL)
    if err != nil {
        fmt.Printf("Error parsing URL: %v\n", err)
        return
        fmt.Printf("Error parsing URL: %v\n", err)
        return
    }

    config.DashboardDomain = parsedURL.Hostname()
@@ -238,11 +225,12 @@ func main() {
    }

    fmt.Println("CrowdSec installed successfully!")
    return
    }
    }
}

    if !alreadyInstalled || config.DoCrowdsecInstall {
    if !config.HybridMode && !alreadyInstalled {
        // Setup Token Section
        fmt.Println("\n=== Setup Token ===")

@@ -263,7 +251,9 @@ func main() {

    fmt.Println("\nInstallation complete!")

    fmt.Printf("\nTo complete the initial setup, please visit:\nhttps://%s/auth/initial-setup\n", config.DashboardDomain)
    if !config.HybridMode && !checkIsPangolinInstalledWithHybrid() {
        fmt.Printf("\nTo complete the initial setup, please visit:\nhttps://%s/auth/initial-setup\n", config.DashboardDomain)
    }
}

func podmanOrDocker(reader *bufio.Reader) SupportedContainer {
@@ -338,42 +328,66 @@ func collectUserInput(reader *bufio.Reader) Config {

    // Basic configuration
    fmt.Println("\n=== Basic Configuration ===")

    config.BaseDomain = readString(reader, "Enter your base domain (no subdomain e.g. example.com)", "")

    // Set default dashboard domain after base domain is collected
    defaultDashboardDomain := ""
    if config.BaseDomain != "" {
        defaultDashboardDomain = "pangolin." + config.BaseDomain
    }
    config.DashboardDomain = readString(reader, "Enter the domain for the Pangolin dashboard", defaultDashboardDomain)
    config.LetsEncryptEmail = readString(reader, "Enter email for Let's Encrypt certificates", "")
    config.InstallGerbil = readBool(reader, "Do you want to use Gerbil to allow tunneled connections", true)

    // Email configuration
    fmt.Println("\n=== Email Configuration ===")
    config.EnableEmail = readBool(reader, "Enable email functionality (SMTP)", false)

    if config.EnableEmail {
        config.EmailSMTPHost = readString(reader, "Enter SMTP host", "")
        config.EmailSMTPPort = readInt(reader, "Enter SMTP port (default 587)", 587)
        config.EmailSMTPUser = readString(reader, "Enter SMTP username", "")
        config.EmailSMTPPass = readString(reader, "Enter SMTP password", "") // Should this be readPassword?
        config.EmailNoReply = readString(reader, "Enter no-reply email address (often the same as SMTP username)", "")
    for {
        response := readString(reader, "Do you want to install Pangolin as a cloud-managed (beta) node? (yes/no)", "")
        if strings.EqualFold(response, "yes") || strings.EqualFold(response, "y") {
            config.HybridMode = true
            break
        } else if strings.EqualFold(response, "no") || strings.EqualFold(response, "n") {
            config.HybridMode = false
            break
        }
        fmt.Println("Please answer 'yes' or 'no'")
    }

    // Validate required fields
    if config.BaseDomain == "" {
        fmt.Println("Error: Domain name is required")
        os.Exit(1)
    }
    if config.LetsEncryptEmail == "" {
        fmt.Println("Error: Let's Encrypt email is required")
        os.Exit(1)
    }
    if config.EnableEmail && config.EmailNoReply == "" {
        fmt.Println("Error: No-reply email address is required when email is enabled")
        os.Exit(1)
    if config.HybridMode {
        alreadyHaveCreds := readBool(reader, "Do you already have credentials from the dashboard? If not, we will create them later", false)

        if alreadyHaveCreds {
            config.HybridId = readString(reader, "Enter your ID", "")
            config.HybridSecret = readString(reader, "Enter your secret", "")
        }

        // Try to get public IP as default
        publicIP := getPublicIP()
        if publicIP != "" {
            fmt.Printf("Detected public IP: %s\n", publicIP)
        }
        config.DashboardDomain = readString(reader, "The public addressable IP address for this node or a domain pointing to it", publicIP)
        config.InstallGerbil = true
    } else {
        config.BaseDomain = readString(reader, "Enter your base domain (no subdomain e.g. example.com)", "")

        // Set default dashboard domain after base domain is collected
        defaultDashboardDomain := ""
        if config.BaseDomain != "" {
            defaultDashboardDomain = "pangolin." + config.BaseDomain
        }
        config.DashboardDomain = readString(reader, "Enter the domain for the Pangolin dashboard", defaultDashboardDomain)
        config.LetsEncryptEmail = readString(reader, "Enter email for Let's Encrypt certificates", "")
        config.InstallGerbil = readBool(reader, "Do you want to use Gerbil to allow tunneled connections", true)

        // Email configuration
        fmt.Println("\n=== Email Configuration ===")
        config.EnableEmail = readBool(reader, "Enable email functionality (SMTP)", false)

        if config.EnableEmail {
            config.EmailSMTPHost = readString(reader, "Enter SMTP host", "")
            config.EmailSMTPPort = readInt(reader, "Enter SMTP port (default 587)", 587)
            config.EmailSMTPUser = readString(reader, "Enter SMTP username", "")
            config.EmailSMTPPass = readString(reader, "Enter SMTP password", "") // Should this be readPassword?
            config.EmailNoReply = readString(reader, "Enter no-reply email address", "")
        }

        // Validate required fields
        if config.BaseDomain == "" {
            fmt.Println("Error: Domain name is required")
            os.Exit(1)
        }
        if config.LetsEncryptEmail == "" {
            fmt.Println("Error: Let's Encrypt email is required")
            os.Exit(1)
        }
    }

    // Advanced configuration
@@ -381,7 +395,6 @@ func collectUserInput(reader *bufio.Reader) Config {
    fmt.Println("\n=== Advanced Configuration ===")

    config.EnableIPv6 = readBool(reader, "Is your server IPv6 capable?", true)
    config.EnableGeoblocking = readBool(reader, "Do you want to download the MaxMind GeoLite2 database for geoblocking functionality?", true)

    if config.DashboardDomain == "" {
        fmt.Println("Error: Dashboard Domain name is required")
@@ -416,6 +429,11 @@ func createConfigFiles(config Config) error {
        return nil
    }

    // the hybrid does not need the dynamic config
    if config.HybridMode && strings.Contains(path, "dynamic_config.yml") {
        return nil
    }

    // skip .DS_Store
    if strings.Contains(path, ".DS_Store") {
        return nil
@@ -645,30 +663,18 @@ func checkPortsAvailable(port int) error {
    return nil
}

func downloadMaxMindDatabase() error {
    fmt.Println("Downloading MaxMind GeoLite2 Country database...")

    // Download the GeoLite2 Country database
    if err := run("curl", "-L", "-o", "GeoLite2-Country.tar.gz",
        "https://github.com/GitSquared/node-geolite2-redist/raw/refs/heads/master/redist/GeoLite2-Country.tar.gz"); err != nil {
        return fmt.Errorf("failed to download GeoLite2 database: %v", err)
func checkIsPangolinInstalledWithHybrid() bool {
    // Check if config/config.yml exists and contains hybrid section
    if _, err := os.Stat("config/config.yml"); err != nil {
        return false
    }

    // Extract the database
    if err := run("tar", "-xzf", "GeoLite2-Country.tar.gz"); err != nil {
        return fmt.Errorf("failed to extract GeoLite2 database: %v", err)
    // Read config file to check for hybrid section
    content, err := os.ReadFile("config/config.yml")
    if err != nil {
        return false
    }

    // Find the .mmdb file and move it to the config directory
    if err := run("bash", "-c", "mv GeoLite2-Country_*/GeoLite2-Country.mmdb config/"); err != nil {
        return fmt.Errorf("failed to move GeoLite2 database to config directory: %v", err)
    }

    // Clean up the downloaded files
    if err := run("rm", "-rf", "GeoLite2-Country.tar.gz", "GeoLite2-Country_*"); err != nil {
        fmt.Printf("Warning: failed to clean up temporary files: %v\n", err)
    }

    fmt.Println("MaxMind GeoLite2 Country database downloaded successfully!")
    return nil
    // Check for hybrid section
    return bytes.Contains(content, []byte("managed:"))
}

110 install/quickStart.go Normal file
@@ -0,0 +1,110 @@
package main

import (
    "bytes"
    "encoding/base64"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "time"
)

const (
    FRONTEND_SECRET_KEY = "af4e4785-7e09-11f0-b93a-74563c4e2a7e"
    // CLOUD_API_URL = "https://pangolin.fossorial.io/api/v1/remote-exit-node/quick-start"
    CLOUD_API_URL = "https://pangolin.fossorial.io/api/v1/remote-exit-node/quick-start"
)

// HybridCredentials represents the response from the cloud API
type HybridCredentials struct {
    RemoteExitNodeId string `json:"remoteExitNodeId"`
    Secret string `json:"secret"`
}

// APIResponse represents the full response structure from the cloud API
type APIResponse struct {
    Data HybridCredentials `json:"data"`
}

// RequestPayload represents the request body structure
type RequestPayload struct {
    Token string `json:"token"`
}

func generateValidationToken() string {
    timestamp := time.Now().UnixMilli()
    data := fmt.Sprintf("%s|%d", FRONTEND_SECRET_KEY, timestamp)
    obfuscated := make([]byte, len(data))
    for i, char := range []byte(data) {
        obfuscated[i] = char + 5
    }
    return base64.StdEncoding.EncodeToString(obfuscated)
}

// requestHybridCredentials makes an HTTP POST request to the cloud API
// to get hybrid credentials (ID and secret)
func requestHybridCredentials() (*HybridCredentials, error) {
    // Generate validation token
    token := generateValidationToken()

    // Create request payload
    payload := RequestPayload{
        Token: token,
    }

    // Marshal payload to JSON
    jsonData, err := json.Marshal(payload)
    if err != nil {
        return nil, fmt.Errorf("failed to marshal request payload: %v", err)
    }

    // Create HTTP request
    req, err := http.NewRequest("POST", CLOUD_API_URL, bytes.NewBuffer(jsonData))
    if err != nil {
        return nil, fmt.Errorf("failed to create HTTP request: %v", err)
    }

    // Set headers
    req.Header.Set("Content-Type", "application/json")
    req.Header.Set("X-CSRF-Token", "x-csrf-protection")

    // Create HTTP client with timeout
    client := &http.Client{
        Timeout: 30 * time.Second,
    }

    // Make the request
    resp, err := client.Do(req)
    if err != nil {
        return nil, fmt.Errorf("failed to make HTTP request: %v", err)
    }
    defer resp.Body.Close()

    // Check response status
    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("API request failed with status code: %d", resp.StatusCode)
    }

    // Read response body for debugging
    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, fmt.Errorf("failed to read response body: %v", err)
    }

    // Print the raw JSON response for debugging
    // fmt.Printf("Raw JSON response: %s\n", string(body))

    // Parse response
    var apiResponse APIResponse
    if err := json.Unmarshal(body, &apiResponse); err != nil {
        return nil, fmt.Errorf("failed to decode API response: %v", err)
    }

    // Validate response data
    if apiResponse.Data.RemoteExitNodeId == "" || apiResponse.Data.Secret == "" {
        return nil, fmt.Errorf("invalid response: missing remoteExitNodeId or secret")
    }

    return &apiResponse.Data, nil
}
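A note on the validation token above: generateValidationToken encodes "FRONTEND_SECRET_KEY|<unix-millis>", shifts every byte up by 5, and base64-encodes the result. The server-side check is not part of this diff, but a minimal sketch of the inverse, assuming the endpoint simply undoes the shift and parses the timestamp (decodeValidationToken is a hypothetical helper, not code from this repository):

    package main

    import (
        "encoding/base64"
        "fmt"
        "strconv"
        "strings"
    )

    // decodeValidationToken reverses the obfuscation used by generateValidationToken:
    // base64-decode, subtract 5 from every byte, then split "key|timestampMillis".
    // Hypothetical sketch only; the real cloud-side validation is not shown in this diff.
    func decodeValidationToken(token string) (key string, tsMillis int64, err error) {
        raw, err := base64.StdEncoding.DecodeString(token)
        if err != nil {
            return "", 0, fmt.Errorf("invalid base64: %w", err)
        }
        plain := make([]byte, len(raw))
        for i, b := range raw {
            plain[i] = b - 5 // byte arithmetic wraps mod 256, so +5/-5 round-trips
        }
        parts := strings.SplitN(string(plain), "|", 2)
        if len(parts) != 2 {
            return "", 0, fmt.Errorf("unexpected token format")
        }
        tsMillis, err = strconv.ParseInt(parts[1], 10, 64)
        if err != nil {
            return "", 0, fmt.Errorf("invalid timestamp: %w", err)
        }
        return parts[0], tsMillis, nil
    }

A verifier would presumably compare key against the shared constant and reject stale timestamps; since the constant ships in the client binary, this is an obfuscation step rather than real authentication.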
(5 file diffs suppressed because they are too large)
1027 messages/fr-FR.json (file diff suppressed because it is too large)
(9 more file diffs suppressed because they are too large)
2099 messages/zh-TW.json (file diff suppressed because it is too large)
@@ -1,15 +1,12 @@
import type { NextConfig } from "next";
import createNextIntlPlugin from "next-intl/plugin";

const withNextIntl = createNextIntlPlugin();

const nextConfig: NextConfig = {
/** @type {import("next").NextConfig} */
const nextConfig = {
    eslint: {
        ignoreDuringBuilds: true
    },
    experimental: {
        reactCompiler: true
    },
    output: "standalone"
};

11743 package-lock.json (generated; file diff suppressed because it is too large)
146 package.json
@@ -19,58 +19,54 @@
    "db:sqlite:studio": "drizzle-kit studio --config=./drizzle.sqlite.config.ts",
    "db:pg:studio": "drizzle-kit studio --config=./drizzle.pg.config.ts",
    "db:clear-migrations": "rm -rf server/migrations",
    "set:oss": "echo 'export const build = \"oss\" as any;' > server/build.ts && cp tsconfig.oss.json tsconfig.json",
    "set:saas": "echo 'export const build = \"saas\" as any;' > server/build.ts && cp tsconfig.saas.json tsconfig.json",
    "set:enterprise": "echo 'export const build = \"enterprise\" as any;' > server/build.ts && cp tsconfig.enterprise.json tsconfig.json",
    "set:sqlite": "echo 'export * from \"./sqlite\";\nexport const driver: \"pg\" | \"sqlite\" = \"sqlite\";' > server/db/index.ts",
    "set:pg": "echo 'export * from \"./pg\";\nexport const driver: \"pg\" | \"sqlite\" = \"pg\";' > server/db/index.ts",
    "next:build": "next build",
    "set:oss": "echo 'export const build = \"oss\" as any;' > server/build.ts",
    "set:saas": "echo 'export const build = \"saas\" as any;' > server/build.ts",
    "set:enterprise": "echo 'export const build = \"enterprise\" as any;' > server/build.ts",
    "set:sqlite": "echo 'export * from \"./sqlite\";' > server/db/index.ts",
    "set:pg": "echo 'export * from \"./pg\";' > server/db/index.ts",
    "build:sqlite": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs",
    "build:pg": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs",
    "start": "ENVIRONMENT=prod node dist/migrations.mjs && ENVIRONMENT=prod NODE_ENV=development node --enable-source-maps dist/server.mjs",
    "email": "email dev --dir server/emails/templates --port 3005",
    "build:cli": "node esbuild.mjs -e cli/index.ts -o dist/cli.mjs"
    "build:cli": "node esbuild.mjs -e cli/index.ts -o dist/cli.mjs",
    "db:sqlite:seed-exit-node": "sqlite3 config/db/db.sqlite \"INSERT INTO exitNodes (exitNodeId, name, address, endpoint, publicKey, listenPort, reachableAt, maxConnections, online, lastPing, type, region) VALUES (null, 'test', '10.0.0.1/24', 'localhost', 'MJ44MpnWGxMZURgxW/fWXDFsejhabnEFYDo60LQwK3A=', 1234, 'http://localhost:3003', 123, 1, null, 'gerbil', null);\""
},
"dependencies": {
    "@asteasolutions/zod-to-openapi": "8.1.0",
    "@faker-js/faker": "^10.1.0",
    "@headlessui/react": "^2.2.9",
    "@aws-sdk/client-s3": "3.943.0",
    "@asteasolutions/zod-to-openapi": "^7.3.4",
    "@aws-sdk/client-s3": "3.837.0",
    "@hookform/resolvers": "5.2.2",
    "@monaco-editor/react": "^4.7.0",
    "@node-rs/argon2": "^2.0.2",
    "@oslojs/crypto": "1.0.1",
    "@oslojs/encoding": "1.1.0",
    "@radix-ui/react-avatar": "1.1.11",
    "@radix-ui/react-avatar": "1.1.10",
    "@radix-ui/react-checkbox": "1.3.3",
    "@radix-ui/react-collapsible": "1.1.12",
    "@radix-ui/react-dialog": "1.1.15",
    "@radix-ui/react-dropdown-menu": "2.1.16",
    "@radix-ui/react-icons": "1.3.2",
    "@radix-ui/react-label": "2.1.8",
    "@radix-ui/react-label": "2.1.7",
    "@radix-ui/react-popover": "1.1.15",
    "@radix-ui/react-progress": "^1.1.8",
    "@radix-ui/react-progress": "^1.1.7",
    "@radix-ui/react-radio-group": "1.3.8",
    "@radix-ui/react-scroll-area": "^1.2.10",
    "@radix-ui/react-select": "2.2.6",
    "@radix-ui/react-separator": "1.1.8",
    "@radix-ui/react-slot": "1.2.4",
    "@radix-ui/react-separator": "1.1.7",
    "@radix-ui/react-slot": "1.2.3",
    "@radix-ui/react-switch": "1.2.6",
    "@radix-ui/react-tabs": "1.1.13",
    "@radix-ui/react-toast": "1.2.15",
    "@radix-ui/react-tooltip": "^1.2.8",
    "@react-email/components": "0.5.7",
    "@react-email/render": "^1.3.2",
    "@react-email/components": "0.5.5",
    "@react-email/render": "^1.2.0",
    "@react-email/tailwind": "1.2.2",
    "@simplewebauthn/browser": "^13.2.2",
    "@simplewebauthn/server": "^13.2.2",
    "@simplewebauthn/browser": "^13.2.0",
    "@simplewebauthn/server": "^13.2.1",
    "@tailwindcss/forms": "^0.5.10",
    "@tanstack/react-query": "^5.90.6",
    "@tanstack/react-table": "8.21.3",
    "arctic": "^3.7.0",
    "axios": "^1.13.2",
    "axios": "^1.12.2",
    "better-sqlite3": "11.7.0",
    "canvas-confetti": "1.9.4",
    "canvas-confetti": "1.9.3",
    "class-variance-authority": "^0.7.1",
    "clsx": "2.1.1",
    "cmdk": "1.1.1",
@@ -79,103 +75,89 @@
    "cookies": "^0.9.1",
    "cors": "2.8.5",
    "crypto-js": "^4.2.0",
    "d3": "^7.9.0",
    "date-fns": "4.1.0",
    "drizzle-orm": "0.45.0",
    "eslint": "9.39.1",
    "eslint-config-next": "16.0.7",
    "express": "5.2.1",
    "express-rate-limit": "8.2.1",
    "glob": "11.1.0",
    "drizzle-orm": "0.44.6",
    "eslint": "9.35.0",
    "eslint-config-next": "15.5.4",
    "express": "5.1.0",
    "express-rate-limit": "8.1.0",
    "glob": "11.0.3",
    "helmet": "8.1.0",
    "http-errors": "2.0.1",
    "http-errors": "2.0.0",
    "i": "^0.3.7",
    "input-otp": "1.4.2",
    "ioredis": "5.8.2",
    "ioredis": "5.6.1",
    "jmespath": "^0.16.0",
    "js-yaml": "4.1.1",
    "js-yaml": "4.1.0",
    "jsonwebtoken": "^9.0.2",
    "lucide-react": "^0.556.0",
    "maxmind": "5.0.1",
    "lucide-react": "^0.544.0",
    "maxmind": "5.0.0",
    "moment": "2.30.1",
    "next": "15.5.7",
    "next-intl": "^4.4.0",
    "next": "15.5.4",
    "next-intl": "^4.3.9",
    "next-themes": "0.4.6",
    "nextjs-toploader": "^3.9.17",
    "node-cache": "5.1.2",
    "node-fetch": "3.3.2",
    "nodemailer": "7.0.11",
    "npm": "^11.6.4",
    "nprogress": "^0.2.0",
    "nodemailer": "7.0.6",
    "npm": "^11.6.1",
    "oslo": "1.2.1",
    "pg": "^8.16.2",
    "posthog-node": "^5.11.2",
    "posthog-node": "^5.8.4",
    "qrcode.react": "4.2.0",
    "react": "19.2.1",
    "react-day-picker": "9.11.3",
    "react-dom": "19.2.1",
    "react-easy-sort": "^1.8.0",
    "react-hook-form": "7.68.0",
    "react": "19.1.1",
    "react-dom": "19.1.1",
    "react-easy-sort": "^1.7.0",
    "react-hook-form": "7.62.0",
    "react-icons": "^5.5.0",
    "rebuild": "0.1.2",
    "recharts": "^2.15.4",
    "reodotdev": "^1.0.0",
    "resend": "^6.4.2",
    "semver": "^7.7.3",
    "resend": "^6.1.1",
    "semver": "^7.7.2",
    "stripe": "18.2.1",
    "swagger-ui-express": "^5.0.1",
    "topojson-client": "^3.1.0",
    "tailwind-merge": "3.4.0",
    "tailwind-merge": "3.3.1",
    "tw-animate-css": "^1.3.8",
    "uuid": "^13.0.0",
    "vaul": "1.1.2",
    "visionscarto-world-atlas": "^1.0.0",
    "winston": "3.18.3",
    "winston": "3.17.0",
    "winston-daily-rotate-file": "5.0.0",
    "ws": "8.18.3",
    "yaml": "^2.8.1",
    "yargs": "18.0.0",
    "zod": "4.1.12",
    "zod-validation-error": "5.0.0"
    "zod": "3.25.76",
    "zod-validation-error": "3.5.2"
},
"devDependencies": {
    "@dotenvx/dotenvx": "1.51.1",
    "@dotenvx/dotenvx": "1.51.0",
    "@esbuild-plugins/tsconfig-paths": "0.1.2",
    "@react-email/preview-server": "4.3.2",
    "@tailwindcss/postcss": "^4.1.17",
    "@tanstack/react-query-devtools": "^5.90.2",
    "@react-email/preview-server": "4.2.12",
    "@tailwindcss/postcss": "^4.1.14",
    "@types/better-sqlite3": "7.6.12",
    "@types/cookie-parser": "1.4.10",
    "@types/cookie-parser": "1.4.9",
    "@types/cors": "2.8.19",
    "@types/crypto-js": "^4.2.2",
    "@types/d3": "^7.4.3",
    "@types/express": "5.0.6",
    "@types/express": "5.0.3",
    "@types/express-session": "^1.18.2",
    "@types/jmespath": "^0.15.2",
    "@types/js-yaml": "4.0.9",
    "@types/jsonwebtoken": "^9.0.10",
    "@types/node": "24.10.1",
    "@types/nprogress": "^0.2.3",
    "@types/nodemailer": "7.0.4",
    "@types/pg": "8.15.6",
    "@types/react": "19.2.7",
    "@types/react-dom": "19.2.3",
    "@types/node": "24.6.2",
    "@types/nodemailer": "7.0.2",
    "@types/pg": "8.15.5",
    "@types/react": "19.1.16",
    "@types/react-dom": "19.1.9",
    "@types/semver": "^7.7.1",
    "@types/swagger-ui-express": "^4.1.8",
    "@types/topojson-client": "^3.1.5",
    "@types/ws": "8.18.1",
    "babel-plugin-react-compiler": "^1.0.0",
    "@types/yargs": "17.0.35",
    "drizzle-kit": "0.31.8",
    "esbuild": "0.27.1",
    "esbuild-node-externals": "1.20.1",
    "@types/yargs": "17.0.33",
    "drizzle-kit": "0.31.5",
    "esbuild": "0.25.10",
    "esbuild-node-externals": "1.18.0",
    "postcss": "^8",
    "react-email": "4.3.2",
    "react-email": "4.2.12",
    "tailwindcss": "^4.1.4",
    "tsc-alias": "1.8.16",
    "tsx": "4.21.0",
    "tsx": "4.20.6",
    "typescript": "^5",
    "typescript-eslint": "^8.46.3"
    "typescript-eslint": "^8.45.0"
},
"overrides": {
    "emblor": {
@@ -183,4 +165,4 @@
    "react-dom": "19.0.0"
}
}
}
}

Binary file not shown. (Before: 13 KiB, After: 34 KiB)
Binary file not shown. (Before: 13 KiB, After: 33 KiB)
@@ -7,21 +7,21 @@ import {
    errorHandlerMiddleware,
    notFoundMiddleware
} from "@server/middlewares";
import { authenticated, unauthenticated } from "#dynamic/routers/external";
import { router as wsRouter, handleWSUpgrade } from "#dynamic/routers/ws";
import { corsWithLoginPageSupport } from "@server/middlewares/private/corsWithLoginPage";
import { authenticated, unauthenticated } from "@server/routers/external";
import { router as wsRouter, handleWSUpgrade } from "@server/routers/ws";
import { logIncomingMiddleware } from "./middlewares/logIncoming";
import { csrfProtectionMiddleware } from "./middlewares/csrfProtection";
import helmet from "helmet";
import { stripeWebhookHandler } from "@server/routers/private/billing/webhooks";
import { build } from "./build";
import rateLimit, { ipKeyGenerator } from "express-rate-limit";
import createHttpError from "http-errors";
import HttpCode from "./types/HttpCode";
import requestTimeoutMiddleware from "./middlewares/requestTimeout";
import { createStore } from "#dynamic/lib/rateLimitStore";
import { createStore } from "@server/lib/private/rateLimitStore";
import hybridRouter from "@server/routers/private/hybrid";
import { stripDuplicateSesions } from "./middlewares/stripDuplicateSessions";
import { corsWithLoginPageSupport } from "@server/lib/corsWithLoginPage";
import { hybridRouter } from "#dynamic/routers/hybrid";
import { billingWebhookHandler } from "#dynamic/routers/billing/webhooks";

const dev = config.isDev;
const externalPort = config.getRawConfig().server.external_port;
@@ -39,30 +39,32 @@ export function createApiServer() {
    apiServer.post(
        `${prefix}/billing/webhooks`,
        express.raw({ type: "application/json" }),
        billingWebhookHandler
        stripeWebhookHandler
    );
}

const corsConfig = config.getRawConfig().server.cors;
const options = {
    ...(corsConfig?.origins
        ? { origin: corsConfig.origins }
        : {
            origin: (origin: any, callback: any) => {
                callback(null, true);
            }
        }),
    ...(corsConfig?.methods && { methods: corsConfig.methods }),
    ...(corsConfig?.allowed_headers && {
        allowedHeaders: corsConfig.allowed_headers
    }),
    credentials: !(corsConfig?.credentials === false)
};

if (build == "oss" || !corsConfig) {
if (build == "oss") {
    const options = {
        ...(corsConfig?.origins
            ? { origin: corsConfig.origins }
            : {
                origin: (origin: any, callback: any) => {
                    callback(null, true);
                }
            }),
        ...(corsConfig?.methods && { methods: corsConfig.methods }),
        ...(corsConfig?.allowed_headers && {
            allowedHeaders: corsConfig.allowed_headers
        }),
        credentials: !(corsConfig?.credentials === false)
    };

    logger.debug("Using CORS options", options);

    apiServer.use(cors(options));
} else if (corsConfig) {
} else {
    // Use the custom CORS middleware with loginPage support
    apiServer.use(corsWithLoginPageSupport(corsConfig));
}
@@ -79,12 +81,6 @@ export function createApiServer() {
    // Add request timeout middleware
    apiServer.use(requestTimeoutMiddleware(60000)); // 60 second timeout

    apiServer.use(logIncomingMiddleware);

    if (build !== "oss") {
        apiServer.use(`${prefix}/hybrid`, hybridRouter); // put before rate limiting because we will rate limit there separately because some of the routes are heavily used
    }

    if (!dev) {
        apiServer.use(
            rateLimit({
@@ -107,7 +103,11 @@ export function createApiServer() {
    }

    // API routes
    apiServer.use(logIncomingMiddleware);
    apiServer.use(prefix, unauthenticated);
    if (build !== "oss") {
        apiServer.use(`${prefix}/hybrid`, hybridRouter);
    }
    apiServer.use(prefix, authenticated);

    // WebSocket routes

@@ -4,6 +4,7 @@ import { userActions, roleActions, userOrgs } from "@server/db";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { sendUsageNotification } from "@server/routers/org";

export enum ActionsEnum {
    createOrgUser = "createOrgUser",
@@ -19,7 +20,6 @@ export enum ActionsEnum {
    getSite = "getSite",
    listSites = "listSites",
    updateSite = "updateSite",
    reGenerateSecret = "reGenerateSecret",
    createResource = "createResource",
    deleteResource = "deleteResource",
    getResource = "getResource",
@@ -61,7 +61,6 @@ export enum ActionsEnum {
    getUser = "getUser",
    setResourcePassword = "setResourcePassword",
    setResourcePincode = "setResourcePincode",
    setResourceHeaderAuth = "setResourceHeaderAuth",
    setResourceWhitelist = "setResourceWhitelist",
    getResourceWhitelist = "getResourceWhitelist",
    generateAccessToken = "generateAccessToken",
@@ -82,11 +81,7 @@ export enum ActionsEnum {
    listClients = "listClients",
    getClient = "getClient",
    listOrgDomains = "listOrgDomains",
    getDomain = "getDomain",
    updateOrgDomain = "updateOrgDomain",
    getDNSRecords = "getDNSRecords",
    createNewt = "createNewt",
    createOlm = "createOlm",
    createIdp = "createIdp",
    updateIdp = "updateIdp",
    deleteIdp = "deleteIdp",
@@ -121,11 +116,7 @@ export enum ActionsEnum {
    updateLoginPage = "updateLoginPage",
    getLoginPage = "getLoginPage",
    deleteLoginPage = "deleteLoginPage",
    listBlueprints = "listBlueprints",
    getBlueprint = "getBlueprint",
    applyBlueprint = "applyBlueprint",
    viewLogs = "viewLogs",
    exportLogs = "exportLogs"
    applyBlueprint = "applyBlueprint"
}

export async function checkUserActionPermission(
@@ -202,6 +193,8 @@ export async function checkUserActionPermission(
        .limit(1);

    return roleActionPermission.length > 0;

    return false;
} catch (error) {
    console.error("Error checking user action permission:", error);
    throw createHttpError(

@@ -36,15 +36,12 @@ export async function createSession(
    const sessionId = encodeHexLowerCase(
        sha256(new TextEncoder().encode(token))
    );
    const [session] = await db
        .insert(sessions)
        .values({
            sessionId: sessionId,
            userId,
            expiresAt: new Date(Date.now() + SESSION_COOKIE_EXPIRES).getTime(),
            issuedAt: new Date().getTime()
        })
        .returning();
    const session: Session = {
        sessionId: sessionId,
        userId,
        expiresAt: new Date(Date.now() + SESSION_COOKIE_EXPIRES).getTime()
    };
    await db.insert(sessions).values(session);
    return session;
}

@@ -4,6 +4,9 @@ import { resourceSessions, ResourceSession } from "@server/db";
import { db } from "@server/db";
import { eq, and } from "drizzle-orm";
import config from "@server/lib/config";
import axios from "axios";
import logger from "@server/logger";
import { tokenManager } from "@server/lib/tokenManager";

export const SESSION_COOKIE_NAME =
    config.getRawConfig().server.session_cookie_name;
@@ -50,8 +53,7 @@ export async function createResourceSession(opts: {
    doNotExtend: opts.doNotExtend || false,
    accessTokenId: opts.accessTokenId || null,
    isRequestToken: opts.isRequestToken || false,
    userSessionId: opts.userSessionId || null,
    issuedAt: new Date().getTime()
    userSessionId: opts.userSessionId || null
};

await db.insert(resourceSessions).values(session);
@@ -63,6 +65,29 @@ export async function validateResourceSessionToken(
    token: string,
    resourceId: number
): Promise<ResourceSessionValidationResult> {
    if (config.isManagedMode()) {
        try {
            const response = await axios.post(`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/resource/${resourceId}/session/validate`, {
                token: token
            }, await tokenManager.getAuthHeader());
            return response.data.data;
        } catch (error) {
            if (axios.isAxiosError(error)) {
                logger.error("Error validating resource session token in hybrid mode:", {
                    message: error.message,
                    code: error.code,
                    status: error.response?.status,
                    statusText: error.response?.statusText,
                    url: error.config?.url,
                    method: error.config?.method
                });
            } else {
                logger.error("Error validating resource session token in hybrid mode:", error);
            }
            return { resourceSession: null };
        }
    }

    const sessionId = encodeHexLowerCase(
        sha256(new TextEncoder().encode(token))
    );

@@ -1,43 +1,9 @@
import { Request } from "express";
import {
    validateSessionToken,
    SESSION_COOKIE_NAME
} from "@server/auth/sessions/app";
import { validateSessionToken, SESSION_COOKIE_NAME } from "@server/auth/sessions/app";

export async function verifySession(req: Request, forceLogin?: boolean) {
export async function verifySession(req: Request) {
    const res = await validateSessionToken(
        req.cookies[SESSION_COOKIE_NAME] ?? ""
        req.cookies[SESSION_COOKIE_NAME] ?? "",
    );

    if (!forceLogin) {
        return res;
    }
    if (!res.session || !res.user) {
        return {
            session: null,
            user: null
        };
    }
    if (res.session.deviceAuthUsed) {
        return {
            session: null,
            user: null
        };
    }
    if (!res.session.issuedAt) {
        return {
            session: null,
            user: null
        };
    }
    const mins = 5 * 60 * 1000;
    const now = new Date().getTime();
    if (now - res.session.issuedAt > mins) {
        return {
            session: null,
            user: null
        };
    }

    return res;
}

@@ -1,13 +0,0 @@
import { cleanup as wsCleanup } from "#dynamic/routers/ws";

async function cleanup() {
    await wsCleanup();

    process.exit(0);
}

export async function initCleanup() {
    // Handle process termination
    process.on("SIGTERM", () => cleanup());
    process.on("SIGINT", () => cleanup());
}
@@ -1,7 +1,6 @@
import { join } from "path";
import { readFileSync } from "fs";
import { clients, db, resources, siteResources } from "@server/db";
import { randomInt } from "crypto";
import { db, resources, siteResources } from "@server/db";
import { exitNodes, sites } from "@server/db";
import { eq, and } from "drizzle-orm";
import { __DIRNAME } from "@server/lib/consts";
@@ -16,25 +15,6 @@ if (!dev) {
}
export const names = JSON.parse(readFileSync(file, "utf-8"));

export async function getUniqueClientName(orgId: string): Promise<string> {
    let loops = 0;
    while (true) {
        if (loops > 100) {
            throw new Error("Could not generate a unique name");
        }

        const name = generateName();
        const count = await db
            .select({ niceId: clients.niceId, orgId: clients.orgId })
            .from(clients)
            .where(and(eq(clients.niceId, name), eq(clients.orgId, orgId)));
        if (count.length === 0) {
            return name;
        }
        loops++;
    }
}

export async function getUniqueSiteName(orgId: string): Promise<string> {
    let loops = 0;
    while (true) {
@@ -62,36 +42,18 @@ export async function getUniqueResourceName(orgId: string): Promise<string> {
        }

        const name = generateName();
        const [resourceCount, siteResourceCount] = await Promise.all([
            db
                .select({ niceId: resources.niceId, orgId: resources.orgId })
                .from(resources)
                .where(
                    and(eq(resources.niceId, name), eq(resources.orgId, orgId))
                ),
            db
                .select({
                    niceId: siteResources.niceId,
                    orgId: siteResources.orgId
                })
                .from(siteResources)
                .where(
                    and(
                        eq(siteResources.niceId, name),
                        eq(siteResources.orgId, orgId)
                    )
                )
        ]);
        if (resourceCount.length === 0 && siteResourceCount.length === 0) {
        const count = await db
            .select({ niceId: resources.niceId, orgId: resources.orgId })
            .from(resources)
            .where(and(eq(resources.niceId, name), eq(resources.orgId, orgId)));
        if (count.length === 0) {
            return name;
        }
        loops++;
    }
}

export async function getUniqueSiteResourceName(
    orgId: string
): Promise<string> {
export async function getUniqueSiteResourceName(orgId: string): Promise<string> {
    let loops = 0;
    while (true) {
        if (loops > 100) {
@@ -99,27 +61,11 @@ export async function getUniqueSiteResourceName(
        }

        const name = generateName();
        const [resourceCount, siteResourceCount] = await Promise.all([
            db
                .select({ niceId: resources.niceId, orgId: resources.orgId })
                .from(resources)
                .where(
                    and(eq(resources.niceId, name), eq(resources.orgId, orgId))
                ),
            db
                .select({
                    niceId: siteResources.niceId,
                    orgId: siteResources.orgId
                })
                .from(siteResources)
                .where(
                    and(
                        eq(siteResources.niceId, name),
                        eq(siteResources.orgId, orgId)
                    )
                )
        ]);
        if (resourceCount.length === 0 && siteResourceCount.length === 0) {
        const count = await db
            .select({ niceId: siteResources.niceId, orgId: siteResources.orgId })
            .from(siteResources)
            .where(and(eq(siteResources.niceId, name), eq(siteResources.orgId, orgId)));
        if (count.length === 0) {
            return name;
        }
        loops++;
@@ -128,7 +74,9 @@ export async function getUniqueSiteResourceName(

export async function getUniqueExitNodeEndpointName(): Promise<string> {
    let loops = 0;
    const count = await db.select().from(exitNodes);
    const count = await db
        .select()
        .from(exitNodes);
    while (true) {
        if (loops > 100) {
            throw new Error("Could not generate a unique name");
@@ -147,11 +95,14 @@ export async function getUniqueExitNodeEndpointName(): Promise<string> {
    }
}


export function generateName(): string {
    const name = (
        names.descriptors[randomInt(names.descriptors.length)] +
        names.descriptors[
            Math.floor(Math.random() * names.descriptors.length)
        ] +
        "-" +
        names.animals[randomInt(names.animals.length)]
        names.animals[Math.floor(Math.random() * names.animals.length)]
    )
        .toLowerCase()
        .replace(/\s/g, "-");

@@ -13,12 +13,9 @@ function createDb() {
    connection_string: process.env.POSTGRES_CONNECTION_STRING
};
if (process.env.POSTGRES_REPLICA_CONNECTION_STRINGS) {
    const replicas =
        process.env.POSTGRES_REPLICA_CONNECTION_STRINGS.split(
            ","
        ).map((conn) => ({
            connection_string: conn.trim()
        }));
    const replicas = process.env.POSTGRES_REPLICA_CONNECTION_STRINGS.split(",").map((conn) => ({
        connection_string: conn.trim()
    }));
    config.postgres.replicas = replicas;
}
} else {
@@ -38,49 +35,32 @@ function createDb() {
}

// Create connection pools instead of individual connections
const poolConfig = config.postgres.pool;
const primaryPool = new Pool({
    connectionString,
    max: poolConfig?.max_connections || 20,
    idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
    connectionTimeoutMillis: poolConfig?.connection_timeout_ms || 5000
    max: 20,
    idleTimeoutMillis: 30000,
    connectionTimeoutMillis: 5000,
});

const replicas = [];

if (!replicaConnections.length) {
    replicas.push(
        DrizzlePostgres(primaryPool, {
            logger: process.env.QUERY_LOGGING == "true"
        })
    );
    replicas.push(DrizzlePostgres(primaryPool));
} else {
    for (const conn of replicaConnections) {
        const replicaPool = new Pool({
            connectionString: conn.connection_string,
            max: poolConfig?.max_replica_connections || 20,
            idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
            connectionTimeoutMillis:
                poolConfig?.connection_timeout_ms || 5000
            max: 10,
            idleTimeoutMillis: 30000,
            connectionTimeoutMillis: 5000,
        });
        replicas.push(
            DrizzlePostgres(replicaPool, {
                logger: process.env.QUERY_LOGGING == "true"
            })
        );
        replicas.push(DrizzlePostgres(replicaPool));
    }
}

return withReplicas(
    DrizzlePostgres(primaryPool, {
        logger: process.env.QUERY_LOGGING == "true"
    }),
    replicas as any
);
return withReplicas(DrizzlePostgres(primaryPool), replicas as any);
}

export const db = createDb();
export default db;
export type Transaction = Parameters<
    Parameters<(typeof db)["transaction"]>[0]
>[0];
export type Transaction = Parameters<Parameters<typeof db["transaction"]>[0]>[0];
@@ -1,3 +1,3 @@
export * from "./driver";
export * from "./schema/schema";
export * from "./schema/privateSchema";
export * from "./schema";
export * from "./privateSchema";

@@ -11,7 +11,6 @@ const runMigrations = async () => {
        migrationsFolder: migrationsFolder
    });
    console.log("Migrations completed successfully.");
    process.exit(0);
} catch (error) {
    console.error("Error running migrations:", error);
    process.exit(1);

@@ -1,3 +1,16 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import {
    pgTable,
    serial,
@@ -6,8 +19,7 @@ import {
    integer,
    bigint,
    real,
    text,
    index
    text
} from "drizzle-orm/pg-core";
import { InferSelectModel } from "drizzle-orm";
import { domains, orgs, targets, users, exitNodes, sessions } from "./schema";
@@ -167,7 +179,6 @@ export const remoteExitNodes = pgTable("remoteExitNode", {
    secretHash: varchar("secretHash").notNull(),
    dateCreated: varchar("dateCreated").notNull(),
    version: varchar("version"),
    secondaryVersion: varchar("secondaryVersion"), // This is to detect the new nodes after the transition to pangolin-node
    exitNodeId: integer("exitNodeId").references(() => exitNodes.exitNodeId, {
        onDelete: "cascade"
    })
@@ -215,43 +226,6 @@ export const sessionTransferToken = pgTable("sessionTransferToken", {
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull()
});

export const actionAuditLog = pgTable("actionAuditLog", {
    id: serial("id").primaryKey(),
    timestamp: bigint("timestamp", { mode: "number" }).notNull(), // this is EPOCH time in seconds
    orgId: varchar("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    actorType: varchar("actorType", { length: 50 }).notNull(),
    actor: varchar("actor", { length: 255 }).notNull(),
    actorId: varchar("actorId", { length: 255 }).notNull(),
    action: varchar("action", { length: 100 }).notNull(),
    metadata: text("metadata")
}, (table) => ([
    index("idx_actionAuditLog_timestamp").on(table.timestamp),
    index("idx_actionAuditLog_org_timestamp").on(table.orgId, table.timestamp)
]));

export const accessAuditLog = pgTable("accessAuditLog", {
    id: serial("id").primaryKey(),
    timestamp: bigint("timestamp", { mode: "number" }).notNull(), // this is EPOCH time in seconds
    orgId: varchar("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    actorType: varchar("actorType", { length: 50 }),
    actor: varchar("actor", { length: 255 }),
    actorId: varchar("actorId", { length: 255 }),
    resourceId: integer("resourceId"),
    ip: varchar("ip", { length: 45 }),
    type: varchar("type", { length: 100 }).notNull(),
    action: boolean("action").notNull(),
    location: text("location"),
    userAgent: text("userAgent"),
    metadata: text("metadata")
}, (table) => ([
    index("idx_identityAuditLog_timestamp").on(table.timestamp),
    index("idx_identityAuditLog_org_timestamp").on(table.orgId, table.timestamp)
]));

export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;
export type Certificate = InferSelectModel<typeof certificates>;
@@ -269,5 +243,3 @@ export type RemoteExitNodeSession = InferSelectModel<
>;
export type ExitNodeOrg = InferSelectModel<typeof exitNodeOrgs>;
export type LoginPage = InferSelectModel<typeof loginPage>;
export type ActionAuditLog = InferSelectModel<typeof actionAuditLog>;
export type AccessAuditLog = InferSelectModel<typeof accessAuditLog>;
@@ -6,12 +6,10 @@ import {
|
||||
integer,
|
||||
bigint,
|
||||
real,
|
||||
text,
|
||||
index
|
||||
text
|
||||
} from "drizzle-orm/pg-core";
|
||||
import { InferSelectModel } from "drizzle-orm";
|
||||
import { randomUUID } from "crypto";
|
||||
import { alias } from "yargs";
|
||||
|
||||
export const domains = pgTable("domains", {
|
||||
domainId: varchar("domainId").primaryKey(),
|
||||
@@ -20,41 +18,15 @@ export const domains = pgTable("domains", {
|
||||
type: varchar("type"), // "ns", "cname", "wildcard"
|
||||
verified: boolean("verified").notNull().default(false),
|
||||
failed: boolean("failed").notNull().default(false),
|
||||
tries: integer("tries").notNull().default(0),
|
||||
certResolver: varchar("certResolver"),
|
||||
customCertResolver: varchar("customCertResolver"),
|
||||
preferWildcardCert: boolean("preferWildcardCert")
|
||||
});
|
||||
|
||||
export const dnsRecords = pgTable("dnsRecords", {
|
||||
id: serial("id").primaryKey(),
|
||||
domainId: varchar("domainId")
|
||||
.notNull()
|
||||
.references(() => domains.domainId, { onDelete: "cascade" }),
|
||||
recordType: varchar("recordType").notNull(), // "NS" | "CNAME" | "A" | "TXT"
|
||||
baseDomain: varchar("baseDomain"),
|
||||
value: varchar("value").notNull(),
|
||||
verified: boolean("verified").notNull().default(false)
|
||||
tries: integer("tries").notNull().default(0)
|
||||
});
|
||||
|
||||
export const orgs = pgTable("orgs", {
|
||||
orgId: varchar("orgId").primaryKey(),
|
||||
name: varchar("name").notNull(),
|
||||
subnet: varchar("subnet"),
|
||||
utilitySubnet: varchar("utilitySubnet"), // this is the subnet for utility addresses
|
||||
createdAt: text("createdAt"),
|
||||
requireTwoFactor: boolean("requireTwoFactor"),
|
||||
maxSessionLengthHours: integer("maxSessionLengthHours"),
|
||||
passwordExpiryDays: integer("passwordExpiryDays"),
|
||||
settingsLogRetentionDaysRequest: integer("settingsLogRetentionDaysRequest") // where 0 = dont keep logs and -1 = keep forever, and 9001 = end of the following year
|
||||
.notNull()
|
||||
.default(7),
|
||||
settingsLogRetentionDaysAccess: integer("settingsLogRetentionDaysAccess") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
|
||||
.notNull()
|
||||
.default(0),
|
||||
settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
|
||||
.notNull()
|
||||
.default(0)
|
||||
settings: text("settings") // JSON blob of org-specific settings
|
||||
});
|
||||

export const orgDomains = pgTable("orgDomains", {
@@ -90,7 +62,8 @@ export const sites = pgTable("sites", {
    publicKey: varchar("publicKey"),
    lastHolePunch: bigint("lastHolePunch", { mode: "number" }),
    listenPort: integer("listenPort"),
    dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true)
    dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true),
    remoteSubnets: text("remoteSubnets") // comma-separated list of subnets that this site can access
});

export const resources = pgTable("resources", {
@@ -127,11 +100,9 @@ export const resources = pgTable("resources", {
    setHostHeader: varchar("setHostHeader"),
    enableProxy: boolean("enableProxy").default(true),
    skipToIdpId: integer("skipToIdpId").references(() => idp.idpId, {
        onDelete: "set null"
        onDelete: "cascade"
    }),
    headers: text("headers"), // comma-separated list of headers to add to the request
    proxyProtocol: boolean("proxyProtocol").notNull().default(false),
    proxyProtocolVersion: integer("proxyProtocolVersion").default(1)
    headers: text("headers") // comma-separated list of headers to add to the request
});

export const targets = pgTable("targets", {
@@ -176,8 +147,7 @@ export const targetHealthCheck = pgTable("targetHealthCheck", {
    hcFollowRedirects: boolean("hcFollowRedirects").default(true),
    hcMethod: varchar("hcMethod").default("GET"),
    hcStatus: integer("hcStatus"), // http code
    hcHealth: text("hcHealth").default("unknown"), // "unknown", "healthy", "unhealthy"
    hcTlsServerName: text("hcTlsServerName"),
    hcHealth: text("hcHealth").default("unknown") // "unknown", "healthy", "unhealthy"
});

export const exitNodes = pgTable("exitNodes", {
@@ -206,41 +176,11 @@ export const siteResources = pgTable("siteResources", {
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    niceId: varchar("niceId").notNull(),
    name: varchar("name").notNull(),
    mode: varchar("mode").notNull(), // "host" | "cidr" | "port"
    protocol: varchar("protocol"), // only for port mode
    proxyPort: integer("proxyPort"), // only for port mode
    destinationPort: integer("destinationPort"), // only for port mode
    destination: varchar("destination").notNull(), // ip, cidr, hostname; validate against the mode
    enabled: boolean("enabled").notNull().default(true),
    alias: varchar("alias"),
    aliasAddress: varchar("aliasAddress")
});

export const clientSiteResources = pgTable("clientSiteResources", {
    clientId: integer("clientId")
        .notNull()
        .references(() => clients.clientId, { onDelete: "cascade" }),
    siteResourceId: integer("siteResourceId")
        .notNull()
        .references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});

export const roleSiteResources = pgTable("roleSiteResources", {
    roleId: integer("roleId")
        .notNull()
        .references(() => roles.roleId, { onDelete: "cascade" }),
    siteResourceId: integer("siteResourceId")
        .notNull()
        .references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});

export const userSiteResources = pgTable("userSiteResources", {
    userId: varchar("userId")
        .notNull()
        .references(() => users.userId, { onDelete: "cascade" }),
    siteResourceId: integer("siteResourceId")
        .notNull()
        .references(() => siteResources.siteResourceId, { onDelete: "cascade" })
    protocol: varchar("protocol").notNull(),
    proxyPort: integer("proxyPort").notNull(),
    destinationPort: integer("destinationPort").notNull(),
    destinationIp: varchar("destinationIp").notNull(),
    enabled: boolean("enabled").notNull().default(true)
});

export const users = pgTable("user", {
@@ -260,8 +200,7 @@ export const users = pgTable("user", {
    dateCreated: varchar("dateCreated").notNull(),
    termsAcceptedTimestamp: varchar("termsAcceptedTimestamp"),
    termsVersion: varchar("termsVersion"),
    serverAdmin: boolean("serverAdmin").notNull().default(false),
    lastPasswordChange: bigint("lastPasswordChange", { mode: "number" })
    serverAdmin: boolean("serverAdmin").notNull().default(false)
});

export const newts = pgTable("newt", {
@@ -287,9 +226,7 @@ export const sessions = pgTable("session", {
    userId: varchar("userId")
        .notNull()
        .references(() => users.userId, { onDelete: "cascade" }),
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull(),
    issuedAt: bigint("issuedAt", { mode: "number" }),
    deviceAuthUsed: boolean("deviceAuthUsed").notNull().default(false)
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull()
});

export const newtSessions = pgTable("newtSession", {
@@ -444,14 +381,6 @@ export const resourcePassword = pgTable("resourcePassword", {
    passwordHash: varchar("passwordHash").notNull()
});

export const resourceHeaderAuth = pgTable("resourceHeaderAuth", {
    headerAuthId: serial("headerAuthId").primaryKey(),
    resourceId: integer("resourceId")
        .notNull()
        .references(() => resources.resourceId, { onDelete: "cascade" }),
    headerAuthHash: varchar("headerAuthHash").notNull()
});

export const resourceAccessToken = pgTable("resourceAccessToken", {
    accessTokenId: varchar("accessTokenId").primaryKey(),
    orgId: varchar("orgId")
@@ -506,8 +435,7 @@ export const resourceSessions = pgTable("resourceSessions", {
        {
            onDelete: "cascade"
        }
    ),
    issuedAt: bigint("issuedAt", { mode: "number" })
    )
});

export const resourceWhitelist = pgTable("resourceWhitelist", {
@@ -538,6 +466,8 @@ export const resourceRules = pgTable("resourceRules", {
    resourceId: integer("resourceId")
        .notNull()
        .references(() => resources.resourceId, { onDelete: "cascade" }),
    templateRuleId: integer("templateRuleId")
        .references(() => templateRules.ruleId, { onDelete: "cascade" }),
    enabled: boolean("enabled").notNull().default(true),
    priority: integer("priority").notNull(),
    action: varchar("action").notNull(), // ACCEPT, DROP, PASS
@@ -545,6 +475,40 @@ export const resourceRules = pgTable("resourceRules", {
    value: varchar("value").notNull()
});

// Rule templates (reusable rule sets)
export const ruleTemplates = pgTable("ruleTemplates", {
    templateId: varchar("templateId").primaryKey(),
    orgId: varchar("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    name: varchar("name").notNull(),
    description: varchar("description"),
    createdAt: bigint("createdAt", { mode: "number" }).notNull()
});

// Rules within templates
export const templateRules = pgTable("templateRules", {
    ruleId: serial("ruleId").primaryKey(),
    templateId: varchar("templateId")
        .notNull()
        .references(() => ruleTemplates.templateId, { onDelete: "cascade" }),
    enabled: boolean("enabled").notNull().default(true),
    priority: integer("priority").notNull(),
    action: varchar("action").notNull(), // ACCEPT, DROP
    match: varchar("match").notNull(), // CIDR, IP, PATH
    value: varchar("value").notNull()
});

// Template assignments to resources
export const resourceTemplates = pgTable("resourceTemplates", {
    resourceId: integer("resourceId")
        .notNull()
        .references(() => resources.resourceId, { onDelete: "cascade" }),
    templateId: varchar("templateId")
        .notNull()
        .references(() => ruleTemplates.templateId, { onDelete: "cascade" })
});
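
// A hedged sketch (assumption, not part of this diff) of how assigning a rule
// template to a resource could materialize its rules as resourceRules rows,
// with templateRuleId linking each copy back to its origin so the copy is
// cascade-deleted when the template rule goes away.
async function applyRuleTemplate(resourceId: number, templateId: string) {
    const rules = await db
        .select()
        .from(templateRules)
        .where(eq(templateRules.templateId, templateId));
    for (const rule of rules) {
        await db.insert(resourceRules).values({
            resourceId,
            templateRuleId: rule.ruleId,
            enabled: rule.enabled,
            priority: rule.priority,
            action: rule.action,
            match: rule.match,
            value: rule.value
        });
    }
    // Record the assignment itself
    await db.insert(resourceTemplates).values({ resourceId, templateId });
}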

export const supporterKey = pgTable("supporterKey", {
    keyId: serial("keyId").primaryKey(),
    key: varchar("key").notNull(),
@@ -631,7 +595,7 @@ export const idpOrg = pgTable("idpOrg", {
});

export const clients = pgTable("clients", {
    clientId: serial("clientId").primaryKey(),
    clientId: serial("id").primaryKey(),
    orgId: varchar("orgId")
        .references(() => orgs.orgId, {
            onDelete: "cascade"
@@ -640,12 +604,6 @@ export const clients = pgTable("clients", {
    exitNodeId: integer("exitNode").references(() => exitNodes.exitNodeId, {
        onDelete: "set null"
    }),
    userId: text("userId").references(() => users.userId, {
        // optionally tied to a user and in this case delete when the user deletes
        onDelete: "cascade"
    }),
    niceId: varchar("niceId").notNull(),
    olmId: text("olmId"), // to lock it to a specific olm optionally
    name: varchar("name").notNull(),
    pubKey: varchar("pubKey"),
    subnet: varchar("subnet").notNull(),
@@ -660,40 +618,23 @@ export const clients = pgTable("clients", {
    maxConnections: integer("maxConnections")
});

export const clientSitesAssociationsCache = pgTable(
    "clientSitesAssociationsCache",
    {
        clientId: integer("clientId") // not a foreign key here so after it's deleted the rebuild function can delete it and send the message
            .notNull(),
        siteId: integer("siteId").notNull(),
        isRelayed: boolean("isRelayed").notNull().default(false),
        endpoint: varchar("endpoint"),
        publicKey: varchar("publicKey") // this will act as the session's public key for hole punching so we can track when it changes
    }
);

export const clientSiteResourcesAssociationsCache = pgTable(
    "clientSiteResourcesAssociationsCache",
    {
        clientId: integer("clientId") // not a foreign key here so after it's deleted the rebuild function can delete it and send the message
            .notNull(),
        siteResourceId: integer("siteResourceId").notNull()
    }
);
export const clientSites = pgTable("clientSites", {
    clientId: integer("clientId")
        .notNull()
        .references(() => clients.clientId, { onDelete: "cascade" }),
    siteId: integer("siteId")
        .notNull()
        .references(() => sites.siteId, { onDelete: "cascade" }),
    isRelayed: boolean("isRelayed").notNull().default(false),
    endpoint: varchar("endpoint")
});

export const olms = pgTable("olms", {
    olmId: varchar("id").primaryKey(),
    secretHash: varchar("secretHash").notNull(),
    dateCreated: varchar("dateCreated").notNull(),
    version: text("version"),
    agent: text("agent"),
    name: varchar("name"),
    clientId: integer("clientId").references(() => clients.clientId, {
        // we will switch this depending on the current org it wants to connect to
        onDelete: "set null"
    }),
    userId: text("userId").references(() => users.userId, {
        // optionally tied to a user and in this case delete when the user deletes
        onDelete: "cascade"
    })
});
@@ -758,72 +699,6 @@ export const setupTokens = pgTable("setupTokens", {
    dateUsed: varchar("dateUsed")
});

// Blueprint runs
export const blueprints = pgTable("blueprints", {
    blueprintId: serial("blueprintId").primaryKey(),
    orgId: text("orgId")
        .references(() => orgs.orgId, {
            onDelete: "cascade"
        })
        .notNull(),
    name: varchar("name").notNull(),
    source: varchar("source").notNull(),
    createdAt: integer("createdAt").notNull(),
    succeeded: boolean("succeeded").notNull(),
    contents: text("contents").notNull(),
    message: text("message")
});
export const requestAuditLog = pgTable(
    "requestAuditLog",
    {
        id: serial("id").primaryKey(),
        timestamp: integer("timestamp").notNull(), // this is EPOCH time in seconds
        orgId: text("orgId").references(() => orgs.orgId, {
            onDelete: "cascade"
        }),
        action: boolean("action").notNull(),
        reason: integer("reason").notNull(),
        actorType: text("actorType"),
        actor: text("actor"),
        actorId: text("actorId"),
        resourceId: integer("resourceId"),
        ip: text("ip"),
        location: text("location"),
        userAgent: text("userAgent"),
        metadata: text("metadata"),
        headers: text("headers"), // JSON blob
        query: text("query"), // JSON blob
        originalRequestURL: text("originalRequestURL"),
        scheme: text("scheme"),
        host: text("host"),
        path: text("path"),
        method: text("method"),
        tls: boolean("tls")
    },
    (table) => [
        index("idx_requestAuditLog_timestamp").on(table.timestamp),
        index("idx_requestAuditLog_org_timestamp").on(
            table.orgId,
            table.timestamp
        )
    ]
);

export const deviceWebAuthCodes = pgTable("deviceWebAuthCodes", {
    codeId: serial("codeId").primaryKey(),
    code: text("code").notNull().unique(),
    ip: text("ip"),
    city: text("city"),
    deviceName: text("deviceName"),
    applicationName: text("applicationName").notNull(),
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull(),
    createdAt: bigint("createdAt", { mode: "number" }).notNull(),
    verified: boolean("verified").notNull().default(false),
    userId: varchar("userId").references(() => users.userId, {
        onDelete: "cascade"
    })
});

export type Org = InferSelectModel<typeof orgs>;
export type User = InferSelectModel<typeof users>;
export type Site = InferSelectModel<typeof sites>;
@@ -851,7 +726,6 @@ export type UserOrg = InferSelectModel<typeof userOrgs>;
export type ResourceSession = InferSelectModel<typeof resourceSessions>;
export type ResourcePincode = InferSelectModel<typeof resourcePincode>;
export type ResourcePassword = InferSelectModel<typeof resourcePassword>;
export type ResourceHeaderAuth = InferSelectModel<typeof resourceHeaderAuth>;
export type ResourceOtp = InferSelectModel<typeof resourceOtp>;
export type ResourceAccessToken = InferSelectModel<typeof resourceAccessToken>;
export type ResourceWhitelist = InferSelectModel<typeof resourceWhitelist>;
@@ -864,7 +738,7 @@ export type ApiKey = InferSelectModel<typeof apiKeys>;
export type ApiKeyAction = InferSelectModel<typeof apiKeyActions>;
export type ApiKeyOrg = InferSelectModel<typeof apiKeyOrg>;
export type Client = InferSelectModel<typeof clients>;
export type ClientSite = InferSelectModel<typeof clientSitesAssociationsCache>;
export type ClientSite = InferSelectModel<typeof clientSites>;
export type Olm = InferSelectModel<typeof olms>;
export type OlmSession = InferSelectModel<typeof olmSessions>;
export type UserClient = InferSelectModel<typeof userClients>;
@@ -874,10 +748,6 @@ export type SiteResource = InferSelectModel<typeof siteResources>;
export type SetupToken = InferSelectModel<typeof setupTokens>;
export type HostMeta = InferSelectModel<typeof hostMeta>;
export type TargetHealthCheck = InferSelectModel<typeof targetHealthCheck>;
export type IdpOidcConfig = InferSelectModel<typeof idpOidcConfig>;
export type Blueprint = InferSelectModel<typeof blueprints>;
export type LicenseKey = InferSelectModel<typeof licenseKey>;
export type SecurityKey = InferSelectModel<typeof securityKeys>;
export type WebauthnChallenge = InferSelectModel<typeof webauthnChallenge>;
export type DeviceWebAuthCode = InferSelectModel<typeof deviceWebAuthCodes>;
export type RequestAuditLog = InferSelectModel<typeof requestAuditLog>;
export type RuleTemplate = InferSelectModel<typeof ruleTemplates>;
export type TemplateRule = InferSelectModel<typeof templateRules>;
export type ResourceTemplate = InferSelectModel<typeof resourceTemplates>;
@@ -12,7 +12,7 @@
*/

import logger from "@server/logger";
import redisManager from "#private/lib/redis";
import redisManager from "@server/db/private/redis";
import { build } from "@server/build";

// Rate limiting configuration
@@ -72,43 +72,6 @@ export class RateLimitService {
        return `ratelimit:${clientId}:${messageType}`;
    }

    // Helper function to clean up old timestamp fields from a Redis hash
    private async cleanupOldTimestamps(key: string, windowStart: number): Promise<void> {
        if (!redisManager.isRedisEnabled()) return;

        try {
            const client = redisManager.getClient();
            if (!client) return;

            // Get all fields in the hash
            const allData = await redisManager.hgetall(key);
            if (!allData || Object.keys(allData).length === 0) return;

            // Find fields that are older than the window
            const fieldsToDelete: string[] = [];
            for (const timestamp of Object.keys(allData)) {
                const time = parseInt(timestamp);
                if (time < windowStart) {
                    fieldsToDelete.push(timestamp);
                }
            }

            // Delete old fields in batches to avoid call stack size exceeded errors;
            // the spread operator can cause issues with very large arrays
            if (fieldsToDelete.length > 0) {
                const batchSize = 1000; // process 1000 fields at a time
                for (let i = 0; i < fieldsToDelete.length; i += batchSize) {
                    const batch = fieldsToDelete.slice(i, i + batchSize);
                    await client.hdel(key, ...batch);
                }
                logger.debug(`Cleaned up ${fieldsToDelete.length} old timestamp fields from ${key}`);
            }
        } catch (error) {
            logger.error(`Failed to cleanup old timestamps for key ${key}:`, error);
            // Don't throw - cleanup failures shouldn't block rate limiting
        }
    }
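
    // A minimal sketch (assumption, not part of this diff) of the sliding-window
    // read this service performs: each hash field is an epoch-second timestamp
    // whose value is a per-second counter, so current usage is the sum of all
    // fields newer than windowStart (cleanupOldTimestamps drops the rest).
    private async countInWindow(key: string, windowStart: number): Promise<number> {
        const data = await redisManager.hgetall(key); // e.g. { "1718000000": "3", ... }
        let count = 0;
        for (const [timestamp, value] of Object.entries(data ?? {})) {
            if (parseInt(timestamp) >= windowStart) {
                count += parseInt(value) || 0;
            }
        }
        return count;
    }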

    // Helper function to sync local rate limit data to Redis
    private async syncRateLimitToRedis(
        clientId: string,
@@ -118,12 +81,8 @@ export class RateLimitService {

        try {
            const currentTime = Math.floor(Date.now() / 1000);
            const windowStart = currentTime - RATE_LIMIT_WINDOW;
            const globalKey = this.getRateLimitKey(clientId);

            // Clean up old timestamp fields before writing
            await this.cleanupOldTimestamps(globalKey, windowStart);

            // Get current value and add pending count
            const currentValue = await redisManager.hget(
                globalKey,
@@ -134,7 +93,7 @@ export class RateLimitService {
            ).toString();
            await redisManager.hset(globalKey, currentTime.toString(), newValue);

            // Set TTL using the client directly - this prevents the key from persisting forever
            // Set TTL using the client directly
            if (redisManager.getClient()) {
                await redisManager
                    .getClient()
@@ -160,12 +119,8 @@ export class RateLimitService {

        try {
            const currentTime = Math.floor(Date.now() / 1000);
            const windowStart = currentTime - RATE_LIMIT_WINDOW;
            const messageTypeKey = this.getMessageTypeRateLimitKey(clientId, messageType);

            // Clean up old timestamp fields before writing
            await this.cleanupOldTimestamps(messageTypeKey, windowStart);

            // Get current value and add pending count
            const currentValue = await redisManager.hget(
                messageTypeKey,
@@ -180,7 +135,7 @@ export class RateLimitService {
                newValue
            );

            // Set TTL using the client directly - this prevents the key from persisting forever
            // Set TTL using the client directly
            if (redisManager.getClient()) {
                await redisManager
                    .getClient()
@@ -215,10 +170,6 @@ export class RateLimitService {

        try {
            const globalKey = this.getRateLimitKey(clientId);

            // Clean up old timestamp fields before reading
            await this.cleanupOldTimestamps(globalKey, windowStart);

            const globalRateLimitData = await redisManager.hgetall(globalKey);

            let count = 0;
@@ -264,10 +215,6 @@ export class RateLimitService {

        try {
            const messageTypeKey = this.getMessageTypeRateLimitKey(clientId, messageType);

            // Clean up old timestamp fields before reading
            await this.cleanupOldTimestamps(messageTypeKey, windowStart);

            const messageTypeRateLimitData = await redisManager.hgetall(messageTypeKey);

            let count = 0;
@@ -504,4 +451,8 @@ export class RateLimitService {
}

// Export singleton instance
export const rateLimitService = new RateLimitService();
export const rateLimitService = new RateLimitService();

// Handle process termination
process.on("SIGTERM", () => rateLimitService.cleanup());
process.on("SIGINT", () => rateLimitService.cleanup());
@@ -13,7 +13,7 @@

import Redis, { RedisOptions } from "ioredis";
import logger from "@server/logger";
import privateConfig from "#private/lib/config";
import config from "@server/lib/config";
import { build } from "@server/build";

class RedisManager {
@@ -46,7 +46,7 @@ class RedisManager {
            this.isEnabled = false;
            return;
        }
        this.isEnabled = privateConfig.getRawPrivateConfig().flags.enable_redis || false;
        this.isEnabled = config.getRawPrivateConfig().flags?.enable_redis || false;
        if (this.isEnabled) {
            this.initializeClients();
        }
@@ -93,7 +93,7 @@ class RedisManager {
    }

    private getRedisConfig(): RedisOptions {
        const redisConfig = privateConfig.getRawPrivateConfig().redis!;
        const redisConfig = config.getRawPrivateConfig().redis!;
        const opts: RedisOptions = {
            host: redisConfig.host!,
            port: redisConfig.port!,
@@ -108,7 +108,7 @@ class RedisManager {
    }

    private getReplicaRedisConfig(): RedisOptions | null {
        const redisConfig = privateConfig.getRawPrivateConfig().redis!;
        const redisConfig = config.getRawPrivateConfig().redis!;
        if (!redisConfig.replicas || redisConfig.replicas.length === 0) {
            return null;
        }
@@ -1,4 +1,4 @@
import { db, loginPage, LoginPage, loginPageOrg, Org, orgs } from "@server/db";
import { db, loginPage, LoginPage, loginPageOrg } from "@server/db";
import {
    Resource,
    ResourcePassword,
@@ -6,8 +6,6 @@ import {
    ResourceRule,
    resourcePassword,
    resourcePincode,
    resourceHeaderAuth,
    ResourceHeaderAuth,
    resourceRules,
    resources,
    roleResources,
@@ -17,13 +15,15 @@ import {
    users
} from "@server/db";
import { and, eq } from "drizzle-orm";
import axios from "axios";
import config from "@server/lib/config";
import logger from "@server/logger";
import { tokenManager } from "@server/lib/tokenManager";

export type ResourceWithAuth = {
    resource: Resource | null;
    pincode: ResourcePincode | null;
    password: ResourcePassword | null;
    headerAuth: ResourceHeaderAuth | null;
    org: Org;
};

export type UserSessionWithUser = {
@@ -37,6 +37,30 @@ export type UserSessionWithUser = {
export async function getResourceByDomain(
    domain: string
): Promise<ResourceWithAuth | null> {
    if (config.isManagedMode()) {
        try {
            const response = await axios.get(
                `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/resource/domain/${domain}`,
                await tokenManager.getAuthHeader()
            );
            return response.data.data;
        } catch (error) {
            if (axios.isAxiosError(error)) {
                logger.error("Error fetching resource by domain:", {
                    message: error.message,
                    code: error.code,
                    status: error.response?.status,
                    statusText: error.response?.statusText,
                    url: error.config?.url,
                    method: error.config?.method
                });
            } else {
                logger.error("Error fetching resource by domain:", error);
            }
            return null;
        }
    }
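
    // In managed ("hybrid") mode the lookup above is delegated to the cloud API;
    // otherwise we fall through to the local database query below.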

    const [result] = await db
        .select()
        .from(resources)
@@ -48,14 +72,6 @@ export async function getResourceByDomain(
            resourcePassword,
            eq(resourcePassword.resourceId, resources.resourceId)
        )
        .leftJoin(
            resourceHeaderAuth,
            eq(resourceHeaderAuth.resourceId, resources.resourceId)
        )
        .innerJoin(
            orgs,
            eq(orgs.orgId, resources.orgId)
        )
        .where(eq(resources.fullDomain, domain))
        .limit(1);

@@ -66,9 +82,7 @@ export async function getResourceByDomain(
    return {
        resource: result.resources,
        pincode: result.resourcePincode,
        password: result.resourcePassword,
        headerAuth: result.resourceHeaderAuth,
        org: result.orgs
        password: result.resourcePassword
    };
}

@@ -78,6 +92,30 @@ export async function getResourceByDomain(
export async function getUserSessionWithUser(
    userSessionId: string
): Promise<UserSessionWithUser | null> {
    if (config.isManagedMode()) {
        try {
            const response = await axios.get(
                `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/session/${userSessionId}`,
                await tokenManager.getAuthHeader()
            );
            return response.data.data;
        } catch (error) {
            if (axios.isAxiosError(error)) {
                logger.error("Error fetching user session:", {
                    message: error.message,
                    code: error.code,
                    status: error.response?.status,
                    statusText: error.response?.statusText,
                    url: error.config?.url,
                    method: error.config?.method
                });
            } else {
                logger.error("Error fetching user session:", error);
            }
            return null;
        }
    }

    const [res] = await db
        .select()
        .from(sessions)
@@ -98,6 +136,30 @@ export async function getUserSessionWithUser(
 * Get user organization role
 */
export async function getUserOrgRole(userId: string, orgId: string) {
    if (config.isManagedMode()) {
        try {
            const response = await axios.get(
                `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/user/${userId}/org/${orgId}/role`,
                await tokenManager.getAuthHeader()
            );
            return response.data.data;
        } catch (error) {
            if (axios.isAxiosError(error)) {
                logger.error("Error fetching user org role:", {
                    message: error.message,
                    code: error.code,
                    status: error.response?.status,
                    statusText: error.response?.statusText,
                    url: error.config?.url,
                    method: error.config?.method
                });
            } else {
                logger.error("Error fetching user org role:", error);
            }
            return null;
        }
    }

    const userOrgRole = await db
        .select()
        .from(userOrgs)
@@ -114,6 +176,30 @@ export async function getRoleResourceAccess(
    resourceId: number,
    roleId: number
) {
    if (config.isManagedMode()) {
        try {
            const response = await axios.get(
                `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/role/${roleId}/resource/${resourceId}/access`,
                await tokenManager.getAuthHeader()
            );
            return response.data.data;
        } catch (error) {
            if (axios.isAxiosError(error)) {
                logger.error("Error fetching role resource access:", {
                    message: error.message,
                    code: error.code,
                    status: error.response?.status,
                    statusText: error.response?.statusText,
                    url: error.config?.url,
                    method: error.config?.method
                });
            } else {
                logger.error("Error fetching role resource access:", error);
            }
            return null;
        }
    }

    const roleResourceAccess = await db
        .select()
        .from(roleResources)
@@ -135,6 +221,30 @@ export async function getUserResourceAccess(
    userId: string,
    resourceId: number
) {
    if (config.isManagedMode()) {
        try {
            const response = await axios.get(
                `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/user/${userId}/resource/${resourceId}/access`,
                await tokenManager.getAuthHeader()
            );
            return response.data.data;
        } catch (error) {
            if (axios.isAxiosError(error)) {
                logger.error("Error fetching user resource access:", {
                    message: error.message,
                    code: error.code,
                    status: error.response?.status,
                    statusText: error.response?.statusText,
                    url: error.config?.url,
                    method: error.config?.method
                });
            } else {
                logger.error("Error fetching user resource access:", error);
            }
            return null;
        }
    }

    const userResourceAccess = await db
        .select()
        .from(userResources)
@@ -155,6 +265,30 @@ export async function getUserResourceAccess(
export async function getResourceRules(
    resourceId: number
): Promise<ResourceRule[]> {
    if (config.isManagedMode()) {
        try {
            const response = await axios.get(
                `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/resource/${resourceId}/rules`,
                await tokenManager.getAuthHeader()
            );
            return response.data.data;
        } catch (error) {
            if (axios.isAxiosError(error)) {
                logger.error("Error fetching resource rules:", {
                    message: error.message,
                    code: error.code,
                    status: error.response?.status,
                    statusText: error.response?.statusText,
                    url: error.config?.url,
                    method: error.config?.method
                });
            } else {
                logger.error("Error fetching resource rules:", error);
            }
            return [];
        }
    }

    const rules = await db
        .select()
        .from(resourceRules)
@@ -169,6 +303,30 @@ export async function getResourceRules(
export async function getOrgLoginPage(
    orgId: string
): Promise<LoginPage | null> {
    if (config.isManagedMode()) {
        try {
            const response = await axios.get(
                `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/org/${orgId}/login-page`,
                await tokenManager.getAuthHeader()
            );
            return response.data.data;
        } catch (error) {
            if (axios.isAxiosError(error)) {
                logger.error("Error fetching org login page:", {
                    message: error.message,
                    code: error.code,
                    status: error.response?.status,
                    statusText: error.response?.statusText,
                    url: error.config?.url,
                    method: error.config?.method
                });
            } else {
                logger.error("Error fetching org login page:", error);
            }
            return null;
        }
    }

    const [result] = await db
        .select()
        .from(loginPageOrg)

@@ -1,6 +1,6 @@
import { drizzle as DrizzleSqlite } from "drizzle-orm/better-sqlite3";
import Database from "better-sqlite3";
import * as schema from "./schema/schema";
import * as schema from "./schema";
import path from "path";
import fs from "fs";
import { APP_PATH } from "@server/lib/consts";
@@ -13,16 +13,12 @@ bootstrapVolume();

function createDb() {
    const sqlite = new Database(location);
    return DrizzleSqlite(sqlite, {
        schema
    });
    return DrizzleSqlite(sqlite, { schema });
}

export const db = createDb();
export default db;
export type Transaction = Parameters<
    Parameters<(typeof db)["transaction"]>[0]
>[0];
export type Transaction = Parameters<Parameters<typeof db["transaction"]>[0]>[0];
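// Note: both Transaction forms above derive the type of the handle that
// db.transaction() passes to its callback, without naming drizzle internals.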

function checkFileExists(filePath: string): boolean {
    try {

@@ -1,3 +1,3 @@
export * from "./driver";
export * from "./schema/schema";
export * from "./schema/privateSchema";
export * from "./schema";
export * from "./privateSchema";
@@ -1,13 +1,24 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import {
    sqliteTable,
    integer,
    text,
    real,
    index
    real
} from "drizzle-orm/sqlite-core";
import { InferSelectModel } from "drizzle-orm";
import { domains, orgs, targets, users, exitNodes, sessions } from "./schema";
import { metadata } from "@app/app/[orgId]/settings/layout";

export const certificates = sqliteTable("certificates", {
    certId: integer("certId").primaryKey({ autoIncrement: true }),
@@ -162,7 +173,6 @@ export const remoteExitNodes = sqliteTable("remoteExitNode", {
    secretHash: text("secretHash").notNull(),
    dateCreated: text("dateCreated").notNull(),
    version: text("version"),
    secondaryVersion: text("secondaryVersion"), // This is to detect the new nodes after the transition to pangolin-node
    exitNodeId: integer("exitNodeId").references(() => exitNodes.exitNodeId, {
        onDelete: "cascade"
    })
@@ -210,43 +220,6 @@ export const sessionTransferToken = sqliteTable("sessionTransferToken", {
    expiresAt: integer("expiresAt").notNull()
});

export const actionAuditLog = sqliteTable("actionAuditLog", {
    id: integer("id").primaryKey({ autoIncrement: true }),
    timestamp: integer("timestamp").notNull(), // this is EPOCH time in seconds
    orgId: text("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    actorType: text("actorType").notNull(),
    actor: text("actor").notNull(),
    actorId: text("actorId").notNull(),
    action: text("action").notNull(),
    metadata: text("metadata")
}, (table) => ([
    index("idx_actionAuditLog_timestamp").on(table.timestamp),
    index("idx_actionAuditLog_org_timestamp").on(table.orgId, table.timestamp)
]));

export const accessAuditLog = sqliteTable("accessAuditLog", {
    id: integer("id").primaryKey({ autoIncrement: true }),
    timestamp: integer("timestamp").notNull(), // this is EPOCH time in seconds
    orgId: text("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    actorType: text("actorType"),
    actor: text("actor"),
    actorId: text("actorId"),
    resourceId: integer("resourceId"),
    ip: text("ip"),
    location: text("location"),
    type: text("type").notNull(),
    action: integer("action", { mode: "boolean" }).notNull(),
    userAgent: text("userAgent"),
    metadata: text("metadata")
}, (table) => ([
    index("idx_identityAuditLog_timestamp").on(table.timestamp),
    index("idx_identityAuditLog_org_timestamp").on(table.orgId, table.timestamp)
]));
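
// A hedged example (assumption, not part of this diff) of the query shape the
// composite (orgId, timestamp) indexes above are meant to serve: per-org audit
// reads bounded by a time window. Assumes: import { and, eq, gte, desc } from "drizzle-orm";
async function recentAccessAudit(orgId: string, sinceSec: number) {
    return db
        .select()
        .from(accessAuditLog)
        .where(and(eq(accessAuditLog.orgId, orgId), gte(accessAuditLog.timestamp, sinceSec)))
        .orderBy(desc(accessAuditLog.timestamp));
}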

export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;
export type Certificate = InferSelectModel<typeof certificates>;
@@ -264,5 +237,3 @@ export type RemoteExitNodeSession = InferSelectModel<
>;
export type ExitNodeOrg = InferSelectModel<typeof exitNodeOrgs>;
export type LoginPage = InferSelectModel<typeof loginPage>;
export type ActionAuditLog = InferSelectModel<typeof actionAuditLog>;
export type AccessAuditLog = InferSelectModel<typeof accessAuditLog>;
@@ -1,7 +1,6 @@
import { randomUUID } from "crypto";
import { InferSelectModel } from "drizzle-orm";
import { sqliteTable, text, integer, index } from "drizzle-orm/sqlite-core";
import { no } from "zod/v4/locales";
import { sqliteTable, text, integer } from "drizzle-orm/sqlite-core";

export const domains = sqliteTable("domains", {
    domainId: text("domainId").primaryKey(),
@@ -12,41 +11,15 @@ export const domains = sqliteTable("domains", {
    type: text("type"), // "ns", "cname", "wildcard"
    verified: integer("verified", { mode: "boolean" }).notNull().default(false),
    failed: integer("failed", { mode: "boolean" }).notNull().default(false),
    tries: integer("tries").notNull().default(0),
    certResolver: text("certResolver"),
    preferWildcardCert: integer("preferWildcardCert", { mode: "boolean" })
});

export const dnsRecords = sqliteTable("dnsRecords", {
    id: integer("id").primaryKey({ autoIncrement: true }),
    domainId: text("domainId")
        .notNull()
        .references(() => domains.domainId, { onDelete: "cascade" }),

    recordType: text("recordType").notNull(), // "NS" | "CNAME" | "A" | "TXT"
    baseDomain: text("baseDomain"),
    value: text("value").notNull(),
    verified: integer("verified", { mode: "boolean" }).notNull().default(false)
    tries: integer("tries").notNull().default(0)
});

export const orgs = sqliteTable("orgs", {
    orgId: text("orgId").primaryKey(),
    name: text("name").notNull(),
    subnet: text("subnet"),
    utilitySubnet: text("utilitySubnet"), // this is the subnet for utility addresses
    createdAt: text("createdAt"),
    requireTwoFactor: integer("requireTwoFactor", { mode: "boolean" }),
    maxSessionLengthHours: integer("maxSessionLengthHours"), // hours
    passwordExpiryDays: integer("passwordExpiryDays"), // days
    settingsLogRetentionDaysRequest: integer("settingsLogRetentionDaysRequest") // where 0 = don't keep logs, -1 = keep forever, and 9001 = end of the following year
        .notNull()
        .default(7),
    settingsLogRetentionDaysAccess: integer("settingsLogRetentionDaysAccess") // where 0 = don't keep logs, -1 = keep forever, and 9001 = end of the following year
        .notNull()
        .default(0),
    settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // where 0 = don't keep logs, -1 = keep forever, and 9001 = end of the following year
        .notNull()
        .default(0)
    settings: text("settings") // JSON blob of org-specific settings
});

export const userDomains = sqliteTable("userDomains", {
@@ -95,7 +68,8 @@ export const sites = sqliteTable("sites", {
    listenPort: integer("listenPort"),
    dockerSocketEnabled: integer("dockerSocketEnabled", { mode: "boolean" })
        .notNull()
        .default(true)
        .default(true),
    remoteSubnets: text("remoteSubnets") // comma-separated list of subnets that this site can access
});

export const resources = sqliteTable("resources", {
@@ -138,13 +112,9 @@ export const resources = sqliteTable("resources", {
    setHostHeader: text("setHostHeader"),
    enableProxy: integer("enableProxy", { mode: "boolean" }).default(true),
    skipToIdpId: integer("skipToIdpId").references(() => idp.idpId, {
        onDelete: "set null"
        onDelete: "cascade"
    }),
    headers: text("headers"), // comma-separated list of headers to add to the request
    proxyProtocol: integer("proxyProtocol", { mode: "boolean" })
        .notNull()
        .default(false),
    proxyProtocolVersion: integer("proxyProtocolVersion").default(1)
    headers: text("headers") // comma-separated list of headers to add to the request
});

export const targets = sqliteTable("targets", {
@@ -172,15 +142,11 @@ export const targets = sqliteTable("targets", {
});

export const targetHealthCheck = sqliteTable("targetHealthCheck", {
    targetHealthCheckId: integer("targetHealthCheckId").primaryKey({
        autoIncrement: true
    }),
    targetHealthCheckId: integer("targetHealthCheckId").primaryKey({ autoIncrement: true }),
    targetId: integer("targetId")
        .notNull()
        .references(() => targets.targetId, { onDelete: "cascade" }),
    hcEnabled: integer("hcEnabled", { mode: "boolean" })
        .notNull()
        .default(false),
    hcEnabled: integer("hcEnabled", { mode: "boolean" }).notNull().default(false),
    hcPath: text("hcPath"),
    hcScheme: text("hcScheme"),
    hcMode: text("hcMode").default("http"),
@@ -190,13 +156,10 @@ export const targetHealthCheck = sqliteTable("targetHealthCheck", {
    hcUnhealthyInterval: integer("hcUnhealthyInterval").default(30), // in seconds
    hcTimeout: integer("hcTimeout").default(5), // in seconds
    hcHeaders: text("hcHeaders"),
    hcFollowRedirects: integer("hcFollowRedirects", {
        mode: "boolean"
    }).default(true),
    hcFollowRedirects: integer("hcFollowRedirects", { mode: "boolean" }).default(true),
    hcMethod: text("hcMethod").default("GET"),
    hcStatus: integer("hcStatus"), // http code
    hcHealth: text("hcHealth").default("unknown"), // "unknown", "healthy", "unhealthy"
    hcTlsServerName: text("hcTlsServerName")
    hcHealth: text("hcHealth").default("unknown") // "unknown", "healthy", "unhealthy"
});

export const exitNodes = sqliteTable("exitNodes", {
@@ -227,41 +190,11 @@ export const siteResources = sqliteTable("siteResources", {
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    niceId: text("niceId").notNull(),
    name: text("name").notNull(),
    mode: text("mode").notNull(), // "host" | "cidr" | "port"
    protocol: text("protocol"), // only for port mode
    proxyPort: integer("proxyPort"), // only for port mode
    destinationPort: integer("destinationPort"), // only for port mode
    destination: text("destination").notNull(), // ip, cidr, hostname
    enabled: integer("enabled", { mode: "boolean" }).notNull().default(true),
    alias: text("alias"),
    aliasAddress: text("aliasAddress")
});

export const clientSiteResources = sqliteTable("clientSiteResources", {
    clientId: integer("clientId")
        .notNull()
        .references(() => clients.clientId, { onDelete: "cascade" }),
    siteResourceId: integer("siteResourceId")
        .notNull()
        .references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});

export const roleSiteResources = sqliteTable("roleSiteResources", {
    roleId: integer("roleId")
        .notNull()
        .references(() => roles.roleId, { onDelete: "cascade" }),
    siteResourceId: integer("siteResourceId")
        .notNull()
        .references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});

export const userSiteResources = sqliteTable("userSiteResources", {
    userId: text("userId")
        .notNull()
        .references(() => users.userId, { onDelete: "cascade" }),
    siteResourceId: integer("siteResourceId")
        .notNull()
        .references(() => siteResources.siteResourceId, { onDelete: "cascade" })
    protocol: text("protocol").notNull(),
    proxyPort: integer("proxyPort").notNull(),
    destinationPort: integer("destinationPort").notNull(),
    destinationIp: text("destinationIp").notNull(),
    enabled: integer("enabled", { mode: "boolean" }).notNull().default(true)
});

export const users = sqliteTable("user", {
@@ -289,8 +222,7 @@ export const users = sqliteTable("user", {
    termsVersion: text("termsVersion"),
    serverAdmin: integer("serverAdmin", { mode: "boolean" })
        .notNull()
        .default(false),
    lastPasswordChange: integer("lastPasswordChange")
        .default(false)
});

export const securityKeys = sqliteTable("webauthnCredentials", {
@@ -337,7 +269,7 @@ export const newts = sqliteTable("newt", {
});

export const clients = sqliteTable("clients", {
    clientId: integer("clientId").primaryKey({ autoIncrement: true }),
    clientId: integer("id").primaryKey({ autoIncrement: true }),
    orgId: text("orgId")
        .references(() => orgs.orgId, {
            onDelete: "cascade"
@@ -346,14 +278,8 @@ export const clients = sqliteTable("clients", {
    exitNodeId: integer("exitNode").references(() => exitNodes.exitNodeId, {
        onDelete: "set null"
    }),
    userId: text("userId").references(() => users.userId, {
        // optionally tied to a user and in this case delete when the user deletes
        onDelete: "cascade"
    }),
    niceId: text("niceId").notNull(),
    name: text("name").notNull(),
    pubKey: text("pubKey"),
    olmId: text("olmId"), // to lock it to a specific olm optionally
    subnet: text("subnet").notNull(),
    megabytesIn: integer("bytesIn"),
    megabytesOut: integer("bytesOut"),
@@ -365,42 +291,25 @@ export const clients = sqliteTable("clients", {
    lastHolePunch: integer("lastHolePunch")
});

export const clientSitesAssociationsCache = sqliteTable(
    "clientSitesAssociationsCache",
    {
        clientId: integer("clientId") // not a foreign key here so after it's deleted the rebuild function can delete it and send the message
            .notNull(),
        siteId: integer("siteId").notNull(),
        isRelayed: integer("isRelayed", { mode: "boolean" })
            .notNull()
            .default(false),
        endpoint: text("endpoint"),
        publicKey: text("publicKey") // this will act as the session's public key for hole punching so we can track when it changes
    }
);

export const clientSiteResourcesAssociationsCache = sqliteTable(
    "clientSiteResourcesAssociationsCache",
    {
        clientId: integer("clientId") // not a foreign key here so after it's deleted the rebuild function can delete it and send the message
            .notNull(),
        siteResourceId: integer("siteResourceId").notNull()
    }
);
export const clientSites = sqliteTable("clientSites", {
    clientId: integer("clientId")
        .notNull()
        .references(() => clients.clientId, { onDelete: "cascade" }),
    siteId: integer("siteId")
        .notNull()
        .references(() => sites.siteId, { onDelete: "cascade" }),
    isRelayed: integer("isRelayed", { mode: "boolean" })
        .notNull()
        .default(false),
    endpoint: text("endpoint")
});

export const olms = sqliteTable("olms", {
    olmId: text("id").primaryKey(),
    secretHash: text("secretHash").notNull(),
    dateCreated: text("dateCreated").notNull(),
    version: text("version"),
    agent: text("agent"),
    name: text("name"),
    clientId: integer("clientId").references(() => clients.clientId, {
        // we will switch this depending on the current org it wants to connect to
        onDelete: "set null"
    }),
    userId: text("userId").references(() => users.userId, {
        // optionally tied to a user and in this case delete when the user deletes
        onDelete: "cascade"
    })
});
@@ -418,11 +327,7 @@ export const sessions = sqliteTable("session", {
    userId: text("userId")
        .notNull()
        .references(() => users.userId, { onDelete: "cascade" }),
    expiresAt: integer("expiresAt").notNull(),
    issuedAt: integer("issuedAt"),
    deviceAuthUsed: integer("deviceAuthUsed", { mode: "boolean" })
        .notNull()
        .default(false)
    expiresAt: integer("expiresAt").notNull()
});

export const newtSessions = sqliteTable("newtSession", {
@@ -609,16 +514,6 @@ export const resourcePassword = sqliteTable("resourcePassword", {
    passwordHash: text("passwordHash").notNull()
});

export const resourceHeaderAuth = sqliteTable("resourceHeaderAuth", {
    headerAuthId: integer("headerAuthId").primaryKey({
        autoIncrement: true
    }),
    resourceId: integer("resourceId")
        .notNull()
        .references(() => resources.resourceId, { onDelete: "cascade" }),
    headerAuthHash: text("headerAuthHash").notNull()
});

export const resourceAccessToken = sqliteTable("resourceAccessToken", {
    accessTokenId: text("accessTokenId").primaryKey(),
    orgId: text("orgId")
@@ -672,8 +567,7 @@ export const resourceSessions = sqliteTable("resourceSessions", {
        {
            onDelete: "cascade"
        }
    ),
    issuedAt: integer("issuedAt")
    )
});

export const resourceWhitelist = sqliteTable("resourceWhitelist", {
@@ -706,6 +600,8 @@ export const resourceRules = sqliteTable("resourceRules", {
    resourceId: integer("resourceId")
        .notNull()
        .references(() => resources.resourceId, { onDelete: "cascade" }),
    templateRuleId: integer("templateRuleId")
        .references(() => templateRules.ruleId, { onDelete: "cascade" }),
    enabled: integer("enabled", { mode: "boolean" }).notNull().default(true),
    priority: integer("priority").notNull(),
    action: text("action").notNull(), // ACCEPT, DROP, PASS
@@ -713,6 +609,40 @@ export const resourceRules = sqliteTable("resourceRules", {
    value: text("value").notNull()
});

// Rule templates (reusable rule sets)
export const ruleTemplates = sqliteTable("ruleTemplates", {
    templateId: text("templateId").primaryKey(),
    orgId: text("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    name: text("name").notNull(),
    description: text("description"),
    createdAt: integer("createdAt").notNull()
});

// Rules within templates
export const templateRules = sqliteTable("templateRules", {
    ruleId: integer("ruleId").primaryKey({ autoIncrement: true }),
    templateId: text("templateId")
        .notNull()
        .references(() => ruleTemplates.templateId, { onDelete: "cascade" }),
    enabled: integer("enabled", { mode: "boolean" }).notNull().default(true),
    priority: integer("priority").notNull(),
    action: text("action").notNull(), // ACCEPT, DROP
    match: text("match").notNull(), // CIDR, IP, PATH
    value: text("value").notNull()
});

// Template assignments to resources
export const resourceTemplates = sqliteTable("resourceTemplates", {
    resourceId: integer("resourceId")
        .notNull()
        .references(() => resources.resourceId, { onDelete: "cascade" }),
    templateId: text("templateId")
        .notNull()
        .references(() => ruleTemplates.templateId, { onDelete: "cascade" })
});

export const supporterKey = sqliteTable("supporterKey", {
    keyId: integer("keyId").primaryKey({ autoIncrement: true }),
    key: text("key").notNull(),
@@ -806,74 +736,6 @@ export const idpOrg = sqliteTable("idpOrg", {
    orgMapping: text("orgMapping")
});

// Blueprint runs
export const blueprints = sqliteTable("blueprints", {
    blueprintId: integer("blueprintId").primaryKey({
        autoIncrement: true
    }),
    orgId: text("orgId")
        .references(() => orgs.orgId, {
            onDelete: "cascade"
        })
        .notNull(),
    name: text("name").notNull(),
    source: text("source").notNull(),
    createdAt: integer("createdAt").notNull(),
    succeeded: integer("succeeded", { mode: "boolean" }).notNull(),
    contents: text("contents").notNull(),
    message: text("message")
});
export const requestAuditLog = sqliteTable(
    "requestAuditLog",
    {
        id: integer("id").primaryKey({ autoIncrement: true }),
        timestamp: integer("timestamp").notNull(), // this is EPOCH time in seconds
        orgId: text("orgId").references(() => orgs.orgId, {
            onDelete: "cascade"
        }),
        action: integer("action", { mode: "boolean" }).notNull(),
        reason: integer("reason").notNull(),
        actorType: text("actorType"),
        actor: text("actor"),
        actorId: text("actorId"),
        resourceId: integer("resourceId"),
        ip: text("ip"),
        location: text("location"),
        userAgent: text("userAgent"),
        metadata: text("metadata"),
        headers: text("headers"), // JSON blob
        query: text("query"), // JSON blob
        originalRequestURL: text("originalRequestURL"),
        scheme: text("scheme"),
        host: text("host"),
        path: text("path"),
        method: text("method"),
        tls: integer("tls", { mode: "boolean" })
    },
    (table) => [
        index("idx_requestAuditLog_timestamp").on(table.timestamp),
        index("idx_requestAuditLog_org_timestamp").on(
            table.orgId,
            table.timestamp
        )
    ]
);

export const deviceWebAuthCodes = sqliteTable("deviceWebAuthCodes", {
    codeId: integer("codeId").primaryKey({ autoIncrement: true }),
    code: text("code").notNull().unique(),
    ip: text("ip"),
    city: text("city"),
    deviceName: text("deviceName"),
    applicationName: text("applicationName").notNull(),
    expiresAt: integer("expiresAt").notNull(),
    createdAt: integer("createdAt").notNull(),
    verified: integer("verified", { mode: "boolean" }).notNull().default(false),
    userId: text("userId").references(() => users.userId, {
        onDelete: "cascade"
    })
});

export type Org = InferSelectModel<typeof orgs>;
export type User = InferSelectModel<typeof users>;
export type Site = InferSelectModel<typeof sites>;
@@ -903,16 +765,14 @@ export type UserOrg = InferSelectModel<typeof userOrgs>;
export type ResourceSession = InferSelectModel<typeof resourceSessions>;
export type ResourcePincode = InferSelectModel<typeof resourcePincode>;
export type ResourcePassword = InferSelectModel<typeof resourcePassword>;
export type ResourceHeaderAuth = InferSelectModel<typeof resourceHeaderAuth>;
export type ResourceOtp = InferSelectModel<typeof resourceOtp>;
export type ResourceAccessToken = InferSelectModel<typeof resourceAccessToken>;
export type ResourceWhitelist = InferSelectModel<typeof resourceWhitelist>;
export type VersionMigration = InferSelectModel<typeof versionMigrations>;
export type ResourceRule = InferSelectModel<typeof resourceRules>;
export type Domain = InferSelectModel<typeof domains>;
export type DnsRecord = InferSelectModel<typeof dnsRecords>;
export type Client = InferSelectModel<typeof clients>;
export type ClientSite = InferSelectModel<typeof clientSitesAssociationsCache>;
export type ClientSite = InferSelectModel<typeof clientSites>;
export type RoleClient = InferSelectModel<typeof roleClients>;
export type UserClient = InferSelectModel<typeof userClients>;
export type SupporterKey = InferSelectModel<typeof supporterKey>;
@@ -925,10 +785,6 @@ export type OrgDomains = InferSelectModel<typeof orgDomains>;
export type SetupToken = InferSelectModel<typeof setupTokens>;
export type HostMeta = InferSelectModel<typeof hostMeta>;
export type TargetHealthCheck = InferSelectModel<typeof targetHealthCheck>;
export type IdpOidcConfig = InferSelectModel<typeof idpOidcConfig>;
export type Blueprint = InferSelectModel<typeof blueprints>;
export type LicenseKey = InferSelectModel<typeof licenseKey>;
export type SecurityKey = InferSelectModel<typeof securityKeys>;
export type WebauthnChallenge = InferSelectModel<typeof webauthnChallenge>;
export type RequestAuditLog = InferSelectModel<typeof requestAuditLog>;
export type DeviceWebAuthCode = InferSelectModel<typeof deviceWebAuthCodes>;
export type RuleTemplate = InferSelectModel<typeof ruleTemplates>;
export type TemplateRule = InferSelectModel<typeof templateRules>;
export type ResourceTemplate = InferSelectModel<typeof resourceTemplates>;
@@ -6,6 +6,11 @@ import logger from "@server/logger";
import SMTPTransport from "nodemailer/lib/smtp-transport";

function createEmailClient() {
    if (config.isManagedMode()) {
        // LETS NOT WORRY ABOUT EMAILS IN HYBRID
        return;
    }
|
||||
const emailConfig = config.getRawConfig().email;
|
||||
if (!emailConfig) {
|
||||
logger.warn(
|
||||
|
||||
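Not part of the diff: a sketch of the guard pattern createEmailClient uses above, skipping the client entirely in managed (hybrid) mode and otherwise building a nodemailer transport. The SMTP field names here are assumptions, not the project's actual config shape:

import nodemailer from "nodemailer";

type SmtpConfig = {
    host: string;
    port: number;
    user?: string;
    pass?: string;
};

// Returns undefined when email is not configured, so callers must
// tolerate a missing client (as the managed-mode early return implies).
function createTransportIfConfigured(managed: boolean, smtp?: SmtpConfig) {
    if (managed || !smtp) {
        return undefined;
    }
    return nodemailer.createTransport({
        host: smtp.host,
        port: smtp.port,
        auth: smtp.user ? { user: smtp.user, pass: smtp.pass } : undefined
    });
}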
@@ -2,6 +2,7 @@ import { render } from "@react-email/render";
import { ReactElement } from "react";
import emailClient from "@server/emails";
import logger from "@server/logger";
import config from "@server/lib/config";

export async function sendEmail(
    template: ReactElement,
@@ -24,7 +25,7 @@ export async function sendEmail(

    const emailHtml = await render(template);

    const appName = process.env.BRANDING_APP_NAME || "Pangolin"; // From the private config, loaded into env vars to keep the private config separate
    const appName = config.getRawPrivateConfig().branding?.app_name || "Pangolin";

    await emailClient.sendMail({
        from: {

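Not part of the diff: a hedged reconstruction of the truncated sendEmail flow, rendering the React template to HTML and handing it to the nodemailer client. The from address and the options shape are illustrative only:

import { render } from "@react-email/render";
import { ReactElement } from "react";
import emailClient from "@server/emails";
import config from "@server/lib/config";

async function sendRendered(template: ReactElement, to: string, subject: string) {
    const emailHtml = await render(template);
    const appName =
        config.getRawPrivateConfig().branding?.app_name || "Pangolin";

    await emailClient.sendMail({
        from: { name: appName, address: "no-reply@example.com" }, // placeholder address
        to,
        subject,
        html: emailHtml
    });
}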
@@ -1,3 +1,16 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import React from "react";
import { Body, Head, Html, Preview, Tailwind } from "@react-email/components";
import { themeColors } from "./lib/theme";
@@ -1,3 +1,16 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import React from "react";
import { Body, Head, Html, Preview, Tailwind } from "@react-email/components";
import { themeColors } from "./lib/theme";
@@ -1,56 +0,0 @@
import React from "react";
|
||||
import { Body, Head, Html, Preview, Tailwind } from "@react-email/components";
|
||||
import { themeColors } from "./lib/theme";
|
||||
import {
|
||||
EmailContainer,
|
||||
EmailGreeting,
|
||||
EmailLetterHead,
|
||||
EmailText
|
||||
} from "./components/Email";
|
||||
|
||||
interface SupportEmailProps {
|
||||
email: string;
|
||||
username: string;
|
||||
subject: string;
|
||||
body: string;
|
||||
}
|
||||
|
||||
export const SupportEmail = ({
|
||||
username,
|
||||
email,
|
||||
body,
|
||||
subject
|
||||
}: SupportEmailProps) => {
|
||||
const previewText = subject;
|
||||
|
||||
return (
|
||||
<Html>
|
||||
<Head />
|
||||
<Preview>{previewText}</Preview>
|
||||
<Tailwind config={themeColors}>
|
||||
<Body className="font-sans bg-gray-50">
|
||||
<EmailContainer>
|
||||
<EmailLetterHead />
|
||||
|
||||
<EmailGreeting>Hi support,</EmailGreeting>
|
||||
|
||||
<EmailText>
|
||||
You have received a new support request from{" "}
|
||||
<strong>{username}</strong> ({email}).
|
||||
</EmailText>
|
||||
|
||||
<EmailText>
|
||||
<strong>Subject:</strong> {subject}
|
||||
</EmailText>
|
||||
|
||||
<EmailText>
|
||||
<strong>Message:</strong> {body}
|
||||
</EmailText>
|
||||
</EmailContainer>
|
||||
</Body>
|
||||
</Tailwind>
|
||||
</Html>
|
||||
);
|
||||
};
|
||||
|
||||
export default SupportEmail;
|
||||
@@ -88,7 +88,7 @@ export const WelcomeQuickStart = ({
|
||||
To learn how to use Newt, including more
|
||||
installation methods, visit the{" "}
|
||||
<a
|
||||
href="https://docs.pangolin.net/manage/sites/install-site"
|
||||
href="https://docs.digpangolin.com/manage/sites/install-site"
|
||||
className="underline"
|
||||
>
|
||||
docs
|
||||
|
||||
@@ -89,7 +89,7 @@ export function EmailFooter({ children }: { children: React.ReactNode }) {
|
||||
<p className="text-xs text-gray-400 mt-4">
|
||||
For any questions or support, please contact us at:
|
||||
<br />
|
||||
support@pangolin.net
|
||||
support@fossorial.io
|
||||
</p>
|
||||
<p className="text-xs text-gray-300 text-center mt-4">
|
||||
© {new Date().getFullYear()} Fossorial, Inc. All
|
||||
|
||||
server/hybridServer.ts (new file, 151 lines)
@@ -0,0 +1,151 @@
import logger from "@server/logger";
|
||||
import config from "@server/lib/config";
|
||||
import { createWebSocketClient } from "./routers/ws/client";
|
||||
import { addPeer, deletePeer } from "./routers/gerbil/peers";
|
||||
import { db, exitNodes } from "./db";
|
||||
import { TraefikConfigManager } from "./lib/traefik/TraefikConfigManager";
|
||||
import { tokenManager } from "./lib/tokenManager";
|
||||
import { APP_VERSION } from "./lib/consts";
|
||||
import axios from "axios";
|
||||
|
||||
export async function createHybridClientServer() {
|
||||
logger.info("Starting hybrid client server...");
|
||||
|
||||
// Start the token manager
|
||||
await tokenManager.start();
|
||||
|
||||
const token = await tokenManager.getToken();
|
||||
|
||||
const monitor = new TraefikConfigManager();
|
||||
|
||||
await monitor.start();
|
||||
|
||||
// Create client
|
||||
const client = createWebSocketClient(
|
||||
token,
|
||||
config.getRawConfig().managed!.endpoint!,
|
||||
{
|
||||
reconnectInterval: 5000,
|
||||
pingInterval: 30000,
|
||||
pingTimeout: 10000
|
||||
}
|
||||
);
|
||||
|
||||
// Register message handlers
|
||||
client.registerHandler("remoteExitNode/peers/add", async (message) => {
|
||||
const { publicKey, allowedIps } = message.data;
|
||||
|
||||
// TODO: we are getting the exit node twice here
|
||||
// NOTE: there should only be one gerbil registered so...
|
||||
const [exitNode] = await db.select().from(exitNodes).limit(1);
|
||||
await addPeer(exitNode.exitNodeId, {
|
||||
publicKey: publicKey,
|
||||
allowedIps: allowedIps || []
|
||||
});
|
||||
});
|
||||
|
||||
client.registerHandler("remoteExitNode/peers/remove", async (message) => {
|
||||
const { publicKey } = message.data;
|
||||
|
||||
// TODO: we are getting the exit node twice here
|
||||
// NOTE: there should only be one gerbil registered so...
|
||||
const [exitNode] = await db.select().from(exitNodes).limit(1);
|
||||
await deletePeer(exitNode.exitNodeId, publicKey);
|
||||
});
|
||||
|
||||
// /update-proxy-mapping
|
||||
client.registerHandler("remoteExitNode/update-proxy-mapping", async (message) => {
|
||||
try {
|
||||
const [exitNode] = await db.select().from(exitNodes).limit(1);
|
||||
if (!exitNode) {
|
||||
logger.error("No exit node found for proxy mapping update");
|
||||
return;
|
||||
}
|
||||
|
||||
const response = await axios.post(`${exitNode.endpoint}/update-proxy-mapping`, message.data);
|
||||
logger.info(`Successfully updated proxy mapping: ${response.status}`);
|
||||
} catch (error) {
|
||||
// pull data out of the axios error to log
|
||||
if (axios.isAxiosError(error)) {
|
||||
logger.error("Error updating proxy mapping:", {
|
||||
message: error.message,
|
||||
code: error.code,
|
||||
status: error.response?.status,
|
||||
statusText: error.response?.statusText,
|
||||
url: error.config?.url,
|
||||
method: error.config?.method
|
||||
});
|
||||
} else {
|
||||
logger.error("Error updating proxy mapping:", error);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// /update-destinations
|
||||
client.registerHandler("remoteExitNode/update-destinations", async (message) => {
|
||||
try {
|
||||
const [exitNode] = await db.select().from(exitNodes).limit(1);
|
||||
if (!exitNode) {
|
||||
logger.error("No exit node found for destinations update");
|
||||
return;
|
||||
}
|
||||
|
||||
const response = await axios.post(`${exitNode.endpoint}/update-destinations`, message.data);
|
||||
logger.info(`Successfully updated destinations: ${response.status}`);
|
||||
} catch (error) {
|
||||
// pull data out of the axios error to log
|
||||
if (axios.isAxiosError(error)) {
|
||||
logger.error("Error updating destinations:", {
|
||||
message: error.message,
|
||||
code: error.code,
|
||||
status: error.response?.status,
|
||||
statusText: error.response?.statusText,
|
||||
url: error.config?.url,
|
||||
method: error.config?.method
|
||||
});
|
||||
} else {
|
||||
logger.error("Error updating destinations:", error);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
client.registerHandler("remoteExitNode/traefik/reload", async (message) => {
|
||||
await monitor.HandleTraefikConfig();
|
||||
});
|
||||
|
||||
// Listen to connection events
|
||||
client.on("connect", () => {
|
||||
logger.info("Connected to WebSocket server");
|
||||
client.sendMessage("remoteExitNode/register", {
|
||||
remoteExitNodeVersion: APP_VERSION
|
||||
});
|
||||
});
|
||||
|
||||
client.on("disconnect", () => {
|
||||
logger.info("Disconnected from WebSocket server");
|
||||
});
|
||||
|
||||
client.on("message", (message) => {
|
||||
logger.info(
|
||||
`Received message: ${message.type} ${JSON.stringify(message.data)}`
|
||||
);
|
||||
});
|
||||
|
||||
// Connect to the server
|
||||
try {
|
||||
await client.connect();
|
||||
logger.info("Connection initiated");
|
||||
} catch (error) {
|
||||
logger.error("Failed to connect:", error);
|
||||
}
|
||||
|
||||
// Store the ping interval stop function for cleanup if needed
|
||||
const stopPingInterval = client.sendMessageInterval(
|
||||
"remoteExitNode/ping",
|
||||
{ timestamp: Date.now() / 1000 },
|
||||
60000
|
||||
); // send every minute
|
||||
|
||||
// Return client and cleanup function for potential use
|
||||
return { client, stopPingInterval };
|
||||
}
|
||||
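Not part of the diff: one way a caller might use the returned handles. Only createHybridClientServer and stopPingInterval appear in the file above; the SIGTERM wiring is an assumption:

import { createHybridClientServer } from "./hybridServer";

async function main() {
    const { client, stopPingInterval } = await createHybridClientServer();

    process.on("SIGTERM", () => {
        // Stop the periodic remoteExitNode/ping messages before exiting
        stopPingInterval();
        process.exit(0);
    });

    return client;
}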
@@ -5,49 +5,36 @@ import { runSetupFunctions } from "./setup";
import { createApiServer } from "./apiServer";
import { createNextServer } from "./nextServer";
import { createInternalServer } from "./internalServer";
import { ApiKey, ApiKeyOrg, RemoteExitNode, Session, User, UserOrg } from "@server/db";
import { createIntegrationApiServer } from "./integrationApiServer";
import {
    ApiKey,
    ApiKeyOrg,
    RemoteExitNode,
    Session,
    SiteResource,
    User,
    UserOrg
} from "@server/db";
import { createHybridClientServer } from "./hybridServer";
import config from "@server/lib/config";
import { setHostMeta } from "@server/lib/hostMeta";
import { initTelemetryClient } from "@server/lib/telemetry";
import { TraefikConfigManager } from "@server/lib/traefik/TraefikConfigManager";
import { initCleanup } from "#dynamic/cleanup";
import license from "#dynamic/license/license";
import { initLogCleanupInterval } from "@server/lib/cleanupLogs";
import { fetchServerIp } from "@server/lib/serverIpService";
import { initTelemetryClient } from "./lib/telemetry.js";
import { TraefikConfigManager } from "./lib/traefik/TraefikConfigManager.js";

async function startServers() {
    await setHostMeta();

    await config.initServer();

    license.setServerSecret(config.getRawConfig().server.secret!);
    await license.check();

    await runSetupFunctions();

    await fetchServerIp();

    initTelemetryClient();

    initLogCleanupInterval();

    // Start all servers
    const apiServer = createApiServer();
    const internalServer = createInternalServer();

    const nextServer = await createNextServer();
    if (config.getRawConfig().traefik.file_mode) {
        const monitor = new TraefikConfigManager();
        await monitor.start();
    let hybridClientServer;
    let nextServer;
    if (config.isManagedMode()) {
        hybridClientServer = await createHybridClientServer();
    } else {
        nextServer = await createNextServer();
        if (config.getRawConfig().traefik.file_mode) {
            const monitor = new TraefikConfigManager();
            await monitor.start();
        }
    }

    let integrationServer;
@@ -55,13 +42,12 @@ async function startServers() {
        integrationServer = createIntegrationApiServer();
    }

    await initCleanup();

    return {
        apiServer,
        nextServer,
        internalServer,
        integrationServer
        integrationServer,
        hybridClientServer
    };
}

@@ -78,8 +64,6 @@ declare global {
            userOrgId?: string;
            userOrgIds?: string[];
            remoteExitNode?: RemoteExitNode;
            siteResource?: SiteResource;
            orgPolicyAllowed?: boolean;
        }
    }
}

@@ -7,7 +7,7 @@ import {
    errorHandlerMiddleware,
    notFoundMiddleware,
} from "@server/middlewares";
import { authenticated, unauthenticated } from "#dynamic/routers/integration";
import { authenticated, unauthenticated } from "@server/routers/integration";
import { logIncomingMiddleware } from "./middlewares/logIncoming";
import helmet from "helmet";
import swaggerUi from "swagger-ui-express";

@@ -8,7 +8,7 @@ import {
    errorHandlerMiddleware,
    notFoundMiddleware
} from "@server/middlewares";
import { internalRouter } from "#dynamic/routers/internal";
import internal from "@server/routers/internal";
import { stripDuplicateSesions } from "./middlewares/stripDuplicateSessions";

const internalPort = config.getRawConfig().server.internal_port;
@@ -23,7 +23,7 @@ export function createInternalServer() {
    internalServer.use(express.json());

    const prefix = `/api/v1`;
    internalServer.use(prefix, internalRouter);
    internalServer.use(prefix, internal);

    internalServer.use(notFoundMiddleware);
    internalServer.use(errorHandlerMiddleware);

@@ -1,6 +0,0 @@
export async function createCustomer(
    orgId: string,
    email: string | null | undefined
): Promise<string | undefined> {
    return;
}
@@ -1,8 +0,0 @@
export async function getOrgTierData(
    orgId: string
): Promise<{ tier: string | null; active: boolean }> {
    const tier = null;
    const active = false;

    return { tier, active };
}
@@ -1,5 +0,0 @@
export * from "./limitSet";
export * from "./features";
export * from "./limitsService";
export * from "./getOrgTierData";
export * from "./createCustomer";
@@ -1,36 +1,22 @@
import { db, newts, blueprints, Blueprint } from "@server/db";
import { db, newts, Target } from "@server/db";
import { Config, ConfigSchema } from "./types";
import { ProxyResourcesResults, updateProxyResources } from "./proxyResources";
import { fromError } from "zod-validation-error";
import logger from "@server/logger";
import { sites } from "@server/db";
import { eq, and, isNotNull } from "drizzle-orm";
import { resources, targets, sites } from "@server/db";
import { eq, and, asc, or, ne, count, isNotNull } from "drizzle-orm";
import { addTargets as addProxyTargets } from "@server/routers/newt/targets";
import { addTargets as addClientTargets } from "@server/routers/client/targets";
import {
    ClientResourcesResults,
    updateClientResources
} from "./clientResources";
import { BlueprintSource } from "@server/routers/blueprints/types";
import { stringify as stringifyYaml } from "yaml";
import { faker } from "@faker-js/faker";
import { handleMessagingForUpdatedSiteResource } from "@server/routers/siteResource";

type ApplyBlueprintArgs = {
    orgId: string;
    configData: unknown;
    name?: string;
    siteId?: number;
    source?: BlueprintSource;
};

export async function applyBlueprint({
    orgId,
    configData,
    siteId,
    name,
    source = "API"
}: ApplyBlueprintArgs): Promise<Blueprint> {
export async function applyBlueprint(
    orgId: string,
    configData: unknown,
    siteId?: number
): Promise<void> {
    // Validate the input data
    const validationResult = ConfigSchema.safeParse(configData);
    if (!validationResult.success) {
@@ -38,9 +24,6 @@ export async function applyBlueprint({
    }

    const config: Config = validationResult.data;
    let blueprintSucceeded: boolean = false;
    let blueprintMessage: string;
    let error: any | null = null;

    try {
        let proxyResourcesResults: ProxyResourcesResults = [];
@@ -58,63 +41,22 @@
                trx,
                siteId
            );
        });

        logger.debug(
            `Successfully updated proxy resources for org ${orgId}: ${JSON.stringify(proxyResourcesResults)}`
        );
        logger.debug(
            `Successfully updated proxy resources for org ${orgId}: ${JSON.stringify(proxyResourcesResults)}`
        );

        // We need to update the targets on the newts from the successfully updated information
        for (const result of proxyResourcesResults) {
            for (const target of result.targetsToUpdate) {
                const [site] = await trx
                    .select()
                    .from(sites)
                    .innerJoin(newts, eq(sites.siteId, newts.siteId))
                    .where(
                        and(
                            eq(sites.siteId, target.siteId),
                            eq(sites.orgId, orgId),
                            eq(sites.type, "newt"),
                            isNotNull(sites.pubKey)
                        )
                    )
                    .limit(1);

                if (site) {
                    logger.debug(
                        `Updating target ${target.targetId} on site ${site.sites.siteId}`
                    );

                    // see if you can find a matching target health check from the healthchecksToUpdate array
                    const matchingHealthcheck =
                        result.healthchecksToUpdate.find(
                            (hc) => hc.targetId === target.targetId
                        );

                    await addProxyTargets(
                        site.newt.newtId,
                        [target],
                        matchingHealthcheck ? [matchingHealthcheck] : [],
                        result.proxyResource.protocol,
                        result.proxyResource.proxyPort
                    );
                }
            }
        }

        logger.debug(
            `Successfully updated client resources for org ${orgId}: ${JSON.stringify(clientResourcesResults)}`
        );

        // We need to update the targets on the newts from the successfully updated information
        for (const result of clientResourcesResults) {
            const [site] = await trx
        // We need to update the targets on the newts from the successfully updated information
        for (const result of proxyResourcesResults) {
            for (const target of result.targetsToUpdate) {
                const [site] = await db
                    .select()
                    .from(sites)
                    .innerJoin(newts, eq(sites.siteId, newts.siteId))
                    .where(
                        and(
                            eq(sites.siteId, result.newSiteResource.siteId),
                            eq(sites.siteId, target.siteId),
                            eq(sites.orgId, orgId),
                            eq(sites.type, "newt"),
                            isNotNull(sites.pubKey)
@@ -122,67 +64,114 @@ export async function applyBlueprint({
                        )
                    .limit(1);

                if (!site) {
                if (site) {
                    logger.debug(
                        `No newt site found for client resource ${result.newSiteResource.siteResourceId}, skipping target update`
                        `Updating target ${target.targetId} on site ${site.sites.siteId}`
                    );

                    // see if you can find a matching target health check from the healthchecksToUpdate array
                    const matchingHealthcheck =
                        result.healthchecksToUpdate.find(
                            (hc) => hc.targetId === target.targetId
                        );

                    await addProxyTargets(
                        site.newt.newtId,
                        [target],
                        matchingHealthcheck ? [matchingHealthcheck] : [],
                        result.proxyResource.protocol,
                        result.proxyResource.proxyPort
                    );
                    continue;
                }

                logger.debug(
                    `Updating client resource ${result.newSiteResource.siteResourceId} on site ${site.sites.siteId}`
                );

                await handleMessagingForUpdatedSiteResource(
                    result.oldSiteResource,
                    result.newSiteResource,
                    { siteId: site.sites.siteId, orgId: site.sites.orgId },
                    trx
                );

                // await addClientTargets(
                //     site.newt.newtId,
                //     result.resource.destination,
                //     result.resource.destinationPort,
                //     result.resource.protocol,
                //     result.resource.proxyPort
                // );
            }
        });
    }

        blueprintSucceeded = true;
        blueprintMessage = "Blueprint applied successfully";
    } catch (err) {
        blueprintSucceeded = false;
        blueprintMessage = `Blueprint applied with errors: ${err}`;
        logger.error(blueprintMessage);
        error = err;
        logger.debug(
            `Successfully updated client resources for org ${orgId}: ${JSON.stringify(clientResourcesResults)}`
        );

        // We need to update the targets on the newts from the successfully updated information
        for (const result of clientResourcesResults) {
            const [site] = await db
                .select()
                .from(sites)
                .innerJoin(newts, eq(sites.siteId, newts.siteId))
                .where(
                    and(
                        eq(sites.siteId, result.resource.siteId),
                        eq(sites.orgId, orgId),
                        eq(sites.type, "newt"),
                        isNotNull(sites.pubKey)
                    )
                )
                .limit(1);

            if (site) {
                logger.debug(
                    `Updating client resource ${result.resource.siteResourceId} on site ${site.sites.siteId}`
                );

                await addClientTargets(
                    site.newt.newtId,
                    result.resource.destinationIp,
                    result.resource.destinationPort,
                    result.resource.protocol,
                    result.resource.proxyPort
                );
            }
        }
    } catch (error) {
        logger.error(`Failed to update database from config: ${error}`);
        throw error;
    }

    let blueprint: Blueprint | null = null;
    await db.transaction(async (trx) => {
        const newBlueprint = await trx
            .insert(blueprints)
            .values({
                orgId,
                name:
                    name ??
                    `${faker.word.adjective()} ${faker.word.adjective()} ${faker.word.noun()}`,
                contents: stringifyYaml(configData),
                createdAt: Math.floor(Date.now() / 1000),
                succeeded: blueprintSucceeded,
                message: blueprintMessage,
                source
            })
            .returning();

        blueprint = newBlueprint[0];
    });

    if (!blueprint || (source !== "UI" && !blueprintSucceeded)) {
        // ^^^^^^^^^^^^^^^ The UI considers a failed blueprint as a valid response
        throw error ?? "Unknown Server Error";
    }

    return blueprint;
}

// await updateDatabaseFromConfig("org_i21aifypnlyxur2", {
//     resources: {
//         "resource-nice-id": {
//             name: "this is my resource",
//             protocol: "http",
//             "full-domain": "level1.test.example.com",
//             "host-header": "example.com",
//             "tls-server-name": "example.com",
//             auth: {
//                 pincode: 123456,
//                 password: "sadfasdfadsf",
//                 "sso-enabled": true,
//                 "sso-roles": ["Member"],
//                 "sso-users": ["owen@fossorial.io"],
//                 "whitelist-users": ["owen@fossorial.io"]
//             },
//             targets: [
//                 {
//                     site: "glossy-plains-viscacha-rat",
//                     hostname: "localhost",
//                     method: "http",
//                     port: 8000,
//                     healthcheck: {
//                         port: 8000,
//                         hostname: "localhost"
//                     }
//                 },
//                 {
//                     site: "glossy-plains-viscacha-rat",
//                     hostname: "localhost",
//                     method: "http",
//                     port: 8001
//                 }
//             ]
//         },
//         "resource-nice-id2": {
//             name: "http server",
//             protocol: "tcp",
//             "proxy-port": 3000,
//             targets: [
//                 {
//                     site: "glossy-plains-viscacha-rat",
//                     hostname: "localhost",
//                     port: 3000,
//                 }
//             ]
//         }
//     }
// });

@@ -1,4 +1,4 @@
import { sendToClient } from "#dynamic/routers/ws";
import { sendToClient } from "@server/routers/ws";
import { processContainerLabels } from "./parseDockerContainers";
import { applyBlueprint } from "./applyBlueprint";
import { db, sites } from "@server/db";
@@ -29,29 +29,15 @@ export async function applyNewtDockerBlueprint(

    logger.debug(`Received Docker blueprint: ${JSON.stringify(blueprint)}`);

    // make sure this is not an empty object
    if (isEmptyObject(blueprint)) {
        return;
    }

    if (isEmptyObject(blueprint["proxy-resources"]) && isEmptyObject(blueprint["client-resources"])) {
        return;
    }

    // Update the blueprint in the database
    await applyBlueprint({
        orgId: site.orgId,
        configData: blueprint,
        siteId: site.siteId,
        source: "NEWT"
    });
    await applyBlueprint(site.orgId, blueprint, site.siteId);
} catch (error) {
    logger.error(`Failed to update database from config: ${error}`);
    await sendToClient(newtId, {
        type: "newt/blueprint/results",
        data: {
            success: false,
            message: `Failed to apply blueprint from config: ${error}`
            message: `Failed to update database from config: ${error}`
        }
    });
    return;
@@ -65,10 +51,3 @@ export async function applyNewtDockerBlueprint(
    }
});
}

function isEmptyObject(obj: any) {
    if (obj === null || obj === undefined) {
        return true;
    }
    return Object.keys(obj).length === 0 && obj.constructor === Object;
}

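For reference (not part of the diff), the isEmptyObject guard above treats only plain empty objects and null/undefined as empty:

isEmptyObject(undefined);  // true
isEmptyObject(null);       // true
isEmptyObject({});         // true
isEmptyObject({ a: 1 });   // false
isEmptyObject([]);         // false; an empty array fails the constructor check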
@@ -1,23 +1,17 @@
import {
    clients,
    clientSiteResources,
    roles,
    roleSiteResources,
    SiteResource,
    siteResources,
    Transaction,
    userOrgs,
    users,
    userSiteResources
} from "@server/db";
import { sites } from "@server/db";
import { eq, and, ne, inArray } from "drizzle-orm";
import { Config } from "./types";
import { eq, and } from "drizzle-orm";
import {
    Config,
} from "./types";
import logger from "@server/logger";

export type ClientResourcesResults = {
    newSiteResource: SiteResource;
    oldSiteResource?: SiteResource;
    resource: SiteResource;
}[];

export async function updateClientResources(
@@ -75,22 +69,16 @@ export async function updateClientResources(
        }

        if (existingResource) {
            if (existingResource.siteId !== site.siteId) {
                throw new Error(
                    `You cannot change the site of an existing client resource (${resourceNiceId}). Please delete and recreate it instead.`
                );
            }

            // Update existing resource
            const [updatedResource] = await trx
                .update(siteResources)
                .set({
                    name: resourceData.name || resourceNiceId,
                    mode: resourceData.mode,
                    destination: resourceData.destination,
                    enabled: true, // hardcoded for now
                    // enabled: resourceData.enabled ?? true,
                    alias: resourceData.alias || null
                    siteId: site.siteId,
                    proxyPort: resourceData["proxy-port"]!,
                    destinationIp: resourceData.hostname,
                    destinationPort: resourceData["internal-port"],
                    protocol: resourceData.protocol
                })
                .where(
                    eq(
@@ -100,110 +88,7 @@ export async function updateClientResources(
                    )
                .returning();

            const siteResourceId = existingResource.siteResourceId;
            const orgId = existingResource.orgId;

            await trx
                .delete(clientSiteResources)
                .where(eq(clientSiteResources.siteResourceId, siteResourceId));

            if (resourceData.machines.length > 0) {
                // get clientIds from niceIds
                const clientsToUpdate = await trx
                    .select()
                    .from(clients)
                    .where(
                        and(
                            inArray(clients.niceId, resourceData.machines),
                            eq(clients.orgId, orgId)
                        )
                    );

                const clientIds = clientsToUpdate.map(
                    (client) => client.clientId
                );

                await trx.insert(clientSiteResources).values(
                    clientIds.map((clientId) => ({
                        clientId,
                        siteResourceId
                    }))
                );
            }

            await trx
                .delete(userSiteResources)
                .where(eq(userSiteResources.siteResourceId, siteResourceId));

            if (resourceData.users.length > 0) {
                // get userIds from username
                const usersToUpdate = await trx
                    .select()
                    .from(users)
                    .innerJoin(userOrgs, eq(users.userId, userOrgs.userId))
                    .where(
                        and(
                            inArray(users.username, resourceData.users),
                            eq(userOrgs.orgId, orgId)
                        )
                    );

                const userIds = usersToUpdate.map((user) => user.user.userId);

                await trx
                    .insert(userSiteResources)
                    .values(
                        userIds.map((userId) => ({ userId, siteResourceId }))
                    );
            }

            // Get all admin role IDs for this org to exclude from deletion
            const adminRoles = await trx
                .select()
                .from(roles)
                .where(and(eq(roles.isAdmin, true), eq(roles.orgId, orgId)));
            const adminRoleIds = adminRoles.map((role) => role.roleId);

            if (adminRoleIds.length > 0) {
                await trx.delete(roleSiteResources).where(
                    and(
                        eq(roleSiteResources.siteResourceId, siteResourceId),
                        ne(roleSiteResources.roleId, adminRoleIds[0]) // delete all but the admin role
                    )
                );
            } else {
                await trx
                    .delete(roleSiteResources)
                    .where(
                        eq(roleSiteResources.siteResourceId, siteResourceId)
                    );
            }

            if (resourceData.roles.length > 0) {
                // Re-add specified roles but we need to get the roleIds from the role names in the array
                const rolesToUpdate = await trx
                    .select()
                    .from(roles)
                    .where(
                        and(
                            eq(roles.orgId, orgId),
                            inArray(roles.name, resourceData.roles)
                        )
                    );

                const roleIds = rolesToUpdate.map((role) => role.roleId);

                await trx
                    .insert(roleSiteResources)
                    .values(
                        roleIds.map((roleId) => ({ roleId, siteResourceId }))
                    );
            }

            results.push({
                newSiteResource: updatedResource,
                oldSiteResource: existingResource
            });
            results.push({ resource: updatedResource });
        } else {
            // Create new resource
            const [newResource] = await trx
@@ -213,103 +98,18 @@ export async function updateClientResources(
                    siteId: site.siteId,
                    niceId: resourceNiceId,
                    name: resourceData.name || resourceNiceId,
                    mode: resourceData.mode,
                    destination: resourceData.destination,
                    enabled: true, // hardcoded for now
                    // enabled: resourceData.enabled ?? true,
                    alias: resourceData.alias || null
                    proxyPort: resourceData["proxy-port"]!,
                    destinationIp: resourceData.hostname,
                    destinationPort: resourceData["internal-port"],
                    protocol: resourceData.protocol
                })
                .returning();

            const siteResourceId = newResource.siteResourceId;

            const [adminRole] = await trx
                .select()
                .from(roles)
                .where(and(eq(roles.isAdmin, true), eq(roles.orgId, orgId)))
                .limit(1);

            if (!adminRole) {
                throw new Error(`Admin role not found for org ${orgId}`);
            }

            await trx.insert(roleSiteResources).values({
                roleId: adminRole.roleId,
                siteResourceId: siteResourceId
            });

            if (resourceData.roles.length > 0) {
                // get roleIds from role names
                const rolesToUpdate = await trx
                    .select()
                    .from(roles)
                    .where(
                        and(
                            eq(roles.orgId, orgId),
                            inArray(roles.name, resourceData.roles)
                        )
                    );

                const roleIds = rolesToUpdate.map((role) => role.roleId);

                await trx
                    .insert(roleSiteResources)
                    .values(
                        roleIds.map((roleId) => ({ roleId, siteResourceId }))
                    );
            }

            if (resourceData.users.length > 0) {
                // get userIds from username
                const usersToUpdate = await trx
                    .select()
                    .from(users)
                    .innerJoin(userOrgs, eq(users.userId, userOrgs.userId))
                    .where(
                        and(
                            inArray(users.username, resourceData.users),
                            eq(userOrgs.orgId, orgId)
                        )
                    );

                const userIds = usersToUpdate.map((user) => user.user.userId);

                await trx
                    .insert(userSiteResources)
                    .values(
                        userIds.map((userId) => ({ userId, siteResourceId }))
                    );
            }

            if (resourceData.machines.length > 0) {
                // get clientIds from niceIds
                const clientsToUpdate = await trx
                    .select()
                    .from(clients)
                    .where(
                        and(
                            inArray(clients.niceId, resourceData.machines),
                            eq(clients.orgId, orgId)
                        )
                    );

                const clientIds = clientsToUpdate.map(
                    (client) => client.clientId
                );

                await trx.insert(clientSiteResources).values(
                    clientIds.map((clientId) => ({
                        clientId,
                        siteResourceId
                    }))
                );
            }

            logger.info(
                `Created new client resource ${newResource.name} (${newResource.siteResourceId}) for org ${orgId}`
            );

            results.push({ newSiteResource: newResource });
            results.push({ resource: newResource });
        }
    }

Some files were not shown because too many files have changed in this diff.