Compare commits
1 commit
1.15.0 ... 1.12.2-s.5

| Author | SHA1 | Date |
|---|---|---|
|  | 63e41e3dcd |  |
@@ -1,3 +1,6 @@
 {
-    "extends": ["next/core-web-vitals", "next/typescript"]
+    "extends": [
+        "next/core-web-vitals",
+        "next/typescript"
+    ]
 }
.github/workflows/cicd.yml (vendored): 433 lines changed

@@ -17,42 +17,16 @@ on:
  push:
    tags:
      - "[0-9]+.[0-9]+.[0-9]+"
      - "[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
      - "[0-9]+.[0-9]+.[0-9]+.rc.[0-9]+"

concurrency:
  group: ${{ github.ref }}
  cancel-in-progress: true

jobs:
  pre-run:
    runs-on: ubuntu-latest
    permissions: write-all
    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v5
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
          role-duration-seconds: 3600
          aws-region: ${{ secrets.AWS_REGION }}

      - name: Verify AWS identity
        run: aws sts get-caller-identity

      - name: Start EC2 instances
        run: |
          aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
          aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
          echo "EC2 instances started"

  release-arm:
    name: Build and Release (ARM64)
    runs-on: [self-hosted, linux, arm64, us-east-1]
    needs: [pre-run]
    if: >-
      ${{
        needs.pre-run.result == 'success'
      }}
  release:
    name: Build and Release
    runs-on: [self-hosted, linux, x64]
    # Job-level timeout to avoid runaway or stuck runs
    timeout-minutes: 120
    env:
@@ -62,19 +36,13 @@ jobs:

    steps:
      - name: Checkout code
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Monitor storage space
        run: |
          THRESHOLD=75
          USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
          echo "Used space: $USED_SPACE%"
          if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
            echo "Used space is above the 75% threshold. Running Docker system prune."
            echo y | docker system prune -a
          else
            echo "Used space is below the threshold. No action needed."
          fi
      - name: Set up QEMU
        uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Log in to Docker Hub
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
@@ -82,189 +50,13 @@ jobs:
          registry: docker.io
          username: ${{ secrets.DOCKER_HUB_USERNAME }}
          password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

      - name: Extract tag name
        id: get-tag
        run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
        shell: bash

      - name: Update version in package.json
        run: |
          TAG=${{ env.TAG }}
          sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
          cat server/lib/consts.ts
        shell: bash

      - name: Check if release candidate
        id: check-rc
        run: |
          TAG=${{ env.TAG }}
          if [[ "$TAG" == *"-rc."* ]]; then
            echo "IS_RC=true" >> $GITHUB_ENV
          else
            echo "IS_RC=false" >> $GITHUB_ENV
          fi
        shell: bash

      - name: Build and push Docker images (Docker Hub - ARM64)
        run: |
          TAG=${{ env.TAG }}
          if [ "$IS_RC" = "true" ]; then
            make build-rc-arm tag=$TAG
          else
            make build-release-arm tag=$TAG
          fi
          echo "Built & pushed ARM64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
        shell: bash

  release-amd:
    name: Build and Release (AMD64)
    runs-on: [self-hosted, linux, x64, us-east-1]
    needs: [pre-run]
    if: >-
      ${{
        needs.pre-run.result == 'success'
      }}
    # Job-level timeout to avoid runaway or stuck runs
    timeout-minutes: 120
    env:
      # Target images
      DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
      GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}

    steps:
      - name: Checkout code
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Monitor storage space
        run: |
          THRESHOLD=75
          USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
          echo "Used space: $USED_SPACE%"
          if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
            echo "Used space is above the 75% threshold. Running Docker system prune."
            echo y | docker system prune -a
          else
            echo "Used space is below the threshold. No action needed."
          fi

      - name: Log in to Docker Hub
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: docker.io
          username: ${{ secrets.DOCKER_HUB_USERNAME }}
          password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

      - name: Extract tag name
        id: get-tag
        run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
        shell: bash

      - name: Update version in package.json
        run: |
          TAG=${{ env.TAG }}
          sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
          cat server/lib/consts.ts
        shell: bash

      - name: Check if release candidate
        id: check-rc
        run: |
          TAG=${{ env.TAG }}
          if [[ "$TAG" == *"-rc."* ]]; then
            echo "IS_RC=true" >> $GITHUB_ENV
          else
            echo "IS_RC=false" >> $GITHUB_ENV
          fi
        shell: bash

      - name: Build and push Docker images (Docker Hub - AMD64)
        run: |
          TAG=${{ env.TAG }}
          if [ "$IS_RC" = "true" ]; then
            make build-rc-amd tag=$TAG
          else
            make build-release-amd tag=$TAG
          fi
          echo "Built & pushed AMD64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
        shell: bash

  create-manifest:
    name: Create Multi-Arch Manifests
    runs-on: [self-hosted, linux, x64, us-east-1]
    needs: [release-arm, release-amd]
    if: >-
      ${{
        needs.release-arm.result == 'success' &&
        needs.release-amd.result == 'success'
      }}
    timeout-minutes: 30
    steps:
      - name: Checkout code
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Log in to Docker Hub
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: docker.io
          username: ${{ secrets.DOCKER_HUB_USERNAME }}
          password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

      - name: Extract tag name
        id: get-tag
        run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
        shell: bash

      - name: Check if release candidate
        id: check-rc
        run: |
          TAG=${{ env.TAG }}
          if [[ "$TAG" == *"-rc."* ]]; then
            echo "IS_RC=true" >> $GITHUB_ENV
          else
            echo "IS_RC=false" >> $GITHUB_ENV
          fi
        shell: bash

      - name: Create multi-arch manifests
        run: |
          TAG=${{ env.TAG }}
          if [ "$IS_RC" = "true" ]; then
            make create-manifests-rc tag=$TAG
          else
            make create-manifests tag=$TAG
          fi
          echo "Created multi-arch manifests for tag: ${TAG}"
        shell: bash

  sign-and-package:
    name: Sign and Package
    runs-on: [self-hosted, linux, x64, us-east-1]
    needs: [release-arm, release-amd, create-manifest]
    if: >-
      ${{
        needs.release-arm.result == 'success' &&
        needs.release-amd.result == 'success' &&
        needs.create-manifest.result == 'success'
      }}
    # Job-level timeout to avoid runaway or stuck runs
    timeout-minutes: 120
    env:
      # Target images
      DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
      GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}

    steps:
      - name: Checkout code
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Extract tag name
        id: get-tag
        run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
        shell: bash

      - name: Install Go
        uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
        uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
        with:
          go-version: 1.24

@@ -304,14 +96,21 @@ jobs:
      - name: Build installer
        working-directory: install
        run: |
          make go-build-release

      - name: Upload artifacts from /install/bin
        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
        with:
          name: install-bin
          path: install/bin/

      - name: Build and push Docker images (Docker Hub)
        run: |
          TAG=${{ env.TAG }}
          make build-release tag=$TAG
          echo "Built & pushed to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
        shell: bash

      - name: Install skopeo + jq
        # skopeo: copy/inspect images between registries
        # jq: JSON parsing tool used to extract digest values
@@ -322,105 +121,21 @@ jobs:
        shell: bash

      - name: Login to GHCR
        env:
          REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
        run: |
          mkdir -p "$(dirname "$REGISTRY_AUTH_FILE")"
          skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
        shell: bash

      - name: Copy tags from Docker Hub to GHCR
        # Mirror the already-built images (all architectures) to GHCR so we can sign them
        # Wait a bit for both architectures to be available in Docker Hub manifest
        env:
          REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
      - name: Copy tag from Docker Hub to GHCR
        # Mirror the already-built image (all architectures) to GHCR so we can sign it
        run: |
          set -euo pipefail
          TAG=${{ env.TAG }}
          MAJOR_TAG=$(echo $TAG | cut -d. -f1)
          MINOR_TAG=$(echo $TAG | cut -d. -f1,2)

          echo "Waiting for multi-arch manifests to be ready..."
          sleep 30

          # Determine if this is an RC release
          IS_RC="false"
          if [[ "$TAG" == *"-rc."* ]]; then
            IS_RC="true"
          fi

          if [ "$IS_RC" = "true" ]; then
            echo "RC release detected - copying version-specific tags only"

            # SQLite OSS
            echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
            skopeo copy --all --retry-times 3 \
              docker://$DOCKERHUB_IMAGE:$TAG \
              docker://$GHCR_IMAGE:$TAG

            # PostgreSQL OSS
            echo "Copying ${{ env.DOCKERHUB_IMAGE }}:postgresql-${TAG} -> ${{ env.GHCR_IMAGE }}:postgresql-${TAG}"
            skopeo copy --all --retry-times 3 \
              docker://$DOCKERHUB_IMAGE:postgresql-$TAG \
              docker://$GHCR_IMAGE:postgresql-$TAG

            # SQLite Enterprise
            echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-${TAG} -> ${{ env.GHCR_IMAGE }}:ee-${TAG}"
            skopeo copy --all --retry-times 3 \
              docker://$DOCKERHUB_IMAGE:ee-$TAG \
              docker://$GHCR_IMAGE:ee-$TAG

            # PostgreSQL Enterprise
            echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-postgresql-${TAG} -> ${{ env.GHCR_IMAGE }}:ee-postgresql-${TAG}"
            skopeo copy --all --retry-times 3 \
              docker://$DOCKERHUB_IMAGE:ee-postgresql-$TAG \
              docker://$GHCR_IMAGE:ee-postgresql-$TAG
          else
            echo "Regular release detected - copying all tags (latest, major, minor, full version)"

            # SQLite OSS - all tags
            for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
              echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:${TAG_SUFFIX}"
              skopeo copy --all --retry-times 3 \
                docker://$DOCKERHUB_IMAGE:$TAG_SUFFIX \
                docker://$GHCR_IMAGE:$TAG_SUFFIX
            done

            # PostgreSQL OSS - all tags
            for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
              echo "Copying ${{ env.DOCKERHUB_IMAGE }}:postgresql-${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:postgresql-${TAG_SUFFIX}"
              skopeo copy --all --retry-times 3 \
                docker://$DOCKERHUB_IMAGE:postgresql-$TAG_SUFFIX \
                docker://$GHCR_IMAGE:postgresql-$TAG_SUFFIX
            done

            # SQLite Enterprise - all tags
            for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
              echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:ee-${TAG_SUFFIX}"
              skopeo copy --all --retry-times 3 \
                docker://$DOCKERHUB_IMAGE:ee-$TAG_SUFFIX \
                docker://$GHCR_IMAGE:ee-$TAG_SUFFIX
            done

            # PostgreSQL Enterprise - all tags
            for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
              echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-postgresql-${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:ee-postgresql-${TAG_SUFFIX}"
              skopeo copy --all --retry-times 3 \
                docker://$DOCKERHUB_IMAGE:ee-postgresql-$TAG_SUFFIX \
                docker://$GHCR_IMAGE:ee-postgresql-$TAG_SUFFIX
            done
          fi

          echo "All images copied successfully to GHCR!"
          echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
          skopeo copy --all --retry-times 3 \
            docker://$DOCKERHUB_IMAGE:$TAG \
            docker://$GHCR_IMAGE:$TAG
        shell: bash

      - name: Login to GitHub Container Registry (for cosign)
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Install cosign
        # cosign is used to sign and verify container images (key and keyless)
        uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
@@ -440,90 +155,26 @@ jobs:
          issuer="https://token.actions.githubusercontent.com"
          id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)

          # Determine if this is an RC release
          IS_RC="false"
          if [[ "$TAG" == *"-rc."* ]]; then
            IS_RC="true"
          fi
          for IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
            echo "Processing ${IMAGE}:${TAG}"

          # Define image variants to sign
          if [ "$IS_RC" = "true" ]; then
            echo "RC release - signing version-specific tags only"
            IMAGE_TAGS=(
              "${TAG}"
              "postgresql-${TAG}"
              "ee-${TAG}"
              "ee-postgresql-${TAG}"
            )
          else
            echo "Regular release - signing all tags"
            MAJOR_TAG=$(echo $TAG | cut -d. -f1)
            MINOR_TAG=$(echo $TAG | cut -d. -f1,2)
            IMAGE_TAGS=(
              "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"
              "postgresql-latest" "postgresql-$MAJOR_TAG" "postgresql-$MINOR_TAG" "postgresql-$TAG"
              "ee-latest" "ee-$MAJOR_TAG" "ee-$MINOR_TAG" "ee-$TAG"
              "ee-postgresql-latest" "ee-postgresql-$MAJOR_TAG" "ee-postgresql-$MINOR_TAG" "ee-postgresql-$TAG"
            )
          fi
            DIGEST="$(skopeo inspect --retry-times 3 docker://${IMAGE}:${TAG} | jq -r '.Digest')"
            REF="${IMAGE}@${DIGEST}"
            echo "Resolved digest: ${REF}"

          # Sign each image variant for both registries
          for BASE_IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
            for IMAGE_TAG in "${IMAGE_TAGS[@]}"; do
              echo "Processing ${BASE_IMAGE}:${IMAGE_TAG}"
            echo "==> cosign sign (keyless) --recursive ${REF}"
            cosign sign --recursive "${REF}"

              DIGEST="$(skopeo inspect --retry-times 3 docker://${BASE_IMAGE}:${IMAGE_TAG} | jq -r '.Digest')"
              REF="${BASE_IMAGE}@${DIGEST}"
              echo "Resolved digest: ${REF}"
            echo "==> cosign sign (key) --recursive ${REF}"
            cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"

              echo "==> cosign sign (keyless) --recursive ${REF}"
              cosign sign --recursive "${REF}"
            echo "==> cosign verify (public key) ${REF}"
            cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text

              echo "==> cosign sign (key) --recursive ${REF}"
              cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"

              echo "==> cosign verify (public key) ${REF}"
              cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text

              echo "==> cosign verify (keyless policy) ${REF}"
              cosign verify \
                --certificate-oidc-issuer "${issuer}" \
                --certificate-identity-regexp "${id_regex}" \
                "${REF}" -o text

              echo "✓ Successfully signed and verified ${BASE_IMAGE}:${IMAGE_TAG}"
            done
            echo "==> cosign verify (keyless policy) ${REF}"
            cosign verify \
              --certificate-oidc-issuer "${issuer}" \
              --certificate-identity-regexp "${id_regex}" \
              "${REF}" -o text
          done

          echo "All images signed and verified successfully!"
        shell: bash

  post-run:
    needs: [pre-run, release-arm, release-amd, create-manifest, sign-and-package]
    if: >-
      ${{
        always() &&
        needs.pre-run.result == 'success' &&
        (needs.release-arm.result == 'success' || needs.release-arm.result == 'skipped' || needs.release-arm.result == 'failure') &&
        (needs.release-amd.result == 'success' || needs.release-amd.result == 'skipped' || needs.release-amd.result == 'failure') &&
        (needs.create-manifest.result == 'success' || needs.create-manifest.result == 'skipped' || needs.create-manifest.result == 'failure') &&
        (needs.sign-and-package.result == 'success' || needs.sign-and-package.result == 'skipped' || needs.sign-and-package.result == 'failure')
      }}
    runs-on: ubuntu-latest
    permissions: write-all
    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v5
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
          role-duration-seconds: 3600
          aws-region: ${{ secrets.AWS_REGION }}

      - name: Verify AWS identity
        run: aws sts get-caller-identity

      - name: Stop EC2 instances
        run: |
          aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
          aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
          echo "EC2 instances stopped"
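The release jobs above repeat the same tag arithmetic in several steps: derive major and minor tags from the pushed git tag, then branch on whether it is a release candidate. A minimal standalone sketch of that logic (the tag value here is illustrative; the workflow reads it from `${GITHUB_REF#refs/tags/}`):

```bash
#!/usr/bin/env bash
set -euo pipefail

TAG="1.15.0-rc.2"                          # illustrative input
MAJOR_TAG=$(echo "$TAG" | cut -d. -f1)     # -> "1"
MINOR_TAG=$(echo "$TAG" | cut -d. -f1,2)   # -> "1.15"

# An RC is recognized purely by the "-rc." substring in the tag
IS_RC="false"
if [[ "$TAG" == *"-rc."* ]]; then
    IS_RC="true"
fi

if [ "$IS_RC" = "true" ]; then
    # RC builds publish only the fully qualified tag
    echo "RC release: publish $TAG only"
else
    # Stable builds fan out to latest, major, minor, and full tags
    echo "stable release: publish latest $MAJOR_TAG $MINOR_TAG $TAG"
fi
```

Note that `cut -d. -f1,2` on an RC tag like `1.15.0-rc.2` still yields `1.15`, so the major/minor fan-out only makes sense on the stable path, which is why the RC branch publishes version-specific tags only.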
.github/workflows/cicd.yml.backup (vendored): 426 lines changed

@@ -1,426 +0,0 @@
name: CI/CD Pipeline

# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.

permissions:
  contents: read
  packages: write # for GHCR push
  id-token: write # for Cosign Keyless (OIDC) Signing

# Required secrets:
# - DOCKER_HUB_USERNAME / DOCKER_HUB_ACCESS_TOKEN: push to Docker Hub
# - GITHUB_TOKEN: used for GHCR login and OIDC keyless signing
# - COSIGN_PRIVATE_KEY / COSIGN_PASSWORD / COSIGN_PUBLIC_KEY: for key-based signing

on:
  push:
    tags:
      - "[0-9]+.[0-9]+.[0-9]+"
      - "[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"

concurrency:
  group: ${{ github.ref }}
  cancel-in-progress: true

jobs:
  pre-run:
    runs-on: ubuntu-latest
    permissions: write-all
    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v2
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
          role-duration-seconds: 3600
          aws-region: ${{ secrets.AWS_REGION }}

      - name: Verify AWS identity
        run: aws sts get-caller-identity

      - name: Start EC2 instances
        run: |
          aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
          aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
          echo "EC2 instances started"

  release-arm:
    name: Build and Release (ARM64)
    runs-on: [self-hosted, linux, arm64, us-east-1]
    needs: [pre-run]
    if: >-
      ${{
        needs.pre-run.result == 'success'
      }}
    # Job-level timeout to avoid runaway or stuck runs
    timeout-minutes: 120
    env:
      # Target images
      DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
      GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}

    steps:
      - name: Checkout code
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Monitor storage space
        run: |
          THRESHOLD=75
          USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
          echo "Used space: $USED_SPACE%"
          if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
            echo "Used space is above the 75% threshold. Running Docker system prune."
            echo y | docker system prune -a
          else
            echo "Used space is below the threshold. No action needed."
          fi

      - name: Log in to Docker Hub
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: docker.io
          username: ${{ secrets.DOCKER_HUB_USERNAME }}
          password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

      - name: Extract tag name
        id: get-tag
        run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
        shell: bash

      - name: Update version in package.json
        run: |
          TAG=${{ env.TAG }}
          sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
          cat server/lib/consts.ts
        shell: bash

      - name: Check if release candidate
        id: check-rc
        run: |
          TAG=${{ env.TAG }}
          if [[ "$TAG" == *"-rc."* ]]; then
            echo "IS_RC=true" >> $GITHUB_ENV
          else
            echo "IS_RC=false" >> $GITHUB_ENV
          fi
        shell: bash

      - name: Build and push Docker images (Docker Hub - ARM64)
        run: |
          TAG=${{ env.TAG }}
          if [ "$IS_RC" = "true" ]; then
            make build-rc-arm tag=$TAG
          else
            make build-release-arm tag=$TAG
          fi
          echo "Built & pushed ARM64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
        shell: bash

  release-amd:
    name: Build and Release (AMD64)
    runs-on: [self-hosted, linux, x64, us-east-1]
    needs: [pre-run]
    if: >-
      ${{
        needs.pre-run.result == 'success'
      }}
    # Job-level timeout to avoid runaway or stuck runs
    timeout-minutes: 120
    env:
      # Target images
      DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
      GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}

    steps:
      - name: Checkout code
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Monitor storage space
        run: |
          THRESHOLD=75
          USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
          echo "Used space: $USED_SPACE%"
          if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
            echo "Used space is above the 75% threshold. Running Docker system prune."
            echo y | docker system prune -a
          else
            echo "Used space is below the threshold. No action needed."
          fi

      - name: Log in to Docker Hub
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: docker.io
          username: ${{ secrets.DOCKER_HUB_USERNAME }}
          password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

      - name: Extract tag name
        id: get-tag
        run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
        shell: bash

      - name: Update version in package.json
        run: |
          TAG=${{ env.TAG }}
          sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
          cat server/lib/consts.ts
        shell: bash

      - name: Check if release candidate
        id: check-rc
        run: |
          TAG=${{ env.TAG }}
          if [[ "$TAG" == *"-rc."* ]]; then
            echo "IS_RC=true" >> $GITHUB_ENV
          else
            echo "IS_RC=false" >> $GITHUB_ENV
          fi
        shell: bash

      - name: Build and push Docker images (Docker Hub - AMD64)
        run: |
          TAG=${{ env.TAG }}
          if [ "$IS_RC" = "true" ]; then
            make build-rc-amd tag=$TAG
          else
            make build-release-amd tag=$TAG
          fi
          echo "Built & pushed AMD64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
        shell: bash

  create-manifest:
    name: Create Multi-Arch Manifests
    runs-on: [self-hosted, linux, x64, us-east-1]
    needs: [release-arm, release-amd]
    if: >-
      ${{
        needs.release-arm.result == 'success' &&
        needs.release-amd.result == 'success'
      }}
    timeout-minutes: 30
    steps:
      - name: Checkout code
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Log in to Docker Hub
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: docker.io
          username: ${{ secrets.DOCKER_HUB_USERNAME }}
          password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

      - name: Extract tag name
        id: get-tag
        run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
        shell: bash

      - name: Check if release candidate
        id: check-rc
        run: |
          TAG=${{ env.TAG }}
          if [[ "$TAG" == *"-rc."* ]]; then
            echo "IS_RC=true" >> $GITHUB_ENV
          else
            echo "IS_RC=false" >> $GITHUB_ENV
          fi
        shell: bash

      - name: Create multi-arch manifests
        run: |
          TAG=${{ env.TAG }}
          if [ "$IS_RC" = "true" ]; then
            make create-manifests-rc tag=$TAG
          else
            make create-manifests tag=$TAG
          fi
          echo "Created multi-arch manifests for tag: ${TAG}"
        shell: bash

  sign-and-package:
    name: Sign and Package
    runs-on: [self-hosted, linux, x64, us-east-1]
    needs: [release-arm, release-amd, create-manifest]
    if: >-
      ${{
        needs.release-arm.result == 'success' &&
        needs.release-amd.result == 'success' &&
        needs.create-manifest.result == 'success'
      }}
    # Job-level timeout to avoid runaway or stuck runs
    timeout-minutes: 120
    env:
      # Target images
      DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
      GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}

    steps:
      - name: Checkout code
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Extract tag name
        id: get-tag
        run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
        shell: bash

      - name: Install Go
        uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
        with:
          go-version: 1.24

      - name: Update version in package.json
        run: |
          TAG=${{ env.TAG }}
          sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
          cat server/lib/consts.ts
        shell: bash

      - name: Pull latest Gerbil version
        id: get-gerbil-tag
        run: |
          LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name')
          echo "LATEST_GERBIL_TAG=$LATEST_TAG" >> $GITHUB_ENV
        shell: bash

      - name: Pull latest Badger version
        id: get-badger-tag
        run: |
          LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name')
          echo "LATEST_BADGER_TAG=$LATEST_TAG" >> $GITHUB_ENV
        shell: bash

      - name: Update install/main.go
        run: |
          PANGOLIN_VERSION=${{ env.TAG }}
          GERBIL_VERSION=${{ env.LATEST_GERBIL_TAG }}
          BADGER_VERSION=${{ env.LATEST_BADGER_TAG }}
          sed -i "s/config.PangolinVersion = \".*\"/config.PangolinVersion = \"$PANGOLIN_VERSION\"/" install/main.go
          sed -i "s/config.GerbilVersion = \".*\"/config.GerbilVersion = \"$GERBIL_VERSION\"/" install/main.go
          sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$BADGER_VERSION\"/" install/main.go
          echo "Updated install/main.go with Pangolin version $PANGOLIN_VERSION, Gerbil version $GERBIL_VERSION, and Badger version $BADGER_VERSION"
          cat install/main.go
        shell: bash

      - name: Build installer
        working-directory: install
        run: |
          make go-build-release

      - name: Upload artifacts from /install/bin
        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
        with:
          name: install-bin
          path: install/bin/

      - name: Install skopeo + jq
        # skopeo: copy/inspect images between registries
        # jq: JSON parsing tool used to extract digest values
        run: |
          sudo apt-get update -y
          sudo apt-get install -y skopeo jq
          skopeo --version
        shell: bash

      - name: Login to GHCR
        env:
          REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
        run: |
          mkdir -p "$(dirname "$REGISTRY_AUTH_FILE")"
          skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
        shell: bash

      - name: Copy tag from Docker Hub to GHCR
        # Mirror the already-built image (all architectures) to GHCR so we can sign it
        # Wait a bit for both architectures to be available in Docker Hub manifest
        env:
          REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
        run: |
          set -euo pipefail
          TAG=${{ env.TAG }}
          echo "Waiting for multi-arch manifest to be ready..."
          sleep 30
          echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
          skopeo copy --all --retry-times 3 \
            docker://$DOCKERHUB_IMAGE:$TAG \
            docker://$GHCR_IMAGE:$TAG
        shell: bash

      - name: Login to GitHub Container Registry (for cosign)
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Install cosign
        # cosign is used to sign and verify container images (key and keyless)
        uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0

      - name: Dual-sign and verify (GHCR & Docker Hub)
        # Sign each image by digest using keyless (OIDC) and key-based signing,
        # then verify both the public key signature and the keyless OIDC signature.
        env:
          TAG: ${{ env.TAG }}
          COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
          COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
          COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
          COSIGN_YES: "true"
        run: |
          set -euo pipefail

          issuer="https://token.actions.githubusercontent.com"
          id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)

          for IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
            echo "Processing ${IMAGE}:${TAG}"

            DIGEST="$(skopeo inspect --retry-times 3 docker://${IMAGE}:${TAG} | jq -r '.Digest')"
            REF="${IMAGE}@${DIGEST}"
            echo "Resolved digest: ${REF}"

            echo "==> cosign sign (keyless) --recursive ${REF}"
            cosign sign --recursive "${REF}"

            echo "==> cosign sign (key) --recursive ${REF}"
            cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"

            echo "==> cosign verify (public key) ${REF}"
            cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text

            echo "==> cosign verify (keyless policy) ${REF}"
            cosign verify \
              --certificate-oidc-issuer "${issuer}" \
              --certificate-identity-regexp "${id_regex}" \
              "${REF}" -o text
          done
        shell: bash

  post-run:
    needs: [pre-run, release-arm, release-amd, create-manifest, sign-and-package]
    if: >-
      ${{
        always() &&
        needs.pre-run.result == 'success' &&
        (needs.release-arm.result == 'success' || needs.release-arm.result == 'skipped' || needs.release-arm.result == 'failure') &&
        (needs.release-amd.result == 'success' || needs.release-amd.result == 'skipped' || needs.release-amd.result == 'failure') &&
        (needs.create-manifest.result == 'success' || needs.create-manifest.result == 'skipped' || needs.create-manifest.result == 'failure') &&
        (needs.sign-and-package.result == 'success' || needs.sign-and-package.result == 'skipped' || needs.sign-and-package.result == 'failure')
      }}
    runs-on: ubuntu-latest
    permissions: write-all
    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v2
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
          role-duration-seconds: 3600
          aws-region: ${{ secrets.AWS_REGION }}

      - name: Verify AWS identity
        run: aws sts get-caller-identity

      - name: Stop EC2 instances
        run: |
          aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
          aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
          echo "EC2 instances stopped"
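For reference, the deleted workflow's dual-signing step reduces to resolving each tag to an immutable digest and signing that digest twice, once keyless and once with a key. A condensed sketch using the same commands as the step above (the image name and tag are illustrative):

```bash
set -euo pipefail

IMAGE="ghcr.io/fosrl/pangolin"   # illustrative; the workflow loops over GHCR and Docker Hub
TAG="1.15.0"                     # illustrative

# Resolve the tag to a digest so signatures bind to content, not to a movable tag
DIGEST="$(skopeo inspect --retry-times 3 docker://${IMAGE}:${TAG} | jq -r '.Digest')"
REF="${IMAGE}@${DIGEST}"

cosign sign --recursive "${REF}"                                 # keyless (OIDC)
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"  # key-based

# Verify both signature types before declaring success
cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text
cosign verify \
    --certificate-oidc-issuer "https://token.actions.githubusercontent.com" \
    --certificate-identity-regexp "^https://github.com/fosrl/pangolin/.+" \
    "${REF}" -o text
```

`--recursive` also signs the per-architecture images referenced by a multi-arch index, which is why the workflow signs only after the full manifest has been mirrored.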
.github/workflows/linting.yml (vendored): 4 lines changed

@@ -21,10 +21,10 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Set up Node.js
        uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
        uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
        with:
          node-version: '22'
.github/workflows/mirror.yaml (vendored): 2 lines changed

@@ -45,7 +45,7 @@ jobs:
        run: |
          set -euo pipefail
          skopeo list-tags --retry-times 3 docker://"${SOURCE_IMAGE}" \
            | jq -r '.Tags[]' | grep -v -e '-arm64' -e '-amd64' | sort -u > src-tags.txt
            | jq -r '.Tags[]' | sort -u > src-tags.txt
          echo "Found source tags: $(wc -l < src-tags.txt)"
          head -n 20 src-tags.txt || true
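One side of this hunk filters the per-architecture tags out of the sync set before mirroring. The filtering itself is a plain grep over skopeo's JSON tag listing; a minimal sketch (SOURCE_IMAGE is illustrative):

```bash
set -euo pipefail

SOURCE_IMAGE="docker.io/fosrl/pangolin"   # illustrative
skopeo list-tags --retry-times 3 docker://"${SOURCE_IMAGE}" \
    | jq -r '.Tags[]' \
    | grep -v -e '-arm64' -e '-amd64' \
    | sort -u > src-tags.txt

echo "Found source tags: $(wc -l < src-tags.txt)"
```

Dropping the `-arm64`/`-amd64` suffixed tags avoids mirroring the single-arch intermediates that exist only as inputs to the multi-arch manifests.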
.github/workflows/restart-runners.yml (vendored): 39 lines changed

@@ -1,39 +0,0 @@
name: Restart Runners

on:
  schedule:
    - cron: '0 0 */7 * *'

permissions:
  id-token: write
  contents: read

jobs:
  ec2-maintenance-prod:
    runs-on: ubuntu-latest
    permissions: write-all
    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v5
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
          role-duration-seconds: 3600
          aws-region: ${{ secrets.AWS_REGION }}

      - name: Verify AWS identity
        run: aws sts get-caller-identity

      - name: Start EC2 instance
        run: |
          aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
          aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
          echo "EC2 instances started"

      - name: Wait
        run: sleep 600

      - name: Stop EC2 instance
        run: |
          aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
          aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
          echo "EC2 instances stopped"
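The deleted maintenance workflow is just three AWS CLI calls around a fixed wait. A minimal sketch, assuming the instance IDs come from the environment rather than repository secrets:

```bash
set -euo pipefail

# ARM_INSTANCE_ID / AMD_INSTANCE_ID are hypothetical stand-ins for the
# EC2_INSTANCE_ID_*_RUNNER secrets used by the workflow.
aws ec2 start-instances --instance-ids "$ARM_INSTANCE_ID" "$AMD_INSTANCE_ID"
sleep 600   # ten minutes of uptime for the runners to apply their own maintenance
aws ec2 stop-instances --instance-ids "$ARM_INSTANCE_ID" "$AMD_INSTANCE_ID"
```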
.github/workflows/saas.yml (vendored): 125 lines changed

@@ -1,125 +0,0 @@
name: CI/CD Pipeline

# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.

permissions:
  contents: read
  packages: write # for GHCR push
  id-token: write # for Cosign Keyless (OIDC) Signing

on:
  push:
    tags:
      - "[0-9]+.[0-9]+.[0-9]+-s.[0-9]+"

concurrency:
  group: ${{ github.ref }}
  cancel-in-progress: true

jobs:
  pre-run:
    runs-on: ubuntu-latest
    permissions: write-all
    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v5
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
          role-duration-seconds: 3600
          aws-region: ${{ secrets.AWS_REGION }}

      - name: Verify AWS identity
        run: aws sts get-caller-identity

      - name: Start EC2 instances
        run: |
          aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
          echo "EC2 instances started"

  release-arm:
    name: Build and Release (ARM64)
    runs-on: [self-hosted, linux, arm64, us-east-1]
    needs: [pre-run]
    if: >-
      ${{
        needs.pre-run.result == 'success'
      }}
    # Job-level timeout to avoid runaway or stuck runs
    timeout-minutes: 120
    env:
      # Target images
      AWS_IMAGE: ${{ secrets.aws_account_id }}.dkr.ecr.us-east-1.amazonaws.com/${{ github.event.repository.name }}

    steps:
      - name: Checkout code
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Monitor storage space
        run: |
          THRESHOLD=75
          USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
          echo "Used space: $USED_SPACE%"
          if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
            echo "Used space is above the 75% threshold. Running Docker system prune."
            echo y | docker system prune -a
          else
            echo "Used space is below the threshold. No action needed."
          fi

      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v5
        with:
          role-to-assume: arn:aws:iam::${{ secrets.aws_account_id }}:role/${{ secrets.AWS_ROLE_NAME }}
          role-duration-seconds: 3600
          aws-region: ${{ secrets.AWS_REGION }}

      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v2

      - name: Extract tag name
        id: get-tag
        run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
        shell: bash

      - name: Update version in package.json
        run: |
          TAG=${{ env.TAG }}
          sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
          cat server/lib/consts.ts
        shell: bash

      - name: Build and push Docker images (Docker Hub - ARM64)
        run: |
          TAG=${{ env.TAG }}
          make build-saas tag=$TAG
          echo "Built & pushed ARM64 images to: ${{ env.AWS_IMAGE }}:${TAG}"
        shell: bash

  post-run:
    needs: [pre-run, release-arm]
    if: >-
      ${{
        always() &&
        needs.pre-run.result == 'success' &&
        (needs.release-arm.result == 'success' || needs.release-arm.result == 'skipped' || needs.release-arm.result == 'failure')
      }}
    runs-on: ubuntu-latest
    permissions: write-all
    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v5
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
          role-duration-seconds: 3600
          aws-region: ${{ secrets.AWS_REGION }}

      - name: Verify AWS identity
        run: aws sts get-caller-identity

      - name: Stop EC2 instances
        run: |
          aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
          echo "EC2 instances stopped"
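The deleted saas workflow pushes to ECR instead of Docker Hub. The target URI is composed from the account ID, region, and repository name, and `aws-actions/amazon-ecr-login` performs roughly the following login (the account ID here is hypothetical):

```bash
set -euo pipefail

AWS_ACCOUNT_ID="123456789012"   # hypothetical
AWS_REGION="us-east-1"
REPO_NAME="pangolin"
AWS_IMAGE="${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${REPO_NAME}"

# Roughly what the amazon-ecr-login action does under the hood
aws ecr get-login-password --region "$AWS_REGION" \
    | docker login --username AWS --password-stdin \
      "${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com"

make build-saas tag=1.12.2-s.5 AWS_IMAGE="$AWS_IMAGE"   # tag illustrative
```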
.github/workflows/stale-bot.yml (vendored): 2 lines changed

@@ -14,7 +14,7 @@ jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
      - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
        with:
          days-before-stale: 14
          days-before-close: 14
.github/workflows/test.yml (vendored): 31 lines changed

@@ -12,12 +12,11 @@ on:
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Install Node
        uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
    steps:
      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
        with:
          node-version: '22'

@@ -58,26 +57,8 @@ jobs:
          echo "App failed to start"
          exit 1

  build-sqlite:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Copy config file
        run: cp config/config.example.yml config/config.yml

      - name: Build Docker image sqlite
        run: make dev-build-sqlite

  build-postgres:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Copy config file
        run: cp config/config.example.yml config/config.yml
        run: make build-sqlite

      - name: Build Docker image pg
        run: make dev-build-pg
        run: make build-pg
.gitignore (vendored): 4 lines changed

@@ -49,6 +49,4 @@ postgres/
dynamic/
*.mmdb
scratch/
tsconfig.json
hydrateSaas.ts
CLAUDE.md
tsconfig.json
@@ -1,12 +0,0 @@
.github/
bruno/
cli/
config/
messages/
next.config.mjs/
public/
tailwind.config.js/
test/
**/*.yml
**/*.yaml
**/*.md
.vscode/extensions.json (vendored): 3 lines changed

@@ -1,3 +0,0 @@
{
    "recommendations": ["esbenp.prettier-vscode"]
}
.vscode/settings.json (vendored): 22 lines changed

@@ -1,22 +0,0 @@
{
    "editor.codeActionsOnSave": {
        "source.addMissingImports.ts": "always"
    },
    "editor.defaultFormatter": "esbenp.prettier-vscode",
    "[jsonc]": {
        "editor.defaultFormatter": "vscode.json-language-features"
    },
    "[javascript]": {
        "editor.defaultFormatter": "esbenp.prettier-vscode"
    },
    "[typescript]": {
        "editor.defaultFormatter": "vscode.typescript-language-features"
    },
    "[typescriptreact]": {
        "editor.defaultFormatter": "esbenp.prettier-vscode"
    },
    "[json]": {
        "editor.defaultFormatter": "esbenp.prettier-vscode"
    },
    "editor.formatOnSave": true
}
Dockerfile: 51 lines changed

@@ -1,22 +1,10 @@
FROM node:24-alpine AS builder

# OCI Image Labels - Build Args for dynamic values
ARG VERSION="dev"
ARG REVISION=""
ARG CREATED=""
ARG LICENSE="AGPL-3.0"
FROM node:22-alpine AS builder

WORKDIR /app

ARG BUILD=oss
ARG DATABASE=sqlite

# Derive title and description based on BUILD type
ARG IMAGE_TITLE="Pangolin"
ARG IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere"

RUN apk add --no-cache curl tzdata python3 make g++

# COPY package.json package-lock.json ./
COPY package*.json ./
RUN npm ci
@@ -24,9 +12,8 @@ RUN npm ci
COPY . .

RUN echo "export * from \"./$DATABASE\";" > server/db/index.ts
RUN echo "export const driver: \"pg\" | \"sqlite\" = \"$DATABASE\";" >> server/db/index.ts

RUN echo "export const build = \"$BUILD\" as \"saas\" | \"enterprise\" | \"oss\";" > server/build.ts
RUN echo "export const build = \"$BUILD\" as any;" > server/build.ts

# Copy the appropriate TypeScript configuration based on build type
RUN if [ "$BUILD" = "oss" ]; then cp tsconfig.oss.json tsconfig.json; \
@@ -43,9 +30,9 @@ RUN mkdir -p dist
RUN npm run next:build
RUN node esbuild.mjs -e server/index.ts -o dist/server.mjs -b $BUILD
RUN if [ "$DATABASE" = "pg" ]; then \
        node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs; \
    else \
        node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs; \
    fi

# test to make sure the build output is there and error if not
@@ -53,45 +40,27 @@ RUN test -f dist/server.mjs

RUN npm run build:cli

# Prune dev dependencies and clean up to prepare for copy to runner
RUN npm prune --omit=dev && npm cache clean --force

FROM node:24-alpine AS runner
FROM node:22-alpine AS runner

WORKDIR /app

# Only curl and tzdata needed at runtime - no build tools!
# Curl used for the health checks
RUN apk add --no-cache curl tzdata

# Copy pre-built node_modules from builder (already pruned to production only)
# This includes the compiled native modules like better-sqlite3
COPY --from=builder /app/node_modules ./node_modules
# COPY package.json package-lock.json ./
COPY package*.json ./

RUN npm ci --omit=dev && npm cache clean --force

COPY --from=builder /app/.next/standalone ./
COPY --from=builder /app/.next/static ./.next/static
COPY --from=builder /app/dist ./dist
COPY --from=builder /app/init ./dist/init
COPY --from=builder /app/package.json ./package.json

COPY ./cli/wrapper.sh /usr/local/bin/pangctl
RUN chmod +x /usr/local/bin/pangctl ./dist/cli.mjs

COPY server/db/names.json ./dist/names.json
COPY server/db/ios_models.json ./dist/ios_models.json
COPY server/db/mac_models.json ./dist/mac_models.json
COPY public ./public

# OCI Image Labels
# https://github.com/opencontainers/image-spec/blob/main/annotations.md
LABEL org.opencontainers.image.source="https://github.com/fosrl/pangolin" \
      org.opencontainers.image.url="https://github.com/fosrl/pangolin" \
      org.opencontainers.image.documentation="https://docs.pangolin.net" \
      org.opencontainers.image.vendor="Fossorial" \
      org.opencontainers.image.licenses="${LICENSE}" \
      org.opencontainers.image.title="${IMAGE_TITLE}" \
      org.opencontainers.image.description="${IMAGE_DESCRIPTION}" \
      org.opencontainers.image.version="${VERSION}" \
      org.opencontainers.image.revision="${REVISION}" \
      org.opencontainers.image.created="${CREATED}"

CMD ["npm", "run", "start"]
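The two `RUN echo` lines in the builder stage generate the database shim at build time. A small sketch of what the generated `server/db/index.ts` ends up containing for a PostgreSQL build (run outside Docker purely for illustration):

```bash
set -euo pipefail

DATABASE=pg   # build arg; sqlite in the default build
echo "export * from \"./$DATABASE\";" > server/db/index.ts
echo "export const driver: \"pg\" | \"sqlite\" = \"$DATABASE\";" >> server/db/index.ts

cat server/db/index.ts
# export * from "./pg";
# export const driver: "pg" | "sqlite" = "pg";
```

This way the rest of the server imports a single `server/db` module and the driver choice is burned into each image variant.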
Makefile: 452 lines changed

@@ -1,32 +1,8 @@
.PHONY: build build-pg build-release build-release-arm build-release-amd create-manifests build-arm build-x86 test clean
.PHONY: build build-pg build-release build-arm build-x86 test clean

major_tag := $(shell echo $(tag) | cut -d. -f1)
minor_tag := $(shell echo $(tag) | cut -d. -f1,2)

# OCI label variables
CREATED := $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
REVISION := $(shell git rev-parse HEAD 2>/dev/null || echo "unknown")

# Common OCI build args for OSS builds
OCI_ARGS_OSS = --build-arg VERSION=$(tag) \
    --build-arg REVISION=$(REVISION) \
    --build-arg CREATED=$(CREATED) \
    --build-arg IMAGE_TITLE="Pangolin" \
    --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere"

# Common OCI build args for Enterprise builds
OCI_ARGS_EE = --build-arg VERSION=$(tag) \
    --build-arg REVISION=$(REVISION) \
    --build-arg CREATED=$(CREATED) \
    --build-arg LICENSE="Fossorial Commercial" \
    --build-arg IMAGE_TITLE="Pangolin EE" \
    --build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere"

.PHONY: build-release build-sqlite build-postgresql build-ee-sqlite build-ee-postgresql

build-release: build-sqlite build-postgresql build-ee-sqlite build-ee-postgresql

build-sqlite:
build-release:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
        exit 1; \
@@ -34,55 +10,33 @@ build-sqlite:
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=sqlite \
        $(OCI_ARGS_OSS) \
        --platform linux/arm64,linux/amd64 \
        --tag fosrl/pangolin:latest \
        --tag fosrl/pangolin:$(major_tag) \
        --tag fosrl/pangolin:$(minor_tag) \
        --tag fosrl/pangolin:$(tag) \
        --push .

build-postgresql:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
        exit 1; \
    fi
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=pg \
        $(OCI_ARGS_OSS) \
        --platform linux/arm64,linux/amd64 \
        --tag fosrl/pangolin:postgresql-latest \
        --tag fosrl/pangolin:postgresql-$(major_tag) \
        --tag fosrl/pangolin:postgresql-$(minor_tag) \
        --tag fosrl/pangolin:postgresql-$(tag) \
        --push .

build-ee-sqlite:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
        exit 1; \
    fi
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=sqlite \
        $(OCI_ARGS_EE) \
        --platform linux/arm64,linux/amd64 \
        --tag fosrl/pangolin:ee-latest \
        --tag fosrl/pangolin:ee-$(major_tag) \
        --tag fosrl/pangolin:ee-$(minor_tag) \
        --tag fosrl/pangolin:ee-$(tag) \
        --push .

build-ee-postgresql:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
        exit 1; \
    fi
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=pg \
        $(OCI_ARGS_EE) \
        --platform linux/arm64,linux/amd64 \
        --tag fosrl/pangolin:ee-postgresql-latest \
        --tag fosrl/pangolin:ee-postgresql-$(major_tag) \
@@ -90,431 +44,47 @@ build-ee-postgresql:
        --tag fosrl/pangolin:ee-postgresql-$(tag) \
        --push .

build-saas:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-saas tag=<tag>"; \
        exit 1; \
    fi
    docker buildx build \
        --build-arg BUILD=saas \
        --build-arg DATABASE=pg \
        --platform linux/arm64 \
        --tag $(AWS_IMAGE):$(tag) \
        --push .

build-release-arm:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-release-arm tag=<tag>"; \
        exit 1; \
    fi
    @MAJOR_TAG=$$(echo $(tag) | cut -d. -f1); \
    MINOR_TAG=$$(echo $(tag) | cut -d. -f1,2); \
    CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
    REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=sqlite \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64 \
        --tag fosrl/pangolin:latest-arm64 \
        --tag fosrl/pangolin:$$MAJOR_TAG-arm64 \
        --tag fosrl/pangolin:$$MINOR_TAG-arm64 \
        --tag fosrl/pangolin:$(tag)-arm64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=pg \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64 \
        --tag fosrl/pangolin:postgresql-latest-arm64 \
        --tag fosrl/pangolin:postgresql-$$MAJOR_TAG-arm64 \
        --tag fosrl/pangolin:postgresql-$$MINOR_TAG-arm64 \
        --tag fosrl/pangolin:postgresql-$(tag)-arm64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=sqlite \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg LICENSE="Fossorial Commercial" \
        --build-arg IMAGE_TITLE="Pangolin EE" \
        --build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64 \
        --tag fosrl/pangolin:ee-latest-arm64 \
        --tag fosrl/pangolin:ee-$$MAJOR_TAG-arm64 \
        --tag fosrl/pangolin:ee-$$MINOR_TAG-arm64 \
        --tag fosrl/pangolin:ee-$(tag)-arm64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=pg \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg LICENSE="Fossorial Commercial" \
        --build-arg IMAGE_TITLE="Pangolin EE" \
        --build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64 \
        --tag fosrl/pangolin:ee-postgresql-latest-arm64 \
        --tag fosrl/pangolin:ee-postgresql-$$MAJOR_TAG-arm64 \
        --tag fosrl/pangolin:ee-postgresql-$$MINOR_TAG-arm64 \
        --tag fosrl/pangolin:ee-postgresql-$(tag)-arm64 \
        --push .

build-release-amd:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-release-amd tag=<tag>"; \
        exit 1; \
    fi
    @MAJOR_TAG=$$(echo $(tag) | cut -d. -f1); \
    MINOR_TAG=$$(echo $(tag) | cut -d. -f1,2); \
    CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
    REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=sqlite \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/amd64 \
        --tag fosrl/pangolin:latest-amd64 \
        --tag fosrl/pangolin:$$MAJOR_TAG-amd64 \
        --tag fosrl/pangolin:$$MINOR_TAG-amd64 \
        --tag fosrl/pangolin:$(tag)-amd64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=pg \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/amd64 \
        --tag fosrl/pangolin:postgresql-latest-amd64 \
        --tag fosrl/pangolin:postgresql-$$MAJOR_TAG-amd64 \
        --tag fosrl/pangolin:postgresql-$$MINOR_TAG-amd64 \
        --tag fosrl/pangolin:postgresql-$(tag)-amd64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=sqlite \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg LICENSE="Fossorial Commercial" \
        --build-arg IMAGE_TITLE="Pangolin EE" \
        --build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/amd64 \
        --tag fosrl/pangolin:ee-latest-amd64 \
        --tag fosrl/pangolin:ee-$$MAJOR_TAG-amd64 \
        --tag fosrl/pangolin:ee-$$MINOR_TAG-amd64 \
        --tag fosrl/pangolin:ee-$(tag)-amd64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=pg \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg LICENSE="Fossorial Commercial" \
        --build-arg IMAGE_TITLE="Pangolin EE" \
        --build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/amd64 \
        --tag fosrl/pangolin:ee-postgresql-latest-amd64 \
        --tag fosrl/pangolin:ee-postgresql-$$MAJOR_TAG-amd64 \
        --tag fosrl/pangolin:ee-postgresql-$$MINOR_TAG-amd64 \
        --tag fosrl/pangolin:ee-postgresql-$(tag)-amd64 \
        --push .

create-manifests:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make create-manifests tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@MAJOR_TAG=$$(echo $(tag) | cut -d. -f1); \
|
||||
MINOR_TAG=$$(echo $(tag) | cut -d. -f1,2); \
|
||||
echo "Creating multi-arch manifests for sqlite (oss)..." && \
|
||||
docker buildx imagetools create \
|
||||
--tag fosrl/pangolin:latest \
|
||||
--tag fosrl/pangolin:$$MAJOR_TAG \
|
||||
--tag fosrl/pangolin:$$MINOR_TAG \
|
||||
--tag fosrl/pangolin:$(tag) \
|
||||
fosrl/pangolin:latest-arm64 \
|
||||
fosrl/pangolin:latest-amd64 && \
|
||||
echo "Creating multi-arch manifests for postgresql (oss)..." && \
|
||||
docker buildx imagetools create \
|
||||
--tag fosrl/pangolin:postgresql-latest \
|
||||
--tag fosrl/pangolin:postgresql-$$MAJOR_TAG \
|
||||
--tag fosrl/pangolin:postgresql-$$MINOR_TAG \
|
||||
--tag fosrl/pangolin:postgresql-$(tag) \
|
||||
fosrl/pangolin:postgresql-latest-arm64 \
|
||||
fosrl/pangolin:postgresql-latest-amd64 && \
|
||||
echo "Creating multi-arch manifests for sqlite (enterprise)..." && \
|
||||
docker buildx imagetools create \
|
||||
--tag fosrl/pangolin:ee-latest \
|
||||
--tag fosrl/pangolin:ee-$$MAJOR_TAG \
|
||||
--tag fosrl/pangolin:ee-$$MINOR_TAG \
|
||||
--tag fosrl/pangolin:ee-$(tag) \
|
||||
fosrl/pangolin:ee-latest-arm64 \
|
||||
fosrl/pangolin:ee-latest-amd64 && \
|
||||
echo "Creating multi-arch manifests for postgresql (enterprise)..." && \
|
||||
docker buildx imagetools create \
|
||||
--tag fosrl/pangolin:ee-postgresql-latest \
|
||||
--tag fosrl/pangolin:ee-postgresql-$$MAJOR_TAG \
|
||||
--tag fosrl/pangolin:ee-postgresql-$$MINOR_TAG \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(tag) \
|
||||
fosrl/pangolin:ee-postgresql-latest-arm64 \
|
||||
fosrl/pangolin:ee-postgresql-latest-amd64 && \
|
||||
echo "All multi-arch manifests created successfully!"
|
||||
|
||||
build-rc:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
|
||||
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:$(tag) \
|
||||
--push . && \
|
||||
--push .
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=pg \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:postgresql-$(tag) \
|
||||
--push . && \
|
||||
--push .
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:ee-$(tag) \
|
||||
--push . && \
|
||||
--push .
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=pg \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(tag) \
|
||||
--push .
|
||||
|
||||
build-rc-arm:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make build-rc-arm tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
|
||||
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64 \
|
||||
--tag fosrl/pangolin:$(tag)-arm64 \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=pg \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64 \
|
||||
--tag fosrl/pangolin:postgresql-$(tag)-arm64 \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64 \
|
||||
--tag fosrl/pangolin:ee-$(tag)-arm64 \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=pg \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(tag)-arm64 \
|
||||
--push .
|
||||
|
||||
build-rc-amd:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make build-rc-amd tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
|
||||
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/amd64 \
|
||||
--tag fosrl/pangolin:$(tag)-amd64 \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=pg \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/amd64 \
|
||||
--tag fosrl/pangolin:postgresql-$(tag)-amd64 \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/amd64 \
|
||||
--tag fosrl/pangolin:ee-$(tag)-amd64 \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=pg \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/amd64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(tag)-amd64 \
|
||||
--push .
|
||||
|
||||
create-manifests-rc:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make create-manifests-rc tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "Creating multi-arch manifests for RC sqlite (oss)..." && \
|
||||
docker buildx imagetools create \
|
||||
--tag fosrl/pangolin:$(tag) \
|
||||
fosrl/pangolin:$(tag)-arm64 \
|
||||
fosrl/pangolin:$(tag)-amd64 && \
|
||||
echo "Creating multi-arch manifests for RC postgresql (oss)..." && \
|
||||
docker buildx imagetools create \
|
||||
--tag fosrl/pangolin:postgresql-$(tag) \
|
||||
fosrl/pangolin:postgresql-$(tag)-arm64 \
|
||||
fosrl/pangolin:postgresql-$(tag)-amd64 && \
|
||||
echo "Creating multi-arch manifests for RC sqlite (enterprise)..." && \
|
||||
docker buildx imagetools create \
|
||||
--tag fosrl/pangolin:ee-$(tag) \
|
||||
fosrl/pangolin:ee-$(tag)-arm64 \
|
||||
fosrl/pangolin:ee-$(tag)-amd64 && \
|
||||
echo "Creating multi-arch manifests for RC postgresql (enterprise)..." && \
|
||||
docker buildx imagetools create \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(tag) \
|
||||
fosrl/pangolin:ee-postgresql-$(tag)-arm64 \
|
||||
fosrl/pangolin:ee-postgresql-$(tag)-amd64 && \
|
||||
echo "All RC multi-arch manifests created successfully!"
|
||||
|
||||
build-arm:
|
||||
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
|
||||
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
|
||||
docker buildx build \
|
||||
--build-arg VERSION=dev \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64 \
|
||||
-t fosrl/pangolin:latest .
|
||||
docker buildx build --platform linux/arm64 -t fosrl/pangolin:latest .
|
||||
|
||||
build-x86:
|
||||
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
|
||||
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
|
||||
docker buildx build \
|
||||
--build-arg VERSION=dev \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/amd64 \
|
||||
-t fosrl/pangolin:latest .
|
||||
docker buildx build --platform linux/amd64 -t fosrl/pangolin:latest .
|
||||
|
||||
dev-build-sqlite:
|
||||
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
|
||||
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
|
||||
docker build \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--build-arg VERSION=dev \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
-t fosrl/pangolin:latest .
|
||||
build-sqlite:
|
||||
docker build --build-arg DATABASE=sqlite -t fosrl/pangolin:latest .
|
||||
|
||||
dev-build-pg:
|
||||
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
|
||||
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
|
||||
docker build \
|
||||
--build-arg DATABASE=pg \
|
||||
--build-arg VERSION=dev \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
-t fosrl/pangolin:postgresql-latest .
|
||||
build-pg:
|
||||
docker build --build-arg DATABASE=pg -t fosrl/pangolin:postgresql-latest .
|
||||
|
||||
test:
|
||||
docker run -it -p 3000:3000 -p 3001:3001 -p 3002:3002 -v ./config:/app/config fosrl/pangolin:latest
|
||||
|
||||
30
README.md
@@ -31,23 +31,17 @@
|
||||
[](https://pangolin.net/slack)
|
||||
[](https://hub.docker.com/r/fosrl/pangolin)
|
||||

|
||||
[](https://www.youtube.com/@pangolin-net)
|
||||
[](https://www.youtube.com/@fossorial-app)
|
||||
|
||||
</div>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://docs.pangolin.net/careers/join-us">
|
||||
<img src="https://img.shields.io/badge/🚀_We're_Hiring!-Join_Our_Team-brightgreen?style=for-the-badge" alt="We're Hiring!" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<strong>
|
||||
Start testing Pangolin at <a href="https://app.pangolin.net/auth/signup">app.pangolin.net</a>
|
||||
</strong>
|
||||
</p>
|
||||
|
||||
Pangolin is an open-source, identity-based remote access platform built on WireGuard that enables secure, seamless connectivity to private and public resources. Pangolin combines reverse proxy and VPN capabilities into one platform, providing browser-based access to web applications and client-based access to any private resources, all with zero-trust security and granular access control.
|
||||
Pangolin is a self-hosted tunneled reverse proxy server with identity and context aware access control, designed to easily expose and protect applications running anywhere. Pangolin acts as a central hub and connects isolated networks — even those behind restrictive firewalls — through encrypted tunnels, enabling easy access to remote services without opening ports or requiring a VPN.
|
||||
|
||||
## Installation
|
||||
|
||||
@@ -66,22 +60,14 @@ Pangolin is an open-source, identity-based remote access platform built on WireG
|
||||
|
||||
## Key Features
|
||||
|
||||
Pangolin packages everything you need for seamless application access and exposure into one cohesive platform.
|
||||
|
||||
| <img width=500 /> | <img width=500 /> |
|
||||
|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------|
|
||||
| **Connect remote networks with sites**<br /><br />Pangolin's lightweight site connectors create secure tunnels from remote networks without requiring public IP addresses or open ports. Sites make any network anywhere available for authorized access. | <img src="public/screenshots/sites.png" width=500 /><tr></tr> |
|
||||
| **Browser-based reverse proxy access**<br /><br />Expose web applications through identity and context-aware tunneled reverse proxies. Pangolin handles routing, load balancing, health checking, and automatic SSL certificates without exposing your network directly to the internet. Users access applications through any web browser with authentication and granular access control. | <img src="public/clip.gif" width=500 /><tr></tr> |
|
||||
| **Client-based private resource access**<br /><br />Access private resources like SSH servers, databases, RDP, and entire network ranges through Pangolin clients. Intelligent NAT traversal enables connections even through restrictive firewalls, while DNS aliases provide friendly names and fast connections to resources across all your sites. | <img src="public/screenshots/private-resources.png" width=500 /><tr></tr> |
|
||||
| **Zero-trust granular access**<br /><br />Grant users access to specific resources, not entire networks. Unlike traditional VPNs that expose full network access, Pangolin's zero-trust model ensures users can only reach the applications and services you explicitly define, reducing security risk and attack surface. | <img src="public/screenshots/user-devices.png" width=500 /><tr></tr> |
|
||||
|
||||
## Download Clients
|
||||
|
||||
Download the Pangolin client for your platform:
|
||||
|
||||
- [Mac](https://pangolin.net/downloads/mac)
|
||||
- [Windows](https://pangolin.net/downloads/windows)
|
||||
- [Linux](https://pangolin.net/downloads/linux)
|
||||
- [iOS](https://pangolin.net/downloads/ios)
|
||||
- [Android](https://pangolin.net/downloads/android)
|
||||
| **Manage applications in one place**<br /><br /> Pangolin provides a unified dashboard where you can monitor, configure, and secure all of your services regardless of where they are hosted. | <img src="public/screenshots/hero.png" width=500 /><tr></tr> |
|
||||
| **Reverse proxy across networks anywhere**<br /><br />Route traffic via tunnels to any private network. Pangolin works like a reverse proxy that spans multiple networks and handles routing, load balancing, health checking, and more to the right services on the other end. | <img src="public/screenshots/sites.png" width=500 /><tr></tr> |
|
||||
| **Enforce identity and context aware rules**<br /><br />Protect your applications with identity and context aware rules such as SSO, OIDC, PIN, password, temporary share links, geolocation, IP, and more. | <img src="public/auth-diagram1.png" width=500 /><tr></tr> |
|
||||
| **Quickly connect Pangolin sites**<br /><br />Pangolin's lightweight [Newt](https://github.com/fosrl/newt) client runs in userspace and can run anywhere. Use it as a site connector to route traffic to backends across all of your environments. | <img src="public/clip.gif" width=500 /><tr></tr> |
|
||||
|
||||
## Get Started
|
||||
|
||||
|
||||
72
blueprint.py
Normal file
@@ -0,0 +1,72 @@
|
||||
import requests
|
||||
import yaml
|
||||
import json
|
||||
import base64
|
||||
|
||||
# The file path for the YAML file to be read
|
||||
# You can change this to the path of your YAML file
|
||||
YAML_FILE_PATH = 'blueprint.yaml'
|
||||
|
||||
# The API endpoint and headers from the curl request
|
||||
API_URL = 'http://api.pangolin.net/v1/org/test/blueprint'
|
||||
HEADERS = {
|
||||
'accept': '*/*',
|
||||
'Authorization': 'Bearer <your_token_here>',
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
|
||||
def convert_and_send(file_path, url, headers):
|
||||
"""
|
||||
Reads a YAML file, converts its content to a JSON payload,
|
||||
and sends it via a PUT request to a specified URL.
|
||||
"""
|
||||
try:
|
||||
# Read the YAML file content
|
||||
with open(file_path, 'r') as file:
|
||||
yaml_content = file.read()
|
||||
|
||||
# Parse the YAML string to a Python dictionary
|
||||
# This will be used to ensure the YAML is valid before sending
|
||||
parsed_yaml = yaml.safe_load(yaml_content)
|
||||
|
||||
# convert the parsed YAML to a JSON string
|
||||
json_payload = json.dumps(parsed_yaml)
|
||||
print("Converted JSON payload:")
|
||||
print(json_payload)
|
||||
|
||||
# Encode the JSON string to Base64
|
||||
encoded_json = base64.b64encode(json_payload.encode('utf-8')).decode('utf-8')
|
||||
|
||||
# Create the final payload with the base64 encoded data
|
||||
final_payload = {
|
||||
"blueprint": encoded_json
|
||||
}
|
||||
|
||||
print("Sending the following Base64 encoded JSON payload:")
|
||||
print(final_payload)
|
||||
print("-" * 20)
|
||||
|
||||
# Make the PUT request with the base64 encoded payload
|
||||
response = requests.put(url, headers=headers, json=final_payload)
|
||||
|
||||
# Print the API response for debugging
|
||||
print(f"API Response Status Code: {response.status_code}")
|
||||
print("API Response Content:")
|
||||
print(response.text)
|
||||
|
||||
# Raise an exception for bad status codes (4xx or 5xx)
|
||||
response.raise_for_status()
|
||||
|
||||
except FileNotFoundError:
|
||||
print(f"Error: The file '{file_path}' was not found.")
|
||||
except yaml.YAMLError as e:
|
||||
print(f"Error parsing YAML file: {e}")
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"An error occurred during the API request: {e}")
|
||||
except Exception as e:
|
||||
print(f"An unexpected error occurred: {e}")
|
||||
|
||||
# Run the function
|
||||
if __name__ == "__main__":
|
||||
convert_and_send(YAML_FILE_PATH, API_URL, HEADERS)
|
||||
|
||||
69
blueprint.yaml
Normal file
@@ -0,0 +1,69 @@
|
||||
client-resources:
|
||||
client-resource-nice-id-uno:
|
||||
name: this is my resource
|
||||
protocol: tcp
|
||||
proxy-port: 3001
|
||||
hostname: localhost
|
||||
internal-port: 3000
|
||||
site: lively-yosemite-toad
|
||||
client-resource-nice-id-duce:
|
||||
name: this is my resource
|
||||
protocol: udp
|
||||
proxy-port: 3000
|
||||
hostname: localhost
|
||||
internal-port: 3000
|
||||
site: lively-yosemite-toad
|
||||
|
||||
proxy-resources:
|
||||
resource-nice-id-uno:
|
||||
name: this is my resource
|
||||
protocol: http
|
||||
full-domain: duce.test.example.com
|
||||
host-header: example.com
|
||||
tls-server-name: example.com
|
||||
# auth:
|
||||
# pincode: 123456
|
||||
# password: sadfasdfadsf
|
||||
# sso-enabled: true
|
||||
# sso-roles:
|
||||
# - Member
|
||||
# sso-users:
|
||||
# - owen@pangolin.net
|
||||
# whitelist-users:
|
||||
# - owen@pangolin.net
|
||||
headers:
|
||||
- name: X-Example-Header
|
||||
value: example-value
|
||||
- name: X-Another-Header
|
||||
value: another-value
|
||||
rules:
|
||||
- action: allow
|
||||
match: ip
|
||||
value: 1.1.1.1
|
||||
- action: deny
|
||||
match: cidr
|
||||
value: 2.2.2.2/32
|
||||
- action: pass
|
||||
match: path
|
||||
value: /admin
|
||||
targets:
|
||||
- site: lively-yosemite-toad
|
||||
path: /path
|
||||
pathMatchType: prefix
|
||||
hostname: localhost
|
||||
method: http
|
||||
port: 8000
|
||||
- site: slim-alpine-chipmunk
|
||||
hostname: localhost
|
||||
path: /yoman
|
||||
pathMatchType: exact
|
||||
method: http
|
||||
port: 8001
|
||||
resource-nice-id-duce:
|
||||
name: this is other resource
|
||||
protocol: tcp
|
||||
proxy-port: 3000
|
||||
targets:
|
||||
- site: lively-yosemite-toad
|
||||
hostname: localhost
|
||||
port: 3000
|
||||
@@ -5,14 +5,14 @@ meta {
|
||||
}
|
||||
|
||||
post {
|
||||
url: http://localhost:3000/api/v1/auth/login
|
||||
url: http://localhost:4000/api/v1/auth/login
|
||||
body: json
|
||||
auth: none
|
||||
}
|
||||
|
||||
body:json {
|
||||
{
|
||||
"email": "admin@fosrl.io",
|
||||
"email": "owen@pangolin.net",
|
||||
"password": "Password123!"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
meta {
|
||||
name: createOlm
|
||||
type: http
|
||||
seq: 1
|
||||
}
|
||||
|
||||
put {
|
||||
url: http://localhost:3000/api/v1/olm
|
||||
body: none
|
||||
auth: inherit
|
||||
}
|
||||
|
||||
settings {
|
||||
encodeUrl: true
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
meta {
|
||||
name: Olm
|
||||
seq: 15
|
||||
}
|
||||
|
||||
auth {
|
||||
mode: inherit
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"version": "1",
|
||||
"name": "Pangolin",
|
||||
"name": "Pangolin Saas",
|
||||
"type": "collection",
|
||||
"ignore": [
|
||||
"node_modules",
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
import { CommandModule } from "yargs";
|
||||
import { db, exitNodes } from "@server/db";
|
||||
import { eq } from "drizzle-orm";
|
||||
|
||||
type ClearExitNodesArgs = { };
|
||||
|
||||
export const clearExitNodes: CommandModule<
|
||||
{},
|
||||
ClearExitNodesArgs
|
||||
> = {
|
||||
command: "clear-exit-nodes",
|
||||
describe:
|
||||
"Clear all exit nodes from the database",
|
||||
// no args
|
||||
builder: (yargs) => {
|
||||
return yargs;
|
||||
},
|
||||
handler: async (argv: {}) => {
|
||||
try {
|
||||
|
||||
console.log(`Clearing all exit nodes from the database`);
|
||||
|
||||
// Delete all exit nodes
|
||||
const deletedCount = await db
|
||||
.delete(exitNodes)
|
||||
.where(eq(exitNodes.exitNodeId, exitNodes.exitNodeId)) .returning();; // delete all
|
||||
|
||||
console.log(`Deleted ${deletedCount.length} exit node(s) from the database`);
|
||||
|
||||
process.exit(0);
|
||||
} catch (error) {
|
||||
console.error("Error:", error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -1,36 +0,0 @@
|
||||
import { CommandModule } from "yargs";
|
||||
import { db, licenseKey } from "@server/db";
|
||||
import { eq } from "drizzle-orm";
|
||||
|
||||
type ClearLicenseKeysArgs = { };
|
||||
|
||||
export const clearLicenseKeys: CommandModule<
|
||||
{},
|
||||
ClearLicenseKeysArgs
|
||||
> = {
|
||||
command: "clear-license-keys",
|
||||
describe:
|
||||
"Clear all license keys from the database",
|
||||
// no args
|
||||
builder: (yargs) => {
|
||||
return yargs;
|
||||
},
|
||||
handler: async (argv: {}) => {
|
||||
try {
|
||||
|
||||
console.log(`Clearing all license keys from the database`);
|
||||
|
||||
// Delete all license keys
|
||||
const deletedCount = await db
|
||||
.delete(licenseKey)
|
||||
.where(eq(licenseKey.licenseKeyId, licenseKey.licenseKeyId)) .returning();; // delete all
|
||||
|
||||
console.log(`Deleted ${deletedCount.length} license key(s) from the database`);
|
||||
|
||||
process.exit(0);
|
||||
} catch (error) {
|
||||
console.error("Error:", error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -1,123 +0,0 @@
|
||||
import { CommandModule } from "yargs";
|
||||
import { db, clients, olms, currentFingerprint, userClients, approvals } from "@server/db";
|
||||
import { eq, and, inArray } from "drizzle-orm";
|
||||
|
||||
type DeleteClientArgs = {
|
||||
orgId: string;
|
||||
niceId: string;
|
||||
};
|
||||
|
||||
export const deleteClient: CommandModule<{}, DeleteClientArgs> = {
|
||||
command: "delete-client",
|
||||
describe:
|
||||
"Delete a client and all associated data (OLMs, current fingerprint, userClients, approvals). Snapshots are preserved.",
|
||||
builder: (yargs) => {
|
||||
return yargs
|
||||
.option("orgId", {
|
||||
type: "string",
|
||||
demandOption: true,
|
||||
describe: "The organization ID"
|
||||
})
|
||||
.option("niceId", {
|
||||
type: "string",
|
||||
demandOption: true,
|
||||
describe: "The client niceId (identifier)"
|
||||
});
|
||||
},
|
||||
handler: async (argv: { orgId: string; niceId: string }) => {
|
||||
try {
|
||||
const { orgId, niceId } = argv;
|
||||
|
||||
console.log(
|
||||
`Deleting client with orgId: ${orgId}, niceId: ${niceId}...`
|
||||
);
|
||||
|
||||
// Find the client
|
||||
const [client] = await db
|
||||
.select()
|
||||
.from(clients)
|
||||
.where(and(eq(clients.orgId, orgId), eq(clients.niceId, niceId)))
|
||||
.limit(1);
|
||||
|
||||
if (!client) {
|
||||
console.error(
|
||||
`Error: Client with orgId "${orgId}" and niceId "${niceId}" not found.`
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const clientId = client.clientId;
|
||||
console.log(`Found client with clientId: ${clientId}`);
|
||||
|
||||
// Find all OLMs associated with this client
|
||||
const associatedOlms = await db
|
||||
.select()
|
||||
.from(olms)
|
||||
.where(eq(olms.clientId, clientId));
|
||||
|
||||
console.log(`Found ${associatedOlms.length} OLM(s) associated with this client`);
|
||||
|
||||
// Delete in a transaction to ensure atomicity
|
||||
await db.transaction(async (trx) => {
|
||||
// Delete currentFingerprint entries for the associated OLMs
|
||||
// Note: We delete these explicitly before deleting OLMs to ensure
|
||||
// we have control, even though cascade would handle it
|
||||
let fingerprintCount = 0;
|
||||
if (associatedOlms.length > 0) {
|
||||
const olmIds = associatedOlms.map((olm) => olm.olmId);
|
||||
const deletedFingerprints = await trx
|
||||
.delete(currentFingerprint)
|
||||
.where(inArray(currentFingerprint.olmId, olmIds))
|
||||
.returning();
|
||||
fingerprintCount = deletedFingerprints.length;
|
||||
}
|
||||
console.log(`Deleted ${fingerprintCount} current fingerprint(s)`);
|
||||
|
||||
// Delete OLMs
|
||||
// Note: OLMs have onDelete: "set null" for clientId, so we need to delete them explicitly
|
||||
const deletedOlms = await trx
|
||||
.delete(olms)
|
||||
.where(eq(olms.clientId, clientId))
|
||||
.returning();
|
||||
console.log(`Deleted ${deletedOlms.length} OLM(s)`);
|
||||
|
||||
// Delete approvals
|
||||
// Note: Approvals have onDelete: "cascade" but we delete explicitly for clarity
|
||||
const deletedApprovals = await trx
|
||||
.delete(approvals)
|
||||
.where(eq(approvals.clientId, clientId))
|
||||
.returning();
|
||||
console.log(`Deleted ${deletedApprovals.length} approval(s)`);
|
||||
|
||||
// Delete userClients
|
||||
// Note: userClients have onDelete: "cascade" but we delete explicitly for clarity
|
||||
const deletedUserClients = await trx
|
||||
.delete(userClients)
|
||||
.where(eq(userClients.clientId, clientId))
|
||||
.returning();
|
||||
console.log(`Deleted ${deletedUserClients.length} userClient association(s)`);
|
||||
|
||||
// Finally, delete the client itself
|
||||
const deletedClients = await trx
|
||||
.delete(clients)
|
||||
.where(eq(clients.clientId, clientId))
|
||||
.returning();
|
||||
console.log(`Deleted client: ${deletedClients[0]?.name || niceId}`);
|
||||
});
|
||||
|
||||
console.log("\nClient deletion completed successfully!");
|
||||
console.log("\nSummary:");
|
||||
console.log(` - Client: ${niceId} (clientId: ${clientId})`);
|
||||
console.log(` - Olm(s): ${associatedOlms.length}`);
|
||||
console.log(` - Current fingerprints: deleted`);
|
||||
console.log(` - Approvals: deleted`);
|
||||
console.log(` - UserClients: deleted`);
|
||||
console.log(` - Snapshots: preserved (not deleted)`);
|
||||
|
||||
process.exit(0);
|
||||
} catch (error) {
|
||||
console.error("Error deleting client:", error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -1,284 +0,0 @@
|
||||
import { CommandModule } from "yargs";
|
||||
import { db, idpOidcConfig, licenseKey } from "@server/db";
|
||||
import { encrypt, decrypt } from "@server/lib/crypto";
|
||||
import { configFilePath1, configFilePath2 } from "@server/lib/consts";
|
||||
import { eq } from "drizzle-orm";
|
||||
import fs from "fs";
|
||||
import yaml from "js-yaml";
|
||||
|
||||
type RotateServerSecretArgs = {
|
||||
"old-secret": string;
|
||||
"new-secret": string;
|
||||
force?: boolean;
|
||||
};
|
||||
|
||||
export const rotateServerSecret: CommandModule<
|
||||
{},
|
||||
RotateServerSecretArgs
|
||||
> = {
|
||||
command: "rotate-server-secret",
|
||||
describe:
|
||||
"Rotate the server secret by decrypting all encrypted values with the old secret and re-encrypting with a new secret",
|
||||
builder: (yargs) => {
|
||||
return yargs
|
||||
.option("old-secret", {
|
||||
type: "string",
|
||||
demandOption: true,
|
||||
describe: "The current server secret (for verification)"
|
||||
})
|
||||
.option("new-secret", {
|
||||
type: "string",
|
||||
demandOption: true,
|
||||
describe: "The new server secret to use"
|
||||
})
|
||||
.option("force", {
|
||||
type: "boolean",
|
||||
default: false,
|
||||
describe:
|
||||
"Force rotation even if the old secret doesn't match the config file. " +
|
||||
"Use this if you know the old secret is correct but the config file is out of sync. " +
|
||||
"WARNING: This will attempt to decrypt all values with the provided old secret. " +
|
||||
"If the old secret is incorrect, the rotation will fail or corrupt data."
|
||||
});
|
||||
},
|
||||
handler: async (argv: {
|
||||
"old-secret": string;
|
||||
"new-secret": string;
|
||||
force?: boolean;
|
||||
}) => {
|
||||
try {
|
||||
// Determine which config file exists
|
||||
const configPath = fs.existsSync(configFilePath1)
|
||||
? configFilePath1
|
||||
: fs.existsSync(configFilePath2)
|
||||
? configFilePath2
|
||||
: null;
|
||||
|
||||
if (!configPath) {
|
||||
console.error(
|
||||
"Error: Config file not found. Expected config.yml or config.yaml in the config directory."
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Read current config
|
||||
const configContent = fs.readFileSync(configPath, "utf8");
|
||||
const config = yaml.load(configContent) as any;
|
||||
|
||||
if (!config?.server?.secret) {
|
||||
console.error(
|
||||
"Error: No server secret found in config file. Cannot rotate."
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const configSecret = config.server.secret;
|
||||
const oldSecret = argv["old-secret"];
|
||||
const newSecret = argv["new-secret"];
|
||||
const force = argv.force || false;
|
||||
|
||||
// Verify that the provided old secret matches the one in config
|
||||
if (configSecret !== oldSecret) {
|
||||
if (!force) {
|
||||
console.error(
|
||||
"Error: The provided old secret does not match the secret in the config file."
|
||||
);
|
||||
console.error(
|
||||
"\nIf you are certain the old secret is correct and the config file is out of sync,"
|
||||
);
|
||||
console.error(
|
||||
"you can use the --force flag to bypass this check."
|
||||
);
|
||||
console.error(
|
||||
"\nWARNING: Using --force with an incorrect old secret will cause the rotation to fail"
|
||||
);
|
||||
console.error(
|
||||
"or corrupt encrypted data. Only use --force if you are absolutely certain."
|
||||
);
|
||||
process.exit(1);
|
||||
} else {
|
||||
console.warn(
|
||||
"\nWARNING: Using --force flag. Bypassing old secret verification."
|
||||
);
|
||||
console.warn(
|
||||
"The provided old secret does not match the config file, but proceeding anyway."
|
||||
);
|
||||
console.warn(
|
||||
"If the old secret is incorrect, this operation will fail or corrupt data.\n"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Validate new secret
|
||||
if (newSecret.length < 8) {
|
||||
console.error(
|
||||
"Error: New secret must be at least 8 characters long"
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
if (oldSecret === newSecret) {
|
||||
console.error("Error: New secret must be different from old secret");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
console.log("Starting server secret rotation...");
|
||||
console.log("This will decrypt and re-encrypt all encrypted values in the database.");
|
||||
|
||||
// Read all data first
|
||||
console.log("\nReading encrypted data from database...");
|
||||
const idpConfigs = await db.select().from(idpOidcConfig);
|
||||
const licenseKeys = await db.select().from(licenseKey);
|
||||
|
||||
console.log(`Found ${idpConfigs.length} OIDC IdP configuration(s)`);
|
||||
console.log(`Found ${licenseKeys.length} license key(s)`);
|
||||
|
||||
// Prepare all decrypted and re-encrypted values
|
||||
console.log("\nDecrypting and re-encrypting values...");
|
||||
|
||||
type IdpUpdate = {
|
||||
idpOauthConfigId: number;
|
||||
encryptedClientId: string;
|
||||
encryptedClientSecret: string;
|
||||
};
|
||||
|
||||
type LicenseKeyUpdate = {
|
||||
oldLicenseKeyId: string;
|
||||
newLicenseKeyId: string;
|
||||
encryptedToken: string;
|
||||
encryptedInstanceId: string;
|
||||
};
|
||||
|
||||
const idpUpdates: IdpUpdate[] = [];
|
||||
const licenseKeyUpdates: LicenseKeyUpdate[] = [];
|
||||
|
||||
// Process idpOidcConfig entries
|
||||
for (const idpConfig of idpConfigs) {
|
||||
try {
|
||||
// Decrypt with old secret
|
||||
const decryptedClientId = decrypt(idpConfig.clientId, oldSecret);
|
||||
const decryptedClientSecret = decrypt(
|
||||
idpConfig.clientSecret,
|
||||
oldSecret
|
||||
);
|
||||
|
||||
// Re-encrypt with new secret
|
||||
const encryptedClientId = encrypt(decryptedClientId, newSecret);
|
||||
const encryptedClientSecret = encrypt(
|
||||
decryptedClientSecret,
|
||||
newSecret
|
||||
);
|
||||
|
||||
idpUpdates.push({
|
||||
idpOauthConfigId: idpConfig.idpOauthConfigId,
|
||||
encryptedClientId,
|
||||
encryptedClientSecret
|
||||
});
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`Error processing IdP config ${idpConfig.idpOauthConfigId}:`,
|
||||
error
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Process licenseKey entries
|
||||
for (const key of licenseKeys) {
|
||||
try {
|
||||
// Decrypt with old secret
|
||||
const decryptedLicenseKeyId = decrypt(key.licenseKeyId, oldSecret);
|
||||
const decryptedToken = decrypt(key.token, oldSecret);
|
||||
const decryptedInstanceId = decrypt(key.instanceId, oldSecret);
|
||||
|
||||
// Re-encrypt with new secret
|
||||
const encryptedLicenseKeyId = encrypt(
|
||||
decryptedLicenseKeyId,
|
||||
newSecret
|
||||
);
|
||||
const encryptedToken = encrypt(decryptedToken, newSecret);
|
||||
const encryptedInstanceId = encrypt(
|
||||
decryptedInstanceId,
|
||||
newSecret
|
||||
);
|
||||
|
||||
licenseKeyUpdates.push({
|
||||
oldLicenseKeyId: key.licenseKeyId,
|
||||
newLicenseKeyId: encryptedLicenseKeyId,
|
||||
encryptedToken,
|
||||
encryptedInstanceId
|
||||
});
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`Error processing license key ${key.licenseKeyId}:`,
|
||||
error
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Perform all database updates in a single transaction
|
||||
console.log("\nUpdating database in transaction...");
|
||||
await db.transaction(async (trx) => {
|
||||
// Update idpOidcConfig entries
|
||||
for (const update of idpUpdates) {
|
||||
await trx
|
||||
.update(idpOidcConfig)
|
||||
.set({
|
||||
clientId: update.encryptedClientId,
|
||||
clientSecret: update.encryptedClientSecret
|
||||
})
|
||||
.where(
|
||||
eq(
|
||||
idpOidcConfig.idpOauthConfigId,
|
||||
update.idpOauthConfigId
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
// Update licenseKey entries (delete old, insert new)
|
||||
for (const update of licenseKeyUpdates) {
|
||||
// Delete old entry
|
||||
await trx
|
||||
.delete(licenseKey)
|
||||
.where(eq(licenseKey.licenseKeyId, update.oldLicenseKeyId));
|
||||
|
||||
// Insert new entry with re-encrypted values
|
||||
await trx.insert(licenseKey).values({
|
||||
licenseKeyId: update.newLicenseKeyId,
|
||||
token: update.encryptedToken,
|
||||
instanceId: update.encryptedInstanceId
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
console.log(`Rotated ${idpUpdates.length} OIDC IdP configuration(s)`);
|
||||
console.log(`Rotated ${licenseKeyUpdates.length} license key(s)`);
|
||||
|
||||
// Update config file with new secret
|
||||
console.log("\nUpdating config file...");
|
||||
config.server.secret = newSecret;
|
||||
const newConfigContent = yaml.dump(config, {
|
||||
indent: 2,
|
||||
lineWidth: -1
|
||||
});
|
||||
fs.writeFileSync(configPath, newConfigContent, "utf8");
|
||||
|
||||
console.log(`Updated config file: ${configPath}`);
|
||||
|
||||
console.log("\nServer secret rotation completed successfully!");
|
||||
console.log(`\nSummary:`);
|
||||
console.log(` - OIDC IdP configurations: ${idpUpdates.length}`);
|
||||
console.log(` - License keys: ${licenseKeyUpdates.length}`);
|
||||
console.log(
|
||||
`\n IMPORTANT: Restart the server for the new secret to take effect.`
|
||||
);
|
||||
|
||||
process.exit(0);
|
||||
} catch (error) {
|
||||
console.error("Error rotating server secret:", error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@@ -4,18 +4,10 @@ import yargs from "yargs";
|
||||
import { hideBin } from "yargs/helpers";
|
||||
import { setAdminCredentials } from "@cli/commands/setAdminCredentials";
|
||||
import { resetUserSecurityKeys } from "@cli/commands/resetUserSecurityKeys";
|
||||
import { clearExitNodes } from "./commands/clearExitNodes";
|
||||
import { rotateServerSecret } from "./commands/rotateServerSecret";
|
||||
import { clearLicenseKeys } from "./commands/clearLicenseKeys";
|
||||
import { deleteClient } from "./commands/deleteClient";
|
||||
|
||||
yargs(hideBin(process.argv))
|
||||
.scriptName("pangctl")
|
||||
.command(setAdminCredentials)
|
||||
.command(resetUserSecurityKeys)
|
||||
.command(clearExitNodes)
|
||||
.command(rotateServerSecret)
|
||||
.command(clearLicenseKeys)
|
||||
.command(deleteClient)
|
||||
.demandCommand()
|
||||
.help().argv;
|
||||
|
||||
@@ -17,4 +17,4 @@
|
||||
"lib": "@/lib",
|
||||
"hooks": "@/hooks"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,30 +1,28 @@
|
||||
# To see all available options, please visit the docs:
|
||||
# https://docs.pangolin.net/
|
||||
|
||||
gerbil:
|
||||
start_port: 51820
|
||||
base_endpoint: "{{.DashboardDomain}}"
|
||||
# https://docs.pangolin.net/self-host/advanced/config-file
|
||||
|
||||
app:
|
||||
dashboard_url: "https://{{.DashboardDomain}}"
|
||||
log_level: "info"
|
||||
telemetry:
|
||||
anonymous_usage: true
|
||||
dashboard_url: http://localhost:3002
|
||||
log_level: debug
|
||||
|
||||
domains:
|
||||
domain1:
|
||||
base_domain: "{{.BaseDomain}}"
|
||||
domain1:
|
||||
base_domain: example.com
|
||||
|
||||
server:
|
||||
secret: "{{.Secret}}"
|
||||
cors:
|
||||
origins: ["https://{{.DashboardDomain}}"]
|
||||
methods: ["GET", "POST", "PUT", "DELETE", "PATCH"]
|
||||
allowed_headers: ["X-CSRF-Token", "Content-Type"]
|
||||
credentials: false
|
||||
secret: my_secret_key
|
||||
|
||||
gerbil:
|
||||
base_endpoint: example.com
|
||||
|
||||
orgs:
|
||||
block_size: 24
|
||||
subnet_group: 100.90.137.0/20
|
||||
|
||||
flags:
|
||||
require_email_verification: false
|
||||
disable_signup_without_invite: true
|
||||
disable_user_create_org: false
|
||||
allow_raw_resources: true
|
||||
require_email_verification: false
|
||||
disable_signup_without_invite: true
|
||||
disable_user_create_org: true
|
||||
allow_raw_resources: true
|
||||
enable_integration_api: true
|
||||
enable_clients: true
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
http:
|
||||
middlewares:
|
||||
badger:
|
||||
plugin:
|
||||
badger:
|
||||
disableForwardAuth: true
|
||||
redirect-to-https:
|
||||
redirectScheme:
|
||||
scheme: https
|
||||
@@ -17,16 +13,14 @@ http:
|
||||
- web
|
||||
middlewares:
|
||||
- redirect-to-https
|
||||
- badger
|
||||
|
||||
# Next.js router (handles everything except API and WebSocket paths)
|
||||
next-router:
|
||||
rule: "Host(`{{.DashboardDomain}}`) && !PathPrefix(`/api/v1`)"
|
||||
rule: "Host(`{{.DashboardDomain}}`)"
|
||||
service: next-service
|
||||
priority: 10
|
||||
entryPoints:
|
||||
- websecure
|
||||
middlewares:
|
||||
- badger
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
|
||||
@@ -34,10 +28,9 @@ http:
|
||||
api-router:
|
||||
rule: "Host(`{{.DashboardDomain}}`) && PathPrefix(`/api/v1`)"
|
||||
service: api-service
|
||||
priority: 100
|
||||
entryPoints:
|
||||
- websecure
|
||||
middlewares:
|
||||
- badger
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
|
||||
@@ -51,12 +44,3 @@ http:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://pangolin:3000" # API/WebSocket server
|
||||
|
||||
tcp:
|
||||
serversTransports:
|
||||
pp-transport-v1:
|
||||
proxyProtocol:
|
||||
version: 1
|
||||
pp-transport-v2:
|
||||
proxyProtocol:
|
||||
version: 2
|
||||
|
||||
@@ -3,52 +3,32 @@ api:
|
||||
dashboard: true
|
||||
|
||||
providers:
|
||||
http:
|
||||
endpoint: "http://pangolin:3001/api/v1/traefik-config"
|
||||
pollInterval: "5s"
|
||||
file:
|
||||
filename: "/etc/traefik/dynamic_config.yml"
|
||||
directory: "/var/dynamic"
|
||||
watch: true
|
||||
|
||||
experimental:
|
||||
plugins:
|
||||
badger:
|
||||
moduleName: "github.com/fosrl/badger"
|
||||
version: "{{.BadgerVersion}}"
|
||||
version: "v1.2.0"
|
||||
|
||||
log:
|
||||
level: "INFO"
|
||||
level: "DEBUG"
|
||||
format: "common"
|
||||
maxSize: 100
|
||||
maxBackups: 3
|
||||
maxAge: 3
|
||||
compress: true
|
||||
|
||||
certificatesResolvers:
|
||||
letsencrypt:
|
||||
acme:
|
||||
httpChallenge:
|
||||
entryPoint: web
|
||||
email: "{{.LetsEncryptEmail}}"
|
||||
storage: "/letsencrypt/acme.json"
|
||||
caServer: "https://acme-v02.api.letsencrypt.org/directory"
|
||||
|
||||
entryPoints:
|
||||
web:
|
||||
address: ":80"
|
||||
websecure:
|
||||
address: ":443"
|
||||
address: ":9443"
|
||||
transport:
|
||||
respondingTimeouts:
|
||||
readTimeout: "30m"
|
||||
http:
|
||||
tls:
|
||||
certResolver: "letsencrypt"
|
||||
encodedCharacters:
|
||||
allowEncodedSlash: true
|
||||
allowEncodedQuestionMark: true
|
||||
|
||||
serversTransport:
|
||||
insecureSkipVerify: true
|
||||
|
||||
ping:
|
||||
entryPoint: "web"
|
||||
|
||||
@@ -35,7 +35,7 @@ services:
|
||||
- 80:80 # Port for traefik because of the network_mode
|
||||
|
||||
traefik:
|
||||
image: traefik:v3.6
|
||||
image: traefik:v3.5
|
||||
container_name: traefik
|
||||
restart: unless-stopped
|
||||
network_mode: service:gerbil # Ports appear on the gerbil service
|
||||
@@ -52,4 +52,4 @@ networks:
|
||||
default:
|
||||
driver: bridge
|
||||
name: pangolin
|
||||
enable_ipv6: true
|
||||
enable_ipv6: true
|
||||
@@ -1,7 +1,9 @@
|
||||
import { defineConfig } from "drizzle-kit";
|
||||
import path from "path";
|
||||
|
||||
const schema = [path.join("server", "db", "pg", "schema")];
|
||||
const schema = [
|
||||
path.join("server", "db", "pg", "schema"),
|
||||
];
|
||||
|
||||
export default defineConfig({
|
||||
dialect: "postgresql",
|
||||
|
||||
@@ -2,7 +2,9 @@ import { APP_PATH } from "@server/lib/consts";
|
||||
import { defineConfig } from "drizzle-kit";
|
||||
import path from "path";
|
||||
|
||||
const schema = [path.join("server", "db", "sqlite", "schema")];
|
||||
const schema = [
|
||||
path.join("server", "db", "sqlite", "schema"),
|
||||
];
|
||||
|
||||
export default defineConfig({
|
||||
dialect: "sqlite",
|
||||
|
||||
95
esbuild.mjs
@@ -24,20 +24,20 @@ const argv = yargs(hideBin(process.argv))
|
||||
alias: "e",
|
||||
describe: "Entry point file",
|
||||
type: "string",
|
||||
demandOption: true
|
||||
demandOption: true,
|
||||
})
|
||||
.option("out", {
|
||||
alias: "o",
|
||||
describe: "Output file path",
|
||||
type: "string",
|
||||
demandOption: true
|
||||
demandOption: true,
|
||||
})
|
||||
.option("build", {
|
||||
alias: "b",
|
||||
describe: "Build type (oss, saas, enterprise)",
|
||||
type: "string",
|
||||
choices: ["oss", "saas", "enterprise"],
|
||||
default: "oss"
|
||||
default: "oss",
|
||||
})
|
||||
.help()
|
||||
.alias("help", "h").argv;
|
||||
@@ -66,9 +66,7 @@ function privateImportGuardPlugin() {
|
||||
|
||||
// Check if the importing file is NOT in server/private
|
||||
const normalizedImporter = path.normalize(importingFile);
|
||||
const isInServerPrivate = normalizedImporter.includes(
|
||||
path.normalize("server/private")
|
||||
);
|
||||
const isInServerPrivate = normalizedImporter.includes(path.normalize("server/private"));
|
||||
|
||||
if (!isInServerPrivate) {
|
||||
const violation = {
|
||||
@@ -81,8 +79,8 @@ function privateImportGuardPlugin() {
|
||||
console.log(`PRIVATE IMPORT VIOLATION:`);
|
||||
console.log(` File: ${importingFile}`);
|
||||
console.log(` Import: ${args.path}`);
|
||||
console.log(` Resolve dir: ${args.resolveDir || "N/A"}`);
|
||||
console.log("");
|
||||
console.log(` Resolve dir: ${args.resolveDir || 'N/A'}`);
|
||||
console.log('');
|
||||
}
|
||||
|
||||
// Return null to let the default resolver handle it
|
||||
@@ -91,20 +89,16 @@ function privateImportGuardPlugin() {
|
||||
|
||||
build.onEnd((result) => {
|
||||
if (violations.length > 0) {
|
||||
console.log(
|
||||
`\nSUMMARY: Found ${violations.length} private import violation(s):`
|
||||
);
|
||||
console.log(`\nSUMMARY: Found ${violations.length} private import violation(s):`);
|
||||
violations.forEach((v, i) => {
|
||||
console.log(
|
||||
` ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`
|
||||
);
|
||||
console.log(` ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`);
|
||||
});
|
||||
console.log("");
|
||||
console.log('');
|
||||
|
||||
result.errors.push({
|
||||
text: `Private import violations detected: ${violations.length} violation(s) found`,
|
||||
location: null,
|
||||
notes: violations.map((v) => ({
|
||||
notes: violations.map(v => ({
|
||||
text: `${path.relative(process.cwd(), v.file)} imports ${v.importPath}`,
|
||||
location: null
|
||||
}))
|
||||
@@ -127,9 +121,7 @@ function dynamicImportGuardPlugin() {
|
||||
|
||||
// Check if the importing file is NOT in server/private
|
||||
const normalizedImporter = path.normalize(importingFile);
|
||||
const isInServerPrivate = normalizedImporter.includes(
|
||||
path.normalize("server/private")
|
||||
);
|
||||
const isInServerPrivate = normalizedImporter.includes(path.normalize("server/private"));
|
||||
|
||||
if (isInServerPrivate) {
|
||||
const violation = {
|
||||
@@ -142,8 +134,8 @@ function dynamicImportGuardPlugin() {
|
||||
console.log(`DYNAMIC IMPORT VIOLATION:`);
|
||||
console.log(` File: ${importingFile}`);
|
||||
console.log(` Import: ${args.path}`);
|
||||
console.log(` Resolve dir: ${args.resolveDir || "N/A"}`);
|
||||
console.log("");
|
||||
console.log(` Resolve dir: ${args.resolveDir || 'N/A'}`);
|
||||
console.log('');
|
||||
}
|
||||
|
||||
// Return null to let the default resolver handle it
|
||||
@@ -152,20 +144,16 @@ function dynamicImportGuardPlugin() {
|
||||
|
||||
build.onEnd((result) => {
|
||||
if (violations.length > 0) {
|
||||
console.log(
|
||||
`\nSUMMARY: Found ${violations.length} dynamic import violation(s):`
|
||||
);
|
||||
console.log(`\nSUMMARY: Found ${violations.length} dynamic import violation(s):`);
|
||||
violations.forEach((v, i) => {
|
||||
console.log(
|
||||
` ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`
|
||||
);
|
||||
console.log(` ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`);
|
||||
});
|
||||
console.log("");
|
||||
console.log('');
|
||||
|
||||
result.errors.push({
|
||||
text: `Dynamic import violations detected: ${violations.length} violation(s) found`,
|
||||
location: null,
|
||||
notes: violations.map((v) => ({
|
||||
notes: violations.map(v => ({
|
||||
text: `${path.relative(process.cwd(), v.file)} imports ${v.importPath}`,
|
||||
location: null
|
||||
}))
|
||||
@@ -184,28 +172,21 @@ function dynamicImportSwitcherPlugin(buildValue) {
|
||||
const switches = [];
|
||||
|
||||
build.onStart(() => {
|
||||
console.log(
|
||||
`Dynamic import switcher using build type: ${buildValue}`
|
||||
);
|
||||
console.log(`Dynamic import switcher using build type: ${buildValue}`);
|
||||
});
|
||||
|
||||
build.onResolve({ filter: /^#dynamic\// }, (args) => {
|
||||
// Extract the path after #dynamic/
|
||||
const dynamicPath = args.path.replace(/^#dynamic\//, "");
|
||||
const dynamicPath = args.path.replace(/^#dynamic\//, '');
|
||||
|
||||
// Determine the replacement based on build type
|
||||
let replacement;
|
||||
if (buildValue === "oss") {
|
||||
replacement = `#open/${dynamicPath}`;
|
||||
} else if (
|
||||
buildValue === "saas" ||
|
||||
buildValue === "enterprise"
|
||||
) {
|
||||
} else if (buildValue === "saas" || buildValue === "enterprise") {
|
||||
replacement = `#closed/${dynamicPath}`; // We use #closed here so that the route guards dont complain after its been changed but this is the same as #private
|
||||
} else {
|
||||
console.warn(
|
||||
`Unknown build type '${buildValue}', defaulting to #open/`
|
||||
);
|
||||
console.warn(`Unknown build type '${buildValue}', defaulting to #open/`);
|
||||
replacement = `#open/${dynamicPath}`;
|
||||
}
|
||||
|
||||
@@ -220,10 +201,8 @@ function dynamicImportSwitcherPlugin(buildValue) {
|
||||
console.log(`DYNAMIC IMPORT SWITCH:`);
|
||||
console.log(` File: ${args.importer}`);
|
||||
console.log(` Original: ${args.path}`);
|
||||
console.log(
|
||||
` Switched to: ${replacement} (build: ${buildValue})`
|
||||
);
|
||||
console.log("");
|
||||
console.log(` Switched to: ${replacement} (build: ${buildValue})`);
|
||||
console.log('');
|
||||
|
||||
// Rewrite the import path and let the normal resolution continue
|
||||
return build.resolve(replacement, {
|
||||
@@ -236,18 +215,12 @@ function dynamicImportSwitcherPlugin(buildValue) {
|
||||
|
||||
build.onEnd((result) => {
|
||||
if (switches.length > 0) {
|
||||
console.log(
|
||||
`\nDYNAMIC IMPORT SUMMARY: Switched ${switches.length} import(s) for build type '${buildValue}':`
|
||||
);
|
||||
console.log(`\nDYNAMIC IMPORT SUMMARY: Switched ${switches.length} import(s) for build type '${buildValue}':`);
|
||||
switches.forEach((s, i) => {
|
||||
console.log(
|
||||
` ${i + 1}. ${path.relative(process.cwd(), s.file)}`
|
||||
);
|
||||
console.log(
|
||||
` ${s.originalPath} → ${s.replacementPath}`
|
||||
);
|
||||
console.log(` ${i + 1}. ${path.relative(process.cwd(), s.file)}`);
|
||||
console.log(` ${s.originalPath} → ${s.replacementPath}`);
|
||||
});
|
||||
console.log("");
|
||||
console.log('');
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -262,7 +235,7 @@ esbuild
|
||||
format: "esm",
|
||||
minify: false,
|
||||
banner: {
|
||||
js: banner
|
||||
js: banner,
|
||||
},
|
||||
platform: "node",
|
||||
external: ["body-parser"],
|
||||
@@ -271,22 +244,20 @@ esbuild
|
||||
dynamicImportGuardPlugin(),
|
||||
dynamicImportSwitcherPlugin(argv.build),
|
||||
nodeExternalsPlugin({
|
||||
packagePath: getPackagePaths()
|
||||
})
|
||||
packagePath: getPackagePaths(),
|
||||
}),
|
||||
],
|
||||
sourcemap: "inline",
|
||||
target: "node22"
|
||||
target: "node22",
|
||||
})
|
||||
.then((result) => {
|
||||
// Check if there were any errors in the build result
|
||||
if (result.errors && result.errors.length > 0) {
|
||||
console.error(
|
||||
`Build failed with ${result.errors.length} error(s):`
|
||||
);
|
||||
console.error(`Build failed with ${result.errors.length} error(s):`);
|
||||
result.errors.forEach((error, i) => {
|
||||
console.error(`${i + 1}. ${error.text}`);
|
||||
if (error.notes) {
|
||||
error.notes.forEach((note) => {
|
||||
error.notes.forEach(note => {
|
||||
console.error(` - ${note.text}`);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,19 +1,19 @@
import tseslint from "typescript-eslint";
import tseslint from 'typescript-eslint';

export default tseslint.config({
    files: ["**/*.{ts,tsx,js,jsx}"],
    languageOptions: {
        parser: tseslint.parser,
        parserOptions: {
            ecmaVersion: "latest",
            sourceType: "module",
            ecmaFeatures: {
                jsx: true
            }
        }
    },
    rules: {
        semi: "error",
        "prefer-const": "warn"
    files: ["**/*.{ts,tsx,js,jsx}"],
    languageOptions: {
        parser: tseslint.parser,
        parserOptions: {
            ecmaVersion: "latest",
            sourceType: "module",
            ecmaFeatures: {
                jsx: true
            }
        }
    });
    },
    rules: {
        "semi": "error",
        "prefer-const": "warn"
    }
});
@@ -9,15 +9,10 @@ services:
      PARSERS: crowdsecurity/whitelists
      ENROLL_TAGS: docker
    healthcheck:
      test:
        - CMD
        - cscli
        - lapi
        - status
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 30s
      interval: 10s
      retries: 15
      timeout: 10s
      test: ["CMD", "cscli", "capi", "status"]
    labels:
      - "traefik.enable=false" # Disable traefik for crowdsec
    volumes:

@@ -1,9 +1,5 @@
http:
  middlewares:
    badger:
      plugin:
        badger:
          disableForwardAuth: true
    redirect-to-https:
      redirectScheme:
        scheme: https
@@ -48,7 +44,7 @@ http:
        crowdsecAppsecUnreachableBlock: true # Block on unreachable
        crowdsecAppsecBodyLimit: 10485760
        crowdsecLapiKey: "PUT_YOUR_BOUNCER_KEY_HERE_OR_IT_WILL_NOT_WORK" # CrowdSec API key which you noted down later
        crowdsecLapiHost: crowdsec:8080 # CrowdSec
        crowdsecLapiHost: crowdsec:8080 # CrowdSec
        crowdsecLapiScheme: http # CrowdSec API scheme
        forwardedHeadersTrustedIPs: # Forwarded headers trusted IPs
          - "0.0.0.0/0" # All IP addresses are trusted for forwarded headers (CHANGE MADE HERE)
@@ -67,7 +63,6 @@ http:
        - web
      middlewares:
        - redirect-to-https
        - badger

    # Next.js router (handles everything except API and WebSocket paths)
    next-router:
@@ -77,7 +72,6 @@ http:
        - websecure
      middlewares:
        - security-headers # Add security headers middleware
        - badger
      tls:
        certResolver: letsencrypt

@@ -89,7 +83,6 @@ http:
        - websecure
      middlewares:
        - security-headers # Add security headers middleware
        - badger
      tls:
        certResolver: letsencrypt

@@ -101,7 +94,6 @@ http:
        - websecure
      middlewares:
        - security-headers # Add security headers middleware
        - badger
      tls:
        certResolver: letsencrypt

@@ -114,13 +106,4 @@ http:
    api-service:
      loadBalancer:
        servers:
          - url: "http://pangolin:3000" # API/WebSocket server

tcp:
  serversTransports:
    pp-transport-v1:
      proxyProtocol:
        version: 1
    pp-transport-v2:
      proxyProtocol:
        version: 2
          - url: "http://pangolin:3000" # API/WebSocket server
@@ -1,7 +1,7 @@
name: pangolin
services:
  pangolin:
    image: docker.io/fosrl/pangolin:{{if .IsEnterprise}}ee-{{end}}{{.PangolinVersion}}
    image: docker.io/fosrl/pangolin:{{.PangolinVersion}}
    container_name: pangolin
    restart: unless-stopped
    volumes:
@@ -35,7 +35,7 @@ services:
      - 80:80
{{end}}
  traefik:
    image: docker.io/traefik:v3.6
    image: docker.io/traefik:v3.5
    container_name: traefik
    restart: unless-stopped
{{if .InstallGerbil}}
@@ -59,4 +59,4 @@ networks:
  default:
    driver: bridge
    name: pangolin
{{if .EnableIPv6}} enable_ipv6: true{{end}}
{{if .EnableIPv6}} enable_ipv6: true{{end}}
@@ -1,9 +1,5 @@
http:
  middlewares:
    badger:
      plugin:
        badger:
          disableForwardAuth: true
    redirect-to-https:
      redirectScheme:
        scheme: https
@@ -17,7 +13,6 @@ http:
        - web
      middlewares:
        - redirect-to-https
        - badger

    # Next.js router (handles everything except API and WebSocket paths)
    next-router:
@@ -25,8 +20,6 @@ http:
      service: next-service
      entryPoints:
        - websecure
      middlewares:
        - badger
      tls:
        certResolver: letsencrypt

@@ -36,8 +29,6 @@ http:
      service: api-service
      entryPoints:
        - websecure
      middlewares:
        - badger
      tls:
        certResolver: letsencrypt

@@ -47,8 +38,6 @@ http:
      service: api-service
      entryPoints:
        - websecure
      middlewares:
        - badger
      tls:
        certResolver: letsencrypt

@@ -70,4 +59,4 @@ tcp:
        version: 1
    pp-transport-v2:
      proxyProtocol:
        version: 2
        version: 2
@@ -43,12 +43,9 @@ entryPoints:
  http:
    tls:
      certResolver: "letsencrypt"
    encodedCharacters:
      allowEncodedSlash: true
      allowEncodedQuestionMark: true

serversTransport:
  insecureSkipVerify: true

ping:
  entryPoint: "web"
  entryPoint: "web"
@@ -73,7 +73,7 @@ func installDocker() error {
    case strings.Contains(osRelease, "ID=ubuntu"):
        installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
            apt-get update &&
            apt-get install -y apt-transport-https ca-certificates curl gpg &&
            apt-get install -y apt-transport-https ca-certificates curl &&
            curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
            echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
            apt-get update &&
@@ -82,7 +82,7 @@ func installDocker() error {
    case strings.Contains(osRelease, "ID=debian"):
        installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
            apt-get update &&
            apt-get install -y apt-transport-https ca-certificates curl gpg &&
            apt-get install -y apt-transport-https ca-certificates curl &&
            curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
            echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
            apt-get update &&
@@ -210,47 +210,6 @@ func isDockerRunning() bool {
    return true
}

func isPodmanRunning() bool {
    cmd := exec.Command("podman", "info")
    if err := cmd.Run(); err != nil {
        return false
    }
    return true
}

// detectContainerType detects whether the system is currently using Docker or Podman
// by checking which container runtime is running and has containers
func detectContainerType() SupportedContainer {
    // Check if we have running containers with podman
    if isPodmanRunning() {
        cmd := exec.Command("podman", "ps", "-q")
        output, err := cmd.Output()
        if err == nil && len(strings.TrimSpace(string(output))) > 0 {
            return Podman
        }
    }

    // Check if we have running containers with docker
    if isDockerRunning() {
        cmd := exec.Command("docker", "ps", "-q")
        output, err := cmd.Output()
        if err == nil && len(strings.TrimSpace(string(output))) > 0 {
            return Docker
        }
    }

    // If no containers are running, check which one is installed and running
    if isPodmanRunning() && isPodmanInstalled() {
        return Podman
    }

    if isDockerRunning() && isDockerInstalled() {
        return Docker
    }

    return Undefined
}

// executeDockerComposeCommandWithArgs executes the appropriate docker command with arguments supplied
func executeDockerComposeCommandWithArgs(args ...string) error {
    var cmd *exec.Cmd

@@ -93,7 +93,7 @@ func installCrowdsec(config Config) error {

    if checkIfTextInFile("config/traefik/dynamic_config.yml", "PUT_YOUR_BOUNCER_KEY_HERE_OR_IT_WILL_NOT_WORK") {
        fmt.Println("Failed to replace bouncer key! Please retrieve the key and replace it in the config/traefik/dynamic_config.yml file using the following command:")
        fmt.Printf(" %s exec crowdsec cscli bouncers add traefik-bouncer\n", config.InstallationContainerType)
        fmt.Println(" docker exec crowdsec cscli bouncers add traefik-bouncer")
    }

    return nil
@@ -117,7 +117,7 @@ func GetCrowdSecAPIKey(containerType SupportedContainer) (string, error) {
    }

    // Execute the command to get the API key
    cmd := exec.Command(string(containerType), "exec", "crowdsec", "cscli", "bouncers", "add", "traefik-bouncer", "-o", "raw")
    cmd := exec.Command("docker", "exec", "crowdsec", "cscli", "bouncers", "add", "traefik-bouncer", "-o", "raw")
    var out bytes.Buffer
    cmd.Stdout = &out

@@ -3,8 +3,8 @@ module installer
go 1.24.0

require (
    golang.org/x/term v0.39.0
    golang.org/x/term v0.37.0
    gopkg.in/yaml.v3 v3.0.1
)

require golang.org/x/sys v0.40.0 // indirect
require golang.org/x/sys v0.38.0 // indirect

@@ -1,7 +1,7 @@
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=

@@ -54,31 +54,13 @@ func readBool(reader *bufio.Reader, prompt string, defaultValue bool) bool {
    if defaultValue {
        defaultStr = "yes"
    }
    for {
        input := readString(reader, prompt+" (yes/no)", defaultStr)
        lower := strings.ToLower(input)
        if lower == "yes" {
            return true
        } else if lower == "no" {
            return false
        } else {
            fmt.Println("Please enter 'yes' or 'no'.")
        }
    }
    input := readString(reader, prompt+" (yes/no)", defaultStr)
    return strings.ToLower(input) == "yes"
}

func readBoolNoDefault(reader *bufio.Reader, prompt string) bool {
    for {
        input := readStringNoDefault(reader, prompt+" (yes/no)")
        lower := strings.ToLower(input)
        if lower == "yes" {
            return true
        } else if lower == "no" {
            return false
        } else {
            fmt.Println("Please enter 'yes' or 'no'.")
        }
    }
    input := readStringNoDefault(reader, prompt+" (yes/no)")
    return strings.ToLower(input) == "yes"
}

func readInt(reader *bufio.Reader, prompt string, defaultValue int) int {

@@ -49,14 +49,13 @@ type Config struct {
    DoCrowdsecInstall bool
    EnableGeoblocking bool
    Secret string
    IsEnterprise bool
}

type SupportedContainer string

const (
    Docker SupportedContainer = "docker"
    Podman SupportedContainer = "podman"
    Docker SupportedContainer = "docker"
    Podman SupportedContainer = "podman"
    Undefined SupportedContainer = "undefined"
)

@@ -161,7 +160,7 @@ func main() {
    } else {
        alreadyInstalled = true
        fmt.Println("Looks like you already installed Pangolin!")

        // Check if MaxMind database exists and offer to update it
        fmt.Println("\n=== MaxMind Database Update ===")
        if _, err := os.Stat("config/GeoLite2-Country.mmdb"); err == nil {
@@ -180,7 +179,7 @@ func main() {
            fmt.Println("You can try downloading it manually later if needed.")
        }
        // Now you need to update your config file accordingly to enable geoblocking
        fmt.Print("Please remember to update your config/config.yml file to enable geoblocking! \n\n")
        fmt.Println("Please remember to update your config/config.yml file to enable geoblocking! \n")
        // add maxmind_db_path: "./config/GeoLite2-Country.mmdb" under server
        fmt.Println("Add the following line under the 'server' section:")
        fmt.Println(" maxmind_db_path: \"./config/GeoLite2-Country.mmdb\"")
@@ -210,8 +209,8 @@ func main() {

    parsedURL, err := url.Parse(appConfig.DashboardURL)
    if err != nil {
        fmt.Printf("Error parsing URL: %v\n", err)
        return
        fmt.Printf("Error parsing URL: %v\n", err)
        return
    }

    config.DashboardDomain = parsedURL.Hostname()
@@ -229,16 +228,7 @@ func main() {
        }
    }

    // Try to detect container type from existing installation
    detectedType := detectContainerType()
    if detectedType == Undefined {
        // If detection fails, prompt the user
        fmt.Println("Unable to detect container type from existing installation.")
        config.InstallationContainerType = podmanOrDocker(reader)
    } else {
        config.InstallationContainerType = detectedType
        fmt.Printf("Detected container type: %s\n", config.InstallationContainerType)
    }
    config.InstallationContainerType = podmanOrDocker(reader)

    config.DoCrowdsecInstall = true
    err := installCrowdsec(config)
@@ -252,7 +242,7 @@ func main() {
        }
    }

    if !alreadyInstalled || config.DoCrowdsecInstall {
    if !alreadyInstalled {
        // Setup Token Section
        fmt.Println("\n=== Setup Token ===")

@@ -295,10 +285,10 @@ func podmanOrDocker(reader *bufio.Reader) SupportedContainer {
        os.Exit(1)
    }

    if err := exec.Command("bash", "-c", "cat /etc/sysctl.d/99-podman.conf 2>/dev/null | grep 'net.ipv4.ip_unprivileged_port_start=' || cat /etc/sysctl.conf 2>/dev/null | grep 'net.ipv4.ip_unprivileged_port_start='").Run(); err != nil {
    if err := exec.Command("bash", "-c", "cat /etc/sysctl.conf | grep 'net.ipv4.ip_unprivileged_port_start='").Run(); err != nil {
        fmt.Println("Would you like to configure ports >= 80 as unprivileged ports? This enables podman containers to listen on low-range ports.")
        fmt.Println("Pangolin will experience startup issues if this is not configured, because it needs to listen on port 80/443 by default.")
        approved := readBool(reader, "The installer is about to execute \"echo 'net.ipv4.ip_unprivileged_port_start=80' > /etc/sysctl.d/99-podman.conf && sysctl --system\". Approve?", true)
        approved := readBool(reader, "The installer is about to execute \"echo 'net.ipv4.ip_unprivileged_port_start=80' >> /etc/sysctl.conf && sysctl -p\". Approve?", true)
        if approved {
            if os.Geteuid() != 0 {
                fmt.Println("You need to run the installer as root for such a configuration.")
@@ -309,8 +299,8 @@ func podmanOrDocker(reader *bufio.Reader) SupportedContainer {
            // container low-range ports as unprivileged ports.
            // Linux only.

            if err := run("bash", "-c", "echo 'net.ipv4.ip_unprivileged_port_start=80' > /etc/sysctl.d/99-podman.conf && sysctl --system"); err != nil {
                fmt.Printf("Error configuring unprivileged ports: %v\n", err)
            if err := run("bash", "-c", "echo 'net.ipv4.ip_unprivileged_port_start=80' >> /etc/sysctl.conf && sysctl -p"); err != nil {
                fmt.Sprintf("failed to configure unprivileged ports: %v.\n", err)
                os.Exit(1)
            }
        } else {
@@ -349,8 +339,6 @@ func collectUserInput(reader *bufio.Reader) Config {
    // Basic configuration
    fmt.Println("\n=== Basic Configuration ===")

    config.IsEnterprise = readBoolNoDefault(reader, "Do you want to install the Enterprise version of Pangolin? The EE is free for personal use or for businesses making less than 100k USD annually.")

    config.BaseDomain = readString(reader, "Enter your base domain (no subdomain e.g. example.com)", "")

    // Set default dashboard domain after base domain is collected
@@ -371,7 +359,7 @@ func collectUserInput(reader *bufio.Reader) Config {
        config.EmailSMTPPort = readInt(reader, "Enter SMTP port (default 587)", 587)
        config.EmailSMTPUser = readString(reader, "Enter SMTP username", "")
        config.EmailSMTPPass = readString(reader, "Enter SMTP password", "") // Should this be readPassword?
        config.EmailNoReply = readString(reader, "Enter no-reply email address (often the same as SMTP username)", "")
        config.EmailNoReply = readString(reader, "Enter no-reply email address", "")
    }

    // Validate required fields
@@ -383,10 +371,6 @@ func collectUserInput(reader *bufio.Reader) Config {
        fmt.Println("Error: Let's Encrypt email is required")
        os.Exit(1)
    }
    if config.EnableEmail && config.EmailNoReply == "" {
        fmt.Println("Error: No-reply email address is required when email is enabled")
        os.Exit(1)
    }

    // Advanced configuration

@@ -659,28 +643,28 @@ func checkPortsAvailable(port int) error {

func downloadMaxMindDatabase() error {
    fmt.Println("Downloading MaxMind GeoLite2 Country database...")

    // Download the GeoLite2 Country database
    if err := run("curl", "-L", "-o", "GeoLite2-Country.tar.gz",
    if err := run("curl", "-L", "-o", "GeoLite2-Country.tar.gz",
        "https://github.com/GitSquared/node-geolite2-redist/raw/refs/heads/master/redist/GeoLite2-Country.tar.gz"); err != nil {
        return fmt.Errorf("failed to download GeoLite2 database: %v", err)
    }

    // Extract the database
    if err := run("tar", "-xzf", "GeoLite2-Country.tar.gz"); err != nil {
        return fmt.Errorf("failed to extract GeoLite2 database: %v", err)
    }

    // Find the .mmdb file and move it to the config directory
    if err := run("bash", "-c", "mv GeoLite2-Country_*/GeoLite2-Country.mmdb config/"); err != nil {
        return fmt.Errorf("failed to move GeoLite2 database to config directory: %v", err)
    }

    // Clean up the downloaded files
    if err := run("rm", "-rf", "GeoLite2-Country.tar.gz", "GeoLite2-Country_*"); err != nil {
        fmt.Printf("Warning: failed to clean up temporary files: %v\n", err)
    }

    fmt.Println("MaxMind GeoLite2 Country database downloaded successfully!")
    return nil
}

2398 messages/zh-TW.json
@@ -1,17 +1,14 @@
import type { NextConfig } from "next";
import createNextIntlPlugin from "next-intl/plugin";

const withNextIntl = createNextIntlPlugin();

const nextConfig: NextConfig = {
    reactStrictMode: false,
/** @type {import("next").NextConfig} */
const nextConfig = {
    eslint: {
        ignoreDuringBuilds: true
    },
    experimental: {
        reactCompiler: true
    },
    output: "standalone"
    output: "standalone",

};

export default withNextIntl(nextConfig);
10965 package-lock.json generated
205 package.json
@@ -3,7 +3,7 @@
    "version": "0.0.0",
    "private": true,
    "type": "module",
    "description": "Identity-aware VPN and proxy for remote access to anything, anywhere and Dashboard UI",
    "description": "Tunneled Reverse Proxy Management Server with Identity and Access Control and Dashboard UI",
    "homepage": "https://github.com/fosrl/pangolin",
    "repository": {
        "type": "git",
@@ -19,27 +19,24 @@
        "db:sqlite:studio": "drizzle-kit studio --config=./drizzle.sqlite.config.ts",
        "db:pg:studio": "drizzle-kit studio --config=./drizzle.pg.config.ts",
        "db:clear-migrations": "rm -rf server/migrations",
        "set:oss": "echo 'export const build = \"oss\" as \"saas\" | \"enterprise\" | \"oss\";' > server/build.ts && cp tsconfig.oss.json tsconfig.json",
        "set:saas": "echo 'export const build = \"saas\" as \"saas\" | \"enterprise\" | \"oss\";' > server/build.ts && cp tsconfig.saas.json tsconfig.json",
        "set:enterprise": "echo 'export const build = \"enterprise\" as \"saas\" | \"enterprise\" | \"oss\";' > server/build.ts && cp tsconfig.enterprise.json tsconfig.json",
        "set:sqlite": "echo 'export * from \"./sqlite\";\nexport const driver: \"pg\" | \"sqlite\" = \"sqlite\";' > server/db/index.ts",
        "set:pg": "echo 'export * from \"./pg\";\nexport const driver: \"pg\" | \"sqlite\" = \"pg\";' > server/db/index.ts",
        "set:oss": "echo 'export const build = \"oss\" as any;' > server/build.ts && cp tsconfig.oss.json tsconfig.json",
        "set:saas": "echo 'export const build = \"saas\" as any;' > server/build.ts && cp tsconfig.saas.json tsconfig.json",
        "set:enterprise": "echo 'export const build = \"enterprise\" as any;' > server/build.ts && cp tsconfig.enterprise.json tsconfig.json",
        "set:sqlite": "echo 'export * from \"./sqlite\";' > server/db/index.ts",
        "set:pg": "echo 'export * from \"./pg\";' > server/db/index.ts",
        "next:build": "next build",
        "build:sqlite": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs",
        "build:pg": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs",
        "start": "ENVIRONMENT=prod node dist/migrations.mjs && ENVIRONMENT=prod NODE_ENV=development node --enable-source-maps dist/server.mjs",
        "email": "email dev --dir server/emails/templates --port 3005",
        "build:cli": "node esbuild.mjs -e cli/index.ts -o dist/cli.mjs",
        "format": "prettier --write ."
        "build:cli": "node esbuild.mjs -e cli/index.ts -o dist/cli.mjs"
    },
    "dependencies": {
        "@asteasolutions/zod-to-openapi": "8.4.0",
        "@aws-sdk/client-s3": "3.971.0",
        "@faker-js/faker": "10.2.0",
        "@headlessui/react": "2.2.9",
        "@asteasolutions/zod-to-openapi": "^7.3.4",
        "@aws-sdk/client-s3": "3.922.0",
        "@hookform/resolvers": "5.2.2",
        "@monaco-editor/react": "4.7.0",
        "@node-rs/argon2": "2.0.2",
        "@monaco-editor/react": "^4.7.0",
        "@node-rs/argon2": "^2.0.2",
        "@oslojs/crypto": "1.0.1",
        "@oslojs/encoding": "1.1.0",
        "@radix-ui/react-avatar": "1.1.11",
@@ -50,132 +47,130 @@
        "@radix-ui/react-icons": "1.3.2",
        "@radix-ui/react-label": "2.1.8",
        "@radix-ui/react-popover": "1.1.15",
        "@radix-ui/react-progress": "1.1.8",
        "@radix-ui/react-progress": "^1.1.8",
        "@radix-ui/react-radio-group": "1.3.8",
        "@radix-ui/react-scroll-area": "1.2.10",
        "@radix-ui/react-scroll-area": "^1.2.10",
        "@radix-ui/react-select": "2.2.6",
        "@radix-ui/react-separator": "1.1.8",
        "@radix-ui/react-slot": "1.2.4",
        "@radix-ui/react-switch": "1.2.6",
        "@radix-ui/react-tabs": "1.1.13",
        "@radix-ui/react-toast": "1.2.15",
        "@radix-ui/react-tooltip": "1.2.8",
        "@react-email/components": "1.0.2",
        "@react-email/render": "2.0.0",
        "@react-email/tailwind": "2.0.2",
        "@simplewebauthn/browser": "13.2.2",
        "@simplewebauthn/server": "13.2.2",
        "@tailwindcss/forms": "0.5.11",
        "@tanstack/react-query": "5.90.12",
        "@radix-ui/react-tooltip": "^1.2.8",
        "@react-email/components": "0.5.7",
        "@react-email/render": "^1.3.2",
        "@react-email/tailwind": "1.2.2",
        "@simplewebauthn/browser": "^13.2.2",
        "@simplewebauthn/server": "^13.2.2",
        "@tailwindcss/forms": "^0.5.10",
        "@tanstack/react-table": "8.21.3",
        "arctic": "3.7.0",
        "axios": "1.13.2",
        "better-sqlite3": "11.9.1",
        "arctic": "^3.7.0",
        "axios": "^1.13.2",
        "better-sqlite3": "11.7.0",
        "canvas-confetti": "1.9.4",
        "class-variance-authority": "0.7.1",
        "class-variance-authority": "^0.7.1",
        "clsx": "2.1.1",
        "cmdk": "1.1.1",
        "cookie": "1.1.1",
        "cookie": "^1.0.2",
        "cookie-parser": "1.4.7",
        "cookies": "0.9.1",
        "cookies": "^0.9.1",
        "cors": "2.8.5",
        "crypto-js": "4.2.0",
        "d3": "7.9.0",
        "crypto-js": "^4.2.0",
        "date-fns": "4.1.0",
        "drizzle-orm": "0.45.1",
        "eslint": "9.39.2",
        "eslint-config-next": "16.1.0",
        "express": "5.2.1",
        "drizzle-orm": "0.44.7",
        "eslint": "9.39.1",
        "eslint-config-next": "16.0.3",
        "express": "5.1.0",
        "express-rate-limit": "8.2.1",
        "glob": "13.0.0",
        "glob": "11.0.3",
        "helmet": "8.1.0",
        "http-errors": "2.0.1",
        "i": "0.3.7",
        "http-errors": "2.0.0",
        "i": "^0.3.7",
        "input-otp": "1.4.2",
        "ioredis": "5.9.2",
        "jmespath": "0.16.0",
        "ioredis": "5.8.2",
        "jmespath": "^0.16.0",
        "js-yaml": "4.1.1",
        "jsonwebtoken": "9.0.3",
        "lucide-react": "0.562.0",
        "jsonwebtoken": "^9.0.2",
        "lucide-react": "^0.552.0",
        "maxmind": "5.0.1",
        "moment": "2.30.1",
        "next": "15.5.9",
        "next-intl": "4.7.0",
        "next": "15.5.7",
        "next-intl": "^4.4.0",
        "next-themes": "0.4.6",
        "nextjs-toploader": "3.9.17",
        "nextjs-toploader": "^3.9.17",
        "node-cache": "5.1.2",
        "node-fetch": "3.3.2",
        "nodemailer": "7.0.11",
        "npm": "11.7.0",
        "nprogress": "0.2.0",
        "nodemailer": "7.0.10",
        "npm": "^11.6.2",
        "nprogress": "^0.2.0",
        "oslo": "1.2.1",
        "pg": "8.17.1",
        "posthog-node": "5.23.0",
        "pg": "^8.16.2",
        "posthog-node": "^5.11.2",
        "qrcode.react": "4.2.0",
        "react": "19.2.3",
        "react-day-picker": "9.13.0",
        "react-dom": "19.2.3",
        "react-easy-sort": "1.8.0",
        "react-hook-form": "7.71.1",
        "react-icons": "5.5.0",
        "react": "19.2.0",
        "react-day-picker": "9.11.1",
        "react-dom": "19.2.0",
        "react-easy-sort": "^1.8.0",
        "react-hook-form": "7.66.0",
        "react-icons": "^5.5.0",
        "rebuild": "0.1.2",
        "recharts": "2.15.4",
        "reodotdev": "1.0.0",
        "resend": "6.8.0",
        "semver": "7.7.3",
        "stripe": "20.2.0",
        "swagger-ui-express": "5.0.1",
        "tailwind-merge": "3.4.0",
        "topojson-client": "3.1.0",
        "tw-animate-css": "1.4.0",
        "uuid": "13.0.0",
        "reodotdev": "^1.0.0",
        "resend": "^6.4.2",
        "semver": "^7.7.3",
        "stripe": "18.2.1",
        "swagger-ui-express": "^5.0.1",
        "tailwind-merge": "3.3.1",
        "tw-animate-css": "^1.3.8",
        "uuid": "^13.0.0",
        "vaul": "1.1.2",
        "visionscarto-world-atlas": "1.0.0",
        "winston": "3.19.0",
        "winston": "3.18.3",
        "winston-daily-rotate-file": "5.0.0",
        "ws": "8.19.0",
        "yaml": "2.8.2",
        "ws": "8.18.3",
        "yaml": "^2.8.1",
        "yargs": "18.0.0",
        "zod": "4.3.5",
        "zod-validation-error": "5.0.0"
        "zod": "3.25.76",
        "zod-validation-error": "3.5.2",
        "@faker-js/faker": "^10.1.0"
    },
    "devDependencies": {
        "@dotenvx/dotenvx": "1.51.2",
        "@dotenvx/dotenvx": "1.51.1",
        "@esbuild-plugins/tsconfig-paths": "0.1.2",
        "@tailwindcss/postcss": "4.1.18",
        "@tanstack/react-query-devtools": "5.91.1",
        "@types/better-sqlite3": "7.6.13",
        "@react-email/preview-server": "4.3.2",
        "@tailwindcss/postcss": "^4.1.17",
        "@types/better-sqlite3": "7.6.12",
        "@types/cookie-parser": "1.4.10",
        "@types/cors": "2.8.19",
        "@types/crypto-js": "4.2.2",
        "@types/d3": "7.4.3",
        "@types/express": "5.0.6",
        "@types/express-session": "1.18.2",
        "@types/jmespath": "0.15.2",
        "@types/jsonwebtoken": "9.0.10",
        "@types/node": "24.10.2",
        "@types/nodemailer": "7.0.4",
        "@types/nprogress": "0.2.3",
        "@types/pg": "8.16.0",
        "@types/react": "19.2.7",
        "@types/react-dom": "19.2.3",
        "@types/semver": "7.7.1",
        "@types/swagger-ui-express": "4.1.8",
        "@types/topojson-client": "3.1.5",
        "@types/ws": "8.18.1",
        "@types/yargs": "17.0.35",
        "@types/crypto-js": "^4.2.2",
        "@types/express": "5.0.5",
        "@types/express-session": "^1.18.2",
        "@types/jmespath": "^0.15.2",
        "@types/js-yaml": "4.0.9",
        "babel-plugin-react-compiler": "1.0.0",
        "drizzle-kit": "0.31.8",
        "esbuild": "0.27.2",
        "esbuild-node-externals": "1.20.1",
        "postcss": "8.5.6",
        "prettier": "3.8.0",
        "react-email": "5.2.5",
        "tailwindcss": "4.1.18",
        "@types/jsonwebtoken": "^9.0.10",
        "@types/nprogress": "^0.2.3",
        "@types/node": "24.10.1",
        "@types/nodemailer": "7.0.3",
        "@types/pg": "8.15.6",
        "@types/react": "19.2.2",
        "@types/react-dom": "19.2.2",
        "@types/semver": "^7.7.1",
        "@types/swagger-ui-express": "^4.1.8",
        "@types/ws": "8.18.1",
        "@types/yargs": "17.0.34",
        "drizzle-kit": "0.31.6",
        "esbuild": "0.27.0",
        "esbuild-node-externals": "1.19.1",
        "postcss": "^8",
        "react-email": "4.3.2",
        "tailwindcss": "^4.1.4",
        "tsc-alias": "1.8.16",
        "tsx": "4.21.0",
        "typescript": "5.9.3",
        "typescript-eslint": "8.53.1"
        "tsx": "4.20.6",
        "typescript": "^5",
        "typescript-eslint": "^8.46.3"
    },
    "overrides": {
        "emblor": {
            "react": "19.0.0",
            "react-dom": "19.0.0"
        }
    }
}

@@ -1,8 +1,8 @@
/** @type {import('postcss-load-config').Config} */
const config = {
    plugins: {
        "@tailwindcss/postcss": {}
    }
        "@tailwindcss/postcss": {},
    },
};

export default config;

BIN public/screenshots/create-resource.png Normal file
After: Size: 687 KiB
Before: Size: 493 KiB; After: Size: 713 KiB
BIN public/screenshots/edit-resource.png Normal file
After: Size: 636 KiB
Before: Size: 484 KiB; After: Size: 713 KiB
Before: Size: 421 KiB
Before: Size: 484 KiB
BIN public/screenshots/resources.png Normal file
After: Size: 713 KiB
BIN public/screenshots/sites-fade.png Normal file
After: Size: 456 KiB
Before: Size: 396 KiB; After: Size: 674 KiB
Before: Size: 434 KiB
@@ -19,7 +19,6 @@ export enum ActionsEnum {
    getSite = "getSite",
    listSites = "listSites",
    updateSite = "updateSite",
    reGenerateSecret = "reGenerateSecret",
    createResource = "createResource",
    deleteResource = "deleteResource",
    getResource = "getResource",
@@ -78,10 +77,6 @@ export enum ActionsEnum {
    updateSiteResource = "updateSiteResource",
    createClient = "createClient",
    deleteClient = "deleteClient",
    archiveClient = "archiveClient",
    unarchiveClient = "unarchiveClient",
    blockClient = "blockClient",
    unblockClient = "unblockClient",
    updateClient = "updateClient",
    listClients = "listClients",
    getClient = "getClient",
@@ -90,7 +85,6 @@ export enum ActionsEnum {
    updateOrgDomain = "updateOrgDomain",
    getDNSRecords = "getDNSRecords",
    createNewt = "createNewt",
    createOlm = "createOlm",
    createIdp = "createIdp",
    updateIdp = "updateIdp",
    deleteIdp = "deleteIdp",
@@ -129,9 +123,7 @@ export enum ActionsEnum {
    getBlueprint = "getBlueprint",
    applyBlueprint = "applyBlueprint",
    viewLogs = "viewLogs",
    exportLogs = "exportLogs",
    listApprovals = "listApprovals",
    updateApprovals = "updateApprovals"
    exportLogs = "exportLogs"
}

export async function checkUserActionPermission(

@@ -2,13 +2,13 @@ import { hash, verify } from "@node-rs/argon2";

export async function verifyPassword(
    password: string,
    hash: string
    hash: string,
): Promise<boolean> {
    const validPassword = await verify(hash, password, {
        memoryCost: 19456,
        timeCost: 2,
        outputLen: 32,
        parallelism: 1
        parallelism: 1,
    });
    return validPassword;
}
@@ -18,7 +18,7 @@ export async function hashPassword(password: string): Promise<string> {
        memoryCost: 19456,
        timeCost: 2,
        outputLen: 32,
        parallelism: 1
        parallelism: 1,
    });

    return passwordHash;

@@ -4,13 +4,10 @@ export const passwordSchema = z
    .string()
    .min(8, { message: "Password must be at least 8 characters long" })
    .max(128, { message: "Password must be at most 128 characters long" })
    .regex(
        /^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[~!`@#$%^&*()_\-+={}[\]|\\:;"'<>,.\/?]).*$/,
        {
            message: `Your password must meet the following conditions:
    .regex(/^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[~!`@#$%^&*()_\-+={}[\]|\\:;"'<>,.\/?]).*$/, {
        message: `Your password must meet the following conditions:
at least one uppercase English letter,
at least one lowercase English letter,
at least one digit,
at least one special character.`
        }
    );
    });

@@ -36,15 +36,13 @@ export async function createSession(
    const sessionId = encodeHexLowerCase(
        sha256(new TextEncoder().encode(token))
    );
    const [session] = await db
        .insert(sessions)
        .values({
            sessionId: sessionId,
            userId,
            expiresAt: new Date(Date.now() + SESSION_COOKIE_EXPIRES).getTime(),
            issuedAt: new Date().getTime()
        })
        .returning();
    const session: Session = {
        sessionId: sessionId,
        userId,
        expiresAt: new Date(Date.now() + SESSION_COOKIE_EXPIRES).getTime(),
        issuedAt: new Date().getTime()
    };
    await db.insert(sessions).values(session);
    return session;
}

@@ -1,4 +1,6 @@
import { encodeHexLowerCase } from "@oslojs/encoding";
import {
    encodeHexLowerCase,
} from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";
import { Newt, newts, newtSessions, NewtSession } from "@server/db";
import { db } from "@server/db";
@@ -8,25 +10,25 @@ export const EXPIRES = 1000 * 60 * 60 * 24 * 30;

export async function createNewtSession(
    token: string,
    newtId: string
    newtId: string,
): Promise<NewtSession> {
    const sessionId = encodeHexLowerCase(
        sha256(new TextEncoder().encode(token))
        sha256(new TextEncoder().encode(token)),
    );
    const session: NewtSession = {
        sessionId: sessionId,
        newtId,
        expiresAt: new Date(Date.now() + EXPIRES).getTime()
        expiresAt: new Date(Date.now() + EXPIRES).getTime(),
    };
    await db.insert(newtSessions).values(session);
    return session;
}

export async function validateNewtSessionToken(
    token: string
    token: string,
): Promise<SessionValidationResult> {
    const sessionId = encodeHexLowerCase(
        sha256(new TextEncoder().encode(token))
        sha256(new TextEncoder().encode(token)),
    );
    const result = await db
        .select({ newt: newts, session: newtSessions })
@@ -43,12 +45,14 @@ export async function validateNewtSessionToken(
            .where(eq(newtSessions.sessionId, session.sessionId));
        return { session: null, newt: null };
    }
    if (Date.now() >= session.expiresAt - EXPIRES / 2) {
        session.expiresAt = new Date(Date.now() + EXPIRES).getTime();
    if (Date.now() >= session.expiresAt - (EXPIRES / 2)) {
        session.expiresAt = new Date(
            Date.now() + EXPIRES,
        ).getTime();
        await db
            .update(newtSessions)
            .set({
                expiresAt: session.expiresAt
                expiresAt: session.expiresAt,
            })
            .where(eq(newtSessions.sessionId, session.sessionId));
    }

@@ -1,4 +1,6 @@
import { encodeHexLowerCase } from "@oslojs/encoding";
import {
    encodeHexLowerCase,
} from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";
import { Olm, olms, olmSessions, OlmSession } from "@server/db";
import { db } from "@server/db";
@@ -8,25 +10,25 @@ export const EXPIRES = 1000 * 60 * 60 * 24 * 30;

export async function createOlmSession(
    token: string,
    olmId: string
    olmId: string,
): Promise<OlmSession> {
    const sessionId = encodeHexLowerCase(
        sha256(new TextEncoder().encode(token))
        sha256(new TextEncoder().encode(token)),
    );
    const session: OlmSession = {
        sessionId: sessionId,
        olmId,
        expiresAt: new Date(Date.now() + EXPIRES).getTime()
        expiresAt: new Date(Date.now() + EXPIRES).getTime(),
    };
    await db.insert(olmSessions).values(session);
    return session;
}

export async function validateOlmSessionToken(
    token: string
    token: string,
): Promise<SessionValidationResult> {
    const sessionId = encodeHexLowerCase(
        sha256(new TextEncoder().encode(token))
        sha256(new TextEncoder().encode(token)),
    );
    const result = await db
        .select({ olm: olms, session: olmSessions })
@@ -43,12 +45,14 @@ export async function validateOlmSessionToken(
            .where(eq(olmSessions.sessionId, session.sessionId));
        return { session: null, olm: null };
    }
    if (Date.now() >= session.expiresAt - EXPIRES / 2) {
        session.expiresAt = new Date(Date.now() + EXPIRES).getTime();
    if (Date.now() >= session.expiresAt - (EXPIRES / 2)) {
        session.expiresAt = new Date(
            Date.now() + EXPIRES,
        ).getTime();
        await db
            .update(olmSessions)
            .set({
                expiresAt: session.expiresAt
                expiresAt: session.expiresAt,
            })
            .where(eq(olmSessions.sessionId, session.sessionId));
    }

@@ -1,43 +1,9 @@
import { Request } from "express";
import {
    validateSessionToken,
    SESSION_COOKIE_NAME
} from "@server/auth/sessions/app";
import { validateSessionToken, SESSION_COOKIE_NAME } from "@server/auth/sessions/app";

export async function verifySession(req: Request, forceLogin?: boolean) {
export async function verifySession(req: Request) {
    const res = await validateSessionToken(
        req.cookies[SESSION_COOKIE_NAME] ?? ""
        req.cookies[SESSION_COOKIE_NAME] ?? "",
    );

    if (!forceLogin) {
        return res;
    }
    if (!res.session || !res.user) {
        return {
            session: null,
            user: null
        };
    }
    if (res.session.deviceAuthUsed) {
        return {
            session: null,
            user: null
        };
    }
    if (!res.session.issuedAt) {
        return {
            session: null,
            user: null
        };
    }
    const mins = 5 * 60 * 1000;
    const now = new Date().getTime();
    if (now - res.session.issuedAt > mins) {
        return {
            session: null,
            user: null
        };
    }

    return res;
}

@@ -1,4 +1,4 @@
import { cleanup as wsCleanup } from "#dynamic/routers/ws";
import { cleanup as wsCleanup } from "@server/routers/ws";

async function cleanup() {
    await wsCleanup();
@@ -10,4 +10,4 @@ export async function initCleanup() {
    // Handle process termination
    process.on("SIGTERM", () => cleanup());
    process.on("SIGINT", () => cleanup());
}
}
@@ -1,321 +0,0 @@
// Curated list of major ASNs (Cloud Providers, CDNs, ISPs, etc.)
// This is not exhaustive - there are 100,000+ ASNs globally
// Users can still enter any ASN manually in the input field
export const MAJOR_ASNS = [
    { name: "ALL ASNs", code: "ALL", asn: 0 }, // Special value that will match all

    // Major Cloud Providers
    { name: "Google LLC", code: "AS15169", asn: 15169 },
    { name: "Amazon AWS", code: "AS16509", asn: 16509 },
    { name: "Amazon AWS (EC2)", code: "AS14618", asn: 14618 },
    { name: "Microsoft Azure", code: "AS8075", asn: 8075 },
    { name: "Microsoft Corporation", code: "AS8068", asn: 8068 },
    { name: "DigitalOcean", code: "AS14061", asn: 14061 },
    { name: "Linode", code: "AS63949", asn: 63949 },
    { name: "Hetzner Online", code: "AS24940", asn: 24940 },
    { name: "OVH SAS", code: "AS16276", asn: 16276 },
    { name: "Oracle Cloud", code: "AS31898", asn: 31898 },
    { name: "Alibaba Cloud", code: "AS45102", asn: 45102 },
    { name: "IBM Cloud", code: "AS36351", asn: 36351 },

    // CDNs
    { name: "Cloudflare", code: "AS13335", asn: 13335 },
    { name: "Fastly", code: "AS54113", asn: 54113 },
    { name: "Akamai Technologies", code: "AS20940", asn: 20940 },
    { name: "Akamai (Primary)", code: "AS16625", asn: 16625 },

    // Mobile Carriers - US
    { name: "T-Mobile USA", code: "AS21928", asn: 21928 },
    { name: "Verizon Wireless", code: "AS6167", asn: 6167 },
    { name: "AT&T Mobility", code: "AS20057", asn: 20057 },
    { name: "Sprint (T-Mobile)", code: "AS1239", asn: 1239 },
    { name: "US Cellular", code: "AS6430", asn: 6430 },

    // Mobile Carriers - Europe
    { name: "Vodafone UK", code: "AS25135", asn: 25135 },
    { name: "EE (UK)", code: "AS12576", asn: 12576 },
    { name: "Three UK", code: "AS29194", asn: 29194 },
    { name: "O2 UK", code: "AS13285", asn: 13285 },
    { name: "Telefonica Spain Mobile", code: "AS12430", asn: 12430 },

    // Mobile Carriers - Asia
    { name: "NTT DoCoMo (Japan)", code: "AS9605", asn: 9605 },
    { name: "SoftBank Mobile (Japan)", code: "AS17676", asn: 17676 },
    { name: "SK Telecom (Korea)", code: "AS9318", asn: 9318 },
    { name: "KT Corporation Mobile (Korea)", code: "AS4766", asn: 4766 },
    { name: "Airtel India", code: "AS24560", asn: 24560 },
    { name: "China Mobile", code: "AS9808", asn: 9808 },

    // Major US ISPs
    { name: "AT&T Services", code: "AS7018", asn: 7018 },
    { name: "Comcast Cable", code: "AS7922", asn: 7922 },
    { name: "Verizon", code: "AS701", asn: 701 },
    { name: "Cox Communications", code: "AS22773", asn: 22773 },
    { name: "Charter Communications", code: "AS20115", asn: 20115 },
    { name: "CenturyLink", code: "AS209", asn: 209 },

    // Major European ISPs
    { name: "Deutsche Telekom", code: "AS3320", asn: 3320 },
    { name: "Vodafone", code: "AS1273", asn: 1273 },
    { name: "British Telecom", code: "AS2856", asn: 2856 },
    { name: "Orange", code: "AS3215", asn: 3215 },
    { name: "Telefonica", code: "AS12956", asn: 12956 },

    // Major Asian ISPs
    { name: "China Telecom", code: "AS4134", asn: 4134 },
    { name: "China Unicom", code: "AS4837", asn: 4837 },
    { name: "NTT Communications", code: "AS2914", asn: 2914 },
    { name: "KDDI Corporation", code: "AS2516", asn: 2516 },
    { name: "Reliance Jio (India)", code: "AS55836", asn: 55836 },

    // VPN/Proxy Providers
    { name: "Private Internet Access", code: "AS46562", asn: 46562 },
    { name: "NordVPN", code: "AS202425", asn: 202425 },
    { name: "Mullvad VPN", code: "AS213281", asn: 213281 },

    // Social Media / Major Tech
    { name: "Facebook/Meta", code: "AS32934", asn: 32934 },
    { name: "Twitter/X", code: "AS13414", asn: 13414 },
    { name: "Apple", code: "AS714", asn: 714 },
    { name: "Netflix", code: "AS2906", asn: 2906 },

    // Academic/Research
    { name: "MIT", code: "AS3", asn: 3 },
    { name: "Stanford University", code: "AS32", asn: 32 },
    { name: "CERN", code: "AS513", asn: 513 }
];
@@ -1,150 +0,0 @@
{
    "iPad1,1": "iPad",
    "iPad2,1": "iPad 2", "iPad2,2": "iPad 2", "iPad2,3": "iPad 2", "iPad2,4": "iPad 2",
    "iPad3,1": "iPad 3rd Gen", "iPad3,3": "iPad 3rd Gen", "iPad3,2": "iPad 3rd Gen",
    "iPad3,4": "iPad 4th Gen", "iPad3,5": "iPad 4th Gen", "iPad3,6": "iPad 4th Gen",
    "iPad6,11": "iPad 9.7 5th Gen", "iPad6,12": "iPad 9.7 5th Gen",
    "iPad7,5": "iPad 9.7 6th Gen", "iPad7,6": "iPad 9.7 6th Gen",
    "iPad7,11": "iPad 10.2 7th Gen", "iPad7,12": "iPad 10.2 7th Gen",
    "iPad11,6": "iPad 10.2 8th Gen", "iPad11,7": "iPad 10.2 8th Gen",
    "iPad12,1": "iPad 10.2 9th Gen", "iPad12,2": "iPad 10.2 9th Gen",
    "iPad13,18": "iPad 10.9 10th Gen", "iPad13,19": "iPad 10.9 10th Gen",
    "iPad4,1": "iPad Air", "iPad4,2": "iPad Air", "iPad4,3": "iPad Air",
    "iPad5,3": "iPad Air 2", "iPad5,4": "iPad Air 2",
    "iPad11,3": "iPad Air 3rd Gen", "iPad11,4": "iPad Air 3rd Gen",
    "iPad13,1": "iPad Air 4th Gen", "iPad13,2": "iPad Air 4th Gen",
    "iPad13,16": "iPad Air 5th Gen", "iPad13,17": "iPad Air 5th Gen",
    "iPad14,8": "iPad Air M2 11", "iPad14,9": "iPad Air M2 11",
    "iPad14,10": "iPad Air M2 13", "iPad14,11": "iPad Air M2 13",
    "iPad2,5": "iPad mini", "iPad2,6": "iPad mini", "iPad2,7": "iPad mini",
    "iPad4,4": "iPad mini 2", "iPad4,5": "iPad mini 2", "iPad4,6": "iPad mini 2",
    "iPad4,7": "iPad mini 3", "iPad4,8": "iPad mini 3", "iPad4,9": "iPad mini 3",
    "iPad5,1": "iPad mini 4", "iPad5,2": "iPad mini 4",
    "iPad11,1": "iPad mini 5th Gen", "iPad11,2": "iPad mini 5th Gen",
    "iPad14,1": "iPad mini 6th Gen", "iPad14,2": "iPad mini 6th Gen",
    "iPad6,7": "iPad Pro 12.9", "iPad6,8": "iPad Pro 12.9",
    "iPad6,3": "iPad Pro 9.7", "iPad6,4": "iPad Pro 9.7",
    "iPad7,3": "iPad Pro 10.5", "iPad7,4": "iPad Pro 10.5",
    "iPad7,1": "iPad Pro 12.9", "iPad7,2": "iPad Pro 12.9",
    "iPad8,1": "iPad Pro 11", "iPad8,2": "iPad Pro 11", "iPad8,3": "iPad Pro 11", "iPad8,4": "iPad Pro 11",
    "iPad8,5": "iPad Pro 12.9", "iPad8,6": "iPad Pro 12.9", "iPad8,7": "iPad Pro 12.9", "iPad8,8": "iPad Pro 12.9",
    "iPad8,9": "iPad Pro 11", "iPad8,10": "iPad Pro 11",
    "iPad8,11": "iPad Pro 12.9", "iPad8,12": "iPad Pro 12.9",
    "iPad13,4": "iPad Pro 11", "iPad13,5": "iPad Pro 11", "iPad13,6": "iPad Pro 11", "iPad13,7": "iPad Pro 11",
    "iPad13,8": "iPad Pro 12.9", "iPad13,9": "iPad Pro 12.9", "iPad13,10": "iPad Pro 12.9", "iPad13,11": "iPad Pro 12.9",
    "iPad14,3": "iPad Pro 11", "iPad14,4": "iPad Pro 11",
    "iPad14,5": "iPad Pro 12.9", "iPad14,6": "iPad Pro 12.9",
    "iPad16,3": "iPad Pro M4 11", "iPad16,4": "iPad Pro M4 11",
    "iPad16,5": "iPad Pro M4 13", "iPad16,6": "iPad Pro M4 13",
    "iPhone1,1": "iPhone", "iPhone1,2": "iPhone 3G", "iPhone2,1": "iPhone 3GS",
    "iPhone3,1": "iPhone 4", "iPhone3,2": "iPhone 4", "iPhone3,3": "iPhone 4", "iPhone4,1": "iPhone 4S",
    "iPhone5,1": "iPhone 5", "iPhone5,2": "iPhone 5", "iPhone5,3": "iPhone 5c", "iPhone5,4": "iPhone 5c",
    "iPhone6,1": "iPhone 5s", "iPhone6,2": "iPhone 5s",
    "iPhone7,2": "iPhone 6", "iPhone7,1": "iPhone 6 Plus", "iPhone8,1": "iPhone 6s", "iPhone8,2": "iPhone 6s Plus", "iPhone8,4": "iPhone SE",
    "iPhone9,1": "iPhone 7", "iPhone9,3": "iPhone 7", "iPhone9,2": "iPhone 7 Plus", "iPhone9,4": "iPhone 7 Plus",
    "iPhone10,1": "iPhone 8", "iPhone10,4": "iPhone 8", "iPhone10,2": "iPhone 8 Plus", "iPhone10,5": "iPhone 8 Plus",
    "iPhone10,3": "iPhone X", "iPhone10,6": "iPhone X",
    "iPhone11,2": "iPhone Xs", "iPhone11,6": "iPhone Xs Max", "iPhone11,8": "iPhone XR",
    "iPhone12,1": "iPhone 11", "iPhone12,3": "iPhone 11 Pro", "iPhone12,5": "iPhone 11 Pro Max", "iPhone12,8": "iPhone SE",
    "iPhone13,1": "iPhone 12 mini", "iPhone13,2": "iPhone 12", "iPhone13,3": "iPhone 12 Pro", "iPhone13,4": "iPhone 12 Pro Max",
    "iPhone14,4": "iPhone 13 mini", "iPhone14,5": "iPhone 13", "iPhone14,2": "iPhone 13 Pro", "iPhone14,3": "iPhone 13 Pro Max", "iPhone14,6": "iPhone SE",
    "iPhone14,7": "iPhone 14", "iPhone14,8": "iPhone 14 Plus", "iPhone15,2": "iPhone 14 Pro", "iPhone15,3": "iPhone 14 Pro Max",
    "iPhone15,4": "iPhone 15", "iPhone15,5": "iPhone 15 Plus", "iPhone16,1": "iPhone 15 Pro", "iPhone16,2": "iPhone 15 Pro Max",
    "iPod1,1": "iPod touch Original", "iPod2,1": "iPod touch 2nd", "iPod3,1": "iPod touch 3rd Gen", "iPod4,1": "iPod touch 4th",
    "iPod5,1": "iPod touch 5th", "iPod7,1": "iPod touch 6th Gen", "iPod9,1": "iPod touch 7th Gen"
}
@@ -1,201 +0,0 @@
{
    "PowerMac4,4": "eMac", "PowerMac6,4": "eMac",
    "PowerBook2,1": "iBook", "PowerBook2,2": "iBook", "PowerBook4,1": "iBook", "PowerBook4,2": "iBook",
    "PowerBook4,3": "iBook", "PowerBook6,3": "iBook", "PowerBook6,5": "iBook", "PowerBook6,7": "iBook",
    "iMac,1": "iMac", "PowerMac2,1": "iMac", "PowerMac2,2": "iMac", "PowerMac4,1": "iMac",
    "PowerMac4,2": "iMac", "PowerMac4,5": "iMac", "PowerMac6,1": "iMac", "PowerMac6,3*": "iMac",
    "PowerMac6,3": "iMac", "PowerMac8,1": "iMac", "PowerMac8,2": "iMac", "PowerMac12,1": "iMac",
    "iMac4,1": "iMac", "iMac4,2": "iMac", "iMac5,2": "iMac", "iMac5,1": "iMac",
    "iMac6,1": "iMac", "iMac7,1": "iMac", "iMac8,1": "iMac", "iMac9,1": "iMac",
    "iMac10,1": "iMac", "iMac11,1": "iMac", "iMac11,2": "iMac", "iMac11,3": "iMac",
    "iMac12,1": "iMac", "iMac12,2": "iMac", "iMac13,1": "iMac", "iMac13,2": "iMac",
    "iMac14,1": "iMac", "iMac14,3": "iMac", "iMac14,2": "iMac", "iMac14,4": "iMac",
    "iMac15,1": "iMac", "iMac16,1": "iMac", "iMac16,2": "iMac", "iMac17,1": "iMac",
    "iMac18,1": "iMac", "iMac18,2": "iMac", "iMac18,3": "iMac", "iMac19,2": "iMac",
    "iMac19,1": "iMac", "iMac20,1": "iMac", "iMac20,2": "iMac", "iMac21,2": "iMac", "iMac21,1": "iMac",
    "iMacPro1,1": "iMac Pro",
    "PowerMac10,1": "Mac mini", "PowerMac10,2": "Mac mini",
    "Macmini1,1": "Mac mini", "Macmini2,1": "Mac mini", "Macmini3,1": "Mac mini", "Macmini4,1": "Mac mini",
    "Macmini5,1": "Mac mini", "Macmini5,2": "Mac mini", "Macmini5,3": "Mac mini", "Macmini6,1": "Mac mini",
    "Macmini6,2": "Mac mini", "Macmini7,1": "Mac mini", "Macmini8,1": "Mac mini", "ADP3,2": "Mac mini",
    "Macmini9,1": "Mac mini", "Mac14,3": "Mac mini", "Mac14,12": "Mac mini",
    "MacPro1,1*": "Mac Pro", "MacPro2,1": "Mac Pro", "MacPro3,1": "Mac Pro", "MacPro4,1": "Mac Pro",
    "MacPro5,1": "Mac Pro", "MacPro6,1": "Mac Pro", "MacPro7,1": "Mac Pro",
    "N/A*": "Power Macintosh", "PowerMac1,1": "Power Macintosh", "PowerMac3,1": "Power Macintosh",
    "PowerMac3,3": "Power Macintosh", "PowerMac3,4": "Power Macintosh", "PowerMac3,5": "Power Macintosh",
    "PowerMac3,6": "Power Macintosh",
    "Mac13,1": "Mac Studio", "Mac13,2": "Mac Studio",
    "MacBook1,1": "MacBook", "MacBook2,1": "MacBook", "MacBook3,1": "MacBook", "MacBook4,1": "MacBook",
    "MacBook5,1": "MacBook", "MacBook5,2": "MacBook", "MacBook6,1": "MacBook", "MacBook7,1": "MacBook",
    "MacBook8,1": "MacBook", "MacBook9,1": "MacBook", "MacBook10,1": "MacBook",
    "MacBookAir1,1": "MacBook Air", "MacBookAir2,1": "MacBook Air", "MacBookAir3,1": "MacBook Air",
    "MacBookAir3,2": "MacBook Air", "MacBookAir4,1": "MacBook Air", "MacBookAir4,2": "MacBook Air",
    "MacBookAir5,1": "MacBook Air", "MacBookAir5,2": "MacBook Air", "MacBookAir6,1": "MacBook Air",
    "MacBookAir6,2": "MacBook Air", "MacBookAir7,1": "MacBook Air", "MacBookAir7,2": "MacBook Air",
    "MacBookAir8,1": "MacBook Air", "MacBookAir8,2": "MacBook Air", "MacBookAir9,1": "MacBook Air",
    "MacBookAir10,1": "MacBook Air", "Mac14,2": "MacBook Air",
    "MacBookPro1,1": "MacBook Pro", "MacBookPro1,2": "MacBook Pro", "MacBookPro2,2": "MacBook Pro",
    "MacBookPro2,1": "MacBook Pro", "MacBookPro3,1": "MacBook Pro", "MacBookPro4,1": "MacBook Pro",
    "MacBookPro5,1": "MacBook Pro", "MacBookPro5,2": "MacBook Pro", "MacBookPro5,5": "MacBook Pro",
    "MacBookPro5,4": "MacBook Pro", "MacBookPro5,3": "MacBook Pro", "MacBookPro7,1": "MacBook Pro",
    "MacBookPro6,2": "MacBook Pro", "MacBookPro6,1": "MacBook Pro", "MacBookPro8,1": "MacBook Pro",
    "MacBookPro8,2": "MacBook Pro", "MacBookPro8,3": "MacBook Pro", "MacBookPro9,2": "MacBook Pro",
    "MacBookPro9,1": "MacBook Pro", "MacBookPro10,1": "MacBook Pro", "MacBookPro10,2": "MacBook Pro",
    "MacBookPro11,1": "MacBook Pro", "MacBookPro11,2": "MacBook Pro", "MacBookPro11,3": "MacBook Pro",
    "MacBookPro12,1": "MacBook Pro", "MacBookPro11,4": "MacBook Pro", "MacBookPro11,5": "MacBook Pro",
    "MacBookPro13,1": "MacBook Pro", "MacBookPro13,2": "MacBook Pro", "MacBookPro13,3": "MacBook Pro",
    "MacBookPro14,1": "MacBook Pro", "MacBookPro14,2": "MacBook Pro", "MacBookPro14,3": "MacBook Pro",
    "MacBookPro15,2": "MacBook Pro", "MacBookPro15,1": "MacBook Pro", "MacBookPro15,3": "MacBook Pro",
    "MacBookPro15,4": "MacBook Pro", "MacBookPro16,1": "MacBook Pro", "MacBookPro16,3": "MacBook Pro",
    "MacBookPro16,2": "MacBook Pro", "MacBookPro16,4": "MacBook Pro", "MacBookPro17,1": "MacBook Pro",
    "MacBookPro18,3": "MacBook Pro", "MacBookPro18,4": "MacBook Pro", "MacBookPro18,1": "MacBook Pro",
    "MacBookPro18,2": "MacBook Pro", "Mac14,7": "MacBook Pro", "Mac14,9": "MacBook Pro",
    "Mac14,5": "MacBook Pro", "Mac14,10": "MacBook Pro", "Mac14,6": "MacBook Pro",
    "PowerMac1,2": "Power Macintosh", "PowerMac5,1": "Power Macintosh", "PowerMac7,2": "Power Macintosh",
    "PowerMac7,3": "Power Macintosh", "PowerMac9,1": "Power Macintosh", "PowerMac11,2": "Power Macintosh",
    "PowerBook1,1": "PowerBook", "PowerBook3,1": "PowerBook", "PowerBook3,2": "PowerBook",
    "PowerBook3,3": "PowerBook", "PowerBook3,4": "PowerBook", "PowerBook3,5": "PowerBook",
    "PowerBook6,1": "PowerBook", "PowerBook5,1": "PowerBook", "PowerBook6,2": "PowerBook",
    "PowerBook5,2": "PowerBook", "PowerBook5,3": "PowerBook", "PowerBook6,4": "PowerBook",
    "PowerBook5,4": "PowerBook", "PowerBook5,5": "PowerBook", "PowerBook6,8": "PowerBook",
    "PowerBook5,6": "PowerBook", "PowerBook5,7": "PowerBook", "PowerBook5,8": "PowerBook", "PowerBook5,9": "PowerBook",
    "RackMac1,1": "Xserve", "RackMac1,2": "Xserve", "RackMac3,1": "Xserve",
    "Xserve1,1": "Xserve", "Xserve2,1": "Xserve", "Xserve3,1": "Xserve"
}
@@ -1,13 +0,0 @@
import maxmind, { AsnResponse, Reader } from "maxmind";
import config from "@server/lib/config";

let maxmindAsnLookup: Reader<AsnResponse> | null;
if (config.getRawConfig().server.maxmind_asn_path) {
    maxmindAsnLookup = await maxmind.open<AsnResponse>(
        config.getRawConfig().server.maxmind_asn_path!
    );
} else {
    maxmindAsnLookup = null;
}

export { maxmindAsnLookup };
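Since the removed file exports a possibly-null `Reader`, here is a minimal sketch of how such a lookup is typically consumed downstream; the `lookupAsn` helper and the graceful-degradation behavior are illustrative assumptions, not code from this diff:

import type { AsnResponse, Reader } from "maxmind";

// Hypothetical helper: degrades gracefully when no ASN database is configured.
function lookupAsn(
    reader: Reader<AsnResponse> | null,
    ip: string
): AsnResponse | null {
    if (!reader) {
        return null; // maxmind_asn_path was never set, so no reader was opened
    }
    return reader.get(ip); // Reader.get() returns the record, or null for unknown IPs
}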
@@ -1708,4 +1708,4 @@
        "Desert Box Turtle",
        "African Striped Weasel"
    ]
}
}
@@ -1,7 +1,6 @@
import { join } from "path";
import { readFileSync } from "fs";
import { clients, db, resources, siteResources } from "@server/db";
import { randomInt } from "crypto";
import { db, resources, siteResources } from "@server/db";
import { exitNodes, sites } from "@server/db";
import { eq, and } from "drizzle-orm";
import { __DIRNAME } from "@server/lib/consts";
@@ -16,43 +15,6 @@ if (!dev) {
}
export const names = JSON.parse(readFileSync(file, "utf-8"));

// Load iOS and Mac model mappings
let iosModelsFile: string;
let macModelsFile: string;
if (!dev) {
    iosModelsFile = join(__DIRNAME, "ios_models.json");
    macModelsFile = join(__DIRNAME, "mac_models.json");
} else {
    iosModelsFile = join("server/db/ios_models.json");
    macModelsFile = join("server/db/mac_models.json");
}

const iosModels: Record<string, string> = JSON.parse(
    readFileSync(iosModelsFile, "utf-8")
);
const macModels: Record<string, string> = JSON.parse(
    readFileSync(macModelsFile, "utf-8")
);

export async function getUniqueClientName(orgId: string): Promise<string> {
    let loops = 0;
    while (true) {
        if (loops > 100) {
            throw new Error("Could not generate a unique name");
        }

        const name = generateName();
        const count = await db
            .select({ niceId: clients.niceId, orgId: clients.orgId })
            .from(clients)
            .where(and(eq(clients.niceId, name), eq(clients.orgId, orgId)));
        if (count.length === 0) {
            return name;
        }
        loops++;
    }
}

export async function getUniqueSiteName(orgId: string): Promise<string> {
    let loops = 0;
    while (true) {
@@ -80,36 +42,18 @@ export async function getUniqueResourceName(orgId: string): Promise<string> {
        }

        const name = generateName();
        const [resourceCount, siteResourceCount] = await Promise.all([
            db
                .select({ niceId: resources.niceId, orgId: resources.orgId })
                .from(resources)
                .where(
                    and(eq(resources.niceId, name), eq(resources.orgId, orgId))
                ),
            db
                .select({
                    niceId: siteResources.niceId,
                    orgId: siteResources.orgId
                })
                .from(siteResources)
                .where(
                    and(
                        eq(siteResources.niceId, name),
                        eq(siteResources.orgId, orgId)
                    )
                )
        ]);
        if (resourceCount.length === 0 && siteResourceCount.length === 0) {
        const count = await db
            .select({ niceId: resources.niceId, orgId: resources.orgId })
            .from(resources)
            .where(and(eq(resources.niceId, name), eq(resources.orgId, orgId)));
        if (count.length === 0) {
            return name;
        }
        loops++;
    }
}

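One side of this hunk checks a single table for name collisions; the other widens the check to both `resources` and `siteResources` in parallel. Condensed, that dual-table pattern looks like the sketch below (an illustrative helper, not code from this diff, assuming the same drizzle imports):

// A name is free only if neither resources nor siteResources uses it in the org.
async function isNiceIdTaken(name: string, orgId: string): Promise<boolean> {
    const [inResources, inSiteResources] = await Promise.all([
        db
            .select({ niceId: resources.niceId })
            .from(resources)
            .where(and(eq(resources.niceId, name), eq(resources.orgId, orgId))),
        db
            .select({ niceId: siteResources.niceId })
            .from(siteResources)
            .where(
                and(
                    eq(siteResources.niceId, name),
                    eq(siteResources.orgId, orgId)
                )
            )
    ]);
    return inResources.length > 0 || inSiteResources.length > 0;
}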
export async function getUniqueSiteResourceName(
    orgId: string
): Promise<string> {
export async function getUniqueSiteResourceName(orgId: string): Promise<string> {
    let loops = 0;
    while (true) {
        if (loops > 100) {
@@ -117,27 +61,11 @@ export async function getUniqueSiteResourceName(
        }

        const name = generateName();
        const [resourceCount, siteResourceCount] = await Promise.all([
            db
                .select({ niceId: resources.niceId, orgId: resources.orgId })
                .from(resources)
                .where(
                    and(eq(resources.niceId, name), eq(resources.orgId, orgId))
                ),
            db
                .select({
                    niceId: siteResources.niceId,
                    orgId: siteResources.orgId
                })
                .from(siteResources)
                .where(
                    and(
                        eq(siteResources.niceId, name),
                        eq(siteResources.orgId, orgId)
                    )
                )
        ]);
        if (resourceCount.length === 0 && siteResourceCount.length === 0) {
        const count = await db
            .select({ niceId: siteResources.niceId, orgId: siteResources.orgId })
            .from(siteResources)
            .where(and(eq(siteResources.niceId, name), eq(siteResources.orgId, orgId)));
        if (count.length === 0) {
            return name;
        }
        loops++;
@@ -146,7 +74,9 @@ export async function getUniqueSiteResourceName(

export async function getUniqueExitNodeEndpointName(): Promise<string> {
    let loops = 0;
    const count = await db.select().from(exitNodes);
    const count = await db
        .select()
        .from(exitNodes);
    while (true) {
        if (loops > 100) {
            throw new Error("Could not generate a unique name");
@@ -165,11 +95,14 @@ export async function getUniqueExitNodeEndpointName(): Promise<string> {
    }
}


export function generateName(): string {
    const name = (
        names.descriptors[randomInt(names.descriptors.length)] +
        names.descriptors[
            Math.floor(Math.random() * names.descriptors.length)
        ] +
        "-" +
        names.animals[randomInt(names.animals.length)]
        names.animals[Math.floor(Math.random() * names.animals.length)]
    )
        .toLowerCase()
        .replace(/\s/g, "-");
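One side of `generateName` indexes with `crypto.randomInt` and the other with `Math.floor(Math.random() * n)`. Both yield a uniform index in `[0, n)`; `randomInt` draws from a CSPRNG and throws on an invalid range, while `Math.random` is faster but not cryptographically strong. A minimal side-by-side sketch (the array contents are illustrative):

import { randomInt } from "crypto";

const animals = ["Desert Box Turtle", "African Striped Weasel"];

// CSPRNG-backed: uniform integer in [0, animals.length)
const secure = animals[randomInt(animals.length)];

// PRNG-backed: same range, fine when unpredictability does not matter
const fast = animals[Math.floor(Math.random() * animals.length)];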
@@ -177,29 +110,3 @@ export function generateName(): string {
    // clean out any non-alphanumeric characters except for dashes
    return name.replace(/[^a-z0-9-]/g, "");
}

export function getMacDeviceName(macIdentifier?: string | null): string | null {
    if (macIdentifier && macModels[macIdentifier]) {
        return macModels[macIdentifier];
    }
    return null;
}

export function getIosDeviceName(iosIdentifier?: string | null): string | null {
    if (iosIdentifier && iosModels[iosIdentifier]) {
        return iosModels[iosIdentifier];
    }
    return null;
}

export function getUserDeviceName(
    model: string | null,
    fallBack: string | null
): string {
    return (
        getMacDeviceName(model) ||
        getIosDeviceName(model) ||
        fallBack ||
        "Unknown Device"
    );
}

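Tying this back to the mac_models.json mapping earlier in this diff, a short usage sketch of the fallback chain (the identifiers below are examples only):

// "MacBookPro18,1" appears in mac_models.json, so the Mac lookup wins.
getUserDeviceName("MacBookPro18,1", null); // => "MacBook Pro"

// Identifier unknown to both maps: the caller-supplied fallback is used.
getUserDeviceName("Example0,0", "Alice's laptop"); // => "Alice's laptop"

// Unknown identifier and no fallback: the chain bottoms out at the default.
getUserDeviceName("Example0,0", null); // => "Unknown Device"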
@@ -6,26 +6,23 @@ import { withReplicas } from "drizzle-orm/pg-core";
function createDb() {
    const config = readConfigFile();

    // check the environment variables for postgres config first before the config file
    if (process.env.POSTGRES_CONNECTION_STRING) {
        config.postgres = {
            connection_string: process.env.POSTGRES_CONNECTION_STRING
        };
        if (process.env.POSTGRES_REPLICA_CONNECTION_STRINGS) {
            const replicas =
                process.env.POSTGRES_REPLICA_CONNECTION_STRINGS.split(",").map(
                    (conn) => ({
                        connection_string: conn.trim()
                    })
                );
            config.postgres.replicas = replicas;
        }
    }

    if (!config.postgres) {
        throw new Error(
            "Postgres configuration is missing in the configuration file."
        );
    // check the environment variables for postgres config
    if (process.env.POSTGRES_CONNECTION_STRING) {
        config.postgres = {
            connection_string: process.env.POSTGRES_CONNECTION_STRING
        };
        if (process.env.POSTGRES_REPLICA_CONNECTION_STRINGS) {
            const replicas = process.env.POSTGRES_REPLICA_CONNECTION_STRINGS.split(",").map((conn) => ({
                connection_string: conn.trim()
            }));
            config.postgres.replicas = replicas;
        }
    } else {
        throw new Error(
            "Postgres configuration is missing in the configuration file."
        );
    }
    }

    const connectionString = config.postgres?.connection_string;
@@ -43,45 +40,28 @@ function createDb() {
        connectionString,
        max: poolConfig?.max_connections || 20,
        idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
        connectionTimeoutMillis: poolConfig?.connection_timeout_ms || 5000
        connectionTimeoutMillis: poolConfig?.connection_timeout_ms || 5000,
    });

    const replicas = [];

    if (!replicaConnections.length) {
        replicas.push(
            DrizzlePostgres(primaryPool, {
                logger: process.env.QUERY_LOGGING == "true"
            })
        );
        replicas.push(DrizzlePostgres(primaryPool));
    } else {
        for (const conn of replicaConnections) {
            const replicaPool = new Pool({
                connectionString: conn.connection_string,
                max: poolConfig?.max_replica_connections || 20,
                idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
                connectionTimeoutMillis:
                    poolConfig?.connection_timeout_ms || 5000
                connectionTimeoutMillis: poolConfig?.connection_timeout_ms || 5000,
            });
            replicas.push(
                DrizzlePostgres(replicaPool, {
                    logger: process.env.QUERY_LOGGING == "true"
                })
            );
            replicas.push(DrizzlePostgres(replicaPool));
        }
    }

    return withReplicas(
        DrizzlePostgres(primaryPool, {
            logger: process.env.QUERY_LOGGING == "true"
        }),
        replicas as any
    );
    return withReplicas(DrizzlePostgres(primaryPool), replicas as any);
}

export const db = createDb();
export default db;
export const primaryDb = db.$primary;
export type Transaction = Parameters<
    Parameters<(typeof db)["transaction"]>[0]
>[0];
export type Transaction = Parameters<Parameters<typeof db["transaction"]>[0]>[0];
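Both spellings of `Transaction` extract the same thing: the type of the callback parameter that `db.transaction()` provides. A usage sketch, assuming the `db`, `orgs`, and `eq` imports used elsewhere in this diff (the update inside is illustrative):

// Transaction = the `tx` parameter type of db.transaction's callback.
async function renameOrg(orgId: string, name: string) {
    await db.transaction(async (tx: Transaction) => {
        // Everything issued through `tx` commits or rolls back together.
        await tx.update(orgs).set({ name }).where(eq(orgs.orgId, orgId));
    });
}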
@@ -10,8 +10,7 @@ const runMigrations = async () => {
    await migrate(db as any, {
        migrationsFolder: migrationsFolder
    });
    console.log("Migrations completed successfully. ✅");
    process.exit(0);
    console.log("Migrations completed successfully.");
} catch (error) {
    console.error("Error running migrations:", error);
    process.exit(1);

@@ -10,15 +10,7 @@ import {
    index
} from "drizzle-orm/pg-core";
import { InferSelectModel } from "drizzle-orm";
import {
    domains,
    orgs,
    targets,
    users,
    exitNodes,
    sessions,
    clients
} from "./schema";
import { domains, orgs, targets, users, exitNodes, sessions } from "./schema";

export const certificates = pgTable("certificates", {
    certId: serial("certId").primaryKey(),
@@ -212,29 +204,6 @@ export const loginPageOrg = pgTable("loginPageOrg", {
        .references(() => orgs.orgId, { onDelete: "cascade" })
});

export const loginPageBranding = pgTable("loginPageBranding", {
    loginPageBrandingId: serial("loginPageBrandingId").primaryKey(),
    logoUrl: text("logoUrl"),
    logoWidth: integer("logoWidth").notNull(),
    logoHeight: integer("logoHeight").notNull(),
    primaryColor: text("primaryColor"),
    resourceTitle: text("resourceTitle").notNull(),
    resourceSubtitle: text("resourceSubtitle"),
    orgTitle: text("orgTitle"),
    orgSubtitle: text("orgSubtitle")
});

export const loginPageBrandingOrg = pgTable("loginPageBrandingOrg", {
    loginPageBrandingId: integer("loginPageBrandingId")
        .notNull()
        .references(() => loginPageBranding.loginPageBrandingId, {
            onDelete: "cascade"
        }),
    orgId: varchar("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" })
});

export const sessionTransferToken = pgTable("sessionTransferToken", {
    token: varchar("token").primaryKey(),
    sessionId: varchar("sessionId")
@@ -246,84 +215,43 @@ export const sessionTransferToken = pgTable("sessionTransferToken", {
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull()
});

export const actionAuditLog = pgTable(
    "actionAuditLog",
    {
        id: serial("id").primaryKey(),
        timestamp: bigint("timestamp", { mode: "number" }).notNull(), // this is EPOCH time in seconds
        orgId: varchar("orgId")
            .notNull()
            .references(() => orgs.orgId, { onDelete: "cascade" }),
        actorType: varchar("actorType", { length: 50 }).notNull(),
        actor: varchar("actor", { length: 255 }).notNull(),
        actorId: varchar("actorId", { length: 255 }).notNull(),
        action: varchar("action", { length: 100 }).notNull(),
        metadata: text("metadata")
    },
    (table) => [
        index("idx_actionAuditLog_timestamp").on(table.timestamp),
        index("idx_actionAuditLog_org_timestamp").on(
            table.orgId,
            table.timestamp
        )
    ]
);

export const accessAuditLog = pgTable(
    "accessAuditLog",
    {
        id: serial("id").primaryKey(),
        timestamp: bigint("timestamp", { mode: "number" }).notNull(), // this is EPOCH time in seconds
        orgId: varchar("orgId")
            .notNull()
            .references(() => orgs.orgId, { onDelete: "cascade" }),
        actorType: varchar("actorType", { length: 50 }),
        actor: varchar("actor", { length: 255 }),
        actorId: varchar("actorId", { length: 255 }),
        resourceId: integer("resourceId"),
        ip: varchar("ip", { length: 45 }),
        type: varchar("type", { length: 100 }).notNull(),
        action: boolean("action").notNull(),
        location: text("location"),
        userAgent: text("userAgent"),
        metadata: text("metadata")
    },
    (table) => [
        index("idx_identityAuditLog_timestamp").on(table.timestamp),
        index("idx_identityAuditLog_org_timestamp").on(
            table.orgId,
            table.timestamp
        )
    ]
);

export const approvals = pgTable("approvals", {
    approvalId: serial("approvalId").primaryKey(),
    timestamp: integer("timestamp").notNull(), // this is EPOCH time in seconds
export const actionAuditLog = pgTable("actionAuditLog", {
    id: serial("id").primaryKey(),
    timestamp: bigint("timestamp", { mode: "number" }).notNull(), // this is EPOCH time in seconds
    orgId: varchar("orgId")
        .references(() => orgs.orgId, {
            onDelete: "cascade"
        })
        .notNull(),
    clientId: integer("clientId").references(() => clients.clientId, {
        onDelete: "cascade"
    }), // clients reference user devices (in this case)
    userId: varchar("userId")
        .references(() => users.userId, {
            // optionally tied to a user and in this case delete when the user deletes
            onDelete: "cascade"
        })
        .notNull(),
    decision: varchar("decision")
        .$type<"approved" | "denied" | "pending">()
        .default("pending")
        .notNull(),
    type: varchar("type")
        .$type<"user_device" /*| 'proxy' // for later */>()
        .notNull()
});
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    actorType: varchar("actorType", { length: 50 }).notNull(),
    actor: varchar("actor", { length: 255 }).notNull(),
    actorId: varchar("actorId", { length: 255 }).notNull(),
    action: varchar("action", { length: 100 }).notNull(),
    metadata: text("metadata")
}, (table) => ([
    index("idx_actionAuditLog_timestamp").on(table.timestamp),
    index("idx_actionAuditLog_org_timestamp").on(table.orgId, table.timestamp)
]));

export const accessAuditLog = pgTable("accessAuditLog", {
    id: serial("id").primaryKey(),
    timestamp: bigint("timestamp", { mode: "number" }).notNull(), // this is EPOCH time in seconds
    orgId: varchar("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    actorType: varchar("actorType", { length: 50 }),
    actor: varchar("actor", { length: 255 }),
    actorId: varchar("actorId", { length: 255 }),
    resourceId: integer("resourceId"),
    ip: varchar("ip", { length: 45 }),
    type: varchar("type", { length: 100 }).notNull(),
    action: boolean("action").notNull(),
    location: text("location"),
    userAgent: text("userAgent"),
    metadata: text("metadata")
}, (table) => ([
    index("idx_identityAuditLog_timestamp").on(table.timestamp),
    index("idx_identityAuditLog_org_timestamp").on(table.orgId, table.timestamp)
]));

export type Approval = InferSelectModel<typeof approvals>;
export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;
export type Certificate = InferSelectModel<typeof certificates>;
@@ -341,6 +269,5 @@ export type RemoteExitNodeSession = InferSelectModel<
>;
export type ExitNodeOrg = InferSelectModel<typeof exitNodeOrgs>;
export type LoginPage = InferSelectModel<typeof loginPage>;
export type LoginPageBranding = InferSelectModel<typeof loginPageBranding>;
export type ActionAuditLog = InferSelectModel<typeof actionAuditLog>;
export type AccessAuditLog = InferSelectModel<typeof accessAuditLog>;
export type AccessAuditLog = InferSelectModel<typeof accessAuditLog>;
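For context, `InferSelectModel` derives a row type straight from a table definition, so these exported types track schema changes automatically. A minimal sketch, assuming the `db`, `eq`, and `actionAuditLog` names shown in this diff (the query itself is illustrative):

// ActionAuditLog is exactly the row shape returned by a plain SELECT.
async function recentActions(orgId: string): Promise<ActionAuditLog[]> {
    return db
        .select()
        .from(actionAuditLog)
        .where(eq(actionAuditLog.orgId, orgId))
        .limit(50);
}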
@@ -7,12 +7,10 @@ import {
    bigint,
    real,
    text,
    index,
    uniqueIndex
    index
} from "drizzle-orm/pg-core";
import { InferSelectModel } from "drizzle-orm";
import { randomUUID } from "crypto";
import { alias } from "yargs";

export const domains = pgTable("domains", {
    domainId: varchar("domainId").primaryKey(),
@@ -42,18 +40,17 @@ export const orgs = pgTable("orgs", {
    orgId: varchar("orgId").primaryKey(),
    name: varchar("name").notNull(),
    subnet: varchar("subnet"),
    utilitySubnet: varchar("utilitySubnet"), // this is the subnet for utility addresses
    createdAt: text("createdAt"),
    requireTwoFactor: boolean("requireTwoFactor"),
    maxSessionLengthHours: integer("maxSessionLengthHours"),
    passwordExpiryDays: integer("passwordExpiryDays"),
    settingsLogRetentionDaysRequest: integer("settingsLogRetentionDaysRequest") // where 0 = dont keep logs and -1 = keep forever, and 9001 = end of the following year
    settingsLogRetentionDaysRequest: integer("settingsLogRetentionDaysRequest") // where 0 = dont keep logs and -1 = keep forever
        .notNull()
        .default(7),
    settingsLogRetentionDaysAccess: integer("settingsLogRetentionDaysAccess") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
    settingsLogRetentionDaysAccess: integer("settingsLogRetentionDaysAccess")
        .notNull()
        .default(0),
    settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
    settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction")
        .notNull()
        .default(0)
});
@@ -91,7 +88,8 @@ export const sites = pgTable("sites", {
    publicKey: varchar("publicKey"),
    lastHolePunch: bigint("lastHolePunch", { mode: "number" }),
    listenPort: integer("listenPort"),
    dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true)
    dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true),
    remoteSubnets: text("remoteSubnets") // comma-separated list of subnets that this site can access
});

export const resources = pgTable("resources", {
@@ -132,17 +130,7 @@ export const resources = pgTable("resources", {
    }),
    headers: text("headers"), // comma-separated list of headers to add to the request
    proxyProtocol: boolean("proxyProtocol").notNull().default(false),
    proxyProtocolVersion: integer("proxyProtocolVersion").default(1),

    maintenanceModeEnabled: boolean("maintenanceModeEnabled")
        .notNull()
        .default(false),
    maintenanceModeType: text("maintenanceModeType", {
        enum: ["forced", "automatic"]
    }).default("forced"), // "forced" = always show, "automatic" = only when down
    maintenanceTitle: text("maintenanceTitle"),
    maintenanceMessage: text("maintenanceMessage"),
    maintenanceEstimatedTime: text("maintenanceEstimatedTime")
    proxyProtocolVersion: integer("proxyProtocolVersion").default(1)
});

export const targets = pgTable("targets", {
@@ -187,8 +175,7 @@ export const targetHealthCheck = pgTable("targetHealthCheck", {
    hcFollowRedirects: boolean("hcFollowRedirects").default(true),
    hcMethod: varchar("hcMethod").default("GET"),
    hcStatus: integer("hcStatus"), // http code
    hcHealth: text("hcHealth").default("unknown"), // "unknown", "healthy", "unhealthy"
    hcTlsServerName: text("hcTlsServerName")
    hcHealth: text("hcHealth").default("unknown") // "unknown", "healthy", "unhealthy"
});

export const exitNodes = pgTable("exitNodes", {
@@ -217,44 +204,11 @@ export const siteResources = pgTable("siteResources", {
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    niceId: varchar("niceId").notNull(),
    name: varchar("name").notNull(),
    mode: varchar("mode").notNull(), // "host" | "cidr" | "port"
    protocol: varchar("protocol"), // only for port mode
    proxyPort: integer("proxyPort"), // only for port mode
    destinationPort: integer("destinationPort"), // only for port mode
    destination: varchar("destination").notNull(), // ip, cidr, hostname; validate against the mode
    enabled: boolean("enabled").notNull().default(true),
    alias: varchar("alias"),
    aliasAddress: varchar("aliasAddress"),
    tcpPortRangeString: varchar("tcpPortRangeString").notNull().default("*"),
    udpPortRangeString: varchar("udpPortRangeString").notNull().default("*"),
    disableIcmp: boolean("disableIcmp").notNull().default(false)
});

export const clientSiteResources = pgTable("clientSiteResources", {
    clientId: integer("clientId")
        .notNull()
        .references(() => clients.clientId, { onDelete: "cascade" }),
    siteResourceId: integer("siteResourceId")
        .notNull()
        .references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});

export const roleSiteResources = pgTable("roleSiteResources", {
    roleId: integer("roleId")
        .notNull()
        .references(() => roles.roleId, { onDelete: "cascade" }),
    siteResourceId: integer("siteResourceId")
        .notNull()
        .references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});

export const userSiteResources = pgTable("userSiteResources", {
    userId: varchar("userId")
        .notNull()
        .references(() => users.userId, { onDelete: "cascade" }),
    siteResourceId: integer("siteResourceId")
        .notNull()
        .references(() => siteResources.siteResourceId, { onDelete: "cascade" })
    protocol: varchar("protocol").notNull(),
    proxyPort: integer("proxyPort").notNull(),
    destinationPort: integer("destinationPort").notNull(),
    destinationIp: varchar("destinationIp").notNull(),
    enabled: boolean("enabled").notNull().default(true)
});

export const users = pgTable("user", {
@@ -302,8 +256,7 @@ export const sessions = pgTable("session", {
        .notNull()
        .references(() => users.userId, { onDelete: "cascade" }),
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull(),
    issuedAt: bigint("issuedAt", { mode: "number" }),
    deviceAuthUsed: boolean("deviceAuthUsed").notNull().default(false)
    issuedAt: bigint("issuedAt", { mode: "number" })
});

export const newtSessions = pgTable("newtSession", {
@@ -365,8 +318,7 @@ export const roles = pgTable("roles", {
        .notNull(),
    isAdmin: boolean("isAdmin"),
    name: varchar("name").notNull(),
    description: varchar("description"),
    requireDeviceApproval: boolean("requireDeviceApproval").default(false)
    description: varchar("description")
});

export const roleActions = pgTable("roleActions", {
@@ -467,23 +419,6 @@ export const resourceHeaderAuth = pgTable("resourceHeaderAuth", {
    headerAuthHash: varchar("headerAuthHash").notNull()
});

export const resourceHeaderAuthExtendedCompatibility = pgTable(
    "resourceHeaderAuthExtendedCompatibility",
    {
        headerAuthExtendedCompatibilityId: serial(
            "headerAuthExtendedCompatibilityId"
        ).primaryKey(),
        resourceId: integer("resourceId")
            .notNull()
            .references(() => resources.resourceId, { onDelete: "cascade" }),
        extendedCompatibilityIsActivated: boolean(
            "extendedCompatibilityIsActivated"
        )
            .notNull()
            .default(true)
    }
);

export const resourceAccessToken = pgTable("resourceAccessToken", {
    accessTokenId: varchar("accessTokenId").primaryKey(),
    orgId: varchar("orgId")
@@ -592,8 +527,7 @@ export const idp = pgTable("idp", {
    type: varchar("type").notNull(),
    defaultRoleMapping: varchar("defaultRoleMapping"),
    defaultOrgMapping: varchar("defaultOrgMapping"),
    autoProvision: boolean("autoProvision").notNull().default(false),
    tags: text("tags")
    autoProvision: boolean("autoProvision").notNull().default(false)
});

export const idpOidcConfig = pgTable("idpOidcConfig", {
@@ -664,7 +598,7 @@ export const idpOrg = pgTable("idpOrg", {
});

export const clients = pgTable("clients", {
    clientId: serial("clientId").primaryKey(),
    clientId: serial("id").primaryKey(),
    orgId: varchar("orgId")
        .references(() => orgs.orgId, {
            onDelete: "cascade"
@@ -673,12 +607,6 @@ export const clients = pgTable("clients", {
    exitNodeId: integer("exitNode").references(() => exitNodes.exitNodeId, {
        onDelete: "set null"
    }),
    userId: text("userId").references(() => users.userId, {
        // optionally tied to a user and in this case delete when the user deletes
        onDelete: "cascade"
    }),
    niceId: varchar("niceId").notNull(),
    olmId: text("olmId"), // to lock it to a specific olm optionally
    name: varchar("name").notNull(),
    pubKey: varchar("pubKey"),
    subnet: varchar("subnet").notNull(),
@@ -690,43 +618,18 @@ export const clients = pgTable("clients", {
    online: boolean("online").notNull().default(false),
    // endpoint: varchar("endpoint"),
    lastHolePunch: integer("lastHolePunch"),
    maxConnections: integer("maxConnections"),
    archived: boolean("archived").notNull().default(false),
    blocked: boolean("blocked").notNull().default(false),
    approvalState: varchar("approvalState").$type<
        "pending" | "approved" | "denied"
    >()
    maxConnections: integer("maxConnections")
});

export const clientSitesAssociationsCache = pgTable(
    "clientSitesAssociationsCache",
    {
        clientId: integer("clientId") // not a foreign key here so after its deleted the rebuild function can delete it and send the message
            .notNull(),
        siteId: integer("siteId").notNull(),
        isRelayed: boolean("isRelayed").notNull().default(false),
        endpoint: varchar("endpoint"),
        publicKey: varchar("publicKey") // this will act as the session's public key for hole punching so we can track when it changes
    }
);

export const clientSiteResourcesAssociationsCache = pgTable(
    "clientSiteResourcesAssociationsCache",
    {
        clientId: integer("clientId") // not a foreign key here so after its deleted the rebuild function can delete it and send the message
            .notNull(),
        siteResourceId: integer("siteResourceId").notNull()
    }
);

export const clientPostureSnapshots = pgTable("clientPostureSnapshots", {
    snapshotId: serial("snapshotId").primaryKey(),

    clientId: integer("clientId").references(() => clients.clientId, {
        onDelete: "cascade"
    }),

    collectedAt: integer("collectedAt").notNull()
export const clientSites = pgTable("clientSites", {
    clientId: integer("clientId")
        .notNull()
        .references(() => clients.clientId, { onDelete: "cascade" }),
    siteId: integer("siteId")
        .notNull()
        .references(() => sites.siteId, { onDelete: "cascade" }),
    isRelayed: boolean("isRelayed").notNull().default(false),
    endpoint: varchar("endpoint")
});

export const olms = pgTable("olms", {
@@ -734,127 +637,9 @@ export const olms = pgTable("olms", {
    secretHash: varchar("secretHash").notNull(),
    dateCreated: varchar("dateCreated").notNull(),
    version: text("version"),
    agent: text("agent"),
    name: varchar("name"),
    clientId: integer("clientId").references(() => clients.clientId, {
        // we will switch this depending on the current org it wants to connect to
        onDelete: "set null"
    }),
    userId: text("userId").references(() => users.userId, {
        // optionally tied to a user and in this case delete when the user deletes
        onDelete: "cascade"
    }),
    archived: boolean("archived").notNull().default(false)
});

export const currentFingerprint = pgTable("currentFingerprint", {
    fingerprintId: serial("id").primaryKey(),

    olmId: text("olmId")
        .references(() => olms.olmId, { onDelete: "cascade" })
        .notNull(),

    firstSeen: integer("firstSeen").notNull(),
    lastSeen: integer("lastSeen").notNull(),
    lastCollectedAt: integer("lastCollectedAt").notNull(),

    username: text("username"),
    hostname: text("hostname"),
    platform: text("platform"),
    osVersion: text("osVersion"),
    kernelVersion: text("kernelVersion"),
    arch: text("arch"),
    deviceModel: text("deviceModel"),
    serialNumber: text("serialNumber"),
    platformFingerprint: varchar("platformFingerprint"),

    // Platform-agnostic checks

    biometricsEnabled: boolean("biometricsEnabled").notNull().default(false),
    diskEncrypted: boolean("diskEncrypted").notNull().default(false),
    firewallEnabled: boolean("firewallEnabled").notNull().default(false),
    autoUpdatesEnabled: boolean("autoUpdatesEnabled").notNull().default(false),
    tpmAvailable: boolean("tpmAvailable").notNull().default(false),

    // Windows-specific posture check information

    windowsAntivirusEnabled: boolean("windowsAntivirusEnabled")
        .notNull()
        .default(false),

    // macOS-specific posture check information

    macosSipEnabled: boolean("macosSipEnabled").notNull().default(false),
    macosGatekeeperEnabled: boolean("macosGatekeeperEnabled")
        .notNull()
        .default(false),
    macosFirewallStealthMode: boolean("macosFirewallStealthMode")
        .notNull()
        .default(false),

    // Linux-specific posture check information

    linuxAppArmorEnabled: boolean("linuxAppArmorEnabled")
        .notNull()
        .default(false),
    linuxSELinuxEnabled: boolean("linuxSELinuxEnabled").notNull().default(false)
});

export const fingerprintSnapshots = pgTable("fingerprintSnapshots", {
    snapshotId: serial("id").primaryKey(),

    fingerprintId: integer("fingerprintId").references(
        () => currentFingerprint.fingerprintId,
        {
            onDelete: "set null"
        }
    ),

    username: text("username"),
    hostname: text("hostname"),
    platform: text("platform"),
    osVersion: text("osVersion"),
    kernelVersion: text("kernelVersion"),
    arch: text("arch"),
    deviceModel: text("deviceModel"),
    serialNumber: text("serialNumber"),
    platformFingerprint: varchar("platformFingerprint"),

    // Platform-agnostic checks

    biometricsEnabled: boolean("biometricsEnabled").notNull().default(false),
    diskEncrypted: boolean("diskEncrypted").notNull().default(false),
    firewallEnabled: boolean("firewallEnabled").notNull().default(false),
    autoUpdatesEnabled: boolean("autoUpdatesEnabled").notNull().default(false),
    tpmAvailable: boolean("tpmAvailable").notNull().default(false),

    // Windows-specific posture check information

    windowsAntivirusEnabled: boolean("windowsAntivirusEnabled")
        .notNull()
        .default(false),

    // macOS-specific posture check information

    macosSipEnabled: boolean("macosSipEnabled").notNull().default(false),
    macosGatekeeperEnabled: boolean("macosGatekeeperEnabled")
        .notNull()
        .default(false),
    macosFirewallStealthMode: boolean("macosFirewallStealthMode")
        .notNull()
        .default(false),

    // Linux-specific posture check information

    linuxAppArmorEnabled: boolean("linuxAppArmorEnabled")
        .notNull()
        .default(false),
    linuxSELinuxEnabled: boolean("linuxSELinuxEnabled")
        .notNull()
        .default(false),

    hash: text("hash").notNull(),
    collectedAt: integer("collectedAt").notNull()
})
});

export const olmSessions = pgTable("clientSession", {
@@ -968,21 +753,6 @@ export const requestAuditLog = pgTable(
    ]
);

export const deviceWebAuthCodes = pgTable("deviceWebAuthCodes", {
    codeId: serial("codeId").primaryKey(),
    code: text("code").notNull().unique(),
    ip: text("ip"),
    city: text("city"),
    deviceName: text("deviceName"),
    applicationName: text("applicationName").notNull(),
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull(),
    createdAt: bigint("createdAt", { mode: "number" }).notNull(),
    verified: boolean("verified").notNull().default(false),
    userId: varchar("userId").references(() => users.userId, {
        onDelete: "cascade"
    })
});

export type Org = InferSelectModel<typeof orgs>;
export type User = InferSelectModel<typeof users>;
export type Site = InferSelectModel<typeof sites>;
@@ -1011,9 +781,6 @@ export type ResourceSession = InferSelectModel<typeof resourceSessions>;
export type ResourcePincode = InferSelectModel<typeof resourcePincode>;
export type ResourcePassword = InferSelectModel<typeof resourcePassword>;
export type ResourceHeaderAuth = InferSelectModel<typeof resourceHeaderAuth>;
export type ResourceHeaderAuthExtendedCompatibility = InferSelectModel<
    typeof resourceHeaderAuthExtendedCompatibility
>;
export type ResourceOtp = InferSelectModel<typeof resourceOtp>;
export type ResourceAccessToken = InferSelectModel<typeof resourceAccessToken>;
export type ResourceWhitelist = InferSelectModel<typeof resourceWhitelist>;
@@ -1026,7 +793,7 @@ export type ApiKey = InferSelectModel<typeof apiKeys>;
export type ApiKeyAction = InferSelectModel<typeof apiKeyActions>;
export type ApiKeyOrg = InferSelectModel<typeof apiKeyOrg>;
export type Client = InferSelectModel<typeof clients>;
export type ClientSite = InferSelectModel<typeof clientSitesAssociationsCache>;
export type ClientSite = InferSelectModel<typeof clientSites>;
export type Olm = InferSelectModel<typeof olms>;
export type OlmSession = InferSelectModel<typeof olmSessions>;
export type UserClient = InferSelectModel<typeof userClients>;
@@ -1041,5 +808,4 @@ export type Blueprint = InferSelectModel<typeof blueprints>;
export type LicenseKey = InferSelectModel<typeof licenseKey>;
export type SecurityKey = InferSelectModel<typeof securityKeys>;
export type WebauthnChallenge = InferSelectModel<typeof webauthnChallenge>;
export type DeviceWebAuthCode = InferSelectModel<typeof deviceWebAuthCodes>;
export type RequestAuditLog = InferSelectModel<typeof requestAuditLog>;

@@ -1,4 +1,4 @@
import { db, loginPage, LoginPage, loginPageOrg, Org, orgs, roles } from "@server/db";
import { db, loginPage, LoginPage, loginPageOrg, Org, orgs } from "@server/db";
import {
    Resource,
    ResourcePassword,
@@ -14,9 +14,7 @@ import {
    sessions,
    userOrgs,
    userResources,
    users,
    ResourceHeaderAuthExtendedCompatibility,
    resourceHeaderAuthExtendedCompatibility
    users
} from "@server/db";
import { and, eq } from "drizzle-orm";

@@ -25,7 +23,6 @@ export type ResourceWithAuth = {
    pincode: ResourcePincode | null;
    password: ResourcePassword | null;
    headerAuth: ResourceHeaderAuth | null;
    headerAuthExtendedCompatibility: ResourceHeaderAuthExtendedCompatibility | null;
    org: Org;
};

@@ -55,14 +52,10 @@ export async function getResourceByDomain(
            resourceHeaderAuth,
            eq(resourceHeaderAuth.resourceId, resources.resourceId)
        )
        .leftJoin(
            resourceHeaderAuthExtendedCompatibility,
            eq(
                resourceHeaderAuthExtendedCompatibility.resourceId,
                resources.resourceId
            )
        .innerJoin(
            orgs,
            eq(orgs.orgId, resources.orgId)
        )
        .innerJoin(orgs, eq(orgs.orgId, resources.orgId))
        .where(eq(resources.fullDomain, domain))
        .limit(1);

@@ -75,8 +68,6 @@ export async function getResourceByDomain(
        pincode: result.resourcePincode,
        password: result.resourcePassword,
        headerAuth: result.resourceHeaderAuth,
        headerAuthExtendedCompatibility:
            result.resourceHeaderAuthExtendedCompatibility,
        org: result.orgs
    };
}
@@ -108,17 +99,9 @@ export async function getUserSessionWithUser(
 */
export async function getUserOrgRole(userId: string, orgId: string) {
    const userOrgRole = await db
        .select({
            userId: userOrgs.userId,
            orgId: userOrgs.orgId,
            roleId: userOrgs.roleId,
            isOwner: userOrgs.isOwner,
            autoProvisioned: userOrgs.autoProvisioned,
            roleName: roles.name
        })
        .select()
        .from(userOrgs)
        .where(and(eq(userOrgs.userId, userId), eq(userOrgs.orgId, orgId)))
        .leftJoin(roles, eq(userOrgs.roleId, roles.roleId))
        .limit(1);

    return userOrgRole.length > 0 ? userOrgRole[0] : null;

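The two sides of this hunk differ in result shape as well as in the columns selected: with an explicit `.select({ ... })`, drizzle returns flat rows, while a bare `.select()` combined with a join nests each table's columns under its own key. A sketch of the difference, assuming the `db`, `userOrgs`, `roles`, and `eq` names from this diff (the field access is illustrative):

async function roleNameForUser(userId: string, orgId: string) {
    // Bare .select() + leftJoin: rows look like { userOrgs: {...}, roles: {...} | null }
    const nested = await db
        .select()
        .from(userOrgs)
        .leftJoin(roles, eq(userOrgs.roleId, roles.roleId))
        .limit(1);
    const fromNested = nested[0]?.roles?.name;

    // Explicit .select({...}): rows are flat, e.g. { roleId, roleName }
    const flat = await db
        .select({ roleId: userOrgs.roleId, roleName: roles.name })
        .from(userOrgs)
        .leftJoin(roles, eq(userOrgs.roleId, roles.roleId))
        .limit(1);
    const fromFlat = flat[0]?.roleName;

    return fromNested ?? fromFlat ?? null;
}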
@@ -20,7 +20,6 @@ function createDb() {

export const db = createDb();
export default db;
export const primaryDb = db;
export type Transaction = Parameters<
    Parameters<(typeof db)["transaction"]>[0]
>[0];

@@ -8,7 +8,7 @@ const runMigrations = async () => {
    console.log("Running migrations...");
    try {
        migrate(db as any, {
            migrationsFolder: migrationsFolder
            migrationsFolder: migrationsFolder,
        });
        console.log("Migrations completed successfully.");
    } catch (error) {