Compare commits

7 commits — `msg-delive...policies`

| Author | SHA1 | Date |
|---|---|---|
| | 7fa44981cf | |
| | 1bacad7854 | |
| | 75cec731e8 | |
| | 16a88281bb | |
| | 1574cbc5df | |
| | 2cb2a115b0 | |
| | 9dce7b2cde | |
@@ -29,6 +29,4 @@ CONTRIBUTING.md
dist
.git
migrations/
config/
build.ts
tsconfig.json
config/
@@ -1,3 +1,6 @@
{
    "extends": ["next/core-web-vitals", "next/typescript"]
    "extends": [
        "next/core-web-vitals",
        "next/typescript"
    ]
}
.github/workflows/cicd.yml — 473 changes (vendored)

@@ -1,270 +1,34 @@
name: CI/CD Pipeline

# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.

permissions:
    contents: read
    packages: write # for GHCR push
    id-token: write # for Cosign Keyless (OIDC) Signing

# Required secrets:
# - DOCKER_HUB_USERNAME / DOCKER_HUB_ACCESS_TOKEN: push to Docker Hub
# - GITHUB_TOKEN: used for GHCR login and OIDC keyless signing
# - COSIGN_PRIVATE_KEY / COSIGN_PASSWORD / COSIGN_PUBLIC_KEY: for key-based signing

on:
    push:
        tags:
            - "[0-9]+.[0-9]+.[0-9]+"
            - "[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"

concurrency:
    group: ${{ github.ref }}
    cancel-in-progress: true

jobs:
    pre-run:
    release:
        name: Build and Release
        runs-on: ubuntu-latest
        permissions: write-all
        steps:
            - name: Configure AWS credentials
              uses: aws-actions/configure-aws-credentials@v2
              with:
                  role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
                  role-duration-seconds: 3600
                  aws-region: ${{ secrets.AWS_REGION }}

            - name: Verify AWS identity
              run: aws sts get-caller-identity

            - name: Start EC2 instances
              run: |
                  aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
                  aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
                  echo "EC2 instances started"


    release-arm:
        name: Build and Release (ARM64)
        runs-on: [self-hosted, linux, arm64, us-east-1]
        needs: [pre-run]
        if: >-
            ${{
                needs.pre-run.result == 'success'
            }}
        # Job-level timeout to avoid runaway or stuck runs
        timeout-minutes: 120
        env:
            # Target images
            DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
            GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}

        steps:
            - name: Checkout code
              uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
              uses: actions/checkout@v5

            - name: Monitor storage space
              run: |
                  THRESHOLD=75
                  USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
                  echo "Used space: $USED_SPACE%"
                  if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
                      echo "Used space is at or above the 75% threshold. Running Docker system prune."
                      echo y | docker system prune -a
                  else
                      echo "Used space is below the threshold. No action needed."
                  fi
            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3

            - name: Log in to Docker Hub
              uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
              uses: docker/login-action@v3
              with:
                  registry: docker.io
                  username: ${{ secrets.DOCKER_HUB_USERNAME }}
                  password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

            - name: Extract tag name
              id: get-tag
              run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
              shell: bash

            - name: Update version in package.json
              run: |
                  TAG=${{ env.TAG }}
                  sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
                  cat server/lib/consts.ts
              shell: bash

            - name: Check if release candidate
              id: check-rc
              run: |
                  TAG=${{ env.TAG }}
                  if [[ "$TAG" == *"-rc."* ]]; then
                      echo "IS_RC=true" >> $GITHUB_ENV
                  else
                      echo "IS_RC=false" >> $GITHUB_ENV
                  fi
              shell: bash

            - name: Build and push Docker images (Docker Hub - ARM64)
              run: |
                  TAG=${{ env.TAG }}
                  if [ "$IS_RC" = "true" ]; then
                      make build-rc-arm tag=$TAG
                  else
                      make build-release-arm tag=$TAG
                  fi
                  echo "Built & pushed ARM64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
              shell: bash

    release-amd:
        name: Build and Release (AMD64)
        runs-on: [self-hosted, linux, x64, us-east-1]
        needs: [pre-run]
        if: >-
            ${{
                needs.pre-run.result == 'success'
            }}
        # Job-level timeout to avoid runaway or stuck runs
        timeout-minutes: 120
        env:
            # Target images
            DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
            GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}

        steps:
            - name: Checkout code
              uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

            - name: Monitor storage space
              run: |
                  THRESHOLD=75
                  USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
                  echo "Used space: $USED_SPACE%"
                  if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
                      echo "Used space is at or above the 75% threshold. Running Docker system prune."
                      echo y | docker system prune -a
                  else
                      echo "Used space is below the threshold. No action needed."
                  fi

            - name: Log in to Docker Hub
              uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
              with:
                  registry: docker.io
                  username: ${{ secrets.DOCKER_HUB_USERNAME }}
                  password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

            - name: Extract tag name
              id: get-tag
              run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
              shell: bash

            - name: Update version in package.json
              run: |
                  TAG=${{ env.TAG }}
                  sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
                  cat server/lib/consts.ts
              shell: bash

            - name: Check if release candidate
              id: check-rc
              run: |
                  TAG=${{ env.TAG }}
                  if [[ "$TAG" == *"-rc."* ]]; then
                      echo "IS_RC=true" >> $GITHUB_ENV
                  else
                      echo "IS_RC=false" >> $GITHUB_ENV
                  fi
              shell: bash

            - name: Build and push Docker images (Docker Hub - AMD64)
              run: |
                  TAG=${{ env.TAG }}
                  if [ "$IS_RC" = "true" ]; then
                      make build-rc-amd tag=$TAG
                  else
                      make build-release-amd tag=$TAG
                  fi
                  echo "Built & pushed AMD64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
              shell: bash

    create-manifest:
        name: Create Multi-Arch Manifests
        runs-on: [self-hosted, linux, x64, us-east-1]
        needs: [release-arm, release-amd]
        if: >-
            ${{
                needs.release-arm.result == 'success' &&
                needs.release-amd.result == 'success'
            }}
        timeout-minutes: 30
        steps:
            - name: Checkout code
              uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

            - name: Log in to Docker Hub
              uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
              with:
                  registry: docker.io
                  username: ${{ secrets.DOCKER_HUB_USERNAME }}
                  password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

            - name: Extract tag name
              id: get-tag
              run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
              shell: bash

            - name: Check if release candidate
              id: check-rc
              run: |
                  TAG=${{ env.TAG }}
                  if [[ "$TAG" == *"-rc."* ]]; then
                      echo "IS_RC=true" >> $GITHUB_ENV
                  else
                      echo "IS_RC=false" >> $GITHUB_ENV
                  fi
              shell: bash

            - name: Create multi-arch manifests
              run: |
                  TAG=${{ env.TAG }}
                  if [ "$IS_RC" = "true" ]; then
                      make create-manifests-rc tag=$TAG
                  else
                      make create-manifests tag=$TAG
                  fi
                  echo "Created multi-arch manifests for tag: ${TAG}"
              shell: bash

    sign-and-package:
        name: Sign and Package
        runs-on: [self-hosted, linux, x64, us-east-1]
        needs: [release-arm, release-amd, create-manifest]
        if: >-
            ${{
                needs.release-arm.result == 'success' &&
                needs.release-amd.result == 'success' &&
                needs.create-manifest.result == 'success'
            }}
        # Job-level timeout to avoid runaway or stuck runs
        timeout-minutes: 120
        env:
            # Target images
            DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
            GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}

        steps:
            - name: Checkout code
              uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

            - name: Extract tag name
              id: get-tag
              run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
              shell: bash

            - name: Install Go
              uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
              uses: actions/setup-go@v6
              with:
                  go-version: 1.24

@@ -273,21 +37,18 @@ jobs:
                  TAG=${{ env.TAG }}
                  sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
                  cat server/lib/consts.ts
              shell: bash

            - name: Pull latest Gerbil version
              id: get-gerbil-tag
              run: |
                  LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name')
                  echo "LATEST_GERBIL_TAG=$LATEST_TAG" >> $GITHUB_ENV
              shell: bash

            - name: Pull latest Badger version
              id: get-badger-tag
              run: |
                  LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name')
                  echo "LATEST_BADGER_TAG=$LATEST_TAG" >> $GITHUB_ENV
              shell: bash

            - name: Update install/main.go
              run: |
@@ -299,231 +60,19 @@ jobs:
                  sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$BADGER_VERSION\"/" install/main.go
                  echo "Updated install/main.go with Pangolin version $PANGOLIN_VERSION, Gerbil version $GERBIL_VERSION, and Badger version $BADGER_VERSION"
                  cat install/main.go
              shell: bash

            - name: Build installer
              working-directory: install
              run: |
                  make go-build-release
                  make go-build-release

            - name: Upload artifacts from /install/bin
              uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
              uses: actions/upload-artifact@v4
              with:
                  name: install-bin
                  path: install/bin/

            - name: Install skopeo + jq
              # skopeo: copy/inspect images between registries
              # jq: JSON parsing tool used to extract digest values
            - name: Build and push Docker images
              run: |
                  sudo apt-get update -y
                  sudo apt-get install -y skopeo jq
                  skopeo --version
              shell: bash

            - name: Login to GHCR
              env:
                  REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
              run: |
                  mkdir -p "$(dirname "$REGISTRY_AUTH_FILE")"
                  skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
              shell: bash

            - name: Copy tags from Docker Hub to GHCR
              # Mirror the already-built images (all architectures) to GHCR so we can sign them
              # Wait a bit for both architectures to be available in Docker Hub manifest
              env:
                  REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
              run: |
                  set -euo pipefail
                  TAG=${{ env.TAG }}
                  MAJOR_TAG=$(echo $TAG | cut -d. -f1)
                  MINOR_TAG=$(echo $TAG | cut -d. -f1,2)

                  echo "Waiting for multi-arch manifests to be ready..."
                  sleep 30

                  # Determine if this is an RC release
                  IS_RC="false"
                  if echo "$TAG" | grep -qE "rc[0-9]+$"; then
                      IS_RC="true"
                  fi

                  if [ "$IS_RC" = "true" ]; then
                      echo "RC release detected - copying version-specific tags only"

                      # SQLite OSS
                      echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
                      skopeo copy --all --retry-times 3 \
                          docker://$DOCKERHUB_IMAGE:$TAG \
                          docker://$GHCR_IMAGE:$TAG

                      # PostgreSQL OSS
                      echo "Copying ${{ env.DOCKERHUB_IMAGE }}:postgresql-${TAG} -> ${{ env.GHCR_IMAGE }}:postgresql-${TAG}"
                      skopeo copy --all --retry-times 3 \
                          docker://$DOCKERHUB_IMAGE:postgresql-$TAG \
                          docker://$GHCR_IMAGE:postgresql-$TAG

                      # SQLite Enterprise
                      echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-${TAG} -> ${{ env.GHCR_IMAGE }}:ee-${TAG}"
                      skopeo copy --all --retry-times 3 \
                          docker://$DOCKERHUB_IMAGE:ee-$TAG \
                          docker://$GHCR_IMAGE:ee-$TAG

                      # PostgreSQL Enterprise
                      echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-postgresql-${TAG} -> ${{ env.GHCR_IMAGE }}:ee-postgresql-${TAG}"
                      skopeo copy --all --retry-times 3 \
                          docker://$DOCKERHUB_IMAGE:ee-postgresql-$TAG \
                          docker://$GHCR_IMAGE:ee-postgresql-$TAG
                  else
                      echo "Regular release detected - copying all tags (latest, major, minor, full version)"

                      # SQLite OSS - all tags
                      for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
                          echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:${TAG_SUFFIX}"
                          skopeo copy --all --retry-times 3 \
                              docker://$DOCKERHUB_IMAGE:$TAG_SUFFIX \
                              docker://$GHCR_IMAGE:$TAG_SUFFIX
                      done

                      # PostgreSQL OSS - all tags
                      for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
                          echo "Copying ${{ env.DOCKERHUB_IMAGE }}:postgresql-${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:postgresql-${TAG_SUFFIX}"
                          skopeo copy --all --retry-times 3 \
                              docker://$DOCKERHUB_IMAGE:postgresql-$TAG_SUFFIX \
                              docker://$GHCR_IMAGE:postgresql-$TAG_SUFFIX
                      done

                      # SQLite Enterprise - all tags
                      for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
                          echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:ee-${TAG_SUFFIX}"
                          skopeo copy --all --retry-times 3 \
                              docker://$DOCKERHUB_IMAGE:ee-$TAG_SUFFIX \
                              docker://$GHCR_IMAGE:ee-$TAG_SUFFIX
                      done

                      # PostgreSQL Enterprise - all tags
                      for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
                          echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-postgresql-${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:ee-postgresql-${TAG_SUFFIX}"
                          skopeo copy --all --retry-times 3 \
                              docker://$DOCKERHUB_IMAGE:ee-postgresql-$TAG_SUFFIX \
                              docker://$GHCR_IMAGE:ee-postgresql-$TAG_SUFFIX
                      done
                  fi

                  echo "All images copied successfully to GHCR!"
              shell: bash

            - name: Login to GitHub Container Registry (for cosign)
              uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
              with:
                  registry: ghcr.io
                  username: ${{ github.actor }}
                  password: ${{ secrets.GITHUB_TOKEN }}

            - name: Install cosign
              # cosign is used to sign and verify container images (key and keyless)
              uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0

            - name: Dual-sign and verify (GHCR & Docker Hub)
              # Sign each image by digest using keyless (OIDC) and key-based signing,
              # then verify both the public key signature and the keyless OIDC signature.
              env:
                  TAG: ${{ env.TAG }}
                  COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
                  COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
                  COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
                  COSIGN_YES: "true"
              run: |
                  set -euo pipefail

                  issuer="https://token.actions.githubusercontent.com"
                  id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)

                  # Determine if this is an RC release
                  IS_RC="false"
                  if echo "$TAG" | grep -qE "rc[0-9]+$"; then
                      IS_RC="true"
                  fi

                  # Define image variants to sign
                  if [ "$IS_RC" = "true" ]; then
                      echo "RC release - signing version-specific tags only"
                      IMAGE_TAGS=(
                          "${TAG}"
                          "postgresql-${TAG}"
                          "ee-${TAG}"
                          "ee-postgresql-${TAG}"
                      )
                  else
                      echo "Regular release - signing all tags"
                      MAJOR_TAG=$(echo $TAG | cut -d. -f1)
                      MINOR_TAG=$(echo $TAG | cut -d. -f1,2)
                      IMAGE_TAGS=(
                          "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"
                          "postgresql-latest" "postgresql-$MAJOR_TAG" "postgresql-$MINOR_TAG" "postgresql-$TAG"
                          "ee-latest" "ee-$MAJOR_TAG" "ee-$MINOR_TAG" "ee-$TAG"
                          "ee-postgresql-latest" "ee-postgresql-$MAJOR_TAG" "ee-postgresql-$MINOR_TAG" "ee-postgresql-$TAG"
                      )
                  fi

                  # Sign each image variant for both registries
                  for BASE_IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
                      for IMAGE_TAG in "${IMAGE_TAGS[@]}"; do
                          echo "Processing ${BASE_IMAGE}:${IMAGE_TAG}"

                          DIGEST="$(skopeo inspect --retry-times 3 docker://${BASE_IMAGE}:${IMAGE_TAG} | jq -r '.Digest')"
                          REF="${BASE_IMAGE}@${DIGEST}"
                          echo "Resolved digest: ${REF}"

                          echo "==> cosign sign (keyless) --recursive ${REF}"
                          cosign sign --recursive "${REF}"

                          echo "==> cosign sign (key) --recursive ${REF}"
                          cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"

                          echo "==> cosign verify (public key) ${REF}"
                          cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text

                          echo "==> cosign verify (keyless policy) ${REF}"
                          cosign verify \
                              --certificate-oidc-issuer "${issuer}" \
                              --certificate-identity-regexp "${id_regex}" \
                              "${REF}" -o text

                          echo "✓ Successfully signed and verified ${BASE_IMAGE}:${IMAGE_TAG}"
                      done
                  done

                  echo "All images signed and verified successfully!"
              shell: bash

    post-run:
        needs: [pre-run, release-arm, release-amd, create-manifest, sign-and-package]
        if: >-
            ${{
                always() &&
                needs.pre-run.result == 'success' &&
                (needs.release-arm.result == 'success' || needs.release-arm.result == 'skipped' || needs.release-arm.result == 'failure') &&
                (needs.release-amd.result == 'success' || needs.release-amd.result == 'skipped' || needs.release-amd.result == 'failure') &&
                (needs.create-manifest.result == 'success' || needs.create-manifest.result == 'skipped' || needs.create-manifest.result == 'failure') &&
                (needs.sign-and-package.result == 'success' || needs.sign-and-package.result == 'skipped' || needs.sign-and-package.result == 'failure')
            }}
        runs-on: ubuntu-latest
        permissions: write-all
        steps:
            - name: Configure AWS credentials
              uses: aws-actions/configure-aws-credentials@v2
              with:
                  role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
                  role-duration-seconds: 3600
                  aws-region: ${{ secrets.AWS_REGION }}

            - name: Verify AWS identity
              run: aws sts get-caller-identity

            - name: Stop EC2 instances
              run: |
                  aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
                  aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
                  echo "EC2 instances stopped"
                  make build-release tag=$TAG
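The dual-signing step above signs each image by its resolved digest, so a consumer can check either signature independently of tags. A minimal sketch of the consumer-side check, mirroring the workflow's own `cosign verify` calls — the image tag and the `cosign.pub` key path are illustrative, not taken from this diff:

```bash
#!/usr/bin/env bash
# Sketch: verify a published image the same two ways the workflow does.
# Assumes cosign is installed and cosign.pub is the project's published public key.
IMAGE=ghcr.io/fosrl/pangolin:latest   # illustrative tag

# Key-based verification against the public key
cosign verify --key cosign.pub "$IMAGE" -o text

# Keyless (OIDC) verification, pinned to workflows in this repository
cosign verify \
    --certificate-oidc-issuer "https://token.actions.githubusercontent.com" \
    --certificate-identity-regexp "^https://github.com/fosrl/pangolin/.+" \
    "$IMAGE" -o text
```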
.github/workflows/cicd.yml.backup — 426 changes (vendored)

@@ -1,426 +0,0 @@
(file deleted — a backup copy of the previous cicd.yml workflow, duplicating the removed lines shown in the cicd.yml diff above)
.github/workflows/linting.yml — 9 changes (vendored)

@@ -1,8 +1,5 @@
name: ESLint

permissions:
    contents: read

on:
    pull_request:
        paths:
@@ -21,10 +18,10 @@ jobs:
        runs-on: ubuntu-latest
        steps:
            - name: Checkout code
              uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
              uses: actions/checkout@v5

            - name: Set up Node.js
              uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
              uses: actions/setup-node@v5
              with:
                  node-version: '22'

@@ -35,4 +32,4 @@ jobs:
              run: npm run set:oss

            - name: Run ESLint
              run: npx eslint . --ext .js,.jsx,.ts,.tsx
              run: npx eslint . --ext .js,.jsx,.ts,.tsx
.github/workflows/mirror.yaml — 132 changes (vendored)

@@ -1,132 +0,0 @@
name: Mirror & Sign (Docker Hub to GHCR)

on:
    workflow_dispatch: {}

permissions:
    contents: read
    packages: write
    id-token: write # for keyless OIDC

env:
    SOURCE_IMAGE: docker.io/fosrl/pangolin
    DEST_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}

jobs:
    mirror-and-dual-sign:
        runs-on: amd64-runner
        steps:
            - name: Install skopeo + jq
              run: |
                  sudo apt-get update -y
                  sudo apt-get install -y skopeo jq
                  skopeo --version

            - name: Install cosign
              uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0

            - name: Input check
              run: |
                  test -n "${SOURCE_IMAGE}" || (echo "SOURCE_IMAGE is empty" && exit 1)
                  echo "Source : ${SOURCE_IMAGE}"
                  echo "Target : ${DEST_IMAGE}"

            # Auth for skopeo (containers-auth)
            - name: Skopeo login to GHCR
              run: |
                  skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"

            # Auth for cosign (docker-config)
            - name: Docker login to GHCR (for cosign)
              run: |
                  echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u "${{ github.actor }}" --password-stdin

            - name: List source tags
              run: |
                  set -euo pipefail
                  skopeo list-tags --retry-times 3 docker://"${SOURCE_IMAGE}" \
                      | jq -r '.Tags[]' | grep -v -e '-arm64' -e '-amd64' | sort -u > src-tags.txt
                  echo "Found source tags: $(wc -l < src-tags.txt)"
                  head -n 20 src-tags.txt || true

            - name: List destination tags (skip existing)
              run: |
                  set -euo pipefail
                  if skopeo list-tags --retry-times 3 docker://"${DEST_IMAGE}" >/tmp/dst.json 2>/dev/null; then
                      jq -r '.Tags[]' /tmp/dst.json | sort -u > dst-tags.txt
                  else
                      : > dst-tags.txt
                  fi
                  echo "Existing destination tags: $(wc -l < dst-tags.txt)"

            - name: Mirror, dual-sign, and verify
              env:
                  # keyless
                  COSIGN_YES: "true"
                  # key-based
                  COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
                  COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
                  # verify
                  COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
              run: |
                  set -euo pipefail
                  copied=0; skipped=0; v_ok=0; errs=0

                  issuer="https://token.actions.githubusercontent.com"
                  id_regex="^https://github.com/${{ github.repository }}/.+"

                  while read -r tag; do
                      [ -z "$tag" ] && continue

                      if grep -Fxq "$tag" dst-tags.txt; then
                          echo "::notice ::Skip (exists) ${DEST_IMAGE}:${tag}"
                          skipped=$((skipped+1))
                          continue
                      fi

                      echo "==> Copy ${SOURCE_IMAGE}:${tag} → ${DEST_IMAGE}:${tag}"
                      if ! skopeo copy --all --retry-times 3 \
                          docker://"${SOURCE_IMAGE}:${tag}" docker://"${DEST_IMAGE}:${tag}"; then
                          echo "::warning title=Copy failed::${SOURCE_IMAGE}:${tag}"
                          errs=$((errs+1)); continue
                      fi
                      copied=$((copied+1))

                      digest="$(skopeo inspect --retry-times 3 docker://"${DEST_IMAGE}:${tag}" | jq -r '.Digest')"
                      ref="${DEST_IMAGE}@${digest}"

                      echo "==> cosign sign (keyless) --recursive ${ref}"
                      if ! cosign sign --recursive "${ref}"; then
                          echo "::warning title=Keyless sign failed::${ref}"
                          errs=$((errs+1))
                      fi

                      echo "==> cosign sign (key) --recursive ${ref}"
                      if ! cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${ref}"; then
                          echo "::warning title=Key sign failed::${ref}"
                          errs=$((errs+1))
                      fi

                      echo "==> cosign verify (public key) ${ref}"
                      if ! cosign verify --key env://COSIGN_PUBLIC_KEY "${ref}" -o text; then
                          echo "::warning title=Verify(pubkey) failed::${ref}"
                          errs=$((errs+1))
                      fi

                      echo "==> cosign verify (keyless policy) ${ref}"
                      if ! cosign verify \
                          --certificate-oidc-issuer "${issuer}" \
                          --certificate-identity-regexp "${id_regex}" \
                          "${ref}" -o text; then
                          echo "::warning title=Verify(keyless) failed::${ref}"
                          errs=$((errs+1))
                      else
                          v_ok=$((v_ok+1))
                      fi
                  done < src-tags.txt

                  echo "---- Summary ----"
                  echo "Copied      : $copied"
                  echo "Skipped     : $skipped"
                  echo "Verified OK : $v_ok"
                  echo "Errors      : $errs"
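The deleted workflow's mirror loop reduces to three skopeo/jq operations per tag: enumerate, copy all architectures, resolve the immutable digest to sign. A condensed standalone sketch of those core commands, using the same image names (run outside CI after `skopeo login ghcr.io`; the `latest` tag is illustrative):

```bash
#!/usr/bin/env bash
# Sketch of the mirror loop's core skopeo/jq usage.
set -euo pipefail
SRC=docker.io/fosrl/pangolin
DST=ghcr.io/fosrl/pangolin

# Enumerate source tags, dropping per-arch suffixes as the workflow did
skopeo list-tags docker://"$SRC" | jq -r '.Tags[]' | grep -v -e '-arm64' -e '-amd64' | sort -u

# Copy one tag with every architecture, then resolve the digest a signer would use
skopeo copy --all --retry-times 3 docker://"$SRC:latest" docker://"$DST:latest"
skopeo inspect docker://"$DST:latest" | jq -r '.Digest'
```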
.github/workflows/restart-runners.yml — 39 changes (vendored)

@@ -1,39 +0,0 @@
name: Restart Runners

on:
    schedule:
        - cron: '0 0 */7 * *'

permissions:
    id-token: write
    contents: read

jobs:
    ec2-maintenance-prod:
        runs-on: ubuntu-latest
        permissions: write-all
        steps:
            - name: Configure AWS credentials
              uses: aws-actions/configure-aws-credentials@v5
              with:
                  role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
                  role-duration-seconds: 3600
                  aws-region: ${{ secrets.AWS_REGION }}

            - name: Verify AWS identity
              run: aws sts get-caller-identity

            - name: Start EC2 instance
              run: |
                  aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
                  aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
                  echo "EC2 instances started"

            - name: Wait
              run: sleep 600

            - name: Stop EC2 instance
              run: |
                  aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
                  aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
                  echo "EC2 instances stopped"
.github/workflows/saas.yml — 125 changes (vendored)

@@ -1,125 +0,0 @@
name: CI/CD Pipeline

# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.

permissions:
    contents: read
    packages: write # for GHCR push
    id-token: write # for Cosign Keyless (OIDC) Signing

on:
    push:
        tags:
            - "[0-9]+.[0-9]+.[0-9]+-s.[0-9]+"

concurrency:
    group: ${{ github.ref }}
    cancel-in-progress: true

jobs:
    pre-run:
        runs-on: ubuntu-latest
        permissions: write-all
        steps:
            - name: Configure AWS credentials
              uses: aws-actions/configure-aws-credentials@v2
              with:
                  role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
                  role-duration-seconds: 3600
                  aws-region: ${{ secrets.AWS_REGION }}

            - name: Verify AWS identity
              run: aws sts get-caller-identity

            - name: Start EC2 instances
              run: |
                  aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
                  echo "EC2 instances started"


    release-arm:
        name: Build and Release (ARM64)
        runs-on: [self-hosted, linux, arm64, us-east-1]
        needs: [pre-run]
        if: >-
            ${{
                needs.pre-run.result == 'success'
            }}
        # Job-level timeout to avoid runaway or stuck runs
        timeout-minutes: 120
        env:
            # Target images
            AWS_IMAGE: ${{ secrets.aws_account_id }}.dkr.ecr.us-east-1.amazonaws.com/${{ github.event.repository.name }}

        steps:
            - name: Checkout code
              uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

            - name: Monitor storage space
              run: |
                  THRESHOLD=75
                  USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
                  echo "Used space: $USED_SPACE%"
                  if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
                      echo "Used space is at or above the 75% threshold. Running Docker system prune."
                      echo y | docker system prune -a
                  else
                      echo "Used space is below the threshold. No action needed."
                  fi

            - name: Configure AWS credentials
              uses: aws-actions/configure-aws-credentials@v2
              with:
                  role-to-assume: arn:aws:iam::${{ secrets.aws_account_id }}:role/${{ secrets.AWS_ROLE_NAME }}
                  role-duration-seconds: 3600
                  aws-region: ${{ secrets.AWS_REGION }}

            - name: Login to Amazon ECR
              id: login-ecr
              uses: aws-actions/amazon-ecr-login@v2

            - name: Extract tag name
              id: get-tag
              run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
              shell: bash

            - name: Update version in package.json
              run: |
                  TAG=${{ env.TAG }}
                  sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
                  cat server/lib/consts.ts
              shell: bash

            - name: Build and push Docker images (Docker Hub - ARM64)
              run: |
                  TAG=${{ env.TAG }}
                  make build-saas tag=$TAG
                  echo "Built & pushed ARM64 images to: ${{ env.AWS_IMAGE }}:${TAG}"
              shell: bash

    post-run:
        needs: [pre-run, release-arm]
        if: >-
            ${{
                always() &&
                needs.pre-run.result == 'success' &&
                (needs.release-arm.result == 'success' || needs.release-arm.result == 'skipped' || needs.release-arm.result == 'failure')
            }}
        runs-on: ubuntu-latest
        permissions: write-all
        steps:
            - name: Configure AWS credentials
              uses: aws-actions/configure-aws-credentials@v2
              with:
                  role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
                  role-duration-seconds: 3600
                  aws-region: ${{ secrets.AWS_REGION }}

            - name: Verify AWS identity
              run: aws sts get-caller-identity

            - name: Stop EC2 instances
              run: |
                  aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
                  echo "EC2 instances stopped"
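The `build-saas` target in this deleted workflow pushed to the ECR repository named by `AWS_IMAGE`. A sketch of the equivalent manual invocation — the account ID, region, and tag below are placeholders, and the workflow obtained credentials from the assumed IAM role rather than a local profile:

```bash
# Placeholders: 123456789012 (account ID) and 1.2.3-s.1 (tag) are illustrative.
aws ecr get-login-password --region us-east-1 \
    | docker login --username AWS --password-stdin 123456789012.dkr.ecr.us-east-1.amazonaws.com

# The Makefile's build-saas target reads AWS_IMAGE and tag from the environment
AWS_IMAGE=123456789012.dkr.ecr.us-east-1.amazonaws.com/pangolin \
    make build-saas tag=1.2.3-s.1
```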
.github/workflows/stale-bot.yml — 4 changes (vendored)

@@ -14,7 +14,7 @@ jobs:
    stale:
        runs-on: ubuntu-latest
        steps:
            - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
            - uses: actions/stale@v10
              with:
                  days-before-stale: 14
                  days-before-close: 14
@@ -34,4 +34,4 @@ jobs:
                  operations-per-run: 100
                  remove-stale-when-updated: true
                  delete-branch: false
                  enable-statistics: true
                  enable-statistics: true
.github/workflows/test.yml — 34 changes (vendored)

@@ -1,8 +1,5 @@
name: Run Tests

permissions:
    contents: read

on:
    pull_request:
        branches:
@@ -12,12 +9,11 @@ on:
jobs:
    test:
        runs-on: ubuntu-latest
        steps:
            - name: Checkout repository
              uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

            - name: Install Node
              uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
        steps:
            - uses: actions/checkout@v5

            - uses: actions/setup-node@v5
              with:
                  node-version: '22'

@@ -58,26 +54,8 @@ jobs:
                  echo "App failed to start"
                  exit 1

    build-sqlite:
        runs-on: ubuntu-latest
        steps:
            - name: Checkout repository
              uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

            - name: Copy config file
              run: cp config/config.example.yml config/config.yml

            - name: Build Docker image sqlite
              run: make dev-build-sqlite

    build-postgres:
        runs-on: ubuntu-latest
        steps:
            - name: Checkout repository
              uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

            - name: Copy config file
              run: cp config/config.example.yml config/config.yml
              run: make build-sqlite

            - name: Build Docker image pg
              run: make dev-build-pg
              run: make build-pg
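The two image-build jobs reduce to a config copy plus one make target each, so reproducing them locally is a short sketch (both the old `dev-build-*` and new `build-*` target names appear in the diff above; which pair your checkout has depends on the branch):

```bash
# Sketch: reproduce the CI image builds locally.
cp config/config.example.yml config/config.yml
make dev-build-sqlite   # SQLite image (make build-sqlite in the newer variant)
make dev-build-pg       # Postgres image (make build-pg in the newer variant)
```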
.gitignore — 6 changes (vendored)

@@ -47,8 +47,4 @@ server/db/index.ts
server/build.ts
postgres/
dynamic/
*.mmdb
scratch/
tsconfig.json
hydrateSaas.ts
CLAUDE.md
*.mmdb

@@ -1,12 +0,0 @@
.github/
bruno/
cli/
config/
messages/
next.config.mjs/
public/
tailwind.config.js/
test/
**/*.yml
**/*.yaml
**/*.md
.vscode/extensions.json — 3 changes (vendored)

@@ -1,3 +0,0 @@
{
    "recommendations": ["esbenp.prettier-vscode"]
}
.vscode/settings.json — 22 changes (vendored)

@@ -1,22 +0,0 @@
{
    "editor.codeActionsOnSave": {
        "source.addMissingImports.ts": "always"
    },
    "editor.defaultFormatter": "esbenp.prettier-vscode",
    "[jsonc]": {
        "editor.defaultFormatter": "esbenp.prettier-vscode"
    },
    "[javascript]": {
        "editor.defaultFormatter": "esbenp.prettier-vscode"
    },
    "[typescript]": {
        "editor.defaultFormatter": "esbenp.prettier-vscode"
    },
    "[typescriptreact]": {
        "editor.defaultFormatter": "esbenp.prettier-vscode"
    },
    "[json]": {
        "editor.defaultFormatter": "esbenp.prettier-vscode"
    },
    "editor.formatOnSave": true
}
@@ -4,7 +4,7 @@ Contributions are welcome!

Please see the contribution and local development guide on the docs page before getting started:

https://docs.pangolin.net/development/contributing
https://docs.digpangolin.com/development/contributing

### Licensing Considerations
Dockerfile — 69 changes

@@ -1,22 +1,10 @@
FROM node:24-alpine AS builder

# OCI Image Labels - Build Args for dynamic values
ARG VERSION="dev"
ARG REVISION=""
ARG CREATED=""
ARG LICENSE="AGPL-3.0"
FROM node:22-alpine AS builder

WORKDIR /app

ARG BUILD=oss
ARG DATABASE=sqlite

# Derive title and description based on BUILD type
ARG IMAGE_TITLE="Pangolin"
ARG IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere"

RUN apk add --no-cache curl tzdata python3 make g++

# COPY package.json package-lock.json ./
COPY package*.json ./
RUN npm ci
@@ -24,54 +12,30 @@ RUN npm ci
COPY . .

RUN echo "export * from \"./$DATABASE\";" > server/db/index.ts
RUN echo "export const driver: \"pg\" | \"sqlite\" = \"$DATABASE\";" >> server/db/index.ts

RUN echo "export const build = \"$BUILD\" as \"saas\" | \"enterprise\" | \"oss\";" > server/build.ts
RUN echo "export const build = \"$BUILD\" as any;" > server/build.ts

# Copy the appropriate TypeScript configuration based on build type
RUN if [ "$BUILD" = "oss" ]; then cp tsconfig.oss.json tsconfig.json; \
    elif [ "$BUILD" = "saas" ]; then cp tsconfig.saas.json tsconfig.json; \
    elif [ "$BUILD" = "enterprise" ]; then cp tsconfig.enterprise.json tsconfig.json; \
    fi

# if the build is oss then remove the server/private directory
RUN if [ "$BUILD" = "oss" ]; then rm -rf server/private; fi

RUN if [ "$DATABASE" = "pg" ]; then npx drizzle-kit generate --dialect postgresql --schema ./server/db/pg/schema --out init; else npx drizzle-kit generate --dialect $DATABASE --schema ./server/db/$DATABASE/schema --out init; fi

RUN mkdir -p dist
RUN npm run next:build
RUN node esbuild.mjs -e server/index.ts -o dist/server.mjs -b $BUILD
RUN if [ "$DATABASE" = "pg" ]; then \
        node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs; \
    else \
        node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs; \
    fi

# test to make sure the build output is there and error if not
RUN test -f dist/server.mjs
RUN if [ "$DATABASE" = "pg" ]; then npx drizzle-kit generate --dialect postgresql --schema ./server/db/pg/schema.ts --out init; else npx drizzle-kit generate --dialect $DATABASE --schema ./server/db/$DATABASE/schema.ts --out init; fi

RUN npm run build:$DATABASE
RUN npm run build:cli

# Prune dev dependencies and clean up to prepare for copy to runner
RUN npm prune --omit=dev && npm cache clean --force

FROM node:24-alpine AS runner
FROM node:22-alpine AS runner

WORKDIR /app

# Only curl and tzdata needed at runtime - no build tools!
# Curl used for the health checks
RUN apk add --no-cache curl tzdata

# Copy pre-built node_modules from builder (already pruned to production only)
# This includes the compiled native modules like better-sqlite3
COPY --from=builder /app/node_modules ./node_modules
# COPY package.json package-lock.json ./
COPY package*.json ./

RUN npm ci --omit=dev && npm cache clean --force

COPY --from=builder /app/.next/standalone ./
COPY --from=builder /app/.next/static ./.next/static
COPY --from=builder /app/dist ./dist
COPY --from=builder /app/init ./dist/init
COPY --from=builder /app/package.json ./package.json

COPY ./cli/wrapper.sh /usr/local/bin/pangctl
RUN chmod +x /usr/local/bin/pangctl ./dist/cli.mjs
@@ -79,17 +43,4 @@ RUN chmod +x /usr/local/bin/pangctl ./dist/cli.mjs
COPY server/db/names.json ./dist/names.json
COPY public ./public

# OCI Image Labels
# https://github.com/opencontainers/image-spec/blob/main/annotations.md
LABEL org.opencontainers.image.source="https://github.com/fosrl/pangolin" \
    org.opencontainers.image.url="https://github.com/fosrl/pangolin" \
    org.opencontainers.image.documentation="https://docs.pangolin.net" \
    org.opencontainers.image.vendor="Fossorial" \
    org.opencontainers.image.licenses="${LICENSE}" \
    org.opencontainers.image.title="${IMAGE_TITLE}" \
    org.opencontainers.image.description="${IMAGE_DESCRIPTION}" \
    org.opencontainers.image.version="${VERSION}" \
    org.opencontainers.image.revision="${REVISION}" \
    org.opencontainers.image.created="${CREATED}"

CMD ["npm", "run", "start"]
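Both builder variants take the same `BUILD` and `DATABASE` build args, so a local single-arch build is a short sketch like the following (tag and arg values are illustrative; `--load` keeps the image local instead of pushing, as the release targets do):

```bash
# Sketch: build the OSS/SQLite image locally with the Dockerfile's build args.
docker buildx build \
    --build-arg BUILD=oss \
    --build-arg DATABASE=sqlite \
    --build-arg VERSION=dev \
    --tag pangolin:dev \
    --load .
```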
496
Makefile
@@ -1,56 +1,22 @@
|
||||
.PHONY: build build-pg build-release build-release-arm build-release-amd create-manifests build-arm build-x86 test clean
|
||||
.PHONY: build build-pg build-release build-arm build-x86 test clean
|
||||
|
||||
major_tag := $(shell echo $(tag) | cut -d. -f1)
|
||||
minor_tag := $(shell echo $(tag) | cut -d. -f1,2)
|
||||
|
||||
# OCI label variables
|
||||
CREATED := $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||
REVISION := $(shell git rev-parse HEAD 2>/dev/null || echo "unknown")
|
||||
|
||||
# Common OCI build args for OSS builds
|
||||
OCI_ARGS_OSS = --build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$(REVISION) \
|
||||
--build-arg CREATED=$(CREATED) \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere"
|
||||
|
||||
# Common OCI build args for Enterprise builds
|
||||
OCI_ARGS_EE = --build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$(REVISION) \
|
||||
--build-arg CREATED=$(CREATED) \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere"
|
||||
|
||||
.PHONY: build-release build-sqlite build-postgresql build-ee-sqlite build-ee-postgresql

build-release: build-sqlite build-postgresql build-ee-sqlite build-ee-postgresql

build-sqlite:
build-release:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
        exit 1; \
    fi
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=sqlite \
        $(OCI_ARGS_OSS) \
        --platform linux/arm64,linux/amd64 \
        --tag fosrl/pangolin:latest \
        --tag fosrl/pangolin:$(major_tag) \
        --tag fosrl/pangolin:$(minor_tag) \
        --tag fosrl/pangolin:$(tag) \
        --push .

build-postgresql:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
        exit 1; \
    fi
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=pg \
        $(OCI_ARGS_OSS) \
        --platform linux/arm64,linux/amd64 \
        --tag fosrl/pangolin:postgresql-latest \
        --tag fosrl/pangolin:postgresql-$(major_tag) \
@@ -58,463 +24,17 @@ build-postgresql:
        --tag fosrl/pangolin:postgresql-$(tag) \
        --push .

build-ee-sqlite:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
        exit 1; \
    fi
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=sqlite \
        $(OCI_ARGS_EE) \
        --platform linux/arm64,linux/amd64 \
        --tag fosrl/pangolin:ee-latest \
        --tag fosrl/pangolin:ee-$(major_tag) \
        --tag fosrl/pangolin:ee-$(minor_tag) \
        --tag fosrl/pangolin:ee-$(tag) \
        --push .

build-ee-postgresql:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
        exit 1; \
    fi
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=pg \
        $(OCI_ARGS_EE) \
        --platform linux/arm64,linux/amd64 \
        --tag fosrl/pangolin:ee-postgresql-latest \
        --tag fosrl/pangolin:ee-postgresql-$(major_tag) \
        --tag fosrl/pangolin:ee-postgresql-$(minor_tag) \
        --tag fosrl/pangolin:ee-postgresql-$(tag) \
        --push .

build-saas:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
        exit 1; \
    fi
    docker buildx build \
        --build-arg BUILD=saas \
        --build-arg DATABASE=pg \
        --platform linux/arm64 \
        --tag $(AWS_IMAGE):$(tag) \
        --push .

build-release-arm:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-release-arm tag=<tag>"; \
        exit 1; \
    fi
    @MAJOR_TAG=$$(echo $(tag) | cut -d. -f1); \
    MINOR_TAG=$$(echo $(tag) | cut -d. -f1,2); \
    CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
    REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=sqlite \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64 \
        --tag fosrl/pangolin:latest-arm64 \
        --tag fosrl/pangolin:$$MAJOR_TAG-arm64 \
        --tag fosrl/pangolin:$$MINOR_TAG-arm64 \
        --tag fosrl/pangolin:$(tag)-arm64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=pg \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64 \
        --tag fosrl/pangolin:postgresql-latest-arm64 \
        --tag fosrl/pangolin:postgresql-$$MAJOR_TAG-arm64 \
        --tag fosrl/pangolin:postgresql-$$MINOR_TAG-arm64 \
        --tag fosrl/pangolin:postgresql-$(tag)-arm64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=sqlite \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg LICENSE="Fossorial Commercial" \
        --build-arg IMAGE_TITLE="Pangolin EE" \
        --build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64 \
        --tag fosrl/pangolin:ee-latest-arm64 \
        --tag fosrl/pangolin:ee-$$MAJOR_TAG-arm64 \
        --tag fosrl/pangolin:ee-$$MINOR_TAG-arm64 \
        --tag fosrl/pangolin:ee-$(tag)-arm64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=pg \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg LICENSE="Fossorial Commercial" \
        --build-arg IMAGE_TITLE="Pangolin EE" \
        --build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64 \
        --tag fosrl/pangolin:ee-postgresql-latest-arm64 \
        --tag fosrl/pangolin:ee-postgresql-$$MAJOR_TAG-arm64 \
        --tag fosrl/pangolin:ee-postgresql-$$MINOR_TAG-arm64 \
        --tag fosrl/pangolin:ee-postgresql-$(tag)-arm64 \
        --push .

build-release-amd:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-release-amd tag=<tag>"; \
        exit 1; \
    fi
    @MAJOR_TAG=$$(echo $(tag) | cut -d. -f1); \
    MINOR_TAG=$$(echo $(tag) | cut -d. -f1,2); \
    CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
    REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=sqlite \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/amd64 \
        --tag fosrl/pangolin:latest-amd64 \
        --tag fosrl/pangolin:$$MAJOR_TAG-amd64 \
        --tag fosrl/pangolin:$$MINOR_TAG-amd64 \
        --tag fosrl/pangolin:$(tag)-amd64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=pg \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/amd64 \
        --tag fosrl/pangolin:postgresql-latest-amd64 \
        --tag fosrl/pangolin:postgresql-$$MAJOR_TAG-amd64 \
        --tag fosrl/pangolin:postgresql-$$MINOR_TAG-amd64 \
        --tag fosrl/pangolin:postgresql-$(tag)-amd64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=sqlite \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg LICENSE="Fossorial Commercial" \
        --build-arg IMAGE_TITLE="Pangolin EE" \
        --build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/amd64 \
        --tag fosrl/pangolin:ee-latest-amd64 \
        --tag fosrl/pangolin:ee-$$MAJOR_TAG-amd64 \
        --tag fosrl/pangolin:ee-$$MINOR_TAG-amd64 \
        --tag fosrl/pangolin:ee-$(tag)-amd64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=pg \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg LICENSE="Fossorial Commercial" \
        --build-arg IMAGE_TITLE="Pangolin EE" \
        --build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/amd64 \
        --tag fosrl/pangolin:ee-postgresql-latest-amd64 \
        --tag fosrl/pangolin:ee-postgresql-$$MAJOR_TAG-amd64 \
        --tag fosrl/pangolin:ee-postgresql-$$MINOR_TAG-amd64 \
        --tag fosrl/pangolin:ee-postgresql-$(tag)-amd64 \
        --push .

create-manifests:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make create-manifests tag=<tag>"; \
        exit 1; \
    fi
    @MAJOR_TAG=$$(echo $(tag) | cut -d. -f1); \
    MINOR_TAG=$$(echo $(tag) | cut -d. -f1,2); \
    echo "Creating multi-arch manifests for sqlite (oss)..." && \
    docker buildx imagetools create \
        --tag fosrl/pangolin:latest \
        --tag fosrl/pangolin:$$MAJOR_TAG \
        --tag fosrl/pangolin:$$MINOR_TAG \
        --tag fosrl/pangolin:$(tag) \
        fosrl/pangolin:latest-arm64 \
        fosrl/pangolin:latest-amd64 && \
    echo "Creating multi-arch manifests for postgresql (oss)..." && \
    docker buildx imagetools create \
        --tag fosrl/pangolin:postgresql-latest \
        --tag fosrl/pangolin:postgresql-$$MAJOR_TAG \
        --tag fosrl/pangolin:postgresql-$$MINOR_TAG \
        --tag fosrl/pangolin:postgresql-$(tag) \
        fosrl/pangolin:postgresql-latest-arm64 \
        fosrl/pangolin:postgresql-latest-amd64 && \
    echo "Creating multi-arch manifests for sqlite (enterprise)..." && \
    docker buildx imagetools create \
        --tag fosrl/pangolin:ee-latest \
        --tag fosrl/pangolin:ee-$$MAJOR_TAG \
        --tag fosrl/pangolin:ee-$$MINOR_TAG \
        --tag fosrl/pangolin:ee-$(tag) \
        fosrl/pangolin:ee-latest-arm64 \
        fosrl/pangolin:ee-latest-amd64 && \
    echo "Creating multi-arch manifests for postgresql (enterprise)..." && \
    docker buildx imagetools create \
        --tag fosrl/pangolin:ee-postgresql-latest \
        --tag fosrl/pangolin:ee-postgresql-$$MAJOR_TAG \
        --tag fosrl/pangolin:ee-postgresql-$$MINOR_TAG \
        --tag fosrl/pangolin:ee-postgresql-$(tag) \
        fosrl/pangolin:ee-postgresql-latest-arm64 \
        fosrl/pangolin:ee-postgresql-latest-amd64 && \
    echo "All multi-arch manifests created successfully!"
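After create-manifests runs, each manifest list should reference both the arm64 and amd64 images. One way to confirm, assuming the per-arch tags were pushed first:

    # Inspect the combined manifest and check that both platforms are listed
    docker buildx imagetools inspect fosrl/pangolin:latest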
build-rc:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-rc tag=<tag>"; \
        exit 1; \
    fi
    @CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
    REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=sqlite \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64,linux/amd64 \
        --tag fosrl/pangolin:$(tag) \
        --push . && \
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=pg \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64,linux/amd64 \
        --tag fosrl/pangolin:postgresql-$(tag) \
        --push . && \
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=sqlite \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg LICENSE="Fossorial Commercial" \
        --build-arg IMAGE_TITLE="Pangolin EE" \
        --build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64,linux/amd64 \
        --tag fosrl/pangolin:ee-$(tag) \
        --push . && \
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=pg \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg LICENSE="Fossorial Commercial" \
        --build-arg IMAGE_TITLE="Pangolin EE" \
        --build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64,linux/amd64 \
        --tag fosrl/pangolin:ee-postgresql-$(tag) \
        --push .

build-rc-arm:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-rc-arm tag=<tag>"; \
        exit 1; \
    fi
    @CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
    REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=sqlite \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64 \
        --tag fosrl/pangolin:$(tag)-arm64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=pg \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64 \
        --tag fosrl/pangolin:postgresql-$(tag)-arm64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=sqlite \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg LICENSE="Fossorial Commercial" \
        --build-arg IMAGE_TITLE="Pangolin EE" \
        --build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64 \
        --tag fosrl/pangolin:ee-$(tag)-arm64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=pg \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg LICENSE="Fossorial Commercial" \
        --build-arg IMAGE_TITLE="Pangolin EE" \
        --build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64 \
        --tag fosrl/pangolin:ee-postgresql-$(tag)-arm64 \
        --push .

build-rc-amd:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-rc-amd tag=<tag>"; \
        exit 1; \
    fi
    @CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
    REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=sqlite \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/amd64 \
        --tag fosrl/pangolin:$(tag)-amd64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=oss \
        --build-arg DATABASE=pg \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/amd64 \
        --tag fosrl/pangolin:postgresql-$(tag)-amd64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=sqlite \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg LICENSE="Fossorial Commercial" \
        --build-arg IMAGE_TITLE="Pangolin EE" \
        --build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/amd64 \
        --tag fosrl/pangolin:ee-$(tag)-amd64 \
        --push . && \
    docker buildx build \
        --build-arg BUILD=enterprise \
        --build-arg DATABASE=pg \
        --build-arg VERSION=$(tag) \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg LICENSE="Fossorial Commercial" \
        --build-arg IMAGE_TITLE="Pangolin EE" \
        --build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/amd64 \
        --tag fosrl/pangolin:ee-postgresql-$(tag)-amd64 \
        --push .

create-manifests-rc:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make create-manifests-rc tag=<tag>"; \
        exit 1; \
    fi
    @echo "Creating multi-arch manifests for RC sqlite (oss)..." && \
    docker buildx imagetools create \
        --tag fosrl/pangolin:$(tag) \
        fosrl/pangolin:$(tag)-arm64 \
        fosrl/pangolin:$(tag)-amd64 && \
    echo "Creating multi-arch manifests for RC postgresql (oss)..." && \
    docker buildx imagetools create \
        --tag fosrl/pangolin:postgresql-$(tag) \
        fosrl/pangolin:postgresql-$(tag)-arm64 \
        fosrl/pangolin:postgresql-$(tag)-amd64 && \
    echo "Creating multi-arch manifests for RC sqlite (enterprise)..." && \
    docker buildx imagetools create \
        --tag fosrl/pangolin:ee-$(tag) \
        fosrl/pangolin:ee-$(tag)-arm64 \
        fosrl/pangolin:ee-$(tag)-amd64 && \
    echo "Creating multi-arch manifests for RC postgresql (enterprise)..." && \
    docker buildx imagetools create \
        --tag fosrl/pangolin:ee-postgresql-$(tag) \
        fosrl/pangolin:ee-postgresql-$(tag)-arm64 \
        fosrl/pangolin:ee-postgresql-$(tag)-amd64 && \
    echo "All RC multi-arch manifests created successfully!"
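Taken together, the RC targets give a split release flow: each architecture is built on its native runner, then the manifests stitch the two halves together. A usage sketch (the version number is illustrative):

    make build-rc-arm tag=1.2.3-rc.1   # on the arm64 runner
    make build-rc-amd tag=1.2.3-rc.1   # on the amd64 runner
    make create-manifests-rc tag=1.2.3-rc.1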
build-arm:
    @CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
    REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
    docker buildx build \
        --build-arg VERSION=dev \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/arm64 \
        -t fosrl/pangolin:latest .
    docker buildx build --platform linux/arm64 -t fosrl/pangolin:latest .

build-x86:
    @CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
    REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
    docker buildx build \
        --build-arg VERSION=dev \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        --platform linux/amd64 \
        -t fosrl/pangolin:latest .
    docker buildx build --platform linux/amd64 -t fosrl/pangolin:latest .

dev-build-sqlite:
    @CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
    REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
    docker build \
        --build-arg DATABASE=sqlite \
        --build-arg VERSION=dev \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        -t fosrl/pangolin:latest .
build-sqlite:
    docker build --build-arg DATABASE=sqlite -t fosrl/pangolin:latest .

dev-build-pg:
    @CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
    REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
    docker build \
        --build-arg DATABASE=pg \
        --build-arg VERSION=dev \
        --build-arg REVISION=$$REVISION \
        --build-arg CREATED=$$CREATED \
        --build-arg IMAGE_TITLE="Pangolin" \
        --build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
        -t fosrl/pangolin:postgresql-latest .
build-pg:
    docker build --build-arg DATABASE=pg -t fosrl/pangolin:postgresql-latest .

test:
    docker run -it -p 3000:3000 -p 3001:3001 -p 3002:3002 -v ./config:/app/config fosrl/pangolin:latest
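For a local development loop, the dev-build targets pair naturally with test, which mounts ./config into the container. A sketch, assuming a config directory exists in the working tree:

    make dev-build-sqlite
    make test   # serves on ports 3000-3002 with ./config mounted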
160
README.md
@@ -1,101 +1,157 @@
<div align="center">
    <h2>
        <a href="https://pangolin.net/">
            <picture>
                <source media="(prefers-color-scheme: dark)" srcset="public/logo/word_mark_white.png">
                <img alt="Pangolin Logo" src="public/logo/word_mark_black.png" width="350">
            <picture>
                <source media="(prefers-color-scheme: dark)" srcset="public/logo/word_mark_white.png">
                <img alt="Pangolin Logo" src="public/logo/word_mark_black.png" width="250">
            </picture>
        </a>
    </h2>
</div>

<h4 align="center">Secure gateway to your private networks</h4>
<div align="center">

_Pangolin tunnels your services to the internet so you can access anything from anywhere._

</div>

<div align="center">
  <h5>
      <a href="https://pangolin.net/">
      <a href="https://digpangolin.com">
        Website
      </a>
      <span> | </span>
      <a href="https://docs.pangolin.net/">
        Documentation
      <a href="https://docs.digpangolin.com/self-host/quick-install-managed">
        Quick Install Guide
      </a>
      <span> | </span>
      <a href="mailto:contact@pangolin.net">
      <a href="mailto:contact@fossorial.io">
        Contact Us
      </a>
      <span> | </span>
      <a href="https://digpangolin.com/slack">
        Slack
      </a>
      <span> | </span>
      <a href="https://discord.gg/HCJR8Xhme4">
        Discord
      </a>
  </h5>
</div>

<div align="center">

[![Discord](https://img.shields.io/discord/1325658630518865980?logo=discord&style=flat-square)](https://discord.gg/HCJR8Xhme4)
[![Slack](https://img.shields.io/badge/Slack-4A154B?style=flat-square&logo=slack&logoColor=white)](https://pangolin.net/slack)
[![Slack](https://img.shields.io/badge/Slack-4A154B?style=flat-square&logo=slack&logoColor=white)](https://digpangolin.com/slack)
[![Docker](https://img.shields.io/docker/pulls/fosrl/pangolin?style=flat-square)](https://hub.docker.com/r/fosrl/pangolin)
![Stars](https://img.shields.io/github/stars/fosrl/pangolin?style=flat-square)
[![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UC2Op2nCRb8PLvHBBcbnA0PA?style=flat-square&label=youtube%20subscribers)](https://www.youtube.com/@pangolin-net)
[![Discord](https://img.shields.io/discord/1325658630518865980?logo=discord&style=flat-square)](https://discord.gg/HCJR8Xhme4)
[![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UC2Op2nCRb8PLvHBBcbnA0PA?style=flat-square&label=youtube%20subscribers)](https://www.youtube.com/@fossorial-app)

</div>

<p align="center">
    <strong>
        Start testing Pangolin at <a href="https://app.pangolin.net/auth/signup">app.pangolin.net</a>
        Start testing Pangolin at <a href="https://pangolin.fossorial.io/auth/signup">pangolin.fossorial.io</a>
    </strong>
</p>

Pangolin is an open-source, identity-based remote access platform built on WireGuard that enables secure, seamless connectivity to private and public resources. Pangolin combines reverse proxy and VPN capabilities into one platform, providing browser-based access to web applications and client-based access to any private resources, all with zero-trust security and granular access control.
Pangolin is a self-hosted tunneled reverse proxy server with identity and access control, designed to securely expose private resources on distributed networks. Acting as a central hub, it connects isolated networks — even those behind restrictive firewalls — through encrypted tunnels, enabling easy access to remote services without opening ports.

## Installation
<img src="public/screenshots/hero.png" alt="Preview"/>

- Check out the [quick install guide](https://docs.pangolin.net/self-host/quick-install) for how to install and set up Pangolin.
- Install from the [DigitalOcean marketplace](https://marketplace.digitalocean.com/apps/pangolin-ce-1?refcode=edf0480eeb81) for a one-click pre-configured installer.

<img src="public/screenshots/hero.png" />

## Deployment Options

| <img width=500 /> | Description |
|---|---|
| **Self-Host: Community Edition** | Free, open source, and licensed under AGPL-3. |
| **Self-Host: Enterprise Edition** | Licensed under the Fossorial Commercial License. Free for personal and hobbyist use, and for businesses earning under \$100K USD annually. |
| **Pangolin Cloud** | Fully managed service with instant setup and pay-as-you-go pricing — no infrastructure required. Or, self-host your own [remote node](https://docs.pangolin.net/manage/remote-node/nodes) and connect to our control plane. |

## Key Features

| <img width=500 /> | <img width=500 /> |
|---|---|
| **Connect remote networks with sites**<br /><br />Pangolin's lightweight site connectors create secure tunnels from remote networks without requiring public IP addresses or open ports. Sites make any network anywhere available for authorized access. | <img src="public/screenshots/sites.png" width=500 /><tr></tr> |
| **Browser-based reverse proxy access**<br /><br />Expose web applications through identity and context-aware tunneled reverse proxies. Pangolin handles routing, load balancing, health checking, and automatic SSL certificates without exposing your network directly to the internet. Users access applications through any web browser with authentication and granular access control. | <img src="public/clip.gif" width=500 /><tr></tr> |
| **Client-based private resource access**<br /><br />Access private resources like SSH servers, databases, RDP, and entire network ranges through Pangolin clients. Intelligent NAT traversal enables connections even through restrictive firewalls, while DNS aliases provide friendly names and fast connections to resources across all your sites. | <img src="public/screenshots/private-resources.png" width=500 /><tr></tr> |
| **Zero-trust granular access**<br /><br />Grant users access to specific resources, not entire networks. Unlike traditional VPNs that expose full network access, Pangolin's zero-trust model ensures users can only reach the applications and services you explicitly define, reducing security risk and attack surface. | <img src="public/screenshots/user-devices.png" width=500 /><tr></tr> |
### Reverse Proxy Through WireGuard Tunnel

## Download Clients
- Expose private resources on your network **without opening ports** (firewall punching).
- Secure and easy-to-configure private connectivity via a custom **user-space WireGuard client**, [Newt](https://github.com/fosrl/newt).
- Built-in support for any WireGuard client.
- Automated **SSL certificates** (https) via [LetsEncrypt](https://letsencrypt.org/).
- Support for HTTP/HTTPS and **raw TCP/UDP services**.
- Load balancing.
- Extend functionality with existing [Traefik](https://github.com/traefik/traefik) plugins, such as [CrowdSec](https://plugins.traefik.io/plugins/6335346ca4caa9ddeffda116/crowdsec-bouncer-traefik-plugin) and [Geoblock](https://github.com/PascalMinder/geoblock).
- **Automatically install and configure CrowdSec via Pangolin's installer script.**
- Attach as many sites to the central server as you wish.

Download the Pangolin client for your platform:
### Identity & Access Management

- [Mac](https://pangolin.net/downloads/mac)
- [Windows](https://pangolin.net/downloads/windows)
- [Linux](https://pangolin.net/downloads/linux)
- Centralized authentication system using platform SSO. **Users will only have to manage one login.**
- **Define access control rules for IPs, IP ranges, and URL paths per resource.**
- TOTP with backup codes for two-factor authentication.
- Create organizations, each with multiple sites, users, and roles.
- **Role-based access control** to manage resource access permissions.
- Additional authentication options include:
    - Email whitelisting with **one-time passcodes.**
    - **Temporary, self-destructing share links.**
    - Resource-specific pin codes.
    - Resource-specific passwords.
    - Passkeys
- External identity provider (IdP) support with OAuth2/OIDC, such as Authentik, Keycloak, Okta, and others.
    - Auto-provision users and roles from your IdP.

## Get Started
<img src="public/auth-diagram1.png" alt="Auth and diagram"/>

### Check out the docs
## Use Cases

We encourage everyone to read the full documentation first, available at [docs.pangolin.net](https://docs.pangolin.net). This README provides only a brief subset of the docs to illustrate some basic ideas.
### Manage Access to Internal Apps

### Sign up and try now
- Grant users access to your apps from anywhere using just a web browser. No client software required.

For Pangolin's managed service, you will first need to create an account at [app.pangolin.net](https://app.pangolin.net). We have a generous free tier to get started.
### Developers and DevOps

- Expose and test internal tools and dashboards like **Grafana**. Bring localhost or private IPs online for easy access.

### Secure API Gateway

- One application load balancer across multiple clouds and on-premises.

### IoT and Edge Devices

- Easily expose **IoT devices**, **edge servers**, or **Raspberry Pi** to the internet for field equipment monitoring.

<img src="public/screenshots/sites.png" alt="Sites"/>

## Deployment Options

### Fully Self Hosted

Host the full application on your own server or in the cloud with a VPS. Take a look at the [documentation](https://docs.digpangolin.com/self-host/quick-install) to get started.

> Many of our users have had a great experience with [RackNerd](https://my.racknerd.com/aff.php?aff=13788). Depending on promotions, you can get a [**VPS with 1 vCPU, 1GB RAM, and ~20GB SSD for just around $12/year**](https://my.racknerd.com/aff.php?aff=13788&pid=912). That's a great deal!

### Pangolin Cloud

Easy to use with simple [pay-as-you-go pricing](https://digpangolin.com/pricing). [Check it out here](https://pangolin.fossorial.io/auth/signup).

- Everything you get with self-hosted Pangolin, but fully managed for you.

### Managed & High Availability

Managed control plane, your infrastructure:

- We manage the database and control plane.
- You self-host a lightweight exit node.
- Traffic flows through your infrastructure.
- We coordinate failover between your nodes, or to Cloud, when things go bad.

Try it out using [Pangolin Cloud](https://pangolin.fossorial.io)

### Full Enterprise On-Premises

[Contact us](mailto:numbat@fossorial.io) for full distributed, enterprise deployments on your own infrastructure, controlled by your team.

## Project Development / Roadmap

We want to hear your feature requests! Add them to the [discussion board](https://github.com/orgs/fosrl/discussions/categories/feature-requests).

## Licensing

Pangolin is dual licensed under the AGPL-3 and the [Fossorial Commercial License](https://pangolin.net/fcl.html). For inquiries about commercial licensing, please contact us at [contact@pangolin.net](mailto:contact@pangolin.net).
Pangolin is dual licensed under the AGPL-3 and the Fossorial Commercial license. For inquiries about commercial licensing, please contact us at [numbat@fossorial.io](mailto:numbat@fossorial.io).

## Contributions

Looking for something to contribute? Take a look at issues marked with [help wanted](https://github.com/fosrl/pangolin/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22help%20wanted%22). Also look through the feature requests in Discussions: all are open to work on, and some are marked as good first issues.

Please see [CONTRIBUTING](./CONTRIBUTING.md) in the repository for guidelines and best practices.

---
Please post bug reports and other functional issues in the [Issues](https://github.com/fosrl/pangolin/issues) section of the repository.

WireGuard® is a registered trademark of Jason A. Donenfeld.
If you are looking to help with translations, please contribute [on Crowdin](https://crowdin.com/project/fossorial-pangolin) or open a PR with changes to the translation files found in `messages/`.
@@ -3,7 +3,7 @@
If you discover a security vulnerability, please follow the steps below to responsibly disclose it to us:

1. **Do not create a public GitHub issue or discussion post.** This could put the security of other users at risk.
2. Send a detailed report to [security@pangolin.net](mailto:security@pangolin.net) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:
2. Send a detailed report to [security@fossorial.io](mailto:security@fossorial.io) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:

    - Description and location of the vulnerability.
    - Potential impact of the vulnerability.
@@ -8,7 +8,7 @@ import base64
YAML_FILE_PATH = 'blueprint.yaml'

# The API endpoint and headers from the curl request
API_URL = 'http://api.pangolin.net/v1/org/test/blueprint'
API_URL = 'http://api.pangolin.fossorial.io/v1/org/test/blueprint'
HEADERS = {
    'accept': '*/*',
    'Authorization': 'Bearer <your_token_here>',
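Since the script is a thin wrapper around one HTTP call, the same request can be issued straight from the shell. A sketch of an equivalent curl invocation; the HTTP method and payload encoding are assumptions here, as the rest of the script is not shown in this diff:

    curl -X PUT 'http://api.pangolin.net/v1/org/test/blueprint' \
      -H 'accept: */*' \
      -H 'Authorization: Bearer <your_token_here>' \
      --data-binary @blueprint.yaml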
@@ -28,10 +28,9 @@ proxy-resources:
    # sso-roles:
    #   - Member
    # sso-users:
    #   - owen@pangolin.net
    #   - owen@fossorial.io
    # whitelist-users:
    #   - owen@pangolin.net
    # auto-login-idp: 1
    #   - owen@fossorial.io
    headers:
      - name: X-Example-Header
        value: example-value
@@ -5,14 +5,14 @@ meta {
}

post {
  url: http://localhost:3000/api/v1/auth/login
  url: http://localhost:4000/api/v1/auth/login
  body: json
  auth: none
}

body:json {
  {
    "email": "admin@fosrl.io",
    "email": "owen@fossorial.io",
    "password": "Password123!"
  }
}
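Outside Bruno, the updated login request is easy to replay from the shell. A minimal sketch against the new port, using the seeded credentials from the collection (the Content-Type header is an assumption):

    curl -X POST http://localhost:4000/api/v1/auth/login \
      -H 'Content-Type: application/json' \
      -d '{"email": "owen@fossorial.io", "password": "Password123!"}'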
@@ -12,6 +12,6 @@ post {

body:json {
  {
    "email": "milo@pangolin.net"
    "email": "milo@fossorial.io"
  }
}

@@ -12,7 +12,7 @@ put {

body:json {
  {
    "email": "numbat@pangolin.net",
    "email": "numbat@fossorial.io",
    "password": "Password123!"
  }
}

@@ -1,15 +0,0 @@
meta {
  name: createOlm
  type: http
  seq: 1
}

put {
  url: http://localhost:3000/api/v1/olm
  body: none
  auth: inherit
}

settings {
  encodeUrl: true
}
@@ -1,8 +0,0 @@
meta {
  name: Olm
  seq: 15
}

auth {
  mode: inherit
}
@@ -1,6 +1,6 @@
{
  "version": "1",
  "name": "Pangolin",
  "name": "Pangolin Saas",
  "type": "collection",
  "ignore": [
    "node_modules",
@@ -1,36 +0,0 @@
import { CommandModule } from "yargs";
import { db, exitNodes } from "@server/db";
import { eq } from "drizzle-orm";

type ClearExitNodesArgs = {};

export const clearExitNodes: CommandModule<
    {},
    ClearExitNodesArgs
> = {
    command: "clear-exit-nodes",
    describe: "Clear all exit nodes from the database",
    // no args
    builder: (yargs) => {
        return yargs;
    },
    handler: async (argv: {}) => {
        try {
            console.log(`Clearing all exit nodes from the database`);

            // Delete all exit nodes (the tautological where clause matches every row)
            const deletedCount = await db
                .delete(exitNodes)
                .where(eq(exitNodes.exitNodeId, exitNodes.exitNodeId))
                .returning();

            console.log(`Deleted ${deletedCount.length} exit node(s) from the database`);

            process.exit(0);
        } catch (error) {
            console.error("Error:", error);
            process.exit(1);
        }
    }
};
@@ -1,284 +0,0 @@
import { CommandModule } from "yargs";
import { db, idpOidcConfig, licenseKey } from "@server/db";
import { encrypt, decrypt } from "@server/lib/crypto";
import { configFilePath1, configFilePath2 } from "@server/lib/consts";
import { eq } from "drizzle-orm";
import fs from "fs";
import yaml from "js-yaml";

type RotateServerSecretArgs = {
    "old-secret": string;
    "new-secret": string;
    force?: boolean;
};

export const rotateServerSecret: CommandModule<
    {},
    RotateServerSecretArgs
> = {
    command: "rotate-server-secret",
    describe:
        "Rotate the server secret by decrypting all encrypted values with the old secret and re-encrypting with a new secret",
    builder: (yargs) => {
        return yargs
            .option("old-secret", {
                type: "string",
                demandOption: true,
                describe: "The current server secret (for verification)"
            })
            .option("new-secret", {
                type: "string",
                demandOption: true,
                describe: "The new server secret to use"
            })
            .option("force", {
                type: "boolean",
                default: false,
                describe:
                    "Force rotation even if the old secret doesn't match the config file. " +
                    "Use this if you know the old secret is correct but the config file is out of sync. " +
                    "WARNING: This will attempt to decrypt all values with the provided old secret. " +
                    "If the old secret is incorrect, the rotation will fail or corrupt data."
            });
    },
    handler: async (argv: {
        "old-secret": string;
        "new-secret": string;
        force?: boolean;
    }) => {
        try {
            // Determine which config file exists
            const configPath = fs.existsSync(configFilePath1)
                ? configFilePath1
                : fs.existsSync(configFilePath2)
                  ? configFilePath2
                  : null;

            if (!configPath) {
                console.error(
                    "Error: Config file not found. Expected config.yml or config.yaml in the config directory."
                );
                process.exit(1);
            }

            // Read current config
            const configContent = fs.readFileSync(configPath, "utf8");
            const config = yaml.load(configContent) as any;

            if (!config?.server?.secret) {
                console.error(
                    "Error: No server secret found in config file. Cannot rotate."
                );
                process.exit(1);
            }

            const configSecret = config.server.secret;
            const oldSecret = argv["old-secret"];
            const newSecret = argv["new-secret"];
            const force = argv.force || false;

            // Verify that the provided old secret matches the one in config
            if (configSecret !== oldSecret) {
                if (!force) {
                    console.error(
                        "Error: The provided old secret does not match the secret in the config file."
                    );
                    console.error(
                        "\nIf you are certain the old secret is correct and the config file is out of sync,"
                    );
                    console.error(
                        "you can use the --force flag to bypass this check."
                    );
                    console.error(
                        "\nWARNING: Using --force with an incorrect old secret will cause the rotation to fail"
                    );
                    console.error(
                        "or corrupt encrypted data. Only use --force if you are absolutely certain."
                    );
                    process.exit(1);
                } else {
                    console.warn(
                        "\nWARNING: Using --force flag. Bypassing old secret verification."
                    );
                    console.warn(
                        "The provided old secret does not match the config file, but proceeding anyway."
                    );
                    console.warn(
                        "If the old secret is incorrect, this operation will fail or corrupt data.\n"
                    );
                }
            }

            // Validate new secret
            if (newSecret.length < 8) {
                console.error(
                    "Error: New secret must be at least 8 characters long"
                );
                process.exit(1);
            }

            if (oldSecret === newSecret) {
                console.error("Error: New secret must be different from old secret");
                process.exit(1);
            }

            console.log("Starting server secret rotation...");
            console.log("This will decrypt and re-encrypt all encrypted values in the database.");

            // Read all data first
            console.log("\nReading encrypted data from database...");
            const idpConfigs = await db.select().from(idpOidcConfig);
            const licenseKeys = await db.select().from(licenseKey);

            console.log(`Found ${idpConfigs.length} OIDC IdP configuration(s)`);
            console.log(`Found ${licenseKeys.length} license key(s)`);

            // Prepare all decrypted and re-encrypted values
            console.log("\nDecrypting and re-encrypting values...");

            type IdpUpdate = {
                idpOauthConfigId: number;
                encryptedClientId: string;
                encryptedClientSecret: string;
            };

            type LicenseKeyUpdate = {
                oldLicenseKeyId: string;
                newLicenseKeyId: string;
                encryptedToken: string;
                encryptedInstanceId: string;
            };

            const idpUpdates: IdpUpdate[] = [];
            const licenseKeyUpdates: LicenseKeyUpdate[] = [];

            // Process idpOidcConfig entries
            for (const idpConfig of idpConfigs) {
                try {
                    // Decrypt with old secret
                    const decryptedClientId = decrypt(idpConfig.clientId, oldSecret);
                    const decryptedClientSecret = decrypt(
                        idpConfig.clientSecret,
                        oldSecret
                    );

                    // Re-encrypt with new secret
                    const encryptedClientId = encrypt(decryptedClientId, newSecret);
                    const encryptedClientSecret = encrypt(
                        decryptedClientSecret,
                        newSecret
                    );

                    idpUpdates.push({
                        idpOauthConfigId: idpConfig.idpOauthConfigId,
                        encryptedClientId,
                        encryptedClientSecret
                    });
                } catch (error) {
                    console.error(
                        `Error processing IdP config ${idpConfig.idpOauthConfigId}:`,
                        error
                    );
                    throw error;
                }
            }

            // Process licenseKey entries
            for (const key of licenseKeys) {
                try {
                    // Decrypt with old secret
                    const decryptedLicenseKeyId = decrypt(key.licenseKeyId, oldSecret);
                    const decryptedToken = decrypt(key.token, oldSecret);
                    const decryptedInstanceId = decrypt(key.instanceId, oldSecret);

                    // Re-encrypt with new secret
                    const encryptedLicenseKeyId = encrypt(
                        decryptedLicenseKeyId,
                        newSecret
                    );
                    const encryptedToken = encrypt(decryptedToken, newSecret);
                    const encryptedInstanceId = encrypt(
                        decryptedInstanceId,
                        newSecret
                    );

                    licenseKeyUpdates.push({
                        oldLicenseKeyId: key.licenseKeyId,
                        newLicenseKeyId: encryptedLicenseKeyId,
                        encryptedToken,
                        encryptedInstanceId
                    });
                } catch (error) {
                    console.error(
                        `Error processing license key ${key.licenseKeyId}:`,
                        error
                    );
                    throw error;
                }
            }

            // Perform all database updates in a single transaction
            console.log("\nUpdating database in transaction...");
            await db.transaction(async (trx) => {
                // Update idpOidcConfig entries
                for (const update of idpUpdates) {
                    await trx
                        .update(idpOidcConfig)
                        .set({
                            clientId: update.encryptedClientId,
                            clientSecret: update.encryptedClientSecret
                        })
                        .where(
                            eq(
                                idpOidcConfig.idpOauthConfigId,
                                update.idpOauthConfigId
                            )
                        );
                }

                // Update licenseKey entries (delete old, insert new)
                for (const update of licenseKeyUpdates) {
                    // Delete old entry
                    await trx
                        .delete(licenseKey)
                        .where(eq(licenseKey.licenseKeyId, update.oldLicenseKeyId));

                    // Insert new entry with re-encrypted values
                    await trx.insert(licenseKey).values({
                        licenseKeyId: update.newLicenseKeyId,
                        token: update.encryptedToken,
                        instanceId: update.encryptedInstanceId
                    });
                }
            });

            console.log(`Rotated ${idpUpdates.length} OIDC IdP configuration(s)`);
            console.log(`Rotated ${licenseKeyUpdates.length} license key(s)`);

            // Update config file with new secret
            console.log("\nUpdating config file...");
            config.server.secret = newSecret;
            const newConfigContent = yaml.dump(config, {
                indent: 2,
                lineWidth: -1
            });
            fs.writeFileSync(configPath, newConfigContent, "utf8");

            console.log(`Updated config file: ${configPath}`);

            console.log("\nServer secret rotation completed successfully!");
            console.log(`\nSummary:`);
            console.log(`  - OIDC IdP configurations: ${idpUpdates.length}`);
            console.log(`  - License keys: ${licenseKeyUpdates.length}`);
            console.log(
                `\n  IMPORTANT: Restart the server for the new secret to take effect.`
            );

            process.exit(0);
        } catch (error) {
            console.error("Error rotating server secret:", error);
            process.exit(1);
        }
    }
};
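Before its removal in this change, the command was invoked through pangctl with the two required flags defined in the builder above. A usage sketch (secret values are placeholders):

    # Decrypts with the old secret, re-encrypts with the new one, and updates the config file
    pangctl rotate-server-secret --old-secret "current-secret" --new-secret "replacement-secret"
    # Then restart the server for the new secret to take effect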
@@ -90,8 +90,7 @@ export const setAdminCredentials: CommandModule<{}, SetAdminCredentialsArgs> = {
            passwordHash,
            dateCreated: moment().toISOString(),
            serverAdmin: true,
            emailVerified: true,
            lastPasswordChange: new Date().getTime()
            emailVerified: true
        });

        console.log("Server admin created");
@@ -4,14 +4,10 @@ import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import { setAdminCredentials } from "@cli/commands/setAdminCredentials";
import { resetUserSecurityKeys } from "@cli/commands/resetUserSecurityKeys";
import { clearExitNodes } from "./commands/clearExitNodes";
import { rotateServerSecret } from "./commands/rotateServerSecret";

yargs(hideBin(process.argv))
    .scriptName("pangctl")
    .command(setAdminCredentials)
    .command(resetUserSecurityKeys)
    .command(clearExitNodes)
    .command(rotateServerSecret)
    .demandCommand()
    .help().argv;
@@ -17,4 +17,4 @@
        "lib": "@/lib",
        "hooks": "@/hooks"
    }
}
}
@@ -1,5 +1,5 @@
# To see all available options, please visit the docs:
# https://docs.pangolin.net/self-host/advanced/config-file
# https://docs.digpangolin.com/self-host/advanced/config-file

app:
    dashboard_url: http://localhost:3002
@@ -25,3 +25,4 @@ flags:
    disable_user_create_org: true
    allow_raw_resources: true
    enable_integration_api: true
    enable_clients: true
@@ -1,9 +1,5 @@
http:
    middlewares:
        badger:
            plugin:
                badger:
                    disableForwardAuth: true
        redirect-to-https:
            redirectScheme:
                scheme: https
@@ -17,7 +13,6 @@ http:
                - web
            middlewares:
                - redirect-to-https
                - badger

        # Next.js router (handles everything except API and WebSocket paths)
        next-router:
@@ -26,8 +21,6 @@ http:
            priority: 10
            entryPoints:
                - websecure
            middlewares:
                - badger
            tls:
                certResolver: letsencrypt

@@ -38,8 +31,6 @@ http:
            priority: 100
            entryPoints:
                - websecure
            middlewares:
                - badger
            tls:
                certResolver: letsencrypt
@@ -11,7 +11,7 @@ experimental:
    plugins:
        badger:
            moduleName: "github.com/fosrl/badger"
            version: "v1.3.0"
            version: "v1.2.0"

log:
    level: "DEBUG"
@@ -1,15 +0,0 @@
services:
    drizzle-gateway:
        image: ghcr.io/drizzle-team/gateway:latest
        ports:
            - "4984:4983"
        depends_on:
            - db
        environment:
            - STORE_PATH=/app
            - DATABASE_URL=postgresql://postgres:password@db:5432/postgres
        volumes:
            - drizzle-gateway-data:/app

volumes:
    drizzle-gateway-data:
@@ -20,7 +20,7 @@ services:
            pangolin:
                condition: service_healthy
        command:
            - --reachableAt=http://gerbil:3004
            - --reachableAt=http://gerbil:3003
            - --generateAndSaveKeyTo=/var/config/key
            - --remoteConfig=http://pangolin:3001/api/v1/
        volumes:
@@ -35,7 +35,7 @@ services:
            - 80:80 # Port for traefik because of the network_mode

    traefik:
        image: traefik:v3.6
        image: traefik:v3.5
        container_name: traefik
        restart: unless-stopped
        network_mode: service:gerbil # Ports appear on the gerbil service
@@ -52,4 +52,4 @@ networks:
    default:
        driver: bridge
        name: pangolin
        enable_ipv6: true
        enable_ipv6: true
@@ -11,7 +11,7 @@ services:
            - ./config/postgres:/var/lib/postgresql/data
        ports:
            - "5432:5432" # Map host port 5432 to container port 5432
        restart: no
        restart: no

    redis:
        image: redis:latest # Use the latest Redis image
@@ -1,7 +1,16 @@
import { defineConfig } from "drizzle-kit";
import path from "path";
import { build } from "@server/build";

const schema = [path.join("server", "db", "pg", "schema")];
let schema;
if (build === "oss") {
    schema = [path.join("server", "db", "pg", "schema.ts")];
} else {
    schema = [
        path.join("server", "db", "pg", "schema.ts"),
        path.join("server", "db", "pg", "privateSchema.ts")
    ];
}

export default defineConfig({
    dialect: "postgresql",
@@ -1,8 +1,17 @@
import { build } from "@server/build";
import { APP_PATH } from "@server/lib/consts";
import { defineConfig } from "drizzle-kit";
import path from "path";

const schema = [path.join("server", "db", "sqlite", "schema")];
let schema;
if (build === "oss") {
    schema = [path.join("server", "db", "sqlite", "schema.ts")];
} else {
    schema = [
        path.join("server", "db", "sqlite", "schema.ts"),
        path.join("server", "db", "sqlite", "privateSchema.ts")
    ];
}

export default defineConfig({
    dialect: "sqlite",
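With the schema list chosen by build flavor, the usual drizzle-kit commands pick everything up through the config file. A sketch, assuming these configs live at drizzle.pg.config.ts and drizzle.sqlite.config.ts (the file names are assumptions inferred from the two fragments above):

    # Generate migrations for whichever schema set the build flavor selects
    npx drizzle-kit generate --config drizzle.sqlite.config.ts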
251
esbuild.mjs
@@ -2,9 +2,8 @@ import esbuild from "esbuild";
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import { nodeExternalsPlugin } from "esbuild-node-externals";
import path from "path";
import fs from "fs";
// import { glob } from "glob";
// import path from "path";

const banner = `
// patch __dirname
@@ -19,25 +18,18 @@ const require = topLevelCreateRequire(import.meta.url);
`;

const argv = yargs(hideBin(process.argv))
    .usage("Usage: $0 -entry [string] -out [string] -build [string]")
    .usage("Usage: $0 -entry [string] -out [string]")
    .option("entry", {
        alias: "e",
        describe: "Entry point file",
        type: "string",
        demandOption: true
        demandOption: true,
    })
    .option("out", {
        alias: "o",
        describe: "Output file path",
        type: "string",
        demandOption: true
    })
    .option("build", {
        alias: "b",
        describe: "Build type (oss, saas, enterprise)",
        type: "string",
        choices: ["oss", "saas", "enterprise"],
        default: "oss"
        demandOption: true,
    })
    .help()
    .alias("help", "h").argv;
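With the new --build option wired in, one bundle is produced per flavor and the #dynamic/ imports handled below resolve accordingly. A usage sketch; the entry and output paths are illustrative, not taken from this diff:

    node esbuild.mjs --entry server/index.ts --out dist/server.mjs --build oss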
@@ -54,206 +46,6 @@ function getPackagePaths() {
    return ["package.json"];
}

// Plugin to guard against bad imports from #private
function privateImportGuardPlugin() {
    return {
        name: "private-import-guard",
        setup(build) {
            const violations = [];

            build.onResolve({ filter: /^#private\// }, (args) => {
                const importingFile = args.importer;

                // Check if the importing file is NOT in server/private
                const normalizedImporter = path.normalize(importingFile);
                const isInServerPrivate = normalizedImporter.includes(
                    path.normalize("server/private")
                );

                if (!isInServerPrivate) {
                    const violation = {
                        file: importingFile,
                        importPath: args.path,
                        resolveDir: args.resolveDir
                    };
                    violations.push(violation);

                    console.log(`PRIVATE IMPORT VIOLATION:`);
                    console.log(`  File: ${importingFile}`);
                    console.log(`  Import: ${args.path}`);
                    console.log(`  Resolve dir: ${args.resolveDir || "N/A"}`);
                    console.log("");
                }

                // Return null to let the default resolver handle it
                return null;
            });

            build.onEnd((result) => {
                if (violations.length > 0) {
                    console.log(
                        `\nSUMMARY: Found ${violations.length} private import violation(s):`
                    );
                    violations.forEach((v, i) => {
                        console.log(
                            `  ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`
                        );
                    });
                    console.log("");

                    result.errors.push({
                        text: `Private import violations detected: ${violations.length} violation(s) found`,
                        location: null,
                        notes: violations.map((v) => ({
                            text: `${path.relative(process.cwd(), v.file)} imports ${v.importPath}`,
                            location: null
                        }))
                    });
                }
            });
        }
    };
}
// Plugin to guard against #dynamic/ imports made from inside server/private
function dynamicImportGuardPlugin() {
    return {
        name: "dynamic-import-guard",
        setup(build) {
            const violations = [];

            build.onResolve({ filter: /^#dynamic\// }, (args) => {
                const importingFile = args.importer;

                // Check if the importing file IS in server/private
                const normalizedImporter = path.normalize(importingFile);
                const isInServerPrivate = normalizedImporter.includes(
                    path.normalize("server/private")
                );

                if (isInServerPrivate) {
                    const violation = {
                        file: importingFile,
                        importPath: args.path,
                        resolveDir: args.resolveDir
                    };
                    violations.push(violation);

                    console.log(`DYNAMIC IMPORT VIOLATION:`);
                    console.log(`  File: ${importingFile}`);
                    console.log(`  Import: ${args.path}`);
                    console.log(`  Resolve dir: ${args.resolveDir || "N/A"}`);
                    console.log("");
                }

                // Return null to let the default resolver handle it
                return null;
            });

            build.onEnd((result) => {
                if (violations.length > 0) {
                    console.log(
                        `\nSUMMARY: Found ${violations.length} dynamic import violation(s):`
                    );
                    violations.forEach((v, i) => {
                        console.log(
                            `  ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`
                        );
                    });
                    console.log("");

                    result.errors.push({
                        text: `Dynamic import violations detected: ${violations.length} violation(s) found`,
                        location: null,
                        notes: violations.map((v) => ({
                            text: `${path.relative(process.cwd(), v.file)} imports ${v.importPath}`,
                            location: null
                        }))
                    });
                }
            });
        }
    };
}
// Plugin to dynamically switch imports based on build type
|
||||
function dynamicImportSwitcherPlugin(buildValue) {
|
||||
return {
|
||||
name: "dynamic-import-switcher",
|
||||
setup(build) {
|
||||
const switches = [];
|
||||
|
||||
build.onStart(() => {
|
||||
console.log(
|
||||
`Dynamic import switcher using build type: ${buildValue}`
|
||||
);
|
||||
});
|
||||
|
||||
build.onResolve({ filter: /^#dynamic\// }, (args) => {
|
||||
// Extract the path after #dynamic/
|
||||
const dynamicPath = args.path.replace(/^#dynamic\//, "");
|
||||
|
||||
// Determine the replacement based on build type
|
||||
let replacement;
|
||||
if (buildValue === "oss") {
|
||||
replacement = `#open/${dynamicPath}`;
|
||||
} else if (
|
||||
buildValue === "saas" ||
|
||||
buildValue === "enterprise"
|
||||
) {
|
||||
replacement = `#closed/${dynamicPath}`; // We use #closed here so that the route guards dont complain after its been changed but this is the same as #private
|
||||
} else {
|
||||
console.warn(
|
||||
`Unknown build type '${buildValue}', defaulting to #open/`
|
||||
);
|
||||
replacement = `#open/${dynamicPath}`;
|
||||
}
|
||||
|
||||
const switchInfo = {
|
||||
file: args.importer,
|
||||
originalPath: args.path,
|
||||
replacementPath: replacement,
|
||||
buildType: buildValue
|
||||
};
|
||||
switches.push(switchInfo);
|
||||
|
||||
console.log(`DYNAMIC IMPORT SWITCH:`);
|
||||
console.log(` File: ${args.importer}`);
|
||||
console.log(` Original: ${args.path}`);
|
||||
console.log(
|
||||
` Switched to: ${replacement} (build: ${buildValue})`
|
||||
);
|
||||
console.log("");
|
||||
|
||||
// Rewrite the import path and let the normal resolution continue
|
||||
return build.resolve(replacement, {
|
||||
importer: args.importer,
|
||||
namespace: args.namespace,
|
||||
resolveDir: args.resolveDir,
|
||||
kind: args.kind
|
||||
});
|
||||
});
|
||||
|
||||
build.onEnd((result) => {
|
||||
if (switches.length > 0) {
|
||||
console.log(
|
||||
`\nDYNAMIC IMPORT SUMMARY: Switched ${switches.length} import(s) for build type '${buildValue}':`
|
||||
);
|
||||
switches.forEach((s, i) => {
|
||||
console.log(
|
||||
` ${i + 1}. ${path.relative(process.cwd(), s.file)}`
|
||||
);
|
||||
console.log(
|
||||
` ${s.originalPath} → ${s.replacementPath}`
|
||||
);
|
||||
});
|
||||
console.log("");
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
esbuild
|
||||
.build({
|
||||
entryPoints: [argv.entry],
|
||||
@@ -262,44 +54,19 @@ esbuild
|
||||
format: "esm",
|
||||
minify: false,
|
||||
banner: {
|
||||
js: banner
|
||||
js: banner,
|
||||
},
|
||||
platform: "node",
|
||||
external: ["body-parser"],
|
||||
plugins: [
|
||||
privateImportGuardPlugin(),
|
||||
dynamicImportGuardPlugin(),
|
||||
dynamicImportSwitcherPlugin(argv.build),
|
||||
nodeExternalsPlugin({
|
||||
packagePath: getPackagePaths()
|
||||
})
|
||||
packagePath: getPackagePaths(),
|
||||
}),
|
||||
],
|
||||
sourcemap: "inline",
|
||||
target: "node22"
|
||||
target: "node22",
|
||||
})
|
||||
.then((result) => {
|
||||
// Check if there were any errors in the build result
|
||||
if (result.errors && result.errors.length > 0) {
|
||||
console.error(
|
||||
`Build failed with ${result.errors.length} error(s):`
|
||||
);
|
||||
result.errors.forEach((error, i) => {
|
||||
console.error(`${i + 1}. ${error.text}`);
|
||||
if (error.notes) {
|
||||
error.notes.forEach((note) => {
|
||||
console.error(` - ${note.text}`);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// remove the output file if it was created
|
||||
if (fs.existsSync(argv.out)) {
|
||||
fs.unlinkSync(argv.out);
|
||||
}
|
||||
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
.then(() => {
|
||||
console.log("Build completed successfully");
|
||||
})
|
||||
.catch((error) => {
|
||||
|
||||
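For reference, the switcher above means a single `#dynamic/` specifier resolves to a different implementation per build flavor. A minimal sketch of that mapping, assuming `#open/`, `#closed/`, and `#dynamic/` are subpath-import aliases as the plugin treats them (the concrete module paths below are taken from imports elsewhere in this changeset):

    type BuildType = "oss" | "saas" | "enterprise";

    // Mirrors dynamicImportSwitcherPlugin: oss builds get the open implementation,
    // saas/enterprise builds get the closed one (#closed is equivalent to #private).
    function switchDynamicImport(specifier: string, build: BuildType): string {
        const rest = specifier.replace(/^#dynamic\//, "");
        return build === "oss" ? `#open/${rest}` : `#closed/${rest}`;
    }

    switchDynamicImport("#dynamic/lib/rateLimitStore", "oss");    // "#open/lib/rateLimitStore"
    switchDynamicImport("#dynamic/routers/hybrid", "enterprise"); // "#closed/routers/hybrid"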
@@ -1,19 +1,19 @@
import tseslint from "typescript-eslint";
import tseslint from 'typescript-eslint';

export default tseslint.config({
    files: ["**/*.{ts,tsx,js,jsx}"],
    languageOptions: {
        parser: tseslint.parser,
        parserOptions: {
            ecmaVersion: "latest",
            sourceType: "module",
            ecmaFeatures: {
                jsx: true
            }
        }
    },
    rules: {
        semi: "error",
        "prefer-const": "warn"
    files: ["**/*.{ts,tsx,js,jsx}"],
    languageOptions: {
        parser: tseslint.parser,
        parserOptions: {
            ecmaVersion: "latest",
            sourceType: "module",
            ecmaFeatures: {
                jsx: true
            }
        }
    });
    },
    rules: {
        "semi": "error",
        "prefer-const": "warn"
    }
});
@@ -18,11 +18,7 @@ put-back:
	mv main.go.bak main.go

dev-update-versions:
	if [ -z "$(tag)" ]; then \
		PANGOLIN_VERSION=$$(curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name'); \
	else \
		PANGOLIN_VERSION=$(tag); \
	fi && \
	PANGOLIN_VERSION=$$(curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name') && \
	GERBIL_VERSION=$$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name') && \
	BADGER_VERSION=$$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name') && \
	echo "Latest versions - Pangolin: $$PANGOLIN_VERSION, Gerbil: $$GERBIL_VERSION, Badger: $$BADGER_VERSION" && \
@@ -1,10 +1,15 @@
# To see all available options, please visit the docs:
# https://docs.pangolin.net/
# https://docs.digpangolin.com/self-host/advanced/config-file

gerbil:
    start_port: 51820
    base_endpoint: "{{.DashboardDomain}}"

{{if .HybridMode}}
managed:
    id: "{{.HybridId}}"
    secret: "{{.HybridSecret}}"

{{else}}
app:
    dashboard_url: "https://{{.DashboardDomain}}"
    log_level: "info"
@@ -14,6 +19,7 @@ app:
domains:
    domain1:
        base_domain: "{{.BaseDomain}}"
        cert_resolver: "letsencrypt"

server:
    secret: "{{.Secret}}"
@@ -22,7 +28,6 @@ server:
        methods: ["GET", "POST", "PUT", "DELETE", "PATCH"]
        allowed_headers: ["X-CSRF-Token", "Content-Type"]
        credentials: false
    {{if .EnableGeoblocking}}maxmind_db_path: "./config/GeoLite2-Country.mmdb"{{end}}
{{if .EnableEmail}}
email:
    smtp_host: "{{.EmailSMTPHost}}"
@@ -36,3 +41,4 @@ flags:
    disable_signup_without_invite: true
    disable_user_create_org: false
    allow_raw_resources: true
{{end}}
@@ -9,15 +9,10 @@ services:
            PARSERS: crowdsecurity/whitelists
            ENROLL_TAGS: docker
        healthcheck:
            test:
                - CMD
                - cscli
                - lapi
                - status
            interval: 10s
            timeout: 5s
            retries: 3
            start_period: 30s
            interval: 10s
            retries: 15
            timeout: 10s
            test: ["CMD", "cscli", "capi", "status"]
        labels:
            - "traefik.enable=false" # Disable traefik for crowdsec
        volumes:

@@ -1,9 +1,5 @@
http:
    middlewares:
        badger:
            plugin:
                badger:
                    disableForwardAuth: true
        redirect-to-https:
            redirectScheme:
                scheme: https
@@ -48,7 +44,7 @@ http:
                crowdsecAppsecUnreachableBlock: true # Block on unreachable
                crowdsecAppsecBodyLimit: 10485760
                crowdsecLapiKey: "PUT_YOUR_BOUNCER_KEY_HERE_OR_IT_WILL_NOT_WORK" # CrowdSec API key which you noted down later
                crowdsecLapiHost: crowdsec:8080 # CrowdSec
                crowdsecLapiHost: crowdsec:8080 # CrowdSec
                crowdsecLapiScheme: http # CrowdSec API scheme
                forwardedHeadersTrustedIPs: # Forwarded headers trusted IPs
                    - "0.0.0.0/0" # All IP addresses are trusted for forwarded headers (CHANGE MADE HERE)
@@ -67,7 +63,6 @@ http:
                - web
            middlewares:
                - redirect-to-https
                - badger

        # Next.js router (handles everything except API and WebSocket paths)
        next-router:
@@ -77,7 +72,6 @@ http:
                - websecure
            middlewares:
                - security-headers # Add security headers middleware
                - badger
            tls:
                certResolver: letsencrypt

@@ -89,7 +83,6 @@ http:
                - websecure
            middlewares:
                - security-headers # Add security headers middleware
                - badger
            tls:
                certResolver: letsencrypt

@@ -101,7 +94,6 @@ http:
                - websecure
            middlewares:
                - security-headers # Add security headers middleware
                - badger
            tls:
                certResolver: letsencrypt

@@ -114,13 +106,4 @@ http:
        api-service:
            loadBalancer:
                servers:
                    - url: "http://pangolin:3000" # API/WebSocket server

tcp:
    serversTransports:
        pp-transport-v1:
            proxyProtocol:
                version: 1
        pp-transport-v2:
            proxyProtocol:
                version: 2
                    - url: "http://pangolin:3000" # API/WebSocket server
@@ -1,11 +1,13 @@
name: pangolin
services:
    pangolin:
        image: docker.io/fosrl/pangolin:{{if .IsEnterprise}}ee-{{end}}{{.PangolinVersion}}
        image: docker.io/fosrl/pangolin:{{.PangolinVersion}}
        container_name: pangolin
        restart: unless-stopped
        volumes:
            - ./config:/app/config
            - pangolin-data:/var/certificates
            - pangolin-data:/var/dynamic
        healthcheck:
            test: ["CMD", "curl", "-f", "http://localhost:3001/api/v1/"]
            interval: "10s"
@@ -20,7 +22,7 @@ services:
            pangolin:
                condition: service_healthy
        command:
            - --reachableAt=http://gerbil:3004
            - --reachableAt=http://gerbil:3003
            - --generateAndSaveKeyTo=/var/config/key
            - --remoteConfig=http://pangolin:3001/api/v1/
        volumes:
@@ -31,11 +33,11 @@ services:
        ports:
            - 51820:51820/udp
            - 21820:21820/udp
            - 443:443
            - 443:{{if .HybridMode}}8443{{else}}443{{end}}
            - 80:80
{{end}}
    traefik:
        image: docker.io/traefik:v3.6
        image: docker.io/traefik:v3.5
        container_name: traefik
        restart: unless-stopped
        {{if .InstallGerbil}}
@@ -54,9 +56,15 @@ services:
            - ./config/traefik:/etc/traefik:ro # Volume to store the Traefik configuration
            - ./config/letsencrypt:/letsencrypt # Volume to store the Let's Encrypt certificates
            - ./config/traefik/logs:/var/log/traefik # Volume to store Traefik logs
            # Shared volume for certificates and dynamic config in file mode
            - pangolin-data:/var/certificates:ro
            - pangolin-data:/var/dynamic:ro

networks:
    default:
        driver: bridge
        name: pangolin
{{if .EnableIPv6}} enable_ipv6: true{{end}}

volumes:
    pangolin-data:
@@ -1,9 +1,5 @@
http:
    middlewares:
        badger:
            plugin:
                badger:
                    disableForwardAuth: true
        redirect-to-https:
            redirectScheme:
                scheme: https
@@ -17,7 +13,6 @@ http:
                - web
            middlewares:
                - redirect-to-https
                - badger

        # Next.js router (handles everything except API and WebSocket paths)
        next-router:
@@ -25,8 +20,6 @@ http:
            service: next-service
            entryPoints:
                - websecure
            middlewares:
                - badger
            tls:
                certResolver: letsencrypt

@@ -36,8 +29,6 @@ http:
            service: api-service
            entryPoints:
                - websecure
            middlewares:
                - badger
            tls:
                certResolver: letsencrypt

@@ -47,8 +38,6 @@ http:
            service: api-service
            entryPoints:
                - websecure
            middlewares:
                - badger
            tls:
                certResolver: letsencrypt

@@ -62,12 +51,3 @@ http:
            loadBalancer:
                servers:
                    - url: "http://pangolin:3000" # API/WebSocket server

tcp:
    serversTransports:
        pp-transport-v1:
            proxyProtocol:
                version: 1
        pp-transport-v2:
            proxyProtocol:
                version: 2
@@ -3,12 +3,17 @@ api:
    dashboard: true

providers:
{{if not .HybridMode}}
    http:
        endpoint: "http://pangolin:3001/api/v1/traefik-config"
        pollInterval: "5s"
    file:
        filename: "/etc/traefik/dynamic_config.yml"

{{else}}
    file:
        directory: "/var/dynamic"
        watch: true
{{end}}
experimental:
    plugins:
        badger:
@@ -22,7 +27,7 @@ log:
    maxBackups: 3
    maxAge: 3
    compress: true

{{if not .HybridMode}}
certificatesResolvers:
    letsencrypt:
        acme:
@@ -31,24 +36,25 @@ certificatesResolvers:
            email: "{{.LetsEncryptEmail}}"
            storage: "/letsencrypt/acme.json"
            caServer: "https://acme-v02.api.letsencrypt.org/directory"

{{end}}
entryPoints:
    web:
        address: ":80"
    websecure:
        address: ":443"
{{if .HybridMode}} proxyProtocol:
            trustedIPs:
                - 0.0.0.0/0
                - ::1/128{{end}}
        transport:
            respondingTimeouts:
                readTimeout: "30m"
        http:
{{if not .HybridMode}} http:
            tls:
                certResolver: "letsencrypt"
        encodedCharacters:
            allowEncodedSlash: true
            allowEncodedQuestionMark: true
                certResolver: "letsencrypt"{{end}}

serversTransport:
    insecureSkipVerify: true

ping:
    entryPoint: "web"
    entryPoint: "web"
@@ -73,7 +73,7 @@ func installDocker() error {
	case strings.Contains(osRelease, "ID=ubuntu"):
		installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
			apt-get update &&
			apt-get install -y apt-transport-https ca-certificates curl gpg &&
			apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&
			curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
			echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
			apt-get update &&
@@ -82,7 +82,7 @@ func installDocker() error {
	case strings.Contains(osRelease, "ID=debian"):
		installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
			apt-get update &&
			apt-get install -y apt-transport-https ca-certificates curl gpg &&
			apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&
			curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
			echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
			apt-get update &&
@@ -1,180 +0,0 @@
#!/bin/bash

# Get installer - Cross-platform installation script
# Usage: curl -fsSL https://raw.githubusercontent.com/fosrl/installer/refs/heads/main/get-installer.sh | bash

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# GitHub repository info
REPO="fosrl/pangolin"
GITHUB_API_URL="https://api.github.com/repos/${REPO}/releases/latest"

# Function to print colored output
print_status() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Function to get latest version from GitHub API
get_latest_version() {
    local latest_info

    if command -v curl >/dev/null 2>&1; then
        latest_info=$(curl -fsSL "$GITHUB_API_URL" 2>/dev/null)
    elif command -v wget >/dev/null 2>&1; then
        latest_info=$(wget -qO- "$GITHUB_API_URL" 2>/dev/null)
    else
        print_error "Neither curl nor wget is available. Please install one of them." >&2
        exit 1
    fi

    if [ -z "$latest_info" ]; then
        print_error "Failed to fetch latest version information" >&2
        exit 1
    fi

    # Extract version from JSON response (works without jq)
    local version=$(echo "$latest_info" | grep '"tag_name"' | head -1 | sed 's/.*"tag_name": *"\([^"]*\)".*/\1/')

    if [ -z "$version" ]; then
        print_error "Could not parse version from GitHub API response" >&2
        exit 1
    fi

    # Remove 'v' prefix if present
    version=$(echo "$version" | sed 's/^v//')

    echo "$version"
}

# Detect OS and architecture
detect_platform() {
    local os arch

    # Detect OS - only support Linux
    case "$(uname -s)" in
        Linux*) os="linux" ;;
        *)
            print_error "Unsupported operating system: $(uname -s). Only Linux is supported."
            exit 1
            ;;
    esac

    # Detect architecture - only support amd64 and arm64
    case "$(uname -m)" in
        x86_64|amd64) arch="amd64" ;;
        arm64|aarch64) arch="arm64" ;;
        *)
            print_error "Unsupported architecture: $(uname -m). Only amd64 and arm64 are supported on Linux."
            exit 1
            ;;
    esac

    echo "${os}_${arch}"
}

# Get installation directory
get_install_dir() {
    # Install to the current directory
    local install_dir="$(pwd)"
    if [ ! -d "$install_dir" ]; then
        print_error "Installation directory does not exist: $install_dir"
        exit 1
    fi
    echo "$install_dir"
}

# Download and install installer
install_installer() {
    local platform="$1"
    local install_dir="$2"
    local binary_name="installer_${platform}"

    local download_url="${BASE_URL}/${binary_name}"
    local temp_file="/tmp/installer"
    local final_path="${install_dir}/installer"

    print_status "Downloading installer from ${download_url}"

    # Download the binary
    if command -v curl >/dev/null 2>&1; then
        curl -fsSL "$download_url" -o "$temp_file"
    elif command -v wget >/dev/null 2>&1; then
        wget -q "$download_url" -O "$temp_file"
    else
        print_error "Neither curl nor wget is available. Please install one of them."
        exit 1
    fi

    # Create install directory if it doesn't exist
    mkdir -p "$install_dir"

    # Move binary to install directory
    mv "$temp_file" "$final_path"

    # Make executable
    chmod +x "$final_path"

    print_status "Installer downloaded to ${final_path}"
}

# Verify installation
verify_installation() {
    local install_dir="$1"
    local installer_path="${install_dir}/installer"

    if [ -f "$installer_path" ] && [ -x "$installer_path" ]; then
        print_status "Installation successful!"
        return 0
    else
        print_error "Installation failed. Binary not found or not executable."
        return 1
    fi
}

# Main installation process
main() {
    print_status "Installing latest version of installer..."

    # Get latest version
    print_status "Fetching latest version from GitHub..."
    VERSION=$(get_latest_version)
    print_status "Latest version: v${VERSION}"

    # Set base URL with the fetched version
    BASE_URL="https://github.com/${REPO}/releases/download/${VERSION}"

    # Detect platform
    PLATFORM=$(detect_platform)
    print_status "Detected platform: ${PLATFORM}"

    # Get install directory
    INSTALL_DIR=$(get_install_dir)
    print_status "Install directory: ${INSTALL_DIR}"

    # Install installer
    install_installer "$PLATFORM" "$INSTALL_DIR"

    # Verify installation
    if verify_installation "$INSTALL_DIR"; then
        print_status "Installer is ready to use!"
    else
        exit 1
    fi
}

# Run main function
main "$@"
@@ -3,8 +3,8 @@ module installer
go 1.24.0

require (
	golang.org/x/term v0.38.0
	golang.org/x/term v0.35.0
	gopkg.in/yaml.v3 v3.0.1
)

require golang.org/x/sys v0.39.0 // indirect
require golang.org/x/sys v0.36.0 // indirect

@@ -1,7 +1,7 @@
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=

@@ -54,31 +54,13 @@ func readBool(reader *bufio.Reader, prompt string, defaultValue bool) bool {
	if defaultValue {
		defaultStr = "yes"
	}
	for {
		input := readString(reader, prompt+" (yes/no)", defaultStr)
		lower := strings.ToLower(input)
		if lower == "yes" {
			return true
		} else if lower == "no" {
			return false
		} else {
			fmt.Println("Please enter 'yes' or 'no'.")
		}
	}
	input := readString(reader, prompt+" (yes/no)", defaultStr)
	return strings.ToLower(input) == "yes"
}

func readBoolNoDefault(reader *bufio.Reader, prompt string) bool {
	for {
		input := readStringNoDefault(reader, prompt+" (yes/no)")
		lower := strings.ToLower(input)
		if lower == "yes" {
			return true
		} else if lower == "no" {
			return false
		} else {
			fmt.Println("Please enter 'yes' or 'no'.")
		}
	}
	input := readStringNoDefault(reader, prompt+" (yes/no)")
	return strings.ToLower(input) == "yes"
}

func readInt(reader *bufio.Reader, prompt string, defaultValue int) int {
209 install/main.go
@@ -2,6 +2,7 @@ package main

import (
	"bufio"
	"bytes"
	"embed"
	"fmt"
	"io"
@@ -47,16 +48,17 @@ type Config struct {
	InstallGerbil bool
	TraefikBouncerKey string
	DoCrowdsecInstall bool
	EnableGeoblocking bool
	Secret string
	IsEnterprise bool
	HybridMode bool
	HybridId string
	HybridSecret string
}

type SupportedContainer string

const (
	Docker SupportedContainer = "docker"
	Podman SupportedContainer = "podman"
	Docker    SupportedContainer = "docker"
	Podman    SupportedContainer = "podman"
	Undefined SupportedContainer = "undefined"
)

@@ -96,6 +98,24 @@ func main() {

	fmt.Println("\n=== Generating Configuration Files ===")

	// If the secret and id are not generated then generate them
	if config.HybridMode && (config.HybridId == "" || config.HybridSecret == "") {
		// fmt.Println("Requesting hybrid credentials from cloud...")
		credentials, err := requestHybridCredentials()
		if err != nil {
			fmt.Printf("Error requesting hybrid credentials: %v\n", err)
			fmt.Println("Please obtain credentials manually from the dashboard and run the installer again.")
			os.Exit(1)
		}
		config.HybridId = credentials.RemoteExitNodeId
		config.HybridSecret = credentials.Secret
		fmt.Printf("Your managed credentials have been obtained successfully.\n")
		fmt.Printf("  ID: %s\n", config.HybridId)
		fmt.Printf("  Secret: %s\n", config.HybridSecret)
		fmt.Println("Take these to the Pangolin dashboard https://pangolin.fossorial.io to adopt your node.")
		readBool(reader, "Have you adopted your node?", true)
	}

	if err := createConfigFiles(config); err != nil {
		fmt.Printf("Error creating config files: %v\n", err)
		os.Exit(1)
@@ -105,15 +125,6 @@ func main() {

	fmt.Println("\nConfiguration files created successfully!")

	// Download MaxMind database if requested
	if config.EnableGeoblocking {
		fmt.Println("\n=== Downloading MaxMind Database ===")
		if err := downloadMaxMindDatabase(); err != nil {
			fmt.Printf("Error downloading MaxMind database: %v\n", err)
			fmt.Println("You can download it manually later if needed.")
		}
	}

	fmt.Println("\n=== Starting installation ===")

	if readBool(reader, "Would you like to install and start the containers?", true) {
@@ -161,34 +172,9 @@ func main() {
	} else {
		alreadyInstalled = true
		fmt.Println("Looks like you already installed Pangolin!")

		// Check if MaxMind database exists and offer to update it
		fmt.Println("\n=== MaxMind Database Update ===")
		if _, err := os.Stat("config/GeoLite2-Country.mmdb"); err == nil {
			fmt.Println("MaxMind GeoLite2 Country database found.")
			if readBool(reader, "Would you like to update the MaxMind database to the latest version?", false) {
				if err := downloadMaxMindDatabase(); err != nil {
					fmt.Printf("Error updating MaxMind database: %v\n", err)
					fmt.Println("You can try updating it manually later if needed.")
				}
			}
		} else {
			fmt.Println("MaxMind GeoLite2 Country database not found.")
			if readBool(reader, "Would you like to download the MaxMind GeoLite2 database for geoblocking functionality?", false) {
				if err := downloadMaxMindDatabase(); err != nil {
					fmt.Printf("Error downloading MaxMind database: %v\n", err)
					fmt.Println("You can try downloading it manually later if needed.")
				}
				// Now you need to update your config file accordingly to enable geoblocking
				fmt.Print("Please remember to update your config/config.yml file to enable geoblocking! \n\n")
				// add maxmind_db_path: "./config/GeoLite2-Country.mmdb" under server
				fmt.Println("Add the following line under the 'server' section:")
				fmt.Println(" maxmind_db_path: \"./config/GeoLite2-Country.mmdb\"")
			}
		}
	}

	if !checkIsCrowdsecInstalledInCompose() {
	if !checkIsCrowdsecInstalledInCompose() && !checkIsPangolinInstalledWithHybrid() {
		fmt.Println("\n=== CrowdSec Install ===")
		// check if crowdsec is installed
		if readBool(reader, "Would you like to install CrowdSec?", false) {
@@ -210,8 +196,8 @@ func main() {

	parsedURL, err := url.Parse(appConfig.DashboardURL)
	if err != nil {
		fmt.Printf("Error parsing URL: %v\n", err)
		return
		fmt.Printf("Error parsing URL: %v\n", err)
		return
	}

	config.DashboardDomain = parsedURL.Hostname()
@@ -239,11 +225,12 @@ func main() {
	}

	fmt.Println("CrowdSec installed successfully!")
	return
	}
	}
	}

	if !alreadyInstalled || config.DoCrowdsecInstall {
	if !config.HybridMode && !alreadyInstalled {
		// Setup Token Section
		fmt.Println("\n=== Setup Token ===")

@@ -264,7 +251,9 @@ func main() {

	fmt.Println("\nInstallation complete!")

	fmt.Printf("\nTo complete the initial setup, please visit:\nhttps://%s/auth/initial-setup\n", config.DashboardDomain)
	if !config.HybridMode && !checkIsPangolinInstalledWithHybrid() {
		fmt.Printf("\nTo complete the initial setup, please visit:\nhttps://%s/auth/initial-setup\n", config.DashboardDomain)
	}
}

func podmanOrDocker(reader *bufio.Reader) SupportedContainer {
@@ -301,7 +290,7 @@ func podmanOrDocker(reader *bufio.Reader) SupportedContainer {
	// Linux only.

	if err := run("bash", "-c", "echo 'net.ipv4.ip_unprivileged_port_start=80' >> /etc/sysctl.conf && sysctl -p"); err != nil {
		fmt.Printf("Error configuring unprivileged ports: %v\n", err)
		fmt.Sprintf("failed to configure unprivileged ports: %v.\n", err)
		os.Exit(1)
	}
	} else {
@@ -339,44 +328,66 @@ func collectUserInput(reader *bufio.Reader) Config {

	// Basic configuration
	fmt.Println("\n=== Basic Configuration ===")

	config.IsEnterprise = readBoolNoDefault(reader, "Do you want to install the Enterprise version of Pangolin? The EE is free for personal use or for businesses making less than 100k USD annually.")

	config.BaseDomain = readString(reader, "Enter your base domain (no subdomain e.g. example.com)", "")

	// Set default dashboard domain after base domain is collected
	defaultDashboardDomain := ""
	if config.BaseDomain != "" {
		defaultDashboardDomain = "pangolin." + config.BaseDomain
	}
	config.DashboardDomain = readString(reader, "Enter the domain for the Pangolin dashboard", defaultDashboardDomain)
	config.LetsEncryptEmail = readString(reader, "Enter email for Let's Encrypt certificates", "")
	config.InstallGerbil = readBool(reader, "Do you want to use Gerbil to allow tunneled connections", true)

	// Email configuration
	fmt.Println("\n=== Email Configuration ===")
	config.EnableEmail = readBool(reader, "Enable email functionality (SMTP)", false)

	if config.EnableEmail {
		config.EmailSMTPHost = readString(reader, "Enter SMTP host", "")
		config.EmailSMTPPort = readInt(reader, "Enter SMTP port (default 587)", 587)
		config.EmailSMTPUser = readString(reader, "Enter SMTP username", "")
		config.EmailSMTPPass = readString(reader, "Enter SMTP password", "") // Should this be readPassword?
		config.EmailNoReply = readString(reader, "Enter no-reply email address (often the same as SMTP username)", "")
	for {
		response := readString(reader, "Do you want to install Pangolin as a cloud-managed (beta) node? (yes/no)", "")
		if strings.EqualFold(response, "yes") || strings.EqualFold(response, "y") {
			config.HybridMode = true
			break
		} else if strings.EqualFold(response, "no") || strings.EqualFold(response, "n") {
			config.HybridMode = false
			break
		}
		fmt.Println("Please answer 'yes' or 'no'")
	}

	// Validate required fields
	if config.BaseDomain == "" {
		fmt.Println("Error: Domain name is required")
		os.Exit(1)
	}
	if config.LetsEncryptEmail == "" {
		fmt.Println("Error: Let's Encrypt email is required")
		os.Exit(1)
	}
	if config.EnableEmail && config.EmailNoReply == "" {
		fmt.Println("Error: No-reply email address is required when email is enabled")
		os.Exit(1)
	if config.HybridMode {
		alreadyHaveCreds := readBool(reader, "Do you already have credentials from the dashboard? If not, we will create them later", false)

		if alreadyHaveCreds {
			config.HybridId = readString(reader, "Enter your ID", "")
			config.HybridSecret = readString(reader, "Enter your secret", "")
		}

		// Try to get public IP as default
		publicIP := getPublicIP()
		if publicIP != "" {
			fmt.Printf("Detected public IP: %s\n", publicIP)
		}
		config.DashboardDomain = readString(reader, "The public addressable IP address for this node or a domain pointing to it", publicIP)
		config.InstallGerbil = true
	} else {
		config.BaseDomain = readString(reader, "Enter your base domain (no subdomain e.g. example.com)", "")

		// Set default dashboard domain after base domain is collected
		defaultDashboardDomain := ""
		if config.BaseDomain != "" {
			defaultDashboardDomain = "pangolin." + config.BaseDomain
		}
		config.DashboardDomain = readString(reader, "Enter the domain for the Pangolin dashboard", defaultDashboardDomain)
		config.LetsEncryptEmail = readString(reader, "Enter email for Let's Encrypt certificates", "")
		config.InstallGerbil = readBool(reader, "Do you want to use Gerbil to allow tunneled connections", true)

		// Email configuration
		fmt.Println("\n=== Email Configuration ===")
		config.EnableEmail = readBool(reader, "Enable email functionality (SMTP)", false)

		if config.EnableEmail {
			config.EmailSMTPHost = readString(reader, "Enter SMTP host", "")
			config.EmailSMTPPort = readInt(reader, "Enter SMTP port (default 587)", 587)
			config.EmailSMTPUser = readString(reader, "Enter SMTP username", "")
			config.EmailSMTPPass = readString(reader, "Enter SMTP password", "") // Should this be readPassword?
			config.EmailNoReply = readString(reader, "Enter no-reply email address", "")
		}

		// Validate required fields
		if config.BaseDomain == "" {
			fmt.Println("Error: Domain name is required")
			os.Exit(1)
		}
		if config.LetsEncryptEmail == "" {
			fmt.Println("Error: Let's Encrypt email is required")
			os.Exit(1)
		}
	}

	// Advanced configuration
@@ -384,7 +395,6 @@ func collectUserInput(reader *bufio.Reader) Config {
	fmt.Println("\n=== Advanced Configuration ===")

	config.EnableIPv6 = readBool(reader, "Is your server IPv6 capable?", true)
	config.EnableGeoblocking = readBool(reader, "Do you want to download the MaxMind GeoLite2 database for geoblocking functionality?", true)

	if config.DashboardDomain == "" {
		fmt.Println("Error: Dashboard Domain name is required")
@@ -419,6 +429,11 @@ func createConfigFiles(config Config) error {
		return nil
	}

	// the hybrid does not need the dynamic config
	if config.HybridMode && strings.Contains(path, "dynamic_config.yml") {
		return nil
	}

	// skip .DS_Store
	if strings.Contains(path, ".DS_Store") {
		return nil
@@ -648,30 +663,18 @@ func checkPortsAvailable(port int) error {
	return nil
}

func downloadMaxMindDatabase() error {
	fmt.Println("Downloading MaxMind GeoLite2 Country database...")

	// Download the GeoLite2 Country database
	if err := run("curl", "-L", "-o", "GeoLite2-Country.tar.gz",
		"https://github.com/GitSquared/node-geolite2-redist/raw/refs/heads/master/redist/GeoLite2-Country.tar.gz"); err != nil {
		return fmt.Errorf("failed to download GeoLite2 database: %v", err)
func checkIsPangolinInstalledWithHybrid() bool {
	// Check if config/config.yml exists and contains hybrid section
	if _, err := os.Stat("config/config.yml"); err != nil {
		return false
	}

	// Extract the database
	if err := run("tar", "-xzf", "GeoLite2-Country.tar.gz"); err != nil {
		return fmt.Errorf("failed to extract GeoLite2 database: %v", err)
	// Read config file to check for hybrid section
	content, err := os.ReadFile("config/config.yml")
	if err != nil {
		return false
	}

	// Find the .mmdb file and move it to the config directory
	if err := run("bash", "-c", "mv GeoLite2-Country_*/GeoLite2-Country.mmdb config/"); err != nil {
		return fmt.Errorf("failed to move GeoLite2 database to config directory: %v", err)
	}

	// Clean up the downloaded files
	if err := run("rm", "-rf", "GeoLite2-Country.tar.gz", "GeoLite2-Country_*"); err != nil {
		fmt.Printf("Warning: failed to clean up temporary files: %v\n", err)
	}

	fmt.Println("MaxMind GeoLite2 Country database downloaded successfully!")
	return nil
	// Check for hybrid section
	return bytes.Contains(content, []byte("managed:"))
}
110 install/quickStart.go Normal file
@@ -0,0 +1,110 @@
package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"
)

const (
	FRONTEND_SECRET_KEY = "af4e4785-7e09-11f0-b93a-74563c4e2a7e"
	// CLOUD_API_URL = "https://pangolin.fossorial.io/api/v1/remote-exit-node/quick-start"
	CLOUD_API_URL = "https://pangolin.fossorial.io/api/v1/remote-exit-node/quick-start"
)

// HybridCredentials represents the response from the cloud API
type HybridCredentials struct {
	RemoteExitNodeId string `json:"remoteExitNodeId"`
	Secret string `json:"secret"`
}

// APIResponse represents the full response structure from the cloud API
type APIResponse struct {
	Data HybridCredentials `json:"data"`
}

// RequestPayload represents the request body structure
type RequestPayload struct {
	Token string `json:"token"`
}

func generateValidationToken() string {
	timestamp := time.Now().UnixMilli()
	data := fmt.Sprintf("%s|%d", FRONTEND_SECRET_KEY, timestamp)
	obfuscated := make([]byte, len(data))
	for i, char := range []byte(data) {
		obfuscated[i] = char + 5
	}
	return base64.StdEncoding.EncodeToString(obfuscated)
}

// requestHybridCredentials makes an HTTP POST request to the cloud API
// to get hybrid credentials (ID and secret)
func requestHybridCredentials() (*HybridCredentials, error) {
	// Generate validation token
	token := generateValidationToken()

	// Create request payload
	payload := RequestPayload{
		Token: token,
	}

	// Marshal payload to JSON
	jsonData, err := json.Marshal(payload)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request payload: %v", err)
	}

	// Create HTTP request
	req, err := http.NewRequest("POST", CLOUD_API_URL, bytes.NewBuffer(jsonData))
	if err != nil {
		return nil, fmt.Errorf("failed to create HTTP request: %v", err)
	}

	// Set headers
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-CSRF-Token", "x-csrf-protection")

	// Create HTTP client with timeout
	client := &http.Client{
		Timeout: 30 * time.Second,
	}

	// Make the request
	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to make HTTP request: %v", err)
	}
	defer resp.Body.Close()

	// Check response status
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("API request failed with status code: %d", resp.StatusCode)
	}

	// Read response body for debugging
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response body: %v", err)
	}

	// Print the raw JSON response for debugging
	// fmt.Printf("Raw JSON response: %s\n", string(body))

	// Parse response
	var apiResponse APIResponse
	if err := json.Unmarshal(body, &apiResponse); err != nil {
		return nil, fmt.Errorf("failed to decode API response: %v", err)
	}

	// Validate response data
	if apiResponse.Data.RemoteExitNodeId == "" || apiResponse.Data.Secret == "" {
		return nil, fmt.Errorf("invalid response: missing remoteExitNodeId or secret")
	}

	return &apiResponse.Data, nil
}
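Note that generateValidationToken is light obfuscation rather than authentication: the token is `FRONTEND_SECRET_KEY|<unix ms>` with every byte shifted up by 5, then base64-encoded. A sketch of the same scheme in TypeScript (Node runtime assumed; the decode helper is illustrative and not part of the source, and how the quick-start endpoint validates the token server-side is an assumption):

    // Same scheme as generateValidationToken in install/quickStart.go.
    const FRONTEND_SECRET_KEY = "af4e4785-7e09-11f0-b93a-74563c4e2a7e";

    function generateValidationToken(): string {
        const data = `${FRONTEND_SECRET_KEY}|${Date.now()}`;
        // Shift each byte by +5, as the Go loop does, then base64-encode.
        const shifted = Uint8Array.from(data, (c) => c.charCodeAt(0) + 5);
        return Buffer.from(shifted).toString("base64");
    }

    // Illustrative inverse (not in the source): undo the shift and split on "|".
    function decodeValidationToken(token: string): { key: string; timestampMs: number } {
        const data = Array.from(Buffer.from(token, "base64"), (b) => String.fromCharCode(b - 5)).join("");
        const [key, ts] = data.split("|");
        return { key, timestampMs: Number(ts) };
    }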
1105 messages/de-DE.json
1108 messages/en-US.json
1173 messages/fr-FR.json
2101 messages/zh-TW.json
@@ -1,16 +1,12 @@
import type { NextConfig } from "next";
import createNextIntlPlugin from "next-intl/plugin";

const withNextIntl = createNextIntlPlugin();

const nextConfig: NextConfig = {
    reactStrictMode: false,
/** @type {import("next").NextConfig} */
const nextConfig = {
    eslint: {
        ignoreDuringBuilds: true
    },
    experimental: {
        reactCompiler: true
    },
    output: "standalone"
};
14551 package-lock.json generated
215 package.json
@@ -3,7 +3,7 @@
    "version": "0.0.0",
    "private": true,
    "type": "module",
    "description": "Identity-aware VPN and proxy for remote access to anything, anywhere and Dashboard UI",
    "description": "Tunneled Reverse Proxy Management Server with Identity and Access Control and Dashboard UI",
    "homepage": "https://github.com/fosrl/pangolin",
    "repository": {
        "type": "git",
@@ -19,163 +19,150 @@
        "db:sqlite:studio": "drizzle-kit studio --config=./drizzle.sqlite.config.ts",
        "db:pg:studio": "drizzle-kit studio --config=./drizzle.pg.config.ts",
        "db:clear-migrations": "rm -rf server/migrations",
        "set:oss": "echo 'export const build = \"oss\" as \"saas\" | \"enterprise\" | \"oss\";' > server/build.ts && cp tsconfig.oss.json tsconfig.json",
        "set:saas": "echo 'export const build = \"saas\" as \"saas\" | \"enterprise\" | \"oss\";' > server/build.ts && cp tsconfig.saas.json tsconfig.json",
        "set:enterprise": "echo 'export const build = \"enterprise\" as \"saas\" | \"enterprise\" | \"oss\";' > server/build.ts && cp tsconfig.enterprise.json tsconfig.json",
        "set:sqlite": "echo 'export * from \"./sqlite\";\nexport const driver: \"pg\" | \"sqlite\" = \"sqlite\";' > server/db/index.ts",
        "set:pg": "echo 'export * from \"./pg\";\nexport const driver: \"pg\" | \"sqlite\" = \"pg\";' > server/db/index.ts",
        "next:build": "next build",
        "set:oss": "echo 'export const build = \"oss\" as any;' > server/build.ts",
        "set:saas": "echo 'export const build = \"saas\" as any;' > server/build.ts",
        "set:enterprise": "echo 'export const build = \"enterprise\" as any;' > server/build.ts",
        "set:sqlite": "echo 'export * from \"./sqlite\";' > server/db/index.ts",
        "set:pg": "echo 'export * from \"./pg\";' > server/db/index.ts",
        "build:sqlite": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs",
        "build:pg": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs",
        "start": "ENVIRONMENT=prod node dist/migrations.mjs && ENVIRONMENT=prod NODE_ENV=development node --enable-source-maps dist/server.mjs",
        "email": "email dev --dir server/emails/templates --port 3005",
        "build:cli": "node esbuild.mjs -e cli/index.ts -o dist/cli.mjs",
        "format": "prettier --write ."
        "db:sqlite:seed-exit-node": "sqlite3 config/db/db.sqlite \"INSERT INTO exitNodes (exitNodeId, name, address, endpoint, publicKey, listenPort, reachableAt, maxConnections, online, lastPing, type, region) VALUES (null, 'test', '10.0.0.1/24', 'localhost', 'MJ44MpnWGxMZURgxW/fWXDFsejhabnEFYDo60LQwK3A=', 1234, 'http://localhost:3003', 123, 1, null, 'gerbil', null);\""
    },
    "dependencies": {
        "@asteasolutions/zod-to-openapi": "8.2.0",
        "@aws-sdk/client-s3": "3.955.0",
        "@faker-js/faker": "10.1.0",
        "@headlessui/react": "2.2.9",
        "@asteasolutions/zod-to-openapi": "^7.3.4",
        "@aws-sdk/client-s3": "3.837.0",
        "@hookform/resolvers": "5.2.2",
        "@monaco-editor/react": "4.7.0",
        "@node-rs/argon2": "2.0.2",
        "@node-rs/argon2": "^2.0.2",
        "@oslojs/crypto": "1.0.1",
        "@oslojs/encoding": "1.1.0",
        "@radix-ui/react-avatar": "1.1.11",
        "@radix-ui/react-avatar": "1.1.10",
        "@radix-ui/react-checkbox": "1.3.3",
        "@radix-ui/react-collapsible": "1.1.12",
        "@radix-ui/react-dialog": "1.1.15",
        "@radix-ui/react-dropdown-menu": "2.1.16",
        "@radix-ui/react-icons": "1.3.2",
        "@radix-ui/react-label": "2.1.8",
        "@radix-ui/react-label": "2.1.7",
        "@radix-ui/react-popover": "1.1.15",
        "@radix-ui/react-progress": "1.1.8",
        "@radix-ui/react-progress": "^1.1.7",
        "@radix-ui/react-radio-group": "1.3.8",
        "@radix-ui/react-scroll-area": "1.2.10",
        "@radix-ui/react-scroll-area": "^1.2.10",
        "@radix-ui/react-select": "2.2.6",
        "@radix-ui/react-separator": "1.1.8",
        "@radix-ui/react-slot": "1.2.4",
        "@radix-ui/react-separator": "1.1.7",
        "@radix-ui/react-slot": "1.2.3",
        "@radix-ui/react-switch": "1.2.6",
        "@radix-ui/react-tabs": "1.1.13",
        "@radix-ui/react-toast": "1.2.15",
        "@radix-ui/react-tooltip": "1.2.8",
        "@react-email/components": "1.0.2",
        "@react-email/render": "2.0.0",
        "@react-email/tailwind": "2.0.2",
        "@simplewebauthn/browser": "13.2.2",
        "@simplewebauthn/server": "13.2.2",
        "@tailwindcss/forms": "0.5.11",
        "@tanstack/react-query": "5.90.12",
        "@radix-ui/react-tooltip": "^1.2.8",
        "@react-email/components": "0.5.5",
        "@react-email/render": "^1.2.0",
        "@react-email/tailwind": "1.2.2",
        "@simplewebauthn/browser": "^13.2.0",
        "@simplewebauthn/server": "^13.2.1",
        "@tailwindcss/forms": "^0.5.10",
        "@tanstack/react-table": "8.21.3",
        "arctic": "3.7.0",
        "axios": "1.13.2",
        "better-sqlite3": "11.9.1",
        "canvas-confetti": "1.9.4",
        "class-variance-authority": "0.7.1",
        "arctic": "^3.7.0",
        "axios": "^1.12.2",
        "better-sqlite3": "11.7.0",
        "canvas-confetti": "1.9.3",
        "class-variance-authority": "^0.7.1",
        "clsx": "2.1.1",
        "cmdk": "1.1.1",
        "cookie": "1.1.1",
        "cookie": "^1.0.2",
        "cookie-parser": "1.4.7",
        "cookies": "0.9.1",
        "cookies": "^0.9.1",
        "cors": "2.8.5",
        "crypto-js": "4.2.0",
        "d3": "7.9.0",
        "date-fns": "4.1.0",
        "drizzle-orm": "0.45.1",
        "eslint": "9.39.2",
        "eslint-config-next": "16.1.0",
        "express": "5.2.1",
        "express-rate-limit": "8.2.1",
        "glob": "13.0.0",
        "crypto-js": "^4.2.0",
        "drizzle-orm": "0.44.6",
        "eslint": "9.35.0",
        "eslint-config-next": "15.5.4",
        "express": "5.1.0",
        "express-rate-limit": "8.1.0",
        "glob": "11.0.3",
        "helmet": "8.1.0",
        "http-errors": "2.0.1",
        "i": "0.3.7",
        "http-errors": "2.0.0",
        "i": "^0.3.7",
        "input-otp": "1.4.2",
        "ioredis": "5.8.2",
        "jmespath": "0.16.0",
        "js-yaml": "4.1.1",
        "jsonwebtoken": "9.0.3",
        "lucide-react": "0.562.0",
        "maxmind": "5.0.1",
        "ioredis": "5.6.1",
        "jmespath": "^0.16.0",
        "js-yaml": "4.1.0",
        "jsonwebtoken": "^9.0.2",
        "lucide-react": "^0.544.0",
        "maxmind": "5.0.0",
        "moment": "2.30.1",
        "next": "15.5.9",
        "next-intl": "4.6.1",
        "next": "15.5.4",
        "next-intl": "^4.3.9",
        "next-themes": "0.4.6",
        "nextjs-toploader": "3.9.17",
        "node-cache": "5.1.2",
        "node-fetch": "3.3.2",
        "nodemailer": "7.0.11",
        "npm": "11.7.0",
        "nprogress": "0.2.0",
        "nodemailer": "7.0.6",
        "npm": "^11.6.1",
        "oslo": "1.2.1",
        "pg": "8.16.3",
        "posthog-node": "5.17.4",
        "pg": "^8.16.2",
        "posthog-node": "^5.8.4",
        "qrcode.react": "4.2.0",
        "react": "19.2.3",
        "react-day-picker": "9.13.0",
        "react-dom": "19.2.3",
        "react-easy-sort": "1.8.0",
        "react-hook-form": "7.68.0",
        "react-icons": "5.5.0",
        "react": "19.1.1",
        "react-dom": "19.1.1",
        "react-easy-sort": "^1.7.0",
        "react-hook-form": "7.62.0",
        "react-icons": "^5.5.0",
        "rebuild": "0.1.2",
        "recharts": "2.15.4",
        "reodotdev": "1.0.0",
        "resend": "6.6.0",
        "semver": "7.7.3",
        "stripe": "20.1.0",
        "swagger-ui-express": "5.0.1",
        "tailwind-merge": "3.4.0",
        "topojson-client": "3.1.0",
        "tw-animate-css": "1.4.0",
        "uuid": "13.0.0",
        "reodotdev": "^1.0.0",
        "resend": "^6.1.1",
        "semver": "^7.7.2",
        "stripe": "18.2.1",
        "swagger-ui-express": "^5.0.1",
        "tailwind-merge": "3.3.1",
        "tw-animate-css": "^1.3.8",
        "uuid": "^13.0.0",
        "vaul": "1.1.2",
        "visionscarto-world-atlas": "1.0.0",
        "winston": "3.19.0",
        "winston": "3.17.0",
        "winston-daily-rotate-file": "5.0.0",
        "ws": "8.18.3",
        "yaml": "2.8.2",
        "yargs": "18.0.0",
        "zod": "4.2.1",
        "zod-validation-error": "5.0.0"
        "zod": "3.25.76",
        "zod-validation-error": "3.5.2"
    },
    "devDependencies": {
        "@dotenvx/dotenvx": "1.51.2",
        "@dotenvx/dotenvx": "1.51.0",
        "@esbuild-plugins/tsconfig-paths": "0.1.2",
        "@tailwindcss/postcss": "4.1.18",
        "@tanstack/react-query-devtools": "5.91.1",
        "@types/better-sqlite3": "7.6.13",
        "@types/cookie-parser": "1.4.10",
        "@react-email/preview-server": "4.2.12",
        "@tailwindcss/postcss": "^4.1.14",
        "@types/better-sqlite3": "7.6.12",
        "@types/cookie-parser": "1.4.9",
        "@types/cors": "2.8.19",
        "@types/crypto-js": "4.2.2",
        "@types/d3": "7.4.3",
        "@types/express": "5.0.6",
        "@types/express-session": "1.18.2",
        "@types/jmespath": "0.15.2",
        "@types/jsonwebtoken": "9.0.10",
        "@types/node": "24.10.2",
        "@types/nodemailer": "7.0.4",
        "@types/nprogress": "0.2.3",
        "@types/pg": "8.16.0",
        "@types/react": "19.2.7",
        "@types/react-dom": "19.2.3",
        "@types/semver": "7.7.1",
        "@types/swagger-ui-express": "4.1.8",
        "@types/topojson-client": "3.1.5",
        "@types/ws": "8.18.1",
        "@types/yargs": "17.0.35",
        "@types/crypto-js": "^4.2.2",
        "@types/express": "5.0.3",
        "@types/express-session": "^1.18.2",
        "@types/jmespath": "^0.15.2",
        "@types/js-yaml": "4.0.9",
        "babel-plugin-react-compiler": "1.0.0",
        "drizzle-kit": "0.31.8",
        "esbuild": "0.27.2",
        "esbuild-node-externals": "1.20.1",
        "postcss": "8.5.6",
        "prettier": "3.7.4",
        "react-email": "5.0.7",
        "tailwindcss": "4.1.18",
        "@types/jsonwebtoken": "^9.0.10",
        "@types/node": "24.6.2",
        "@types/nodemailer": "7.0.2",
        "@types/pg": "8.15.5",
        "@types/react": "19.1.16",
        "@types/react-dom": "19.1.9",
        "@types/semver": "^7.7.1",
        "@types/swagger-ui-express": "^4.1.8",
        "@types/ws": "8.18.1",
        "@types/yargs": "17.0.33",
        "drizzle-kit": "0.31.5",
        "esbuild": "0.25.10",
        "esbuild-node-externals": "1.18.0",
        "postcss": "^8",
        "react-email": "4.2.12",
        "tailwindcss": "^4.1.4",
        "tsc-alias": "1.8.16",
        "tsx": "4.21.0",
        "typescript": "5.9.3",
        "typescript-eslint": "8.49.0"
        "tsx": "4.20.6",
        "typescript": "^5",
        "typescript-eslint": "^8.45.0"
    },
    "overrides": {
        "emblor": {
            "react": "19.0.0",
            "react-dom": "19.0.0"
        }
    }
}
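The reworked `set:*` scripts also change what lands in the generated `server/build.ts`: the old scripts emitted a union-typed constant and copied a per-build tsconfig into place, while the new ones emit `as any` and leave tsconfig alone. For example, `npm run set:saas` now writes roughly the following one-line file (comments added here for contrast):

    // server/build.ts as written by the new "set:saas" script:
    export const build = "saas" as any;
    // The old script emitted instead:
    //   export const build = "saas" as "saas" | "enterprise" | "oss";

Branches such as `if (build == "oss")` in server/index.ts consume this constant, so the build flavor is fixed when the file is generated, not at runtime.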
@@ -1,8 +1,8 @@
/** @type {import('postcss-load-config').Config} */
const config = {
    plugins: {
        "@tailwindcss/postcss": {}
    }
        "@tailwindcss/postcss": {},
    },
};

export default config;

Before Width: | Height: | Size: 13 KiB  After Width: | Height: | Size: 34 KiB
Before Width: | Height: | Size: 13 KiB  After Width: | Height: | Size: 33 KiB
BIN public/screenshots/create-resource.png Normal file  After Width: | Height: | Size: 687 KiB
Before Width: | Height: | Size: 493 KiB  After Width: | Height: | Size: 713 KiB
BIN public/screenshots/edit-resource.png Normal file  After Width: | Height: | Size: 636 KiB
Before Width: | Height: | Size: 484 KiB  After Width: | Height: | Size: 713 KiB
Before Width: | Height: | Size: 421 KiB
Before Width: | Height: | Size: 484 KiB
BIN public/screenshots/resources.png Normal file  After Width: | Height: | Size: 713 KiB
BIN public/screenshots/sites-fade.png Normal file  After Width: | Height: | Size: 456 KiB
Before Width: | Height: | Size: 396 KiB  After Width: | Height: | Size: 674 KiB
Before Width: | Height: | Size: 434 KiB
@@ -7,21 +7,21 @@ import {
|
||||
errorHandlerMiddleware,
|
||||
notFoundMiddleware
|
||||
} from "@server/middlewares";
|
||||
import { authenticated, unauthenticated } from "#dynamic/routers/external";
|
||||
import { router as wsRouter, handleWSUpgrade } from "#dynamic/routers/ws";
|
||||
import { corsWithLoginPageSupport } from "@server/middlewares/private/corsWithLoginPage";
|
||||
import { authenticated, unauthenticated } from "@server/routers/external";
|
||||
import { router as wsRouter, handleWSUpgrade } from "@server/routers/ws";
|
||||
import { logIncomingMiddleware } from "./middlewares/logIncoming";
|
||||
import { csrfProtectionMiddleware } from "./middlewares/csrfProtection";
|
||||
import helmet from "helmet";
|
||||
import { stripeWebhookHandler } from "@server/routers/private/billing/webhooks";
|
||||
import { build } from "./build";
|
||||
import rateLimit, { ipKeyGenerator } from "express-rate-limit";
|
||||
import createHttpError from "http-errors";
|
||||
import HttpCode from "./types/HttpCode";
|
||||
import requestTimeoutMiddleware from "./middlewares/requestTimeout";
|
||||
import { createStore } from "#dynamic/lib/rateLimitStore";
|
||||
import { createStore } from "@server/lib/private/rateLimitStore";
|
||||
import hybridRouter from "@server/routers/private/hybrid";
|
||||
import { stripDuplicateSesions } from "./middlewares/stripDuplicateSessions";
|
||||
import { corsWithLoginPageSupport } from "@server/lib/corsWithLoginPage";
|
||||
import { hybridRouter } from "#dynamic/routers/hybrid";
|
||||
import { billingWebhookHandler } from "#dynamic/routers/billing/webhooks";
|
||||
|
||||
const dev = config.isDev;
|
||||
const externalPort = config.getRawConfig().server.external_port;
|
||||
@@ -39,30 +39,32 @@ export function createApiServer() {
apiServer.post(
`${prefix}/billing/webhooks`,
express.raw({ type: "application/json" }),
billingWebhookHandler
stripeWebhookHandler
);
}

const corsConfig = config.getRawConfig().server.cors;
const options = {
...(corsConfig?.origins
? { origin: corsConfig.origins }
: {
origin: (origin: any, callback: any) => {
callback(null, true);
}
}),
...(corsConfig?.methods && { methods: corsConfig.methods }),
...(corsConfig?.allowed_headers && {
allowedHeaders: corsConfig.allowed_headers
}),
credentials: !(corsConfig?.credentials === false)
};

if (build == "oss" || !corsConfig) {
if (build == "oss") {
const options = {
...(corsConfig?.origins
? { origin: corsConfig.origins }
: {
origin: (origin: any, callback: any) => {
callback(null, true);
}
}),
...(corsConfig?.methods && { methods: corsConfig.methods }),
...(corsConfig?.allowed_headers && {
allowedHeaders: corsConfig.allowed_headers
}),
credentials: !(corsConfig?.credentials === false)
};

logger.debug("Using CORS options", options);

apiServer.use(cors(options));
} else if (corsConfig) {
} else {
// Use the custom CORS middleware with loginPage support
apiServer.use(corsWithLoginPageSupport(corsConfig));
}
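Both versions assemble the cors options with conditional object spreads, so absent config keys simply drop out of the object. A self-contained sketch of that pattern, with an assumed config shape:

import cors, { CorsOptions } from "cors";

interface CorsSettings {
    origins?: string[];
    methods?: string[];
    allowed_headers?: string[];
    credentials?: boolean;
}

function buildCorsOptions(corsConfig?: CorsSettings): CorsOptions {
    return {
        // No allow-list configured: reflect every origin back as allowed
        ...(corsConfig?.origins
            ? { origin: corsConfig.origins }
            : { origin: (origin: any, callback: any) => callback(null, true) }),
        // Only set the keys the config actually provides
        ...(corsConfig?.methods && { methods: corsConfig.methods }),
        ...(corsConfig?.allowed_headers && {
            allowedHeaders: corsConfig.allowed_headers
        }),
        // Credentials stay enabled unless explicitly set to false
        credentials: !(corsConfig?.credentials === false)
    };
}

The net change in this hunk is scoping: `options` moves inside the `build == "oss"` branch, and non-OSS builds now always go through `corsWithLoginPageSupport(corsConfig)`, even when `corsConfig` is undefined.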
@@ -79,12 +81,6 @@ export function createApiServer() {
// Add request timeout middleware
apiServer.use(requestTimeoutMiddleware(60000)); // 60 second timeout

apiServer.use(logIncomingMiddleware);

if (build !== "oss") {
apiServer.use(`${prefix}/hybrid`, hybridRouter); // put before rate limiting because we will rate limit there separately because some of the routes are heavily used
}

if (!dev) {
apiServer.use(
rateLimit({
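The hybrid router is mounted ahead of the global limiter so its heavily used routes can be rate limited on their own terms. A minimal sketch of a global limiter in the style of these imports (the window, limit, and message are assumed values, not the project's configuration):

import rateLimit, { ipKeyGenerator } from "express-rate-limit";
import createHttpError from "http-errors";

const globalLimiter = rateLimit({
    windowMs: 60 * 1000, // assumed: one-minute window
    limit: 500, // assumed: requests per key per window
    // ipKeyGenerator normalizes IPv6 clients into subnets so one host
    // cannot dodge the limit by rotating addresses within its own /64
    keyGenerator: (req) => ipKeyGenerator(req.ip ?? ""),
    handler: (_req, _res, next) =>
        next(createHttpError(429, "Too many requests, please try again later"))
});

// apiServer.use(globalLimiter);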
@@ -107,7 +103,11 @@ export function createApiServer() {
}

// API routes
apiServer.use(logIncomingMiddleware);
apiServer.use(prefix, unauthenticated);
if (build !== "oss") {
apiServer.use(`${prefix}/hybrid`, hybridRouter);
}
apiServer.use(prefix, authenticated);

// WebSocket routes
@@ -4,6 +4,7 @@ import { userActions, roleActions, userOrgs } from "@server/db";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { sendUsageNotification } from "@server/routers/org";

export enum ActionsEnum {
createOrgUser = "createOrgUser",
@@ -19,7 +20,6 @@ export enum ActionsEnum {
getSite = "getSite",
listSites = "listSites",
updateSite = "updateSite",
reGenerateSecret = "reGenerateSecret",
createResource = "createResource",
deleteResource = "deleteResource",
getResource = "getResource",
@@ -61,7 +61,6 @@ export enum ActionsEnum {
getUser = "getUser",
setResourcePassword = "setResourcePassword",
setResourcePincode = "setResourcePincode",
setResourceHeaderAuth = "setResourceHeaderAuth",
setResourceWhitelist = "setResourceWhitelist",
getResourceWhitelist = "getResourceWhitelist",
generateAccessToken = "generateAccessToken",
@@ -78,19 +77,11 @@ export enum ActionsEnum {
updateSiteResource = "updateSiteResource",
createClient = "createClient",
deleteClient = "deleteClient",
archiveClient = "archiveClient",
unarchiveClient = "unarchiveClient",
blockClient = "blockClient",
unblockClient = "unblockClient",
updateClient = "updateClient",
listClients = "listClients",
getClient = "getClient",
listOrgDomains = "listOrgDomains",
getDomain = "getDomain",
updateOrgDomain = "updateOrgDomain",
getDNSRecords = "getDNSRecords",
createNewt = "createNewt",
createOlm = "createOlm",
createIdp = "createIdp",
updateIdp = "updateIdp",
deleteIdp = "deleteIdp",
@@ -125,13 +116,7 @@ export enum ActionsEnum {
updateLoginPage = "updateLoginPage",
getLoginPage = "getLoginPage",
deleteLoginPage = "deleteLoginPage",
listBlueprints = "listBlueprints",
getBlueprint = "getBlueprint",
applyBlueprint = "applyBlueprint",
viewLogs = "viewLogs",
exportLogs = "exportLogs",
listApprovals = "listApprovals",
updateApprovals = "updateApprovals"
applyBlueprint = "applyBlueprint"
}

export async function checkUserActionPermission(
@@ -208,6 +193,8 @@ export async function checkUserActionPermission(
.limit(1);

return roleActionPermission.length > 0;

return false;
} catch (error) {
console.error("Error checking user action permission:", error);
throw createHttpError(
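The tail of this hunk is the behavioral part: the new side adds `return roleActionPermission.length > 0;` after the role-permission query, so a matching role grant now short-circuits before the final `return false` fallback. A hypothetical call site (the guard helper, its argument order, and the `HttpCode.FORBIDDEN` usage are assumptions for illustration):

async function requireAction(
    userId: string,
    orgId: string,
    action: ActionsEnum
): Promise<void> {
    // assumed argument order for checkUserActionPermission
    const allowed = await checkUserActionPermission(action, userId, orgId);
    if (!allowed) {
        throw createHttpError(
            HttpCode.FORBIDDEN,
            `User is missing permission: ${action}`
        );
    }
}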
@@ -2,13 +2,13 @@ import { hash, verify } from "@node-rs/argon2";

export async function verifyPassword(
password: string,
hash: string
hash: string,
): Promise<boolean> {
const validPassword = await verify(hash, password, {
memoryCost: 19456,
timeCost: 2,
outputLen: 32,
parallelism: 1
parallelism: 1,
});
return validPassword;
}
@@ -18,7 +18,7 @@ export async function hashPassword(password: string): Promise<string> {
memoryCost: 19456,
timeCost: 2,
outputLen: 32,
parallelism: 1
parallelism: 1,
});

return passwordHash;
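The only change here is trailing commas, but the parameters are worth noting: memoryCost 19456 KiB (19 MiB), timeCost 2, parallelism 1 is the commonly cited OWASP baseline for Argon2id. Usage is a simple round trip:

// Inside an async context: hash at registration time, verify at login time.
const storedHash = await hashPassword("Correct-Horse-7!");
await verifyPassword("Correct-Horse-7!", storedHash); // resolves to true
await verifyPassword("wrong-guess", storedHash); // resolves to false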
@@ -4,13 +4,10 @@ export const passwordSchema = z
.string()
.min(8, { message: "Password must be at least 8 characters long" })
.max(128, { message: "Password must be at most 128 characters long" })
.regex(
/^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[~!`@#$%^&*()_\-+={}[\]|\\:;"'<>,.\/?]).*$/,
{
message: `Your password must meet the following conditions:
.regex(/^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[~!`@#$%^&*()_\-+={}[\]|\\:;"'<>,.\/?]).*$/, {
message: `Your password must meet the following conditions:
at least one uppercase English letter,
at least one lowercase English letter,
at least one digit,
at least one special character.`
}
);
});
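The schema itself is unchanged; the regex was only collapsed onto one line. Its four lookaheads each assert one character class anywhere in the string, which safeParse makes easy to sanity-check (example strings are illustrative):

passwordSchema.safeParse("Str0ng!pass"); // success: true
passwordSchema.safeParse("short1!A"); // success: true — exactly 8 characters
passwordSchema.safeParse("alllowercase1!"); // success: false — no uppercase letter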
@@ -36,15 +36,12 @@ export async function createSession(
const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token))
);
const [session] = await db
.insert(sessions)
.values({
sessionId: sessionId,
userId,
expiresAt: new Date(Date.now() + SESSION_COOKIE_EXPIRES).getTime(),
issuedAt: new Date().getTime()
})
.returning();
const session: Session = {
sessionId: sessionId,
userId,
expiresAt: new Date(Date.now() + SESSION_COOKIE_EXPIRES).getTime()
};
await db.insert(sessions).values(session);
return session;
}
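The rewrite drops the `issuedAt` column and the `.returning()` round trip: the session object is built in memory, inserted, and returned directly. Common to every session flavor in this changeset is that only a SHA-256 digest of the bearer token is stored; that derivation isolates to:

import { encodeHexLowerCase } from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";

// The caller keeps the raw token; the database only ever sees its hash,
// so a leaked sessions table cannot be replayed as bearer tokens.
function tokenToSessionId(token: string): string {
    return encodeHexLowerCase(sha256(new TextEncoder().encode(token)));
}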
@@ -1,4 +1,6 @@
import { encodeHexLowerCase } from "@oslojs/encoding";
import {
encodeHexLowerCase,
} from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";
import { Newt, newts, newtSessions, NewtSession } from "@server/db";
import { db } from "@server/db";
@@ -8,25 +10,25 @@ export const EXPIRES = 1000 * 60 * 60 * 24 * 30;

export async function createNewtSession(
token: string,
newtId: string
newtId: string,
): Promise<NewtSession> {
const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token))
sha256(new TextEncoder().encode(token)),
);
const session: NewtSession = {
sessionId: sessionId,
newtId,
expiresAt: new Date(Date.now() + EXPIRES).getTime()
expiresAt: new Date(Date.now() + EXPIRES).getTime(),
};
await db.insert(newtSessions).values(session);
return session;
}

export async function validateNewtSessionToken(
token: string
token: string,
): Promise<SessionValidationResult> {
const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token))
sha256(new TextEncoder().encode(token)),
);
const result = await db
.select({ newt: newts, session: newtSessions })
@@ -43,12 +45,14 @@ export async function validateNewtSessionToken(
.where(eq(newtSessions.sessionId, session.sessionId));
return { session: null, newt: null };
}
if (Date.now() >= session.expiresAt - EXPIRES / 2) {
session.expiresAt = new Date(Date.now() + EXPIRES).getTime();
if (Date.now() >= session.expiresAt - (EXPIRES / 2)) {
session.expiresAt = new Date(
Date.now() + EXPIRES,
).getTime();
await db
.update(newtSessions)
.set({
expiresAt: session.expiresAt
expiresAt: session.expiresAt,
})
.where(eq(newtSessions.sessionId, session.sessionId));
}
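Formatting aside (trailing commas, wrapped expressions), the renewal rule is intact: a lookup that happens in the second half of a session's 30-day lifetime slides the expiry forward another 30 days. As a standalone rule:

const EXPIRES = 1000 * 60 * 60 * 24 * 30; // 30 days, as above

// Returns the new expiresAt when the session should be extended, else null.
function renewedExpiry(expiresAt: number, now: number = Date.now()): number | null {
    return now >= expiresAt - EXPIRES / 2 ? now + EXPIRES : null;
}

Sessions in active use therefore never lapse, while ones untouched for 30 days expire and are cleared on their next lookup.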
@@ -1,4 +1,6 @@
import { encodeHexLowerCase } from "@oslojs/encoding";
import {
encodeHexLowerCase,
} from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";
import { Olm, olms, olmSessions, OlmSession } from "@server/db";
import { db } from "@server/db";
@@ -8,25 +10,25 @@ export const EXPIRES = 1000 * 60 * 60 * 24 * 30;

export async function createOlmSession(
token: string,
olmId: string
olmId: string,
): Promise<OlmSession> {
const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token))
sha256(new TextEncoder().encode(token)),
);
const session: OlmSession = {
sessionId: sessionId,
olmId,
expiresAt: new Date(Date.now() + EXPIRES).getTime()
expiresAt: new Date(Date.now() + EXPIRES).getTime(),
};
await db.insert(olmSessions).values(session);
return session;
}

export async function validateOlmSessionToken(
token: string
token: string,
): Promise<SessionValidationResult> {
const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token))
sha256(new TextEncoder().encode(token)),
);
const result = await db
.select({ olm: olms, session: olmSessions })
@@ -43,12 +45,14 @@ export async function validateOlmSessionToken(
.where(eq(olmSessions.sessionId, session.sessionId));
return { session: null, olm: null };
}
if (Date.now() >= session.expiresAt - EXPIRES / 2) {
session.expiresAt = new Date(Date.now() + EXPIRES).getTime();
if (Date.now() >= session.expiresAt - (EXPIRES / 2)) {
session.expiresAt = new Date(
Date.now() + EXPIRES,
).getTime();
await db
.update(olmSessions)
.set({
expiresAt: session.expiresAt
expiresAt: session.expiresAt,
})
.where(eq(olmSessions.sessionId, session.sessionId));
}
@@ -11,14 +11,11 @@
* This file is not licensed under the AGPLv3.
*/

import { encodeHexLowerCase } from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";
import {
RemoteExitNode,
remoteExitNodes,
remoteExitNodeSessions,
RemoteExitNodeSession
} from "@server/db";
encodeHexLowerCase,
} from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";
import { RemoteExitNode, remoteExitNodes, remoteExitNodeSessions, RemoteExitNodeSession } from "@server/db";
import { db } from "@server/db";
import { eq } from "drizzle-orm";

@@ -26,39 +23,30 @@ export const EXPIRES = 1000 * 60 * 60 * 24 * 30;

export async function createRemoteExitNodeSession(
token: string,
remoteExitNodeId: string
remoteExitNodeId: string,
): Promise<RemoteExitNodeSession> {
const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token))
sha256(new TextEncoder().encode(token)),
);
const session: RemoteExitNodeSession = {
sessionId: sessionId,
remoteExitNodeId,
expiresAt: new Date(Date.now() + EXPIRES).getTime()
expiresAt: new Date(Date.now() + EXPIRES).getTime(),
};
await db.insert(remoteExitNodeSessions).values(session);
return session;
}

export async function validateRemoteExitNodeSessionToken(
token: string
token: string,
): Promise<SessionValidationResult> {
const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token))
sha256(new TextEncoder().encode(token)),
);
const result = await db
.select({
remoteExitNode: remoteExitNodes,
session: remoteExitNodeSessions
})
.select({ remoteExitNode: remoteExitNodes, session: remoteExitNodeSessions })
.from(remoteExitNodeSessions)
.innerJoin(
remoteExitNodes,
eq(
remoteExitNodeSessions.remoteExitNodeId,
remoteExitNodes.remoteExitNodeId
)
)
.innerJoin(remoteExitNodes, eq(remoteExitNodeSessions.remoteExitNodeId, remoteExitNodes.remoteExitNodeId))
.where(eq(remoteExitNodeSessions.sessionId, sessionId));
if (result.length < 1) {
return { session: null, remoteExitNode: null };
@@ -70,32 +58,26 @@ export async function validateRemoteExitNodeSessionToken(
.where(eq(remoteExitNodeSessions.sessionId, session.sessionId));
return { session: null, remoteExitNode: null };
}
if (Date.now() >= session.expiresAt - EXPIRES / 2) {
session.expiresAt = new Date(Date.now() + EXPIRES).getTime();
if (Date.now() >= session.expiresAt - (EXPIRES / 2)) {
session.expiresAt = new Date(
Date.now() + EXPIRES,
).getTime();
await db
.update(remoteExitNodeSessions)
.set({
expiresAt: session.expiresAt
expiresAt: session.expiresAt,
})
.where(eq(remoteExitNodeSessions.sessionId, session.sessionId));
}
return { session, remoteExitNode };
}

export async function invalidateRemoteExitNodeSession(
sessionId: string
): Promise<void> {
await db
.delete(remoteExitNodeSessions)
.where(eq(remoteExitNodeSessions.sessionId, sessionId));
export async function invalidateRemoteExitNodeSession(sessionId: string): Promise<void> {
await db.delete(remoteExitNodeSessions).where(eq(remoteExitNodeSessions.sessionId, sessionId));
}

export async function invalidateAllRemoteExitNodeSessions(
remoteExitNodeId: string
): Promise<void> {
await db
.delete(remoteExitNodeSessions)
.where(eq(remoteExitNodeSessions.remoteExitNodeId, remoteExitNodeId));
export async function invalidateAllRemoteExitNodeSessions(remoteExitNodeId: string): Promise<void> {
await db.delete(remoteExitNodeSessions).where(eq(remoteExitNodeSessions.remoteExitNodeId, remoteExitNodeId));
}

export type SessionValidationResult =
@@ -4,6 +4,9 @@ import { resourceSessions, ResourceSession } from "@server/db";
import { db } from "@server/db";
import { eq, and } from "drizzle-orm";
import config from "@server/lib/config";
import axios from "axios";
import logger from "@server/logger";
import { tokenManager } from "@server/lib/tokenManager";

export const SESSION_COOKIE_NAME =
config.getRawConfig().server.session_cookie_name;
@@ -50,8 +53,7 @@ export async function createResourceSession(opts: {
doNotExtend: opts.doNotExtend || false,
accessTokenId: opts.accessTokenId || null,
isRequestToken: opts.isRequestToken || false,
userSessionId: opts.userSessionId || null,
issuedAt: new Date().getTime()
userSessionId: opts.userSessionId || null
};

await db.insert(resourceSessions).values(session);
@@ -63,6 +65,29 @@ export async function validateResourceSessionToken(
token: string,
resourceId: number
): Promise<ResourceSessionValidationResult> {
if (config.isManagedMode()) {
try {
const response = await axios.post(`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/resource/${resourceId}/session/validate`, {
token: token
}, await tokenManager.getAuthHeader());
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
logger.error("Error validating resource session token in hybrid mode:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error validating resource session token in hybrid mode:", error);
}
return { resourceSession: null };
}
}

const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token))
);
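New here is the managed-mode path: instead of hashing and querying locally, the token is POSTed to the managed endpoint for validation, and failures log structured detail only when axios can prove they are HTTP-level. The `axios.isAxiosError` narrowing generalizes to:

import axios from "axios";

function describeRequestFailure(error: unknown): string {
    if (axios.isAxiosError(error)) {
        // HTTP-level failure: method, URL, and status are recoverable
        const method = error.config?.method ?? "?";
        const url = error.config?.url ?? "?";
        const status = error.response?.status ?? "no response";
        return `${method} ${url} -> ${status}: ${error.message}`;
    }
    // Anything else: a plain Error or a thrown non-Error value
    return error instanceof Error ? error.message : String(error);
}

Note the failure mode: any error in the remote call degrades to `{ resourceSession: null }`, so the session is treated as invalid rather than trusted.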
@@ -1,43 +1,9 @@
import { Request } from "express";
import {
validateSessionToken,
SESSION_COOKIE_NAME
} from "@server/auth/sessions/app";
import { validateSessionToken, SESSION_COOKIE_NAME } from "@server/auth/sessions/app";

export async function verifySession(req: Request, forceLogin?: boolean) {
export async function verifySession(req: Request) {
const res = await validateSessionToken(
req.cookies[SESSION_COOKIE_NAME] ?? ""
req.cookies[SESSION_COOKIE_NAME] ?? "",
);

if (!forceLogin) {
return res;
}
if (!res.session || !res.user) {
return {
session: null,
user: null
};
}
if (res.session.deviceAuthUsed) {
return {
session: null,
user: null
};
}
if (!res.session.issuedAt) {
return {
session: null,
user: null
};
}
const mins = 5 * 60 * 1000;
const now = new Date().getTime();
if (now - res.session.issuedAt > mins) {
return {
session: null,
user: null
};
}

return res;
}
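The deleted `forceLogin` branch enforced step-up semantics: reject unless the session exists, was not minted through device auth, and was issued within the last five minutes (the `mins` variable actually held milliseconds). The same guard as a standalone predicate, with an assumed session shape:

const FRESH_WINDOW_MS = 5 * 60 * 1000; // five minutes

interface SessionFreshness {
    issuedAt?: number | null; // epoch milliseconds
    deviceAuthUsed?: boolean | null;
}

function isFreshInteractiveSession(
    session: SessionFreshness,
    now: number = Date.now()
): boolean {
    if (session.deviceAuthUsed) return false;
    if (!session.issuedAt) return false;
    return now - session.issuedAt <= FRESH_WINDOW_MS;
}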
@@ -1,13 +0,0 @@
import { cleanup as wsCleanup } from "#dynamic/routers/ws";

async function cleanup() {
await wsCleanup();

process.exit(0);
}

export async function initCleanup() {
// Handle process termination
process.on("SIGTERM", () => cleanup());
process.on("SIGINT", () => cleanup());
}