mirror of https://github.com/fosrl/pangolin.git (synced 2026-01-31 15:19:09 +00:00)

Compare commits: org-only-i ... 1.14.1-s.3 (17 commits)

| SHA1 |
|---|
| 8e1fd4474f |
| 29a683a815 |
| 69dbd20ea5 |
| 89682a2ee4 |
| 78b00a18cc |
| 192702daf9 |
| 9ec94441f3 |
| 53e7b99605 |
| 20088ef82b |
| 1e0b1a3607 |
| 24e8455c73 |
| e42a732e93 |
| d333cb5199 |
| a6db4f20ad |
| 9ed9472c01 |
| 9467e6c032 |
| 9d849a0ced |
149  .github/workflows/cicd.yml (vendored)
@@ -329,20 +329,89 @@ jobs:
skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
shell: bash

- name: Copy tag from Docker Hub to GHCR
# Mirror the already-built image (all architectures) to GHCR so we can sign it
- name: Copy tags from Docker Hub to GHCR
# Mirror the already-built images (all architectures) to GHCR so we can sign them
# Wait a bit for both architectures to be available in Docker Hub manifest
env:
REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
run: |
set -euo pipefail
TAG=${{ env.TAG }}
echo "Waiting for multi-arch manifest to be ready..."
MAJOR_TAG=$(echo $TAG | cut -d. -f1)
MINOR_TAG=$(echo $TAG | cut -d. -f1,2)

echo "Waiting for multi-arch manifests to be ready..."
sleep 30
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
skopeo copy --all --retry-times 3 \
docker://$DOCKERHUB_IMAGE:$TAG \
docker://$GHCR_IMAGE:$TAG

# Determine if this is an RC release
IS_RC="false"
if echo "$TAG" | grep -qE "rc[0-9]+$"; then
IS_RC="true"
fi

if [ "$IS_RC" = "true" ]; then
echo "RC release detected - copying version-specific tags only"

# SQLite OSS
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
skopeo copy --all --retry-times 3 \
docker://$DOCKERHUB_IMAGE:$TAG \
docker://$GHCR_IMAGE:$TAG

# PostgreSQL OSS
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:postgresql-${TAG} -> ${{ env.GHCR_IMAGE }}:postgresql-${TAG}"
skopeo copy --all --retry-times 3 \
docker://$DOCKERHUB_IMAGE:postgresql-$TAG \
docker://$GHCR_IMAGE:postgresql-$TAG

# SQLite Enterprise
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-${TAG} -> ${{ env.GHCR_IMAGE }}:ee-${TAG}"
skopeo copy --all --retry-times 3 \
docker://$DOCKERHUB_IMAGE:ee-$TAG \
docker://$GHCR_IMAGE:ee-$TAG

# PostgreSQL Enterprise
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-postgresql-${TAG} -> ${{ env.GHCR_IMAGE }}:ee-postgresql-${TAG}"
skopeo copy --all --retry-times 3 \
docker://$DOCKERHUB_IMAGE:ee-postgresql-$TAG \
docker://$GHCR_IMAGE:ee-postgresql-$TAG
else
echo "Regular release detected - copying all tags (latest, major, minor, full version)"

# SQLite OSS - all tags
for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:${TAG_SUFFIX}"
skopeo copy --all --retry-times 3 \
docker://$DOCKERHUB_IMAGE:$TAG_SUFFIX \
docker://$GHCR_IMAGE:$TAG_SUFFIX
done

# PostgreSQL OSS - all tags
for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:postgresql-${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:postgresql-${TAG_SUFFIX}"
skopeo copy --all --retry-times 3 \
docker://$DOCKERHUB_IMAGE:postgresql-$TAG_SUFFIX \
docker://$GHCR_IMAGE:postgresql-$TAG_SUFFIX
done

# SQLite Enterprise - all tags
for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:ee-${TAG_SUFFIX}"
skopeo copy --all --retry-times 3 \
docker://$DOCKERHUB_IMAGE:ee-$TAG_SUFFIX \
docker://$GHCR_IMAGE:ee-$TAG_SUFFIX
done

# PostgreSQL Enterprise - all tags
for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-postgresql-${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:ee-postgresql-${TAG_SUFFIX}"
skopeo copy --all --retry-times 3 \
docker://$DOCKERHUB_IMAGE:ee-postgresql-$TAG_SUFFIX \
docker://$GHCR_IMAGE:ee-postgresql-$TAG_SUFFIX
done
fi

echo "All images copied successfully to GHCR!"
shell: bash

- name: Login to GitHub Container Registry (for cosign)
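For reference, a minimal sketch of how the tag fan-out in the copy step above resolves, runnable outside the workflow; the tag value 1.14.1 is an illustrative assumption, not taken from this release:

```bash
#!/usr/bin/env bash
# Illustrative only: shows what MAJOR_TAG/MINOR_TAG and the RC check produce for a sample tag.
TAG="1.14.1"                              # the workflow derives this from the pushed git tag
MAJOR_TAG=$(echo "$TAG" | cut -d. -f1)    # -> 1
MINOR_TAG=$(echo "$TAG" | cut -d. -f1,2)  # -> 1.14
if echo "$TAG" | grep -qE "rc[0-9]+$"; then
    echo "RC release: only ${TAG} (and its postgresql-/ee-/ee-postgresql- variants) is mirrored"
else
    echo "Regular release: latest, ${MAJOR_TAG}, ${MINOR_TAG} and ${TAG} are mirrored for each variant"
fi
```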
@@ -371,28 +440,62 @@ jobs:
issuer="https://token.actions.githubusercontent.com"
id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)

for IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
echo "Processing ${IMAGE}:${TAG}"
# Determine if this is an RC release
IS_RC="false"
if echo "$TAG" | grep -qE "rc[0-9]+$"; then
IS_RC="true"
fi

DIGEST="$(skopeo inspect --retry-times 3 docker://${IMAGE}:${TAG} | jq -r '.Digest')"
REF="${IMAGE}@${DIGEST}"
echo "Resolved digest: ${REF}"
# Define image variants to sign
if [ "$IS_RC" = "true" ]; then
echo "RC release - signing version-specific tags only"
IMAGE_TAGS=(
"${TAG}"
"postgresql-${TAG}"
"ee-${TAG}"
"ee-postgresql-${TAG}"
)
else
echo "Regular release - signing all tags"
MAJOR_TAG=$(echo $TAG | cut -d. -f1)
MINOR_TAG=$(echo $TAG | cut -d. -f1,2)
IMAGE_TAGS=(
"latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"
"postgresql-latest" "postgresql-$MAJOR_TAG" "postgresql-$MINOR_TAG" "postgresql-$TAG"
"ee-latest" "ee-$MAJOR_TAG" "ee-$MINOR_TAG" "ee-$TAG"
"ee-postgresql-latest" "ee-postgresql-$MAJOR_TAG" "ee-postgresql-$MINOR_TAG" "ee-postgresql-$TAG"
)
fi

echo "==> cosign sign (keyless) --recursive ${REF}"
cosign sign --recursive "${REF}"
# Sign each image variant for both registries
for BASE_IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
for IMAGE_TAG in "${IMAGE_TAGS[@]}"; do
echo "Processing ${BASE_IMAGE}:${IMAGE_TAG}"

echo "==> cosign sign (key) --recursive ${REF}"
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"
DIGEST="$(skopeo inspect --retry-times 3 docker://${BASE_IMAGE}:${IMAGE_TAG} | jq -r '.Digest')"
REF="${BASE_IMAGE}@${DIGEST}"
echo "Resolved digest: ${REF}"

echo "==> cosign verify (public key) ${REF}"
cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text
echo "==> cosign sign (keyless) --recursive ${REF}"
cosign sign --recursive "${REF}"

echo "==> cosign verify (keyless policy) ${REF}"
cosign verify \
--certificate-oidc-issuer "${issuer}" \
--certificate-identity-regexp "${id_regex}" \
"${REF}" -o text
echo "==> cosign sign (key) --recursive ${REF}"
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"

echo "==> cosign verify (public key) ${REF}"
cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text

echo "==> cosign verify (keyless policy) ${REF}"
cosign verify \
--certificate-oidc-issuer "${issuer}" \
--certificate-identity-regexp "${id_regex}" \
"${REF}" -o text

echo "✓ Successfully signed and verified ${BASE_IMAGE}:${IMAGE_TAG}"
done
done

echo "All images signed and verified successfully!"
shell: bash

post-run:
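For consumers of the published images, a minimal verification sketch using the same policy the signing step above enforces; the `latest` tag and the local `cosign.pub` path are illustrative assumptions:

```bash
# Keyless (OIDC) verification against this repository's GitHub Actions identity.
cosign verify \
  --certificate-oidc-issuer "https://token.actions.githubusercontent.com" \
  --certificate-identity-regexp "^https://github.com/fosrl/pangolin/.+" \
  ghcr.io/fosrl/pangolin:latest -o text

# Key-based verification, assuming the project's public key is saved locally as cosign.pub.
cosign verify --key cosign.pub ghcr.io/fosrl/pangolin:latest -o text
```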
426  .github/workflows/cicd.yml.backup (vendored, new file)
@@ -0,0 +1,426 @@
|
||||
name: CI/CD Pipeline
|
||||
|
||||
# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
|
||||
# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write # for GHCR push
|
||||
id-token: write # for Cosign Keyless (OIDC) Signing
|
||||
|
||||
# Required secrets:
|
||||
# - DOCKER_HUB_USERNAME / DOCKER_HUB_ACCESS_TOKEN: push to Docker Hub
|
||||
# - GITHUB_TOKEN: used for GHCR login and OIDC keyless signing
|
||||
# - COSIGN_PRIVATE_KEY / COSIGN_PASSWORD / COSIGN_PUBLIC_KEY: for key-based signing
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "[0-9]+.[0-9]+.[0-9]+"
|
||||
- "[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
pre-run:
|
||||
runs-on: ubuntu-latest
|
||||
permissions: write-all
|
||||
steps:
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v2
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
|
||||
role-duration-seconds: 3600
|
||||
aws-region: ${{ secrets.AWS_REGION }}
|
||||
|
||||
- name: Verify AWS identity
|
||||
run: aws sts get-caller-identity
|
||||
|
||||
- name: Start EC2 instances
|
||||
run: |
|
||||
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
|
||||
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
|
||||
echo "EC2 instances started"
|
||||
|
||||
|
||||
release-arm:
|
||||
name: Build and Release (ARM64)
|
||||
runs-on: [self-hosted, linux, arm64, us-east-1]
|
||||
needs: [pre-run]
|
||||
if: >-
|
||||
${{
|
||||
needs.pre-run.result == 'success'
|
||||
}}
|
||||
# Job-level timeout to avoid runaway or stuck runs
|
||||
timeout-minutes: 120
|
||||
env:
|
||||
# Target images
|
||||
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
|
||||
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
|
||||
- name: Monitor storage space
|
||||
run: |
|
||||
THRESHOLD=75
|
||||
USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
|
||||
echo "Used space: $USED_SPACE%"
|
||||
if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
|
||||
echo "Used space is below the threshold of 75% free. Running Docker system prune."
|
||||
echo y | docker system prune -a
|
||||
else
|
||||
echo "Storage space is above the threshold. No action needed."
|
||||
fi
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
||||
|
||||
- name: Extract tag name
|
||||
id: get-tag
|
||||
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Update version in package.json
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
|
||||
cat server/lib/consts.ts
|
||||
shell: bash
|
||||
|
||||
- name: Check if release candidate
|
||||
id: check-rc
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
if [[ "$TAG" == *"-rc."* ]]; then
|
||||
echo "IS_RC=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "IS_RC=false" >> $GITHUB_ENV
|
||||
fi
|
||||
shell: bash
|
||||
|
||||
- name: Build and push Docker images (Docker Hub - ARM64)
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
if [ "$IS_RC" = "true" ]; then
|
||||
make build-rc-arm tag=$TAG
|
||||
else
|
||||
make build-release-arm tag=$TAG
|
||||
fi
|
||||
echo "Built & pushed ARM64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
|
||||
shell: bash
|
||||
|
||||
release-amd:
|
||||
name: Build and Release (AMD64)
|
||||
runs-on: [self-hosted, linux, x64, us-east-1]
|
||||
needs: [pre-run]
|
||||
if: >-
|
||||
${{
|
||||
needs.pre-run.result == 'success'
|
||||
}}
|
||||
# Job-level timeout to avoid runaway or stuck runs
|
||||
timeout-minutes: 120
|
||||
env:
|
||||
# Target images
|
||||
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
|
||||
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
|
||||
- name: Monitor storage space
|
||||
run: |
|
||||
THRESHOLD=75
|
||||
USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
|
||||
echo "Used space: $USED_SPACE%"
|
||||
if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
|
||||
echo "Used space is below the threshold of 75% free. Running Docker system prune."
|
||||
echo y | docker system prune -a
|
||||
else
|
||||
echo "Storage space is above the threshold. No action needed."
|
||||
fi
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
||||
|
||||
- name: Extract tag name
|
||||
id: get-tag
|
||||
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Update version in package.json
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
|
||||
cat server/lib/consts.ts
|
||||
shell: bash
|
||||
|
||||
- name: Check if release candidate
|
||||
id: check-rc
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
if [[ "$TAG" == *"-rc."* ]]; then
|
||||
echo "IS_RC=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "IS_RC=false" >> $GITHUB_ENV
|
||||
fi
|
||||
shell: bash
|
||||
|
||||
- name: Build and push Docker images (Docker Hub - AMD64)
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
if [ "$IS_RC" = "true" ]; then
|
||||
make build-rc-amd tag=$TAG
|
||||
else
|
||||
make build-release-amd tag=$TAG
|
||||
fi
|
||||
echo "Built & pushed AMD64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
|
||||
shell: bash
|
||||
|
||||
create-manifest:
|
||||
name: Create Multi-Arch Manifests
|
||||
runs-on: [self-hosted, linux, x64, us-east-1]
|
||||
needs: [release-arm, release-amd]
|
||||
if: >-
|
||||
${{
|
||||
needs.release-arm.result == 'success' &&
|
||||
needs.release-amd.result == 'success'
|
||||
}}
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
||||
|
||||
- name: Extract tag name
|
||||
id: get-tag
|
||||
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Check if release candidate
|
||||
id: check-rc
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
if [[ "$TAG" == *"-rc."* ]]; then
|
||||
echo "IS_RC=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "IS_RC=false" >> $GITHUB_ENV
|
||||
fi
|
||||
shell: bash
|
||||
|
||||
- name: Create multi-arch manifests
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
if [ "$IS_RC" = "true" ]; then
|
||||
make create-manifests-rc tag=$TAG
|
||||
else
|
||||
make create-manifests tag=$TAG
|
||||
fi
|
||||
echo "Created multi-arch manifests for tag: ${TAG}"
|
||||
shell: bash
|
||||
|
||||
sign-and-package:
|
||||
name: Sign and Package
|
||||
runs-on: [self-hosted, linux, x64, us-east-1]
|
||||
needs: [release-arm, release-amd, create-manifest]
|
||||
if: >-
|
||||
${{
|
||||
needs.release-arm.result == 'success' &&
|
||||
needs.release-amd.result == 'success' &&
|
||||
needs.create-manifest.result == 'success'
|
||||
}}
|
||||
# Job-level timeout to avoid runaway or stuck runs
|
||||
timeout-minutes: 120
|
||||
env:
|
||||
# Target images
|
||||
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
|
||||
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
|
||||
- name: Extract tag name
|
||||
id: get-tag
|
||||
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
|
||||
with:
|
||||
go-version: 1.24
|
||||
|
||||
- name: Update version in package.json
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
|
||||
cat server/lib/consts.ts
|
||||
shell: bash
|
||||
|
||||
- name: Pull latest Gerbil version
|
||||
id: get-gerbil-tag
|
||||
run: |
|
||||
LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name')
|
||||
echo "LATEST_GERBIL_TAG=$LATEST_TAG" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Pull latest Badger version
|
||||
id: get-badger-tag
|
||||
run: |
|
||||
LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name')
|
||||
echo "LATEST_BADGER_TAG=$LATEST_TAG" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Update install/main.go
|
||||
run: |
|
||||
PANGOLIN_VERSION=${{ env.TAG }}
|
||||
GERBIL_VERSION=${{ env.LATEST_GERBIL_TAG }}
|
||||
BADGER_VERSION=${{ env.LATEST_BADGER_TAG }}
|
||||
sed -i "s/config.PangolinVersion = \".*\"/config.PangolinVersion = \"$PANGOLIN_VERSION\"/" install/main.go
|
||||
sed -i "s/config.GerbilVersion = \".*\"/config.GerbilVersion = \"$GERBIL_VERSION\"/" install/main.go
|
||||
sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$BADGER_VERSION\"/" install/main.go
|
||||
echo "Updated install/main.go with Pangolin version $PANGOLIN_VERSION, Gerbil version $GERBIL_VERSION, and Badger version $BADGER_VERSION"
|
||||
cat install/main.go
|
||||
shell: bash
|
||||
|
||||
- name: Build installer
|
||||
working-directory: install
|
||||
run: |
|
||||
make go-build-release
|
||||
|
||||
- name: Upload artifacts from /install/bin
|
||||
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
|
||||
with:
|
||||
name: install-bin
|
||||
path: install/bin/
|
||||
|
||||
- name: Install skopeo + jq
|
||||
# skopeo: copy/inspect images between registries
|
||||
# jq: JSON parsing tool used to extract digest values
|
||||
run: |
|
||||
sudo apt-get update -y
|
||||
sudo apt-get install -y skopeo jq
|
||||
skopeo --version
|
||||
shell: bash
|
||||
|
||||
- name: Login to GHCR
|
||||
env:
|
||||
REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
|
||||
run: |
|
||||
mkdir -p "$(dirname "$REGISTRY_AUTH_FILE")"
|
||||
skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
|
||||
shell: bash
|
||||
|
||||
- name: Copy tag from Docker Hub to GHCR
|
||||
# Mirror the already-built image (all architectures) to GHCR so we can sign it
|
||||
# Wait a bit for both architectures to be available in Docker Hub manifest
|
||||
env:
|
||||
REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
|
||||
run: |
|
||||
set -euo pipefail
|
||||
TAG=${{ env.TAG }}
|
||||
echo "Waiting for multi-arch manifest to be ready..."
|
||||
sleep 30
|
||||
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
|
||||
skopeo copy --all --retry-times 3 \
|
||||
docker://$DOCKERHUB_IMAGE:$TAG \
|
||||
docker://$GHCR_IMAGE:$TAG
|
||||
shell: bash
|
||||
|
||||
- name: Login to GitHub Container Registry (for cosign)
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Install cosign
|
||||
# cosign is used to sign and verify container images (key and keyless)
|
||||
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
|
||||
|
||||
- name: Dual-sign and verify (GHCR & Docker Hub)
|
||||
# Sign each image by digest using keyless (OIDC) and key-based signing,
|
||||
# then verify both the public key signature and the keyless OIDC signature.
|
||||
env:
|
||||
TAG: ${{ env.TAG }}
|
||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
|
||||
COSIGN_YES: "true"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
issuer="https://token.actions.githubusercontent.com"
|
||||
id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)
|
||||
|
||||
for IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
|
||||
echo "Processing ${IMAGE}:${TAG}"
|
||||
|
||||
DIGEST="$(skopeo inspect --retry-times 3 docker://${IMAGE}:${TAG} | jq -r '.Digest')"
|
||||
REF="${IMAGE}@${DIGEST}"
|
||||
echo "Resolved digest: ${REF}"
|
||||
|
||||
echo "==> cosign sign (keyless) --recursive ${REF}"
|
||||
cosign sign --recursive "${REF}"
|
||||
|
||||
echo "==> cosign sign (key) --recursive ${REF}"
|
||||
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"
|
||||
|
||||
echo "==> cosign verify (public key) ${REF}"
|
||||
cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text
|
||||
|
||||
echo "==> cosign verify (keyless policy) ${REF}"
|
||||
cosign verify \
|
||||
--certificate-oidc-issuer "${issuer}" \
|
||||
--certificate-identity-regexp "${id_regex}" \
|
||||
"${REF}" -o text
|
||||
done
|
||||
shell: bash
|
||||
|
||||
post-run:
|
||||
needs: [pre-run, release-arm, release-amd, create-manifest, sign-and-package]
|
||||
if: >-
|
||||
${{
|
||||
always() &&
|
||||
needs.pre-run.result == 'success' &&
|
||||
(needs.release-arm.result == 'success' || needs.release-arm.result == 'skipped' || needs.release-arm.result == 'failure') &&
|
||||
(needs.release-amd.result == 'success' || needs.release-amd.result == 'skipped' || needs.release-amd.result == 'failure') &&
|
||||
(needs.create-manifest.result == 'success' || needs.create-manifest.result == 'skipped' || needs.create-manifest.result == 'failure') &&
|
||||
(needs.sign-and-package.result == 'success' || needs.sign-and-package.result == 'skipped' || needs.sign-and-package.result == 'failure')
|
||||
}}
|
||||
runs-on: ubuntu-latest
|
||||
permissions: write-all
|
||||
steps:
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v2
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
|
||||
role-duration-seconds: 3600
|
||||
aws-region: ${{ secrets.AWS_REGION }}
|
||||
|
||||
- name: Verify AWS identity
|
||||
run: aws sts get-caller-identity
|
||||
|
||||
- name: Stop EC2 instances
|
||||
run: |
|
||||
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
|
||||
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
|
||||
echo "EC2 instances stopped"
|
||||
125  .github/workflows/saas.yml (vendored, new file)
@@ -0,0 +1,125 @@
|
||||
name: CI/CD Pipeline
|
||||
|
||||
# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
|
||||
# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write # for GHCR push
|
||||
id-token: write # for Cosign Keyless (OIDC) Signing
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "[0-9]+.[0-9]+.[0-9]+-s.[0-9]+"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
pre-run:
|
||||
runs-on: ubuntu-latest
|
||||
permissions: write-all
|
||||
steps:
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v2
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
|
||||
role-duration-seconds: 3600
|
||||
aws-region: ${{ secrets.AWS_REGION }}
|
||||
|
||||
- name: Verify AWS identity
|
||||
run: aws sts get-caller-identity
|
||||
|
||||
- name: Start EC2 instances
|
||||
run: |
|
||||
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
|
||||
echo "EC2 instances started"
|
||||
|
||||
|
||||
release-arm:
|
||||
name: Build and Release (ARM64)
|
||||
runs-on: [self-hosted, linux, arm64, us-east-1]
|
||||
needs: [pre-run]
|
||||
if: >-
|
||||
${{
|
||||
needs.pre-run.result == 'success'
|
||||
}}
|
||||
# Job-level timeout to avoid runaway or stuck runs
|
||||
timeout-minutes: 120
|
||||
env:
|
||||
# Target images
|
||||
AWS_IMAGE: ${{ secrets.aws_account_id }}.dkr.ecr.us-east-1.amazonaws.com/${{ github.event.repository.name }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
|
||||
- name: Monitor storage space
|
||||
run: |
|
||||
THRESHOLD=75
|
||||
USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
|
||||
echo "Used space: $USED_SPACE%"
|
||||
if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
|
||||
echo "Used space is below the threshold of 75% free. Running Docker system prune."
|
||||
echo y | docker system prune -a
|
||||
else
|
||||
echo "Storage space is above the threshold. No action needed."
|
||||
fi
|
||||
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v2
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::${{ secrets.aws_account_id }}:role/${{ secrets.AWS_ROLE_NAME }}
|
||||
role-duration-seconds: 3600
|
||||
aws-region: ${{ secrets.AWS_REGION }}
|
||||
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@v2
|
||||
|
||||
- name: Extract tag name
|
||||
id: get-tag
|
||||
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Update version in package.json
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
|
||||
cat server/lib/consts.ts
|
||||
shell: bash
|
||||
|
||||
- name: Build and push Docker images (Docker Hub - ARM64)
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
make build-saas tag=$TAG
|
||||
echo "Built & pushed ARM64 images to: ${{ env.AWS_IMAGE }}:${TAG}"
|
||||
shell: bash
|
||||
|
||||
post-run:
|
||||
needs: [pre-run, release-arm]
|
||||
if: >-
|
||||
${{
|
||||
always() &&
|
||||
needs.pre-run.result == 'success' &&
|
||||
(needs.release-arm.result == 'success' || needs.release-arm.result == 'skipped' || needs.release-arm.result == 'failure')
|
||||
}}
|
||||
runs-on: ubuntu-latest
|
||||
permissions: write-all
|
||||
steps:
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v2
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
|
||||
role-duration-seconds: 3600
|
||||
aws-region: ${{ secrets.AWS_REGION }}
|
||||
|
||||
- name: Verify AWS identity
|
||||
run: aws sts get-caller-identity
|
||||
|
||||
- name: Stop EC2 instances
|
||||
run: |
|
||||
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
|
||||
echo "EC2 instances stopped"
|
||||
12  Makefile
@@ -67,6 +67,18 @@ build-ee-postgresql:
--tag fosrl/pangolin:ee-postgresql-$(tag) \
--push .

build-saas:
@if [ -z "$(tag)" ]; then \
echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
exit 1; \
fi
docker buildx build \
--build-arg BUILD=saas \
--build-arg DATABASE=pg \
--platform linux/arm64 \
--tag $(AWS_IMAGE):$(tag) \
--push .

build-release-arm:
@if [ -z "$(tag)" ]; then \
echo "Error: tag is required. Usage: make build-release-arm tag=<tag>"; \
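A minimal usage sketch for the new `build-saas` target; the ECR registry value is a placeholder, and `AWS_IMAGE` is assumed to come from the environment (as it does in the saas.yml job above):

```bash
# Illustrative invocation; the account ID and tag are placeholders.
export AWS_IMAGE=123456789012.dkr.ecr.us-east-1.amazonaws.com/pangolin
make build-saas tag=1.14.1-s.3
```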
@@ -43,9 +43,12 @@ entryPoints:
|
||||
http:
|
||||
tls:
|
||||
certResolver: "letsencrypt"
|
||||
encodedCharacters:
|
||||
allowEncodedSlash: true
|
||||
allowEncodedQuestionMark: true
|
||||
|
||||
serversTransport:
|
||||
insecureSkipVerify: true
|
||||
|
||||
ping:
|
||||
entryPoint: "web"
|
||||
entryPoint: "web"
|
||||
|
||||
@@ -340,7 +340,7 @@ func collectUserInput(reader *bufio.Reader) Config {
|
||||
// Basic configuration
|
||||
fmt.Println("\n=== Basic Configuration ===")
|
||||
|
||||
config.IsEnterprise = readBoolNoDefault(reader, "Do you want to install the Enterprise version of Pangolin? The EE is free for persoal use or for businesses making less than 100k USD annually.")
|
||||
config.IsEnterprise = readBoolNoDefault(reader, "Do you want to install the Enterprise version of Pangolin? The EE is free for personal use or for businesses making less than 100k USD annually.")
|
||||
|
||||
config.BaseDomain = readString(reader, "Enter your base domain (no subdomain e.g. example.com)", "")
|
||||
|
||||
|
||||
@@ -2244,7 +2244,7 @@
|
||||
"deviceOrganizationsAccess": "Access to all organizations your account has access to",
|
||||
"deviceAuthorize": "Authorize {applicationName}",
|
||||
"deviceConnected": "Device Connected!",
|
||||
"deviceAuthorizedMessage": "Device is authorized to access your account.",
|
||||
"deviceAuthorizedMessage": "Device is authorized to access your account. Please return to the client application.",
|
||||
"pangolinCloud": "Pangolin Cloud",
|
||||
"viewDevices": "View Devices",
|
||||
"viewDevicesDescription": "Manage your connected devices",
|
||||
|
||||
@@ -290,8 +290,8 @@ export const ClientResourceSchema = z
alias: z
.string()
.regex(
/^(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$/,
"Alias must be a fully qualified domain name (e.g., example.com)"
/^(?:[a-zA-Z0-9*?](?:[a-zA-Z0-9*?-]{0,61}[a-zA-Z0-9*?])?\.)+[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$/,
"Alias must be a fully qualified domain name with optional wildcards (e.g., example.com, *.example.com, host-0?.example.internal)"
)
.optional(),
roles: z
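To make the widened alias pattern concrete, a small sketch of which values it accepts, using the same regex outside zod (GNU grep with PCRE support is an assumption here; the hostnames are illustrative):

```bash
# Same pattern as the updated schema: labels may contain * and ?, but the final (rightmost) label may not.
pattern='^(?:[a-zA-Z0-9*?](?:[a-zA-Z0-9*?-]{0,61}[a-zA-Z0-9*?])?\.)+[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$'
printf '%s\n' 'example.com' '*.example.com' 'host-0?.example.internal' 'example' '-bad-.example.com' \
    | grep -P "$pattern"
# Prints the first three; 'example' (no dot) and '-bad-.example.com' (label starts with "-") are rejected.
```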
@@ -13,4 +13,3 @@ export * from "./verifyApiKeyIsRoot";
|
||||
export * from "./verifyApiKeyApiKeyAccess";
|
||||
export * from "./verifyApiKeyClientAccess";
|
||||
export * from "./verifyApiKeySiteResourceAccess";
|
||||
export * from "./verifyApiKeyIdpAccess";
|
||||
|
||||
@@ -1,88 +0,0 @@
|
||||
import { Request, Response, NextFunction } from "express";
|
||||
import { db } from "@server/db";
|
||||
import { idp, idpOrg, apiKeyOrg } from "@server/db";
|
||||
import { and, eq } from "drizzle-orm";
|
||||
import createHttpError from "http-errors";
|
||||
import HttpCode from "@server/types/HttpCode";
|
||||
|
||||
export async function verifyApiKeyIdpAccess(
|
||||
req: Request,
|
||||
res: Response,
|
||||
next: NextFunction
|
||||
) {
|
||||
try {
|
||||
const apiKey = req.apiKey;
|
||||
const idpId = req.params.idpId || req.body.idpId || req.query.idpId;
|
||||
const orgId = req.params.orgId;
|
||||
|
||||
if (!apiKey) {
|
||||
return next(
|
||||
createHttpError(HttpCode.UNAUTHORIZED, "Key not authenticated")
|
||||
);
|
||||
}
|
||||
|
||||
if (!orgId) {
|
||||
return next(
|
||||
createHttpError(HttpCode.BAD_REQUEST, "Invalid organization ID")
|
||||
);
|
||||
}
|
||||
|
||||
if (!idpId) {
|
||||
return next(
|
||||
createHttpError(HttpCode.BAD_REQUEST, "Invalid IDP ID")
|
||||
);
|
||||
}
|
||||
|
||||
if (apiKey.isRoot) {
|
||||
// Root keys can access any IDP in any org
|
||||
return next();
|
||||
}
|
||||
|
||||
const [idpRes] = await db
|
||||
.select()
|
||||
.from(idp)
|
||||
.innerJoin(idpOrg, eq(idp.idpId, idpOrg.idpId))
|
||||
.where(and(eq(idp.idpId, idpId), eq(idpOrg.orgId, orgId)))
|
||||
.limit(1);
|
||||
|
||||
if (!idpRes || !idpRes.idp || !idpRes.idpOrg) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.NOT_FOUND,
|
||||
`IdP with ID ${idpId} not found for organization ${orgId}`
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
if (!req.apiKeyOrg) {
|
||||
const apiKeyOrgRes = await db
|
||||
.select()
|
||||
.from(apiKeyOrg)
|
||||
.where(
|
||||
and(
|
||||
eq(apiKeyOrg.apiKeyId, apiKey.apiKeyId),
|
||||
eq(apiKeyOrg.orgId, idpRes.idpOrg.orgId)
|
||||
)
|
||||
);
|
||||
req.apiKeyOrg = apiKeyOrgRes[0];
|
||||
}
|
||||
|
||||
if (!req.apiKeyOrg) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.FORBIDDEN,
|
||||
"Key does not have access to this organization"
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
return next();
|
||||
} catch (error) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.INTERNAL_SERVER_ERROR,
|
||||
"Error verifying IDP access"
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -139,10 +139,6 @@ export class PrivateConfig {
|
||||
process.env.USE_PANGOLIN_DNS =
|
||||
this.rawPrivateConfig.flags.use_pangolin_dns.toString();
|
||||
}
|
||||
if (this.rawPrivateConfig.flags.use_org_only_idp) {
|
||||
process.env.USE_ORG_ONLY_IDP =
|
||||
this.rawPrivateConfig.flags.use_org_only_idp.toString();
|
||||
}
|
||||
}
|
||||
|
||||
public getRawPrivateConfig() {
|
||||
|
||||
@@ -288,7 +288,7 @@ export function selectBestExitNode(
|
||||
const validNodes = pingResults.filter((n) => !n.error && n.weight > 0);
|
||||
|
||||
if (validNodes.length === 0) {
|
||||
logger.error("No valid exit nodes available");
|
||||
logger.debug("No valid exit nodes available");
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
@@ -24,7 +24,9 @@ export class LockManager {
|
||||
*/
|
||||
async acquireLock(
|
||||
lockKey: string,
|
||||
ttlMs: number = 30000
|
||||
ttlMs: number = 30000,
|
||||
maxRetries: number = 3,
|
||||
retryDelayMs: number = 100
|
||||
): Promise<boolean> {
|
||||
if (!redis || !redis.status || redis.status !== "ready") {
|
||||
return true;
|
||||
@@ -35,49 +37,67 @@ export class LockManager {
|
||||
}:${Date.now()}`;
|
||||
const redisKey = `lock:${lockKey}`;
|
||||
|
||||
try {
|
||||
// Use SET with NX (only set if not exists) and PX (expire in milliseconds)
|
||||
// This is atomic and handles both setting and expiration
|
||||
const result = await redis.set(
|
||||
redisKey,
|
||||
lockValue,
|
||||
"PX",
|
||||
ttlMs,
|
||||
"NX"
|
||||
);
|
||||
|
||||
if (result === "OK") {
|
||||
logger.debug(
|
||||
`Lock acquired: ${lockKey} by ${
|
||||
config.getRawConfig().gerbil.exit_node_name
|
||||
}`
|
||||
for (let attempt = 0; attempt < maxRetries; attempt++) {
|
||||
try {
|
||||
// Use SET with NX (only set if not exists) and PX (expire in milliseconds)
|
||||
// This is atomic and handles both setting and expiration
|
||||
const result = await redis.set(
|
||||
redisKey,
|
||||
lockValue,
|
||||
"PX",
|
||||
ttlMs,
|
||||
"NX"
|
||||
);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check if the existing lock is from this worker (reentrant behavior)
|
||||
const existingValue = await redis.get(redisKey);
|
||||
if (
|
||||
existingValue &&
|
||||
existingValue.startsWith(
|
||||
`${config.getRawConfig().gerbil.exit_node_name}:`
|
||||
)
|
||||
) {
|
||||
// Extend the lock TTL since it's the same worker
|
||||
await redis.pexpire(redisKey, ttlMs);
|
||||
logger.debug(
|
||||
`Lock extended: ${lockKey} by ${
|
||||
config.getRawConfig().gerbil.exit_node_name
|
||||
}`
|
||||
);
|
||||
return true;
|
||||
}
|
||||
if (result === "OK") {
|
||||
logger.debug(
|
||||
`Lock acquired: ${lockKey} by ${
|
||||
config.getRawConfig().gerbil.exit_node_name
|
||||
}`
|
||||
);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
} catch (error) {
|
||||
logger.error(`Failed to acquire lock ${lockKey}:`, error);
|
||||
return false;
|
||||
// Check if the existing lock is from this worker (reentrant behavior)
|
||||
const existingValue = await redis.get(redisKey);
|
||||
if (
|
||||
existingValue &&
|
||||
existingValue.startsWith(
|
||||
`${config.getRawConfig().gerbil.exit_node_name}:`
|
||||
)
|
||||
) {
|
||||
// Extend the lock TTL since it's the same worker
|
||||
await redis.pexpire(redisKey, ttlMs);
|
||||
logger.debug(
|
||||
`Lock extended: ${lockKey} by ${
|
||||
config.getRawConfig().gerbil.exit_node_name
|
||||
}`
|
||||
);
|
||||
return true;
|
||||
}
|
||||
|
||||
// If this isn't our last attempt, wait before retrying with exponential backoff
|
||||
if (attempt < maxRetries - 1) {
|
||||
const delay = retryDelayMs * Math.pow(2, attempt);
|
||||
logger.debug(
|
||||
`Lock ${lockKey} not available, retrying in ${delay}ms (attempt ${attempt + 1}/${maxRetries})`
|
||||
);
|
||||
await new Promise((resolve) => setTimeout(resolve, delay));
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error(`Failed to acquire lock ${lockKey} (attempt ${attempt + 1}/${maxRetries}):`, error);
|
||||
// On error, still retry if we have attempts left
|
||||
if (attempt < maxRetries - 1) {
|
||||
const delay = retryDelayMs * Math.pow(2, attempt);
|
||||
await new Promise((resolve) => setTimeout(resolve, delay));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logger.debug(
|
||||
`Failed to acquire lock ${lockKey} after ${maxRetries} attempts`
|
||||
);
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
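The retry loop above ultimately relies on Redis's atomic `SET key value PX ttl NX`; a minimal redis-cli sketch of that behavior (the key name, values, and TTL are illustrative):

```bash
# First SET takes the lock with a 30s expiry; a second holder is rejected while it is held.
redis-cli SET lock:write-config "exit-node-a:1700000000000" PX 30000 NX   # -> OK
redis-cli SET lock:write-config "exit-node-b:1700000000001" PX 30000 NX   # -> (nil)
# The same-holder path in acquireLock() extends the TTL instead of re-acquiring:
redis-cli PEXPIRE lock:write-config 30000                                 # -> (integer) 1
```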
@@ -83,8 +83,7 @@ export const privateConfigSchema = z.object({
|
||||
flags: z
|
||||
.object({
|
||||
enable_redis: z.boolean().optional().default(false),
|
||||
use_pangolin_dns: z.boolean().optional().default(false),
|
||||
use_org_only_idp: z.boolean().optional().default(false)
|
||||
use_pangolin_dns: z.boolean().optional().default(false)
|
||||
})
|
||||
.optional()
|
||||
.prefault({}),
|
||||
|
||||
@@ -456,11 +456,11 @@ export async function getTraefikConfig(
|
||||
// );
|
||||
} else if (resource.maintenanceModeType === "automatic") {
|
||||
showMaintenancePage = !hasHealthyServers;
|
||||
if (showMaintenancePage) {
|
||||
logger.warn(
|
||||
`Resource ${resource.name} (${fullDomain}) has no healthy servers - showing maintenance page (AUTOMATIC mode)`
|
||||
);
|
||||
}
|
||||
// if (showMaintenancePage) {
|
||||
// logger.warn(
|
||||
// `Resource ${resource.name} (${fullDomain}) has no healthy servers - showing maintenance page (AUTOMATIC mode)`
|
||||
// );
|
||||
// }
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -27,7 +27,18 @@ export async function verifyValidSubscription(
|
||||
return next();
|
||||
}
|
||||
|
||||
const tier = await getOrgTierData(req.params.orgId);
|
||||
const orgId = req.params.orgId || req.body.orgId || req.query.orgId || req.userOrgId;
|
||||
|
||||
if (!orgId) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.BAD_REQUEST,
|
||||
"Organization ID is required to verify subscription"
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
const tier = await getOrgTierData(orgId);
|
||||
|
||||
if (!tier.active) {
|
||||
return next(
|
||||
|
||||
@@ -436,18 +436,18 @@ authenticated.get(
|
||||
|
||||
authenticated.post(
|
||||
"/re-key/:clientId/regenerate-client-secret",
|
||||
verifyClientAccess, // this is first to set the org id
|
||||
verifyValidLicense,
|
||||
verifyValidSubscription,
|
||||
verifyClientAccess,
|
||||
verifyUserHasAction(ActionsEnum.reGenerateSecret),
|
||||
reKey.reGenerateClientSecret
|
||||
);
|
||||
|
||||
authenticated.post(
|
||||
"/re-key/:siteId/regenerate-site-secret",
|
||||
verifySiteAccess, // this is first to set the org id
|
||||
verifyValidLicense,
|
||||
verifyValidSubscription,
|
||||
verifySiteAccess,
|
||||
verifyUserHasAction(ActionsEnum.reGenerateSecret),
|
||||
reKey.reGenerateSiteSecret
|
||||
);
|
||||
|
||||
@@ -18,8 +18,7 @@ import * as logs from "#private/routers/auditLogs";
|
||||
import {
|
||||
verifyApiKeyHasAction,
|
||||
verifyApiKeyIsRoot,
|
||||
verifyApiKeyOrgAccess,
|
||||
verifyApiKeyIdpAccess
|
||||
verifyApiKeyOrgAccess
|
||||
} from "@server/middlewares";
|
||||
import {
|
||||
verifyValidSubscription,
|
||||
@@ -32,8 +31,6 @@ import {
|
||||
authenticated as a
|
||||
} from "@server/routers/integration";
|
||||
import { logActionAudit } from "#private/middlewares";
|
||||
import config from "#private/lib/config";
|
||||
import { build } from "@server/build";
|
||||
|
||||
export const unauthenticated = ua;
|
||||
export const authenticated = a;
|
||||
@@ -91,49 +88,3 @@ authenticated.get(
|
||||
logActionAudit(ActionsEnum.exportLogs),
|
||||
logs.exportAccessAuditLogs
|
||||
);
|
||||
|
||||
authenticated.put(
|
||||
"/org/:orgId/idp/oidc",
|
||||
verifyValidLicense,
|
||||
verifyApiKeyOrgAccess,
|
||||
verifyApiKeyHasAction(ActionsEnum.createIdp),
|
||||
logActionAudit(ActionsEnum.createIdp),
|
||||
orgIdp.createOrgOidcIdp
|
||||
);
|
||||
|
||||
authenticated.post(
|
||||
"/org/:orgId/idp/:idpId/oidc",
|
||||
verifyValidLicense,
|
||||
verifyApiKeyOrgAccess,
|
||||
verifyApiKeyIdpAccess,
|
||||
verifyApiKeyHasAction(ActionsEnum.updateIdp),
|
||||
logActionAudit(ActionsEnum.updateIdp),
|
||||
orgIdp.updateOrgOidcIdp
|
||||
);
|
||||
|
||||
authenticated.delete(
|
||||
"/org/:orgId/idp/:idpId",
|
||||
verifyValidLicense,
|
||||
verifyApiKeyOrgAccess,
|
||||
verifyApiKeyIdpAccess,
|
||||
verifyApiKeyHasAction(ActionsEnum.deleteIdp),
|
||||
logActionAudit(ActionsEnum.deleteIdp),
|
||||
orgIdp.deleteOrgIdp
|
||||
);
|
||||
|
||||
authenticated.get(
|
||||
"/org/:orgId/idp/:idpId",
|
||||
verifyValidLicense,
|
||||
verifyApiKeyOrgAccess,
|
||||
verifyApiKeyIdpAccess,
|
||||
verifyApiKeyHasAction(ActionsEnum.getIdp),
|
||||
orgIdp.getOrgIdp
|
||||
);
|
||||
|
||||
authenticated.get(
|
||||
"/org/:orgId/idp",
|
||||
verifyValidLicense,
|
||||
verifyApiKeyOrgAccess,
|
||||
verifyApiKeyHasAction(ActionsEnum.listIdps),
|
||||
orgIdp.listOrgIdps
|
||||
);
|
||||
|
||||
@@ -28,7 +28,6 @@ import { eq, InferInsertModel } from "drizzle-orm";
|
||||
import { getOrgTierData } from "#private/lib/billing";
|
||||
import { TierId } from "@server/lib/billing/tiers";
|
||||
import { build } from "@server/build";
|
||||
import config from "@server/private/lib/config";
|
||||
|
||||
const paramsSchema = z.strictObject({
|
||||
orgId: z.string()
|
||||
@@ -95,10 +94,8 @@ export async function upsertLoginPageBranding(
|
||||
typeof loginPageBranding
|
||||
>;
|
||||
|
||||
if (
|
||||
build !== "saas" &&
|
||||
!config.getRawPrivateConfig().flags.use_org_only_idp
|
||||
) {
|
||||
if (build !== "saas") {
|
||||
// org branding settings are only considered in the saas build
|
||||
const { orgTitle, orgSubtitle, ...rest } = updateData;
|
||||
updateData = rest;
|
||||
}
|
||||
|
||||
@@ -46,23 +46,22 @@ const bodySchema = z.strictObject({
|
||||
roleMapping: z.string().optional()
|
||||
});
|
||||
|
||||
registry.registerPath({
|
||||
method: "put",
|
||||
path: "/org/{orgId}/idp/oidc",
|
||||
description: "Create an OIDC IdP for a specific organization.",
|
||||
tags: [OpenAPITags.Idp, OpenAPITags.Org],
|
||||
request: {
|
||||
params: paramsSchema,
|
||||
body: {
|
||||
content: {
|
||||
"application/json": {
|
||||
schema: bodySchema
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
responses: {}
|
||||
});
|
||||
// registry.registerPath({
|
||||
// method: "put",
|
||||
// path: "/idp/oidc",
|
||||
// description: "Create an OIDC IdP.",
|
||||
// tags: [OpenAPITags.Idp],
|
||||
// request: {
|
||||
// body: {
|
||||
// content: {
|
||||
// "application/json": {
|
||||
// schema: bodySchema
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// },
|
||||
// responses: {}
|
||||
// });
|
||||
|
||||
export async function createOrgOidcIdp(
|
||||
req: Request,
|
||||
|
||||
@@ -32,9 +32,9 @@ const paramsSchema = z
|
||||
|
||||
registry.registerPath({
|
||||
method: "delete",
|
||||
path: "/org/{orgId}/idp/{idpId}",
|
||||
description: "Delete IDP for a specific organization.",
|
||||
tags: [OpenAPITags.Idp, OpenAPITags.Org],
|
||||
path: "/idp/{idpId}",
|
||||
description: "Delete IDP.",
|
||||
tags: [OpenAPITags.Idp],
|
||||
request: {
|
||||
params: paramsSchema
|
||||
},
|
||||
|
||||
@@ -48,16 +48,16 @@ async function query(idpId: number, orgId: string) {
|
||||
return res;
|
||||
}
|
||||
|
||||
registry.registerPath({
|
||||
method: "get",
|
||||
path: "/org/:orgId/idp/:idpId",
|
||||
description: "Get an IDP by its IDP ID for a specific organization.",
|
||||
tags: [OpenAPITags.Idp, OpenAPITags.Org],
|
||||
request: {
|
||||
params: paramsSchema
|
||||
},
|
||||
responses: {}
|
||||
});
|
||||
// registry.registerPath({
|
||||
// method: "get",
|
||||
// path: "/idp/{idpId}",
|
||||
// description: "Get an IDP by its IDP ID.",
|
||||
// tags: [OpenAPITags.Idp],
|
||||
// request: {
|
||||
// params: paramsSchema
|
||||
// },
|
||||
// responses: {}
|
||||
// });
|
||||
|
||||
export async function getOrgIdp(
|
||||
req: Request,
|
||||
|
||||
@@ -62,17 +62,16 @@ async function query(orgId: string, limit: number, offset: number) {
|
||||
return res;
|
||||
}
|
||||
|
||||
registry.registerPath({
|
||||
method: "get",
|
||||
path: "/org/{orgId}/idp",
|
||||
description: "List all IDP for a specific organization.",
|
||||
tags: [OpenAPITags.Idp, OpenAPITags.Org],
|
||||
request: {
|
||||
query: querySchema,
|
||||
params: paramsSchema
|
||||
},
|
||||
responses: {}
|
||||
});
|
||||
// registry.registerPath({
|
||||
// method: "get",
|
||||
// path: "/idp",
|
||||
// description: "List all IDP in the system.",
|
||||
// tags: [OpenAPITags.Idp],
|
||||
// request: {
|
||||
// query: querySchema
|
||||
// },
|
||||
// responses: {}
|
||||
// });
|
||||
|
||||
export async function listOrgIdps(
|
||||
req: Request,
|
||||
|
||||
@@ -53,23 +53,23 @@ export type UpdateOrgIdpResponse = {
|
||||
idpId: number;
|
||||
};
|
||||
|
||||
registry.registerPath({
|
||||
method: "post",
|
||||
path: "/org/{orgId}/idp/{idpId}/oidc",
|
||||
description: "Update an OIDC IdP for a specific organization.",
|
||||
tags: [OpenAPITags.Idp, OpenAPITags.Org],
|
||||
request: {
|
||||
params: paramsSchema,
|
||||
body: {
|
||||
content: {
|
||||
"application/json": {
|
||||
schema: bodySchema
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
responses: {}
|
||||
});
|
||||
// registry.registerPath({
|
||||
// method: "post",
|
||||
// path: "/idp/{idpId}/oidc",
|
||||
// description: "Update an OIDC IdP.",
|
||||
// tags: [OpenAPITags.Idp],
|
||||
// request: {
|
||||
// params: paramsSchema,
|
||||
// body: {
|
||||
// content: {
|
||||
// "application/json": {
|
||||
// schema: bodySchema
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// },
|
||||
// responses: {}
|
||||
// });
|
||||
|
||||
export async function updateOrgOidcIdp(
|
||||
req: Request,
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
import { pullEnv } from "@app/lib/pullEnv";
|
||||
import { build } from "@server/build";
|
||||
import { redirect } from "next/navigation";
|
||||
|
||||
interface LayoutProps {
|
||||
children: React.ReactNode;
|
||||
params: Promise<{}>;
|
||||
}
|
||||
|
||||
export default async function Layout(props: LayoutProps) {
|
||||
const env = pullEnv();
|
||||
|
||||
if (build !== "saas" && !env.flags.useOrgOnlyIdp) {
|
||||
redirect("/");
|
||||
}
|
||||
|
||||
return props.children;
|
||||
}
|
||||
@@ -82,7 +82,7 @@ export default async function SettingsLayout(props: SettingsLayoutProps) {
|
||||
<Layout
|
||||
orgId={params.orgId}
|
||||
orgs={orgs}
|
||||
navItems={orgNavSections(env)}
|
||||
navItems={orgNavSections()}
|
||||
>
|
||||
{children}
|
||||
</Layout>
|
||||
|
||||
@@ -36,8 +36,8 @@ import {
|
||||
import type { ResourceContextType } from "@app/contexts/resourceContext";
|
||||
import { useEnvContext } from "@app/hooks/useEnvContext";
|
||||
import { useOrgContext } from "@app/hooks/useOrgContext";
|
||||
import { usePaidStatus } from "@app/hooks/usePaidStatus";
|
||||
import { useResourceContext } from "@app/hooks/useResourceContext";
|
||||
import { useSubscriptionStatusContext } from "@app/hooks/useSubscriptionStatusContext";
|
||||
import { toast } from "@app/hooks/useToast";
|
||||
import { createApiClient, formatAxiosError } from "@app/lib/api";
|
||||
import { orgQueries, resourceQueries } from "@app/lib/queries";
|
||||
@@ -95,7 +95,7 @@ export default function ResourceAuthenticationPage() {
|
||||
const router = useRouter();
|
||||
const t = useTranslations();
|
||||
|
||||
const { isPaidUser } = usePaidStatus();
|
||||
const subscription = useSubscriptionStatusContext();
|
||||
|
||||
const queryClient = useQueryClient();
|
||||
const { data: resourceRoles = [], isLoading: isLoadingResourceRoles } =
|
||||
@@ -129,8 +129,7 @@ export default function ResourceAuthenticationPage() {
|
||||
);
|
||||
const { data: orgIdps = [], isLoading: isLoadingOrgIdps } = useQuery(
|
||||
orgQueries.identityProviders({
|
||||
orgId: org.org.orgId,
|
||||
useOrgOnlyIdp: env.flags.useOrgOnlyIdp
|
||||
orgId: org.org.orgId
|
||||
})
|
||||
);
|
||||
|
||||
@@ -160,7 +159,7 @@ export default function ResourceAuthenticationPage() {
|
||||
|
||||
const allIdps = useMemo(() => {
|
||||
if (build === "saas") {
|
||||
if (isPaidUser) {
|
||||
if (subscription?.subscribed) {
|
||||
return orgIdps.map((idp) => ({
|
||||
id: idp.idpId,
|
||||
text: idp.name
|
||||
@@ -768,6 +767,8 @@ export default function ResourceAuthenticationPage() {
|
||||
<OneTimePasswordFormSection
|
||||
resource={resource}
|
||||
updateResource={updateResource}
|
||||
whitelist={whitelist}
|
||||
isLoadingWhiteList={isLoadingWhiteList}
|
||||
/>
|
||||
</SettingsContainer>
|
||||
</>
|
||||
@@ -777,11 +778,16 @@ export default function ResourceAuthenticationPage() {
|
||||
type OneTimePasswordFormSectionProps = Pick<
|
||||
ResourceContextType,
|
||||
"resource" | "updateResource"
|
||||
>;
|
||||
> & {
|
||||
whitelist: Array<{ email: string }>;
|
||||
isLoadingWhiteList: boolean;
|
||||
};
|
||||
|
||||
function OneTimePasswordFormSection({
|
||||
resource,
|
||||
updateResource
|
||||
updateResource,
|
||||
whitelist,
|
||||
isLoadingWhiteList
|
||||
}: OneTimePasswordFormSectionProps) {
|
||||
const { env } = useEnvContext();
|
||||
const [whitelistEnabled, setWhitelistEnabled] = useState(
|
||||
@@ -802,6 +808,18 @@ function OneTimePasswordFormSection({
|
||||
number | null
|
||||
>(null);
|
||||
|
||||
useEffect(() => {
|
||||
if (isLoadingWhiteList) return;
|
||||
|
||||
whitelistForm.setValue(
|
||||
"emails",
|
||||
whitelist.map((w) => ({
|
||||
id: w.email,
|
||||
text: w.email
|
||||
}))
|
||||
);
|
||||
}, [isLoadingWhiteList, whitelist, whitelistForm]);
|
||||
|
||||
async function saveWhitelist() {
|
||||
try {
|
||||
await api.post(`/resource/${resource.resourceId}`, {
|
||||
|
||||
@@ -11,7 +11,6 @@ import { AxiosResponse } from "axios";
|
||||
import { authCookieHeader } from "@app/lib/api/cookies";
|
||||
import { Layout } from "@app/components/Layout";
|
||||
import { adminNavSections } from "../navigation";
|
||||
import { pullEnv } from "@app/lib/pullEnv";
|
||||
|
||||
export const dynamic = "force-dynamic";
|
||||
|
||||
@@ -28,8 +27,6 @@ export default async function AdminLayout(props: LayoutProps) {
|
||||
const getUser = cache(verifySession);
|
||||
const user = await getUser();
|
||||
|
||||
const env = pullEnv();
|
||||
|
||||
if (!user || !user.serverAdmin) {
|
||||
redirect(`/`);
|
||||
}
|
||||
@@ -51,7 +48,7 @@ export default async function AdminLayout(props: LayoutProps) {
|
||||
|
||||
return (
|
||||
<UserProvider user={user}>
|
||||
<Layout orgs={orgs} navItems={adminNavSections(env)}>
|
||||
<Layout orgs={orgs} navItems={adminNavSections}>
|
||||
{props.children}
|
||||
</Layout>
|
||||
</UserProvider>
|
||||
|
||||
@@ -7,6 +7,7 @@ import { useLicenseStatusContext } from "@app/hooks/useLicenseStatusContext";
|
||||
import { CheckCircle2 } from "lucide-react";
|
||||
import { useTranslations } from "next-intl";
|
||||
import Link from "next/link";
|
||||
import { useEffect } from "react";
|
||||
|
||||
export default function DeviceAuthSuccessPage() {
|
||||
const { env } = useEnvContext();
|
||||
@@ -20,6 +21,32 @@ export default function DeviceAuthSuccessPage() {
|
||||
? env.branding.logo?.authPage?.height || 58
|
||||
: 58;
|
||||
|
||||
useEffect(() => {
|
||||
// Detect if we're on iOS or Android
|
||||
const userAgent = navigator.userAgent || navigator.vendor || (window as any).opera;
|
||||
const isIOS = /iPad|iPhone|iPod/.test(userAgent) && !(window as any).MSStream;
|
||||
const isAndroid = /android/i.test(userAgent);
|
||||
|
||||
if (isAndroid) {
|
||||
// For Android Chrome Custom Tabs, use intent:// scheme which works more reliably
|
||||
// This explicitly tells Chrome to send an intent to the app, which will bring
|
||||
// SignInCodeActivity back to the foreground (it has launchMode="singleTop")
|
||||
setTimeout(() => {
|
||||
window.location.href = "intent://auth-success#Intent;scheme=pangolin;package=net.pangolin.Pangolin;end";
|
||||
}, 500);
|
||||
} else if (isIOS) {
|
||||
// Wait 500ms then attempt to open the app
|
||||
setTimeout(() => {
|
||||
// Try to open the app using deep link
|
||||
window.location.href = "pangolin://";
|
||||
|
||||
setTimeout(() => {
|
||||
window.location.href = "https://apps.apple.com/app/pangolin/net.pangolin.Pangolin.PangoliniOS";
|
||||
}, 2000);
|
||||
}, 500);
|
||||
}
|
||||
}, []);
|
||||
|
||||
return (
|
||||
<>
|
||||
<Card>
|
||||
@@ -55,4 +82,4 @@ export default function DeviceAuthSuccessPage() {
|
||||
</p>
|
||||
</>
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -70,7 +70,7 @@ export default async function Page(props: {
|
||||
}
|
||||
|
||||
let loginIdps: LoginFormIDP[] = [];
|
||||
if (build === "oss" || !env.flags.useOrgOnlyIdp) {
|
||||
if (build !== "saas") {
|
||||
const idpsRes = await cache(
|
||||
async () => await priv.get<AxiosResponse<ListIdpsResponse>>("/idp")
|
||||
)();
|
||||
@@ -121,7 +121,7 @@ export default async function Page(props: {
|
||||
</p>
|
||||
)}
|
||||
|
||||
{!isInvite && (build === "saas" || env.flags.useOrgOnlyIdp) ? (
|
||||
{!isInvite && build === "saas" ? (
|
||||
<div className="text-center text-muted-foreground mt-12 flex flex-col items-center">
|
||||
<span>{t("needToSignInToOrg")}</span>
|
||||
<Link
|
||||
|
||||
@@ -11,7 +11,6 @@ import {
|
||||
} from "@server/routers/loginPage/types";
|
||||
import { redirect } from "next/navigation";
|
||||
import OrgLoginPage from "@app/components/OrgLoginPage";
|
||||
import { pullEnv } from "@app/lib/pullEnv";
|
||||
|
||||
export const dynamic = "force-dynamic";
|
||||
|
||||
@@ -22,9 +21,7 @@ export default async function OrgAuthPage(props: {
|
||||
const searchParams = await props.searchParams;
|
||||
const params = await props.params;
|
||||
|
||||
const env = pullEnv();
|
||||
|
||||
if (build !== "saas" && !env.flags.useOrgOnlyIdp) {
|
||||
if (build !== "saas") {
|
||||
const queryString = new URLSearchParams(searchParams as any).toString();
|
||||
redirect(`/auth/login${queryString ? `?${queryString}` : ""}`);
|
||||
}
|
||||
@@ -53,25 +50,29 @@ export default async function OrgAuthPage(props: {
|
||||
} catch (e) {}
|
||||
|
||||
let loginIdps: LoginFormIDP[] = [];
|
||||
const idpsRes = await priv.get<AxiosResponse<ListOrgIdpsResponse>>(
|
||||
`/org/${orgId}/idp`
|
||||
);
|
||||
if (build === "saas") {
|
||||
const idpsRes = await priv.get<AxiosResponse<ListOrgIdpsResponse>>(
|
||||
`/org/${orgId}/idp`
|
||||
);
|
||||
|
||||
loginIdps = idpsRes.data.data.idps.map((idp) => ({
|
||||
idpId: idp.idpId,
|
||||
name: idp.name,
|
||||
variant: idp.variant
|
||||
})) as LoginFormIDP[];
|
||||
loginIdps = idpsRes.data.data.idps.map((idp) => ({
|
||||
idpId: idp.idpId,
|
||||
name: idp.name,
|
||||
variant: idp.variant
|
||||
})) as LoginFormIDP[];
|
||||
}
|
||||
|
||||
let branding: LoadLoginPageBrandingResponse | null = null;
|
||||
try {
|
||||
const res = await priv.get<
|
||||
AxiosResponse<LoadLoginPageBrandingResponse>
|
||||
>(`/login-page-branding?orgId=${orgId}`);
|
||||
if (res.status === 200) {
|
||||
branding = res.data.data;
|
||||
}
|
||||
} catch (error) {}
|
||||
if (build === "saas") {
|
||||
try {
|
||||
const res = await priv.get<
|
||||
AxiosResponse<LoadLoginPageBrandingResponse>
|
||||
>(`/login-page-branding?orgId=${orgId}`);
|
||||
if (res.status === 200) {
|
||||
branding = res.data.data;
|
||||
}
|
||||
} catch (error) {}
|
||||
}
|
||||
|
||||
return (
|
||||
<OrgLoginPage
|
||||
|
||||
@@ -33,12 +33,12 @@ export default async function OrgAuthPage(props: {
    const forceLoginParam = searchParams.forceLogin;
    const forceLogin = forceLoginParam === "true";

    const env = pullEnv();

    if (build !== "saas" && !env.flags.useOrgOnlyIdp) {
    if (build !== "saas") {
        redirect("/");
    }

    const env = pullEnv();

    const authHeader = await authCookieHeader();

    if (searchParams.token) {

@@ -204,7 +204,7 @@ export default async function ResourceAuthPage(props: {
    }

    let loginIdps: LoginFormIDP[] = [];
    if (build === "saas" || env.flags.useOrgOnlyIdp) {
    if (build === "saas") {
        if (subscribed) {
            const idpsRes = await cache(
                async () =>
@@ -1,5 +1,4 @@
import { SidebarNavItem } from "@app/components/SidebarNav";
import { Env } from "@app/lib/types/env";
import { build } from "@server/build";
import {
    Settings,
@@ -40,7 +39,7 @@ export const orgLangingNavItems: SidebarNavItem[] = [
    }
];

export const orgNavSections = (env?: Env): SidebarNavSection[] => [
export const orgNavSections = (): SidebarNavSection[] => [
    {
        heading: "sidebarGeneral",
        items: [
@@ -93,7 +92,8 @@ export const orgNavSections = (env?: Env): SidebarNavSection[] => [
                {
                    title: "sidebarRemoteExitNodes",
                    href: "/{orgId}/settings/remote-exit-nodes",
                    icon: <Server className="size-4 flex-none" />
                    icon: <Server className="size-4 flex-none" />,
                    showEE: true
                }
            ]
            : [])
@@ -123,12 +123,13 @@ export const orgNavSections = (env?: Env): SidebarNavSection[] => [
                href: "/{orgId}/settings/access/roles",
                icon: <Users className="size-4 flex-none" />
            },
            ...(build == "saas" || env?.flags.useOrgOnlyIdp
            ...(build == "saas"
                ? [
                    {
                        title: "sidebarIdentityProviders",
                        href: "/{orgId}/settings/idp",
                        icon: <Fingerprint className="size-4 flex-none" />
                        icon: <Fingerprint className="size-4 flex-none" />,
                        showEE: true
                    }
                ]
                : []),
@@ -227,7 +228,7 @@ export const orgNavSections = (env?: Env): SidebarNavSection[] => [
    }
];

export const adminNavSections = (env?: Env): SidebarNavSection[] => [
export const adminNavSections: SidebarNavSection[] = [
    {
        heading: "sidebarAdmin",
        items: [
@@ -241,15 +242,11 @@ export const adminNavSections = (env?: Env): SidebarNavSection[] => [
                href: "/admin/api-keys",
                icon: <KeyRound className="size-4 flex-none" />
            },
            ...(build === "oss" || !env?.flags.useOrgOnlyIdp
                ? [
                    {
                        title: "sidebarIdentityProviders",
                        href: "/admin/idp",
                        icon: <Fingerprint className="size-4 flex-none" />
                    }
                ]
                : []),
            {
                title: "sidebarIdentityProviders",
                href: "/admin/idp",
                icon: <Fingerprint className="size-4 flex-none" />
            },
            ...(build == "enterprise"
                ? [
                    {
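Both nav configs above gate optional entries with a conditional array spread: `...(condition ? [item] : [])` keeps the surrounding array literal declarative and simply spreads to nothing when the condition is false. A tiny self-contained illustration of the idiom, with made-up item names, follows.

type NavItem = { title: string; href: string; showEE?: boolean };
type Build = "oss" | "saas" | "enterprise";

// Illustrative only: build sidebar items for a given build flavor.
function navItems(build: Build): NavItem[] {
    return [
        { title: "sidebarSites", href: "/settings/sites" },
        // Spreads to an empty array unless this build should show the entry.
        ...(build === "saas"
            ? [{ title: "sidebarIdentityProviders", href: "/settings/idp", showEE: true }]
            : [])
    ];
}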
@@ -118,7 +118,6 @@ export default function AuthPageBrandingForm({
    const brandingData = form.getValues();

    if (!isValid || !isPaidUser) return;

    try {
        const updateRes = await api.put(
            `/org/${orgId}/login-page-branding`,
@@ -290,8 +289,7 @@ export default function AuthPageBrandingForm({
            </div>
        </div>

        {build === "saas" ||
        env.env.flags.useOrgOnlyIdp ? (
        {build === "saas" && (
            <>
                <div className="mt-3 mb-6">
                    <SettingsSectionTitle>
@@ -345,7 +343,7 @@ export default function AuthPageBrandingForm({
                    />
                </div>
            </>
        ) : null}
        )}

        <div className="mt-3 mb-6">
            <SettingsSectionTitle>
@@ -63,6 +63,8 @@ export default function ConfirmDeleteDialog({
        }
    });

    const isConfirmed = form.watch("string") === string;

    async function onSubmit() {
        try {
            await onConfirm();
@@ -139,7 +141,8 @@ export default function ConfirmDeleteDialog({
                type="submit"
                form="confirm-delete-form"
                loading={loading}
                disabled={loading}
                disabled={loading || !isConfirmed}
                className={!isConfirmed && !loading ? "opacity-50" : ""}
            >
                {buttonText}
            </Button>
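This change adds a type-to-confirm guard: the submit button stays disabled until the watched form field matches the expected string. A stripped-down sketch of the same guard using plain React state (no form library) is below; the component and prop names are illustrative assumptions.

import { useState } from "react";

// Hypothetical minimal type-to-confirm control: the destructive action is
// only enabled once the user has typed the expected string exactly.
export function ConfirmDeleteSketch({
    expected,
    onConfirm
}: {
    expected: string;
    onConfirm: () => Promise<void>;
}) {
    const [typed, setTyped] = useState("");
    const isConfirmed = typed === expected;

    return (
        <form
            onSubmit={async (e) => {
                e.preventDefault();
                if (isConfirmed) await onConfirm();
            }}
        >
            <input value={typed} onChange={(e) => setTyped(e.target.value)} />
            <button type="submit" disabled={!isConfirmed}>
                Delete
            </button>
        </form>
    );
}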
@@ -114,16 +114,6 @@ function getActionsCategories(root: boolean) {
        }
    };

    if (root || build === "saas" || env.flags.useOrgOnlyIdp) {
        actionsByCategory["Identity Provider (IDP)"] = {
            [t("actionCreateIdp")]: "createIdp",
            [t("actionUpdateIdp")]: "updateIdp",
            [t("actionDeleteIdp")]: "deleteIdp",
            [t("actionListIdps")]: "listIdps",
            [t("actionGetIdp")]: "getIdp"
        };
    }

    if (root) {
        actionsByCategory["Organization"] = {
            [t("actionListOrgs")]: "listOrgs",
@@ -138,21 +128,24 @@ function getActionsCategories(root: boolean) {
            ...actionsByCategory["Organization"]
        };

        actionsByCategory["Identity Provider (IDP)"][t("actionCreateIdpOrg")] =
            "createIdpOrg";
        actionsByCategory["Identity Provider (IDP)"][t("actionDeleteIdpOrg")] =
            "deleteIdpOrg";
        actionsByCategory["Identity Provider (IDP)"][t("actionListIdpOrgs")] =
            "listIdpOrgs";
        actionsByCategory["Identity Provider (IDP)"][t("actionUpdateIdpOrg")] =
            "updateIdpOrg";
        actionsByCategory["Identity Provider (IDP)"] = {
            [t("actionCreateIdp")]: "createIdp",
            [t("actionUpdateIdp")]: "updateIdp",
            [t("actionDeleteIdp")]: "deleteIdp",
            [t("actionListIdps")]: "listIdps",
            [t("actionGetIdp")]: "getIdp",
            [t("actionCreateIdpOrg")]: "createIdpOrg",
            [t("actionDeleteIdpOrg")]: "deleteIdpOrg",
            [t("actionListIdpOrgs")]: "listIdpOrgs",
            [t("actionUpdateIdpOrg")]: "updateIdpOrg"
        };

        actionsByCategory["User"] = {
            [t("actionUpdateUser")]: "updateUser",
            [t("actionGetUser")]: "getUser"
        };

        if (build === "saas") {
        if (build == "saas") {
            actionsByCategory["SAAS"] = {
                ["Send Usage Notification Email"]: "sendUsageNotification"
            };
@@ -63,9 +63,7 @@ export function pullEnv(): Env {
            disableProductHelpBanners:
                process.env.FLAGS_DISABLE_PRODUCT_HELP_BANNERS === "true"
                    ? true
                    : false,
            useOrgOnlyIdp:
                process.env.USE_ORG_ONLY_IDP === "true" ? true : false
                    : false
        },

        branding: {
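The flag block above parses "true"-string environment variables with a verbose ternary ( === "true" ? true : false ). Since the comparison already yields a boolean, a small helper keeps the flag list flat; this helper is a suggestion, not part of the change.

// Hypothetical helper: treat only the literal string "true" as enabled.
const boolFlag = (value: string | undefined): boolean => value === "true";

// Example usage inside a flags object:
const flags = {
    disableProductHelpBanners: boolFlag(process.env.FLAGS_DISABLE_PRODUCT_HELP_BANNERS)
};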
@@ -157,13 +157,7 @@ export const orgQueries = {
            return res.data.data.domains;
        }
    }),
    identityProviders: ({
        orgId,
        useOrgOnlyIdp
    }: {
        orgId: string;
        useOrgOnlyIdp?: boolean;
    }) =>
    identityProviders: ({ orgId }: { orgId: string }) =>
        queryOptions({
            queryKey: ["ORG", orgId, "IDPS"] as const,
            queryFn: async ({ signal, meta }) => {
@@ -171,12 +165,7 @@ export const orgQueries = {
                AxiosResponse<{
                    idps: { idpId: number; name: string }[];
                }>
            >(
                build === "saas" || useOrgOnlyIdp
                    ? `/org/${orgId}/idp`
                    : "/idp",
                { signal }
            );
            >(build === "saas" ? `/org/${orgId}/idp` : "/idp", { signal });
                return res.data.data.idps;
            }
        })
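identityProviders now takes only orgId and returns a queryOptions object, so the SaaS-vs-default endpoint choice lives entirely inside the query definition. Assuming TanStack Query v5 (which the queryOptions helper implies), a caller would consume it roughly as sketched below; the component and the import path for orgQueries are assumptions.

import { useQuery } from "@tanstack/react-query";
import { orgQueries } from "@app/queries/org"; // assumed path for the export above

function IdpPicker({ orgId }: { orgId: string }) {
    // The options object carries the queryKey, queryFn, and result type together.
    const { data: idps, isPending } = useQuery(orgQueries.identityProviders({ orgId }));

    if (isPending) return <p>Loading…</p>;
    return (
        <ul>
            {idps?.map((idp) => (
                <li key={idp.idpId}>{idp.name}</li>
            ))}
        </ul>
    );
}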
@@ -34,7 +34,6 @@ export type Env = {
        hideSupporterKey: boolean;
        usePangolinDns: boolean;
        disableProductHelpBanners: boolean;
        useOrgOnlyIdp: boolean;
    };
    branding: {
        appName?: string;