Compare commits

...

157 Commits

Author SHA1 Message Date
Owen
05748bf8ff Merge branch 'dev' into msg-delivery 2026-01-16 12:22:23 -08:00
Owen
f8c98bf6bf Fix log messages 2026-01-16 12:19:52 -08:00
Owen
a1ea3f74b3 Move the query into the sync 2026-01-15 22:00:13 -08:00
Milo Schwartz
06aaa7c680 Merge pull request #2121 from Fredkiss3/feat/device-approvals
feat: device approvals
2026-01-15 21:33:31 -08:00
Owen
65e8bfc93e Message syncing works 2026-01-15 21:26:13 -08:00
miloschwartz
ff5e12655f add pretty apple device names 2026-01-15 17:59:45 -08:00
Fred KISSIE
6d90d734f4 🏷️ fix types 2026-01-15 23:25:05 +01:00
Varun Narravula
6c8757f230 feat(olm): reset/send new olm secret if a matching fingerprint is detected 2026-01-15 12:33:26 -08:00
Milo Schwartz
40e37b1798 Merge pull request #2244 from water-sucks/add-fingerprint-and-posture-check-info
feat(fingerprint): store posture checks and fingerprint info
2026-01-15 12:05:41 -08:00
miloschwartz
bd87585396 fix whitelist hydration closes #2190 2026-01-14 22:08:16 -08:00
Varun Narravula
e9e935d6c4 feat(fingerprint): add platform fingerprint hash 2026-01-14 20:21:22 -08:00
miloschwartz
2f2c2b4222 improved org idp login flow 2026-01-14 19:15:19 -08:00
Fred KISSIE
9749a272ec 🏷️fix types 2026-01-15 03:46:25 +01:00
Fred KISSIE
b76a50238e 🏷️ fix types 2026-01-15 03:40:30 +01:00
Fred KISSIE
a4f3963a5a ♻️update approval filter & set approval to denied when blocked 2026-01-15 03:34:42 +01:00
Owen
d52bd65d21 Fix build 2026-01-14 17:54:34 -08:00
Fred KISSIE
fb51f42f35 ♻️ set approval & blocked work in tandem 2026-01-15 01:33:52 +01:00
Fred KISSIE
c910a715bd ♻️ set blocked to true if approvalState is set to denied 2026-01-15 01:12:38 +01:00
Fred KISSIE
9040f9b82a ♻️ set approval state nullable 2026-01-15 01:03:02 +01:00
Fred KISSIE
fc0ec0d754 🐛 remove unused approval state 2026-01-15 00:28:30 +01:00
Fred KISSIE
b3569174b6 🐛 fix sqlite type 2026-01-15 00:20:45 +01:00
Fred KISSIE
0cae624995 🏷️ fix types 2026-01-14 23:57:16 +01:00
Fred KISSIE
cbf184342b Merge branch 'dev' into feat/device-approvals 2026-01-14 23:08:40 +01:00
Fred KISSIE
ce123a7f1a 💬 make the message more descriptive 2026-01-14 03:37:47 +01:00
Fred KISSIE
0c5daa7173 process approvals on the frontend 2026-01-14 03:31:49 +01:00
Fred KISSIE
bc20a34a49 process approval endpoint 2026-01-14 03:00:40 +01:00
Fred KISSIE
d5b6a426a9 💄 filter by approval state 2026-01-14 02:24:11 +01:00
Fred KISSIE
4c78e93143 💄 show approval state in the user device UI 2026-01-14 01:59:51 +01:00
miloschwartz
5f184e9e5e support background image on org auth pages 2026-01-13 16:35:27 -08:00
miloschwartz
2201b0395d add optional tags field to idp 2026-01-13 16:21:40 -08:00
miloschwartz
51818044b1 fix broken redirect url on custom auth url login 2026-01-13 15:48:07 -08:00
Fred KISSIE
30943010e6 ♻️ sort by pending first 2026-01-14 00:37:48 +01:00
Fred KISSIE
dd5ca10226 ♻️ empty data 2026-01-14 00:35:35 +01:00
miloschwartz
a56b058858 fix role name missing in forward headers 2026-01-13 15:28:02 -08:00
miloschwartz
eade72e2c6 set text-destructive color 2026-01-13 09:36:53 -08:00
miloschwartz
e9bc9747b8 check if olm is blocked in get user olm 2026-01-12 22:08:58 -08:00
Owen
eb0cdda0f9 Merge branch 'dev' into msg-delivery 2026-01-12 21:17:38 -08:00
Owen
552adf3200 Properly handle blocked devices 2026-01-12 21:14:18 -08:00
Owen
eba25fcc4d Add increment options and slight cleanup 2026-01-12 20:48:18 -08:00
miloschwartz
673cd0fcd1 add block client 2026-01-12 20:37:53 -08:00
miloschwartz
b941b5571f add archive to org clients and add unarchive 2026-01-12 15:52:27 -08:00
Owen
ca026b41c0 Merge branch 'main' into dev 2026-01-11 14:19:59 -08:00
Owen
29a683a815 Copy all tags to github reg 2026-01-11 14:19:38 -08:00
Owen
69dbd20ea5 Use same regex for blueprint aliases
Closes #2218
Fixes #2216
2026-01-11 13:39:46 -08:00
miloschwartz
427ee026ac Merge branch 'org-only-idp' into dev 2026-01-11 10:47:57 -08:00
miloschwartz
0a537c6830 add org only idp to integration api 2026-01-11 10:47:19 -08:00
Owen
89682a2ee4 Try to open intent:// into Android app from tab 2026-01-11 10:39:39 -08:00
Owen
78b00a18cc Add retry to acquire 2026-01-11 10:39:28 -08:00
Owen
192702daf9 Quiet log 2026-01-11 10:39:18 -08:00
Varun Narravula
fcee735578 feat(fingerprints): receive fingerprints/postures from olm and add to db 2026-01-10 21:15:54 -08:00
miloschwartz
2ba49e84bb add archive device instead of delete 2026-01-09 18:00:00 -08:00
Fred KISSIE
262376aa75 approval list UI 2026-01-10 02:37:50 +01:00
miloschwartz
4c8d2266ec clean up login page 2026-01-09 14:41:22 -08:00
miloschwartz
bb98bf03aa Merge branch 'org-only-idp' into dev 2026-01-09 13:34:52 -08:00
Fred KISSIE
19c3efc9e9 🚧 working on the approval feed 2026-01-09 02:20:08 +01:00
Fred KISSIE
7164721ee0 🐛 insert timestamp correctly 2026-01-09 01:50:56 +01:00
Fred KISSIE
74b16809ec ♻️ update endpoint to only return relevant data 2026-01-09 01:40:15 +01:00
Fred KISSIE
220723d25f ♻️ component refactor 2026-01-09 01:33:52 +01:00
Fred KISSIE
fdb03c9626 ♻️ list approvals with client & user data 2026-01-09 01:33:40 +01:00
Fred KISSIE
a81bbb9192 create approval request and mark client approval as pending if the user's role requires it 2026-01-09 01:18:15 +01:00
Fred KISSIE
7a4aff8e4b 🗃️ use clientId and fix bad column name for decision and add userId 2026-01-09 01:17:05 +01:00
miloschwartz
2810632f4a add flag to enable org only idp in ee 2026-01-07 20:40:59 -08:00
Fred KISSIE
2d0dd067b8 ♻️ refactor 2026-01-08 03:41:09 +01:00
Fred KISSIE
3ab25f5ff1 ♻️ refactor 2026-01-08 03:38:55 +01:00
Fred KISSIE
39bebea5f7 create & update role with device approval 2026-01-08 03:33:03 +01:00
miloschwartz
57681dcd3d remove artificial delay 2026-01-07 12:06:50 -08:00
miloschwartz
168ce549f7 remove guards from list idp for integration api 2026-01-06 13:20:18 -05:00
Owen
9ec94441f3 Try to open apps 2026-01-05 21:46:38 -05:00
Owen
53e7b99605 Quiet up logs 2026-01-05 21:25:15 -05:00
Fred KISSIE
abfe476cb9 🚧 wip 2026-01-06 02:02:09 +01:00
Fred KISSIE
bbca200ceb 🙈 do not include claude.md in gitignore 2026-01-06 01:51:54 +01:00
Fred KISSIE
cb21cab117 🚧 add device approval in the roles page 2026-01-06 01:51:33 +01:00
Fred KISSIE
1f80845a7a 🗃️ move approval state to client directly where it makes more sense 2026-01-05 22:49:42 +01:00
Fred KISSIE
0f2b94307f Merge branch 'dev' into feat/device-approvals 2026-01-05 16:54:18 +01:00
Owen
f7fcde8312 Add max recursion depth to matchSegments 2025-12-31 10:40:16 -05:00
Owen
6660c850f3 Try to bound logs
Ref #2120
2025-12-31 10:31:40 -05:00
Owen
8a08bdf9f0 Add OCI labels
Fixes #2170
2025-12-29 12:29:27 -05:00
Owen
87807e22e0 Add encoded chars to default traefik config
Closes #2176
2025-12-29 10:49:32 -05:00
Owen
0eb39abdb4 Set hc to unknown when changing to local site
Fixes #2181
2025-12-29 10:22:06 -05:00
miloschwartz
a499ebc158 add badger to dyn config example 2025-12-29 10:17:26 -05:00
Owen Schwartz
982c692c40 New translations en-us.json (French) 2025-12-24 16:12:11 -05:00
Owen Schwartz
0c3ce7836c New translations en-us.json (Norwegian Bokmal) 2025-12-24 16:12:11 -05:00
Owen Schwartz
7ef86c5707 New translations en-us.json (Chinese Simplified) 2025-12-24 16:12:11 -05:00
Owen Schwartz
f62b88b930 New translations en-us.json (Turkish) 2025-12-24 16:12:11 -05:00
Owen Schwartz
03a326c841 New translations en-us.json (Russian) 2025-12-24 16:12:11 -05:00
Owen Schwartz
4df4cafd70 New translations en-us.json (Portuguese) 2025-12-24 16:12:11 -05:00
Owen Schwartz
4b9539cc6d New translations en-us.json (Polish) 2025-12-24 16:12:11 -05:00
Owen Schwartz
87135c90bd New translations en-us.json (Dutch) 2025-12-24 16:12:11 -05:00
Owen Schwartz
853d416b2f New translations en-us.json (Korean) 2025-12-24 16:12:11 -05:00
Owen Schwartz
bfd14b87bd New translations en-us.json (Italian) 2025-12-24 16:12:11 -05:00
Owen Schwartz
88aba4e169 New translations en-us.json (German) 2025-12-24 16:12:11 -05:00
Owen Schwartz
99e2fcb2e8 New translations en-us.json (Czech) 2025-12-24 16:12:11 -05:00
Owen Schwartz
1f138ab68c New translations en-us.json (Bulgarian) 2025-12-24 16:12:11 -05:00
Owen Schwartz
99ded7454e New translations en-us.json (Spanish) 2025-12-24 16:12:11 -05:00
Owen Schwartz
f82cacac6d New translations en-us.json (French) 2025-12-24 16:12:11 -05:00
Owen Schwartz
a548f61ea6 New translations en-us.json (Norwegian Bokmal) 2025-12-24 16:12:11 -05:00
Owen Schwartz
bfae715076 New translations en-us.json (Chinese Simplified) 2025-12-24 16:12:11 -05:00
Owen Schwartz
358e25b7c2 New translations en-us.json (Turkish) 2025-12-24 16:12:11 -05:00
Owen Schwartz
2c3fa54933 New translations en-us.json (Russian) 2025-12-24 16:12:11 -05:00
Owen Schwartz
00cdd5833e New translations en-us.json (Portuguese) 2025-12-24 16:12:11 -05:00
Owen Schwartz
52b1164e58 New translations en-us.json (Polish) 2025-12-24 16:12:11 -05:00
Owen Schwartz
657bc9cdf0 New translations en-us.json (Dutch) 2025-12-24 16:12:11 -05:00
Owen Schwartz
ec6bcd41b0 New translations en-us.json (Korean) 2025-12-24 16:12:11 -05:00
Owen Schwartz
1721cce040 New translations en-us.json (Italian) 2025-12-24 16:12:11 -05:00
Owen Schwartz
e41a5ad6b0 New translations en-us.json (German) 2025-12-24 16:12:11 -05:00
Owen Schwartz
ee1eca9e66 New translations en-us.json (Czech) 2025-12-24 16:12:11 -05:00
Owen Schwartz
d049369172 New translations en-us.json (Bulgarian) 2025-12-24 16:12:11 -05:00
Owen Schwartz
6280a68d51 New translations en-us.json (Spanish) 2025-12-24 16:12:11 -05:00
Owen Schwartz
32054dc4f6 New translations en-us.json (French) 2025-12-24 16:12:11 -05:00
Owen Schwartz
831c631048 New translations en-us.json (Norwegian Bokmal) 2025-12-24 16:12:11 -05:00
Owen Schwartz
e23711bcce New translations en-us.json (Chinese Simplified) 2025-12-24 16:12:11 -05:00
Owen Schwartz
440bff57d0 New translations en-us.json (Turkish) 2025-12-24 16:12:11 -05:00
Owen Schwartz
7345cc81c1 New translations en-us.json (Russian) 2025-12-24 16:12:11 -05:00
Owen Schwartz
164ab26069 New translations en-us.json (Portuguese) 2025-12-24 16:12:11 -05:00
Owen Schwartz
4b6ace80d3 New translations en-us.json (Polish) 2025-12-24 16:12:11 -05:00
Owen Schwartz
653127a0f7 New translations en-us.json (Dutch) 2025-12-24 16:12:11 -05:00
Owen Schwartz
bf3a1e20fc New translations en-us.json (Korean) 2025-12-24 16:12:11 -05:00
Owen Schwartz
d7a44e7589 New translations en-us.json (Italian) 2025-12-24 16:12:11 -05:00
Owen Schwartz
6c0d583557 New translations en-us.json (Czech) 2025-12-24 16:12:11 -05:00
Owen Schwartz
13f0fb25da New translations en-us.json (Bulgarian) 2025-12-24 16:12:11 -05:00
Owen Schwartz
818aca9ec8 New translations en-us.json (Spanish) 2025-12-24 16:12:11 -05:00
Owen Schwartz
1c7fb476b0 New translations en-us.json (Norwegian Bokmal) 2025-12-24 16:12:11 -05:00
Owen Schwartz
93843ed733 New translations en-us.json (Chinese Simplified) 2025-12-24 16:12:11 -05:00
Owen Schwartz
0973313703 New translations en-us.json (Turkish) 2025-12-24 16:12:11 -05:00
Owen Schwartz
bfbfbe8b11 New translations en-us.json (Russian) 2025-12-24 16:12:11 -05:00
Owen Schwartz
8c62d9fe78 New translations en-us.json (Portuguese) 2025-12-24 16:12:11 -05:00
Owen Schwartz
d5558f55ed New translations en-us.json (Polish) 2025-12-24 16:12:11 -05:00
Owen Schwartz
a96ad6bd07 New translations en-us.json (Dutch) 2025-12-24 16:12:11 -05:00
Owen Schwartz
00d9482a99 New translations en-us.json (Korean) 2025-12-24 16:12:11 -05:00
Owen Schwartz
0f90e2a30f New translations en-us.json (Italian) 2025-12-24 16:12:11 -05:00
Owen Schwartz
3eed636404 New translations en-us.json (German) 2025-12-24 16:12:11 -05:00
Owen Schwartz
a67f88381f New translations en-us.json (Czech) 2025-12-24 16:12:11 -05:00
Owen Schwartz
808fd856d1 New translations en-us.json (Bulgarian) 2025-12-24 16:12:11 -05:00
Owen Schwartz
5b9b532458 New translations en-us.json (Spanish) 2025-12-24 16:12:11 -05:00
miloschwartz
9fba9bd6b7 ui enhancements 2025-12-24 15:53:08 -05:00
Owen
c5ece144d0 Attempt to fix loginPageOrg undefined error 2025-12-24 12:25:11 -05:00
Owen
b64e2e11db Try to remove deadlocks on client updates 2025-12-24 12:20:22 -05:00
Owen
0ccd5714f9 Separating out functions 2025-12-24 11:50:27 -05:00
Owen
e2dfc3eb20 Merge branch 'dev' into msg-delivery 2025-12-24 11:33:41 -05:00
Owen
40eeb9b7cb Allow all in country in blueprints
Fixes #2163
2025-12-24 10:49:18 -05:00
Owen
8fa62a0908 Respect http status for url & maintenance mode
Fixes #2164
2025-12-24 10:47:01 -05:00
Owen
446eba8bc9 Organizing how we are going to make the sync 2025-12-24 10:38:44 -05:00
Owen
18579c0647 Merge branch 'dev' into msg-delivery 2025-12-23 16:57:17 -05:00
Owen
2bb94e24eb Merge branch 'main' into dev 2025-12-23 16:57:01 -05:00
Owen
0d37e08638 Merge branch 'dev' into msg-delivery 2025-12-23 16:56:50 -05:00
miloschwartz
a21f49cb02 add sticky actions col to org idp table 2025-12-23 14:58:58 -05:00
miloschwartz
ef697c4864 adjustments to mobile header css closes #1930 2025-12-23 13:57:44 -05:00
miloschwartz
2652dea09a fade mobile footer 2025-12-23 13:41:11 -05:00
miloschwartz
efa9312fca fix server admin spacing on mobile sidebar 2025-12-23 13:37:48 -05:00
miloschwartz
074ee70025 add flag to disable product help banners 2025-12-23 13:33:24 -05:00
miloschwartz
77117e48e3 improved button loading animation 2025-12-23 12:51:38 -05:00
miloschwartz
da112d3417 add stripPortFromHost and reuse everywhere 2025-12-23 12:35:03 -05:00
Owen
75b9703793 Separate config gen into functions 2025-12-20 11:41:23 -05:00
Fred KISSIE
e983e1166a 🚧 wip: approval tables in DB 2025-12-20 00:05:33 +01:00
Owen
322f3bfb1d Add version and send it down 2025-12-19 16:44:57 -05:00
Fred KISSIE
009b86c33b Merge branch 'dev' into feat/device-approvals 2025-12-19 20:03:05 +01:00
Fred KISSIE
a5775a0f4f 🗃️ create approvals table 2025-12-19 00:00:10 +01:00
141 changed files with 8237 additions and 1579 deletions

.github/workflows/cicd.yml

@@ -329,20 +329,89 @@ jobs:
skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
shell: bash
-- name: Copy tag from Docker Hub to GHCR
-# Mirror the already-built image (all architectures) to GHCR so we can sign it
+- name: Copy tags from Docker Hub to GHCR
+# Mirror the already-built images (all architectures) to GHCR so we can sign them
# Wait a bit for both architectures to be available in Docker Hub manifest
env:
REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
run: |
set -euo pipefail
TAG=${{ env.TAG }}
-echo "Waiting for multi-arch manifest to be ready..."
+MAJOR_TAG=$(echo $TAG | cut -d. -f1)
+MINOR_TAG=$(echo $TAG | cut -d. -f1,2)
+echo "Waiting for multi-arch manifests to be ready..."
sleep 30
-echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
-skopeo copy --all --retry-times 3 \
-docker://$DOCKERHUB_IMAGE:$TAG \
-docker://$GHCR_IMAGE:$TAG
+# Determine if this is an RC release
+IS_RC="false"
+if echo "$TAG" | grep -qE "rc[0-9]+$"; then
+IS_RC="true"
+fi
+if [ "$IS_RC" = "true" ]; then
+echo "RC release detected - copying version-specific tags only"
+# SQLite OSS
+echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
+skopeo copy --all --retry-times 3 \
+docker://$DOCKERHUB_IMAGE:$TAG \
+docker://$GHCR_IMAGE:$TAG
+# PostgreSQL OSS
+echo "Copying ${{ env.DOCKERHUB_IMAGE }}:postgresql-${TAG} -> ${{ env.GHCR_IMAGE }}:postgresql-${TAG}"
+skopeo copy --all --retry-times 3 \
+docker://$DOCKERHUB_IMAGE:postgresql-$TAG \
+docker://$GHCR_IMAGE:postgresql-$TAG
+# SQLite Enterprise
+echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-${TAG} -> ${{ env.GHCR_IMAGE }}:ee-${TAG}"
+skopeo copy --all --retry-times 3 \
+docker://$DOCKERHUB_IMAGE:ee-$TAG \
+docker://$GHCR_IMAGE:ee-$TAG
+# PostgreSQL Enterprise
+echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-postgresql-${TAG} -> ${{ env.GHCR_IMAGE }}:ee-postgresql-${TAG}"
+skopeo copy --all --retry-times 3 \
+docker://$DOCKERHUB_IMAGE:ee-postgresql-$TAG \
+docker://$GHCR_IMAGE:ee-postgresql-$TAG
+else
+echo "Regular release detected - copying all tags (latest, major, minor, full version)"
+# SQLite OSS - all tags
+for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
+echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:${TAG_SUFFIX}"
+skopeo copy --all --retry-times 3 \
+docker://$DOCKERHUB_IMAGE:$TAG_SUFFIX \
+docker://$GHCR_IMAGE:$TAG_SUFFIX
+done
+# PostgreSQL OSS - all tags
+for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
+echo "Copying ${{ env.DOCKERHUB_IMAGE }}:postgresql-${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:postgresql-${TAG_SUFFIX}"
+skopeo copy --all --retry-times 3 \
+docker://$DOCKERHUB_IMAGE:postgresql-$TAG_SUFFIX \
+docker://$GHCR_IMAGE:postgresql-$TAG_SUFFIX
+done
+# SQLite Enterprise - all tags
+for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
+echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:ee-${TAG_SUFFIX}"
+skopeo copy --all --retry-times 3 \
+docker://$DOCKERHUB_IMAGE:ee-$TAG_SUFFIX \
+docker://$GHCR_IMAGE:ee-$TAG_SUFFIX
+done
+# PostgreSQL Enterprise - all tags
+for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
+echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-postgresql-${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:ee-postgresql-${TAG_SUFFIX}"
+skopeo copy --all --retry-times 3 \
+docker://$DOCKERHUB_IMAGE:ee-postgresql-$TAG_SUFFIX \
+docker://$GHCR_IMAGE:ee-postgresql-$TAG_SUFFIX
+done
+fi
+echo "All images copied successfully to GHCR!"
shell: bash
- name: Login to GitHub Container Registry (for cosign)
@@ -371,28 +440,62 @@ jobs:
issuer="https://token.actions.githubusercontent.com"
id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)
-for IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
-echo "Processing ${IMAGE}:${TAG}"
-DIGEST="$(skopeo inspect --retry-times 3 docker://${IMAGE}:${TAG} | jq -r '.Digest')"
-REF="${IMAGE}@${DIGEST}"
-echo "Resolved digest: ${REF}"
-echo "==> cosign sign (keyless) --recursive ${REF}"
-cosign sign --recursive "${REF}"
-echo "==> cosign sign (key) --recursive ${REF}"
-cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"
-echo "==> cosign verify (public key) ${REF}"
-cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text
-echo "==> cosign verify (keyless policy) ${REF}"
-cosign verify \
---certificate-oidc-issuer "${issuer}" \
---certificate-identity-regexp "${id_regex}" \
-"${REF}" -o text
+# Determine if this is an RC release
+IS_RC="false"
+if echo "$TAG" | grep -qE "rc[0-9]+$"; then
+IS_RC="true"
+fi
+# Define image variants to sign
+if [ "$IS_RC" = "true" ]; then
+echo "RC release - signing version-specific tags only"
+IMAGE_TAGS=(
+"${TAG}"
+"postgresql-${TAG}"
+"ee-${TAG}"
+"ee-postgresql-${TAG}"
+)
+else
+echo "Regular release - signing all tags"
+MAJOR_TAG=$(echo $TAG | cut -d. -f1)
+MINOR_TAG=$(echo $TAG | cut -d. -f1,2)
+IMAGE_TAGS=(
+"latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"
+"postgresql-latest" "postgresql-$MAJOR_TAG" "postgresql-$MINOR_TAG" "postgresql-$TAG"
+"ee-latest" "ee-$MAJOR_TAG" "ee-$MINOR_TAG" "ee-$TAG"
+"ee-postgresql-latest" "ee-postgresql-$MAJOR_TAG" "ee-postgresql-$MINOR_TAG" "ee-postgresql-$TAG"
+)
+fi
+# Sign each image variant for both registries
+for BASE_IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
+for IMAGE_TAG in "${IMAGE_TAGS[@]}"; do
+echo "Processing ${BASE_IMAGE}:${IMAGE_TAG}"
+DIGEST="$(skopeo inspect --retry-times 3 docker://${BASE_IMAGE}:${IMAGE_TAG} | jq -r '.Digest')"
+REF="${BASE_IMAGE}@${DIGEST}"
+echo "Resolved digest: ${REF}"
+echo "==> cosign sign (keyless) --recursive ${REF}"
+cosign sign --recursive "${REF}"
+echo "==> cosign sign (key) --recursive ${REF}"
+cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"
+echo "==> cosign verify (public key) ${REF}"
+cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text
+echo "==> cosign verify (keyless policy) ${REF}"
+cosign verify \
+--certificate-oidc-issuer "${issuer}" \
+--certificate-identity-regexp "${id_regex}" \
+"${REF}" -o text
+echo "✓ Successfully signed and verified ${BASE_IMAGE}:${IMAGE_TAG}"
+done
done
+echo "All images signed and verified successfully!"
shell: bash
post-run:
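
Aside: a quick way to spot-check the mirrored tags outside the workflow. This is a sketch, not part of the diff; the tag value is a made-up example, and the image name reuses the fosrl/pangolin GHCR path from above.

# Sketch: confirm a mirrored tag exists in GHCR and carries both architectures.
TAG="1.10.3"  # hypothetical example tag
skopeo inspect --raw "docker://ghcr.io/fosrl/pangolin:${TAG}" \
| jq -r '.manifests[].platform.architecture'
# A healthy multi-arch manifest should list both amd64 and arm64.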

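For consumers of the published images, verification mirrors the checks the workflow runs on itself. A minimal sketch, assuming the project's cosign public key has been saved locally as cosign.pub and substituting a real tag:

IMAGE="ghcr.io/fosrl/pangolin:1.10.3"  # hypothetical tag
# Key-based verification against the project's public key:
cosign verify --key cosign.pub "${IMAGE}" -o text
# Keyless (OIDC) verification against the repo's GitHub Actions identity,
# matching the issuer/identity policy used in the workflow above:
cosign verify \
--certificate-oidc-issuer "https://token.actions.githubusercontent.com" \
--certificate-identity-regexp "^https://github.com/fosrl/pangolin/.+" \
"${IMAGE}" -o text
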
.github/workflows/cicd.yml.backup (new file, 426 lines)

@@ -0,0 +1,426 @@
name: CI/CD Pipeline
# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.
permissions:
contents: read
packages: write # for GHCR push
id-token: write # for Cosign Keyless (OIDC) Signing
# Required secrets:
# - DOCKER_HUB_USERNAME / DOCKER_HUB_ACCESS_TOKEN: push to Docker Hub
# - GITHUB_TOKEN: used for GHCR login and OIDC keyless signing
# - COSIGN_PRIVATE_KEY / COSIGN_PASSWORD / COSIGN_PUBLIC_KEY: for key-based signing
on:
push:
tags:
- "[0-9]+.[0-9]+.[0-9]+"
- "[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
concurrency:
group: ${{ github.ref }}
cancel-in-progress: true
jobs:
pre-run:
runs-on: ubuntu-latest
permissions: write-all
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v2
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
role-duration-seconds: 3600
aws-region: ${{ secrets.AWS_REGION }}
- name: Verify AWS identity
run: aws sts get-caller-identity
- name: Start EC2 instances
run: |
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
echo "EC2 instances started"
release-arm:
name: Build and Release (ARM64)
runs-on: [self-hosted, linux, arm64, us-east-1]
needs: [pre-run]
if: >-
${{
needs.pre-run.result == 'success'
}}
# Job-level timeout to avoid runaway or stuck runs
timeout-minutes: 120
env:
# Target images
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Monitor storage space
run: |
THRESHOLD=75
USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
echo "Used space: $USED_SPACE%"
if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
echo "Used space is below the threshold of 75% free. Running Docker system prune."
echo y | docker system prune -a
else
echo "Storage space is above the threshold. No action needed."
fi
- name: Log in to Docker Hub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: docker.io
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Extract tag name
id: get-tag
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
shell: bash
- name: Update version in server/lib/consts.ts
run: |
TAG=${{ env.TAG }}
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
cat server/lib/consts.ts
shell: bash
- name: Check if release candidate
id: check-rc
run: |
TAG=${{ env.TAG }}
if [[ "$TAG" == *"-rc."* ]]; then
echo "IS_RC=true" >> $GITHUB_ENV
else
echo "IS_RC=false" >> $GITHUB_ENV
fi
shell: bash
- name: Build and push Docker images (Docker Hub - ARM64)
run: |
TAG=${{ env.TAG }}
if [ "$IS_RC" = "true" ]; then
make build-rc-arm tag=$TAG
else
make build-release-arm tag=$TAG
fi
echo "Built & pushed ARM64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
shell: bash
release-amd:
name: Build and Release (AMD64)
runs-on: [self-hosted, linux, x64, us-east-1]
needs: [pre-run]
if: >-
${{
needs.pre-run.result == 'success'
}}
# Job-level timeout to avoid runaway or stuck runs
timeout-minutes: 120
env:
# Target images
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Monitor storage space
run: |
THRESHOLD=75
USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
echo "Used space: $USED_SPACE%"
if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
echo "Used space is below the threshold of 75% free. Running Docker system prune."
echo y | docker system prune -a
else
echo "Storage space is above the threshold. No action needed."
fi
- name: Log in to Docker Hub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: docker.io
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Extract tag name
id: get-tag
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
shell: bash
- name: Update version in server/lib/consts.ts
run: |
TAG=${{ env.TAG }}
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
cat server/lib/consts.ts
shell: bash
- name: Check if release candidate
id: check-rc
run: |
TAG=${{ env.TAG }}
if [[ "$TAG" == *"-rc."* ]]; then
echo "IS_RC=true" >> $GITHUB_ENV
else
echo "IS_RC=false" >> $GITHUB_ENV
fi
shell: bash
- name: Build and push Docker images (Docker Hub - AMD64)
run: |
TAG=${{ env.TAG }}
if [ "$IS_RC" = "true" ]; then
make build-rc-amd tag=$TAG
else
make build-release-amd tag=$TAG
fi
echo "Built & pushed AMD64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
shell: bash
create-manifest:
name: Create Multi-Arch Manifests
runs-on: [self-hosted, linux, x64, us-east-1]
needs: [release-arm, release-amd]
if: >-
${{
needs.release-arm.result == 'success' &&
needs.release-amd.result == 'success'
}}
timeout-minutes: 30
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Log in to Docker Hub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: docker.io
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Extract tag name
id: get-tag
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
shell: bash
- name: Check if release candidate
id: check-rc
run: |
TAG=${{ env.TAG }}
if [[ "$TAG" == *"-rc."* ]]; then
echo "IS_RC=true" >> $GITHUB_ENV
else
echo "IS_RC=false" >> $GITHUB_ENV
fi
shell: bash
- name: Create multi-arch manifests
run: |
TAG=${{ env.TAG }}
if [ "$IS_RC" = "true" ]; then
make create-manifests-rc tag=$TAG
else
make create-manifests tag=$TAG
fi
echo "Created multi-arch manifests for tag: ${TAG}"
shell: bash
sign-and-package:
name: Sign and Package
runs-on: [self-hosted, linux, x64, us-east-1]
needs: [release-arm, release-amd, create-manifest]
if: >-
${{
needs.release-arm.result == 'success' &&
needs.release-amd.result == 'success' &&
needs.create-manifest.result == 'success'
}}
# Job-level timeout to avoid runaway or stuck runs
timeout-minutes: 120
env:
# Target images
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Extract tag name
id: get-tag
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
shell: bash
- name: Install Go
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: 1.24
- name: Update version in server/lib/consts.ts
run: |
TAG=${{ env.TAG }}
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
cat server/lib/consts.ts
shell: bash
- name: Pull latest Gerbil version
id: get-gerbil-tag
run: |
LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name')
echo "LATEST_GERBIL_TAG=$LATEST_TAG" >> $GITHUB_ENV
shell: bash
- name: Pull latest Badger version
id: get-badger-tag
run: |
LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name')
echo "LATEST_BADGER_TAG=$LATEST_TAG" >> $GITHUB_ENV
shell: bash
- name: Update install/main.go
run: |
PANGOLIN_VERSION=${{ env.TAG }}
GERBIL_VERSION=${{ env.LATEST_GERBIL_TAG }}
BADGER_VERSION=${{ env.LATEST_BADGER_TAG }}
sed -i "s/config.PangolinVersion = \".*\"/config.PangolinVersion = \"$PANGOLIN_VERSION\"/" install/main.go
sed -i "s/config.GerbilVersion = \".*\"/config.GerbilVersion = \"$GERBIL_VERSION\"/" install/main.go
sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$BADGER_VERSION\"/" install/main.go
echo "Updated install/main.go with Pangolin version $PANGOLIN_VERSION, Gerbil version $GERBIL_VERSION, and Badger version $BADGER_VERSION"
cat install/main.go
shell: bash
- name: Build installer
working-directory: install
run: |
make go-build-release
- name: Upload artifacts from /install/bin
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: install-bin
path: install/bin/
- name: Install skopeo + jq
# skopeo: copy/inspect images between registries
# jq: JSON parsing tool used to extract digest values
run: |
sudo apt-get update -y
sudo apt-get install -y skopeo jq
skopeo --version
shell: bash
- name: Login to GHCR
env:
REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
run: |
mkdir -p "$(dirname "$REGISTRY_AUTH_FILE")"
skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
shell: bash
- name: Copy tag from Docker Hub to GHCR
# Mirror the already-built image (all architectures) to GHCR so we can sign it
# Wait a bit for both architectures to be available in Docker Hub manifest
env:
REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
run: |
set -euo pipefail
TAG=${{ env.TAG }}
echo "Waiting for multi-arch manifest to be ready..."
sleep 30
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
skopeo copy --all --retry-times 3 \
docker://$DOCKERHUB_IMAGE:$TAG \
docker://$GHCR_IMAGE:$TAG
shell: bash
- name: Login to GitHub Container Registry (for cosign)
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Install cosign
# cosign is used to sign and verify container images (key and keyless)
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
- name: Dual-sign and verify (GHCR & Docker Hub)
# Sign each image by digest using keyless (OIDC) and key-based signing,
# then verify both the public key signature and the keyless OIDC signature.
env:
TAG: ${{ env.TAG }}
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
COSIGN_YES: "true"
run: |
set -euo pipefail
issuer="https://token.actions.githubusercontent.com"
id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)
for IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
echo "Processing ${IMAGE}:${TAG}"
DIGEST="$(skopeo inspect --retry-times 3 docker://${IMAGE}:${TAG} | jq -r '.Digest')"
REF="${IMAGE}@${DIGEST}"
echo "Resolved digest: ${REF}"
echo "==> cosign sign (keyless) --recursive ${REF}"
cosign sign --recursive "${REF}"
echo "==> cosign sign (key) --recursive ${REF}"
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"
echo "==> cosign verify (public key) ${REF}"
cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text
echo "==> cosign verify (keyless policy) ${REF}"
cosign verify \
--certificate-oidc-issuer "${issuer}" \
--certificate-identity-regexp "${id_regex}" \
"${REF}" -o text
done
shell: bash
post-run:
needs: [pre-run, release-arm, release-amd, create-manifest, sign-and-package]
if: >-
${{
always() &&
needs.pre-run.result == 'success' &&
(needs.release-arm.result == 'success' || needs.release-arm.result == 'skipped' || needs.release-arm.result == 'failure') &&
(needs.release-amd.result == 'success' || needs.release-amd.result == 'skipped' || needs.release-amd.result == 'failure') &&
(needs.create-manifest.result == 'success' || needs.create-manifest.result == 'skipped' || needs.create-manifest.result == 'failure') &&
(needs.sign-and-package.result == 'success' || needs.sign-and-package.result == 'skipped' || needs.sign-and-package.result == 'failure')
}}
runs-on: ubuntu-latest
permissions: write-all
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v2
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
role-duration-seconds: 3600
aws-region: ${{ secrets.AWS_REGION }}
- name: Verify AWS identity
run: aws sts get-caller-identity
- name: Stop EC2 instances
run: |
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
echo "EC2 instances stopped"

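Worth noting: the tag-push trigger and check-rc steps above match -rc.N style tags (e.g. 1.2.3-rc.1), while the updated workflow's copy/sign steps detect RCs with grep -qE "rc[0-9]+$", which only matches tags ending in rcN with no dot; whether both tag forms occur in practice isn't visible from this diff. A small sketch of the check-rc classification as written:

# Sketch: how the check-rc step classifies tags (same test as above).
for TAG in "1.2.3" "1.2.3-rc.1"; do
if [[ "$TAG" == *"-rc."* ]]; then
echo "$TAG -> RC build (make build-rc-* targets)"
else
echo "$TAG -> regular release (make build-release-* targets)"
fi
done
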
.gitignore (3 lines changed)

@@ -50,4 +50,5 @@ dynamic/
*.mmdb
scratch/
tsconfig.json
hydrateSaas.ts
hydrateSaas.ts
CLAUDE.md

Dockerfile

@@ -1,10 +1,20 @@
FROM node:24-alpine AS builder
# OCI Image Labels - Build Args for dynamic values
ARG VERSION="dev"
ARG REVISION=""
ARG CREATED=""
ARG LICENSE="AGPL-3.0"
WORKDIR /app
ARG BUILD=oss
ARG DATABASE=sqlite
# Derive title and description based on BUILD type
ARG IMAGE_TITLE="Pangolin"
ARG IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere"
RUN apk add --no-cache curl tzdata python3 make g++
# COPY package.json package-lock.json ./
@@ -69,4 +79,17 @@ RUN chmod +x /usr/local/bin/pangctl ./dist/cli.mjs
COPY server/db/names.json ./dist/names.json
COPY public ./public
# OCI Image Labels
# https://github.com/opencontainers/image-spec/blob/main/annotations.md
LABEL org.opencontainers.image.source="https://github.com/fosrl/pangolin" \
org.opencontainers.image.url="https://github.com/fosrl/pangolin" \
org.opencontainers.image.documentation="https://docs.pangolin.net" \
org.opencontainers.image.vendor="Fossorial" \
org.opencontainers.image.licenses="${LICENSE}" \
org.opencontainers.image.title="${IMAGE_TITLE}" \
org.opencontainers.image.description="${IMAGE_DESCRIPTION}" \
org.opencontainers.image.version="${VERSION}" \
org.opencontainers.image.revision="${REVISION}" \
org.opencontainers.image.created="${CREATED}"
CMD ["npm", "run", "start"]

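To read back the labels baked in above, either command below works; both are sketches with an assumed tag.

# Remote, without pulling the image:
skopeo inspect docker://docker.io/fosrl/pangolin:latest | jq '.Labels'
# Local, on a built image:
docker inspect --format '{{ json .Config.Labels }}' fosrl/pangolin:latest | jq .
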
Makefile (193 lines changed)

@@ -3,6 +3,25 @@
major_tag := $(shell echo $(tag) | cut -d. -f1)
minor_tag := $(shell echo $(tag) | cut -d. -f1,2)
# OCI label variables
CREATED := $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
REVISION := $(shell git rev-parse HEAD 2>/dev/null || echo "unknown")
# Common OCI build args for OSS builds
OCI_ARGS_OSS = --build-arg VERSION=$(tag) \
--build-arg REVISION=$(REVISION) \
--build-arg CREATED=$(CREATED) \
--build-arg IMAGE_TITLE="Pangolin" \
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere"
# Common OCI build args for Enterprise builds
OCI_ARGS_EE = --build-arg VERSION=$(tag) \
--build-arg REVISION=$(REVISION) \
--build-arg CREATED=$(CREATED) \
--build-arg LICENSE="Fossorial Commercial" \
--build-arg IMAGE_TITLE="Pangolin EE" \
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere"
.PHONY: build-release build-sqlite build-postgresql build-ee-sqlite build-ee-postgresql
build-release: build-sqlite build-postgresql build-ee-sqlite build-ee-postgresql
@@ -15,6 +34,7 @@ build-sqlite:
docker buildx build \
--build-arg BUILD=oss \
--build-arg DATABASE=sqlite \
$(OCI_ARGS_OSS) \
--platform linux/arm64,linux/amd64 \
--tag fosrl/pangolin:latest \
--tag fosrl/pangolin:$(major_tag) \
@@ -30,6 +50,7 @@ build-postgresql:
docker buildx build \
--build-arg BUILD=oss \
--build-arg DATABASE=pg \
$(OCI_ARGS_OSS) \
--platform linux/arm64,linux/amd64 \
--tag fosrl/pangolin:postgresql-latest \
--tag fosrl/pangolin:postgresql-$(major_tag) \
@@ -45,6 +66,7 @@ build-ee-sqlite:
docker buildx build \
--build-arg BUILD=enterprise \
--build-arg DATABASE=sqlite \
$(OCI_ARGS_EE) \
--platform linux/arm64,linux/amd64 \
--tag fosrl/pangolin:ee-latest \
--tag fosrl/pangolin:ee-$(major_tag) \
@@ -60,6 +82,7 @@ build-ee-postgresql:
docker buildx build \
--build-arg BUILD=enterprise \
--build-arg DATABASE=pg \
$(OCI_ARGS_EE) \
--platform linux/arm64,linux/amd64 \
--tag fosrl/pangolin:ee-postgresql-latest \
--tag fosrl/pangolin:ee-postgresql-$(major_tag) \
@@ -86,9 +109,16 @@ build-release-arm:
fi
@MAJOR_TAG=$$(echo $(tag) | cut -d. -f1); \
MINOR_TAG=$$(echo $(tag) | cut -d. -f1,2); \
CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
docker buildx build \
--build-arg BUILD=oss \
--build-arg DATABASE=sqlite \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg IMAGE_TITLE="Pangolin" \
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/arm64 \
--tag fosrl/pangolin:latest-arm64 \
--tag fosrl/pangolin:$$MAJOR_TAG-arm64 \
@@ -98,6 +128,11 @@ build-release-arm:
docker buildx build \
--build-arg BUILD=oss \
--build-arg DATABASE=pg \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg IMAGE_TITLE="Pangolin" \
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/arm64 \
--tag fosrl/pangolin:postgresql-latest-arm64 \
--tag fosrl/pangolin:postgresql-$$MAJOR_TAG-arm64 \
@@ -107,6 +142,12 @@ build-release-arm:
docker buildx build \
--build-arg BUILD=enterprise \
--build-arg DATABASE=sqlite \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg LICENSE="Fossorial Commercial" \
--build-arg IMAGE_TITLE="Pangolin EE" \
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/arm64 \
--tag fosrl/pangolin:ee-latest-arm64 \
--tag fosrl/pangolin:ee-$$MAJOR_TAG-arm64 \
@@ -116,6 +157,12 @@ build-release-arm:
docker buildx build \
--build-arg BUILD=enterprise \
--build-arg DATABASE=pg \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg LICENSE="Fossorial Commercial" \
--build-arg IMAGE_TITLE="Pangolin EE" \
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/arm64 \
--tag fosrl/pangolin:ee-postgresql-latest-arm64 \
--tag fosrl/pangolin:ee-postgresql-$$MAJOR_TAG-arm64 \
@@ -130,9 +177,16 @@ build-release-amd:
fi
@MAJOR_TAG=$$(echo $(tag) | cut -d. -f1); \
MINOR_TAG=$$(echo $(tag) | cut -d. -f1,2); \
CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
docker buildx build \
--build-arg BUILD=oss \
--build-arg DATABASE=sqlite \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg IMAGE_TITLE="Pangolin" \
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/amd64 \
--tag fosrl/pangolin:latest-amd64 \
--tag fosrl/pangolin:$$MAJOR_TAG-amd64 \
@@ -142,6 +196,11 @@ build-release-amd:
docker buildx build \
--build-arg BUILD=oss \
--build-arg DATABASE=pg \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg IMAGE_TITLE="Pangolin" \
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/amd64 \
--tag fosrl/pangolin:postgresql-latest-amd64 \
--tag fosrl/pangolin:postgresql-$$MAJOR_TAG-amd64 \
@@ -151,6 +210,12 @@ build-release-amd:
docker buildx build \
--build-arg BUILD=enterprise \
--build-arg DATABASE=sqlite \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg LICENSE="Fossorial Commercial" \
--build-arg IMAGE_TITLE="Pangolin EE" \
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/amd64 \
--tag fosrl/pangolin:ee-latest-amd64 \
--tag fosrl/pangolin:ee-$$MAJOR_TAG-amd64 \
@@ -160,6 +225,12 @@ build-release-amd:
docker buildx build \
--build-arg BUILD=enterprise \
--build-arg DATABASE=pg \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg LICENSE="Fossorial Commercial" \
--build-arg IMAGE_TITLE="Pangolin EE" \
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/amd64 \
--tag fosrl/pangolin:ee-postgresql-latest-amd64 \
--tag fosrl/pangolin:ee-postgresql-$$MAJOR_TAG-amd64 \
@@ -213,27 +284,51 @@ build-rc:
echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
exit 1; \
fi
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
docker buildx build \
--build-arg BUILD=oss \
--build-arg DATABASE=sqlite \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg IMAGE_TITLE="Pangolin" \
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/arm64,linux/amd64 \
--tag fosrl/pangolin:$(tag) \
--push .
--push . && \
docker buildx build \
--build-arg BUILD=oss \
--build-arg DATABASE=pg \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg IMAGE_TITLE="Pangolin" \
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/arm64,linux/amd64 \
--tag fosrl/pangolin:postgresql-$(tag) \
--push .
--push . && \
docker buildx build \
--build-arg BUILD=enterprise \
--build-arg DATABASE=sqlite \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg LICENSE="Fossorial Commercial" \
--build-arg IMAGE_TITLE="Pangolin EE" \
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/arm64,linux/amd64 \
--tag fosrl/pangolin:ee-$(tag) \
--push .
--push . && \
docker buildx build \
--build-arg BUILD=enterprise \
--build-arg DATABASE=pg \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg LICENSE="Fossorial Commercial" \
--build-arg IMAGE_TITLE="Pangolin EE" \
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/arm64,linux/amd64 \
--tag fosrl/pangolin:ee-postgresql-$(tag) \
--push .
@@ -243,27 +338,51 @@ build-rc-arm:
echo "Error: tag is required. Usage: make build-rc-arm tag=<tag>"; \
exit 1; \
fi
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
docker buildx build \
--build-arg BUILD=oss \
--build-arg DATABASE=sqlite \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg IMAGE_TITLE="Pangolin" \
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/arm64 \
--tag fosrl/pangolin:$(tag)-arm64 \
--push . && \
docker buildx build \
--build-arg BUILD=oss \
--build-arg DATABASE=pg \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg IMAGE_TITLE="Pangolin" \
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/arm64 \
--tag fosrl/pangolin:postgresql-$(tag)-arm64 \
--push . && \
docker buildx build \
--build-arg BUILD=enterprise \
--build-arg DATABASE=sqlite \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg LICENSE="Fossorial Commercial" \
--build-arg IMAGE_TITLE="Pangolin EE" \
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/arm64 \
--tag fosrl/pangolin:ee-$(tag)-arm64 \
--push . && \
docker buildx build \
--build-arg BUILD=enterprise \
--build-arg DATABASE=pg \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg LICENSE="Fossorial Commercial" \
--build-arg IMAGE_TITLE="Pangolin EE" \
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/arm64 \
--tag fosrl/pangolin:ee-postgresql-$(tag)-arm64 \
--push .
@@ -273,27 +392,51 @@ build-rc-amd:
echo "Error: tag is required. Usage: make build-rc-amd tag=<tag>"; \
exit 1; \
fi
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
docker buildx build \
--build-arg BUILD=oss \
--build-arg DATABASE=sqlite \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg IMAGE_TITLE="Pangolin" \
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/amd64 \
--tag fosrl/pangolin:$(tag)-amd64 \
--push . && \
docker buildx build \
--build-arg BUILD=oss \
--build-arg DATABASE=pg \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg IMAGE_TITLE="Pangolin" \
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/amd64 \
--tag fosrl/pangolin:postgresql-$(tag)-amd64 \
--push . && \
docker buildx build \
--build-arg BUILD=enterprise \
--build-arg DATABASE=sqlite \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg LICENSE="Fossorial Commercial" \
--build-arg IMAGE_TITLE="Pangolin EE" \
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/amd64 \
--tag fosrl/pangolin:ee-$(tag)-amd64 \
--push . && \
docker buildx build \
--build-arg BUILD=enterprise \
--build-arg DATABASE=pg \
--build-arg VERSION=$(tag) \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg LICENSE="Fossorial Commercial" \
--build-arg IMAGE_TITLE="Pangolin EE" \
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/amd64 \
--tag fosrl/pangolin:ee-postgresql-$(tag)-amd64 \
--push .
@@ -326,16 +469,52 @@ create-manifests-rc:
echo "All RC multi-arch manifests created successfully!"
build-arm:
docker buildx build --platform linux/arm64 -t fosrl/pangolin:latest .
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
docker buildx build \
--build-arg VERSION=dev \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg IMAGE_TITLE="Pangolin" \
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/arm64 \
-t fosrl/pangolin:latest .
build-x86:
docker buildx build --platform linux/amd64 -t fosrl/pangolin:latest .
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
docker buildx build \
--build-arg VERSION=dev \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg IMAGE_TITLE="Pangolin" \
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
--platform linux/amd64 \
-t fosrl/pangolin:latest .
dev-build-sqlite:
docker build --build-arg DATABASE=sqlite -t fosrl/pangolin:latest .
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
docker build \
--build-arg DATABASE=sqlite \
--build-arg VERSION=dev \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg IMAGE_TITLE="Pangolin" \
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
-t fosrl/pangolin:latest .
dev-build-pg:
docker build --build-arg DATABASE=pg -t fosrl/pangolin:postgresql-latest .
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
docker build \
--build-arg DATABASE=pg \
--build-arg VERSION=dev \
--build-arg REVISION=$$REVISION \
--build-arg CREATED=$$CREATED \
--build-arg IMAGE_TITLE="Pangolin" \
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
-t fosrl/pangolin:postgresql-latest .
test:
docker run -it -p 3000:3000 -p 3001:3001 -p 3002:3002 -v ./config:/app/config fosrl/pangolin:latest

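Taken together, a release build would be invoked roughly as below (the tag is a hypothetical example); the major and minor rollup tags are derived with the same cut calls the Makefile itself uses.

# Sketch: invoking the multi-arch release targets.
make build-release tag=1.10.3
# The Makefile derives the rollup tags internally:
#   major_tag: echo 1.10.3 | cut -d. -f1   -> 1
#   minor_tag: echo 1.10.3 | cut -d. -f1,2 -> 1.10
# so each buildx invocation pushes :latest, :1, :1.10, and :1.10.3 variants.
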
Traefik dynamic configuration example

@@ -1,5 +1,9 @@
http:
middlewares:
badger:
plugin:
badger:
disableForwardAuth: true
redirect-to-https:
redirectScheme:
scheme: https
@@ -13,6 +17,7 @@ http:
- web
middlewares:
- redirect-to-https
- badger
# Next.js router (handles everything except API and WebSocket paths)
next-router:
@@ -21,6 +26,8 @@ http:
priority: 10
entryPoints:
- websecure
middlewares:
- badger
tls:
certResolver: letsencrypt
@@ -31,6 +38,8 @@ http:
priority: 100
entryPoints:
- websecure
middlewares:
- badger
tls:
certResolver: letsencrypt

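One way to confirm the badger middleware is registered after Traefik reloads this dynamic config is via Traefik's API. This assumes the API/dashboard is enabled and reachable on port 8080, which this example config does not itself show.

# Sketch: list registered middlewares and which routers reference them.
curl -s http://localhost:8080/api/http/middlewares | jq '.[].name'
curl -s http://localhost:8080/api/http/routers | jq '.[] | {name, middlewares}'
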
en-us.json

@@ -257,6 +257,8 @@
"accessRolesSearch": "Search roles...",
"accessRolesAdd": "Add Role",
"accessRoleDelete": "Delete Role",
"accessApprovalsManage": "Manage Approvals",
"accessApprovalsDescription": "Manage approval requests in the organization",
"description": "Description",
"inviteTitle": "Open Invitations",
"inviteDescription": "Manage invitations for other users to join the organization",
@@ -450,6 +452,18 @@
"selectDuration": "Select duration",
"selectResource": "Select Resource",
"filterByResource": "Filter By Resource",
"selectApprovalState": "Select Approval State",
"filterByApprovalState": "Filter By Approval State",
"approvalListEmpty": "No approvals",
"approvalState": "Approval State",
"approve": "Approve",
"approved": "Approved",
"denied": "Denied",
"deniedApproval": "Denied Approval",
"all": "All",
"deny": "Deny",
"viewDetails": "View Details",
"requestingNewDeviceApproval": "requested a new device",
"resetFilters": "Reset Filters",
"totalBlocked": "Requests Blocked By Pangolin",
"totalRequests": "Total Requests",
@@ -729,16 +743,28 @@
"countries": "Countries",
"accessRoleCreate": "Create Role",
"accessRoleCreateDescription": "Create a new role to group users and manage their permissions.",
"accessRoleEdit": "Edit Role",
"accessRoleEditDescription": "Edit role information.",
"accessRoleCreateSubmit": "Create Role",
"accessRoleCreated": "Role created",
"accessRoleCreatedDescription": "The role has been successfully created.",
"accessRoleErrorCreate": "Failed to create role",
"accessRoleErrorCreateDescription": "An error occurred while creating the role.",
"accessRoleUpdateSubmit": "Update Role",
"accessRoleUpdated": "Role updated",
"accessRoleUpdatedDescription": "The role has been successfully updated.",
"accessApprovalUpdated": "Approval processed",
"accessApprovalApprovedDescription": "Set Approval Request decision to approved.",
"accessApprovalDeniedDescription": "Set Approval Request decision to denied.",
"accessRoleErrorUpdate": "Failed to update role",
"accessRoleErrorUpdateDescription": "An error occurred while updating the role.",
"accessApprovalErrorUpdate": "Failed to process approval",
"accessApprovalErrorUpdateDescription": "An error occurred while processing the approval.",
"accessRoleErrorNewRequired": "New role is required",
"accessRoleErrorRemove": "Failed to remove role",
"accessRoleErrorRemoveDescription": "An error occurred while removing the role.",
"accessRoleName": "Role Name",
"accessRoleQuestionRemove": "You're about to delete the {name} role. You cannot undo this action.",
"accessRoleQuestionRemove": "You're about to delete the `{name}` role. You cannot undo this action.",
"accessRoleRemove": "Remove Role",
"accessRoleRemoveDescription": "Remove a role from the organization",
"accessRoleRemoveSubmit": "Remove Role",
@@ -874,7 +900,7 @@
"inviteAlready": "Looks like you've been invited!",
"inviteAlreadyDescription": "To accept the invite, you must log in or create an account.",
"signupQuestion": "Already have an account?",
"login": "Log in",
"login": "Log In",
"resourceNotFound": "Resource Not Found",
"resourceNotFoundDescription": "The resource you're trying to access does not exist.",
"pincodeRequirementsLength": "PIN must be exactly 6 digits",
@@ -954,13 +980,13 @@
"passwordExpiryDescription": "This organization requires you to change your password every {maxDays} days.",
"changePasswordNow": "Change Password Now",
"pincodeAuth": "Authenticator Code",
"pincodeSubmit2": "Submit Code",
"pincodeSubmit2": "Submit code",
"passwordResetSubmit": "Request Reset",
"passwordResetAlreadyHaveCode": "Enter Code",
"passwordResetSmtpRequired": "Please contact your administrator",
"passwordResetSmtpRequiredDescription": "A password reset code is required to reset your password. Please contact your administrator for assistance.",
"passwordBack": "Back to Password",
"loginBack": "Go back to log in",
"loginBack": "Go back to main login page",
"signup": "Sign up",
"loginStart": "Log in to get started",
"idpOidcTokenValidating": "Validating OIDC token",
@@ -1118,6 +1144,10 @@
"actionUpdateIdpOrg": "Update IDP Org",
"actionCreateClient": "Create Client",
"actionDeleteClient": "Delete Client",
"actionArchiveClient": "Archive Client",
"actionUnarchiveClient": "Unarchive Client",
"actionBlockClient": "Block Client",
"actionUnblockClient": "Unblock Client",
"actionUpdateClient": "Update Client",
"actionListClients": "List Clients",
"actionGetClient": "Get Client",
@@ -1134,14 +1164,14 @@
"searchProgress": "Search...",
"create": "Create",
"orgs": "Organizations",
"loginError": "An error occurred while logging in",
"loginRequiredForDevice": "Login is required to authenticate your device.",
"loginError": "An unexpected error occurred. Please try again.",
"loginRequiredForDevice": "Login is required for your device.",
"passwordForgot": "Forgot your password?",
"otpAuth": "Two-Factor Authentication",
"otpAuthDescription": "Enter the code from your authenticator app or one of your single-use backup codes.",
"otpAuthSubmit": "Submit Code",
"idpContinue": "Or continue with",
"otpAuthBack": "Back to Log In",
"otpAuthBack": "Back to Password",
"navbar": "Navigation Menu",
"navbarDescription": "Main navigation menu for the application",
"navbarDocsLink": "Documentation",
@@ -1189,6 +1219,7 @@
"sidebarOverview": "Overview",
"sidebarHome": "Home",
"sidebarSites": "Sites",
"sidebarApprovals": "Approval Requests",
"sidebarResources": "Resources",
"sidebarProxyResources": "Public",
"sidebarClientResources": "Private",
@@ -1205,7 +1236,7 @@
"sidebarIdentityProviders": "Identity Providers",
"sidebarLicense": "License",
"sidebarClients": "Clients",
"sidebarUserDevices": "Users",
"sidebarUserDevices": "User Devices",
"sidebarMachineClients": "Machines",
"sidebarDomains": "Domains",
"sidebarGeneral": "Manage",
@@ -1304,6 +1335,7 @@
"refreshError": "Failed to refresh data",
"verified": "Verified",
"pending": "Pending",
"pendingApproval": "Pending Approval",
"sidebarBilling": "Billing",
"billing": "Billing",
"orgBillingDescription": "Manage billing information and subscriptions",
@@ -1420,7 +1452,7 @@
"securityKeyRemoveSuccess": "Security key removed successfully",
"securityKeyRemoveError": "Failed to remove security key",
"securityKeyLoadError": "Failed to load security keys",
"securityKeyLogin": "Continue with security key",
"securityKeyLogin": "Use Security Key",
"securityKeyAuthError": "Failed to authenticate with security key",
"securityKeyRecommendation": "Register a backup security key on another device to ensure you always have access to your account.",
"registering": "Registering...",
@@ -1547,6 +1579,8 @@
"IntervalSeconds": "Healthy Interval",
"timeoutSeconds": "Timeout (sec)",
"timeIsInSeconds": "Time is in seconds",
"requireDeviceApproval": "Require Device Approvals",
"requireDeviceApprovalDescription": "Users with this role need their devices approved by an admin before they can access resources",
"retryAttempts": "Retry Attempts",
"expectedResponseCodes": "Expected Response Codes",
"expectedResponseCodesDescription": "HTTP status code that indicates healthy status. If left blank, 200-300 is considered healthy.",
@@ -2232,6 +2266,8 @@
"deviceCodeInvalidFormat": "Code must be 9 characters (e.g., A1AJ-N5JD)",
"deviceCodeInvalidOrExpired": "Invalid or expired code",
"deviceCodeVerifyFailed": "Failed to verify device code",
"deviceCodeValidating": "Validating device code...",
"deviceCodeVerifying": "Verifying device authorization...",
"signedInAs": "Signed in as",
"deviceCodeEnterPrompt": "Enter the code displayed on the device",
"continue": "Continue",
@@ -2244,7 +2280,7 @@
"deviceOrganizationsAccess": "Access to all organizations your account has access to",
"deviceAuthorize": "Authorize {applicationName}",
"deviceConnected": "Device Connected!",
"deviceAuthorizedMessage": "Device is authorized to access your account.",
"deviceAuthorizedMessage": "Device is authorized to access your account. Please return to the client application.",
"pangolinCloud": "Pangolin Cloud",
"viewDevices": "View Devices",
"viewDevicesDescription": "Manage your connected devices",
@@ -2306,6 +2342,7 @@
"identifier": "Identifier",
"deviceLoginUseDifferentAccount": "Not you? Use a different account.",
"deviceLoginDeviceRequestingAccessToAccount": "A device is requesting access to this account.",
"loginSelectAuthenticationMethod": "Select an authentication method to continue.",
"noData": "No Data",
"machineClients": "Machine Clients",
"install": "Install",
@@ -2394,5 +2431,56 @@
"maintenanceScreenTitle": "Service Temporarily Unavailable",
"maintenanceScreenMessage": "We are currently experiencing technical difficulties. Please check back soon.",
"maintenanceScreenEstimatedCompletion": "Estimated Completion:",
"createInternalResourceDialogDestinationRequired": "Destination is required"
"createInternalResourceDialogDestinationRequired": "Destination is required",
"available": "Available",
"archived": "Archived",
"noArchivedDevices": "No archived devices found",
"deviceArchived": "Device archived",
"deviceArchivedDescription": "The device has been successfully archived.",
"errorArchivingDevice": "Error archiving device",
"failedToArchiveDevice": "Failed to archive device",
"deviceQuestionArchive": "Are you sure you want to archive this device?",
"deviceMessageArchive": "The device will be archived and removed from your active devices list.",
"deviceArchiveConfirm": "Archive Device",
"archiveDevice": "Archive Device",
"archive": "Archive",
"deviceUnarchived": "Device unarchived",
"deviceUnarchivedDescription": "The device has been successfully unarchived.",
"errorUnarchivingDevice": "Error unarchiving device",
"failedToUnarchiveDevice": "Failed to unarchive device",
"unarchive": "Unarchive",
"archiveClient": "Archive Client",
"archiveClientQuestion": "Are you sure you want to archive this client?",
"archiveClientMessage": "The client will be archived and removed from your active clients list.",
"archiveClientConfirm": "Archive Client",
"blockClient": "Block Client",
"blockClientQuestion": "Are you sure you want to block this client?",
"blockClientMessage": "The device will be forced to disconnect if currently connected. You can unblock the device later.",
"blockClientConfirm": "Block Client",
"active": "Active",
"usernameOrEmail": "Username or Email",
"selectYourOrganization": "Select your organization",
"signInTo": "Log in in to",
"signInWithPassword": "Continue with Password",
"noAuthMethodsAvailable": "No authentication methods available for this organization.",
"enterPassword": "Enter your password",
"enterMfaCode": "Enter the code from your authenticator app",
"securityKeyRequired": "Please use your security key to sign in.",
"needToUseAnotherAccount": "Need to use a different account?",
"loginLegalDisclaimer": "By clicking the buttons below, you acknowledge you have read, understand, and agree to the <termsOfService>Terms of Service</termsOfService> and <privacyPolicy>Privacy Policy</privacyPolicy>.",
"termsOfService": "Terms of Service",
"privacyPolicy": "Privacy Policy",
"userNotFoundWithUsername": "No user found with that username.",
"verify": "Verify",
"signIn": "Sign In",
"forgotPassword": "Forgot password?",
"orgSignInTip": "If you've logged in before, you can enter your username or email above to authenticate with your organization's identity provider instead. It's easier!",
"continueAnyway": "Continue anyway",
"dontShowAgain": "Don't show again",
"orgSignInNotice": "Did you know?",
"signupOrgNotice": "Trying to sign in?",
"signupOrgTip": "Are you trying to sign in through your organization's identity provider?",
"signupOrgLink": "Sign in or sign up with your organization instead",
"verifyEmailLogInWithDifferentAccount": "Use a Different Account",
"logIn": "Log In"
}

View File

@@ -3,7 +3,7 @@
"version": "0.0.0",
"private": true,
"type": "module",
"description": "Tunneled Reverse Proxy Management Server with Identity and Access Control and Dashboard UI",
"description": "Identity-aware VPN and proxy for remote access to anything, anywhere and Dashboard UI",
"homepage": "https://github.com/fosrl/pangolin",
"repository": {
"type": "git",

View File

@@ -78,6 +78,10 @@ export enum ActionsEnum {
updateSiteResource = "updateSiteResource",
createClient = "createClient",
deleteClient = "deleteClient",
archiveClient = "archiveClient",
unarchiveClient = "unarchiveClient",
blockClient = "blockClient",
unblockClient = "unblockClient",
updateClient = "updateClient",
listClients = "listClients",
getClient = "getClient",
@@ -125,7 +129,9 @@ export enum ActionsEnum {
getBlueprint = "getBlueprint",
applyBlueprint = "applyBlueprint",
viewLogs = "viewLogs",
exportLogs = "exportLogs"
exportLogs = "exportLogs",
listApprovals = "listApprovals",
updateApprovals = "updateApprovals"
}
export async function checkUserActionPermission(

server/db/ios_models.json Normal file
View File

@@ -0,0 +1,150 @@
{
"iPad1,1": "iPad",
"iPad2,1": "iPad 2",
"iPad2,2": "iPad 2",
"iPad2,3": "iPad 2",
"iPad2,4": "iPad 2",
"iPad3,1": "iPad 3rd Gen",
"iPad3,3": "iPad 3rd Gen",
"iPad3,2": "iPad 3rd Gen",
"iPad3,4": "iPad 4th Gen",
"iPad3,5": "iPad 4th Gen",
"iPad3,6": "iPad 4th Gen",
"iPad6,11": "iPad 9.7 5th Gen",
"iPad6,12": "iPad 9.7 5th Gen",
"iPad7,5": "iPad 9.7 6th Gen",
"iPad7,6": "iPad 9.7 6th Gen",
"iPad7,11": "iPad 10.2 7th Gen",
"iPad7,12": "iPad 10.2 7th Gen",
"iPad11,6": "iPad 10.2 8th Gen",
"iPad11,7": "iPad 10.2 8th Gen",
"iPad12,1": "iPad 10.2 9th Gen",
"iPad12,2": "iPad 10.2 9th Gen",
"iPad13,18": "iPad 10.9 10th Gen",
"iPad13,19": "iPad 10.9 10th Gen",
"iPad4,1": "iPad Air",
"iPad4,2": "iPad Air",
"iPad4,3": "iPad Air",
"iPad5,3": "iPad Air 2",
"iPad5,4": "iPad Air 2",
"iPad11,3": "iPad Air 3rd Gen",
"iPad11,4": "iPad Air 3rd Gen",
"iPad13,1": "iPad Air 4th Gen",
"iPad13,2": "iPad Air 4th Gen",
"iPad13,16": "iPad Air 5th Gen",
"iPad13,17": "iPad Air 5th Gen",
"iPad14,8": "iPad Air M2 11",
"iPad14,9": "iPad Air M2 11",
"iPad14,10": "iPad Air M2 13",
"iPad14,11": "iPad Air M2 13",
"iPad2,5": "iPad mini",
"iPad2,6": "iPad mini",
"iPad2,7": "iPad mini",
"iPad4,4": "iPad mini 2",
"iPad4,5": "iPad mini 2",
"iPad4,6": "iPad mini 2",
"iPad4,7": "iPad mini 3",
"iPad4,8": "iPad mini 3",
"iPad4,9": "iPad mini 3",
"iPad5,1": "iPad mini 4",
"iPad5,2": "iPad mini 4",
"iPad11,1": "iPad mini 5th Gen",
"iPad11,2": "iPad mini 5th Gen",
"iPad14,1": "iPad mini 6th Gen",
"iPad14,2": "iPad mini 6th Gen",
"iPad6,7": "iPad Pro 12.9",
"iPad6,8": "iPad Pro 12.9",
"iPad6,3": "iPad Pro 9.7",
"iPad6,4": "iPad Pro 9.7",
"iPad7,3": "iPad Pro 10.5",
"iPad7,4": "iPad Pro 10.5",
"iPad7,1": "iPad Pro 12.9",
"iPad7,2": "iPad Pro 12.9",
"iPad8,1": "iPad Pro 11",
"iPad8,2": "iPad Pro 11",
"iPad8,3": "iPad Pro 11",
"iPad8,4": "iPad Pro 11",
"iPad8,5": "iPad Pro 12.9",
"iPad8,6": "iPad Pro 12.9",
"iPad8,7": "iPad Pro 12.9",
"iPad8,8": "iPad Pro 12.9",
"iPad8,9": "iPad Pro 11",
"iPad8,10": "iPad Pro 11",
"iPad8,11": "iPad Pro 12.9",
"iPad8,12": "iPad Pro 12.9",
"iPad13,4": "iPad Pro 11",
"iPad13,5": "iPad Pro 11",
"iPad13,6": "iPad Pro 11",
"iPad13,7": "iPad Pro 11",
"iPad13,8": "iPad Pro 12.9",
"iPad13,9": "iPad Pro 12.9",
"iPad13,10": "iPad Pro 12.9",
"iPad13,11": "iPad Pro 12.9",
"iPad14,3": "iPad Pro 11",
"iPad14,4": "iPad Pro 11",
"iPad14,5": "iPad Pro 12.9",
"iPad14,6": "iPad Pro 12.9",
"iPad16,3": "iPad Pro M4 11",
"iPad16,4": "iPad Pro M4 11",
"iPad16,5": "iPad Pro M4 13",
"iPad16,6": "iPad Pro M4 13",
"iPhone1,1": "iPhone",
"iPhone1,2": "iPhone 3G",
"iPhone2,1": "iPhone 3GS",
"iPhone3,1": "iPhone 4",
"iPhone3,2": "iPhone 4",
"iPhone3,3": "iPhone 4",
"iPhone4,1": "iPhone 4S",
"iPhone5,1": "iPhone 5",
"iPhone5,2": "iPhone 5",
"iPhone5,3": "iPhone 5c",
"iPhone5,4": "iPhone 5c",
"iPhone6,1": "iPhone 5s",
"iPhone6,2": "iPhone 5s",
"iPhone7,2": "iPhone 6",
"iPhone7,1": "iPhone 6 Plus",
"iPhone8,1": "iPhone 6s",
"iPhone8,2": "iPhone 6s Plus",
"iPhone8,4": "iPhone SE",
"iPhone9,1": "iPhone 7",
"iPhone9,3": "iPhone 7",
"iPhone9,2": "iPhone 7 Plus",
"iPhone9,4": "iPhone 7 Plus",
"iPhone10,1": "iPhone 8",
"iPhone10,4": "iPhone 8",
"iPhone10,2": "iPhone 8 Plus",
"iPhone10,5": "iPhone 8 Plus",
"iPhone10,3": "iPhone X",
"iPhone10,6": "iPhone X",
"iPhone11,2": "iPhone Xs",
"iPhone11,6": "iPhone Xs Max",
"iPhone11,8": "iPhone XR",
"iPhone12,1": "iPhone 11",
"iPhone12,3": "iPhone 11 Pro",
"iPhone12,5": "iPhone 11 Pro Max",
"iPhone12,8": "iPhone SE",
"iPhone13,1": "iPhone 12 mini",
"iPhone13,2": "iPhone 12",
"iPhone13,3": "iPhone 12 Pro",
"iPhone13,4": "iPhone 12 Pro Max",
"iPhone14,4": "iPhone 13 mini",
"iPhone14,5": "iPhone 13",
"iPhone14,2": "iPhone 13 Pro",
"iPhone14,3": "iPhone 13 Pro Max",
"iPhone14,6": "iPhone SE",
"iPhone14,7": "iPhone 14",
"iPhone14,8": "iPhone 14 Plus",
"iPhone15,2": "iPhone 14 Pro",
"iPhone15,3": "iPhone 14 Pro Max",
"iPhone15,4": "iPhone 15",
"iPhone15,5": "iPhone 15 Plus",
"iPhone16,1": "iPhone 15 Pro",
"iPhone16,2": "iPhone 15 Pro Max",
"iPod1,1": "iPod touch Original",
"iPod2,1": "iPod touch 2nd",
"iPod3,1": "iPod touch 3rd Gen",
"iPod4,1": "iPod touch 4th",
"iPod5,1": "iPod touch 5th",
"iPod7,1": "iPod touch 6th Gen",
"iPod9,1": "iPod touch 7th Gen"
}

server/db/mac_models.json Normal file
View File

@@ -0,0 +1,201 @@
{
"PowerMac4,4": "eMac",
"PowerMac6,4": "eMac",
"PowerBook2,1": "iBook",
"PowerBook2,2": "iBook",
"PowerBook4,1": "iBook",
"PowerBook4,2": "iBook",
"PowerBook4,3": "iBook",
"PowerBook6,3": "iBook",
"PowerBook6,5": "iBook",
"PowerBook6,7": "iBook",
"iMac,1": "iMac",
"PowerMac2,1": "iMac",
"PowerMac2,2": "iMac",
"PowerMac4,1": "iMac",
"PowerMac4,2": "iMac",
"PowerMac4,5": "iMac",
"PowerMac6,1": "iMac",
"PowerMac6,3*": "iMac",
"PowerMac6,3": "iMac",
"PowerMac8,1": "iMac",
"PowerMac8,2": "iMac",
"PowerMac12,1": "iMac",
"iMac4,1": "iMac",
"iMac4,2": "iMac",
"iMac5,2": "iMac",
"iMac5,1": "iMac",
"iMac6,1": "iMac",
"iMac7,1": "iMac",
"iMac8,1": "iMac",
"iMac9,1": "iMac",
"iMac10,1": "iMac",
"iMac11,1": "iMac",
"iMac11,2": "iMac",
"iMac11,3": "iMac",
"iMac12,1": "iMac",
"iMac12,2": "iMac",
"iMac13,1": "iMac",
"iMac13,2": "iMac",
"iMac14,1": "iMac",
"iMac14,3": "iMac",
"iMac14,2": "iMac",
"iMac14,4": "iMac",
"iMac15,1": "iMac",
"iMac16,1": "iMac",
"iMac16,2": "iMac",
"iMac17,1": "iMac",
"iMac18,1": "iMac",
"iMac18,2": "iMac",
"iMac18,3": "iMac",
"iMac19,2": "iMac",
"iMac19,1": "iMac",
"iMac20,1": "iMac",
"iMac20,2": "iMac",
"iMac21,2": "iMac",
"iMac21,1": "iMac",
"iMacPro1,1": "iMac Pro",
"PowerMac10,1": "Mac mini",
"PowerMac10,2": "Mac mini",
"Macmini1,1": "Mac mini",
"Macmini2,1": "Mac mini",
"Macmini3,1": "Mac mini",
"Macmini4,1": "Mac mini",
"Macmini5,1": "Mac mini",
"Macmini5,2": "Mac mini",
"Macmini5,3": "Mac mini",
"Macmini6,1": "Mac mini",
"Macmini6,2": "Mac mini",
"Macmini7,1": "Mac mini",
"Macmini8,1": "Mac mini",
"ADP3,2": "Mac mini",
"Macmini9,1": "Mac mini",
"Mac14,3": "Mac mini",
"Mac14,12": "Mac mini",
"MacPro1,1*": "Mac Pro",
"MacPro2,1": "Mac Pro",
"MacPro3,1": "Mac Pro",
"MacPro4,1": "Mac Pro",
"MacPro5,1": "Mac Pro",
"MacPro6,1": "Mac Pro",
"MacPro7,1": "Mac Pro",
"N/A*": "Power Macintosh",
"PowerMac1,1": "Power Macintosh",
"PowerMac3,1": "Power Macintosh",
"PowerMac3,3": "Power Macintosh",
"PowerMac3,4": "Power Macintosh",
"PowerMac3,5": "Power Macintosh",
"PowerMac3,6": "Power Macintosh",
"Mac13,1": "Mac Studio",
"Mac13,2": "Mac Studio",
"MacBook1,1": "MacBook",
"MacBook2,1": "MacBook",
"MacBook3,1": "MacBook",
"MacBook4,1": "MacBook",
"MacBook5,1": "MacBook",
"MacBook5,2": "MacBook",
"MacBook6,1": "MacBook",
"MacBook7,1": "MacBook",
"MacBook8,1": "MacBook",
"MacBook9,1": "MacBook",
"MacBook10,1": "MacBook",
"MacBookAir1,1": "MacBook Air",
"MacBookAir2,1": "MacBook Air",
"MacBookAir3,1": "MacBook Air",
"MacBookAir3,2": "MacBook Air",
"MacBookAir4,1": "MacBook Air",
"MacBookAir4,2": "MacBook Air",
"MacBookAir5,1": "MacBook Air",
"MacBookAir5,2": "MacBook Air",
"MacBookAir6,1": "MacBook Air",
"MacBookAir6,2": "MacBook Air",
"MacBookAir7,1": "MacBook Air",
"MacBookAir7,2": "MacBook Air",
"MacBookAir8,1": "MacBook Air",
"MacBookAir8,2": "MacBook Air",
"MacBookAir9,1": "MacBook Air",
"MacBookAir10,1": "MacBook Air",
"Mac14,2": "MacBook Air",
"MacBookPro1,1": "MacBook Pro",
"MacBookPro1,2": "MacBook Pro",
"MacBookPro2,2": "MacBook Pro",
"MacBookPro2,1": "MacBook Pro",
"MacBookPro3,1": "MacBook Pro",
"MacBookPro4,1": "MacBook Pro",
"MacBookPro5,1": "MacBook Pro",
"MacBookPro5,2": "MacBook Pro",
"MacBookPro5,5": "MacBook Pro",
"MacBookPro5,4": "MacBook Pro",
"MacBookPro5,3": "MacBook Pro",
"MacBookPro7,1": "MacBook Pro",
"MacBookPro6,2": "MacBook Pro",
"MacBookPro6,1": "MacBook Pro",
"MacBookPro8,1": "MacBook Pro",
"MacBookPro8,2": "MacBook Pro",
"MacBookPro8,3": "MacBook Pro",
"MacBookPro9,2": "MacBook Pro",
"MacBookPro9,1": "MacBook Pro",
"MacBookPro10,1": "MacBook Pro",
"MacBookPro10,2": "MacBook Pro",
"MacBookPro11,1": "MacBook Pro",
"MacBookPro11,2": "MacBook Pro",
"MacBookPro11,3": "MacBook Pro",
"MacBookPro12,1": "MacBook Pro",
"MacBookPro11,4": "MacBook Pro",
"MacBookPro11,5": "MacBook Pro",
"MacBookPro13,1": "MacBook Pro",
"MacBookPro13,2": "MacBook Pro",
"MacBookPro13,3": "MacBook Pro",
"MacBookPro14,1": "MacBook Pro",
"MacBookPro14,2": "MacBook Pro",
"MacBookPro14,3": "MacBook Pro",
"MacBookPro15,2": "MacBook Pro",
"MacBookPro15,1": "MacBook Pro",
"MacBookPro15,3": "MacBook Pro",
"MacBookPro15,4": "MacBook Pro",
"MacBookPro16,1": "MacBook Pro",
"MacBookPro16,3": "MacBook Pro",
"MacBookPro16,2": "MacBook Pro",
"MacBookPro16,4": "MacBook Pro",
"MacBookPro17,1": "MacBook Pro",
"MacBookPro18,3": "MacBook Pro",
"MacBookPro18,4": "MacBook Pro",
"MacBookPro18,1": "MacBook Pro",
"MacBookPro18,2": "MacBook Pro",
"Mac14,7": "MacBook Pro",
"Mac14,9": "MacBook Pro",
"Mac14,5": "MacBook Pro",
"Mac14,10": "MacBook Pro",
"Mac14,6": "MacBook Pro",
"PowerMac1,2": "Power Macintosh",
"PowerMac5,1": "Power Macintosh",
"PowerMac7,2": "Power Macintosh",
"PowerMac7,3": "Power Macintosh",
"PowerMac9,1": "Power Macintosh",
"PowerMac11,2": "Power Macintosh",
"PowerBook1,1": "PowerBook",
"PowerBook3,1": "PowerBook",
"PowerBook3,2": "PowerBook",
"PowerBook3,3": "PowerBook",
"PowerBook3,4": "PowerBook",
"PowerBook3,5": "PowerBook",
"PowerBook6,1": "PowerBook",
"PowerBook5,1": "PowerBook",
"PowerBook6,2": "PowerBook",
"PowerBook5,2": "PowerBook",
"PowerBook5,3": "PowerBook",
"PowerBook6,4": "PowerBook",
"PowerBook5,4": "PowerBook",
"PowerBook5,5": "PowerBook",
"PowerBook6,8": "PowerBook",
"PowerBook5,6": "PowerBook",
"PowerBook5,7": "PowerBook",
"PowerBook5,8": "PowerBook",
"PowerBook5,9": "PowerBook",
"RackMac1,1": "Xserve",
"RackMac1,2": "Xserve",
"RackMac3,1": "Xserve",
"Xserve1,1": "Xserve",
"Xserve2,1": "Xserve",
"Xserve3,1": "Xserve"
}

View File

@@ -16,6 +16,24 @@ if (!dev) {
}
export const names = JSON.parse(readFileSync(file, "utf-8"));
// Load iOS and Mac model mappings
let iosModelsFile: string;
let macModelsFile: string;
if (!dev) {
iosModelsFile = join(__DIRNAME, "ios_models.json");
macModelsFile = join(__DIRNAME, "mac_models.json");
} else {
iosModelsFile = join("server/db/ios_models.json");
macModelsFile = join("server/db/mac_models.json");
}
const iosModels: Record<string, string> = JSON.parse(
readFileSync(iosModelsFile, "utf-8")
);
const macModels: Record<string, string> = JSON.parse(
readFileSync(macModelsFile, "utf-8")
);
export async function getUniqueClientName(orgId: string): Promise<string> {
let loops = 0;
while (true) {
@@ -159,3 +177,29 @@ export function generateName(): string {
// clean out any non-alphanumeric characters except for dashes
return name.replace(/[^a-z0-9-]/g, "");
}
export function getMacDeviceName(macIdentifier?: string | null): string | null {
if (macIdentifier && macModels[macIdentifier]) {
return macModels[macIdentifier];
}
return null;
}
export function getIosDeviceName(iosIdentifier?: string | null): string | null {
if (iosIdentifier && iosModels[iosIdentifier]) {
return iosModels[iosIdentifier];
}
return null;
}
export function getUserDeviceName(
model: string | null,
fallBack: string | null
): string {
return (
getMacDeviceName(model) ||
getIosDeviceName(model) ||
fallBack ||
"Unknown Device"
);
}
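A quick sanity check of the helpers above, as a hedged sketch: the first two identifiers appear in the bundled JSON maps, while "SM-G998B" is a hypothetical non-Apple identifier included to show the fallback path.

// Illustrative calls against the mappings shipped above:
getUserDeviceName("Mac14,2", null);      // "MacBook Air" (mac_models.json hit)
getUserDeviceName("iPhone16,1", null);   // "iPhone 15 Pro" (ios_models.json hit)
getUserDeviceName("SM-G998B", "Galaxy"); // "Galaxy" (no match, falls back)
getUserDeviceName(null, null);           // "Unknown Device" (no match, no fallback)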

View File

@@ -10,7 +10,15 @@ import {
index
} from "drizzle-orm/pg-core";
import { InferSelectModel } from "drizzle-orm";
import { domains, orgs, targets, users, exitNodes, sessions } from "./schema";
import {
domains,
orgs,
targets,
users,
exitNodes,
sessions,
clients
} from "./schema";
export const certificates = pgTable("certificates", {
certId: serial("certId").primaryKey(),
@@ -289,6 +297,33 @@ export const accessAuditLog = pgTable(
]
);
export const approvals = pgTable("approvals", {
approvalId: serial("approvalId").primaryKey(),
timestamp: integer("timestamp").notNull(), // this is EPOCH time in seconds
orgId: varchar("orgId")
.references(() => orgs.orgId, {
onDelete: "cascade"
})
.notNull(),
clientId: integer("clientId").references(() => clients.clientId, {
onDelete: "cascade"
}), // clients reference user devices (in this case)
userId: varchar("userId")
.references(() => users.userId, {
// optionally tied to a user and in this case delete when the user deletes
onDelete: "cascade"
})
.notNull(),
decision: varchar("decision")
.$type<"approved" | "denied" | "pending">()
.default("pending")
.notNull(),
type: varchar("type")
.$type<"user_device" /*| 'proxy' // for later */>()
.notNull()
});
export type Approval = InferSelectModel<typeof approvals>;
export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;
export type Certificate = InferSelectModel<typeof certificates>;
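For orientation, a minimal sketch of writing and reading rows against the new approvals table with drizzle (assuming db and eq are in scope; the IDs are placeholders, and decision is left to its "pending" default):

// Hypothetical: record a pending device-approval request and fetch it back.
const [request] = await db
    .insert(approvals)
    .values({
        timestamp: Math.floor(Date.now() / 1000), // epoch seconds, per the column comment
        orgId: "org_abc",
        clientId: 42,
        userId: "user_123",
        type: "user_device"
        // decision defaults to "pending"
    })
    .returning();

const pending = await db
    .select()
    .from(approvals)
    .where(eq(approvals.decision, "pending"));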

View File

@@ -365,7 +365,8 @@ export const roles = pgTable("roles", {
.notNull(),
isAdmin: boolean("isAdmin"),
name: varchar("name").notNull(),
description: varchar("description")
description: varchar("description"),
requireDeviceApproval: boolean("requireDeviceApproval").default(false)
});
export const roleActions = pgTable("roleActions", {
@@ -591,7 +592,8 @@ export const idp = pgTable("idp", {
type: varchar("type").notNull(),
defaultRoleMapping: varchar("defaultRoleMapping"),
defaultOrgMapping: varchar("defaultOrgMapping"),
autoProvision: boolean("autoProvision").notNull().default(false)
autoProvision: boolean("autoProvision").notNull().default(false),
tags: text("tags")
});
export const idpOidcConfig = pgTable("idpOidcConfig", {
@@ -688,7 +690,12 @@ export const clients = pgTable("clients", {
online: boolean("online").notNull().default(false),
// endpoint: varchar("endpoint"),
lastHolePunch: integer("lastHolePunch"),
maxConnections: integer("maxConnections")
maxConnections: integer("maxConnections"),
archived: boolean("archived").notNull().default(false),
blocked: boolean("blocked").notNull().default(false),
approvalState: varchar("approvalState").$type<
"pending" | "approved" | "denied"
>()
});
export const clientSitesAssociationsCache = pgTable(
@@ -712,6 +719,49 @@ export const clientSiteResourcesAssociationsCache = pgTable(
}
);
export const clientPostureSnapshots = pgTable("clientPostureSnapshots", {
snapshotId: serial("snapshotId").primaryKey(),
clientId: integer("clientId").references(() => clients.clientId, {
onDelete: "cascade"
}),
// Platform-agnostic checks
biometricsEnabled: boolean("biometricsEnabled").notNull().default(false),
diskEncrypted: boolean("diskEncrypted").notNull().default(false),
firewallEnabled: boolean("firewallEnabled").notNull().default(false),
autoUpdatesEnabled: boolean("autoUpdatesEnabled").notNull().default(false),
tpmAvailable: boolean("tpmAvailable").notNull().default(false),
// Windows-specific posture check information
windowsDefenderEnabled: boolean("windowsDefenderEnabled")
.notNull()
.default(false),
// macOS-specific posture check information
macosSipEnabled: boolean("macosSipEnabled").notNull().default(false),
macosGatekeeperEnabled: boolean("macosGatekeeperEnabled")
.notNull()
.default(false),
macosFirewallStealthMode: boolean("macosFirewallStealthMode")
.notNull()
.default(false),
// Linux-specific posture check information
linuxAppArmorEnabled: boolean("linuxAppArmorEnabled")
.notNull()
.default(false),
linuxSELinuxEnabled: boolean("linuxSELinuxEnabled")
.notNull()
.default(false),
collectedAt: integer("collectedAt").notNull()
});
export const olms = pgTable("olms", {
olmId: varchar("id").primaryKey(),
secretHash: varchar("secretHash").notNull(),
@@ -726,7 +776,29 @@ export const olms = pgTable("olms", {
userId: text("userId").references(() => users.userId, {
// optionally tied to a user and in this case delete when the user deletes
onDelete: "cascade"
})
}),
archived: boolean("archived").notNull().default(false)
});
export const fingerprints = pgTable("fingerprints", {
fingerprintId: serial("id").primaryKey(),
olmId: text("olmId")
.references(() => olms.olmId, { onDelete: "cascade" })
.notNull(),
firstSeen: integer("firstSeen").notNull(),
lastSeen: integer("lastSeen").notNull(),
username: text("username"),
hostname: text("hostname"),
platform: text("platform"), // macos | windows | linux | ios | android | unknown
osVersion: text("osVersion"),
kernelVersion: text("kernelVersion"),
arch: text("arch"),
deviceModel: text("deviceModel"),
serialNumber: text("serialNumber"),
platformFingerprint: varchar("platformFingerprint")
});
export const olmSessions = pgTable("clientSession", {

View File

@@ -1,4 +1,4 @@
import { db, loginPage, LoginPage, loginPageOrg, Org, orgs } from "@server/db";
import { db, loginPage, LoginPage, loginPageOrg, Org, orgs, roles } from "@server/db";
import {
Resource,
ResourcePassword,
@@ -108,9 +108,17 @@ export async function getUserSessionWithUser(
*/
export async function getUserOrgRole(userId: string, orgId: string) {
const userOrgRole = await db
.select()
.select({
userId: userOrgs.userId,
orgId: userOrgs.orgId,
roleId: userOrgs.roleId,
isOwner: userOrgs.isOwner,
autoProvisioned: userOrgs.autoProvisioned,
roleName: roles.name
})
.from(userOrgs)
.where(and(eq(userOrgs.userId, userId), eq(userOrgs.orgId, orgId)))
.leftJoin(roles, eq(userOrgs.roleId, roles.roleId))
.limit(1);
return userOrgRole.length > 0 ? userOrgRole[0] : null;

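A hedged sketch of consuming the widened result (the IDs are placeholders; roleName comes from the left join and may be null when no role row matches):

// Hypothetical caller:
const membership = await getUserOrgRole("user_123", "org_abc");
if (membership) {
    console.log(membership.roleId, membership.roleName ?? "(unnamed role)");
}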
View File

@@ -6,7 +6,7 @@ import {
sqliteTable,
text
} from "drizzle-orm/sqlite-core";
import { domains, exitNodes, orgs, sessions, users } from "./schema";
import { clients, domains, exitNodes, orgs, sessions, users } from "./schema";
export const certificates = sqliteTable("certificates", {
certId: integer("certId").primaryKey({ autoIncrement: true }),
@@ -289,6 +289,31 @@ export const accessAuditLog = sqliteTable(
]
);
export const approvals = sqliteTable("approvals", {
approvalId: integer("approvalId").primaryKey({ autoIncrement: true }),
timestamp: integer("timestamp").notNull(), // this is EPOCH time in seconds
orgId: text("orgId")
.references(() => orgs.orgId, {
onDelete: "cascade"
})
.notNull(),
clientId: integer("clientId").references(() => clients.clientId, {
onDelete: "cascade"
}), // clients reference user devices (in this case)
userId: text("userId").references(() => users.userId, {
// optionally tied to a user and in this case delete when the user deletes
onDelete: "cascade"
}),
decision: text("decision")
.$type<"approved" | "denied" | "pending">()
.default("pending")
.notNull(),
type: text("type")
.$type<"user_device" /*| 'proxy' // for later */>()
.notNull()
});
export type Approval = InferSelectModel<typeof approvals>;
export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;
export type Certificate = InferSelectModel<typeof certificates>;

View File

@@ -255,7 +255,9 @@ export const siteResources = sqliteTable("siteResources", {
aliasAddress: text("aliasAddress"),
tcpPortRangeString: text("tcpPortRangeString").notNull().default("*"),
udpPortRangeString: text("udpPortRangeString").notNull().default("*"),
disableIcmp: integer("disableIcmp", { mode: "boolean" }).notNull().default(false)
disableIcmp: integer("disableIcmp", { mode: "boolean" })
.notNull()
.default(false)
});
export const clientSiteResources = sqliteTable("clientSiteResources", {
@@ -383,7 +385,12 @@ export const clients = sqliteTable("clients", {
type: text("type").notNull(), // "olm"
online: integer("online", { mode: "boolean" }).notNull().default(false),
// endpoint: text("endpoint"),
lastHolePunch: integer("lastHolePunch")
lastHolePunch: integer("lastHolePunch"),
archived: integer("archived", { mode: "boolean" }).notNull().default(false),
blocked: integer("blocked", { mode: "boolean" }).notNull().default(false),
approvalState: text("approvalState").$type<
"pending" | "approved" | "denied"
>()
});
export const clientSitesAssociationsCache = sqliteTable(
@@ -409,6 +416,69 @@ export const clientSiteResourcesAssociationsCache = sqliteTable(
}
);
export const clientPostureSnapshots = sqliteTable("clientPostureSnapshots", {
snapshotId: integer("snapshotId").primaryKey({ autoIncrement: true }),
clientId: integer("clientId").references(() => clients.clientId, {
onDelete: "cascade"
}),
// Platform-agnostic checks
biometricsEnabled: integer("biometricsEnabled", { mode: "boolean" })
.notNull()
.default(false),
diskEncrypted: integer("diskEncrypted", { mode: "boolean" })
.notNull()
.default(false),
firewallEnabled: integer("firewallEnabled", { mode: "boolean" })
.notNull()
.default(false),
autoUpdatesEnabled: integer("autoUpdatesEnabled", { mode: "boolean" })
.notNull()
.default(false),
tpmAvailable: integer("tpmAvailable", { mode: "boolean" })
.notNull()
.default(false),
// Windows-specific posture check information
windowsDefenderEnabled: integer("windowsDefenderEnabled", {
mode: "boolean"
})
.notNull()
.default(false),
// macOS-specific posture check information
macosSipEnabled: integer("macosSipEnabled", { mode: "boolean" })
.notNull()
.default(false),
macosGatekeeperEnabled: integer("macosGatekeeperEnabled", {
mode: "boolean"
})
.notNull()
.default(false),
macosFirewallStealthMode: integer("macosFirewallStealthMode", {
mode: "boolean"
})
.notNull()
.default(false),
// Linux-specific posture check information
linuxAppArmorEnabled: integer("linuxAppArmorEnabled", { mode: "boolean" })
.notNull()
.default(false),
linuxSELinuxEnabled: integer("linuxSELinuxEnabled", {
mode: "boolean"
})
.notNull()
.default(false),
collectedAt: integer("collectedAt").notNull()
});
export const olms = sqliteTable("olms", {
olmId: text("id").primaryKey(),
secretHash: text("secretHash").notNull(),
@@ -423,7 +493,29 @@ export const olms = sqliteTable("olms", {
userId: text("userId").references(() => users.userId, {
// optionally tied to a user and in this case delete when the user deletes
onDelete: "cascade"
})
}),
archived: integer("archived", { mode: "boolean" }).notNull().default(false)
});
export const fingerprints = sqliteTable("fingerprints", {
fingerprintId: integer("id").primaryKey({ autoIncrement: true }),
olmId: text("olmId")
.references(() => olms.olmId, { onDelete: "cascade" })
.notNull(),
firstSeen: integer("firstSeen").notNull(),
lastSeen: integer("lastSeen").notNull(),
username: text("username"),
hostname: text("hostname"),
platform: text("platform"), // macos | windows | linux | ios | android | unknown
osVersion: text("osVersion"),
kernelVersion: text("kernelVersion"),
arch: text("arch"),
deviceModel: text("deviceModel"),
serialNumber: text("serialNumber"),
platformFingerprint: text("platformFingerprint")
});
export const twoFactorBackupCodes = sqliteTable("twoFactorBackupCodes", {
@@ -515,7 +607,10 @@ export const roles = sqliteTable("roles", {
.notNull(),
isAdmin: integer("isAdmin", { mode: "boolean" }),
name: text("name").notNull(),
description: text("description")
description: text("description"),
requireDeviceApproval: integer("requireDeviceApproval", {
mode: "boolean"
}).default(false)
});
export const roleActions = sqliteTable("roleActions", {
@@ -774,7 +869,8 @@ export const idp = sqliteTable("idp", {
mode: "boolean"
})
.notNull()
.default(false)
.default(false),
tags: text("tags")
});
// Identity Provider OAuth Configuration

View File

@@ -290,8 +290,8 @@ export const ClientResourceSchema = z
alias: z
.string()
.regex(
/^(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$/,
"Alias must be a fully qualified domain name (e.g., example.com)"
/^(?:[a-zA-Z0-9*?](?:[a-zA-Z0-9*?-]{0,61}[a-zA-Z0-9*?])?\.)+[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$/,
"Alias must be a fully qualified domain name with optional wildcards (e.g., example.com, *.example.com, host-0?.example.internal)"
)
.optional(),
roles: z

View File

@@ -1,21 +1,24 @@
import { listExitNodes } from "#dynamic/lib/exitNodes";
import { build } from "@server/build";
import {
approvals,
clients,
db,
olms,
orgs,
roleClients,
roles,
Transaction,
userClients,
userOrgs,
Transaction
userOrgs
} from "@server/db";
import { eq, and, notInArray } from "drizzle-orm";
import { listExitNodes } from "#dynamic/lib/exitNodes";
import { getNextAvailableClientSubnet } from "@server/lib/ip";
import logger from "@server/logger";
import { rebuildClientAssociationsFromClient } from "./rebuildClientAssociations";
import { sendTerminateClient } from "@server/routers/client/terminate";
import { getUniqueClientName } from "@server/db/names";
import { getNextAvailableClientSubnet } from "@server/lib/ip";
import { isLicensedOrSubscribed } from "@server/lib/isLicencedOrSubscribed";
import logger from "@server/logger";
import { sendTerminateClient } from "@server/routers/client/terminate";
import { and, eq, notInArray, type InferInsertModel } from "drizzle-orm";
import { rebuildClientAssociationsFromClient } from "./rebuildClientAssociations";
export async function calculateUserClientsForOrgs(
userId: string,
@@ -38,13 +41,15 @@ export async function calculateUserClientsForOrgs(
const allUserOrgs = await transaction
.select()
.from(userOrgs)
.innerJoin(roles, eq(roles.roleId, userOrgs.roleId))
.where(eq(userOrgs.userId, userId));
const userOrgIds = allUserOrgs.map((uo) => uo.orgId);
const userOrgIds = allUserOrgs.map(({ userOrgs: uo }) => uo.orgId);
// For each OLM, ensure there's a client in each org the user is in
for (const olm of userOlms) {
for (const userOrg of allUserOrgs) {
for (const userRoleOrg of allUserOrgs) {
const { userOrgs: userOrg, roles: role } = userRoleOrg;
const orgId = userOrg.orgId;
const [org] = await transaction
@@ -182,21 +187,46 @@ export async function calculateUserClientsForOrgs(
const niceId = await getUniqueClientName(orgId);
const isOrgLicensed = await isLicensedOrSubscribed(
userOrg.orgId
);
const requireApproval =
build !== "oss" &&
isOrgLicensed &&
role.requireDeviceApproval;
const newClientData: InferInsertModel<typeof clients> = {
userId,
orgId: userOrg.orgId,
exitNodeId: randomExitNode.exitNodeId,
name: olm.name || "User Client",
subnet: updatedSubnet,
olmId: olm.olmId,
type: "olm",
niceId,
approvalState: requireApproval ? "pending" : null
};
// Create the client
const [newClient] = await transaction
.insert(clients)
.values({
userId,
orgId: userOrg.orgId,
exitNodeId: randomExitNode.exitNodeId,
name: olm.name || "User Client",
subnet: updatedSubnet,
olmId: olm.olmId,
type: "olm",
niceId
})
.values(newClientData)
.returning();
// create approval request
if (requireApproval) {
await transaction
.insert(approvals)
.values({
timestamp: Math.floor(new Date().getTime() / 1000),
orgId: userOrg.orgId,
clientId: newClient.clientId,
userId,
type: "user_device"
})
.returning();
}
await rebuildClientAssociationsFromClient(
newClient,
transaction

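For reference, the approval gate above reduces to a single predicate; a minimal restatement (names mirror the diff, and requireDeviceApproval is nullable since the column defaults to false without a NOT NULL constraint):

// Devices enter the "pending" approval state only when all three conditions hold.
function needsDeviceApproval(
    build: string,                          // "oss" builds never require approval
    isOrgLicensed: boolean,                 // isLicensedOrSubscribed(orgId)
    requireDeviceApproval: boolean | null   // roles.requireDeviceApproval
): boolean {
    return build !== "oss" && isOrgLicensed && Boolean(requireDeviceApproval);
}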
View File

@@ -26,6 +26,7 @@ export function initLogCleanupInterval() {
)
);
// TODO: coordinate via Redis when multiple nodes run this cleanup concurrently
for (const org of orgsToClean) {
const {
orgId,

View File

@@ -13,3 +13,4 @@ export * from "./verifyApiKeyIsRoot";
export * from "./verifyApiKeyApiKeyAccess";
export * from "./verifyApiKeyClientAccess";
export * from "./verifyApiKeySiteResourceAccess";
export * from "./verifyApiKeyIdpAccess";

View File

@@ -0,0 +1,88 @@
import { Request, Response, NextFunction } from "express";
import { db } from "@server/db";
import { idp, idpOrg, apiKeyOrg } from "@server/db";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
export async function verifyApiKeyIdpAccess(
req: Request,
res: Response,
next: NextFunction
) {
try {
const apiKey = req.apiKey;
const idpId = req.params.idpId || req.body.idpId || req.query.idpId;
const orgId = req.params.orgId;
if (!apiKey) {
return next(
createHttpError(HttpCode.UNAUTHORIZED, "Key not authenticated")
);
}
if (!orgId) {
return next(
createHttpError(HttpCode.BAD_REQUEST, "Invalid organization ID")
);
}
if (!idpId) {
return next(
createHttpError(HttpCode.BAD_REQUEST, "Invalid IDP ID")
);
}
if (apiKey.isRoot) {
// Root keys can access any IDP in any org
return next();
}
const [idpRes] = await db
.select()
.from(idp)
.innerJoin(idpOrg, eq(idp.idpId, idpOrg.idpId))
.where(and(eq(idp.idpId, idpId), eq(idpOrg.orgId, orgId)))
.limit(1);
if (!idpRes || !idpRes.idp || !idpRes.idpOrg) {
return next(
createHttpError(
HttpCode.NOT_FOUND,
`IdP with ID ${idpId} not found for organization ${orgId}`
)
);
}
if (!req.apiKeyOrg) {
const apiKeyOrgRes = await db
.select()
.from(apiKeyOrg)
.where(
and(
eq(apiKeyOrg.apiKeyId, apiKey.apiKeyId),
eq(apiKeyOrg.orgId, idpRes.idpOrg.orgId)
)
);
req.apiKeyOrg = apiKeyOrgRes[0];
}
if (!req.apiKeyOrg) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Key does not have access to this organization"
)
);
}
return next();
} catch (error) {
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Error verifying IDP access"
)
);
}
}

View File

@@ -139,6 +139,10 @@ export class PrivateConfig {
process.env.USE_PANGOLIN_DNS =
this.rawPrivateConfig.flags.use_pangolin_dns.toString();
}
if (this.rawPrivateConfig.flags.use_org_only_idp) {
process.env.USE_ORG_ONLY_IDP =
this.rawPrivateConfig.flags.use_org_only_idp.toString();
}
}
public getRawPrivateConfig() {

View File

@@ -50,10 +50,14 @@ export async function sendToExitNode(
);
}
return sendToClient(remoteExitNode.remoteExitNodeId, {
type: request.remoteType,
data: request.data
});
return sendToClient(
remoteExitNode.remoteExitNodeId,
{
type: request.remoteType,
data: request.data
},
{ incrementConfigVersion: true }
);
} else {
let hostname = exitNode.reachableAt;

View File

@@ -288,7 +288,7 @@ export function selectBestExitNode(
const validNodes = pingResults.filter((n) => !n.error && n.weight > 0);
if (validNodes.length === 0) {
logger.error("No valid exit nodes available");
logger.debug("No valid exit nodes available");
return null;
}

View File

@@ -24,7 +24,9 @@ export class LockManager {
*/
async acquireLock(
lockKey: string,
ttlMs: number = 30000
ttlMs: number = 30000,
maxRetries: number = 3,
retryDelayMs: number = 100
): Promise<boolean> {
if (!redis || !redis.status || redis.status !== "ready") {
return true;
@@ -35,49 +37,67 @@ export class LockManager {
}:${Date.now()}`;
const redisKey = `lock:${lockKey}`;
try {
// Use SET with NX (only set if not exists) and PX (expire in milliseconds)
// This is atomic and handles both setting and expiration
const result = await redis.set(
redisKey,
lockValue,
"PX",
ttlMs,
"NX"
);
if (result === "OK") {
logger.debug(
`Lock acquired: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
for (let attempt = 0; attempt < maxRetries; attempt++) {
try {
// Use SET with NX (only set if not exists) and PX (expire in milliseconds)
// This is atomic and handles both setting and expiration
const result = await redis.set(
redisKey,
lockValue,
"PX",
ttlMs,
"NX"
);
return true;
}
// Check if the existing lock is from this worker (reentrant behavior)
const existingValue = await redis.get(redisKey);
if (
existingValue &&
existingValue.startsWith(
`${config.getRawConfig().gerbil.exit_node_name}:`
)
) {
// Extend the lock TTL since it's the same worker
await redis.pexpire(redisKey, ttlMs);
logger.debug(
`Lock extended: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
);
return true;
}
if (result === "OK") {
logger.debug(
`Lock acquired: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
);
return true;
}
return false;
} catch (error) {
logger.error(`Failed to acquire lock ${lockKey}:`, error);
return false;
// Check if the existing lock is from this worker (reentrant behavior)
const existingValue = await redis.get(redisKey);
if (
existingValue &&
existingValue.startsWith(
`${config.getRawConfig().gerbil.exit_node_name}:`
)
) {
// Extend the lock TTL since it's the same worker
await redis.pexpire(redisKey, ttlMs);
logger.debug(
`Lock extended: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
);
return true;
}
// If this isn't our last attempt, wait before retrying with exponential backoff
if (attempt < maxRetries - 1) {
const delay = retryDelayMs * Math.pow(2, attempt);
logger.debug(
`Lock ${lockKey} not available, retrying in ${delay}ms (attempt ${attempt + 1}/${maxRetries})`
);
await new Promise((resolve) => setTimeout(resolve, delay));
}
} catch (error) {
logger.error(`Failed to acquire lock ${lockKey} (attempt ${attempt + 1}/${maxRetries}):`, error);
// On error, still retry if we have attempts left
if (attempt < maxRetries - 1) {
const delay = retryDelayMs * Math.pow(2, attempt);
await new Promise((resolve) => setTimeout(resolve, delay));
}
}
}
logger.debug(
`Failed to acquire lock ${lockKey} after ${maxRetries} attempts`
);
return false;
}
/**

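A hedged usage sketch of the retrying lock (the key and TTL are illustrative, and lockManager stands in for however the singleton is exposed, which this diff does not show):

// Hypothetical caller: up to 3 attempts with exponential backoff (100ms, 200ms),
// holding the lock for 30s once acquired.
const acquired = await lockManager.acquireLock("config:rebuild", 30_000, 3, 100);
if (acquired) {
    try {
        // ...critical section...
    } finally {
        // presumably released via the matching release method (not shown in this diff)
    }
} else {
    // another node holds the lock; skip or reschedule this run
}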
View File

@@ -83,7 +83,8 @@ export const privateConfigSchema = z.object({
flags: z
.object({
enable_redis: z.boolean().optional().default(false),
use_pangolin_dns: z.boolean().optional().default(false)
use_pangolin_dns: z.boolean().optional().default(false),
use_org_only_idp: z.boolean().optional().default(false)
})
.optional()
.prefault({}),

View File

@@ -573,6 +573,20 @@ class RedisManager {
}
}
public async incr(key: string): Promise<number> {
if (!this.isRedisEnabled() || !this.writeClient) return 0;
try {
return await this.executeWithRetry(
() => this.writeClient!.incr(key),
"Redis INCR"
);
} catch (error) {
logger.error("Redis INCR error:", error);
return 0;
}
}
public async sadd(key: string, member: string): Promise<boolean> {
if (!this.isRedisEnabled() || !this.writeClient) return false;

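One caveat worth a sketch: incr returns 0 both when Redis is disabled and on error, so callers treating the result as a monotonic counter need a fallback (the key and counter below are illustrative):

// Hypothetical caller with a local fallback counter:
let localVersion = 0;
const v = await redisManager.incr("ws:configVersion:olm_abc");
const version = v > 0 ? v : ++localVersion; // 0 signals "Redis unavailable"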
View File

@@ -456,11 +456,11 @@ export async function getTraefikConfig(
// );
} else if (resource.maintenanceModeType === "automatic") {
showMaintenancePage = !hasHealthyServers;
if (showMaintenancePage) {
logger.warn(
`Resource ${resource.name} (${fullDomain}) has no healthy servers - showing maintenance page (AUTOMATIC mode)`
);
}
// if (showMaintenancePage) {
// logger.warn(
// `Resource ${resource.name} (${fullDomain}) has no healthy servers - showing maintenance page (AUTOMATIC mode)`
// );
// }
}
}

View File

@@ -0,0 +1,15 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
export * from "./listApprovals";
export * from "./processPendingApproval";

View File

@@ -0,0 +1,188 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import logger from "@server/logger";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import { z } from "zod";
import { fromError } from "zod-validation-error";
import type { Request, Response, NextFunction } from "express";
import { build } from "@server/build";
import { getOrgTierData } from "@server/lib/billing";
import { TierId } from "@server/lib/billing/tiers";
import { approvals, clients, db, users, type Approval } from "@server/db";
import { eq, inArray, isNull, sql, not, and, desc } from "drizzle-orm";
import response from "@server/lib/response";
const paramsSchema = z.strictObject({
orgId: z.string()
});
const querySchema = z.strictObject({
limit: z
.string()
.optional()
.default("1000")
.transform(Number)
.pipe(z.int().nonnegative()),
offset: z
.string()
.optional()
.default("0")
.transform(Number)
.pipe(z.int().nonnegative()),
approvalState: z
.enum(["pending", "approved", "denied", "all"])
.optional()
.default("all")
.catch("all")
});
async function queryApprovals(
orgId: string,
limit: number,
offset: number,
approvalState: z.infer<typeof querySchema>["approvalState"]
) {
let state: Array<Approval["decision"]> = [];
switch (approvalState) {
case "pending":
state = ["pending"];
break;
case "approved":
state = ["approved"];
break;
case "denied":
state = ["denied"];
break;
default:
state = ["approved", "denied", "pending"];
}
const res = await db
.select({
approvalId: approvals.approvalId,
orgId: approvals.orgId,
clientId: approvals.clientId,
decision: approvals.decision,
type: approvals.type,
user: {
name: users.name,
userId: users.userId,
username: users.username
}
})
.from(approvals)
.innerJoin(users, eq(approvals.userId, users.userId))
.leftJoin(
clients,
and(
eq(approvals.clientId, clients.clientId),
not(isNull(clients.userId)) // only user devices
)
)
.where(
and(
eq(approvals.orgId, orgId),
inArray(approvals.decision, state)
)
)
.orderBy(
sql`CASE ${approvals.decision} WHEN 'pending' THEN 0 ELSE 1 END`,
desc(approvals.timestamp)
)
.limit(limit)
.offset(offset);
return res;
}
export type ListApprovalsResponse = {
approvals: NonNullable<Awaited<ReturnType<typeof queryApprovals>>>;
pagination: { total: number; limit: number; offset: number };
};
export async function listApprovals(
req: Request,
res: Response,
next: NextFunction
) {
try {
const parsedParams = paramsSchema.safeParse(req.params);
if (!parsedParams.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedParams.error).toString()
)
);
}
const parsedQuery = querySchema.safeParse(req.query);
if (!parsedQuery.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedQuery.error).toString()
)
);
}
const { limit, offset, approvalState } = parsedQuery.data;
const { orgId } = parsedParams.data;
if (build === "saas") {
const { tier } = await getOrgTierData(orgId);
const subscribed = tier === TierId.STANDARD;
if (!subscribed) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"This organization's current plan does not support this feature."
)
);
}
}
const approvalsList = await queryApprovals(
orgId.toString(),
limit,
offset,
approvalState
);
const [{ count }] = await db
.select({ count: sql<number>`count(*)` })
.from(approvals)
.where(eq(approvals.orgId, orgId));
return response<ListApprovalsResponse>(res, {
data: {
approvals: approvalsList,
pagination: {
total: count,
limit,
offset
}
},
success: true,
error: false,
message: "Approvals retrieved successfully",
status: HttpCode.OK
});
} catch (error) {
logger.error(error);
return next(
createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
);
}
}

View File

@@ -0,0 +1,142 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import logger from "@server/logger";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import { z } from "zod";
import { fromError } from "zod-validation-error";
import { build } from "@server/build";
import { approvals, clients, db, orgs, type Approval } from "@server/db";
import { getOrgTierData } from "@server/lib/billing";
import { TierId } from "@server/lib/billing/tiers";
import response from "@server/lib/response";
import { and, eq, type InferInsertModel } from "drizzle-orm";
import type { NextFunction, Request, Response } from "express";
const paramsSchema = z.strictObject({
orgId: z.string(),
approvalId: z.string().transform(Number).pipe(z.int().positive())
});
const bodySchema = z.strictObject({
decision: z.enum(["approved", "denied"])
});
export type ProcessApprovalResponse = Approval;
export async function processPendingApproval(
req: Request,
res: Response,
next: NextFunction
) {
try {
const parsedParams = paramsSchema.safeParse(req.params);
if (!parsedParams.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedParams.error).toString()
)
);
}
const parsedBody = bodySchema.safeParse(req.body);
if (!parsedBody.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedBody.error).toString()
)
);
}
const { orgId, approvalId } = parsedParams.data;
if (build === "saas") {
const { tier } = await getOrgTierData(orgId);
const subscribed = tier === TierId.STANDARD;
if (!subscribed) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"This organization's current plan does not support this feature."
)
);
}
}
const updateData = parsedBody.data;
const approval = await db
.select()
.from(approvals)
.where(
and(
eq(approvals.approvalId, approvalId),
eq(approvals.decision, "pending")
)
)
.innerJoin(orgs, eq(approvals.orgId, orgs.orgId))
.limit(1);
if (approval.length === 0) {
return next(
createHttpError(
HttpCode.NOT_FOUND,
`Pending Approval with ID ${approvalId} not found`
)
);
}
const [updatedApproval] = await db
.update(approvals)
.set(updateData)
.where(eq(approvals.approvalId, approvalId))
.returning();
// Update user device approval state too
if (
updatedApproval.type === "user_device" &&
updatedApproval.clientId
) {
const updateDataBody: Partial<InferInsertModel<typeof clients>> = {
approvalState: updateData.decision
};
if (updateData.decision === "denied") {
updateDataBody.blocked = true;
}
await db
.update(clients)
.set(updateDataBody)
.where(eq(clients.clientId, updatedApproval.clientId));
}
return response(res, {
data: updatedApproval,
success: true,
error: false,
message: "Approval updated successfully",
status: HttpCode.OK
});
} catch (error) {
logger.error(error);
return next(
createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
);
}
}

View File

@@ -24,6 +24,7 @@ import * as generateLicense from "./generatedLicense";
import * as logs from "#private/routers/auditLogs";
import * as misc from "#private/routers/misc";
import * as reKey from "#private/routers/re-key";
import * as approval from "#private/routers/approvals";
import {
verifyOrgAccess,
@@ -311,6 +312,24 @@ authenticated.get(
loginPage.getLoginPage
);
authenticated.get(
"/org/:orgId/approvals",
verifyValidLicense,
verifyOrgAccess,
verifyUserHasAction(ActionsEnum.listApprovals),
logActionAudit(ActionsEnum.listApprovals),
approval.listApprovals
);
authenticated.put(
"/org/:orgId/approvals/:approvalId",
verifyValidLicense,
verifyOrgAccess,
verifyUserHasAction(ActionsEnum.updateApprovals),
logActionAudit(ActionsEnum.updateApprovals),
approval.processPendingApproval
);
authenticated.get(
"/org/:orgId/login-page-branding",
verifyValidLicense,

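A hedged example of exercising the two routes registered above (org and approval IDs are placeholders; authentication and any API path prefix are omitted):

// Hypothetical client-side calls:
// List pending device-approval requests for an org
await fetch("/org/org_abc/approvals?approvalState=pending&limit=50&offset=0");

// Approve (or deny) a specific request
await fetch("/org/org_abc/approvals/12", {
    method: "PUT",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ decision: "approved" })
});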
View File

@@ -18,7 +18,8 @@ import * as logs from "#private/routers/auditLogs";
import {
verifyApiKeyHasAction,
verifyApiKeyIsRoot,
verifyApiKeyOrgAccess
verifyApiKeyOrgAccess,
verifyApiKeyIdpAccess
} from "@server/middlewares";
import {
verifyValidSubscription,
@@ -31,6 +32,8 @@ import {
authenticated as a
} from "@server/routers/integration";
import { logActionAudit } from "#private/middlewares";
import config from "#private/lib/config";
import { build } from "@server/build";
export const unauthenticated = ua;
export const authenticated = a;
@@ -88,3 +91,49 @@ authenticated.get(
logActionAudit(ActionsEnum.exportLogs),
logs.exportAccessAuditLogs
);
authenticated.put(
"/org/:orgId/idp/oidc",
verifyValidLicense,
verifyApiKeyOrgAccess,
verifyApiKeyHasAction(ActionsEnum.createIdp),
logActionAudit(ActionsEnum.createIdp),
orgIdp.createOrgOidcIdp
);
authenticated.post(
"/org/:orgId/idp/:idpId/oidc",
verifyValidLicense,
verifyApiKeyOrgAccess,
verifyApiKeyIdpAccess,
verifyApiKeyHasAction(ActionsEnum.updateIdp),
logActionAudit(ActionsEnum.updateIdp),
orgIdp.updateOrgOidcIdp
);
authenticated.delete(
"/org/:orgId/idp/:idpId",
verifyValidLicense,
verifyApiKeyOrgAccess,
verifyApiKeyIdpAccess,
verifyApiKeyHasAction(ActionsEnum.deleteIdp),
logActionAudit(ActionsEnum.deleteIdp),
orgIdp.deleteOrgIdp
);
authenticated.get(
"/org/:orgId/idp/:idpId",
verifyValidLicense,
verifyApiKeyOrgAccess,
verifyApiKeyIdpAccess,
verifyApiKeyHasAction(ActionsEnum.getIdp),
orgIdp.getOrgIdp
);
authenticated.get(
"/org/:orgId/idp",
verifyValidLicense,
verifyApiKeyOrgAccess,
verifyApiKeyHasAction(ActionsEnum.listIdps),
orgIdp.listOrgIdps
);

View File

@@ -29,11 +29,9 @@ import { getOrgTierData } from "#private/lib/billing";
import { TierId } from "@server/lib/billing/tiers";
import { build } from "@server/build";
const paramsSchema = z
.object({
orgId: z.string()
})
.strict();
const paramsSchema = z.strictObject({
orgId: z.string()
});
export async function getLoginPageBranding(
req: Request,

View File

@@ -28,6 +28,7 @@ import { eq, InferInsertModel } from "drizzle-orm";
import { getOrgTierData } from "#private/lib/billing";
import { TierId } from "@server/lib/billing/tiers";
import { build } from "@server/build";
import config from "@server/private/lib/config";
const paramsSchema = z.strictObject({
orgId: z.string()
@@ -94,8 +95,10 @@ export async function upsertLoginPageBranding(
typeof loginPageBranding
>;
if (build !== "saas") {
// org branding settings are only considered in the saas build or when use_org_only_idp is enabled
if (
build !== "saas" &&
!config.getRawPrivateConfig().flags.use_org_only_idp
) {
const { orgTitle, orgSubtitle, ...rest } = updateData;
updateData = rest;
}

View File

@@ -43,25 +43,27 @@ const bodySchema = z.strictObject({
scopes: z.string().nonempty(),
autoProvision: z.boolean().optional(),
variant: z.enum(["oidc", "google", "azure"]).optional().default("oidc"),
roleMapping: z.string().optional()
roleMapping: z.string().optional(),
tags: z.string().optional()
});
// registry.registerPath({
// method: "put",
// path: "/idp/oidc",
// description: "Create an OIDC IdP.",
// tags: [OpenAPITags.Idp],
// request: {
// body: {
// content: {
// "application/json": {
// schema: bodySchema
// }
// }
// }
// },
// responses: {}
// });
registry.registerPath({
method: "put",
path: "/org/{orgId}/idp/oidc",
description: "Create an OIDC IdP for a specific organization.",
tags: [OpenAPITags.Idp, OpenAPITags.Org],
request: {
params: paramsSchema,
body: {
content: {
"application/json": {
schema: bodySchema
}
}
}
},
responses: {}
});
export async function createOrgOidcIdp(
req: Request,
@@ -103,7 +105,8 @@ export async function createOrgOidcIdp(
name,
autoProvision,
variant,
roleMapping
roleMapping,
tags
} = parsedBody.data;
if (build === "saas") {
@@ -131,7 +134,8 @@ export async function createOrgOidcIdp(
.values({
name,
autoProvision,
type: "oidc"
type: "oidc",
tags
})
.returning();

View File

@@ -32,9 +32,9 @@ const paramsSchema = z
registry.registerPath({
method: "delete",
path: "/idp/{idpId}",
description: "Delete IDP.",
tags: [OpenAPITags.Idp],
path: "/org/{orgId}/idp/{idpId}",
description: "Delete IDP for a specific organization.",
tags: [OpenAPITags.Idp, OpenAPITags.Org],
request: {
params: paramsSchema
},

View File

@@ -48,16 +48,16 @@ async function query(idpId: number, orgId: string) {
return res;
}
// registry.registerPath({
// method: "get",
// path: "/idp/{idpId}",
// description: "Get an IDP by its IDP ID.",
// tags: [OpenAPITags.Idp],
// request: {
// params: paramsSchema
// },
// responses: {}
// });
registry.registerPath({
method: "get",
path: "/org/:orgId/idp/:idpId",
description: "Get an IDP by its IDP ID for a specific organization.",
tags: [OpenAPITags.Idp, OpenAPITags.Org],
request: {
params: paramsSchema
},
responses: {}
});
export async function getOrgIdp(
req: Request,

View File

@@ -50,7 +50,8 @@ async function query(orgId: string, limit: number, offset: number) {
orgId: idpOrg.orgId,
name: idp.name,
type: idp.type,
variant: idpOidcConfig.variant
variant: idpOidcConfig.variant,
tags: idp.tags
})
.from(idpOrg)
.where(eq(idpOrg.orgId, orgId))
@@ -62,16 +63,17 @@ async function query(orgId: string, limit: number, offset: number) {
return res;
}
// registry.registerPath({
// method: "get",
// path: "/idp",
// description: "List all IDP in the system.",
// tags: [OpenAPITags.Idp],
// request: {
// query: querySchema
// },
// responses: {}
// });
registry.registerPath({
method: "get",
path: "/org/{orgId}/idp",
description: "List all IDP for a specific organization.",
tags: [OpenAPITags.Idp, OpenAPITags.Org],
request: {
query: querySchema,
params: paramsSchema
},
responses: {}
});
export async function listOrgIdps(
req: Request,

View File

@@ -46,30 +46,31 @@ const bodySchema = z.strictObject({
namePath: z.string().optional(),
scopes: z.string().optional(),
autoProvision: z.boolean().optional(),
roleMapping: z.string().optional()
roleMapping: z.string().optional(),
tags: z.string().optional()
});
export type UpdateOrgIdpResponse = {
idpId: number;
};
// registry.registerPath({
// method: "post",
// path: "/idp/{idpId}/oidc",
// description: "Update an OIDC IdP.",
// tags: [OpenAPITags.Idp],
// request: {
// params: paramsSchema,
// body: {
// content: {
// "application/json": {
// schema: bodySchema
// }
// }
// }
// },
// responses: {}
// });
registry.registerPath({
method: "post",
path: "/org/{orgId}/idp/{idpId}/oidc",
description: "Update an OIDC IdP for a specific organization.",
tags: [OpenAPITags.Idp, OpenAPITags.Org],
request: {
params: paramsSchema,
body: {
content: {
"application/json": {
schema: bodySchema
}
}
}
},
responses: {}
});
export async function updateOrgOidcIdp(
req: Request,
@@ -109,7 +110,8 @@ export async function updateOrgOidcIdp(
namePath,
name,
autoProvision,
roleMapping
roleMapping,
tags
} = parsedBody.data;
if (build === "saas") {
@@ -167,7 +169,8 @@ export async function updateOrgOidcIdp(
await db.transaction(async (trx) => {
const idpData = {
name,
autoProvision
autoProvision,
tags
};
// only update if at least one key is not undefined

View File

@@ -43,7 +43,8 @@ import {
WSMessage,
TokenPayload,
WebSocketRequest,
RedisMessage
RedisMessage,
SendMessageOptions
} from "@server/routers/ws";
import { validateSessionToken } from "@server/auth/sessions/app";
@@ -118,12 +119,21 @@ const processMessage = async (
if (response.broadcast) {
await broadcastToAllExcept(
response.message,
response.excludeSender ? clientId : undefined
response.excludeSender ? clientId : undefined,
response.options
);
} else if (response.targetClientId) {
await sendToClient(response.targetClientId, response.message);
await sendToClient(
response.targetClientId,
response.message,
response.options
);
} else {
ws.send(JSON.stringify(response.message));
await sendToClient(
clientId,
response.message,
response.options
);
}
}
} catch (error) {
@@ -172,6 +182,9 @@ const REDIS_CHANNEL = "websocket_messages";
// Client tracking map (local to this node)
const connectedClients: Map<string, AuthenticatedWebSocket[]> = new Map();
// Config version tracking map (local to this node, resets on server restart)
const clientConfigVersions: Map<string, number> = new Map();
// Recovery tracking
let isRedisRecoveryInProgress = false;
@@ -182,6 +195,8 @@ const getClientMapKey = (clientId: string) => clientId;
const getConnectionsKey = (clientId: string) => `ws:connections:${clientId}`;
const getNodeConnectionsKey = (nodeId: string, clientId: string) =>
`ws:node:${nodeId}:${clientId}`;
const getConfigVersionKey = (clientId: string) =>
`ws:configVersion:${clientId}`;
// Initialize Redis subscription for cross-node messaging
const initializeRedisSubscription = async (): Promise<void> => {
@@ -304,6 +319,45 @@ const addClient = async (
existingClients.push(ws);
connectedClients.set(mapKey, existingClients);
// Get or initialize config version
let configVersion = 0;
// Check Redis first if enabled
if (redisManager.isRedisEnabled()) {
try {
const redisVersion = await redisManager.get(getConfigVersionKey(clientId));
if (redisVersion !== null) {
configVersion = parseInt(redisVersion, 10);
// Sync to local cache
clientConfigVersions.set(clientId, configVersion);
} else if (!clientConfigVersions.has(clientId)) {
// No version in Redis or local cache, initialize to 0
await redisManager.set(getConfigVersionKey(clientId), "0");
clientConfigVersions.set(clientId, 0);
} else {
// Use local cache version and sync to Redis
configVersion = clientConfigVersions.get(clientId) || 0;
await redisManager.set(getConfigVersionKey(clientId), configVersion.toString());
}
} catch (error) {
logger.error("Failed to get/set config version in Redis:", error);
// Fall back to local cache
if (!clientConfigVersions.has(clientId)) {
clientConfigVersions.set(clientId, 0);
}
configVersion = clientConfigVersions.get(clientId) || 0;
}
} else {
// Redis not enabled, use local cache only
if (!clientConfigVersions.has(clientId)) {
clientConfigVersions.set(clientId, 0);
}
configVersion = clientConfigVersions.get(clientId) || 0;
}
// Set config version on websocket
ws.configVersion = configVersion;
// Add to Redis tracking if enabled
if (redisManager.isRedisEnabled()) {
try {
@@ -322,7 +376,7 @@ const addClient = async (
}
logger.info(
`Client added to tracking - ${clientType.toUpperCase()} ID: ${clientId}, Connection ID: ${connectionId}, Total connections: ${existingClients.length}`
`Client added to tracking - ${clientType.toUpperCase()} ID: ${clientId}, Connection ID: ${connectionId}, Total connections: ${existingClients.length}, Config version: ${configVersion}`
);
};
@@ -377,53 +431,133 @@ const removeClient = async (
}
};
// Helper to get the current config version for a client
const getClientConfigVersion = async (clientId: string): Promise<number | undefined> => {
// Try Redis first if available
if (redisManager.isRedisEnabled()) {
try {
const redisVersion = await redisManager.get(
getConfigVersionKey(clientId)
);
if (redisVersion !== null) {
const version = parseInt(redisVersion, 10);
// Sync local cache with Redis
clientConfigVersions.set(clientId, version);
return version;
}
} catch (error) {
logger.error("Failed to get config version from Redis:", error);
}
}
// Fall back to local cache
return clientConfigVersions.get(clientId);
};
// Helper to increment and get the new config version for a client
const incrementClientConfigVersion = async (
clientId: string
): Promise<number> => {
let newVersion: number;
if (redisManager.isRedisEnabled()) {
try {
// Use Redis INCR for atomic increment across nodes
newVersion = await redisManager.incr(getConfigVersionKey(clientId));
// Sync local cache
clientConfigVersions.set(clientId, newVersion);
return newVersion;
} catch (error) {
logger.error("Failed to increment config version in Redis:", error);
// Fall through to local increment
}
}
// Local increment
const currentVersion = clientConfigVersions.get(clientId) || 0;
newVersion = currentVersion + 1;
clientConfigVersions.set(clientId, newVersion);
return newVersion;
};
// Local message sending (within this node)
const sendToClientLocal = async (
clientId: string,
message: WSMessage
message: WSMessage,
options: SendMessageOptions = {}
): Promise<boolean> => {
const mapKey = getClientMapKey(clientId);
const clients = connectedClients.get(mapKey);
if (!clients || clients.length === 0) {
return false;
}
const messageString = JSON.stringify(message);
// Handle config version
let configVersion = await getClientConfigVersion(clientId);
// Add config version to message
const messageWithVersion = {
...message,
configVersion
};
const messageString = JSON.stringify(messageWithVersion);
clients.forEach((client) => {
if (client.readyState === WebSocket.OPEN) {
client.send(messageString);
}
});
logger.debug(
`sendToClient: Message type ${message.type} sent to clientId ${clientId}`
);
return true;
};
const broadcastToAllExceptLocal = async (
message: WSMessage,
excludeClientId?: string
excludeClientId?: string,
options: SendMessageOptions = {}
): Promise<void> => {
connectedClients.forEach((clients, mapKey) => {
for (const [mapKey, clients] of connectedClients.entries()) {
const [type, id] = mapKey.split(":");
if (!(excludeClientId && id === excludeClientId)) {
const clientId = mapKey; // mapKey is the clientId
if (!(excludeClientId && clientId === excludeClientId)) {
// Handle config version per client
let configVersion = await getClientConfigVersion(clientId);
if (options.incrementConfigVersion) {
configVersion = await incrementClientConfigVersion(clientId);
}
// Add config version to message
const messageWithVersion = {
...message,
configVersion
};
clients.forEach((client) => {
if (client.readyState === WebSocket.OPEN) {
client.send(JSON.stringify(message));
client.send(JSON.stringify(messageWithVersion));
}
});
}
});
}
};
// Cross-node message sending (via Redis)
const sendToClient = async (
clientId: string,
message: WSMessage
message: WSMessage,
options: SendMessageOptions = {}
): Promise<boolean> => {
let configVersion = await getClientConfigVersion(clientId);
if (options.incrementConfigVersion) {
configVersion = await incrementClientConfigVersion(clientId);
}
logger.debug(
`sendToClient: Message type ${message.type} sent to clientId ${clientId} (new configVersion: ${configVersion})`
);
// Try to send locally first
const localSent = await sendToClientLocal(clientId, message);
const localSent = await sendToClientLocal(clientId, message, options);
// Only send via Redis if the client is not connected locally and Redis is enabled
if (!localSent && redisManager.isRedisEnabled()) {
@@ -431,7 +565,10 @@ const sendToClient = async (
const redisMessage: RedisMessage = {
type: "direct",
targetClientId: clientId,
message,
message: {
...message,
configVersion
},
fromNodeId: NODE_ID
};
@@ -458,19 +595,22 @@ const sendToClient = async (
const broadcastToAllExcept = async (
message: WSMessage,
excludeClientId?: string
excludeClientId?: string,
options: SendMessageOptions = {}
): Promise<void> => {
// Broadcast locally
await broadcastToAllExceptLocal(message, excludeClientId);
await broadcastToAllExceptLocal(message, excludeClientId, options);
// If Redis is enabled, also broadcast via Redis pub/sub to other nodes
// Note: For broadcasts, we include the options so remote nodes can handle versioning
if (redisManager.isRedisEnabled()) {
try {
const redisMessage: RedisMessage = {
type: "broadcast",
excludeClientId,
message,
fromNodeId: NODE_ID
fromNodeId: NODE_ID,
options
};
await redisManager.publish(
@@ -936,5 +1076,6 @@ export {
getActiveNodes,
disconnectClient,
NODE_ID,
cleanup
cleanup,
getClientConfigVersion
};
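Taken together, the exports above define the delivery contract: a config-changing send passes { incrementConfigVersion: true }, every outbound message is stamped with the client's current counter, and a receiver that later echoes a stale counter can be resynced. A hypothetical caller under those assumptions (the newt ID and target string are made up):

import { sendToClient } from "#dynamic/routers/ws";

// Bump the per-client counter so a later ping comparison can detect a missed delivery.
await sendToClient(
    "newt-abc123",
    {
        type: "newt/wg/targets/add",
        data: ["8080:10.0.0.5:80"] // "<internalPort>:<ip>:<port>"
    },
    { incrementConfigVersion: true }
);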

View File

@@ -17,3 +17,4 @@ export * from "./securityKey";
export * from "./startDeviceWebAuth";
export * from "./verifyDeviceWebAuth";
export * from "./pollDeviceWebAuth";
export * from "./lookupUser";

View File

@@ -0,0 +1,224 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import {
users,
userOrgs,
orgs,
idpOrg,
idp,
idpOidcConfig
} from "@server/db";
import { eq, or, sql, and, isNotNull, inArray } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
import { UserType } from "@server/types/UserTypes";
const lookupBodySchema = z.strictObject({
identifier: z.string().min(1).toLowerCase()
});
export type LookupUserResponse = {
found: boolean;
identifier: string;
accounts: Array<{
userId: string;
email: string | null;
username: string;
hasInternalAuth: boolean;
orgs: Array<{
orgId: string;
orgName: string;
idps: Array<{
idpId: number;
name: string;
variant: string | null;
}>;
hasInternalAuth: boolean;
}>;
}>;
};
// registry.registerPath({
// method: "post",
// path: "/auth/lookup-user",
// description: "Lookup user accounts by username or email and return available authentication methods.",
// tags: [OpenAPITags.Auth],
// request: {
// body: lookupBodySchema
// },
// responses: {}
// });
export async function lookupUser(
req: Request,
res: Response,
next: NextFunction
): Promise<any> {
try {
const parsedBody = lookupBodySchema.safeParse(req.body);
if (!parsedBody.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedBody.error).toString()
)
);
}
const { identifier } = parsedBody.data;
// Query users matching identifier (case-insensitive)
// Match by username OR email
const matchingUsers = await db
.select({
userId: users.userId,
email: users.email,
username: users.username,
type: users.type,
passwordHash: users.passwordHash,
idpId: users.idpId
})
.from(users)
.where(
or(
sql`LOWER(${users.username}) = ${identifier}`,
sql`LOWER(${users.email}) = ${identifier}`
)
);
if (!matchingUsers || matchingUsers.length === 0) {
return response<LookupUserResponse>(res, {
data: {
found: false,
identifier,
accounts: []
},
success: true,
error: false,
message: "No accounts found",
status: HttpCode.OK
});
}
// Get unique user IDs
const userIds = [...new Set(matchingUsers.map((u) => u.userId))];
// Get all org memberships for these users
const orgMemberships = await db
.select({
userId: userOrgs.userId,
orgId: userOrgs.orgId,
orgName: orgs.name
})
.from(userOrgs)
.innerJoin(orgs, eq(orgs.orgId, userOrgs.orgId))
.where(inArray(userOrgs.userId, userIds));
// Get unique org IDs
const orgIds = [...new Set(orgMemberships.map((m) => m.orgId))];
// Get all IdPs for these orgs
const orgIdps =
orgIds.length > 0
? await db
.select({
orgId: idpOrg.orgId,
idpId: idp.idpId,
idpName: idp.name,
variant: idpOidcConfig.variant
})
.from(idpOrg)
.innerJoin(idp, eq(idp.idpId, idpOrg.idpId))
.innerJoin(
idpOidcConfig,
eq(idpOidcConfig.idpId, idp.idpId)
)
.where(inArray(idpOrg.orgId, orgIds))
: [];
// Build response structure
const accounts: LookupUserResponse["accounts"] = [];
for (const user of matchingUsers) {
const hasInternalAuth =
user.type === UserType.Internal && user.passwordHash !== null;
// Get orgs for this user
const userOrgMemberships = orgMemberships.filter(
(m) => m.userId === user.userId
);
// Deduplicate orgs (user might have multiple memberships in same org)
const uniqueOrgs = new Map<string, typeof userOrgMemberships[0]>();
for (const membership of userOrgMemberships) {
if (!uniqueOrgs.has(membership.orgId)) {
uniqueOrgs.set(membership.orgId, membership);
}
}
const orgsData = Array.from(uniqueOrgs.values()).map((membership) => {
// Get IdPs for this org where the user (with the exact identifier) is authenticated via that IdP
// Only show IdPs where the user's idpId matches
// Internal users don't have an idpId, so they won't see any IdPs
const orgIdpsList = orgIdps
.filter((idp) => {
if (idp.orgId !== membership.orgId) {
return false;
}
// Only show IdPs where the user (with exact identifier) is authenticated via that IdP
// This means user.idpId must match idp.idpId
if (user.idpId !== null && user.idpId === idp.idpId) {
return true;
}
return false;
})
.map((idp) => ({
idpId: idp.idpId,
name: idp.idpName,
variant: idp.variant
}));
// Check if user has internal auth for this org
// User has internal auth if they have an internal account type
const orgHasInternalAuth = hasInternalAuth;
return {
orgId: membership.orgId,
orgName: membership.orgName,
idps: orgIdpsList,
hasInternalAuth: orgHasInternalAuth
};
});
accounts.push({
userId: user.userId,
email: user.email,
username: user.username,
hasInternalAuth,
orgs: orgsData
});
}
return response<LookupUserResponse>(res, {
data: {
found: true,
identifier,
accounts
},
success: true,
error: false,
message: "User lookup completed",
status: HttpCode.OK
});
} catch (error) {
logger.error(error);
return next(
createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
);
}
}
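A hypothetical client-side call against the new endpoint; the /api/v1 prefix and the { data } envelope are assumptions inferred from the response() helper, not confirmed by this diff:

const res = await fetch("/api/v1/auth/lookup-user", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ identifier: "alice@example.com" })
});
const body = (await res.json()) as { data: LookupUserResponse };
if (body.data.found) {
    // Each account carries the orgs and the IdPs usable for this identifier.
    for (const account of body.data.accounts) {
        console.log(account.username, account.orgs.map((o) => o.orgId));
    }
}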

View File

@@ -10,6 +10,7 @@ import { eq, and, gt } from "drizzle-orm";
import { encodeHexLowerCase } from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";
import { unauthorized } from "@server/auth/unauthorizedResponse";
import { getIosDeviceName, getMacDeviceName } from "@server/db/names";
const bodySchema = z
.object({
@@ -120,6 +121,11 @@ export async function verifyDeviceWebAuth(
);
}
const deviceName =
getMacDeviceName(deviceCode.deviceName) ||
getIosDeviceName(deviceCode.deviceName) ||
deviceCode.deviceName;
// If verify is false, just return metadata without verifying
if (!verify) {
return response<VerifyDeviceWebAuthResponse>(res, {
@@ -129,7 +135,7 @@ export async function verifyDeviceWebAuth(
metadata: {
ip: deviceCode.ip,
city: deviceCode.city,
deviceName: deviceCode.deviceName,
deviceName: deviceName,
applicationName: deviceCode.applicationName,
createdAt: deviceCode.createdAt
}
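The two name helpers are assumed to map Apple hardware identifiers to marketing names and to return null on a miss, which is what lets the || fallback chain above terminate at the raw deviceName; a sketch of that assumed contract:

// Illustrative only: a single made-up entry, not the repo's actual lookup table.
function getMacDeviceNameSketch(identifier: string | null): string | null {
    const macNames: Record<string, string> = {
        "Mac15,6": "MacBook Pro (14-inch, Nov 2023)"
    };
    return identifier ? (macNames[identifier] ?? null) : null;
}
// getIosDeviceName is assumed to do the same for "iPhone16,1"-style identifiers.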

View File

@@ -49,27 +49,43 @@ const auditLogBuffer: Array<{
const BATCH_SIZE = 100; // Write to DB every 100 logs
const BATCH_INTERVAL_MS = 5000; // Or every 5 seconds, whichever comes first
const MAX_BUFFER_SIZE = 10000; // Prevent unbounded memory growth
let flushTimer: NodeJS.Timeout | null = null;
let isFlushInProgress = false;
/**
* Flush buffered logs to database
*/
async function flushAuditLogs() {
if (auditLogBuffer.length === 0) {
if (auditLogBuffer.length === 0 || isFlushInProgress) {
return;
}
isFlushInProgress = true;
// Take all current logs and clear buffer
const logsToWrite = auditLogBuffer.splice(0, auditLogBuffer.length);
try {
// Batch insert all logs at once
await db.insert(requestAuditLog).values(logsToWrite);
// Batch insert logs in groups of 25 to avoid overwhelming the database
const BATCH_DB_SIZE = 25;
for (let i = 0; i < logsToWrite.length; i += BATCH_DB_SIZE) {
const batch = logsToWrite.slice(i, i + BATCH_DB_SIZE);
await db.insert(requestAuditLog).values(batch);
}
logger.debug(`Flushed ${logsToWrite.length} audit logs to database`);
} catch (error) {
logger.error("Error flushing audit logs:", error);
// On error, we lose these logs - consider a fallback strategy if needed
// (e.g., write to file, or put back in buffer with retry limit)
} finally {
isFlushInProgress = false;
// If buffer filled up while we were flushing, flush again
if (auditLogBuffer.length >= BATCH_SIZE) {
flushAuditLogs().catch((err) =>
logger.error("Error in follow-up flush:", err)
);
}
}
}
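The flush path now writes in sub-batches of 25 rows; a generic sketch of that pattern, assuming nothing beyond an async writer:

// Write rows in fixed-size chunks so one oversized flush cannot stall the database.
async function writeInChunks<T>(
    rows: T[],
    write: (batch: T[]) => Promise<void>,
    size = 25
): Promise<void> {
    for (let i = 0; i < rows.length; i += size) {
        await write(rows.slice(i, i + size));
    }
}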
@@ -95,6 +111,10 @@ export async function shutdownAuditLogger() {
clearTimeout(flushTimer);
flushTimer = null;
}
// Wait for any in-flight flush to finish, then flush the remainder
while (isFlushInProgress) {
await new Promise((resolve) => setTimeout(resolve, 100));
}
await flushAuditLogs();
}
@@ -212,6 +232,14 @@ export async function logRequestAudit(
? stripPortFromHost(body.requestIp)
: undefined;
// Prevent unbounded buffer growth - drop oldest entries if buffer is too large
if (auditLogBuffer.length >= MAX_BUFFER_SIZE) {
const dropped = auditLogBuffer.splice(0, BATCH_SIZE);
logger.warn(
`Audit log buffer exceeded max size (${MAX_BUFFER_SIZE}), dropped ${dropped.length} oldest entries`
);
}
// Add to buffer instead of writing directly to DB
auditLogBuffer.push({
timestamp,

View File

@@ -942,7 +942,7 @@ async function isUserAllowedToAccessResource(
username: user.username,
email: user.email,
name: user.name,
role: user.role
role: userOrgRole.roleName
};
}
@@ -956,7 +956,7 @@ async function isUserAllowedToAccessResource(
username: user.username,
email: user.email,
name: user.name,
role: user.role
role: userOrgRole.roleName
};
}
@@ -1035,14 +1035,25 @@ export function isPathAllowed(pattern: string, path: string): boolean {
logger.debug(`Normalized pattern parts: [${patternParts.join(", ")}]`);
logger.debug(`Normalized path parts: [${pathParts.join(", ")}]`);
// Maximum recursion depth to prevent stack overflow and memory issues
const MAX_RECURSION_DEPTH = 100;
// Recursive function to try different wildcard matches
function matchSegments(patternIndex: number, pathIndex: number): boolean {
const indent = " ".repeat(pathIndex); // Indent based on recursion depth
function matchSegments(patternIndex: number, pathIndex: number, depth: number = 0): boolean {
// Check recursion depth limit
if (depth > MAX_RECURSION_DEPTH) {
logger.warn(
`Path matching exceeded maximum recursion depth (${MAX_RECURSION_DEPTH}) for pattern "${pattern}" and path "${path}"`
);
return false;
}
const indent = " ".repeat(depth); // Indent based on recursion depth
const currentPatternPart = patternParts[patternIndex];
const currentPathPart = pathParts[pathIndex];
logger.debug(
`${indent}Checking patternIndex=${patternIndex} (${currentPatternPart || "END"}) vs pathIndex=${pathIndex} (${currentPathPart || "END"})`
`${indent}Checking patternIndex=${patternIndex} (${currentPatternPart || "END"}) vs pathIndex=${pathIndex} (${currentPathPart || "END"}) [depth=${depth}]`
);
// If we've consumed all pattern parts, we should have consumed all path parts
@@ -1075,7 +1086,7 @@ export function isPathAllowed(pattern: string, path: string): boolean {
logger.debug(
`${indent}Trying to skip wildcard (consume 0 segments)`
);
if (matchSegments(patternIndex + 1, pathIndex)) {
if (matchSegments(patternIndex + 1, pathIndex, depth + 1)) {
logger.debug(
`${indent}Successfully matched by skipping wildcard`
);
@@ -1086,7 +1097,7 @@ export function isPathAllowed(pattern: string, path: string): boolean {
logger.debug(
`${indent}Trying to consume segment "${currentPathPart}" for wildcard`
);
if (matchSegments(patternIndex, pathIndex + 1)) {
if (matchSegments(patternIndex, pathIndex + 1, depth + 1)) {
logger.debug(
`${indent}Successfully matched by consuming segment for wildcard`
);
@@ -1114,7 +1125,7 @@ export function isPathAllowed(pattern: string, path: string): boolean {
logger.debug(
`${indent}Segment with wildcard matches: "${currentPatternPart}" matches "${currentPathPart}"`
);
return matchSegments(patternIndex + 1, pathIndex + 1);
return matchSegments(patternIndex + 1, pathIndex + 1, depth + 1);
}
logger.debug(
@@ -1135,10 +1146,10 @@ export function isPathAllowed(pattern: string, path: string): boolean {
`${indent}Segments match: "${currentPatternPart}" = "${currentPathPart}"`
);
// Move to next segments in both pattern and path
return matchSegments(patternIndex + 1, pathIndex + 1);
return matchSegments(patternIndex + 1, pathIndex + 1, depth + 1);
}
const result = matchSegments(0, 0);
const result = matchSegments(0, 0, 0);
logger.debug(`Final result: ${result}`);
return result;
}
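Reading the matcher as "* consumes zero or more segments", the new cap trades completeness for safety on pathological inputs; two illustrative calls (results inferred from the code above, not taken from tests):

// A normal match stays far below the 100-call depth limit.
isPathAllowed("api/*/users", "api/v1/users"); // expected: true

// ~300 path segments force more than 100 recursive consumes, so this now
// fails closed with a warning instead of risking a stack overflow.
const deepPath = Array.from({ length: 300 }, (_, i) => `s${i}`).join("/");
isPathAllowed("*/end", deepPath); // expected: false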

View File

@@ -0,0 +1,105 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { clients } from "@server/db";
import { eq } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
import { rebuildClientAssociationsFromClient } from "@server/lib/rebuildClientAssociations";
import { sendTerminateClient } from "./terminate";
const archiveClientSchema = z.strictObject({
clientId: z.string().transform(Number).pipe(z.int().positive())
});
registry.registerPath({
method: "post",
path: "/client/{clientId}/archive",
description: "Archive a client by its client ID.",
tags: [OpenAPITags.Client],
request: {
params: archiveClientSchema
},
responses: {}
});
export async function archiveClient(
req: Request,
res: Response,
next: NextFunction
): Promise<any> {
try {
const parsedParams = archiveClientSchema.safeParse(req.params);
if (!parsedParams.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedParams.error).toString()
)
);
}
const { clientId } = parsedParams.data;
// Check if client exists
const [client] = await db
.select()
.from(clients)
.where(eq(clients.clientId, clientId))
.limit(1);
if (!client) {
return next(
createHttpError(
HttpCode.NOT_FOUND,
`Client with ID ${clientId} not found`
)
);
}
if (client.archived) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
`Client with ID ${clientId} is already archived`
)
);
}
await db.transaction(async (trx) => {
// Archive the client
await trx
.update(clients)
.set({ archived: true })
.where(eq(clients.clientId, clientId));
// Rebuild associations to clean up related data
await rebuildClientAssociationsFromClient(client, trx);
// Send terminate signal if there's an associated OLM
if (client.olmId) {
await sendTerminateClient(client.clientId, client.olmId);
}
});
return response(res, {
data: null,
success: true,
error: false,
message: "Client archived successfully",
status: HttpCode.OK
});
} catch (error) {
logger.error(error);
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Failed to archive client"
)
);
}
}

View File

@@ -0,0 +1,101 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { clients } from "@server/db";
import { eq } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
import { sendTerminateClient } from "./terminate";
const blockClientSchema = z.strictObject({
clientId: z.string().transform(Number).pipe(z.int().positive())
});
registry.registerPath({
method: "post",
path: "/client/{clientId}/block",
description: "Block a client by its client ID.",
tags: [OpenAPITags.Client],
request: {
params: blockClientSchema
},
responses: {}
});
export async function blockClient(
req: Request,
res: Response,
next: NextFunction
): Promise<any> {
try {
const parsedParams = blockClientSchema.safeParse(req.params);
if (!parsedParams.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedParams.error).toString()
)
);
}
const { clientId } = parsedParams.data;
// Check if client exists
const [client] = await db
.select()
.from(clients)
.where(eq(clients.clientId, clientId))
.limit(1);
if (!client) {
return next(
createHttpError(
HttpCode.NOT_FOUND,
`Client with ID ${clientId} not found`
)
);
}
if (client.blocked) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
`Client with ID ${clientId} is already blocked`
)
);
}
await db.transaction(async (trx) => {
// Block the client
await trx
.update(clients)
.set({ blocked: true, approvalState: "denied" })
.where(eq(clients.clientId, clientId));
// Send terminate signal if there's an associated OLM and it's connected
if (client.olmId && client.online) {
await sendTerminateClient(client.clientId, client.olmId);
}
});
return response(res, {
data: null,
success: true,
error: false,
message: "Client blocked successfully",
status: HttpCode.OK
});
} catch (error) {
logger.error(error);
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Failed to block client"
)
);
}
}
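Blocking is the heavier of the two new actions: unlike archive it also stamps approvalState: "denied" and only terminates the OLM when the client is online. A hypothetical call through the authenticated router registered later in this diff (the /api/v1 base path is an assumption):

const clientId = 42; // made-up ID
await fetch(`/api/v1/client/${clientId}/block`, { method: "POST" });
// Undo: POST /client/42/unblock, which clears blocked and resets approvalState to null.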

View File

@@ -60,11 +60,12 @@ export async function deleteClient(
);
}
// Only allow deletion of machine clients (clients without userId)
if (client.userId) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
`Cannot delete a user client with this endpoint`
`Cannot delete a user client. User clients must be archived instead.`
)
);
}

View File

@@ -1,7 +1,7 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db, olms } from "@server/db";
import { clients } from "@server/db";
import { clients, fingerprints } from "@server/db";
import { eq, and } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
@@ -10,6 +10,7 @@ import logger from "@server/logger";
import stoi from "@server/lib/stoi";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
import { getUserDeviceName } from "@server/db/names";
const getClientSchema = z.strictObject({
clientId: z
@@ -29,6 +30,7 @@ async function query(clientId?: number, niceId?: string, orgId?: string) {
.from(clients)
.where(eq(clients.clientId, clientId))
.leftJoin(olms, eq(clients.clientId, olms.clientId))
.leftJoin(fingerprints, eq(olms.olmId, fingerprints.olmId))
.limit(1);
return res;
} else if (niceId && orgId) {
@@ -37,6 +39,7 @@ async function query(clientId?: number, niceId?: string, orgId?: string) {
.from(clients)
.where(and(eq(clients.niceId, niceId), eq(clients.orgId, orgId)))
.leftJoin(olms, eq(clients.clientId, olms.clientId))
.leftJoin(fingerprints, eq(olms.olmId, fingerprints.olmId))
.limit(1);
return res;
}
@@ -105,8 +108,16 @@ export async function getClient(
);
}
// Replace name with device name if OLM exists
let clientName = client.clients.name;
if (client.olms) {
const model = client.fingerprints?.deviceModel || null;
clientName = getUserDeviceName(model, client.clients.name);
}
const data: GetClientResponse = {
...client.clients,
name: clientName,
olmId: client.olms ? client.olms.olmId : null
};

View File

@@ -1,6 +1,10 @@
export * from "./pickClientDefaults";
export * from "./createClient";
export * from "./deleteClient";
export * from "./archiveClient";
export * from "./unarchiveClient";
export * from "./blockClient";
export * from "./unblockClient";
export * from "./listClients";
export * from "./updateClient";
export * from "./getClient";

View File

@@ -5,7 +5,8 @@ import {
roleClients,
sites,
userClients,
clientSitesAssociationsCache
clientSitesAssociationsCache,
fingerprints
} from "@server/db";
import logger from "@server/logger";
import HttpCode from "@server/types/HttpCode";
@@ -27,6 +28,7 @@ import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
import NodeCache from "node-cache";
import semver from "semver";
import { getUserDeviceName } from "@server/db/names";
const olmVersionCache = new NodeCache({ stdTTL: 3600 });
@@ -136,12 +138,18 @@ function queryClients(
username: users.username,
userEmail: users.email,
niceId: clients.niceId,
agent: olms.agent
agent: olms.agent,
approvalState: clients.approvalState,
olmArchived: olms.archived,
archived: clients.archived,
blocked: clients.blocked,
deviceModel: fingerprints.deviceModel
})
.from(clients)
.leftJoin(orgs, eq(clients.orgId, orgs.orgId))
.leftJoin(olms, eq(clients.clientId, olms.clientId))
.leftJoin(users, eq(clients.userId, users.userId))
.leftJoin(fingerprints, eq(olms.olmId, fingerprints.olmId))
.where(and(...conditions));
}
@@ -160,21 +168,22 @@ async function getSiteAssociations(clientIds: number[]) {
.where(inArray(clientSitesAssociationsCache.clientId, clientIds));
}
type OlmWithUpdateAvailable = Awaited<ReturnType<typeof queryClients>>[0] & {
type ClientWithSites = Omit<
Awaited<ReturnType<typeof queryClients>>[0],
"deviceModel"
> & {
sites: Array<{
siteId: number;
siteName: string | null;
siteNiceId: string | null;
}>;
olmUpdateAvailable?: boolean;
};
type OlmWithUpdateAvailable = ClientWithSites;
export type ListClientsResponse = {
clients: Array<
Awaited<ReturnType<typeof queryClients>>[0] & {
sites: Array<{
siteId: number;
siteName: string | null;
siteNiceId: string | null;
}>;
olmUpdateAvailable?: boolean;
}
>;
clients: Array<ClientWithSites>;
pagination: { total: number; limit: number; offset: number };
};
@@ -304,11 +313,17 @@ export async function listClients(
>
);
// Merge clients with their site associations
const clientsWithSites = clientsList.map((client) => ({
...client,
sites: sitesByClient[client.clientId] || []
}));
// Merge clients with their site associations and replace name with device name
const clientsWithSites = clientsList.map((client) => {
const model = client.deviceModel || null;
const newName = getUserDeviceName(model, client.name);
const { deviceModel, ...clientWithoutDeviceModel } = client;
return {
...clientWithoutDeviceModel,
name: newName,
sites: sitesByClient[client.clientId] || []
};
});
const latestOlVersionPromise = getLatestOlmVersion();
@@ -347,7 +362,7 @@ export async function listClients(
return response<ListClientsResponse>(res, {
data: {
clients: clientsWithSites,
clients: olmsWithUpdates,
pagination: {
total: totalCount,
limit,

View File

@@ -28,7 +28,7 @@ export async function addTargets(newtId: string, targets: SubnetProxyTarget[]) {
await sendToClient(newtId, {
type: `newt/wg/targets/add`,
data: batches[i]
});
}, { incrementConfigVersion: true });
}
}
@@ -44,7 +44,7 @@ export async function removeTargets(
await sendToClient(newtId, {
type: `newt/wg/targets/remove`,
data: batches[i]
});
}, { incrementConfigVersion: true });
}
}
@@ -69,7 +69,7 @@ export async function updateTargets(
oldTargets: oldBatches[i] || [],
newTargets: newBatches[i] || []
}
}).catch((error) => {
}, { incrementConfigVersion: true }).catch((error) => {
logger.warn(`Error sending message:`, error);
});
}
@@ -101,7 +101,7 @@ export async function addPeerData(
remoteSubnets: remoteSubnets,
aliases: aliases
}
}).catch((error) => {
}, { incrementConfigVersion: true }).catch((error) => {
logger.warn(`Error sending message:`, error);
});
}
@@ -132,7 +132,7 @@ export async function removePeerData(
remoteSubnets: remoteSubnets,
aliases: aliases
}
}).catch((error) => {
}, { incrementConfigVersion: true }).catch((error) => {
logger.warn(`Error sending message:`, error);
});
}
@@ -173,7 +173,7 @@ export async function updatePeerData(
...remoteSubnets,
...aliases
}
}).catch((error) => {
}, { incrementConfigVersion: true }).catch((error) => {
logger.warn(`Error sending message:`, error);
});
}

View File

@@ -0,0 +1,93 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { clients } from "@server/db";
import { eq } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
const unarchiveClientSchema = z.strictObject({
clientId: z.string().transform(Number).pipe(z.int().positive())
});
registry.registerPath({
method: "post",
path: "/client/{clientId}/unarchive",
description: "Unarchive a client by its client ID.",
tags: [OpenAPITags.Client],
request: {
params: unarchiveClientSchema
},
responses: {}
});
export async function unarchiveClient(
req: Request,
res: Response,
next: NextFunction
): Promise<any> {
try {
const parsedParams = unarchiveClientSchema.safeParse(req.params);
if (!parsedParams.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedParams.error).toString()
)
);
}
const { clientId } = parsedParams.data;
// Check if client exists
const [client] = await db
.select()
.from(clients)
.where(eq(clients.clientId, clientId))
.limit(1);
if (!client) {
return next(
createHttpError(
HttpCode.NOT_FOUND,
`Client with ID ${clientId} not found`
)
);
}
if (!client.archived) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
`Client with ID ${clientId} is not archived`
)
);
}
// Unarchive the client
await db
.update(clients)
.set({ archived: false })
.where(eq(clients.clientId, clientId));
return response(res, {
data: null,
success: true,
error: false,
message: "Client unarchived successfully",
status: HttpCode.OK
});
} catch (error) {
logger.error(error);
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Failed to unarchive client"
)
);
}
}

View File

@@ -0,0 +1,93 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { clients } from "@server/db";
import { eq } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
const unblockClientSchema = z.strictObject({
clientId: z.string().transform(Number).pipe(z.int().positive())
});
registry.registerPath({
method: "post",
path: "/client/{clientId}/unblock",
description: "Unblock a client by its client ID.",
tags: [OpenAPITags.Client],
request: {
params: unblockClientSchema
},
responses: {}
});
export async function unblockClient(
req: Request,
res: Response,
next: NextFunction
): Promise<any> {
try {
const parsedParams = unblockClientSchema.safeParse(req.params);
if (!parsedParams.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedParams.error).toString()
)
);
}
const { clientId } = parsedParams.data;
// Check if client exists
const [client] = await db
.select()
.from(clients)
.where(eq(clients.clientId, clientId))
.limit(1);
if (!client) {
return next(
createHttpError(
HttpCode.NOT_FOUND,
`Client with ID ${clientId} not found`
)
);
}
if (!client.blocked) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
`Client with ID ${clientId} is not blocked`
)
);
}
// Unblock the client
await db
.update(clients)
.set({ blocked: false, approvalState: null })
.where(eq(clients.clientId, clientId));
return response(res, {
data: null,
success: true,
error: false,
message: "Client unblocked successfully",
status: HttpCode.OK
});
} catch (error) {
logger.error(error);
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Failed to unblock client"
)
);
}
}

View File

@@ -174,6 +174,38 @@ authenticated.delete(
client.deleteClient
);
authenticated.post(
"/client/:clientId/archive",
verifyClientAccess,
verifyUserHasAction(ActionsEnum.archiveClient),
logActionAudit(ActionsEnum.archiveClient),
client.archiveClient
);
authenticated.post(
"/client/:clientId/unarchive",
verifyClientAccess,
verifyUserHasAction(ActionsEnum.unarchiveClient),
logActionAudit(ActionsEnum.unarchiveClient),
client.unarchiveClient
);
authenticated.post(
"/client/:clientId/block",
verifyClientAccess,
verifyUserHasAction(ActionsEnum.blockClient),
logActionAudit(ActionsEnum.blockClient),
client.blockClient
);
authenticated.post(
"/client/:clientId/unblock",
verifyClientAccess,
verifyUserHasAction(ActionsEnum.unblockClient),
logActionAudit(ActionsEnum.unblockClient),
client.unblockClient
);
authenticated.post(
"/client/:clientId",
verifyClientAccess, // this will check if the user has access to the client
@@ -554,6 +586,14 @@ authenticated.get(
verifyUserHasAction(ActionsEnum.listRoles),
role.listRoles
);
authenticated.post(
"/org/:orgId/role/:roleId",
verifyOrgAccess,
verifyUserHasAction(ActionsEnum.updateRole),
logActionAudit(ActionsEnum.updateRole),
role.updateRole
);
// authenticated.get(
// "/role/:roleId",
// verifyRoleAccess,
@@ -808,11 +848,18 @@ authenticated.put("/user/:userId/olm", verifyIsLoggedInUser, olm.createUserOlm);
authenticated.get("/user/:userId/olms", verifyIsLoggedInUser, olm.listUserOlms);
authenticated.delete(
"/user/:userId/olm/:olmId",
authenticated.post(
"/user/:userId/olm/:olmId/archive",
verifyIsLoggedInUser,
verifyOlmAccess,
olm.deleteUserOlm
olm.archiveUserOlm
);
authenticated.post(
"/user/:userId/olm/:olmId/unarchive",
verifyIsLoggedInUser,
verifyOlmAccess,
olm.unarchiveUserOlm
);
authenticated.get(
@@ -822,6 +869,12 @@ authenticated.get(
olm.getUserOlm
);
authenticated.post(
"/user/:userId/olm/recover",
verifyIsLoggedInUser,
olm.recoverOlmWithFingerprint
);
authenticated.put(
"/idp/oidc",
verifyUserIsServerAdmin,
@@ -1068,6 +1121,21 @@ authRouter.post(
auth.login
);
authRouter.post("/logout", auth.logout);
authRouter.post(
"/lookup-user",
rateLimit({
windowMs: 15 * 60 * 1000,
max: 15,
keyGenerator: (req) =>
`lookupUser:${req.body.identifier || ipKeyGenerator(req.ip || "")}`,
handler: (req, res, next) => {
const message = `You can only look up users 15 times every 15 minutes. Please try again later.`;
return next(createHttpError(HttpCode.TOO_MANY_REQUESTS, message));
},
store: createStore()
}),
auth.lookupUser
);
authRouter.post(
"/newt/get-token",
rateLimit({

View File

@@ -24,7 +24,8 @@ const bodySchema = z.strictObject({
emailPath: z.string().optional(),
namePath: z.string().optional(),
scopes: z.string().nonempty(),
autoProvision: z.boolean().optional()
autoProvision: z.boolean().optional(),
tags: z.string().optional()
});
export type CreateIdpResponse = {
@@ -75,7 +76,8 @@ export async function createOidcIdp(
emailPath,
namePath,
name,
autoProvision
autoProvision,
tags
} = parsedBody.data;
const key = config.getRawConfig().server.secret!;
@@ -90,7 +92,8 @@ export async function createOidcIdp(
.values({
name,
autoProvision,
type: "oidc"
type: "oidc",
tags
})
.returning();

View File

@@ -33,7 +33,8 @@ async function query(limit: number, offset: number) {
type: idp.type,
variant: idpOidcConfig.variant,
orgCount: sql<number>`count(${idpOrg.orgId})`,
autoProvision: idp.autoProvision
autoProvision: idp.autoProvision,
tags: idp.tags
})
.from(idp)
.leftJoin(idpOrg, sql`${idp.idpId} = ${idpOrg.idpId}`)

View File

@@ -30,7 +30,8 @@ const bodySchema = z.strictObject({
scopes: z.string().optional(),
autoProvision: z.boolean().optional(),
defaultRoleMapping: z.string().optional(),
defaultOrgMapping: z.string().optional()
defaultOrgMapping: z.string().optional(),
tags: z.string().optional()
});
export type UpdateIdpResponse = {
@@ -94,7 +95,8 @@ export async function updateOidcIdp(
name,
autoProvision,
defaultRoleMapping,
defaultOrgMapping
defaultOrgMapping,
tags
} = parsedBody.data;
// Check if IDP exists and is of type OIDC
@@ -127,7 +129,8 @@ export async function updateOidcIdp(
name,
autoProvision,
defaultRoleMapping,
defaultOrgMapping
defaultOrgMapping,
tags
};
// only update if at least one key is not undefined

View File

@@ -467,6 +467,14 @@ authenticated.put(
role.createRole
);
authenticated.post(
"/org/:orgId/role/:roleId",
verifyApiKeyOrgAccess,
verifyApiKeyHasAction(ActionsEnum.updateRole),
logActionAudit(ActionsEnum.updateRole),
role.updateRole
);
authenticated.get(
"/org/:orgId/roles",
verifyApiKeyOrgAccess,
@@ -751,9 +759,10 @@ authenticated.post(
);
authenticated.get(
"/idp",
verifyApiKeyIsRoot,
verifyApiKeyHasAction(ActionsEnum.listIdps),
"/idp", // no guards on this because anyone can list idps for login purposes
// we do the same for the external api
// verifyApiKeyIsRoot,
// verifyApiKeyHasAction(ActionsEnum.listIdps),
idp.listIdps
);
@@ -842,6 +851,38 @@ authenticated.delete(
client.deleteClient
);
authenticated.post(
"/client/:clientId/archive",
verifyApiKeyClientAccess,
verifyApiKeyHasAction(ActionsEnum.archiveClient),
logActionAudit(ActionsEnum.archiveClient),
client.archiveClient
);
authenticated.post(
"/client/:clientId/unarchive",
verifyApiKeyClientAccess,
verifyApiKeyHasAction(ActionsEnum.unarchiveClient),
logActionAudit(ActionsEnum.unarchiveClient),
client.unarchiveClient
);
authenticated.post(
"/client/:clientId/block",
verifyApiKeyClientAccess,
verifyApiKeyHasAction(ActionsEnum.blockClient),
logActionAudit(ActionsEnum.blockClient),
client.blockClient
);
authenticated.post(
"/client/:clientId/unblock",
verifyApiKeyClientAccess,
verifyApiKeyHasAction(ActionsEnum.unblockClient),
logActionAudit(ActionsEnum.unblockClient),
client.unblockClient
);
authenticated.post(
"/client/:clientId",
verifyApiKeyClientAccess,

View File

@@ -0,0 +1,278 @@
import { clients, clientSiteResourcesAssociationsCache, clientSitesAssociationsCache, db, ExitNode, resources, Site, siteResources, targetHealthCheck, targets } from "@server/db";
import logger from "@server/logger";
import { initPeerAddHandshake, updatePeer } from "../olm/peers";
import { eq, and } from "drizzle-orm";
import config from "@server/lib/config";
import { generateSubnetProxyTargets, SubnetProxyTarget } from "@server/lib/ip";
export async function buildClientConfigurationForNewtClient(
site: Site,
exitNode?: ExitNode
) {
const siteId = site.siteId;
// Get all clients connected to this site
const clientsRes = await db
.select()
.from(clients)
.innerJoin(
clientSitesAssociationsCache,
eq(clients.clientId, clientSitesAssociationsCache.clientId)
)
.where(eq(clientSitesAssociationsCache.siteId, siteId));
let peers: Array<{
publicKey: string;
allowedIps: string[];
endpoint?: string;
}> = [];
if (site.publicKey && site.endpoint && exitNode) {
// Prepare peers data for the response
peers = await Promise.all(
clientsRes
.filter((client) => {
if (!client.clients.pubKey) {
logger.warn(
`Client ${client.clients.clientId} has no public key, skipping`
);
return false;
}
if (!client.clients.subnet) {
logger.warn(
`Client ${client.clients.clientId} has no subnet, skipping`
);
return false;
}
return true;
})
.map(async (client) => {
// Add or update this peer on the olm if it is connected
// const allSiteResources = await db // only get the site resources that this client has access to
// .select()
// .from(siteResources)
// .innerJoin(
// clientSiteResourcesAssociationsCache,
// eq(
// siteResources.siteResourceId,
// clientSiteResourcesAssociationsCache.siteResourceId
// )
// )
// .where(
// and(
// eq(siteResources.siteId, site.siteId),
// eq(
// clientSiteResourcesAssociationsCache.clientId,
// client.clients.clientId
// )
// )
// );
// update the peer info on the olm
// if the peer has not been added yet this will be a no-op
await updatePeer(client.clients.clientId, {
siteId: site.siteId,
endpoint: site.endpoint!,
relayEndpoint: `${exitNode.endpoint}:${config.getRawConfig().gerbil.clients_start_port}`,
publicKey: site.publicKey!,
serverIP: site.address,
serverPort: site.listenPort
// remoteSubnets: generateRemoteSubnets(
// allSiteResources.map(
// ({ siteResources }) => siteResources
// )
// ),
// aliases: generateAliasConfig(
// allSiteResources.map(
// ({ siteResources }) => siteResources
// )
// )
});
// also trigger the peer add handshake in case the peer was not already added to the olm and we need to hole punch
// if it has already been added this will be a no-op
await initPeerAddHandshake(
// this will kick off the add peer process for the client
client.clients.clientId,
{
siteId,
exitNode: {
publicKey: exitNode.publicKey,
endpoint: exitNode.endpoint
}
}
);
return {
publicKey: client.clients.pubKey!,
allowedIps: [
`${client.clients.subnet.split("/")[0]}/32`
], // we want to only allow from that client
endpoint: client.clientSitesAssociationsCache.isRelayed
? ""
: client.clientSitesAssociationsCache.endpoint! // if its relayed it should be localhost
};
})
);
}
// Filter out any null values from peers that didn't have an olm
const validPeers = peers.filter((peer) => peer !== null);
// Get all enabled site resources for this site
const allSiteResources = await db
.select()
.from(siteResources)
.where(eq(siteResources.siteId, siteId));
const targetsToSend: SubnetProxyTarget[] = [];
for (const resource of allSiteResources) {
// Get clients associated with this specific resource
const resourceClients = await db
.select({
clientId: clients.clientId,
pubKey: clients.pubKey,
subnet: clients.subnet
})
.from(clients)
.innerJoin(
clientSiteResourcesAssociationsCache,
eq(
clients.clientId,
clientSiteResourcesAssociationsCache.clientId
)
)
.where(
eq(
clientSiteResourcesAssociationsCache.siteResourceId,
resource.siteResourceId
)
);
const resourceTargets = generateSubnetProxyTargets(
resource,
resourceClients
);
targetsToSend.push(...resourceTargets);
}
return {
peers: validPeers,
targets: targetsToSend
};
}
export async function buildTargetConfigurationForNewtClient(siteId: number) {
// Get all enabled targets with their resource protocol information
const allTargets = await db
.select({
resourceId: targets.resourceId,
targetId: targets.targetId,
ip: targets.ip,
method: targets.method,
port: targets.port,
internalPort: targets.internalPort,
enabled: targets.enabled,
protocol: resources.protocol,
hcEnabled: targetHealthCheck.hcEnabled,
hcPath: targetHealthCheck.hcPath,
hcScheme: targetHealthCheck.hcScheme,
hcMode: targetHealthCheck.hcMode,
hcHostname: targetHealthCheck.hcHostname,
hcPort: targetHealthCheck.hcPort,
hcInterval: targetHealthCheck.hcInterval,
hcUnhealthyInterval: targetHealthCheck.hcUnhealthyInterval,
hcTimeout: targetHealthCheck.hcTimeout,
hcHeaders: targetHealthCheck.hcHeaders,
hcMethod: targetHealthCheck.hcMethod,
hcTlsServerName: targetHealthCheck.hcTlsServerName
})
.from(targets)
.innerJoin(resources, eq(targets.resourceId, resources.resourceId))
.leftJoin(
targetHealthCheck,
eq(targets.targetId, targetHealthCheck.targetId)
)
.where(and(eq(targets.siteId, siteId), eq(targets.enabled, true)));
const { tcpTargets, udpTargets } = allTargets.reduce(
(acc, target) => {
// Filter out invalid targets
if (!target.internalPort || !target.ip || !target.port) {
return acc;
}
// Format target into string
const formattedTarget = `${target.internalPort}:${target.ip}:${target.port}`;
// Add to the appropriate protocol array
if (target.protocol === "tcp") {
acc.tcpTargets.push(formattedTarget);
} else {
acc.udpTargets.push(formattedTarget);
}
return acc;
},
{ tcpTargets: [] as string[], udpTargets: [] as string[] }
);
const healthCheckTargets = allTargets.map((target) => {
// make sure the required health check fields are defined
if (
!target.hcPath ||
!target.hcHostname ||
!target.hcPort ||
!target.hcInterval ||
!target.hcMethod
) {
logger.debug(
`Skipping target ${target.targetId} due to missing health check fields`
);
return null; // Skip targets with missing health check fields
}
// parse headers
const hcHeadersParse = target.hcHeaders
? JSON.parse(target.hcHeaders)
: null;
const hcHeadersSend: { [key: string]: string } = {};
if (hcHeadersParse) {
hcHeadersParse.forEach(
(header: { name: string; value: string }) => {
hcHeadersSend[header.name] = header.value;
}
);
}
return {
id: target.targetId,
hcEnabled: target.hcEnabled,
hcPath: target.hcPath,
hcScheme: target.hcScheme,
hcMode: target.hcMode,
hcHostname: target.hcHostname,
hcPort: target.hcPort,
hcInterval: target.hcInterval, // in seconds
hcUnhealthyInterval: target.hcUnhealthyInterval, // in seconds
hcTimeout: target.hcTimeout, // in seconds
hcHeaders: hcHeadersSend,
hcMethod: target.hcMethod,
hcTlsServerName: target.hcTlsServerName
};
});
// Filter out any null values from health check targets
const validHealthCheckTargets = healthCheckTargets.filter(
(target) => target !== null
);
return {
validHealthCheckTargets,
tcpTargets,
udpTargets
};
}
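For orientation, the shape the builder above returns, with one made-up TCP target and no configured health checks:

// Hypothetical output of buildTargetConfigurationForNewtClient(siteId):
const exampleTargetConfig = {
    tcpTargets: ["8080:10.0.0.5:80"], // "<internalPort>:<ip>:<port>", per formattedTarget
    udpTargets: [] as string[],
    validHealthCheckTargets: [] // hc* objects for targets with complete health-check rows
};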

View File

@@ -2,19 +2,10 @@ import { z } from "zod";
import { MessageHandler } from "@server/routers/ws";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import {
db,
ExitNode,
exitNodes,
siteResources,
clientSiteResourcesAssociationsCache
} from "@server/db";
import { clients, clientSitesAssociationsCache, Newt, sites } from "@server/db";
import { db, ExitNode, exitNodes, Newt, sites } from "@server/db";
import { eq } from "drizzle-orm";
import { initPeerAddHandshake, updatePeer } from "../olm/peers";
import { sendToExitNode } from "#dynamic/lib/exitNodes";
import { generateSubnetProxyTargets, SubnetProxyTarget } from "@server/lib/ip";
import config from "@server/lib/config";
import { buildClientConfigurationForNewtClient } from "./buildConfiguration";
const inputSchema = z.object({
publicKey: z.string(),
@@ -130,167 +121,18 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
}
}
// Get all clients connected to this site
const clientsRes = await db
.select()
.from(clients)
.innerJoin(
clientSitesAssociationsCache,
eq(clients.clientId, clientSitesAssociationsCache.clientId)
)
.where(eq(clientSitesAssociationsCache.siteId, siteId));
const { peers, targets } = await buildClientConfigurationForNewtClient(
site,
exitNode
);
let peers: Array<{
publicKey: string;
allowedIps: string[];
endpoint?: string;
}> = [];
if (site.publicKey && site.endpoint && exitNode) {
// Prepare peers data for the response
peers = await Promise.all(
clientsRes
.filter((client) => {
if (!client.clients.pubKey) {
logger.warn(
`Client ${client.clients.clientId} has no public key, skipping`
);
return false;
}
if (!client.clients.subnet) {
logger.warn(
`Client ${client.clients.clientId} has no subnet, skipping`
);
return false;
}
return true;
})
.map(async (client) => {
// Add or update this peer on the olm if it is connected
// const allSiteResources = await db // only get the site resources that this client has access to
// .select()
// .from(siteResources)
// .innerJoin(
// clientSiteResourcesAssociationsCache,
// eq(
// siteResources.siteResourceId,
// clientSiteResourcesAssociationsCache.siteResourceId
// )
// )
// .where(
// and(
// eq(siteResources.siteId, site.siteId),
// eq(
// clientSiteResourcesAssociationsCache.clientId,
// client.clients.clientId
// )
// )
// );
// update the peer info on the olm
// if the peer has not been added yet this will be a no-op
await updatePeer(client.clients.clientId, {
siteId: site.siteId,
endpoint: site.endpoint!,
relayEndpoint: `${exitNode.endpoint}:${config.getRawConfig().gerbil.clients_start_port}`,
publicKey: site.publicKey!,
serverIP: site.address,
serverPort: site.listenPort
// remoteSubnets: generateRemoteSubnets(
// allSiteResources.map(
// ({ siteResources }) => siteResources
// )
// ),
// aliases: generateAliasConfig(
// allSiteResources.map(
// ({ siteResources }) => siteResources
// )
// )
});
// also trigger the peer add handshake in case the peer was not already added to the olm and we need to hole punch
// if it has already been added this will be a no-op
await initPeerAddHandshake(
// this will kick off the add peer process for the client
client.clients.clientId,
{
siteId,
exitNode: {
publicKey: exitNode.publicKey,
endpoint: exitNode.endpoint
}
}
);
return {
publicKey: client.clients.pubKey!,
allowedIps: [
`${client.clients.subnet.split("/")[0]}/32`
], // we want to only allow from that client
endpoint: client.clientSitesAssociationsCache.isRelayed
? ""
: client.clientSitesAssociationsCache.endpoint! // if its relayed it should be localhost
};
})
);
}
// Filter out any null values from peers that didn't have an olm
const validPeers = peers.filter((peer) => peer !== null);
// Get all enabled site resources for this site
const allSiteResources = await db
.select()
.from(siteResources)
.where(eq(siteResources.siteId, siteId));
const targetsToSend: SubnetProxyTarget[] = [];
for (const resource of allSiteResources) {
// Get clients associated with this specific resource
const resourceClients = await db
.select({
clientId: clients.clientId,
pubKey: clients.pubKey,
subnet: clients.subnet
})
.from(clients)
.innerJoin(
clientSiteResourcesAssociationsCache,
eq(
clients.clientId,
clientSiteResourcesAssociationsCache.clientId
)
)
.where(
eq(
clientSiteResourcesAssociationsCache.siteResourceId,
resource.siteResourceId
)
);
const resourceTargets = generateSubnetProxyTargets(
resource,
resourceClients
);
targetsToSend.push(...resourceTargets);
}
// Build the configuration response
const configResponse = {
ipAddress: site.address,
peers: validPeers,
targets: targetsToSend
};
logger.debug("Sending config: ", configResponse);
return {
message: {
type: "newt/wg/receive-config",
data: {
...configResponse
ipAddress: site.address,
peers,
targets
}
},
broadcast: false,

View File

@@ -0,0 +1,163 @@
import { db, sites } from "@server/db";
import { disconnectClient, getClientConfigVersion } from "#dynamic/routers/ws";
import { MessageHandler } from "@server/routers/ws";
import { clients, Newt } from "@server/db";
import { eq, lt, isNull, and, or } from "drizzle-orm";
import logger from "@server/logger";
import { validateSessionToken } from "@server/auth/sessions/app";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
import { sendTerminateClient } from "../client/terminate";
import { encodeHexLowerCase } from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";
import { sendNewtSyncMessage } from "./sync";
// Track if the offline checker interval is running
// let offlineCheckerInterval: NodeJS.Timeout | null = null;
// const OFFLINE_CHECK_INTERVAL = 30 * 1000; // Check every 30 seconds
// const OFFLINE_THRESHOLD_MS = 2 * 60 * 1000; // 2 minutes
/**
* Starts the background interval that checks for clients that haven't pinged recently
* and marks them as offline
*/
// export const startNewtOfflineChecker = (): void => {
// if (offlineCheckerInterval) {
// return; // Already running
// }
// offlineCheckerInterval = setInterval(async () => {
// try {
// const twoMinutesAgo = Math.floor(
// (Date.now() - OFFLINE_THRESHOLD_MS) / 1000
// );
// // TODO: WE NEED TO MAKE SURE THIS WORKS WITH DISTRIBUTED NODES ALL DOING THE SAME THING
// // Find clients that haven't pinged in the last 2 minutes and mark them as offline
// const offlineClients = await db
// .update(clients)
// .set({ online: false })
// .where(
// and(
// eq(clients.online, true),
// or(
// lt(clients.lastPing, twoMinutesAgo),
// isNull(clients.lastPing)
// )
// )
// )
// .returning();
// for (const offlineClient of offlineClients) {
// logger.info(
// `Kicking offline newt client ${offlineClient.clientId} due to inactivity`
// );
// if (!offlineClient.newtId) {
// logger.warn(
// `Offline client ${offlineClient.clientId} has no newtId, cannot disconnect`
// );
// continue;
// }
// // Send a disconnect message to the client if connected
// try {
// await sendTerminateClient(
// offlineClient.clientId,
// offlineClient.newtId
// ); // terminate first
// // wait a moment to ensure the message is sent
// await new Promise((resolve) => setTimeout(resolve, 1000));
// await disconnectClient(offlineClient.newtId);
// } catch (error) {
// logger.error(
// `Error sending disconnect to offline newt ${offlineClient.clientId}`,
// { error }
// );
// }
// }
// } catch (error) {
// logger.error("Error in offline checker interval", { error });
// }
// }, OFFLINE_CHECK_INTERVAL);
// logger.debug("Started offline checker interval");
// };
/**
* Stops the background interval that checks for offline clients
*/
// export const stopNewtOfflineChecker = (): void => {
// if (offlineCheckerInterval) {
// clearInterval(offlineCheckerInterval);
// offlineCheckerInterval = null;
// logger.info("Stopped offline checker interval");
// }
// };
/**
* Handles ping messages from clients and responds with pong
*/
export const handleNewtPingMessage: MessageHandler = async (context) => {
const { message, client: c, sendToClient } = context;
const newt = c as Newt;
if (!newt) {
logger.warn("Newt ping message: Newt not found");
return;
}
if (!newt.siteId) {
logger.warn("Newt ping message: has no site ID");
return;
}
// get the version
const configVersion = await getClientConfigVersion(newt.newtId);
if (message.configVersion && configVersion != null && configVersion != message.configVersion) {
logger.warn(
`Newt ping with outdated config version: ${message.configVersion} (current: ${configVersion})`
);
// get the site
const [site] = await db
.select()
.from(sites)
.where(eq(sites.siteId, newt.siteId))
.limit(1);
if (!site) {
logger.warn(
`Newt ping message: site with ID ${newt.siteId} not found`
);
return;
}
await sendNewtSyncMessage(newt, site);
}
// try {
// // Update the client's last ping timestamp
// await db
// .update(clients)
// .set({
// lastPing: Math.floor(Date.now() / 1000),
// online: true
// })
// .where(eq(clients.clientId, newt.clientId));
// } catch (error) {
// logger.error("Error handling ping message", { error });
// }
return {
message: {
type: "pong",
data: {
timestamp: new Date().toISOString()
}
},
broadcast: false,
excludeSender: false
};
};
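The ping handler closes the delivery loop: the newt echoes the last configVersion it applied, and any mismatch triggers one full newt/sync rather than a replay of individual messages. A sketch of the exchange with made-up version numbers (the inbound "newt/ping" type string is an assumption):

// Newt reports the last config version it applied:
//   -> { type: "newt/ping", configVersion: 41 }
// The server-side counter (Redis INCR or the local cache) reads 43, so the
// handler pushes a full sync and then answers the ping:
//   <- { type: "newt/sync", data: { proxyTargets, healthCheckTargets, peers, clientTargets } }
//   <- { type: "pong", data: { timestamp: "2026-01-16T20:00:00.000Z" } }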

View File

@@ -18,6 +18,7 @@ import {
} from "#dynamic/lib/exitNodes";
import { fetchContainers } from "./dockerSocket";
import { lockManager } from "#dynamic/lib/lock";
import { buildTargetConfigurationForNewtClient } from "./buildConfiguration";
export type ExitNodePingResult = {
exitNodeId: number;
@@ -233,109 +234,8 @@ export const handleNewtRegisterMessage: MessageHandler = async (context) => {
.where(eq(newts.newtId, newt.newtId));
}
// Get all enabled targets with their resource protocol information
const allTargets = await db
.select({
resourceId: targets.resourceId,
targetId: targets.targetId,
ip: targets.ip,
method: targets.method,
port: targets.port,
internalPort: targets.internalPort,
enabled: targets.enabled,
protocol: resources.protocol,
hcEnabled: targetHealthCheck.hcEnabled,
hcPath: targetHealthCheck.hcPath,
hcScheme: targetHealthCheck.hcScheme,
hcMode: targetHealthCheck.hcMode,
hcHostname: targetHealthCheck.hcHostname,
hcPort: targetHealthCheck.hcPort,
hcInterval: targetHealthCheck.hcInterval,
hcUnhealthyInterval: targetHealthCheck.hcUnhealthyInterval,
hcTimeout: targetHealthCheck.hcTimeout,
hcHeaders: targetHealthCheck.hcHeaders,
hcMethod: targetHealthCheck.hcMethod,
hcTlsServerName: targetHealthCheck.hcTlsServerName
})
.from(targets)
.innerJoin(resources, eq(targets.resourceId, resources.resourceId))
.leftJoin(
targetHealthCheck,
eq(targets.targetId, targetHealthCheck.targetId)
)
.where(and(eq(targets.siteId, siteId), eq(targets.enabled, true)));
const { tcpTargets, udpTargets } = allTargets.reduce(
(acc, target) => {
// Filter out invalid targets
if (!target.internalPort || !target.ip || !target.port) {
return acc;
}
// Format target into string
const formattedTarget = `${target.internalPort}:${target.ip}:${target.port}`;
// Add to the appropriate protocol array
if (target.protocol === "tcp") {
acc.tcpTargets.push(formattedTarget);
} else {
acc.udpTargets.push(formattedTarget);
}
return acc;
},
{ tcpTargets: [] as string[], udpTargets: [] as string[] }
);
const healthCheckTargets = allTargets.map((target) => {
// make sure the required health check fields are defined
if (
!target.hcPath ||
!target.hcHostname ||
!target.hcPort ||
!target.hcInterval ||
!target.hcMethod
) {
logger.debug(
`Skipping target ${target.targetId} due to missing health check fields`
);
return null; // Skip targets with missing health check fields
}
// parse headers
const hcHeadersParse = target.hcHeaders
? JSON.parse(target.hcHeaders)
: null;
const hcHeadersSend: { [key: string]: string } = {};
if (hcHeadersParse) {
hcHeadersParse.forEach(
(header: { name: string; value: string }) => {
hcHeadersSend[header.name] = header.value;
}
);
}
return {
id: target.targetId,
hcEnabled: target.hcEnabled,
hcPath: target.hcPath,
hcScheme: target.hcScheme,
hcMode: target.hcMode,
hcHostname: target.hcHostname,
hcPort: target.hcPort,
hcInterval: target.hcInterval, // in seconds
hcUnhealthyInterval: target.hcUnhealthyInterval, // in seconds
hcTimeout: target.hcTimeout, // in seconds
hcHeaders: hcHeadersSend,
hcMethod: target.hcMethod,
hcTlsServerName: target.hcTlsServerName
};
});
// Filter out any null values from health check targets
const validHealthCheckTargets = healthCheckTargets.filter(
(target) => target !== null
);
const { tcpTargets, udpTargets, validHealthCheckTargets } =
await buildTargetConfigurationForNewtClient(siteId);
logger.debug(
`Sending health check targets to newt ${newt.newtId}: ${JSON.stringify(validHealthCheckTargets)}`

View File

@@ -6,3 +6,4 @@ export * from "./handleGetConfigMessage";
export * from "./handleSocketMessages";
export * from "./handleNewtPingRequestMessage";
export * from "./handleApplyBlueprintMessage";
export * from "./handleNewtPingMessage";

View File

@@ -39,7 +39,7 @@ export async function addPeer(
await sendToClient(newtId, {
type: "newt/wg/peer/add",
data: peer
}).catch((error) => {
}, { incrementConfigVersion: true }).catch((error) => {
logger.warn(`Error sending message:`, error);
});
@@ -81,7 +81,7 @@ export async function deletePeer(
data: {
publicKey
}
}).catch((error) => {
}, { incrementConfigVersion: true }).catch((error) => {
logger.warn(`Error sending message:`, error);
});
@@ -128,7 +128,7 @@ export async function updatePeer(
publicKey,
...peer
}
}).catch((error) => {
}, { incrementConfigVersion: true }).catch((error) => {
logger.warn(`Error sending message:`, error);
});

View File

@@ -0,0 +1,41 @@
import { ExitNode, exitNodes, Newt, Site, db } from "@server/db";
import { eq } from "drizzle-orm";
import { sendToClient } from "#dynamic/routers/ws";
import logger from "@server/logger";
import {
buildClientConfigurationForNewtClient,
buildTargetConfigurationForNewtClient
} from "./buildConfiguration";
export async function sendNewtSyncMessage(newt: Newt, site: Site) {
const { tcpTargets, udpTargets, validHealthCheckTargets } =
await buildTargetConfigurationForNewtClient(site.siteId);
let exitNode: ExitNode | undefined;
if (site.exitNodeId) {
[exitNode] = await db
.select()
.from(exitNodes)
.where(eq(exitNodes.exitNodeId, site.exitNodeId))
.limit(1);
}
const { peers, targets } = await buildClientConfigurationForNewtClient(
site,
exitNode
);
await sendToClient(newt.newtId, {
type: "newt/sync",
data: {
proxyTargets: {
udp: udpTargets,
tcp: tcpTargets
},
healthCheckTargets: validHealthCheckTargets,
peers: peers,
clientTargets: targets
}
}).catch((error) => {
logger.warn(`Error sending newt sync message:`, error);
});
}
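
For reference, the newt/sync payload assembled above has roughly this shape (a sketch inferred from the call site, not an authoritative schema):

// Sketch only; element types are whatever buildTargetConfigurationForNewtClient
// and buildClientConfigurationForNewtClient produce.
interface NewtSyncData {
    proxyTargets: {
        udp: string[]; // "internalPort:ip:port" strings
        tcp: string[];
    };
    healthCheckTargets: unknown[];
    peers: unknown[];
    clientTargets: unknown[];
}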

View File

@@ -22,7 +22,7 @@ export async function addTargets(
data: {
targets: payloadTargets
}
});
}, { incrementConfigVersion: true });
// Create a map for quick lookup
const healthCheckMap = new Map<number, TargetHealthCheck>();
@@ -103,7 +103,7 @@ export async function addTargets(
data: {
targets: validHealthCheckTargets
}
});
}, { incrementConfigVersion: true });
}
export async function removeTargets(
@@ -124,7 +124,7 @@ export async function removeTargets(
data: {
targets: payloadTargets
}
});
}, { incrementConfigVersion: true });
const healthCheckTargets = targets.map((target) => {
return target.targetId;
@@ -135,5 +135,5 @@ export async function removeTargets(
data: {
ids: healthCheckTargets
}
});
}, { incrementConfigVersion: true });
}

View File

@@ -0,0 +1,81 @@
import { NextFunction, Request, Response } from "express";
import { db } from "@server/db";
import { olms, clients } from "@server/db";
import { eq } from "drizzle-orm";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import response from "@server/lib/response";
import { z } from "zod";
import { fromError } from "zod-validation-error";
import logger from "@server/logger";
import { rebuildClientAssociationsFromClient } from "@server/lib/rebuildClientAssociations";
import { sendTerminateClient } from "../client/terminate";
const paramsSchema = z
.object({
userId: z.string(),
olmId: z.string()
})
.strict();
export async function archiveUserOlm(
req: Request,
res: Response,
next: NextFunction
): Promise<any> {
try {
const parsedParams = paramsSchema.safeParse(req.params);
if (!parsedParams.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedParams.error).toString()
)
);
}
const { olmId } = parsedParams.data;
// Archive the OLM and disconnect associated clients in a transaction
await db.transaction(async (trx) => {
// Find all clients associated with this OLM
const associatedClients = await trx
.select()
.from(clients)
.where(eq(clients.olmId, olmId));
// Disconnect clients from the OLM (set olmId to null)
for (const client of associatedClients) {
await trx
.update(clients)
.set({ olmId: null })
.where(eq(clients.clientId, client.clientId));
await rebuildClientAssociationsFromClient(client, trx);
await sendTerminateClient(client.clientId, olmId);
}
// Archive the OLM (set archived to true)
await trx
.update(olms)
.set({ archived: true })
.where(eq(olms.olmId, olmId));
});
return response(res, {
data: null,
success: true,
error: false,
message: "Device archived successfully",
status: HttpCode.OK
});
} catch (error) {
logger.error(error);
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Failed to archive device"
)
);
}
}
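
A hedged caller sketch: the route this handler is mounted on is registered elsewhere, so the path below is an assumption for illustration only.

// Hypothetical client call; path, IDs, and auth mechanism are placeholders.
const userId = "u_123";
const olmId = "olm_456";
const res = await fetch(`/user/${userId}/olm/${olmId}/archive`, {
    method: "POST",
    credentials: "include" // session cookie auth assumed
});
// On 200: { success: true, message: "Device archived successfully" }.
// Inside one transaction the handler detaches every associated client
// (olmId -> null), rebuilds its associations, terminates it, then archives the OLM.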

View File

@@ -0,0 +1,145 @@
import { Client, clientSiteResourcesAssociationsCache, clientSitesAssociationsCache, db, exitNodes, siteResources, sites } from "@server/db";
import { generateAliasConfig, generateRemoteSubnets } from "@server/lib/ip";
import logger from "@server/logger";
import { and, eq } from "drizzle-orm";
import { addPeer, deletePeer } from "../newt/peers";
import config from "@server/lib/config";
export async function buildSiteConfigurationForOlmClient(
client: Client,
publicKey: string | null,
relay: boolean
) {
const siteConfigurations = [];
// Get all sites data
const sitesData = await db
.select()
.from(sites)
.innerJoin(
clientSitesAssociationsCache,
eq(sites.siteId, clientSitesAssociationsCache.siteId)
)
.where(eq(clientSitesAssociationsCache.clientId, client.clientId));
// Process each site
for (const {
sites: site,
clientSitesAssociationsCache: association
} of sitesData) {
if (!site.exitNodeId) {
logger.warn(
`Site ${site.siteId} does not have exit node, skipping`
);
continue;
}
// Validate endpoint and hole punch status
if (!site.endpoint) {
logger.warn(
`Site ${site.siteId} has no endpoint, skipping`
);
continue;
}
// if (site.lastHolePunch && now - site.lastHolePunch > 6 && relay) {
// logger.warn(
// `Site ${site.siteId} last hole punch is too old, skipping`
// );
// continue;
// }
// If public key changed, delete old peer from this site
if (client.pubKey && client.pubKey != publicKey) {
logger.info(
`Public key mismatch. Deleting old peer from site ${site.siteId}...`
);
await deletePeer(site.siteId, client.pubKey!);
}
if (!site.subnet) {
logger.warn(`Site ${site.siteId} has no subnet, skipping`);
continue;
}
const [clientSite] = await db
.select()
.from(clientSitesAssociationsCache)
.where(
and(
eq(clientSitesAssociationsCache.clientId, client.clientId),
eq(clientSitesAssociationsCache.siteId, site.siteId)
)
)
.limit(1);
// Add the peer to the exit node for this site
if (clientSite.endpoint && publicKey) {
logger.info(
`Adding peer ${publicKey} to site ${site.siteId} with endpoint ${clientSite.endpoint}`
);
await addPeer(site.siteId, {
publicKey: publicKey,
allowedIps: [`${client.subnet.split("/")[0]}/32`], // we only want to allow traffic from that client
endpoint: relay ? "" : clientSite.endpoint
});
} else {
logger.warn(
`Client ${client.clientId} has no endpoint, skipping peer addition`
);
}
let relayEndpoint: string | undefined = undefined;
if (relay) {
const [exitNode] = await db
.select()
.from(exitNodes)
.where(eq(exitNodes.exitNodeId, site.exitNodeId))
.limit(1);
if (!exitNode) {
logger.warn(`Exit node not found for site ${site.siteId}`);
continue;
}
relayEndpoint = `${exitNode.endpoint}:${config.getRawConfig().gerbil.clients_start_port}`;
}
const allSiteResources = await db // only get the site resources that this client has access to
.select()
.from(siteResources)
.innerJoin(
clientSiteResourcesAssociationsCache,
eq(
siteResources.siteResourceId,
clientSiteResourcesAssociationsCache.siteResourceId
)
)
.where(
and(
eq(siteResources.siteId, site.siteId),
eq(
clientSiteResourcesAssociationsCache.clientId,
client.clientId
)
)
);
// Add site configuration to the array
siteConfigurations.push({
siteId: site.siteId,
name: site.name,
// relayEndpoint: relayEndpoint, // this can be undefined now if not relayed // lets not do this for now because it would conflict with the hole punch testing
endpoint: site.endpoint,
publicKey: site.publicKey,
serverIP: site.address,
serverPort: site.listenPort,
remoteSubnets: generateRemoteSubnets(
allSiteResources.map(({ siteResources }) => siteResources)
),
aliases: generateAliasConfig(
allSiteResources.map(({ siteResources }) => siteResources)
)
});
}
return siteConfigurations;
}
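
A minimal consumption sketch (field names taken from the object pushed above):

const configs = await buildSiteConfigurationForOlmClient(
    client,
    client.pubKey,
    false // relay disabled, as in the sync path
);
for (const site of configs) {
    // Each entry carries what the olm needs to bring up a WireGuard peer.
    logger.debug(
        `site ${site.siteId} (${site.name}) -> ${site.endpoint}, server ${site.serverIP}:${site.serverPort}`
    );
}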

View File

@@ -1,6 +1,6 @@
import { NextFunction, Request, Response } from "express";
import { db } from "@server/db";
import { olms } from "@server/db";
import { olms, clients, fingerprints } from "@server/db";
import { eq, and } from "drizzle-orm";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
@@ -9,6 +9,7 @@ import { z } from "zod";
import { fromError } from "zod-validation-error";
import logger from "@server/logger";
import { OpenAPITags, registry } from "@server/openApi";
import { getUserDeviceName } from "@server/db/names";
const paramsSchema = z
.object({
@@ -17,6 +18,10 @@ const paramsSchema = z
})
.strict();
const querySchema = z.object({
orgId: z.string().optional()
});
// registry.registerPath({
// method: "get",
// path: "/user/{userId}/olm/{olmId}",
@@ -44,15 +49,64 @@ export async function getUserOlm(
);
}
const { olmId, userId } = parsedParams.data;
const parsedQuery = querySchema.safeParse(req.query);
if (!parsedQuery.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedQuery.error).toString()
)
);
}
const [olm] = await db
const { olmId, userId } = parsedParams.data;
const { orgId } = parsedQuery.data;
const [result] = await db
.select()
.from(olms)
.where(and(eq(olms.userId, userId), eq(olms.olmId, olmId)));
.where(and(eq(olms.userId, userId), eq(olms.olmId, olmId)))
.leftJoin(fingerprints, eq(olms.olmId, fingerprints.olmId))
.limit(1);
if (!result || !result.olms) {
return next(
createHttpError(
HttpCode.NOT_FOUND,
"Olm not found"
)
);
}
const olm = result.olms;
// If orgId is provided and olm has a clientId, fetch the client to check blocked status
let blocked: boolean | undefined;
if (orgId && olm.clientId) {
const [client] = await db
.select({ blocked: clients.blocked })
.from(clients)
.where(
and(
eq(clients.clientId, olm.clientId),
eq(clients.orgId, orgId)
)
)
.limit(1);
blocked = client?.blocked ?? false;
}
// Replace name with device name
const model = result.fingerprints?.deviceModel || null;
const newName = getUserDeviceName(model, olm.name);
const responseData = blocked !== undefined
? { ...olm, name: newName, blocked }
: { ...olm, name: newName };
return response(res, {
data: olm,
data: responseData,
success: true,
error: false,
message: "Successfully retrieved olm",

View File

@@ -1,7 +1,7 @@
import { db } from "@server/db";
import { disconnectClient } from "#dynamic/routers/ws";
import { disconnectClient, getClientConfigVersion } from "#dynamic/routers/ws";
import { clientPostureSnapshots, db, fingerprints } from "@server/db";
import { MessageHandler } from "@server/routers/ws";
import { clients, Olm } from "@server/db";
import { clients, olms, Olm } from "@server/db";
import { eq, lt, isNull, and, or } from "drizzle-orm";
import logger from "@server/logger";
import { validateSessionToken } from "@server/auth/sessions/app";
@@ -9,6 +9,7 @@ import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
import { sendTerminateClient } from "../client/terminate";
import { encodeHexLowerCase } from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";
import { sendOlmSyncMessage } from "./sync";
// Track if the offline checker interval is running
let offlineCheckerInterval: NodeJS.Timeout | null = null;
@@ -101,79 +102,178 @@ export const handleOlmPingMessage: MessageHandler = async (context) => {
const { message, client: c, sendToClient } = context;
const olm = c as Olm;
const { userToken } = message.data;
const { userToken, fingerprint, postures } = message.data;
if (!olm) {
logger.warn("Olm not found");
return;
}
if (olm.userId) {
// we need to check a user token to make sure its still valid
const { session: userSession, user } =
await validateSessionToken(userToken);
if (!userSession || !user) {
logger.warn("Invalid user session for olm ping");
return; // by returning here we just ignore the ping and the setInterval will force it to disconnect
}
if (user.userId !== olm.userId) {
logger.warn("User ID mismatch for olm ping");
return;
}
// get the client
const [client] = await db
.select()
.from(clients)
.where(
and(
eq(clients.olmId, olm.olmId),
eq(clients.userId, olm.userId)
)
)
.limit(1);
if (!client) {
logger.warn("Client not found for olm ping");
return;
}
const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(userToken))
);
const policyCheck = await checkOrgAccessPolicy({
orgId: client.orgId,
userId: olm.userId,
sessionId // this is the user token passed in the message
});
if (!policyCheck.allowed) {
logger.warn(
`Olm user ${olm.userId} does not pass access policies for org ${client.orgId}: ${policyCheck.error}`
);
return;
}
}
if (!olm.clientId) {
logger.warn("Olm has no client ID!");
return;
}
try {
// get the client
const [client] = await db
.select()
.from(clients)
.where(eq(clients.clientId, olm.clientId))
.limit(1);
if (!client) {
logger.warn("Client not found for olm ping");
return;
}
if (client.blocked) {
// NOTE: by returning we don't update the lastPing, so the offline checker will eventually disconnect them
logger.debug(
`Blocked client ${client.clientId} attempted olm ping`
);
return;
}
if (olm.userId) {
// we need to check a user token to make sure its still valid
const { session: userSession, user } =
await validateSessionToken(userToken);
if (!userSession || !user) {
logger.warn("Invalid user session for olm ping");
return; // by returning here we just ignore the ping and the setInterval will force it to disconnect
}
if (user.userId !== olm.userId) {
logger.warn("User ID mismatch for olm ping");
return;
}
if (user.userId !== client.userId) {
logger.warn("Client user ID mismatch for olm ping");
return;
}
const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(userToken))
);
const policyCheck = await checkOrgAccessPolicy({
orgId: client.orgId,
userId: olm.userId,
sessionId // this is the user token passed in the message
});
if (!policyCheck.allowed) {
logger.warn(
`Olm user ${olm.userId} does not pass access policies for org ${client.orgId}: ${policyCheck.error}`
);
return;
}
}
// get the version
logger.debug(`handleOlmPingMessage: About to get config version for olmId: ${olm.olmId}`);
const configVersion = await getClientConfigVersion(olm.olmId);
logger.debug(`handleOlmPingMessage: Got config version: ${configVersion} (type: ${typeof configVersion})`);
if (configVersion == null) { // == null also matches undefined
logger.debug(`handleOlmPingMessage: could not get config version from server for olmId: ${olm.olmId}`);
}
if (message.configVersion != null && configVersion != null && configVersion != message.configVersion) {
logger.debug(
`handleOlmPingMessage: Olm ping with outdated config version: ${message.configVersion} (current: ${configVersion})`
);
await sendOlmSyncMessage(olm, client);
}
// Update the client's last ping timestamp
await db
.update(clients)
.set({
lastPing: Math.floor(Date.now() / 1000),
online: true
online: true,
archived: false
})
.where(eq(clients.clientId, olm.clientId));
if (olm.archived) {
await db
.update(olms)
.set({ archived: false })
.where(eq(olms.olmId, olm.olmId));
}
} catch (error) {
logger.error("Error handling ping message", { error });
}
const now = Math.floor(Date.now() / 1000);
if (fingerprint && olm.olmId) {
const [existingFingerprint] = await db
.select()
.from(fingerprints)
.where(eq(fingerprints.olmId, olm.olmId))
.limit(1);
if (!existingFingerprint) {
await db.insert(fingerprints).values({
olmId: olm.olmId,
firstSeen: now,
lastSeen: now,
username: fingerprint.username,
hostname: fingerprint.hostname,
platform: fingerprint.platform,
osVersion: fingerprint.osVersion,
kernelVersion: fingerprint.kernelVersion,
arch: fingerprint.arch,
deviceModel: fingerprint.deviceModel,
serialNumber: fingerprint.serialNumber,
platformFingerprint: fingerprint.platformFingerprint
});
} else {
await db
.update(fingerprints)
.set({
lastSeen: now,
username: fingerprint.username,
hostname: fingerprint.hostname,
platform: fingerprint.platform,
osVersion: fingerprint.osVersion,
kernelVersion: fingerprint.kernelVersion,
arch: fingerprint.arch,
deviceModel: fingerprint.deviceModel,
serialNumber: fingerprint.serialNumber,
platformFingerprint: fingerprint.platformFingerprint
})
.where(eq(fingerprints.olmId, olm.olmId));
}
}
if (postures && olm.clientId) {
await db.insert(clientPostureSnapshots).values({
clientId: olm.clientId,
biometricsEnabled: postures?.biometricsEnabled,
diskEncrypted: postures?.diskEncrypted,
firewallEnabled: postures?.firewallEnabled,
autoUpdatesEnabled: postures?.autoUpdatesEnabled,
tpmAvailable: postures?.tpmAvailable,
windowsDefenderEnabled: postures?.windowsDefenderEnabled,
macosSipEnabled: postures?.macosSipEnabled,
macosGatekeeperEnabled: postures?.macosGatekeeperEnabled,
macosFirewallStealthMode: postures?.macosFirewallStealthMode,
linuxAppArmorEnabled: postures?.linuxAppArmorEnabled,
linuxSELinuxEnabled: postures?.linuxSELinuxEnabled,
collectedAt: now
});
}
return {
message: {
type: "pong",

View File

@@ -1,6 +1,9 @@
import {
Client,
clientPostureSnapshots,
clientSiteResourcesAssociationsCache,
db,
fingerprints,
orgs,
siteResources
} from "@server/db";
@@ -13,7 +16,7 @@ import {
olms,
sites
} from "@server/db";
import { and, eq, inArray, isNull } from "drizzle-orm";
import { and, count, eq, inArray, isNull } from "drizzle-orm";
import { addPeer, deletePeer } from "../newt/peers";
import logger from "@server/logger";
import { generateAliasConfig } from "@server/lib/ip";
@@ -23,6 +26,7 @@ import { validateSessionToken } from "@server/auth/sessions/app";
import config from "@server/lib/config";
import { encodeHexLowerCase } from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";
import { buildSiteConfigurationForOlmClient } from "./buildConfiguration";
export const handleOlmRegisterMessage: MessageHandler = async (context) => {
logger.info("Handling register olm message!");
@@ -36,8 +40,16 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
return;
}
const { publicKey, relay, olmVersion, olmAgent, orgId, userToken } =
message.data;
const {
publicKey,
relay,
olmVersion,
olmAgent,
orgId,
userToken,
fingerprint,
postures
} = message.data;
if (!olm.clientId) {
logger.warn("Olm client ID not found");
@@ -55,6 +67,11 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
return;
}
if (client.blocked) {
logger.debug(`Client ${client.clientId} is blocked. Ignoring register.`);
return;
}
const [org] = await db
.select()
.from(orgs)
@@ -112,18 +129,20 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
if (
(olmVersion && olm.version !== olmVersion) ||
(olmAgent && olm.agent !== olmAgent)
(olmAgent && olm.agent !== olmAgent) ||
olm.archived
) {
await db
.update(olms)
.set({
version: olmVersion,
agent: olmAgent
agent: olmAgent,
archived: false
})
.where(eq(olms.olmId, olm.olmId));
}
if (client.pubKey !== publicKey) {
if (client.pubKey !== publicKey || client.archived) {
logger.info(
"Public key mismatch. Updating public key and clearing session info..."
);
@@ -131,7 +150,8 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
await db
.update(clients)
.set({
pubKey: publicKey
pubKey: publicKey,
archived: false,
})
.where(eq(clients.clientId, client.clientId));
@@ -145,8 +165,8 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
}
// Get all sites data
const sitesData = await db
.select()
const sitesCountResult = await db
.select({ count: count() })
.from(sites)
.innerJoin(
clientSitesAssociationsCache,
@@ -154,138 +174,93 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
)
.where(eq(clientSitesAssociationsCache.clientId, client.clientId));
// Extract the count value from the result array
const sitesCount =
sitesCountResult.length > 0 ? sitesCountResult[0].count : 0;
// Prepare an array to store site configurations
const siteConfigurations = [];
logger.debug(
`Found ${sitesData.length} sites for client ${client.clientId}`
);
logger.debug(`Found ${sitesCount} sites for client ${client.clientId}`);
// this prevents us from accepting a register from an olm that has not hole punched yet.
// the olm will keep resending the register so we can keep checking
// TODO: there is probably a better way to do this than locking it out here
if (now - (client.lastHolePunch || 0) > 5 && sitesData.length > 0) {
if (now - (client.lastHolePunch || 0) > 5 && sitesCount > 0) {
logger.warn(
"Client last hole punch is too old and we have sites to send; skipping this register"
);
return;
}
// Process each site
for (const {
sites: site,
clientSitesAssociationsCache: association
} of sitesData) {
if (!site.exitNodeId) {
logger.warn(
`Site ${site.siteId} does not have exit node, skipping`
);
continue;
}
// NOTE: it's important that the client here is the old client and the public key is the new key
const siteConfigurations = await buildSiteConfigurationForOlmClient(
client,
publicKey,
relay
);
// Validate endpoint and hole punch status
if (!site.endpoint) {
logger.warn(
`In olm register: site ${site.siteId} has no endpoint, skipping`
);
continue;
}
// if (site.lastHolePunch && now - site.lastHolePunch > 6 && relay) {
// logger.warn(
// `Site ${site.siteId} last hole punch is too old, skipping`
// );
// continue;
// }
// If public key changed, delete old peer from this site
if (client.pubKey && client.pubKey != publicKey) {
logger.info(
`Public key mismatch. Deleting old peer from site ${site.siteId}...`
);
await deletePeer(site.siteId, client.pubKey!);
}
if (!site.subnet) {
logger.warn(`Site ${site.siteId} has no subnet, skipping`);
continue;
}
const [clientSite] = await db
if (fingerprint) {
const [existingFingerprint] = await db
.select()
.from(clientSitesAssociationsCache)
.where(
and(
eq(clientSitesAssociationsCache.clientId, client.clientId),
eq(clientSitesAssociationsCache.siteId, site.siteId)
)
)
.from(fingerprints)
.where(eq(fingerprints.olmId, olm.olmId))
.limit(1);
// Add the peer to the exit node for this site
if (clientSite.endpoint) {
logger.info(
`Adding peer ${publicKey} to site ${site.siteId} with endpoint ${clientSite.endpoint}`
);
await addPeer(site.siteId, {
publicKey: publicKey,
allowedIps: [`${client.subnet.split("/")[0]}/32`], // we want to only allow from that client
endpoint: relay ? "" : clientSite.endpoint
if (!existingFingerprint) {
await db.insert(fingerprints).values({
olmId: olm.olmId,
firstSeen: now,
lastSeen: now,
username: fingerprint.username,
hostname: fingerprint.hostname,
platform: fingerprint.platform,
osVersion: fingerprint.osVersion,
kernelVersion: fingerprint.kernelVersion,
arch: fingerprint.arch,
deviceModel: fingerprint.deviceModel,
serialNumber: fingerprint.serialNumber,
platformFingerprint: fingerprint.platformFingerprint
});
} else {
logger.warn(
`Client ${client.clientId} has no endpoint, skipping peer addition`
);
await db
.update(fingerprints)
.set({
lastSeen: now,
username: fingerprint.username,
hostname: fingerprint.hostname,
platform: fingerprint.platform,
osVersion: fingerprint.osVersion,
kernelVersion: fingerprint.kernelVersion,
arch: fingerprint.arch,
deviceModel: fingerprint.deviceModel,
serialNumber: fingerprint.serialNumber,
platformFingerprint: fingerprint.platformFingerprint
})
.where(eq(fingerprints.olmId, olm.olmId));
}
}
let relayEndpoint: string | undefined = undefined;
if (relay) {
const [exitNode] = await db
.select()
.from(exitNodes)
.where(eq(exitNodes.exitNodeId, site.exitNodeId))
.limit(1);
if (!exitNode) {
logger.warn(`Exit node not found for site ${site.siteId}`);
continue;
}
relayEndpoint = `${exitNode.endpoint}:${config.getRawConfig().gerbil.clients_start_port}`;
}
if (postures && olm.clientId) {
await db.insert(clientPostureSnapshots).values({
clientId: olm.clientId,
const allSiteResources = await db // only get the site resources that this client has access to
.select()
.from(siteResources)
.innerJoin(
clientSiteResourcesAssociationsCache,
eq(
siteResources.siteResourceId,
clientSiteResourcesAssociationsCache.siteResourceId
)
)
.where(
and(
eq(siteResources.siteId, site.siteId),
eq(
clientSiteResourcesAssociationsCache.clientId,
client.clientId
)
)
);
biometricsEnabled: postures?.biometricsEnabled,
diskEncrypted: postures?.diskEncrypted,
firewallEnabled: postures?.firewallEnabled,
autoUpdatesEnabled: postures?.autoUpdatesEnabled,
tpmAvailable: postures?.tpmAvailable,
// Add site configuration to the array
siteConfigurations.push({
siteId: site.siteId,
name: site.name,
// relayEndpoint: relayEndpoint, // this can be undefined now if not relayed // lets not do this for now because it would conflict with the hole punch testing
endpoint: site.endpoint,
publicKey: site.publicKey,
serverIP: site.address,
serverPort: site.listenPort,
remoteSubnets: generateRemoteSubnets(
allSiteResources.map(({ siteResources }) => siteResources)
),
aliases: generateAliasConfig(
allSiteResources.map(({ siteResources }) => siteResources)
)
windowsDefenderEnabled: postures?.windowsDefenderEnabled,
macosSipEnabled: postures?.macosSipEnabled,
macosGatekeeperEnabled: postures?.macosGatekeeperEnabled,
macosFirewallStealthMode: postures?.macosFirewallStealthMode,
linuxAppArmorEnabled: postures?.linuxAppArmorEnabled,
linuxSELinuxEnabled: postures?.linuxSELinuxEnabled,
collectedAt: now
});
}

View File

@@ -3,9 +3,10 @@ export * from "./getOlmToken";
export * from "./createUserOlm";
export * from "./handleOlmRelayMessage";
export * from "./handleOlmPingMessage";
export * from "./deleteUserOlm";
export * from "./archiveUserOlm";
export * from "./unarchiveUserOlm";
export * from "./listUserOlms";
export * from "./deleteUserOlm";
export * from "./getUserOlm";
export * from "./handleOlmServerPeerAddMessage";
export * from "./handleOlmUnRelayMessage";
export * from "./recoverOlmWithFingerprint";

View File

@@ -1,5 +1,5 @@
import { NextFunction, Request, Response } from "express";
import { db } from "@server/db";
import { db, fingerprints } from "@server/db";
import { olms } from "@server/db";
import { eq, count, desc } from "drizzle-orm";
import HttpCode from "@server/types/HttpCode";
@@ -9,6 +9,7 @@ import { z } from "zod";
import { fromError } from "zod-validation-error";
import logger from "@server/logger";
import { OpenAPITags, registry } from "@server/openApi";
import { getUserDeviceName } from "@server/db/names";
const querySchema = z.object({
limit: z
@@ -51,6 +52,7 @@ export type ListUserOlmsResponse = {
name: string | null;
clientId: number | null;
userId: string | null;
archived: boolean;
}>;
pagination: {
total: number;
@@ -89,7 +91,7 @@ export async function listUserOlms(
const { userId } = parsedParams.data;
// Get total count
// Get total count (including archived OLMs)
const [totalCountResult] = await db
.select({ count: count() })
.from(olms)
@@ -97,22 +99,31 @@ export async function listUserOlms(
const total = totalCountResult?.count || 0;
// Get OLMs for the current user
const userOlms = await db
.select({
olmId: olms.olmId,
dateCreated: olms.dateCreated,
version: olms.version,
name: olms.name,
clientId: olms.clientId,
userId: olms.userId
})
// Get OLMs for the current user (including archived OLMs)
const list = await db
.select()
.from(olms)
.where(eq(olms.userId, userId))
.leftJoin(fingerprints, eq(olms.olmId, fingerprints.olmId))
.orderBy(desc(olms.dateCreated))
.limit(limit)
.offset(offset);
const userOlms = list.map((item) => {
const model = item.fingerprints?.deviceModel || null;
const newName = getUserDeviceName(model, item.olms.name);
return {
olmId: item.olms.olmId,
dateCreated: item.olms.dateCreated,
version: item.olms.version,
name: newName,
clientId: item.olms.clientId,
userId: item.olms.userId,
archived: item.olms.archived
};
});
return response<ListUserOlmsResponse>(res, {
data: {
olms: userOlms,

View File

@@ -32,20 +32,24 @@ export async function addPeer(
olmId = olm.olmId;
}
await sendToClient(olmId, {
type: "olm/wg/peer/add",
data: {
siteId: peer.siteId,
name: peer.name,
publicKey: peer.publicKey,
endpoint: peer.endpoint,
relayEndpoint: peer.relayEndpoint,
serverIP: peer.serverIP,
serverPort: peer.serverPort,
remoteSubnets: peer.remoteSubnets, // optional, comma-separated list of subnets that this site can access
aliases: peer.aliases
}
}).catch((error) => {
await sendToClient(
olmId,
{
type: "olm/wg/peer/add",
data: {
siteId: peer.siteId,
name: peer.name,
publicKey: peer.publicKey,
endpoint: peer.endpoint,
relayEndpoint: peer.relayEndpoint,
serverIP: peer.serverIP,
serverPort: peer.serverPort,
remoteSubnets: peer.remoteSubnets, // optional, comma-separated list of subnets that this site can access
aliases: peer.aliases
}
},
{ incrementConfigVersion: true }
).catch((error) => {
logger.warn(`Error sending message:`, error);
});
@@ -70,13 +74,17 @@ export async function deletePeer(
olmId = olm.olmId;
}
await sendToClient(olmId, {
type: "olm/wg/peer/remove",
data: {
publicKey,
siteId: siteId
}
}).catch((error) => {
await sendToClient(
olmId,
{
type: "olm/wg/peer/remove",
data: {
publicKey,
siteId: siteId
}
},
{ incrementConfigVersion: true }
).catch((error) => {
logger.warn(`Error sending message:`, error);
});
@@ -109,19 +117,23 @@ export async function updatePeer(
olmId = olm.olmId;
}
await sendToClient(olmId, {
type: "olm/wg/peer/update",
data: {
siteId: peer.siteId,
publicKey: peer.publicKey,
endpoint: peer.endpoint,
relayEndpoint: peer.relayEndpoint,
serverIP: peer.serverIP,
serverPort: peer.serverPort,
remoteSubnets: peer.remoteSubnets,
aliases: peer.aliases
}
}).catch((error) => {
await sendToClient(
olmId,
{
type: "olm/wg/peer/update",
data: {
siteId: peer.siteId,
publicKey: peer.publicKey,
endpoint: peer.endpoint,
relayEndpoint: peer.relayEndpoint,
serverIP: peer.serverIP,
serverPort: peer.serverPort,
remoteSubnets: peer.remoteSubnets,
aliases: peer.aliases
}
},
{ incrementConfigVersion: true }
).catch((error) => {
logger.warn(`Error sending message:`, error);
});
@@ -151,17 +163,21 @@ export async function initPeerAddHandshake(
olmId = olm.olmId;
}
await sendToClient(olmId, {
type: "olm/wg/peer/holepunch/site/add",
data: {
siteId: peer.siteId,
exitNode: {
publicKey: peer.exitNode.publicKey,
relayPort: config.getRawConfig().gerbil.clients_start_port,
endpoint: peer.exitNode.endpoint
await sendToClient(
olmId,
{
type: "olm/wg/peer/holepunch/site/add",
data: {
siteId: peer.siteId,
exitNode: {
publicKey: peer.exitNode.publicKey,
relayPort: config.getRawConfig().gerbil.clients_start_port,
endpoint: peer.exitNode.endpoint
}
}
}
}).catch((error) => {
},
{ incrementConfigVersion: true }
).catch((error) => {
logger.warn(`Error sending message:`, error);
});

View File

@@ -0,0 +1,120 @@
import { db, fingerprints, olms } from "@server/db";
import logger from "@server/logger";
import HttpCode from "@server/types/HttpCode";
import { and, eq } from "drizzle-orm";
import { NextFunction, Request, Response } from "express";
import response from "@server/lib/response";
import createHttpError from "http-errors";
import { z } from "zod";
import { fromError } from "zod-validation-error";
import { generateId } from "@server/auth/sessions/app";
import { hashPassword } from "@server/auth/password";
const paramsSchema = z
.object({
userId: z.string()
})
.strict();
const bodySchema = z
.object({
platformFingerprint: z.string()
})
.strict();
export async function recoverOlmWithFingerprint(
req: Request,
res: Response,
next: NextFunction
): Promise<any> {
try {
const parsedParams = paramsSchema.safeParse(req.params);
if (!parsedParams.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedParams.error).toString()
)
);
}
const { userId } = parsedParams.data;
const parsedBody = bodySchema.safeParse(req.body);
if (!parsedBody.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedBody.error).toString()
)
);
}
const { platformFingerprint } = parsedBody.data;
const result = await db
.select({
olm: olms,
fingerprint: fingerprints
})
.from(olms)
.innerJoin(fingerprints, eq(fingerprints.olmId, olms.olmId))
.where(
and(
eq(olms.userId, userId),
eq(olms.archived, false),
eq(fingerprints.platformFingerprint, platformFingerprint)
)
)
.orderBy(fingerprints.lastSeen);
if (!result || result.length == 0) {
return next(
createHttpError(
HttpCode.NOT_FOUND,
"corresponding olm with this fingerprint not found"
)
);
}
if (result.length > 1) {
return next(
createHttpError(
HttpCode.CONFLICT,
"multiple matching fingerprints found, not resetting secrets"
)
);
}
const [{ olm: foundOlm }] = result;
const newSecret = generateId(48);
const newSecretHash = await hashPassword(newSecret);
await db
.update(olms)
.set({
secretHash: newSecretHash
})
.where(eq(olms.olmId, foundOlm.olmId));
return response(res, {
data: {
olmId: foundOlm.olmId,
secret: newSecret
},
success: true,
error: false,
message: "Successfully retrieved olm",
status: HttpCode.OK
});
} catch (error) {
logger.error(error);
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Failed to recover olm using provided fingerprint input"
)
);
}
}
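
The single-match requirement above is deliberate: a secret is only rotated when exactly one unarchived olm owns the fingerprint, so an ambiguous match can never reset the wrong device's secret. A hedged caller sketch (the mounted route path is an assumption):

// Hypothetical client call; only the handler is shown in this diff.
const userId = "u_123"; // placeholder
const platformFingerprint = "<stable hash>"; // placeholder
const res = await fetch(`/user/${userId}/olm/recover`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ platformFingerprint })
});
// 200 -> { olmId, secret }: a freshly generated 48-char secret, stored hashed
// 404 -> no unarchived olm has this fingerprint
// 409 -> multiple matches; no secrets are touched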

View File

@@ -0,0 +1,80 @@
import { Client, db, exitNodes, Olm, sites, clientSitesAssociationsCache } from "@server/db";
import { buildSiteConfigurationForOlmClient } from "./buildConfiguration";
import { sendToClient } from "#dynamic/routers/ws";
import logger from "@server/logger";
import { eq, inArray } from "drizzle-orm";
import config from "@server/lib/config";
export async function sendOlmSyncMessage(olm: Olm, client: Client) {
// NOTE: WE ARE HARDCODING THE RELAY PARAMETER TO FALSE HERE, BUT IN THE REGISTER MESSAGE IT IS DEFINED BY THE CLIENT
const siteConfigurations = await buildSiteConfigurationForOlmClient(
client,
client.pubKey,
false
);
// Get all exit nodes from sites where the client has peers
const clientSites = await db
.select()
.from(clientSitesAssociationsCache)
.innerJoin(
sites,
eq(sites.siteId, clientSitesAssociationsCache.siteId)
)
.where(eq(clientSitesAssociationsCache.clientId, client.clientId));
// Extract unique exit node IDs
const exitNodeIds = Array.from(
new Set(
clientSites
.map(({ sites: site }) => site.exitNodeId)
.filter((id): id is number => id !== null)
)
);
let exitNodesData: {
publicKey: string;
relayPort: number;
endpoint: string;
siteIds: number[];
}[] = [];
if (exitNodeIds.length > 0) {
const allExitNodes = await db
.select()
.from(exitNodes)
.where(inArray(exitNodes.exitNodeId, exitNodeIds));
// Map exitNodeId to siteIds
const exitNodeIdToSiteIds: Record<number, number[]> = {};
for (const { sites: site } of clientSites) {
if (site.exitNodeId !== null) {
if (!exitNodeIdToSiteIds[site.exitNodeId]) {
exitNodeIdToSiteIds[site.exitNodeId] = [];
}
exitNodeIdToSiteIds[site.exitNodeId].push(site.siteId);
}
}
exitNodesData = allExitNodes.map((exitNode) => {
return {
publicKey: exitNode.publicKey,
relayPort: config.getRawConfig().gerbil.clients_start_port,
endpoint: exitNode.endpoint,
siteIds: exitNodeIdToSiteIds[exitNode.exitNodeId] ?? []
};
});
}
logger.debug("sendOlmSyncMessage: sending sync message")
await sendToClient(olm.olmId, {
type: "olm/sync",
data: {
sites: siteConfigurations,
exitNodes: exitNodesData
}
}).catch((error) => {
logger.warn(`Error sending olm sync message:`, error);
});
}
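
For reference, the resulting olm/sync payload has roughly this shape (a sketch inferred from the send above):

interface OlmSyncData {
    sites: Awaited<ReturnType<typeof buildSiteConfigurationForOlmClient>>;
    exitNodes: {
        publicKey: string;
        relayPort: number; // gerbil.clients_start_port from config
        endpoint: string;
        siteIds: number[]; // sites reachable through this exit node
    }[];
}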

View File

@@ -0,0 +1,84 @@
import { NextFunction, Request, Response } from "express";
import { db } from "@server/db";
import { olms } from "@server/db";
import { eq } from "drizzle-orm";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import response from "@server/lib/response";
import { z } from "zod";
import { fromError } from "zod-validation-error";
import logger from "@server/logger";
const paramsSchema = z
.object({
userId: z.string(),
olmId: z.string()
})
.strict();
export async function unarchiveUserOlm(
req: Request,
res: Response,
next: NextFunction
): Promise<any> {
try {
const parsedParams = paramsSchema.safeParse(req.params);
if (!parsedParams.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedParams.error).toString()
)
);
}
const { olmId } = parsedParams.data;
// Check if OLM exists and is archived
const [olm] = await db
.select()
.from(olms)
.where(eq(olms.olmId, olmId))
.limit(1);
if (!olm) {
return next(
createHttpError(
HttpCode.NOT_FOUND,
`OLM with ID ${olmId} not found`
)
);
}
if (!olm.archived) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
`OLM with ID ${olmId} is not archived`
)
);
}
// Unarchive the OLM (set archived to false)
await db
.update(olms)
.set({ archived: false })
.where(eq(olms.olmId, olmId));
return response(res, {
data: null,
success: true,
error: false,
message: "Device unarchived successfully",
status: HttpCode.OK
});
} catch (error) {
logger.error(error);
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Failed to unarchive device"
)
);
}
}

View File

@@ -10,6 +10,8 @@ import { fromError } from "zod-validation-error";
import { ActionsEnum } from "@server/auth/actions";
import { eq, and } from "drizzle-orm";
import { OpenAPITags, registry } from "@server/openApi";
import { build } from "@server/build";
import { isLicensedOrSubscribed } from "@server/lib/isLicencedOrSubscribed";
const createRoleParamsSchema = z.strictObject({
orgId: z.string()
@@ -17,7 +19,8 @@ const createRoleParamsSchema = z.strictObject({
const createRoleSchema = z.strictObject({
name: z.string().min(1).max(255),
description: z.string().optional()
description: z.string().optional(),
requireDeviceApproval: z.boolean().optional()
});
export const defaultRoleAllowedActions: ActionsEnum[] = [
@@ -97,6 +100,11 @@ export async function createRole(
);
}
const isLicensed = await isLicensedOrSubscribed(orgId);
if (build === "oss" || !isLicensed) {
roleData.requireDeviceApproval = undefined;
}
await db.transaction(async (trx) => {
const newRole = await trx
.insert(roles)

View File

@@ -1,15 +1,13 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { roles, orgs } from "@server/db";
import { db, orgs, roles } from "@server/db";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import { sql, eq } from "drizzle-orm";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import stoi from "@server/lib/stoi";
import { OpenAPITags, registry } from "@server/openApi";
import HttpCode from "@server/types/HttpCode";
import { eq, sql } from "drizzle-orm";
import { NextFunction, Request, Response } from "express";
import createHttpError from "http-errors";
import { z } from "zod";
import { fromError } from "zod-validation-error";
const listRolesParamsSchema = z.strictObject({
orgId: z.string()
@@ -38,7 +36,8 @@ async function queryRoles(orgId: string, limit: number, offset: number) {
isAdmin: roles.isAdmin,
name: roles.name,
description: roles.description,
orgName: orgs.name
orgName: orgs.name,
requireDeviceApproval: roles.requireDeviceApproval
})
.from(roles)
.leftJoin(orgs, eq(roles.orgId, orgs.orgId))

View File

@@ -1,6 +1,6 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { db, orgs, type Role } from "@server/db";
import { roles } from "@server/db";
import { eq } from "drizzle-orm";
import response from "@server/lib/response";
@@ -8,20 +8,28 @@ import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { build } from "@server/build";
import { isLicensedOrSubscribed } from "@server/lib/isLicencedOrSubscribed";
const updateRoleParamsSchema = z.strictObject({
orgId: z.string(),
roleId: z.string().transform(Number).pipe(z.int().positive())
});
const updateRoleBodySchema = z
.strictObject({
name: z.string().min(1).max(255).optional(),
description: z.string().optional()
description: z.string().optional(),
requireDeviceApproval: z.boolean().optional()
})
.refine((data) => Object.keys(data).length > 0, {
error: "At least one field must be provided for update"
});
export type UpdateRoleBody = z.infer<typeof updateRoleBodySchema>;
export type UpdateRoleResponse = Role;
export async function updateRole(
req: Request,
res: Response,
@@ -48,13 +56,14 @@ export async function updateRole(
);
}
const { roleId } = parsedParams.data;
const { roleId, orgId } = parsedParams.data;
const updateData = parsedBody.data;
const role = await db
.select()
.from(roles)
.where(eq(roles.roleId, roleId))
.innerJoin(orgs, eq(roles.orgId, orgs.orgId))
.limit(1);
if (role.length === 0) {
@@ -66,7 +75,7 @@ export async function updateRole(
);
}
if (role[0].isAdmin) {
if (role[0].roles.isAdmin) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
@@ -75,6 +84,11 @@ export async function updateRole(
);
}
const isLicensed = await isLicensedOrSubscribed(orgId);
if (build === "oss" || !isLicensed) {
updateData.requireDeviceApproval = undefined;
}
const updatedRole = await db
.update(roles)
.set(updateData)

View File

@@ -213,9 +213,11 @@ export async function updateTarget(
// When health check is disabled, reset hcHealth to "unknown"
// to prevent previously unhealthy targets from being excluded
// Also when the site is not a newt, set hcHealth to "unknown"
const hcHealthValue =
parsedBody.data.hcEnabled === false ||
parsedBody.data.hcEnabled === null
parsedBody.data.hcEnabled === null ||
site.type !== "newt"
? "unknown"
: undefined;

View File

@@ -5,7 +5,8 @@ import {
handleDockerStatusMessage,
handleDockerContainersMessage,
handleNewtPingRequestMessage,
handleApplyBlueprintMessage
handleApplyBlueprintMessage,
handleNewtPingMessage
} from "../newt";
import {
handleOlmRegisterMessage,
@@ -24,6 +25,7 @@ export const messageHandlers: Record<string, MessageHandler> = {
"olm/wg/relay": handleOlmRelayMessage,
"olm/wg/unrelay": handleOlmUnRelayMessage,
"olm/ping": handleOlmPingMessage,
"newt/ping": handleNewtPingMessage,
"newt/wg/register": handleNewtRegisterMessage,
"newt/wg/get-config": handleGetConfigMessage,
"newt/receive-bandwidth": handleReceiveBandwidthMessage,

View File

@@ -25,6 +25,7 @@ export interface AuthenticatedWebSocket extends WebSocket {
connectionId?: string;
isFullyConnected?: boolean;
pendingMessages?: Buffer[];
configVersion?: number;
}
export interface TokenPayload {
@@ -36,6 +37,7 @@ export interface TokenPayload {
export interface WSMessage {
type: string;
data: any;
configVersion?: number;
}
export interface HandlerResponse {
@@ -43,6 +45,7 @@ export interface HandlerResponse {
broadcast?: boolean;
excludeSender?: boolean;
targetClientId?: string;
options?: SendMessageOptions;
}
export interface HandlerContext {
@@ -50,10 +53,15 @@ export interface HandlerContext {
senderWs: WebSocket;
client: Newt | Olm | RemoteExitNode | undefined;
clientType: ClientType;
sendToClient: (clientId: string, message: WSMessage) => Promise<boolean>;
sendToClient: (
clientId: string,
message: WSMessage,
options?: SendMessageOptions
) => Promise<boolean>;
broadcastToAllExcept: (
message: WSMessage,
excludeClientId?: string
excludeClientId?: string,
options?: SendMessageOptions
) => Promise<void>;
connectedClients: Map<string, WebSocket[]>;
}
@@ -62,6 +70,11 @@ export type MessageHandler = (
context: HandlerContext
) => Promise<HandlerResponse | void>;
// Options for sending messages with config version tracking
export interface SendMessageOptions {
incrementConfigVersion?: boolean;
}
// Redis message type for cross-node communication
export interface RedisMessage {
type: "direct" | "broadcast";
@@ -69,4 +82,5 @@ export interface RedisMessage {
excludeClientId?: string;
message: WSMessage;
fromNodeId: string;
options?: SendMessageOptions;
}
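
With these types in place, a handler can opt into version bumping either by passing options straight to sendToClient or by returning them for the dispatcher to forward, e.g. (a sketch with a made-up message type):

const handleSomeConfigChange: MessageHandler = async () => {
    // ...apply the configuration change...
    return {
        message: { type: "example/config/updated", data: {} }, // hypothetical type
        broadcast: false,
        excludeSender: false,
        options: { incrementConfigVersion: true } // bump the recipient's tracked version
    };
};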

View File

@@ -15,7 +15,8 @@ import {
TokenPayload,
WebSocketRequest,
WSMessage,
AuthenticatedWebSocket
AuthenticatedWebSocket,
SendMessageOptions
} from "./types";
import { validateSessionToken } from "@server/auth/sessions/app";
@@ -34,6 +35,8 @@ const NODE_ID = uuidv4();
// Client tracking map (local to this node)
const connectedClients: Map<string, AuthenticatedWebSocket[]> = new Map();
// Config version tracking map (clientId -> version)
const clientConfigVersions: Map<string, number> = new Map();
// Helper to get map key
const getClientMapKey = (clientId: string) => clientId;
@@ -53,6 +56,13 @@ const addClient = async (
existingClients.push(ws);
connectedClients.set(mapKey, existingClients);
// Initialize config version to 0 if not already set, otherwise use existing
if (!clientConfigVersions.has(clientId)) {
clientConfigVersions.set(clientId, 0);
}
// Set the current config version on the websocket
ws.configVersion = clientConfigVersions.get(clientId) || 0;
logger.info(
`Client added to tracking - ${clientType.toUpperCase()} ID: ${clientId}, Connection ID: ${connectionId}, Total connections: ${existingClients.length}`
);
@@ -84,14 +94,28 @@ const removeClient = async (
// Local message sending (within this node)
const sendToClientLocal = async (
clientId: string,
message: WSMessage
message: WSMessage,
options: SendMessageOptions = {}
): Promise<boolean> => {
const mapKey = getClientMapKey(clientId);
const clients = connectedClients.get(mapKey);
if (!clients || clients.length === 0) {
return false;
}
const messageString = JSON.stringify(message);
// Include config version in message
const configVersion = clientConfigVersions.get(clientId) || 0;
// Update version on all client connections
clients.forEach((client) => {
client.configVersion = configVersion;
});
const messageWithVersion = {
...message,
configVersion
};
const messageString = JSON.stringify(messageWithVersion);
clients.forEach((client) => {
if (client.readyState === WebSocket.OPEN) {
client.send(messageString);
@@ -102,14 +126,30 @@ const sendToClientLocal = async (
const broadcastToAllExceptLocal = async (
message: WSMessage,
excludeClientId?: string
excludeClientId?: string,
options: SendMessageOptions = {}
): Promise<void> => {
connectedClients.forEach((clients, mapKey) => {
const [type, id] = mapKey.split(":");
if (!(excludeClientId && id === excludeClientId)) {
const clientId = mapKey; // mapKey is the clientId
if (!(excludeClientId && clientId === excludeClientId)) {
// Handle config version per client
if (options.incrementConfigVersion) {
const currentVersion = clientConfigVersions.get(clientId) || 0;
const newVersion = currentVersion + 1;
clientConfigVersions.set(clientId, newVersion);
clients.forEach((client) => {
client.configVersion = newVersion;
});
}
// Include config version in message for this client
const configVersion = clientConfigVersions.get(clientId) || 0;
const messageWithVersion = {
...message,
configVersion
};
clients.forEach((client) => {
if (client.readyState === WebSocket.OPEN) {
client.send(JSON.stringify(message));
client.send(JSON.stringify(messageWithVersion));
}
});
}
@@ -119,10 +159,18 @@ const broadcastToAllExceptLocal = async (
// Cross-node message sending
const sendToClient = async (
clientId: string,
message: WSMessage
message: WSMessage,
options: SendMessageOptions = {}
): Promise<boolean> => {
// Increment config version if requested
if (options.incrementConfigVersion) {
const currentVersion = clientConfigVersions.get(clientId) || 0;
const newVersion = currentVersion + 1;
clientConfigVersions.set(clientId, newVersion);
}
// Try to send locally first
const localSent = await sendToClientLocal(clientId, message);
const localSent = await sendToClientLocal(clientId, message, options);
logger.debug(
`sendToClient: Message type ${message.type} sent to clientId ${clientId}`
@@ -133,10 +181,11 @@ const sendToClient = async (
const broadcastToAllExcept = async (
message: WSMessage,
excludeClientId?: string
excludeClientId?: string,
options: SendMessageOptions = {}
): Promise<void> => {
// Broadcast locally
await broadcastToAllExceptLocal(message, excludeClientId);
await broadcastToAllExceptLocal(message, excludeClientId, options);
};
// Check if a client has active connections across all nodes
@@ -146,6 +195,13 @@ const hasActiveConnections = async (clientId: string): Promise<boolean> => {
return !!(clients && clients.length > 0);
};
// Get the current config version for a client
const getClientConfigVersion = async (clientId: string): Promise<number | undefined> => {
const version = clientConfigVersions.get(clientId);
logger.debug(`getClientConfigVersion called for clientId: ${clientId}, returning: ${version} (type: ${typeof version})`);
return version;
};
// Get all active nodes for a client
const getActiveNodes = async (
clientType: ClientType,
@@ -259,15 +315,21 @@ const setupConnection = async (
if (response.broadcast) {
await broadcastToAllExcept(
response.message,
response.excludeSender ? clientId : undefined
response.excludeSender ? clientId : undefined,
response.options
);
} else if (response.targetClientId) {
await sendToClient(
response.targetClientId,
response.message
response.message,
response.options
);
} else {
ws.send(JSON.stringify(response.message));
await sendToClient(
clientId,
response.message,
response.options
);
}
}
} catch (error) {
@@ -434,5 +496,6 @@ export {
getActiveNodes,
disconnectClient,
NODE_ID,
cleanup
cleanup,
getClientConfigVersion
};
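
End to end, the version tracking added here works like this (a sketch of the intended flow, assuming a well-behaved client):

// 1. A config-changing send bumps the tracked version and stamps it on the wire.
await sendToClient(newtId, { type: "newt/wg/peer/add", data: peer }, {
    incrementConfigVersion: true
});
// 2. The client remembers the configVersion from every message it receives
//    and echoes it in its next ping.
// 3. The ping handlers compare the echoed value against clientConfigVersions
//    and, on mismatch (e.g. a message lost while reconnecting), replay the
//    full state via sendNewtSyncMessage / sendOlmSyncMessage.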

View File

@@ -0,0 +1,52 @@
import { ApprovalFeed } from "@app/components/ApprovalFeed";
import SettingsSectionTitle from "@app/components/SettingsSectionTitle";
import { internal } from "@app/lib/api";
import { authCookieHeader } from "@app/lib/api/cookies";
import { getCachedOrg } from "@app/lib/api/getCachedOrg";
import type { ApprovalItem } from "@app/lib/queries";
import OrgProvider from "@app/providers/OrgProvider";
import type { GetOrgResponse } from "@server/routers/org";
import type { AxiosResponse } from "axios";
import { getTranslations } from "next-intl/server";
export interface ApprovalFeedPageProps {
params: Promise<{ orgId: string }>;
}
export default async function ApprovalFeedPage(props: ApprovalFeedPageProps) {
const params = await props.params;
let approvals: ApprovalItem[] = [];
const res = await internal
.get<
AxiosResponse<{ approvals: ApprovalItem[] }>
>(`/org/${params.orgId}/approvals`, await authCookieHeader())
.catch((e) => {});
if (res && res.status === 200) {
approvals = res.data.data.approvals;
}
let org: GetOrgResponse | null = null;
const orgRes = await getCachedOrg(params.orgId);
if (orgRes && orgRes.status === 200) {
org = orgRes.data.data;
}
const t = await getTranslations();
return (
<>
<SettingsSectionTitle
title={t("accessApprovalsManage")}
description={t("accessApprovalsDescription")}
/>
<OrgProvider org={org}>
<div className="container mx-auto max-w-12xl">
<ApprovalFeed orgId={params.orgId} />
</div>
</OrgProvider>
</>
);
}

View File

@@ -1,5 +1,6 @@
"use client";
import AutoProvisionConfigWidget from "@app/components/private/AutoProvisionConfigWidget";
import {
SettingsContainer,
SettingsSection,
@@ -10,6 +11,10 @@ import {
SettingsSectionHeader,
SettingsSectionTitle
} from "@app/components/Settings";
import HeaderTitle from "@app/components/SettingsSectionTitle";
import { StrategySelect } from "@app/components/StrategySelect";
import { Alert, AlertDescription, AlertTitle } from "@app/components/ui/alert";
import { Button } from "@app/components/ui/button";
import {
Form,
FormControl,
@@ -19,29 +24,21 @@ import {
FormLabel,
FormMessage
} from "@app/components/ui/form";
import HeaderTitle from "@app/components/SettingsSectionTitle";
import { z } from "zod";
import { createElement, useEffect, useState } from "react";
import { useForm } from "react-hook-form";
import { zodResolver } from "@hookform/resolvers/zod";
import { Input } from "@app/components/ui/input";
import { Button } from "@app/components/ui/button";
import { createApiClient, formatAxiosError } from "@app/lib/api";
import { useEnvContext } from "@app/hooks/useEnvContext";
import { toast } from "@app/hooks/useToast";
import { useParams, useRouter } from "next/navigation";
import { Checkbox } from "@app/components/ui/checkbox";
import { Alert, AlertDescription, AlertTitle } from "@app/components/ui/alert";
import { InfoIcon, ExternalLink } from "lucide-react";
import { StrategySelect } from "@app/components/StrategySelect";
import { SwitchInput } from "@app/components/SwitchInput";
import { Badge } from "@app/components/ui/badge";
import { useLicenseStatusContext } from "@app/hooks/useLicenseStatusContext";
import { toast } from "@app/hooks/useToast";
import { createApiClient, formatAxiosError } from "@app/lib/api";
import { zodResolver } from "@hookform/resolvers/zod";
import { ListRolesResponse } from "@server/routers/role";
import { AxiosResponse } from "axios";
import { InfoIcon } from "lucide-react";
import { useTranslations } from "next-intl";
import Image from "next/image";
import AutoProvisionConfigWidget from "@app/components/private/AutoProvisionConfigWidget";
import { AxiosResponse } from "axios";
import { ListRolesResponse } from "@server/routers/role";
import { useParams, useRouter } from "next/navigation";
import { useEffect, useState } from "react";
import { useForm } from "react-hook-form";
import { z } from "zod";
export default function Page() {
const { env } = useEnvContext();

View File

@@ -0,0 +1,18 @@
import { pullEnv } from "@app/lib/pullEnv";
import { build } from "@server/build";
import { redirect } from "next/navigation";
interface LayoutProps {
children: React.ReactNode;
params: Promise<{}>;
}
export default async function Layout(props: LayoutProps) {
const env = pullEnv();
if (build !== "saas" && !env.flags.useOrgOnlyIdp) {
redirect("/");
}
return props.children;
}

View File

@@ -2,12 +2,12 @@ import { internal } from "@app/lib/api";
import { authCookieHeader } from "@app/lib/api/cookies";
import { AxiosResponse } from "axios";
import { GetOrgResponse } from "@server/routers/org";
import { cache } from "react";
import OrgProvider from "@app/providers/OrgProvider";
import { ListRolesResponse } from "@server/routers/role";
import RolesTable, { RoleRow } from "../../../../../components/RolesTable";
import RolesTable, { type RoleRow } from "@app/components/RolesTable";
import SettingsSectionTitle from "@app/components/SettingsSectionTitle";
import { getTranslations } from "next-intl/server";
import { getCachedOrg } from "@app/lib/api/getCachedOrg";
type RolesPageProps = {
params: Promise<{ orgId: string }>;
@@ -47,14 +47,7 @@ export default async function RolesPage(props: RolesPageProps) {
}
let org: GetOrgResponse | null = null;
const getOrg = cache(async () =>
internal
.get<
AxiosResponse<GetOrgResponse>
>(`/org/${params.orgId}`, await authCookieHeader())
.catch((e) => {})
);
const orgRes = await getOrg();
const orgRes = await getCachedOrg(params.orgId);
if (orgRes && orgRes.status === 200) {
org = orgRes.data.data;

View File

@@ -59,7 +59,10 @@ export default async function ClientsPage(props: ClientsPageProps) {
username: client.username,
userEmail: client.userEmail,
niceId: client.niceId,
agent: client.agent
agent: client.agent,
archived: client.archived || false,
blocked: client.blocked || false,
approvalState: client.approvalState ?? "approved"
};
};

View File

@@ -4,7 +4,7 @@ import { AxiosResponse } from "axios";
import SettingsSectionTitle from "@app/components/SettingsSectionTitle";
import { ListClientsResponse } from "@server/routers/client";
import { getTranslations } from "next-intl/server";
import type { ClientRow } from "@app/components/MachineClientsTable";
import type { ClientRow } from "@app/components/UserDevicesTable";
import UserDevicesTable from "@app/components/UserDevicesTable";
type ClientsPageProps = {
@@ -55,7 +55,10 @@ export default async function ClientsPage(props: ClientsPageProps) {
username: client.username,
userEmail: client.userEmail,
niceId: client.niceId,
agent: client.agent
agent: client.agent,
archived: client.archived || false,
blocked: client.blocked || false,
approvalState: client.approvalState
};
};
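
Note the small difference between the two mappings: the first page coalesces a missing approval state to "approved", while the user-devices page passes the raw value through so pending devices stay visible as pending. The row type is not shown in this diff; an inferred sketch (names may differ from the real `ClientRow` in `@app/components/UserDevicesTable`):

// Inferred from the mappings above; illustrative only.
type ApprovalState = "pending" | "approved" | "denied";

interface ClientRowApprovalFields {
    archived: boolean;
    blocked: boolean;
    // Optional: user devices surface the raw state, machine clients
    // coalesce undefined to "approved".
    approvalState?: ApprovalState;
}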

View File

@@ -82,7 +82,7 @@ export default async function SettingsLayout(props: SettingsLayoutProps) {
<Layout
orgId={params.orgId}
orgs={orgs}
navItems={orgNavSections()}
navItems={orgNavSections(env)}
>
{children}
</Layout>
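
`orgNavSections` (and `adminNavSections` in a later hunk) now receive the runtime env, presumably so flag-gated entries can be filtered per deployment. The navigation module itself is not in this diff; a hypothetical sketch of the new shape, with illustrative names throughout:

// Hypothetical: nav sections become a function of env so entries such as
// a flag-gated IdP page can be hidden when the flag is off.
type Env = { flags: { useOrgOnlyIdp: boolean } };

interface NavItem {
    href: string;
    label: string;
    showWhen?: (env: Env) => boolean;
}

interface NavSection {
    title: string;
    items: NavItem[];
}

export function orgNavSections(env: Env): NavSection[] {
    const sections: NavSection[] = []; // populated with the real entries
    return sections.map((s) => ({
        ...s,
        items: s.items.filter((i) => i.showWhen?.(env) ?? true)
    }));
}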

View File

@@ -36,8 +36,8 @@ import {
import type { ResourceContextType } from "@app/contexts/resourceContext";
import { useEnvContext } from "@app/hooks/useEnvContext";
import { useOrgContext } from "@app/hooks/useOrgContext";
import { usePaidStatus } from "@app/hooks/usePaidStatus";
import { useResourceContext } from "@app/hooks/useResourceContext";
import { useSubscriptionStatusContext } from "@app/hooks/useSubscriptionStatusContext";
import { toast } from "@app/hooks/useToast";
import { createApiClient, formatAxiosError } from "@app/lib/api";
import { orgQueries, resourceQueries } from "@app/lib/queries";
@@ -95,7 +95,7 @@ export default function ResourceAuthenticationPage() {
const router = useRouter();
const t = useTranslations();
const subscription = useSubscriptionStatusContext();
const { isPaidUser } = usePaidStatus();
const queryClient = useQueryClient();
const { data: resourceRoles = [], isLoading: isLoadingResourceRoles } =
@@ -129,7 +129,8 @@ export default function ResourceAuthenticationPage() {
);
const { data: orgIdps = [], isLoading: isLoadingOrgIdps } = useQuery(
orgQueries.identityProviders({
orgId: org.org.orgId
orgId: org.org.orgId,
useOrgOnlyIdp: env.flags.useOrgOnlyIdp
})
);
@@ -159,7 +160,7 @@ export default function ResourceAuthenticationPage() {
const allIdps = useMemo(() => {
if (build === "saas") {
if (subscription?.subscribed) {
if (isPaidUser) {
return orgIdps.map((idp) => ({
id: idp.idpId,
text: idp.name
@@ -767,6 +768,8 @@ export default function ResourceAuthenticationPage() {
<OneTimePasswordFormSection
resource={resource}
updateResource={updateResource}
whitelist={whitelist}
isLoadingWhiteList={isLoadingWhiteList}
/>
</SettingsContainer>
</>
@@ -776,11 +779,16 @@ export default function ResourceAuthenticationPage() {
type OneTimePasswordFormSectionProps = Pick<
ResourceContextType,
"resource" | "updateResource"
>;
> & {
whitelist: Array<{ email: string }>;
isLoadingWhiteList: boolean;
};
function OneTimePasswordFormSection({
resource,
updateResource
updateResource,
whitelist,
isLoadingWhiteList
}: OneTimePasswordFormSectionProps) {
const { env } = useEnvContext();
const [whitelistEnabled, setWhitelistEnabled] = useState(
@@ -801,6 +809,18 @@ function OneTimePasswordFormSection({
number | null
>(null);
useEffect(() => {
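// Wait for the whitelist query to settle before hydrating the form, so a
// still-loading (empty) result does not wipe the existing email tags.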
if (isLoadingWhiteList) return;
whitelistForm.setValue(
"emails",
whitelist.map((w) => ({
id: w.email,
text: w.email
}))
);
}, [isLoadingWhiteList, whitelist, whitelistForm]);
async function saveWhitelist() {
try {
await api.post(`/resource/${resource.resourceId}`, {

View File

@@ -2,10 +2,9 @@ import { internal } from "@app/lib/api";
import { authCookieHeader } from "@app/lib/api/cookies";
import { ListSitesResponse } from "@server/routers/site";
import { AxiosResponse } from "axios";
import SitesTable, { SiteRow } from "../../../../components/SitesTable";
import SitesTable, { SiteRow } from "@app/components/SitesTable";
import SettingsSectionTitle from "@app/components/SettingsSectionTitle";
import SitesBanner from "@app/components/SitesBanner";
import SitesSplashCard from "../../../../components/SitesSplashCard";
import { getTranslations } from "next-intl/server";
type SitesPageProps = {

View File

@@ -11,6 +11,7 @@ import { AxiosResponse } from "axios";
import { authCookieHeader } from "@app/lib/api/cookies";
import { Layout } from "@app/components/Layout";
import { adminNavSections } from "../navigation";
import { pullEnv } from "@app/lib/pullEnv";
export const dynamic = "force-dynamic";
@@ -27,6 +28,8 @@ export default async function AdminLayout(props: LayoutProps) {
const getUser = cache(verifySession);
const user = await getUser();
const env = pullEnv();
if (!user || !user.serverAdmin) {
redirect(`/`);
}
@@ -48,7 +51,7 @@ export default async function AdminLayout(props: LayoutProps) {
return (
<UserProvider user={user}>
<Layout orgs={orgs} navItems={adminNavSections}>
<Layout orgs={orgs} navItems={adminNavSections(env)}>
{props.children}
</Layout>
</UserProvider>

View File

@@ -44,7 +44,7 @@ export default async function AuthLayout({ children }: AuthLayoutProps) {
return (
<div className="h-full flex flex-col">
<div className="flex justify-end items-center p-3 space-x-2">
<div className="hidden md:flex justify-end items-center p-3 space-x-2">
<ThemeSwitcher />
</div>
@@ -113,7 +113,7 @@ export default async function AuthLayout({ children }: AuthLayoutProps) {
aria-label="GitHub"
className="flex items-center space-x-2 whitespace-nowrap"
>
<span>{t("terms")}</span>
<span>{t("termsOfService")}</span>
</a>
<Separator orientation="vertical" />
<a
@@ -123,30 +123,10 @@ export default async function AuthLayout({ children }: AuthLayoutProps) {
aria-label="GitHub"
className="flex items-center space-x-2 whitespace-nowrap"
>
<span>{t("privacy")}</span>
<span>{t("privacyPolicy")}</span>
</a>
</>
)}
<Separator orientation="vertical" />
<a
href="https://docs.pangolin.net"
target="_blank"
rel="noopener noreferrer"
aria-label="Built by Fossorial"
className="flex items-center space-x-2 whitespace-nowrap"
>
<span>{t("docs")}</span>
</a>
<Separator orientation="vertical" />
<a
href="https://github.com/fosrl/pangolin"
target="_blank"
rel="noopener noreferrer"
aria-label="GitHub"
className="flex items-center space-x-2 whitespace-nowrap"
>
<span>{t("github")}</span>
</a>
</div>
</footer>
)}

View File

@@ -7,6 +7,7 @@ import { useLicenseStatusContext } from "@app/hooks/useLicenseStatusContext";
import { CheckCircle2 } from "lucide-react";
import { useTranslations } from "next-intl";
import Link from "next/link";
import { useEffect } from "react";
export default function DeviceAuthSuccessPage() {
const { env } = useEnvContext();
@@ -20,6 +21,32 @@ export default function DeviceAuthSuccessPage() {
? env.branding.logo?.authPage?.height || 58
: 58;
useEffect(() => {
// Detect if we're on iOS or Android
const userAgent = navigator.userAgent || navigator.vendor || (window as any).opera;
const isIOS = /iPad|iPhone|iPod/.test(userAgent) && !(window as any).MSStream;
const isAndroid = /android/i.test(userAgent);
if (isAndroid) {
// For Android Chrome Custom Tabs, use intent:// scheme which works more reliably
// This explicitly tells Chrome to send an intent to the app, which will bring
// SignInCodeActivity back to the foreground (it has launchMode="singleTop")
setTimeout(() => {
window.location.href = "intent://auth-success#Intent;scheme=pangolin;package=net.pangolin.Pangolin;end";
}, 500);
} else if (isIOS) {
// Wait 500ms then attempt to open the app
setTimeout(() => {
// Try to open the app using deep link
window.location.href = "pangolin://";
setTimeout(() => {
window.location.href = "https://apps.apple.com/app/pangolin/net.pangolin.Pangolin.PangoliniOS";
}, 2000);
}, 500);
}
}, []);
return (
<>
<Card>
@@ -55,4 +82,4 @@ export default function DeviceAuthSuccessPage() {
</p>
</>
);
}
}

View File

@@ -1,19 +1,22 @@
import { verifySession } from "@app/lib/auth/verifySession";
import Link from "next/link";
import { redirect } from "next/navigation";
import OrgSignInLink from "@app/components/OrgSignInLink";
import { cache } from "react";
import SmartLoginForm from "@app/components/SmartLoginForm";
import DashboardLoginForm from "@app/components/DashboardLoginForm";
import { Mail } from "lucide-react";
import { pullEnv } from "@app/lib/pullEnv";
import { cleanRedirect } from "@app/lib/cleanRedirect";
import { idp } from "@server/db";
import { LoginFormIDP } from "@app/components/LoginForm";
import { priv } from "@app/lib/api";
import { AxiosResponse } from "axios";
import { ListIdpsResponse } from "@server/routers/idp";
import { getTranslations } from "next-intl/server";
import { build } from "@server/build";
import { LoadLoginPageResponse } from "@server/routers/loginPage/types";
import { Card, CardContent } from "@app/components/ui/card";
import LoginCardHeader from "@app/components/LoginCardHeader";
import { priv } from "@app/lib/api";
import { AxiosResponse } from "axios";
import { LoginFormIDP } from "@app/components/LoginForm";
import { ListIdpsResponse } from "@server/routers/idp";
export const dynamic = "force-dynamic";
@@ -69,22 +72,57 @@ export default async function Page(props: {
searchParams.redirect = redirectUrl;
}
// Use SmartLoginForm on SaaS builds, or on enterprise builds when the org-only IdP flag is enabled
const useSmartLogin =
build === "saas" || (build === "enterprise" && env.flags.useOrgOnlyIdp);
let loginIdps: LoginFormIDP[] = [];
if (build !== "saas") {
const idpsRes = await cache(
async () => await priv.get<AxiosResponse<ListIdpsResponse>>("/idp")
)();
loginIdps = idpsRes.data.data.idps.map((idp) => ({
idpId: idp.idpId,
name: idp.name,
variant: idp.type
})) as LoginFormIDP[];
if (!useSmartLogin) {
// Load IdPs for DashboardLoginForm (OSS, or enterprise with org-only IdP disabled)
if (build === "oss" || !env.flags.useOrgOnlyIdp) {
const idpsRes = await cache(
async () =>
await priv.get<AxiosResponse<ListIdpsResponse>>("/idp")
)();
loginIdps = idpsRes.data.data.idps.map((idp) => ({
idpId: idp.idpId,
name: idp.name,
variant: idp.type
})) as LoginFormIDP[];
}
}
const t = await getTranslations();
return (
<>
{build === "saas" && (
<p className="text-xs text-muted-foreground text-center mb-4">
{t.rich("loginLegalDisclaimer", {
termsOfService: (chunks) => (
<Link
href="https://pangolin.net/terms-of-service.html"
target="_blank"
rel="noopener noreferrer"
className="underline"
>
{chunks}
</Link>
),
privacyPolicy: (chunks) => (
<Link
href="https://pangolin.net/privacy-policy.html"
target="_blank"
rel="noopener noreferrer"
className="underline"
>
{chunks}
</Link>
)
})}
</p>
)}
{isInvite && (
<div className="border rounded-md p-3 mb-4 bg-card">
<div className="flex flex-col items-center">
@@ -99,11 +137,36 @@ export default async function Page(props: {
</div>
)}
<DashboardLoginForm
redirect={redirectUrl}
idps={loginIdps}
forceLogin={forceLogin}
/>
{useSmartLogin ? (
<>
<Card className="w-full max-w-md">
<LoginCardHeader
subtitle={
forceLogin
? t("loginRequiredForDevice")
: t("loginStart")
}
/>
<CardContent className="pt-6">
<SmartLoginForm
redirect={redirectUrl}
forceLogin={forceLogin}
/>
</CardContent>
</Card>
</>
) : (
<DashboardLoginForm
redirect={redirectUrl}
idps={loginIdps}
forceLogin={forceLogin}
showOrgLogin={
!isInvite &&
(build === "saas" || env.flags.useOrgOnlyIdp)
}
searchParams={searchParams}
/>
)}
{(!signUpDisabled || isInvite) && (
<p className="text-center text-muted-foreground mt-4">
@@ -121,16 +184,12 @@ export default async function Page(props: {
</p>
)}
{!isInvite && build === "saas" ? (
<div className="text-center text-muted-foreground mt-12 flex flex-col items-center">
<span>{t("needToSignInToOrg")}</span>
<Link
href={`/auth/org${buildQueryString(searchParams)}`}
className="underline"
>
{t("orgAuthSignInToOrg")}
</Link>
</div>
{!isInvite && (build === "saas" || env.flags.useOrgOnlyIdp) ? (
<OrgSignInLink
href={`/auth/org${buildQueryString(searchParams)}`}
linkText={t("orgAuthSignInToOrg")}
descriptionText={t("needToSignInToOrg")}
/>
) : null}
</>
);
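
For review purposes, the form selection in this file reduces to a small table (a summary sketch, not code from the diff):

// build          useOrgOnlyIdp   rendered form
// "saas"         any             SmartLoginForm
// "enterprise"   true            SmartLoginForm
// "enterprise"   false           DashboardLoginForm + server-loaded IdP list
// "oss"          any             DashboardLoginForm + server-loaded IdP list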

Some files were not shown because too many files have changed in this diff.