Mirror of https://github.com/fosrl/pangolin.git (synced 2026-01-29 06:10:47 +00:00)

Compare commits: 1.15.0-s.4...main (5 commits)
| Author | SHA1 | Date |
|---|---|---|
| | cb569ff14d | |
| | 37c4a7b690 | |
| | b735e7c34d | |
| | 5f85c3b3b8 | |
| | 5d9cb9fa21 | |
.github/workflows/cicd.yml (vendored, 73 changed lines)
```diff
@@ -482,14 +482,77 @@ jobs:
           echo "==> cosign sign (key) --recursive ${REF}"
           cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"
 
+          # Retry wrapper for verification to handle registry propagation delays
+          retry_verify() {
+            local cmd="$1"
+            local attempts=6
+            local delay=5
+            local i=1
+            until eval "$cmd"; do
+              if [ $i -ge $attempts ]; then
+                echo "Verification failed after $attempts attempts"
+                return 1
+              fi
+              echo "Verification not yet available. Retry $i/$attempts after ${delay}s..."
+              sleep $delay
+              i=$((i+1))
+              delay=$((delay*2))
+              # Cap the delay to avoid very long waits
+              if [ $delay -gt 60 ]; then delay=60; fi
+            done
+            return 0
+          }
+
           echo "==> cosign verify (public key) ${REF}"
-          cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text
+          if retry_verify "cosign verify --key env://COSIGN_PUBLIC_KEY '${REF}' -o text"; then
+            VERIFIED_INDEX=true
+          else
+            VERIFIED_INDEX=false
+          fi
 
           echo "==> cosign verify (keyless policy) ${REF}"
-          cosign verify \
-            --certificate-oidc-issuer "${issuer}" \
-            --certificate-identity-regexp "${id_regex}" \
-            "${REF}" -o text
+          if retry_verify "cosign verify --certificate-oidc-issuer '${issuer}' --certificate-identity-regexp '${id_regex}' '${REF}' -o text"; then
+            VERIFIED_INDEX_KEYLESS=true
+          else
+            VERIFIED_INDEX_KEYLESS=false
+          fi
+
+          # If index verification fails, attempt to verify child platform manifests
+          if [ "${VERIFIED_INDEX}" != "true" ] || [ "${VERIFIED_INDEX_KEYLESS}" != "true" ]; then
+            echo "Index verification not available; attempting child manifest verification for ${BASE_IMAGE}:${IMAGE_TAG}"
+            CHILD_VERIFIED=false
+
+            for ARCH in arm64 amd64; do
+              CHILD_TAG="${IMAGE_TAG}-${ARCH}"
+              echo "Resolving child digest for ${BASE_IMAGE}:${CHILD_TAG}"
+              CHILD_DIGEST="$(skopeo inspect --retry-times 3 docker://${BASE_IMAGE}:${CHILD_TAG} | jq -r '.Digest' || true)"
+              if [ -n "${CHILD_DIGEST}" ] && [ "${CHILD_DIGEST}" != "null" ]; then
+                CHILD_REF="${BASE_IMAGE}@${CHILD_DIGEST}"
+                echo "==> cosign verify (public key) child ${CHILD_REF}"
+                if retry_verify "cosign verify --key env://COSIGN_PUBLIC_KEY '${CHILD_REF}' -o text"; then
+                  CHILD_VERIFIED=true
+                  echo "Public key verification succeeded for child ${CHILD_REF}"
+                else
+                  echo "Public key verification failed for child ${CHILD_REF}"
+                fi
+
+                echo "==> cosign verify (keyless policy) child ${CHILD_REF}"
+                if retry_verify "cosign verify --certificate-oidc-issuer '${issuer}' --certificate-identity-regexp '${id_regex}' '${CHILD_REF}' -o text"; then
+                  CHILD_VERIFIED=true
+                  echo "Keyless verification succeeded for child ${CHILD_REF}"
+                else
+                  echo "Keyless verification failed for child ${CHILD_REF}"
+                fi
+              else
+                echo "No child digest found for ${BASE_IMAGE}:${CHILD_TAG}; skipping"
+              fi
+            done
+
+            if [ "${CHILD_VERIFIED}" != "true" ]; then
+              echo "Failed to verify index and no child manifests verified for ${BASE_IMAGE}:${IMAGE_TAG}"
+              exit 10
+            fi
+          fi
 
           echo "✓ Successfully signed and verified ${BASE_IMAGE}:${IMAGE_TAG}"
         done
```
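The new retry_verify helper is plain exponential backoff: up to 6 attempts, starting at a 5 s delay that doubles after each failure and is capped at 60 s, to ride out registry propagation delays between signing and verification. Below is a minimal TypeScript sketch of the same pattern, with the attempt count and delays taken from the workflow; the `verify` callback is a placeholder and not code from this repository.

```ts
// Generic retry-with-exponential-backoff sketch; mirrors retry_verify in cicd.yml.
// The `verify` callback stands in for whatever check needs time to settle.
async function retryVerify(
    verify: () => Promise<boolean>,
    attempts = 6,
    initialDelayMs = 5_000,
    maxDelayMs = 60_000
): Promise<boolean> {
    let delay = initialDelayMs;
    for (let i = 1; i <= attempts; i++) {
        if (await verify()) return true; // success: stop retrying
        if (i === attempts) break;       // out of attempts
        console.log(`Verification not yet available. Retry ${i}/${attempts} after ${delay / 1000}s...`);
        await new Promise((resolve) => setTimeout(resolve, delay));
        delay = Math.min(delay * 2, maxDelayMs); // cap the delay to avoid very long waits
    }
    console.log(`Verification failed after ${attempts} attempts`);
    return false;
}
```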
```diff
@@ -78,7 +78,7 @@ export async function upsertLoginPageBranding(
     next: NextFunction
 ): Promise<any> {
     try {
-        const parsedBody = bodySchema.safeParse(req.body);
+        const parsedBody = await bodySchema.safeParseAsync(req.body);
         if (!parsedBody.success) {
             return next(
                 createHttpError(
```
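This one-line change swaps the synchronous safeParse for safeParseAsync, which zod requires once a schema contains async logic (an async refine or transform); presumably the branding bodySchema now does. A small self-contained illustration with a made-up schema, not the project's actual one:

```ts
import { z } from "zod";

// Hypothetical schema with an async refinement; the real bodySchema lives elsewhere.
const bodySchema = z.object({
    subdomain: z.string().refine(
        async (value) => !value.includes(" "), // stand-in for an async availability check
        { message: "Invalid subdomain" }
    )
});

export async function validateBody(body: unknown) {
    // safeParse() would throw here because the schema contains async logic;
    // safeParseAsync() resolves to { success, data | error } instead.
    const parsed = await bodySchema.safeParseAsync(body);
    if (!parsed.success) {
        throw new Error(parsed.error.message);
    }
    return parsed.data;
}
```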
```diff
@@ -9,9 +9,6 @@ import createHttpError from "http-errors";
 import logger from "@server/logger";
 import { fromError } from "zod-validation-error";
 import { OpenAPITags, registry } from "@server/openApi";
-import { rebuildClientAssociationsFromClient } from "@server/lib/rebuildClientAssociations";
-import { sendTerminateClient } from "./terminate";
-import { OlmErrorCodes } from "../olm/error";
 
 const archiveClientSchema = z.strictObject({
     clientId: z.string().transform(Number).pipe(z.int().positive())
@@ -77,9 +74,6 @@ export async function archiveClient(
             .update(clients)
             .set({ archived: true })
             .where(eq(clients.clientId, clientId));
-
-            // Rebuild associations to clean up related data
-            await rebuildClientAssociationsFromClient(client, trx);
         });
 
         return response(res, {
```
```diff
@@ -1,6 +1,6 @@
 import { NextFunction, Request, Response } from "express";
 import { db } from "@server/db";
-import { olms, clients } from "@server/db";
+import { olms } from "@server/db";
 import { eq } from "drizzle-orm";
 import HttpCode from "@server/types/HttpCode";
 import createHttpError from "http-errors";
@@ -8,9 +8,6 @@ import response from "@server/lib/response";
 import { z } from "zod";
 import { fromError } from "zod-validation-error";
 import logger from "@server/logger";
-import { rebuildClientAssociationsFromClient } from "@server/lib/rebuildClientAssociations";
-import { sendTerminateClient } from "../client/terminate";
-import { OlmErrorCodes } from "./error";
 
 const paramsSchema = z
     .object({
@@ -37,26 +34,7 @@ export async function archiveUserOlm(
 
         const { olmId } = parsedParams.data;
 
-        // Archive the OLM and disconnect associated clients in a transaction
         await db.transaction(async (trx) => {
-            // Find all clients associated with this OLM
-            const associatedClients = await trx
-                .select()
-                .from(clients)
-                .where(eq(clients.olmId, olmId));
-
-            // Disconnect clients from the OLM (set olmId to null)
-            for (const client of associatedClients) {
-                await trx
-                    .update(clients)
-                    .set({ olmId: null })
-                    .where(eq(clients.clientId, client.clientId));
-
-                await rebuildClientAssociationsFromClient(client, trx);
-                await sendTerminateClient(client.clientId, OlmErrorCodes.TERMINATED_ARCHIVED, olmId);
-            }
-
-            // Archive the OLM (set archived to true)
             await trx
                 .update(olms)
                 .set({ archived: true })
```
```diff
@@ -64,16 +64,20 @@ export async function ensureSetupToken() {
         );
     }
 
-    if (existingToken?.token !== envSetupToken) {
-        console.warn(
-            "Overwriting existing token in DB since PANGOLIN_SETUP_TOKEN is set"
-        );
+    if (existingToken) {
+        // Token exists in DB - update it if different
+        if (existingToken.token !== envSetupToken) {
+            console.warn(
+                "Overwriting existing token in DB since PANGOLIN_SETUP_TOKEN is set"
+            );
 
-        await db
-            .update(setupTokens)
-            .set({ token: envSetupToken })
-            .where(eq(setupTokens.tokenId, existingToken.tokenId));
+            await db
+                .update(setupTokens)
+                .set({ token: envSetupToken })
+                .where(eq(setupTokens.tokenId, existingToken.tokenId));
+        }
+    } else {
+        // No existing token - insert new one
         const tokenId = generateId(15);
 
         await db.insert(setupTokens).values({
```
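The reworked ensureSetupToken separates the two cases explicitly: update the stored token only when a row exists and its value differs from PANGOLIN_SETUP_TOKEN, insert a fresh row only when none exists, and otherwise do nothing; the old `existingToken?.token !== envSetupToken` condition could not distinguish a missing row from a differing one. A minimal sketch of just that decision, using a hypothetical SetupToken shape rather than the real setupTokens table type:

```ts
// Hypothetical row shape; the real setupTokens schema lives in @server/db.
interface SetupToken {
    tokenId: string;
    token: string;
}

type TokenAction = "update" | "insert" | "noop";

// Mirrors the branching in ensureSetupToken: update when the stored token
// differs, insert when no row exists, otherwise leave the row alone.
function decideTokenAction(existing: SetupToken | undefined, envToken: string): TokenAction {
    if (existing) {
        return existing.token !== envToken ? "update" : "noop";
    }
    return "insert";
}

// Usage sketch:
console.log(decideTokenAction(undefined, "abc"));                        // "insert"
console.log(decideTokenAction({ tokenId: "t1", token: "old" }, "abc"));  // "update"
console.log(decideTokenAction({ tokenId: "t1", token: "abc" }, "abc"));  // "noop"
```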