Compare commits

..

1 Commits
main ... k8s

Author SHA1 Message Date
Owen
f2ba4b270f Dont write stripe to files anymore 2026-01-29 20:56:46 -08:00
9 changed files with 94 additions and 410 deletions

View File

@@ -482,77 +482,14 @@ jobs:
echo "==> cosign sign (key) --recursive ${REF}" echo "==> cosign sign (key) --recursive ${REF}"
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}" cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"
# Retry wrapper for verification to handle registry propagation delays
retry_verify() {
local cmd="$1"
local attempts=6
local delay=5
local i=1
until eval "$cmd"; do
if [ $i -ge $attempts ]; then
echo "Verification failed after $attempts attempts"
return 1
fi
echo "Verification not yet available. Retry $i/$attempts after ${delay}s..."
sleep $delay
i=$((i+1))
delay=$((delay*2))
# Cap the delay to avoid very long waits
if [ $delay -gt 60 ]; then delay=60; fi
done
return 0
}
echo "==> cosign verify (public key) ${REF}" echo "==> cosign verify (public key) ${REF}"
if retry_verify "cosign verify --key env://COSIGN_PUBLIC_KEY '${REF}' -o text"; then cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text
VERIFIED_INDEX=true
else
VERIFIED_INDEX=false
fi
echo "==> cosign verify (keyless policy) ${REF}" echo "==> cosign verify (keyless policy) ${REF}"
if retry_verify "cosign verify --certificate-oidc-issuer '${issuer}' --certificate-identity-regexp '${id_regex}' '${REF}' -o text"; then cosign verify \
VERIFIED_INDEX_KEYLESS=true --certificate-oidc-issuer "${issuer}" \
else --certificate-identity-regexp "${id_regex}" \
VERIFIED_INDEX_KEYLESS=false "${REF}" -o text
fi
# If index verification fails, attempt to verify child platform manifests
if [ "${VERIFIED_INDEX}" != "true" ] || [ "${VERIFIED_INDEX_KEYLESS}" != "true" ]; then
echo "Index verification not available; attempting child manifest verification for ${BASE_IMAGE}:${IMAGE_TAG}"
CHILD_VERIFIED=false
for ARCH in arm64 amd64; do
CHILD_TAG="${IMAGE_TAG}-${ARCH}"
echo "Resolving child digest for ${BASE_IMAGE}:${CHILD_TAG}"
CHILD_DIGEST="$(skopeo inspect --retry-times 3 docker://${BASE_IMAGE}:${CHILD_TAG} | jq -r '.Digest' || true)"
if [ -n "${CHILD_DIGEST}" ] && [ "${CHILD_DIGEST}" != "null" ]; then
CHILD_REF="${BASE_IMAGE}@${CHILD_DIGEST}"
echo "==> cosign verify (public key) child ${CHILD_REF}"
if retry_verify "cosign verify --key env://COSIGN_PUBLIC_KEY '${CHILD_REF}' -o text"; then
CHILD_VERIFIED=true
echo "Public key verification succeeded for child ${CHILD_REF}"
else
echo "Public key verification failed for child ${CHILD_REF}"
fi
echo "==> cosign verify (keyless policy) child ${CHILD_REF}"
if retry_verify "cosign verify --certificate-oidc-issuer '${issuer}' --certificate-identity-regexp '${id_regex}' '${CHILD_REF}' -o text"; then
CHILD_VERIFIED=true
echo "Keyless verification succeeded for child ${CHILD_REF}"
else
echo "Keyless verification failed for child ${CHILD_REF}"
fi
else
echo "No child digest found for ${BASE_IMAGE}:${CHILD_TAG}; skipping"
fi
done
if [ "${CHILD_VERIFIED}" != "true" ]; then
echo "Failed to verify index and no child manifests verified for ${BASE_IMAGE}:${IMAGE_TAG}"
exit 10
fi
fi
echo "✓ Successfully signed and verified ${BASE_IMAGE}:${IMAGE_TAG}" echo "✓ Successfully signed and verified ${BASE_IMAGE}:${IMAGE_TAG}"
done done

View File

@@ -97,7 +97,7 @@
"siteGeneralDescription": "Allgemeine Einstellungen für diesen Standort konfigurieren", "siteGeneralDescription": "Allgemeine Einstellungen für diesen Standort konfigurieren",
"siteSettingDescription": "Standorteinstellungen konfigurieren", "siteSettingDescription": "Standorteinstellungen konfigurieren",
"siteSetting": "{siteName} Einstellungen", "siteSetting": "{siteName} Einstellungen",
"siteNewtTunnel": "Newt Standort (empfohlen)", "siteNewtTunnel": "Newt Standort (empfohlen)",
"siteNewtTunnelDescription": "Einfachster Weg, einen Einstiegspunkt in jedes Netzwerk zu erstellen. Keine zusätzliche Einrichtung.", "siteNewtTunnelDescription": "Einfachster Weg, einen Einstiegspunkt in jedes Netzwerk zu erstellen. Keine zusätzliche Einrichtung.",
"siteWg": "Einfacher WireGuard Tunnel", "siteWg": "Einfacher WireGuard Tunnel",
"siteWgDescription": "Verwende jeden WireGuard-Client, um einen Tunnel einzurichten. Manuelles NAT-Setup erforderlich.", "siteWgDescription": "Verwende jeden WireGuard-Client, um einen Tunnel einzurichten. Manuelles NAT-Setup erforderlich.",
@@ -107,7 +107,7 @@
"siteSeeAll": "Alle Standorte anzeigen", "siteSeeAll": "Alle Standorte anzeigen",
"siteTunnelDescription": "Legen Sie fest, wie Sie sich mit dem Standort verbinden möchten", "siteTunnelDescription": "Legen Sie fest, wie Sie sich mit dem Standort verbinden möchten",
"siteNewtCredentials": "Zugangsdaten", "siteNewtCredentials": "Zugangsdaten",
"siteNewtCredentialsDescription": "So wird sich der Standort mit dem Server authentifizieren", "siteNewtCredentialsDescription": "So wird sich der Standort mit dem Server authentifizieren",
"remoteNodeCredentialsDescription": "So wird sich der entfernte Node mit dem Server authentifizieren", "remoteNodeCredentialsDescription": "So wird sich der entfernte Node mit dem Server authentifizieren",
"siteCredentialsSave": "Anmeldedaten speichern", "siteCredentialsSave": "Anmeldedaten speichern",
"siteCredentialsSaveDescription": "Du kannst das nur einmal sehen. Stelle sicher, dass du es an einen sicheren Ort kopierst.", "siteCredentialsSaveDescription": "Du kannst das nur einmal sehen. Stelle sicher, dass du es an einen sicheren Ort kopierst.",
@@ -2503,7 +2503,7 @@
"deviceModel": "Gerätemodell", "deviceModel": "Gerätemodell",
"serialNumber": "Seriennummer", "serialNumber": "Seriennummer",
"hostname": "Hostname", "hostname": "Hostname",
"firstSeen": "Zuerst gesehen", "firstSeen": "Zuerst gesehen",
"lastSeen": "Zuletzt gesehen", "lastSeen": "Zuletzt gesehen",
"biometricsEnabled": "Biometrie aktiviert", "biometricsEnabled": "Biometrie aktiviert",
"diskEncrypted": "Festplatte verschlüsselt", "diskEncrypted": "Festplatte verschlüsselt",

View File

@@ -105,11 +105,13 @@ function getOpenApiDocumentation() {
servers: [{ url: "/v1" }] servers: [{ url: "/v1" }]
}); });
// convert to yaml and save to file if (!process.env.DISABLE_GEN_OPENAPI) {
const outputPath = path.join(APP_PATH, "openapi.yaml"); // convert to yaml and save to file
const yamlOutput = yaml.dump(generated); const outputPath = path.join(APP_PATH, "openapi.yaml");
fs.writeFileSync(outputPath, yamlOutput, "utf8"); const yamlOutput = yaml.dump(generated);
logger.info(`OpenAPI documentation saved to ${outputPath}`); fs.writeFileSync(outputPath, yamlOutput, "utf8");
logger.info(`OpenAPI documentation saved to ${outputPath}`);
}
return generated; return generated;
} }

View File

@@ -1,8 +1,6 @@
import { eq, sql, and } from "drizzle-orm"; import { eq, sql, and } from "drizzle-orm";
import { v4 as uuidv4 } from "uuid"; import { v4 as uuidv4 } from "uuid";
import { PutObjectCommand } from "@aws-sdk/client-s3"; import { PutObjectCommand } from "@aws-sdk/client-s3";
import * as fs from "fs/promises";
import * as path from "path";
import { import {
db, db,
usage, usage,
@@ -34,8 +32,7 @@ interface StripeEvent {
export function noop() { export function noop() {
if ( if (
build !== "saas" || build !== "saas" ||
!process.env.S3_BUCKET || !process.env.S3_BUCKET
!process.env.LOCAL_FILE_PATH
) { ) {
return true; return true;
} }
@@ -44,31 +41,37 @@ export function noop() {
export class UsageService { export class UsageService {
private bucketName: string | undefined; private bucketName: string | undefined;
private currentEventFile: string | null = null; private events: StripeEvent[] = [];
private currentFileStartTime: number = 0; private lastUploadTime: number = Date.now();
private eventsDir: string | undefined; private isUploading: boolean = false;
private uploadingFiles: Set<string> = new Set();
constructor() { constructor() {
if (noop()) { if (noop()) {
return; return;
} }
// this.bucketName = privateConfig.getRawPrivateConfig().stripe?.s3Bucket;
// this.eventsDir = privateConfig.getRawPrivateConfig().stripe?.localFilePath;
this.bucketName = process.env.S3_BUCKET || undefined; this.bucketName = process.env.S3_BUCKET || undefined;
this.eventsDir = process.env.LOCAL_FILE_PATH || undefined;
// Ensure events directory exists // Periodically check and upload events
this.initializeEventsDirectory().then(() => {
this.uploadPendingEventFilesOnStartup();
});
// Periodically check for old event files to upload
setInterval(() => { setInterval(() => {
this.uploadOldEventFiles().catch((err) => { this.checkAndUploadEvents().catch((err) => {
logger.error("Error in periodic event file upload:", err); logger.error("Error in periodic event upload:", err);
}); });
}, 30000); // every 30 seconds }, 30000); // every 30 seconds
// Handle graceful shutdown on SIGTERM
process.on("SIGTERM", async () => {
logger.info("SIGTERM received, uploading events before shutdown...");
await this.forceUpload();
logger.info("Events uploaded, proceeding with shutdown");
});
// Handle SIGINT as well (Ctrl+C)
process.on("SIGINT", async () => {
logger.info("SIGINT received, uploading events before shutdown...");
await this.forceUpload();
logger.info("Events uploaded, proceeding with shutdown");
process.exit(0);
});
} }
/** /**
@@ -78,85 +81,6 @@ export class UsageService {
return Math.round(value * 100000000000) / 100000000000; // 11 decimal places return Math.round(value * 100000000000) / 100000000000; // 11 decimal places
} }
private async initializeEventsDirectory(): Promise<void> {
if (!this.eventsDir) {
logger.warn(
"Stripe local file path is not configured, skipping events directory initialization."
);
return;
}
try {
await fs.mkdir(this.eventsDir, { recursive: true });
} catch (error) {
logger.error("Failed to create events directory:", error);
}
}
private async uploadPendingEventFilesOnStartup(): Promise<void> {
if (!this.eventsDir || !this.bucketName) {
logger.warn(
"Stripe local file path or bucket name is not configured, skipping leftover event file upload."
);
return;
}
try {
const files = await fs.readdir(this.eventsDir);
for (const file of files) {
if (file.endsWith(".json")) {
const filePath = path.join(this.eventsDir, file);
try {
const fileContent = await fs.readFile(
filePath,
"utf-8"
);
const events = JSON.parse(fileContent);
if (Array.isArray(events) && events.length > 0) {
// Upload to S3
const uploadCommand = new PutObjectCommand({
Bucket: this.bucketName,
Key: file,
Body: fileContent,
ContentType: "application/json"
});
await s3Client.send(uploadCommand);
// Check if file still exists before unlinking
try {
await fs.access(filePath);
await fs.unlink(filePath);
} catch (unlinkError) {
logger.debug(
`Startup file ${file} was already deleted`
);
}
logger.info(
`Uploaded leftover event file ${file} to S3 with ${events.length} events`
);
} else {
// Remove empty file
try {
await fs.access(filePath);
await fs.unlink(filePath);
} catch (unlinkError) {
logger.debug(
`Empty startup file ${file} was already deleted`
);
}
}
} catch (err) {
logger.error(
`Error processing leftover event file ${file}:`,
err
);
}
}
}
} catch (error) {
logger.error("Failed to scan for leftover event files");
}
}
public async add( public async add(
orgId: string, orgId: string,
featureId: FeatureId, featureId: FeatureId,
@@ -450,121 +374,58 @@ export class UsageService {
} }
}; };
await this.writeEventToFile(event); this.addEventToMemory(event);
await this.checkAndUploadFile(); await this.checkAndUploadEvents();
} }
private async writeEventToFile(event: StripeEvent): Promise<void> { private addEventToMemory(event: StripeEvent): void {
if (!this.eventsDir || !this.bucketName) { if (!this.bucketName) {
logger.warn( logger.warn(
"Stripe local file path or bucket name is not configured, skipping event file write." "S3 bucket name is not configured, skipping event storage."
); );
return; return;
} }
if (!this.currentEventFile) { this.events.push(event);
this.currentEventFile = this.generateEventFileName();
this.currentFileStartTime = Date.now();
}
const filePath = path.join(this.eventsDir, this.currentEventFile);
try {
let events: StripeEvent[] = [];
// Try to read existing file
try {
const fileContent = await fs.readFile(filePath, "utf-8");
events = JSON.parse(fileContent);
} catch (error) {
// File doesn't exist or is empty, start with empty array
events = [];
}
// Add new event
events.push(event);
// Write back to file
await fs.writeFile(filePath, JSON.stringify(events, null, 2));
} catch (error) {
logger.error("Failed to write event to file:", error);
}
} }
private async checkAndUploadFile(): Promise<void> { private async checkAndUploadEvents(): Promise<void> {
if (!this.currentEventFile) {
return;
}
const now = Date.now(); const now = Date.now();
const fileAge = now - this.currentFileStartTime; const timeSinceLastUpload = now - this.lastUploadTime;
// Check if file is at least 1 minute old // Check if at least 1 minute has passed since last upload
if (fileAge >= 60000) { if (timeSinceLastUpload >= 60000 && this.events.length > 0) {
// 60 seconds await this.uploadEventsToS3();
await this.uploadFileToS3();
} }
} }
private async uploadFileToS3(): Promise<void> { private async uploadEventsToS3(): Promise<void> {
if (!this.bucketName || !this.eventsDir) { if (!this.bucketName) {
logger.warn( logger.warn(
"Stripe local file path or bucket name is not configured, skipping S3 upload." "S3 bucket name is not configured, skipping S3 upload."
);
return;
}
if (!this.currentEventFile) {
return;
}
const fileName = this.currentEventFile;
const filePath = path.join(this.eventsDir, fileName);
// Check if this file is already being uploaded
if (this.uploadingFiles.has(fileName)) {
logger.debug(
`File ${fileName} is already being uploaded, skipping`
); );
return; return;
} }
// Mark file as being uploaded if (this.events.length === 0) {
this.uploadingFiles.add(fileName); return;
}
// Check if already uploading
if (this.isUploading) {
logger.debug("Already uploading events, skipping");
return;
}
this.isUploading = true;
try { try {
// Check if file exists before trying to read it // Take a snapshot of current events and clear the array
try { const eventsToUpload = [...this.events];
await fs.access(filePath); this.events = [];
} catch (error) { this.lastUploadTime = Date.now();
logger.debug(
`File ${fileName} does not exist, may have been already processed`
);
this.uploadingFiles.delete(fileName);
// Reset current file if it was this file
if (this.currentEventFile === fileName) {
this.currentEventFile = null;
this.currentFileStartTime = 0;
}
return;
}
// Check if file exists and has content const fileName = this.generateEventFileName();
const fileContent = await fs.readFile(filePath, "utf-8"); const fileContent = JSON.stringify(eventsToUpload, null, 2);
const events = JSON.parse(fileContent);
if (events.length === 0) {
// No events to upload, just clean up
try {
await fs.unlink(filePath);
} catch (unlinkError) {
// File may have been already deleted
logger.debug(
`File ${fileName} was already deleted during cleanup`
);
}
this.currentEventFile = null;
this.uploadingFiles.delete(fileName);
return;
}
// Upload to S3 // Upload to S3
const uploadCommand = new PutObjectCommand({ const uploadCommand = new PutObjectCommand({
@@ -576,29 +437,15 @@ export class UsageService {
await s3Client.send(uploadCommand); await s3Client.send(uploadCommand);
// Clean up local file - check if it still exists before unlinking
try {
await fs.access(filePath);
await fs.unlink(filePath);
} catch (unlinkError) {
// File may have been already deleted by another process
logger.debug(
`File ${fileName} was already deleted during upload`
);
}
logger.info( logger.info(
`Uploaded ${fileName} to S3 with ${events.length} events` `Uploaded ${fileName} to S3 with ${eventsToUpload.length} events`
); );
// Reset for next file
this.currentEventFile = null;
this.currentFileStartTime = 0;
} catch (error) { } catch (error) {
logger.error(`Failed to upload ${fileName} to S3:`, error); logger.error("Failed to upload events to S3:", error);
// Note: Events are lost if upload fails. In a production system,
// you might want to add the events back to the array or implement retry logic
} finally { } finally {
// Always remove from uploading set this.isUploading = false;
this.uploadingFiles.delete(fileName);
} }
} }
@@ -695,111 +542,10 @@ export class UsageService {
} }
public async forceUpload(): Promise<void> { public async forceUpload(): Promise<void> {
await this.uploadFileToS3(); if (this.events.length > 0) {
} // Force upload regardless of time
this.lastUploadTime = 0; // Reset to force upload
/** await this.uploadEventsToS3();
* Scan the events directory for files older than 1 minute and upload them if not empty.
*/
private async uploadOldEventFiles(): Promise<void> {
if (!this.eventsDir || !this.bucketName) {
logger.warn(
"Stripe local file path or bucket name is not configured, skipping old event file upload."
);
return;
}
try {
const files = await fs.readdir(this.eventsDir);
const now = Date.now();
for (const file of files) {
if (!file.endsWith(".json")) continue;
// Skip files that are already being uploaded
if (this.uploadingFiles.has(file)) {
logger.debug(
`Skipping file ${file} as it's already being uploaded`
);
continue;
}
const filePath = path.join(this.eventsDir, file);
try {
// Check if file still exists before processing
try {
await fs.access(filePath);
} catch (accessError) {
logger.debug(`File ${file} does not exist, skipping`);
continue;
}
const stat = await fs.stat(filePath);
const age = now - stat.mtimeMs;
if (age >= 90000) {
// 1.5 minutes - Mark as being uploaded
this.uploadingFiles.add(file);
try {
const fileContent = await fs.readFile(
filePath,
"utf-8"
);
const events = JSON.parse(fileContent);
if (Array.isArray(events) && events.length > 0) {
// Upload to S3
const uploadCommand = new PutObjectCommand({
Bucket: this.bucketName,
Key: file,
Body: fileContent,
ContentType: "application/json"
});
await s3Client.send(uploadCommand);
// Check if file still exists before unlinking
try {
await fs.access(filePath);
await fs.unlink(filePath);
} catch (unlinkError) {
logger.debug(
`File ${file} was already deleted during interval upload`
);
}
logger.info(
`Interval: Uploaded event file ${file} to S3 with ${events.length} events`
);
// If this was the current event file, reset it
if (this.currentEventFile === file) {
this.currentEventFile = null;
this.currentFileStartTime = 0;
}
} else {
// Remove empty file
try {
await fs.access(filePath);
await fs.unlink(filePath);
} catch (unlinkError) {
logger.debug(
`Empty file ${file} was already deleted`
);
}
}
} finally {
// Always remove from uploading set
this.uploadingFiles.delete(file);
}
}
} catch (err) {
logger.error(
`Interval: Error processing event file ${file}:`,
err
);
// Remove from uploading set on error
this.uploadingFiles.delete(file);
}
}
} catch (err) {
logger.error("Interval: Failed to scan for event files:", err);
} }
} }

View File

@@ -128,10 +128,7 @@ export class PrivateConfig {
if (this.rawPrivateConfig.stripe?.s3Bucket) { if (this.rawPrivateConfig.stripe?.s3Bucket) {
process.env.S3_BUCKET = this.rawPrivateConfig.stripe.s3Bucket; process.env.S3_BUCKET = this.rawPrivateConfig.stripe.s3Bucket;
} }
if (this.rawPrivateConfig.stripe?.localFilePath) {
process.env.LOCAL_FILE_PATH =
this.rawPrivateConfig.stripe.localFilePath;
}
if (this.rawPrivateConfig.stripe?.s3Region) { if (this.rawPrivateConfig.stripe?.s3Region) {
process.env.S3_REGION = this.rawPrivateConfig.stripe.s3Region; process.env.S3_REGION = this.rawPrivateConfig.stripe.s3Region;
} }

View File

@@ -161,7 +161,7 @@ export const privateConfigSchema = z.object({
webhook_secret: z.string(), webhook_secret: z.string(),
s3Bucket: z.string(), s3Bucket: z.string(),
s3Region: z.string().default("us-east-1"), s3Region: z.string().default("us-east-1"),
localFilePath: z.string() localFilePath: z.string().optional()
}) })
.optional() .optional()
}); });

View File

@@ -78,7 +78,7 @@ export async function upsertLoginPageBranding(
next: NextFunction next: NextFunction
): Promise<any> { ): Promise<any> {
try { try {
const parsedBody = await bodySchema.safeParseAsync(req.body); const parsedBody = bodySchema.safeParse(req.body);
if (!parsedBody.success) { if (!parsedBody.success) {
return next( return next(
createHttpError( createHttpError(

View File

@@ -9,6 +9,9 @@ import createHttpError from "http-errors";
import logger from "@server/logger"; import logger from "@server/logger";
import { fromError } from "zod-validation-error"; import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi"; import { OpenAPITags, registry } from "@server/openApi";
import { rebuildClientAssociationsFromClient } from "@server/lib/rebuildClientAssociations";
import { sendTerminateClient } from "./terminate";
import { OlmErrorCodes } from "../olm/error";
const archiveClientSchema = z.strictObject({ const archiveClientSchema = z.strictObject({
clientId: z.string().transform(Number).pipe(z.int().positive()) clientId: z.string().transform(Number).pipe(z.int().positive())
@@ -74,6 +77,9 @@ export async function archiveClient(
.update(clients) .update(clients)
.set({ archived: true }) .set({ archived: true })
.where(eq(clients.clientId, clientId)); .where(eq(clients.clientId, clientId));
// Rebuild associations to clean up related data
await rebuildClientAssociationsFromClient(client, trx);
}); });
return response(res, { return response(res, {

View File

@@ -64,20 +64,16 @@ export async function ensureSetupToken() {
); );
} }
if (existingToken) { if (existingToken?.token !== envSetupToken) {
// Token exists in DB - update it if different console.warn(
if (existingToken.token !== envSetupToken) { "Overwriting existing token in DB since PANGOLIN_SETUP_TOKEN is set"
console.warn( );
"Overwriting existing token in DB since PANGOLIN_SETUP_TOKEN is set"
);
await db await db
.update(setupTokens) .update(setupTokens)
.set({ token: envSetupToken }) .set({ token: envSetupToken })
.where(eq(setupTokens.tokenId, existingToken.tokenId)); .where(eq(setupTokens.tokenId, existingToken.tokenId));
}
} else { } else {
// No existing token - insert new one
const tokenId = generateId(15); const tokenId = generateId(15);
await db.insert(setupTokens).values({ await db.insert(setupTokens).values({