Merge branch 'dev' of https://github.com/fosrl/pangolin into dev

This commit is contained in:
miloschwartz
2025-12-18 16:13:59 -05:00
7 changed files with 122 additions and 57 deletions

View File

@@ -31,7 +31,7 @@
 [![Slack](https://img.shields.io/badge/chat-slack-yellow?style=flat-square&logo=slack)](https://pangolin.net/slack)
 [![Docker](https://img.shields.io/docker/pulls/fosrl/pangolin?style=flat-square)](https://hub.docker.com/r/fosrl/pangolin)
 ![Stars](https://img.shields.io/github/stars/fosrl/pangolin?style=flat-square)
-[![YouTube](https://img.shields.io/badge/YouTube-red?logo=youtube&logoColor=white&style=flat-square)](https://www.youtube.com/@fossorial-app)
+[![YouTube](https://img.shields.io/badge/YouTube-red?logo=youtube&logoColor=white&style=flat-square)](https://www.youtube.com/@pangolin-net)
 </div>

View File

@@ -9,10 +9,15 @@ services:
       PARSERS: crowdsecurity/whitelists
       ENROLL_TAGS: docker
     healthcheck:
-      interval: 10s
-      retries: 15
-      timeout: 10s
-      test: ["CMD", "cscli", "capi", "status"]
+      test:
+        - CMD
+        - cscli
+        - lapi
+        - status
+      interval: 10s
+      timeout: 5s
+      retries: 3
+      start_period: 30s
     labels:
       - "traefik.enable=false" # Disable traefik for crowdsec
     volumes:

View File

@@ -44,7 +44,7 @@ http:
           crowdsecAppsecUnreachableBlock: true # Block on unreachable
           crowdsecAppsecBodyLimit: 10485760
           crowdsecLapiKey: "PUT_YOUR_BOUNCER_KEY_HERE_OR_IT_WILL_NOT_WORK" # CrowdSec API key which you will note down later
-          crowdsecLapiHost: crowdsec:8080 # CrowdSec
+          crowdsecLapiHost: crowdsec:8080 # CrowdSec
           crowdsecLapiScheme: http # CrowdSec API scheme
           forwardedHeadersTrustedIPs: # Forwarded headers trusted IPs
             - "0.0.0.0/0" # All IP addresses are trusted for forwarded headers (CHANGE MADE HERE)
@@ -106,4 +106,13 @@ http:
     api-service:
       loadBalancer:
         servers:
-          - url: "http://pangolin:3000" # API/WebSocket server
+          - url: "http://pangolin:3000" # API/WebSocket server
+
+tcp:
+  serversTransports:
+    pp-transport-v1:
+      proxyProtocol:
+        version: 1
+    pp-transport-v2:
+      proxyProtocol:
+        version: 2
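For context, the two new `serversTransports` only take effect once a TCP service references them; nothing in this file does so yet. A minimal sketch of how a service could opt in to sending PROXY protocol v2 to its backend (the router, service, entry point, and backend address below are hypothetical, not part of this commit):

```yaml
tcp:
  routers:
    example-router:
      entryPoints:
        - tcp-ep            # hypothetical entry point
      rule: "HostSNI(`*`)"
      service: example-service
  services:
    example-service:
      loadBalancer:
        serversTransport: pp-transport-v2 # send PROXY protocol v2 downstream
        servers:
          - address: "backend:5432"       # hypothetical backend
```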

View File

@@ -20,6 +20,7 @@ function createDb() {
 export const db = createDb();
 export default db;
 export const driver: "pg" | "sqlite" = "sqlite";
+
+export type Transaction = Parameters<
+    Parameters<(typeof db)["transaction"]>[0]
+>[0];
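The extracted `Transaction` type is the parameter type of the callback that `db.transaction()` receives, so helpers outside this module can accept a live transaction handle without re-deriving its type. A short usage sketch, assuming a `sessions` table with a `userId` column (illustrative, not from this diff):

```ts
import { eq } from "drizzle-orm";
import { db, sessions, type Transaction } from "@server/db";

// Helpers can now be typed against the transaction handle that
// db.transaction() passes to its callback.
async function revokeSessions(trx: Transaction, userId: string) {
    await trx.delete(sessions).where(eq(sessions.userId, userId));
}

export async function deleteUser(userId: string) {
    await db.transaction(async (trx) => {
        await revokeSessions(trx, userId);
        // ...further deletes share the same transaction
    });
}
```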

View File

@@ -12,6 +12,11 @@ import response from "@server/lib/response";
 import logger from "@server/logger";
 import { getSevenDaysAgo } from "@app/lib/getSevenDaysAgo";

+let primaryDb = db;
+if (driver == "pg") {
+    primaryDb = db.$primary as typeof db; // select the primary instance in a replicated setup
+}
+
 const queryAccessAuditLogsQuery = z.object({
     // iso string just validate its a parseable date
     timeStart: z
@@ -74,12 +79,12 @@ async function query(query: Q) {
         );
     }

-    const [all] = await db
+    const [all] = await primaryDb
         .select({ total: count() })
         .from(requestAuditLog)
         .where(baseConditions);

-    const [blocked] = await db
+    const [blocked] = await primaryDb
         .select({ total: count() })
         .from(requestAuditLog)
         .where(and(baseConditions, eq(requestAuditLog.action, false)));
@@ -88,7 +93,9 @@ async function query(query: Q) {
             .mapWith(Number)
             .as("total");

-    const requestsPerCountry = await db
+    const DISTINCT_LIMIT = 500;
+
+    const requestsPerCountry = await primaryDb
         .selectDistinct({
             code: requestAuditLog.location,
             count: totalQ
@@ -96,7 +103,16 @@ async function query(query: Q) {
         .from(requestAuditLog)
         .where(and(baseConditions, not(isNull(requestAuditLog.location))))
         .groupBy(requestAuditLog.location)
-        .orderBy(desc(totalQ));
+        .orderBy(desc(totalQ))
+        .limit(DISTINCT_LIMIT + 1);
+
+    if (requestsPerCountry.length > DISTINCT_LIMIT) {
+        // reject over-broad queries rather than returning a huge result
+        throw createHttpError(
+            HttpCode.BAD_REQUEST,
+            `Too many distinct countries. Please narrow your query.`
+        );
+    }

     const groupByDayFunction =
         driver === "pg"
@@ -106,7 +122,7 @@ async function query(query: Q) {
     const booleanTrue = driver === "pg" ? sql`true` : sql`1`;
     const booleanFalse = driver === "pg" ? sql`false` : sql`0`;

-    const requestsPerDay = await db
+    const requestsPerDay = await primaryDb
         .select({
             day: groupByDayFunction.as("day"),
             allowedCount:
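For background on `db.$primary`: Drizzle's read-replica wrapper routes plain reads to replicas and exposes the primary connection under `$primary`, which is why the stats queries above can opt out of replica lag. A rough sketch of the kind of setup that makes this meaningful, using Drizzle's `withReplicas` API (the env var names, replica count, and table name are assumptions):

```ts
import { sql } from "drizzle-orm";
import { drizzle } from "drizzle-orm/node-postgres";
import { withReplicas } from "drizzle-orm/pg-core";

// Primary and replica connections; env var names are assumptions.
const primary = drizzle(process.env.DATABASE_URL!);
const readReplica = drizzle(process.env.READ_REPLICA_URL!);

// Reads go to a replica; writes, transactions, and db.$primary hit the primary.
export const db = withReplicas(primary, [readReplica]);

// A freshly written audit row may not have replicated yet, so queries that
// must be complete can read from the primary explicitly:
export async function countAuditRowsOnPrimary() {
    return db.$primary.execute(sql`select count(*) from request_audit_log`);
}
```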

View File

@@ -1,4 +1,4 @@
-import { db, requestAuditLog, resources } from "@server/db";
+import { db, driver, requestAuditLog, resources } from "@server/db";
 import { registry } from "@server/openApi";
 import { NextFunction } from "express";
 import { Request, Response } from "express";
@@ -13,6 +13,11 @@ import response from "@server/lib/response";
 import logger from "@server/logger";
 import { getSevenDaysAgo } from "@app/lib/getSevenDaysAgo";

+let primaryDb = db;
+if (driver == "pg") {
+    primaryDb = db.$primary as typeof db; // select the primary instance in a replicated setup
+}
+
 export const queryAccessAuditLogsQuery = z.object({
     // iso string just validate its a parseable date
     timeStart: z
@@ -107,7 +112,7 @@ function getWhere(data: Q) {
 }

 export function queryRequest(data: Q) {
-    return db
+    return primaryDb
         .select({
             id: requestAuditLog.id,
             timestamp: requestAuditLog.timestamp,
@@ -143,7 +148,7 @@ export function queryRequest(data: Q) {
 }

 export function countRequestQuery(data: Q) {
-    const countQuery = db
+    const countQuery = primaryDb
         .select({ count: count() })
         .from(requestAuditLog)
         .where(getWhere(data));
@@ -173,50 +178,61 @@ async function queryUniqueFilterAttributes(
         eq(requestAuditLog.orgId, orgId)
     );

-    // Get unique actors
-    const uniqueActors = await db
-        .selectDistinct({
-            actor: requestAuditLog.actor
-        })
-        .from(requestAuditLog)
-        .where(baseConditions);
-
-    // Get unique locations
-    const uniqueLocations = await db
-        .selectDistinct({
-            locations: requestAuditLog.location
-        })
-        .from(requestAuditLog)
-        .where(baseConditions);
-
-    // TODO: SOMEONE PLEASE OPTIMIZE THIS!!!!!
-    // Get unique actors
-    const uniqueHosts = await db
-        .selectDistinct({
-            hosts: requestAuditLog.host
-        })
-        .from(requestAuditLog)
-        .where(baseConditions);
-
-    // Get unique actors
-    const uniquePaths = await db
-        .selectDistinct({
-            paths: requestAuditLog.path
-        })
-        .from(requestAuditLog)
-        .where(baseConditions);
-
-    // Get unique resources with names
-    const uniqueResources = await db
-        .selectDistinct({
-            id: requestAuditLog.resourceId,
-            name: resources.name
-        })
-        .from(requestAuditLog)
-        .leftJoin(
-            resources,
-            eq(requestAuditLog.resourceId, resources.resourceId)
-        )
-        .where(baseConditions);
+    const DISTINCT_LIMIT = 500;
+
+    // Run all queries in parallel
+    const [
+        uniqueActors,
+        uniqueLocations,
+        uniqueHosts,
+        uniquePaths,
+        uniqueResources
+    ] = await Promise.all([
+        primaryDb
+            .selectDistinct({ actor: requestAuditLog.actor })
+            .from(requestAuditLog)
+            .where(baseConditions)
+            .limit(DISTINCT_LIMIT + 1),
+        primaryDb
+            .selectDistinct({ locations: requestAuditLog.location })
+            .from(requestAuditLog)
+            .where(baseConditions)
+            .limit(DISTINCT_LIMIT + 1),
+        primaryDb
+            .selectDistinct({ hosts: requestAuditLog.host })
+            .from(requestAuditLog)
+            .where(baseConditions)
+            .limit(DISTINCT_LIMIT + 1),
+        primaryDb
+            .selectDistinct({ paths: requestAuditLog.path })
+            .from(requestAuditLog)
+            .where(baseConditions)
+            .limit(DISTINCT_LIMIT + 1),
+        primaryDb
+            .selectDistinct({
+                id: requestAuditLog.resourceId,
+                name: resources.name
+            })
+            .from(requestAuditLog)
+            .leftJoin(
+                resources,
+                eq(requestAuditLog.resourceId, resources.resourceId)
+            )
+            .where(baseConditions)
+            .limit(DISTINCT_LIMIT + 1)
+    ]);
+
+    if (
+        uniqueActors.length > DISTINCT_LIMIT ||
+        uniqueLocations.length > DISTINCT_LIMIT ||
+        uniqueHosts.length > DISTINCT_LIMIT ||
+        uniquePaths.length > DISTINCT_LIMIT ||
+        uniqueResources.length > DISTINCT_LIMIT
+    ) {
+        throw new Error(
+            "Too many distinct filter attributes to retrieve. Please refine your time range."
+        );
+    }

     return {
         actors: uniqueActors
@@ -295,6 +311,12 @@ export async function queryRequestAuditLogs(
         });
     } catch (error) {
         logger.error(error);
+        // surface the distinct-filter limit error to the client as a 400
+        if (
+            error instanceof Error &&
+            error.message ===
+                "Too many distinct filter attributes to retrieve. Please refine your time range."
+        ) {
+            return next(
+                createHttpError(HttpCode.BAD_REQUEST, error.message)
+            );
+        }
         return next(
             createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
         );
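The `DISTINCT_LIMIT` checks in this commit all follow the same fetch-one-more pattern: request `limit + 1` rows and treat an over-full result as "too many", which avoids a separate `COUNT(*)` round trip. A generic sketch of that idea (the `fetchCapped` helper is hypothetical, not part of the commit):

```ts
// Hypothetical utility. Fetching limit + 1 rows distinguishes
// "exactly limit" from "more than limit" without counting.
async function fetchCapped<T>(
    runQuery: (fetchCount: number) => Promise<T[]>,
    limit: number,
    onOverflow: () => Error
): Promise<T[]> {
    const rows = await runQuery(limit + 1);
    if (rows.length > limit) {
        throw onOverflow();
    }
    return rows;
}

// Usage against one of the queries above might look like:
// const uniqueActors = await fetchCapped(
//     (n) => primaryDb.selectDistinct(...).limit(n),
//     500,
//     () => new Error("Too many distinct filter attributes to retrieve.")
// );
```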

View File

@@ -194,11 +194,23 @@ export async function getOlmToken(
         .where(inArray(exitNodes.exitNodeId, exitNodeIds));
     }

+    // Map exitNodeId to siteIds
+    const exitNodeIdToSiteIds: Record<number, number[]> = {};
+    for (const { sites: site } of clientSites) {
+        if (site.exitNodeId !== null) {
+            if (!exitNodeIdToSiteIds[site.exitNodeId]) {
+                exitNodeIdToSiteIds[site.exitNodeId] = [];
+            }
+            exitNodeIdToSiteIds[site.exitNodeId].push(site.siteId);
+        }
+    }
+
     const exitNodesHpData = allExitNodes.map((exitNode: ExitNode) => {
         return {
             publicKey: exitNode.publicKey,
             relayPort: config.getRawConfig().gerbil.clients_start_port,
-            endpoint: exitNode.endpoint
+            endpoint: exitNode.endpoint,
+            siteIds: exitNodeIdToSiteIds[exitNode.exitNodeId] ?? []
         };
     });
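With the new `siteIds` field, each hole-punch entry carries the sites reachable through that exit node, and `?? []` keeps nodes with no mapped sites valid. A self-contained sketch of the grouping step in isolation (the sample rows are invented for illustration):

```ts
type SiteRow = { siteId: number; exitNodeId: number | null };

const sites: SiteRow[] = [
    { siteId: 1, exitNodeId: 10 },
    { siteId: 2, exitNodeId: 10 },
    { siteId: 3, exitNodeId: null } // not pinned to an exit node; skipped
];

// Group siteIds under their exitNodeId, as the loop above does.
const byExitNode: Record<number, number[]> = {};
for (const site of sites) {
    if (site.exitNodeId !== null) {
        (byExitNode[site.exitNodeId] ??= []).push(site.siteId);
    }
}
// byExitNode => { 10: [1, 2] }
```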