mirror of https://github.com/fosrl/pangolin.git
synced 2026-05-14 20:55:18 +00:00

Compare commits: 1.18.4-s.1 ... 1.18.4-s.4 (6 commits)
| SHA1 |
|---|
| 92f992728f |
| 78ad2d17c7 |
| b29bb7384d |
| 5a8de8210b |
| d5181454f4 |
| 0e0666cacf |
```diff
@@ -11,7 +11,7 @@ import {
     ExitNode
 } from "@server/db";
 import { db } from "@server/db";
-import { eq, and } from "drizzle-orm";
+import { eq, and, inArray } from "drizzle-orm";
 import HttpCode from "@server/types/HttpCode";
 import createHttpError from "http-errors";
 import logger from "@server/logger";
```
```diff
@@ -202,24 +202,29 @@ export async function updateAndGenerateEndpointDestinations(
             )
         );

-        // Update clientSites for each site on this exit node
+        // Format the endpoint properly for both IPv4 and IPv6
+        const formattedEndpoint = formatEndpoint(ip, port);
+
+        // Determine which rows actually need updating and whether the endpoint
+        // (as opposed to only the publicKey) changed for any of them.
+        const siteIdsToUpdate: number[] = [];
+        let endpointChanged = false;
         for (const site of sitesOnExitNode) {
             // logger.debug(
             //     `Updating site ${site.siteId} on exit node ${exitNode.exitNodeId}`
             // );

-            // Format the endpoint properly for both IPv4 and IPv6
-            const formattedEndpoint = formatEndpoint(ip, port);
-
             // if the public key or endpoint has changed, update it otherwise continue
             if (
                 site.endpoint === formattedEndpoint &&
                 site.publicKey === publicKey
             ) {
                 continue;
             }
+            siteIdsToUpdate.push(site.siteId);
+            if (site.endpoint !== formattedEndpoint) {
+                endpointChanged = true;
+            }
+        }

-            const [updatedClientSitesAssociationsCache] = await db
+        if (siteIdsToUpdate.length > 0) {
+            // Single bulk update for all affected rows for this client on this exit node
+            await db
                 .update(clientSitesAssociationsCache)
                 .set({
                     endpoint: formattedEndpoint,
```
```diff
@@ -228,24 +233,22 @@ export async function updateAndGenerateEndpointDestinations(
                 .where(
                     and(
                         eq(clientSitesAssociationsCache.clientId, olm.clientId),
-                        eq(clientSitesAssociationsCache.siteId, site.siteId)
+                        inArray(
+                            clientSitesAssociationsCache.siteId,
+                            siteIdsToUpdate
+                        )
                     )
-                )
-                .returning();
+                );

-            if (
-                updatedClientSitesAssociationsCache.endpoint !==
-                    site.endpoint && // this is the endpoint from the join table not the site
-                updatedClient.pubKey === publicKey // only trigger if the client's public key matches the current public key which means it has registered so we dont prematurely send the update
-            ) {
+            // Only trigger downstream peer updates once per hole punch: the
+            // endpoint is the same for every site on this exit node, and
+            // handleClientEndpointChange already fans out to all connected
+            // sites for this client.
+            if (endpointChanged && updatedClient.pubKey === publicKey) {
                 logger.info(
-                    `ClientSitesAssociationsCache for client ${olm.clientId} and site ${site.siteId} endpoint changed from ${site.endpoint} to ${updatedClientSitesAssociationsCache.endpoint}`
+                    `ClientSitesAssociationsCache for client ${olm.clientId} endpoint changed to ${formattedEndpoint} for ${siteIdsToUpdate.length} site(s) on exit node ${exitNode.exitNodeId}`
                 );
-                // Handle any additional logic for endpoint change
-                handleClientEndpointChange(
-                    olm.clientId,
-                    updatedClientSitesAssociationsCache.endpoint!
-                );
+                handleClientEndpointChange(olm.clientId, formattedEndpoint);
             }
         }
```
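The two hunks above replace a per-row `UPDATE ... RETURNING` inside the loop with one guarded bulk write. A minimal standalone sketch of the same drizzle-orm idiom, assuming a hypothetical stand-in table rather than the repository's schema:

```typescript
import Database from "better-sqlite3";
import { drizzle } from "drizzle-orm/better-sqlite3";
import { and, eq, inArray } from "drizzle-orm";
import { integer, sqliteTable, text } from "drizzle-orm/sqlite-core";

// Hypothetical stand-in for clientSitesAssociationsCache.
const cache = sqliteTable("client_sites_cache", {
    clientId: integer("client_id").notNull(),
    siteId: integer("site_id").notNull(),
    endpoint: text("endpoint")
});

const sqlite = new Database(":memory:");
// Create the table so the sketch is runnable end to end.
sqlite.exec(
    "CREATE TABLE client_sites_cache (client_id INTEGER NOT NULL, site_id INTEGER NOT NULL, endpoint TEXT)"
);
const db = drizzle(sqlite);

// Collect the rows that actually differ, then issue a single
// UPDATE ... WHERE siteId IN (...) instead of one UPDATE per row.
async function bulkSetEndpoint(
    clientId: number,
    rows: { siteId: number; endpoint: string | null }[],
    newEndpoint: string
) {
    const staleSiteIds = rows
        .filter((r) => r.endpoint !== newEndpoint)
        .map((r) => r.siteId);
    if (staleSiteIds.length === 0) return; // nothing changed; skip the write

    await db
        .update(cache)
        .set({ endpoint: newEndpoint })
        .where(
            and(
                eq(cache.clientId, clientId),
                inArray(cache.siteId, staleSiteIds)
            )
        );
}
```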
```diff
@@ -408,12 +411,14 @@ async function handleSiteEndpointChange(siteId: number, newEndpoint: string) {
             return;
         }

-        // Get all non-relayed clients connected to this site
+        // Get all non-relayed and not jit clients connected to this site
         const connectedClients = await db
             .select({
                 online: clients.online,
                 clientId: clients.clientId,
                 olmId: olms.olmId,
-                isRelayed: clientSitesAssociationsCache.isRelayed
+                isRelayed: clientSitesAssociationsCache.isRelayed,
+                isJitMode: clientSitesAssociationsCache.isJitMode
             })
             .from(clientSitesAssociationsCache)
             .innerJoin(
```
```diff
@@ -423,32 +428,36 @@ async function handleSiteEndpointChange(siteId: number, newEndpoint: string) {
             .innerJoin(olms, eq(olms.clientId, clients.clientId))
             .where(
                 and(
                     eq(clients.online, true), // the client has to be online or it does not matter...
                     eq(clientSitesAssociationsCache.siteId, siteId),
-                    eq(clientSitesAssociationsCache.isRelayed, false)
+                    eq(clientSitesAssociationsCache.isRelayed, false),
+                    eq(clientSitesAssociationsCache.isJitMode, false)
                 )
             );

-        // Update each non-relayed client with the new site endpoint
-        for (const client of connectedClients) {
-            try {
-                await updateOlmPeer(
-                    client.clientId,
-                    {
-                        siteId: siteId,
-                        publicKey: site.publicKey,
-                        endpoint: newEndpoint
-                    },
-                    client.olmId
-                );
-                logger.debug(
-                    `Updated client ${client.clientId} with new site ${siteId} endpoint: ${newEndpoint}`
-                );
-            } catch (error) {
-                logger.error(
-                    `Failed to update client ${client.clientId} with new site endpoint: ${error}`
-                );
-            }
-        }
+        // Update each non-relayed client with the new site endpoint (in parallel)
+        await Promise.allSettled(
+            connectedClients.map(async (client) => {
+                try {
+                    await updateOlmPeer(
+                        client.clientId,
+                        {
+                            siteId: siteId,
+                            publicKey: site.publicKey!,
+                            endpoint: newEndpoint
+                        },
+                        client.olmId
+                    );
+                    logger.debug(
+                        `Updated client ${client.clientId} with new site ${siteId} endpoint: ${newEndpoint}`
+                    );
+                } catch (error) {
+                    logger.error(
+                        `Failed to update client ${client.clientId} with new site endpoint: ${error}`
+                    );
+                }
+            })
+        );
     } catch (error) {
         logger.error(
             `Error handling site endpoint change for site ${siteId}: ${error}`
```
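Note the semantics of the new version: `Promise.allSettled` starts every `updateOlmPeer` call before waiting, so total wall time is bounded by the slowest peer rather than the sum, and the per-item `try/catch` keeps one failing client from affecting the rest. The idiom in isolation, with generic names not tied to this file:

```typescript
// Fan an async update out over a list; wall time ≈ the slowest item, and a
// failure in one item is logged without aborting the others.
async function fanOut<T>(
    items: T[],
    label: (item: T) => string,
    update: (item: T) => Promise<void>
): Promise<void> {
    await Promise.allSettled(
        items.map(async (item) => {
            try {
                await update(item);
            } catch (error) {
                // Swallow per-item failures after logging, mirroring the
                // try/catch inside the map callback in the diff above.
                console.error(`update failed for ${label(item)}: ${error}`);
            }
        })
    );
}
```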
```diff
@@ -456,11 +465,11 @@ async function handleSiteEndpointChange(siteId: number, newEndpoint: string) {
     }
 }

-async function handleClientEndpointChange(
+async function handleClientEndpointChange( // TODO: I THINK WE DONT NEED TO HIT EVERY SITE HERE BECAUSE WE ONLY NEED TO UPDATE THE SITES CONNECTED TO THIS NODE WHICH WE ALREADY HAVE FROM ABOVE
     clientId: number,
     newEndpoint: string
 ) {
-    // Alert all sites connected to this client that the endpoint has changed (only if NOT relayed)
+    // Alert all sites connected to this client that the endpoint has changed (only if NOT relayed and NOT JIT MODE)
     try {
         // Get client details
         const [client] = await db
```
```diff
@@ -480,6 +489,7 @@ async function handleClientEndpointChange(
                 siteId: sites.siteId,
                 newtId: newts.newtId,
                 isRelayed: clientSitesAssociationsCache.isRelayed,
+                isJitMode: clientSitesAssociationsCache.isJitMode,
                 subnet: clients.subnet
             })
             .from(clientSitesAssociationsCache)
```
```diff
@@ -494,38 +504,49 @@ async function handleClientEndpointChange(
             )
             .where(
                 and(
                     eq(sites.online, true), // the site has to be online or it does not matter...
                     eq(clientSitesAssociationsCache.clientId, clientId),
-                    eq(clientSitesAssociationsCache.isRelayed, false)
+                    eq(clientSitesAssociationsCache.isRelayed, false),
+                    eq(clientSitesAssociationsCache.isJitMode, false)
                 )
             );

-        // Update each non-relayed site with the new client endpoint
-        for (const siteData of connectedSites) {
-            try {
-                if (!siteData.subnet) {
-                    logger.warn(
-                        `Client ${clientId} has no subnet, skipping update for site ${siteData.siteId}`
-                    );
-                    continue;
-                }
+        if (connectedSites.length > 250) {
+            logger.warn(
+                `Client ${clientId} has ${connectedSites.length} connected sites so the client will be in jit mode anyway, skipping endpoint updates`
+            );
+            return;
+        }

-                await updateNewtPeer(
-                    siteData.siteId,
-                    client.pubKey,
-                    {
-                        endpoint: newEndpoint
-                    },
-                    siteData.newtId
-                );
-                logger.debug(
-                    `Updated site ${siteData.siteId} with new client ${clientId} endpoint: ${newEndpoint}`
-                );
-            } catch (error) {
-                logger.error(
-                    `Failed to update site ${siteData.siteId} with new client endpoint: ${error}`
-                );
-            }
-        }
+        // Update each non-relayed site with the new client endpoint (in parallel)
+        await Promise.allSettled(
+            connectedSites.map(async (siteData) => {
+                if (!siteData.subnet || !client.pubKey) {
+                    logger.warn(
+                        `Client ${clientId} has no subnet or public key, skipping update for site ${siteData.siteId}`
+                    );
+                    return;
+                }
+
+                try {
+                    await updateNewtPeer(
+                        siteData.siteId,
+                        client.pubKey,
+                        {
+                            endpoint: newEndpoint
+                        },
+                        siteData.newtId
+                    );
+                    logger.debug(
+                        `Updated site ${siteData.siteId} with new client ${clientId} endpoint: ${newEndpoint}`
+                    );
+                } catch (error) {
+                    logger.error(
+                        `Failed to update site ${siteData.siteId} with new client endpoint: ${error}`
+                    );
+                }
+            })
+        );
    } catch (error) {
        logger.error(
            `Error handling client endpoint change for client ${clientId}: ${error}`
```
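The `connectedSites.length > 250` guard short-circuits the fan-out for very large clients: 250 is the same cutoff at which registration drops a client into JIT mode (see the `handleOlmRegisterMessage` and `getOlmToken` hunks below), so pushing per-site endpoint updates to such a client would be wasted work. The changeset hard-codes the literal at each call site; a shared constant would keep the thresholds from drifting. A sketch, where the constant name is ours and not the repository's:

```typescript
// Hypothetical shared constant; the changeset writes the literal 250 in
// handleClientEndpointChange, getOlmToken, and the register handler.
export const JIT_MODE_SITE_THRESHOLD = 250;

// Single predicate used wherever the "too many sites" decision is made.
export function shouldUseJitMode(siteCount: number): boolean {
    return siteCount > JIT_MODE_SITE_THRESHOLD;
}
```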
```diff
@@ -5,6 +5,7 @@ import {
     db,
     exitNodes,
     networks,
+    SiteResource,
     siteNetworks,
     siteResources,
     sites
```
```diff
@@ -15,7 +16,7 @@ import {
     generateRemoteSubnets
 } from "@server/lib/ip";
 import logger from "@server/logger";
-import { and, eq } from "drizzle-orm";
+import { eq, inArray } from "drizzle-orm";
 import { addPeer, deletePeer } from "../newt/peers";
 import config from "@server/lib/config";

```
```diff
@@ -46,49 +47,79 @@ export async function buildSiteConfigurationForOlmClient(
         )
         .where(eq(clientSitesAssociationsCache.clientId, client.clientId));

+    if (sitesData.length === 0) {
+        return siteConfigurations;
+    }
+
+    // Batch-fetch every site resource this client has access to across ALL sites
+    // in a single query, then group by siteId in memory. This avoids issuing one
+    // query per site (which would be N round-trips for N sites).
+    const allClientSiteResources = await db
+        .select({
+            siteResource: siteResources,
+            siteId: siteNetworks.siteId
+        })
+        .from(siteResources)
+        .innerJoin(
+            clientSiteResourcesAssociationsCache,
+            eq(
+                siteResources.siteResourceId,
+                clientSiteResourcesAssociationsCache.siteResourceId
+            )
+        )
+        .innerJoin(networks, eq(siteResources.networkId, networks.networkId))
+        .innerJoin(siteNetworks, eq(networks.networkId, siteNetworks.networkId))
+        .where(
+            eq(clientSiteResourcesAssociationsCache.clientId, client.clientId)
+        );
+
+    const siteResourcesBySiteId = new Map<number, SiteResource[]>();
+    for (const row of allClientSiteResources) {
+        const arr = siteResourcesBySiteId.get(row.siteId);
+        if (arr) {
+            arr.push(row.siteResource);
+        } else {
+            siteResourcesBySiteId.set(row.siteId, [row.siteResource]);
+        }
+    }
+
+    // Batch-fetch exit nodes for all sites in one query (only needed in relay mode).
+    const exitNodesById = new Map<number, typeof exitNodes.$inferSelect>();
+    if (!jitMode && relay) {
+        const exitNodeIds = Array.from(
+            new Set(
+                sitesData
+                    .map(({ sites: s }) => s.exitNodeId)
+                    .filter((id): id is number => id != null)
+            )
+        );
+        if (exitNodeIds.length > 0) {
+            const nodes = await db
+                .select()
+                .from(exitNodes)
+                .where(inArray(exitNodes.exitNodeId, exitNodeIds));
+            for (const n of nodes) {
+                exitNodesById.set(n.exitNodeId, n);
+            }
+        }
+    }
+
+    const clientsStartPort = config.getRawConfig().gerbil.clients_start_port;
+    const peerOps: Promise<unknown>[] = [];
+
     // Process each site
     for (const {
         sites: site,
         clientSitesAssociationsCache: association
     } of sitesData) {
-        const allSiteResources = await db // only get the site resources that this client has access to
-            .select()
-            .from(siteResources)
-            .innerJoin(
-                clientSiteResourcesAssociationsCache,
-                eq(
-                    siteResources.siteResourceId,
-                    clientSiteResourcesAssociationsCache.siteResourceId
-                )
-            )
-            .innerJoin(
-                networks,
-                eq(siteResources.networkId, networks.networkId)
-            )
-            .innerJoin(
-                siteNetworks,
-                eq(networks.networkId, siteNetworks.networkId)
-            )
-            .where(
-                and(
-                    eq(siteNetworks.siteId, site.siteId),
-                    eq(
-                        clientSiteResourcesAssociationsCache.clientId,
-                        client.clientId
-                    )
-                )
-            );
+        const allSiteResources = siteResourcesBySiteId.get(site.siteId) ?? [];

         if (jitMode) {
             // Add site configuration to the array
             siteConfigurations.push({
                 siteId: site.siteId,
-                // remoteSubnets: generateRemoteSubnets(
-                //     allSiteResources.map(({ siteResources }) => siteResources)
-                // ),
-                aliases: generateAliasConfig(
-                    allSiteResources.map(({ siteResources }) => siteResources)
-                )
+                // remoteSubnets: generateRemoteSubnets(allSiteResources),
+                aliases: generateAliasConfig(allSiteResources)
             });
             continue;
         }
```
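This hunk removes an N+1 pattern: instead of one `siteResources` query per site inside the loop, a single joined query returns every (siteId, resource) pair and the rows are bucketed in memory. The grouping step as a generic helper, for illustration only (the diff inlines it):

```typescript
// Bucket (key, value) pairs into a Map in one pass — the same shape as the
// siteResourcesBySiteId grouping in the hunk above.
function groupByKey<V>(rows: { key: number; value: V }[]): Map<number, V[]> {
    const buckets = new Map<number, V[]>();
    for (const { key, value } of rows) {
        const bucket = buckets.get(key);
        if (bucket) {
            bucket.push(value); // existing bucket: append
        } else {
            buckets.set(key, [value]); // first value for this key
        }
    }
    return buckets;
}
```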
```diff
@@ -126,7 +157,7 @@ export async function buildSiteConfigurationForOlmClient(
             logger.info(
                 `Public key mismatch. Deleting old peer from site ${site.siteId}...`
             );
-            await deletePeer(site.siteId, client.pubKey!);
+            peerOps.push(deletePeer(site.siteId, client.pubKey!));
         }

         if (!site.subnet) {
```
```diff
@@ -134,27 +165,19 @@ export async function buildSiteConfigurationForOlmClient(
             continue;
         }

-        const [clientSite] = await db
-            .select()
-            .from(clientSitesAssociationsCache)
-            .where(
-                and(
-                    eq(clientSitesAssociationsCache.clientId, client.clientId),
-                    eq(clientSitesAssociationsCache.siteId, site.siteId)
-                )
-            )
-            .limit(1);
-
-        // Add the peer to the exit node for this site
-        if (clientSite.endpoint && publicKey) {
+        // Add the peer to the exit node for this site. The endpoint comes from
+        // the already-joined association row above, so no extra query needed.
+        if (association.endpoint && publicKey) {
             logger.info(
-                `Adding peer ${publicKey} to site ${site.siteId} with endpoint ${clientSite.endpoint}`
+                `Adding peer ${publicKey} to site ${site.siteId} with endpoint ${association.endpoint}`
             );
-            await addPeer(site.siteId, {
-                publicKey: publicKey,
-                allowedIps: [`${client.subnet.split("/")[0]}/32`], // we want to only allow from that client
-                endpoint: relay ? "" : clientSite.endpoint
-            });
+            peerOps.push(
+                addPeer(site.siteId, {
+                    publicKey: publicKey,
+                    allowedIps: [`${client.subnet.split("/")[0]}/32`], // we want to only allow from that client
+                    endpoint: relay ? "" : association.endpoint
+                })
+            );
         } else {
             logger.warn(
                 `Client ${client.clientId} has no endpoint, skipping peer addition`
```
```diff
@@ -163,16 +186,12 @@ export async function buildSiteConfigurationForOlmClient(

         let relayEndpoint: string | undefined = undefined;
         if (relay) {
-            const [exitNode] = await db
-                .select()
-                .from(exitNodes)
-                .where(eq(exitNodes.exitNodeId, site.exitNodeId))
-                .limit(1);
+            const exitNode = exitNodesById.get(site.exitNodeId);
             if (!exitNode) {
                 logger.warn(`Exit node not found for site ${site.siteId}`);
                 continue;
             }
-            relayEndpoint = `${exitNode.endpoint}:${config.getRawConfig().gerbil.clients_start_port}`;
+            relayEndpoint = `${exitNode.endpoint}:${clientsStartPort}`;
         }

         // Add site configuration to the array
```
```diff
@@ -184,12 +203,16 @@ export async function buildSiteConfigurationForOlmClient(
             publicKey: site.publicKey,
             serverIP: site.address,
             serverPort: site.listenPort,
-            remoteSubnets: generateRemoteSubnets(
-                allSiteResources.map(({ siteResources }) => siteResources)
-            ),
-            aliases: generateAliasConfig(
-                allSiteResources.map(({ siteResources }) => siteResources)
-            )
+            remoteSubnets: generateRemoteSubnets(allSiteResources),
+            aliases: generateAliasConfig(allSiteResources)
         });
     }

+    // Run all peer add/delete operations concurrently rather than serially per
+    // site, so total time is bounded by the slowest call instead of the sum.
+    if (peerOps.length > 0) {
+        Promise.allSettled(peerOps).catch((err) => {
+            logger.error("Error processing peer operations: ", err);
+        });
+    }
+
```
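One subtlety in the `peerOps` flush above: `Promise.allSettled` never rejects, so the `.catch` can only fire if something throws synchronously; individual `addPeer`/`deletePeer` failures land in the settled results and are otherwise silent. A sketch of surfacing them while keeping the fire-and-forget behavior, assuming the same `peerOps: Promise<unknown>[]` array:

```typescript
// allSettled resolves with one result per promise; inspect the rejected ones
// to log individual peer-operation failures without awaiting the batch.
void Promise.allSettled(peerOps).then((results) => {
    for (const result of results) {
        if (result.status === "rejected") {
            console.error("peer operation failed:", result.reason);
        }
    }
});
```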
```diff
@@ -8,7 +8,7 @@ import {
     ExitNode,
     exitNodes,
     sites,
-    clientSitesAssociationsCache,
+    clientSitesAssociationsCache
 } from "@server/db";
 import { olms } from "@server/db";
 import HttpCode from "@server/types/HttpCode";
```
```diff
@@ -28,6 +28,7 @@ import { verifyPassword } from "@server/auth/password";
 import logger from "@server/logger";
 import config from "@server/lib/config";
 import { APP_VERSION } from "@server/lib/consts";
+import { build } from "@server/build";

 export const olmGetTokenBodySchema = z.object({
     olmId: z.string(),
```
```diff
@@ -220,6 +221,22 @@ export async function getOlmToken(
         )
         .where(eq(clientSitesAssociationsCache.clientId, clientIdToUse!));

+    if (clientSites.length > 250 && build == "saas") {
+        // set all of the cache rows isJitMode to true
+        await db
+            .update(clientSitesAssociationsCache)
+            .set({ isJitMode: true })
+            .where(
+                and(
+                    eq(
+                        clientSitesAssociationsCache.clientId,
+                        clientIdToUse!
+                    ),
+                    eq(clientSitesAssociationsCache.isJitMode, false)
+                )
+            );
+    }
+
     // Extract unique exit node IDs
     const exitNodeIds = Array.from(
         new Set(
```
```diff
@@ -1,4 +1,4 @@
-import { db, orgs } from "@server/db";
+import { db, orgs, primaryDb } from "@server/db";
 import { MessageHandler } from "@server/routers/ws";
 import {
     clients,
```
```diff
@@ -7,7 +7,7 @@ import {
     olms,
     sites
 } from "@server/db";
-import { count, eq } from "drizzle-orm";
+import { and, count, eq, ne, or } from "drizzle-orm";
 import logger from "@server/logger";
 import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
 import { validateSessionToken } from "@server/auth/sessions/app";
```
```diff
@@ -81,7 +81,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
             .where(eq(olms.olmId, olm.olmId));
     }

-    const [client] = await db
+    const [client] = await primaryDb // read from the primary here so there is no latency with the last update on the holepunch
         .select()
         .from(clients)
         .where(eq(clients.clientId, olm.clientId))
```
```diff
@@ -98,7 +98,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
     if (client.blocked) {
         logger.debug(
             `[handleOlmRegisterMessage] Client ${client.clientId} is blocked. Ignoring register.`,
-            { orgId: client.orgId }
+            { orgId: client.orgId, clientId: client.clientId }
         );
         sendOlmError(OlmErrorCodes.CLIENT_BLOCKED, olm.olmId);
         return;
```
```diff
@@ -107,7 +107,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
     if (client.approvalState == "pending") {
         logger.debug(
             `[handleOlmRegisterMessage] Client ${client.clientId} approval is pending. Ignoring register.`,
-            { orgId: client.orgId }
+            { orgId: client.orgId, clientId: client.clientId }
         );
         sendOlmError(OlmErrorCodes.CLIENT_PENDING, olm.olmId);
         return;
```
```diff
@@ -136,7 +136,8 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {

     if (!org) {
         logger.warn("[handleOlmRegisterMessage] Org not found", {
-            orgId: client.orgId
+            orgId: client.orgId,
+            clientId: client.clientId
         });
         sendOlmError(OlmErrorCodes.ORG_NOT_FOUND, olm.olmId);
         return;
```
```diff
@@ -145,7 +146,8 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
     if (orgId) {
         if (!olm.userId) {
             logger.warn("[handleOlmRegisterMessage] Olm has no user ID", {
-                orgId: client.orgId
+                orgId: client.orgId,
+                clientId: client.clientId
             });
             sendOlmError(OlmErrorCodes.USER_ID_NOT_FOUND, olm.olmId);
             return;
```
```diff
@@ -156,7 +158,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
         if (!userSession || !user) {
             logger.warn(
                 "[handleOlmRegisterMessage] Invalid user session for olm register",
-                { orgId: client.orgId }
+                { orgId: client.orgId, clientId: client.clientId }
             );
             sendOlmError(OlmErrorCodes.INVALID_USER_SESSION, olm.olmId);
             return;
```
```diff
@@ -164,7 +166,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
         if (user.userId !== olm.userId) {
             logger.warn(
                 "[handleOlmRegisterMessage] User ID mismatch for olm register",
-                { orgId: client.orgId }
+                { orgId: client.orgId, clientId: client.clientId }
             );
             sendOlmError(OlmErrorCodes.USER_ID_MISMATCH, olm.olmId);
             return;
```
```diff
@@ -182,13 +184,14 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {

         logger.debug("[handleOlmRegisterMessage] Policy check result", {
             orgId: client.orgId,
+            clientId: client.clientId,
             policyCheck
         });

         if (policyCheck?.error) {
             logger.error(
                 `[handleOlmRegisterMessage] Error checking access policies for olm user ${olm.userId} in org ${orgId}: ${policyCheck?.error}`,
-                { orgId: client.orgId }
+                { orgId: client.orgId, clientId: client.clientId }
             );
             sendOlmError(OlmErrorCodes.ORG_ACCESS_POLICY_DENIED, olm.olmId);
             return;
```
```diff
@@ -197,7 +200,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
         if (policyCheck.policies?.passwordAge?.compliant === false) {
             logger.warn(
                 `[handleOlmRegisterMessage] Olm user ${olm.userId} has non-compliant password age for org ${orgId}`,
-                { orgId: client.orgId }
+                { orgId: client.orgId, clientId: client.clientId }
             );
             sendOlmError(
                 OlmErrorCodes.ORG_ACCESS_POLICY_PASSWORD_EXPIRED,
```
```diff
@@ -209,7 +212,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
         ) {
             logger.warn(
                 `[handleOlmRegisterMessage] Olm user ${olm.userId} has non-compliant session length for org ${orgId}`,
-                { orgId: client.orgId }
+                { orgId: client.orgId, clientId: client.clientId }
             );
             sendOlmError(
                 OlmErrorCodes.ORG_ACCESS_POLICY_SESSION_EXPIRED,
```
```diff
@@ -219,7 +222,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
         } else if (policyCheck.policies?.requiredTwoFactor === false) {
             logger.warn(
                 `[handleOlmRegisterMessage] Olm user ${olm.userId} does not have 2FA enabled for org ${orgId}`,
-                { orgId: client.orgId }
+                { orgId: client.orgId, clientId: client.clientId }
             );
             sendOlmError(
                 OlmErrorCodes.ORG_ACCESS_POLICY_2FA_REQUIRED,
```
```diff
@@ -229,7 +232,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
         } else if (!policyCheck.allowed) {
             logger.warn(
                 `[handleOlmRegisterMessage] Olm user ${olm.userId} does not pass access policies for org ${orgId}: ${policyCheck.error}`,
-                { orgId: client.orgId }
+                { orgId: client.orgId, clientId: client.clientId }
             );
             sendOlmError(OlmErrorCodes.ORG_ACCESS_POLICY_DENIED, olm.olmId);
             return;
```
```diff
@@ -253,7 +256,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
     // Prepare an array to store site configurations
     logger.debug(
         `[handleOlmRegisterMessage] Found ${sitesCount} sites for client ${client.clientId}`,
-        { orgId: client.orgId }
+        { orgId: client.orgId, clientId: client.clientId }
     );

     let jitMode = false;
```
```diff
@@ -263,19 +266,20 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
         // If we have too many sites we need to drop into fully JIT mode by not sending any of the sites
         logger.info(
             `[handleOlmRegisterMessage] Too many sites (${sitesCount}), dropping into JIT mode`,
-            { orgId: client.orgId }
+            { orgId: client.orgId, clientId: client.clientId }
         );
         jitMode = true;
     }

     logger.debug(
         `[handleOlmRegisterMessage] Olm client ID: ${client.clientId}, Public Key: ${publicKey}, Relay: ${relay}`,
-        { orgId: client.orgId }
+        { orgId: client.orgId, clientId: client.clientId }
     );

     if (!publicKey) {
         logger.warn("[handleOlmRegisterMessage] Public key not provided", {
-            orgId: client.orgId
+            orgId: client.orgId,
+            clientId: client.clientId
         });
         return;
     }
```
```diff
@@ -283,7 +287,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
     if (client.pubKey !== publicKey || client.archived) {
         logger.info(
             "[handleOlmRegisterMessage] Public key mismatch. Updating public key and clearing session info...",
-            { orgId: client.orgId }
+            { orgId: client.orgId, clientId: client.clientId }
         );
         // Update the client's public key
         await db
```
```diff
@@ -301,7 +305,18 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
                 isRelayed: relay == true,
                 isJitMode: jitMode
             })
-            .where(eq(clientSitesAssociationsCache.clientId, client.clientId));
+            .where(
+                and(
+                    eq(clientSitesAssociationsCache.clientId, client.clientId),
+                    or(
+                        ne(
+                            clientSitesAssociationsCache.isRelayed,
+                            relay == true
+                        ),
+                        ne(clientSitesAssociationsCache.isJitMode, jitMode)
+                    )
+                )
+            );
     }

     // this prevents us from accepting a register from an olm that has not hole punched yet.
```
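The widened `where` clause turns the cache refresh into a conditional write: rows whose `isRelayed`/`isJitMode` flags already match the desired state are left untouched, so a register that changes nothing becomes a zero-row UPDATE. The same `or(ne(...), ne(...))` guard in a minimal sketch, against a hypothetical table rather than the repository's schema:

```typescript
import { and, eq, ne, or } from "drizzle-orm";
import { integer, sqliteTable } from "drizzle-orm/sqlite-core";

// Hypothetical stand-in for clientSitesAssociationsCache.
const cache = sqliteTable("client_sites_cache", {
    clientId: integer("client_id").notNull(),
    isRelayed: integer("is_relayed", { mode: "boolean" }).notNull(),
    isJitMode: integer("is_jit_mode", { mode: "boolean" }).notNull()
});

// Build a condition that only matches rows whose flags actually differ from
// the desired state; pass the result to .where() on an update.
const whereFlagsDiffer = (clientId: number, relayed: boolean, jit: boolean) =>
    and(
        eq(cache.clientId, clientId),
        or(ne(cache.isRelayed, relayed), ne(cache.isJitMode, jit))
    );
```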
```diff
@@ -310,7 +325,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
     if (now - (client.lastHolePunch || 0) > 5 && sitesCount > 0) {
         logger.warn(
             `[handleOlmRegisterMessage] Client last hole punch is too old and we have sites to send; skipping this register. The client is failing to hole punch and identify its network address with the server. Can the client reach the server on UDP port ${config.getRawConfig().gerbil.clients_start_port}?`,
-            { orgId: client.orgId }
+            { orgId: client.orgId, clientId: client.clientId }
         );
         return;
     }
```