diff --git a/server/routers/newt/buildConfiguration.ts b/server/routers/newt/buildConfiguration.ts
new file mode 100644
index 00000000..bd8bfc18
--- /dev/null
+++ b/server/routers/newt/buildConfiguration.ts
@@ -0,0 +1,271 @@
+export async function buildClientConfigurationForNewtClient(
+    site: Site,
+    exitNode?: ExitNode
+) {
+    const siteId = site.siteId;
+
+    // Get all clients connected to this site
+    const clientsRes = await db
+        .select()
+        .from(clients)
+        .innerJoin(
+            clientSitesAssociationsCache,
+            eq(clients.clientId, clientSitesAssociationsCache.clientId)
+        )
+        .where(eq(clientSitesAssociationsCache.siteId, siteId));
+
+    let peers: Array<{
+        publicKey: string;
+        allowedIps: string[];
+        endpoint?: string;
+    }> = [];
+
+    if (site.publicKey && site.endpoint && exitNode) {
+        // Prepare peers data for the response
+        peers = await Promise.all(
+            clientsRes
+                .filter((client) => {
+                    if (!client.clients.pubKey) {
+                        logger.warn(
+                            `Client ${client.clients.clientId} has no public key, skipping`
+                        );
+                        return false;
+                    }
+                    if (!client.clients.subnet) {
+                        logger.warn(
+                            `Client ${client.clients.clientId} has no subnet, skipping`
+                        );
+                        return false;
+                    }
+                    return true;
+                })
+                .map(async (client) => {
+                    // Add or update this peer on the olm if it is connected
+
+                    // const allSiteResources = await db // only get the site resources that this client has access to
+                    //     .select()
+                    //     .from(siteResources)
+                    //     .innerJoin(
+                    //         clientSiteResourcesAssociationsCache,
+                    //         eq(
+                    //             siteResources.siteResourceId,
+                    //             clientSiteResourcesAssociationsCache.siteResourceId
+                    //         )
+                    //     )
+                    //     .where(
+                    //         and(
+                    //             eq(siteResources.siteId, site.siteId),
+                    //             eq(
+                    //                 clientSiteResourcesAssociationsCache.clientId,
+                    //                 client.clients.clientId
+                    //             )
+                    //         )
+                    //     );
+
+                    // Update the peer info on the olm;
+                    // if the peer has not been added yet, this will be a no-op
+                    await updatePeer(client.clients.clientId, {
+                        siteId: site.siteId,
+                        endpoint: site.endpoint!,
+                        relayEndpoint: `${exitNode.endpoint}:${config.getRawConfig().gerbil.clients_start_port}`,
+                        publicKey: site.publicKey!,
+                        serverIP: site.address,
+                        serverPort: site.listenPort
+                        // remoteSubnets: generateRemoteSubnets(
+                        //     allSiteResources.map(
+                        //         ({ siteResources }) => siteResources
+                        //     )
+                        // ),
+                        // aliases: generateAliasConfig(
+                        //     allSiteResources.map(
+                        //         ({ siteResources }) => siteResources
+                        //     )
+                        // )
+                    });
+
+                    // Also trigger the peer-add handshake in case the peer was
+                    // not already added to the olm and we need to hole punch;
+                    // if it has already been added, this will be a no-op
+                    await initPeerAddHandshake(
+                        // this kicks off the add-peer process for the client
+                        client.clients.clientId,
+                        {
+                            siteId,
+                            exitNode: {
+                                publicKey: exitNode.publicKey,
+                                endpoint: exitNode.endpoint
+                            }
+                        }
+                    );
+
+                    return {
+                        publicKey: client.clients.pubKey!,
+                        allowedIps: [
+                            `${client.clients.subnet.split("/")[0]}/32`
+                        ], // we only want to allow traffic from that client
+                        endpoint: client.clientSitesAssociationsCache.isRelayed
+                            ? ""
+                            : client.clientSitesAssociationsCache.endpoint! // if it's relayed it should be localhost
+                    };
+                })
+        );
+    }
+
+    // Filter out any null values from peers that didn't have an olm
+    const validPeers = peers.filter((peer) => peer !== null);
+
+    // Get all site resources for this site
+    const allSiteResources = await db
+        .select()
+        .from(siteResources)
+        .where(eq(siteResources.siteId, siteId));
+
+    const targetsToSend: SubnetProxyTarget[] = [];
+
+    for (const resource of allSiteResources) {
+        // Get clients associated with this specific resource
+        const resourceClients = await db
+            .select({
+                clientId: clients.clientId,
+                pubKey: clients.pubKey,
+                subnet: clients.subnet
+            })
+            .from(clients)
+            .innerJoin(
+                clientSiteResourcesAssociationsCache,
+                eq(
+                    clients.clientId,
+                    clientSiteResourcesAssociationsCache.clientId
+                )
+            )
+            .where(
+                eq(
+                    clientSiteResourcesAssociationsCache.siteResourceId,
+                    resource.siteResourceId
+                )
+            );
+
+        const resourceTargets = generateSubnetProxyTargets(
+            resource,
+            resourceClients
+        );
+
+        targetsToSend.push(...resourceTargets);
+    }
+
+    return {
+        peers: validPeers,
+        targets: targetsToSend
+    };
+}
+
+export async function buildTargetConfigurationForNewtClient(siteId: number) {
+    // Get all enabled targets with their resource protocol information
+    const allTargets = await db
+        .select({
+            resourceId: targets.resourceId,
+            targetId: targets.targetId,
+            ip: targets.ip,
+            method: targets.method,
+            port: targets.port,
+            internalPort: targets.internalPort,
+            enabled: targets.enabled,
+            protocol: resources.protocol,
+            hcEnabled: targetHealthCheck.hcEnabled,
+            hcPath: targetHealthCheck.hcPath,
+            hcScheme: targetHealthCheck.hcScheme,
+            hcMode: targetHealthCheck.hcMode,
+            hcHostname: targetHealthCheck.hcHostname,
+            hcPort: targetHealthCheck.hcPort,
+            hcInterval: targetHealthCheck.hcInterval,
+            hcUnhealthyInterval: targetHealthCheck.hcUnhealthyInterval,
+            hcTimeout: targetHealthCheck.hcTimeout,
+            hcHeaders: targetHealthCheck.hcHeaders,
+            hcMethod: targetHealthCheck.hcMethod,
+            hcTlsServerName: targetHealthCheck.hcTlsServerName
+        })
+        .from(targets)
+        .innerJoin(resources, eq(targets.resourceId, resources.resourceId))
+        .leftJoin(
+            targetHealthCheck,
+            eq(targets.targetId, targetHealthCheck.targetId)
+        )
+        .where(and(eq(targets.siteId, siteId), eq(targets.enabled, true)));
+
+    const { tcpTargets, udpTargets } = allTargets.reduce(
+        (acc, target) => {
+            // Filter out invalid targets
+            if (!target.internalPort || !target.ip || !target.port) {
+                return acc;
+            }
+
+            // Format the target into a string
+            const formattedTarget = `${target.internalPort}:${target.ip}:${target.port}`;
+
+            // Add it to the appropriate protocol array
+            if (target.protocol === "tcp") {
+                acc.tcpTargets.push(formattedTarget);
+            } else {
+                acc.udpTargets.push(formattedTarget);
+            }
+
+            return acc;
+        },
+        { tcpTargets: [] as string[], udpTargets: [] as string[] }
+    );
+
+    const healthCheckTargets = allTargets.map((target) => {
+        // Make sure the required health check fields are defined
+        if (
+            !target.hcPath ||
+            !target.hcHostname ||
+            !target.hcPort ||
+            !target.hcInterval ||
+            !target.hcMethod
+        ) {
+            logger.debug(
+                `Skipping target ${target.targetId} due to missing health check fields`
+            );
+            return null; // Skip targets with missing health check fields
+        }
+
+        // Parse the stored health check headers JSON
+        const hcHeadersParse = target.hcHeaders
+            ? JSON.parse(target.hcHeaders)
+            : null;
+        const hcHeadersSend: { [key: string]: string } = {};
+        if (hcHeadersParse) {
+            hcHeadersParse.forEach(
+                (header: { name: string; value: string }) => {
+                    hcHeadersSend[header.name] = header.value;
+                }
+            );
+        }
+
+        return {
+            id: target.targetId,
+            hcEnabled: target.hcEnabled,
+            hcPath: target.hcPath,
+            hcScheme: target.hcScheme,
+            hcMode: target.hcMode,
+            hcHostname: target.hcHostname,
+            hcPort: target.hcPort,
+            hcInterval: target.hcInterval, // in seconds
+            hcUnhealthyInterval: target.hcUnhealthyInterval, // in seconds
+            hcTimeout: target.hcTimeout, // in seconds
+            hcHeaders: hcHeadersSend,
+            hcMethod: target.hcMethod,
+            hcTlsServerName: target.hcTlsServerName
+        };
+    });
+
+    // Filter out any null values from the health check targets
+    const validHealthCheckTargets = healthCheckTargets.filter(
+        (target) => target !== null
+    );
+
+    return {
+        validHealthCheckTargets,
+        tcpTargets,
+        udpTargets
+    };
+}
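The two payloads this module hands to a newt are easiest to see side by side. Below is a sketch inferred from the return statements above (the interface name is hypothetical; SubnetProxyTarget is defined elsewhere in the codebase), plus a hypothetical consumer-side parse of the target strings — the naive split assumes IPv4 addresses, since an IPv6 address would introduce extra colons.

// Hypothetical names — sketched from the return values above, not part of this PR.
interface NewtClientConfiguration {
    peers: Array<{
        publicKey: string;
        allowedIps: string[]; // one /32 host route per client
        endpoint?: string; // "" when the client connection is relayed
    }>;
    targets: SubnetProxyTarget[];
}

// Targets are serialized as "internalPort:ip:port"; a consumer could split
// them back apart like this (assumes IPv4):
function parseTarget(formatted: string) {
    const [internalPort, ip, port] = formatted.split(":");
    return { internalPort: Number(internalPort), ip, port: Number(port) };
}
// parseTarget("8080:10.0.0.12:80") -> { internalPort: 8080, ip: "10.0.0.12", port: 80 }

// hcHeaders is stored as a JSON array of name/value pairs, e.g.
// '[{"name":"Host","value":"internal.example.com"}]' (inferred from the forEach above).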
diff --git a/server/routers/newt/handleGetConfigMessage.ts b/server/routers/newt/handleGetConfigMessage.ts
index 927fed50..02e3b864 100644
--- a/server/routers/newt/handleGetConfigMessage.ts
+++ b/server/routers/newt/handleGetConfigMessage.ts
@@ -156,163 +156,3 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
         excludeSender: false,
     };
 };
-
-export async function buildClientConfigurationForNewtClient(
-    site: Site,
-    exitNode?: ExitNode
-) {
-    const siteId = site.siteId;
-
-    // Get all clients connected to this site
-    const clientsRes = await db
-        .select()
-        .from(clients)
-        .innerJoin(
-            clientSitesAssociationsCache,
-            eq(clients.clientId, clientSitesAssociationsCache.clientId)
-        )
-        .where(eq(clientSitesAssociationsCache.siteId, siteId));
-
-    let peers: Array<{
-        publicKey: string;
-        allowedIps: string[];
-        endpoint?: string;
-    }> = [];
-
-    if (site.publicKey && site.endpoint && exitNode) {
-        // Prepare peers data for the response
-        peers = await Promise.all(
-            clientsRes
-                .filter((client) => {
-                    if (!client.clients.pubKey) {
-                        logger.warn(
-                            `Client ${client.clients.clientId} has no public key, skipping`
-                        );
-                        return false;
-                    }
-                    if (!client.clients.subnet) {
-                        logger.warn(
-                            `Client ${client.clients.clientId} has no subnet, skipping`
-                        );
-                        return false;
-                    }
-                    return true;
-                })
-                .map(async (client) => {
-                    // Add or update this peer on the olm if it is connected
-
-                    // const allSiteResources = await db // only get the site resources that this client has access to
-                    //     .select()
-                    //     .from(siteResources)
-                    //     .innerJoin(
-                    //         clientSiteResourcesAssociationsCache,
-                    //         eq(
-                    //             siteResources.siteResourceId,
-                    //             clientSiteResourcesAssociationsCache.siteResourceId
-                    //         )
-                    //     )
-                    //     .where(
-                    //         and(
-                    //             eq(siteResources.siteId, site.siteId),
-                    //             eq(
-                    //                 clientSiteResourcesAssociationsCache.clientId,
-                    //                 client.clients.clientId
-                    //             )
-                    //         )
-                    //     );
-
-                    // update the peer info on the olm
-                    // if the peer has not been added yet this will be a no-op
-                    await updatePeer(client.clients.clientId, {
-                        siteId: site.siteId,
-                        endpoint: site.endpoint!,
-                        relayEndpoint: `${exitNode.endpoint}:${config.getRawConfig().gerbil.clients_start_port}`,
-                        publicKey: site.publicKey!,
-                        serverIP: site.address,
-                        serverPort: site.listenPort
-                        // remoteSubnets: generateRemoteSubnets(
-                        //     allSiteResources.map(
-                        //         ({ siteResources }) => siteResources
-                        //     )
-                        // ),
-                        // aliases: generateAliasConfig(
-                        //     allSiteResources.map(
-                        //         ({ siteResources }) => siteResources
-                        //     )
-                        // )
-                    });
-
-                    // also trigger the peer add handshake in case the peer was not already added to the olm and we need to hole punch
-                    // if it has already been added this will be a no-op
-                    await initPeerAddHandshake(
-                        // this will kick off the add peer process for the client
-                        client.clients.clientId,
-                        {
-                            siteId,
-                            exitNode: {
-                                publicKey: exitNode.publicKey,
-                                endpoint: exitNode.endpoint
-                            }
-                        }
-                    );
-
-                    return {
-                        publicKey: client.clients.pubKey!,
-                        allowedIps: [
-                            `${client.clients.subnet.split("/")[0]}/32`
-                        ], // we want to only allow from that client
-                        endpoint: client.clientSitesAssociationsCache.isRelayed
-                            ? ""
-                            : client.clientSitesAssociationsCache.endpoint! // if its relayed it should be localhost
-                    };
-                })
-        );
-    }
-
-    // Filter out any null values from peers that didn't have an olm
-    const validPeers = peers.filter((peer) => peer !== null);
-
-    // Get all enabled site resources for this site
-    const allSiteResources = await db
-        .select()
-        .from(siteResources)
-        .where(eq(siteResources.siteId, siteId));
-
-    const targetsToSend: SubnetProxyTarget[] = [];
-
-    for (const resource of allSiteResources) {
-        // Get clients associated with this specific resource
-        const resourceClients = await db
-            .select({
-                clientId: clients.clientId,
-                pubKey: clients.pubKey,
-                subnet: clients.subnet
-            })
-            .from(clients)
-            .innerJoin(
-                clientSiteResourcesAssociationsCache,
-                eq(
-                    clients.clientId,
-                    clientSiteResourcesAssociationsCache.clientId
-                )
-            )
-            .where(
-                eq(
-                    clientSiteResourcesAssociationsCache.siteResourceId,
-                    resource.siteResourceId
-                )
-            );
-
-        const resourceTargets = generateSubnetProxyTargets(
-            resource,
-            resourceClients
-        );
-
-        targetsToSend.push(...resourceTargets);
-    }
-
-    return {
-        peers: validPeers,
-        targets: targetsToSend
-    };
-}
diff --git a/server/routers/newt/sync.ts b/server/routers/newt/sync.ts
new file mode 100644
index 00000000..c876bf7e
--- /dev/null
+++ b/server/routers/newt/sync.ts
@@ -0,0 +1,18 @@
+import { Client, Olm } from "@server/db";
+import { buildSiteConfigurationForOlmClient } from "../olm/buildConfiguration";
+import { sendToClient } from "#dynamic/routers/ws";
+import logger from "@server/logger";
+
+export async function sendOlmSyncMessage(olm: Olm, client: Client) {
+    // NOTE: the relay parameter is hardcoded to false here, but in the register message it is defined by the client
+    const siteConfigurations = await buildSiteConfigurationForOlmClient(client, client.pubKey, false);
+
+    await sendToClient(olm.olmId, {
+        type: "olm/sync",
+        data: {
+            sites: siteConfigurations
+        }
+    }).catch((error) => {
+        logger.warn(`Error sending olm sync message:`, error);
+    });
+}
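A sketch of a call site for sendOlmSyncMessage, assuming the caller still needs to look up the olm row and its client row (the olms table, olm.clientId column, and the drizzle-style queries below are assumptions based on the schema used elsewhere in this PR, not part of this diff):

// Hypothetical call site: push a fresh site configuration to a connected olm.
import { db, olms, clients } from "@server/db";
import { eq } from "drizzle-orm";
import { sendOlmSyncMessage } from "@server/routers/newt/sync";

const [olm] = await db.select().from(olms).where(eq(olms.olmId, olmId)).limit(1);
const [client] = await db
    .select()
    .from(clients)
    .where(eq(clients.clientId, olm.clientId))
    .limit(1);
if (olm && client) {
    await sendOlmSyncMessage(olm, client); // fires an "olm/sync" message over the websocket
}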
diff --git a/server/routers/olm/buildSiteConfigurationForOlmClient.ts b/server/routers/olm/buildConfiguration.ts
similarity index 98%
rename from server/routers/olm/buildSiteConfigurationForOlmClient.ts
rename to server/routers/olm/buildConfiguration.ts
index 0517e764..b506366b 100644
--- a/server/routers/olm/buildSiteConfigurationForOlmClient.ts
+++ b/server/routers/olm/buildConfiguration.ts
@@ -7,7 +7,7 @@ import config from "@server/lib/config";
 
 export async function buildSiteConfigurationForOlmClient(
     client: Client,
-    publicKey: string,
+    publicKey: string | null,
     relay: boolean
 ) {
     const siteConfigurations = [];
@@ -74,7 +74,7 @@ export async function buildSiteConfigurationForOlmClient(
         .limit(1);
 
     // Add the peer to the exit node for this site
-    if (clientSite.endpoint) {
+    if (clientSite.endpoint && publicKey) {
         logger.info(
             `Adding peer ${publicKey} to site ${site.siteId} with endpoint ${clientSite.endpoint}`
         );
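Because this is a rename, every importer's path changes with it. The two router directories touched by this PR would resolve the module as follows (a sketch of the relative paths, consistent with the rename above):

// From within server/routers/olm:
import { buildSiteConfigurationForOlmClient } from "./buildConfiguration";

// From a sibling router such as server/routers/newt:
import { buildSiteConfigurationForOlmClient } from "../olm/buildConfiguration";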
diff --git a/server/routers/olm/handleOlmRegisterMessage.ts b/server/routers/olm/handleOlmRegisterMessage.ts
index eb2a5678..a6b30a4e 100644
--- a/server/routers/olm/handleOlmRegisterMessage.ts
+++ b/server/routers/olm/handleOlmRegisterMessage.ts
@@ -174,6 +174,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
         return;
     }
 
+    // NOTE: it's important that the client here is the old client and the public key is the new key
     const siteConfigurations = await buildSiteConfigurationForOlmClient(client, publicKey, relay);
 
     // REMOVED THIS SO IT CREATES THE INTERFACE AND JUST WAITS FOR THE SITES
diff --git a/server/routers/olm/sync.ts b/server/routers/olm/sync.ts
index ce6b8ab9..c876bf7e 100644
--- a/server/routers/olm/sync.ts
+++ b/server/routers/olm/sync.ts
@@ -1,7 +1,18 @@
 import { Client, Olm } from "@server/db";
-import { buildSiteConfigurationForOlmClient } from "./buildSiteConfigurationForOlmClient";
+import { buildSiteConfigurationForOlmClient } from "./buildConfiguration";
+import { sendToClient } from "#dynamic/routers/ws";
+import logger from "@server/logger";
 
 export async function sendOlmSyncMessage(olm: Olm, client: Client) {
-    const siteConfigurations = await buildSiteConfigurationForOlmClient(client, publicKey, relay);
+    // NOTE: the relay parameter is hardcoded to false here, but in the register message it is defined by the client
+    const siteConfigurations = await buildSiteConfigurationForOlmClient(client, client.pubKey, false);
+
+    await sendToClient(olm.olmId, {
+        type: "olm/sync",
+        data: {
+            sites: siteConfigurations
+        }
+    }).catch((error) => {
+        logger.warn(`Error sending olm sync message:`, error);
+    });
 }
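For reference, the message sent here has roughly the following wire shape (a sketch; the per-site fields are produced by buildSiteConfigurationForOlmClient and are not spelled out in this diff):

// Approximate shape of the "olm/sync" payload, inferred from the object above.
type OlmSyncMessage = {
    type: "olm/sync";
    data: {
        sites: unknown[]; // site configurations, built with relay hardcoded to false
    };
};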