Mirror of https://github.com/fosrl/pangolin.git (synced 2026-01-28 22:00:51 +00:00)
Separating out functions
This commit is contained in:
271 server/routers/newt/buildConfiguration.ts (Normal file)
@@ -0,0 +1,271 @@
export async function buildClientConfigurationForNewtClient(
    site: Site,
    exitNode?: ExitNode
) {
    const siteId = site.siteId;

    // Get all clients connected to this site
    const clientsRes = await db
        .select()
        .from(clients)
        .innerJoin(
            clientSitesAssociationsCache,
            eq(clients.clientId, clientSitesAssociationsCache.clientId)
        )
        .where(eq(clientSitesAssociationsCache.siteId, siteId));

    let peers: Array<{
        publicKey: string;
        allowedIps: string[];
        endpoint?: string;
    }> = [];

    if (site.publicKey && site.endpoint && exitNode) {
        // Prepare peers data for the response
        peers = await Promise.all(
            clientsRes
                .filter((client) => {
                    if (!client.clients.pubKey) {
                        logger.warn(
                            `Client ${client.clients.clientId} has no public key, skipping`
                        );
                        return false;
                    }
                    if (!client.clients.subnet) {
                        logger.warn(
                            `Client ${client.clients.clientId} has no subnet, skipping`
                        );
                        return false;
                    }
                    return true;
                })
                .map(async (client) => {
                    // Add or update this peer on the olm if it is connected

                    // const allSiteResources = await db // only get the site resources that this client has access to
                    //     .select()
                    //     .from(siteResources)
                    //     .innerJoin(
                    //         clientSiteResourcesAssociationsCache,
                    //         eq(
                    //             siteResources.siteResourceId,
                    //             clientSiteResourcesAssociationsCache.siteResourceId
                    //         )
                    //     )
                    //     .where(
                    //         and(
                    //             eq(siteResources.siteId, site.siteId),
                    //             eq(
                    //                 clientSiteResourcesAssociationsCache.clientId,
                    //                 client.clients.clientId
                    //             )
                    //         )
                    //     );

                    // Update the peer info on the olm;
                    // if the peer has not been added yet this will be a no-op
                    await updatePeer(client.clients.clientId, {
                        siteId: site.siteId,
                        endpoint: site.endpoint!,
                        relayEndpoint: `${exitNode.endpoint}:${config.getRawConfig().gerbil.clients_start_port}`,
                        publicKey: site.publicKey!,
                        serverIP: site.address,
                        serverPort: site.listenPort
                        // remoteSubnets: generateRemoteSubnets(
                        //     allSiteResources.map(
                        //         ({ siteResources }) => siteResources
                        //     )
                        // ),
                        // aliases: generateAliasConfig(
                        //     allSiteResources.map(
                        //         ({ siteResources }) => siteResources
                        //     )
                        // )
                    });

                    // Also trigger the peer add handshake in case the peer was not
                    // already added to the olm and we need to hole punch;
                    // if it has already been added this will be a no-op
                    await initPeerAddHandshake(
                        // this will kick off the add peer process for the client
                        client.clients.clientId,
                        {
                            siteId,
                            exitNode: {
                                publicKey: exitNode.publicKey,
                                endpoint: exitNode.endpoint
                            }
                        }
                    );

                    return {
                        publicKey: client.clients.pubKey!,
                        allowedIps: [
                            `${client.clients.subnet.split("/")[0]}/32`
                        ], // we only want to allow traffic from that client
                        endpoint: client.clientSitesAssociationsCache.isRelayed
                            ? ""
                            : client.clientSitesAssociationsCache.endpoint! // if it's relayed it should be localhost
                    };
                })
        );
    }

    // Filter out any null values from peers that didn't have an olm
    const validPeers = peers.filter((peer) => peer !== null);

    // Get all enabled site resources for this site
    const allSiteResources = await db
        .select()
        .from(siteResources)
        .where(eq(siteResources.siteId, siteId));

    const targetsToSend: SubnetProxyTarget[] = [];

    for (const resource of allSiteResources) {
        // Get clients associated with this specific resource
        const resourceClients = await db
            .select({
                clientId: clients.clientId,
                pubKey: clients.pubKey,
                subnet: clients.subnet
            })
            .from(clients)
            .innerJoin(
                clientSiteResourcesAssociationsCache,
                eq(
                    clients.clientId,
                    clientSiteResourcesAssociationsCache.clientId
                )
            )
            .where(
                eq(
                    clientSiteResourcesAssociationsCache.siteResourceId,
                    resource.siteResourceId
                )
            );

        const resourceTargets = generateSubnetProxyTargets(
            resource,
            resourceClients
        );

        targetsToSend.push(...resourceTargets);
    }

    return {
        peers: validPeers,
        targets: targetsToSend
    };
}

export async function buildTargetConfigurationForNewtClient(siteId: number) {
    // Get all enabled targets with their resource protocol information
    const allTargets = await db
        .select({
            resourceId: targets.resourceId,
            targetId: targets.targetId,
            ip: targets.ip,
            method: targets.method,
            port: targets.port,
            internalPort: targets.internalPort,
            enabled: targets.enabled,
            protocol: resources.protocol,
            hcEnabled: targetHealthCheck.hcEnabled,
            hcPath: targetHealthCheck.hcPath,
            hcScheme: targetHealthCheck.hcScheme,
            hcMode: targetHealthCheck.hcMode,
            hcHostname: targetHealthCheck.hcHostname,
            hcPort: targetHealthCheck.hcPort,
            hcInterval: targetHealthCheck.hcInterval,
            hcUnhealthyInterval: targetHealthCheck.hcUnhealthyInterval,
            hcTimeout: targetHealthCheck.hcTimeout,
            hcHeaders: targetHealthCheck.hcHeaders,
            hcMethod: targetHealthCheck.hcMethod,
            hcTlsServerName: targetHealthCheck.hcTlsServerName
        })
        .from(targets)
        .innerJoin(resources, eq(targets.resourceId, resources.resourceId))
        .leftJoin(
            targetHealthCheck,
            eq(targets.targetId, targetHealthCheck.targetId)
        )
        .where(and(eq(targets.siteId, siteId), eq(targets.enabled, true)));

    const { tcpTargets, udpTargets } = allTargets.reduce(
        (acc, target) => {
            // Filter out invalid targets
            if (!target.internalPort || !target.ip || !target.port) {
                return acc;
            }

            // Format target into string
            const formattedTarget = `${target.internalPort}:${target.ip}:${target.port}`;

            // Add to the appropriate protocol array
            if (target.protocol === "tcp") {
                acc.tcpTargets.push(formattedTarget);
            } else {
                acc.udpTargets.push(formattedTarget);
            }

            return acc;
        },
        { tcpTargets: [] as string[], udpTargets: [] as string[] }
    );

    const healthCheckTargets = allTargets.map((target) => {
        // Make sure the required health check fields are defined
        if (
            !target.hcPath ||
            !target.hcHostname ||
            !target.hcPort ||
            !target.hcInterval ||
            !target.hcMethod
        ) {
            logger.debug(
                `Skipping target ${target.targetId} due to missing health check fields`
            );
            return null; // Skip targets with missing health check fields
        }

        // Parse the stored hcHeaders JSON into a name -> value map
        const hcHeadersParse = target.hcHeaders
            ? JSON.parse(target.hcHeaders)
            : null;
        const hcHeadersSend: { [key: string]: string } = {};
        if (hcHeadersParse) {
            hcHeadersParse.forEach(
                (header: { name: string; value: string }) => {
                    hcHeadersSend[header.name] = header.value;
                }
            );
        }

        return {
            id: target.targetId,
            hcEnabled: target.hcEnabled,
            hcPath: target.hcPath,
            hcScheme: target.hcScheme,
            hcMode: target.hcMode,
            hcHostname: target.hcHostname,
            hcPort: target.hcPort,
            hcInterval: target.hcInterval, // in seconds
            hcUnhealthyInterval: target.hcUnhealthyInterval, // in seconds
            hcTimeout: target.hcTimeout, // in seconds
            hcHeaders: hcHeadersSend,
            hcMethod: target.hcMethod,
            hcTlsServerName: target.hcTlsServerName
        };
    });

    // Filter out any null values from health check targets
    const validHealthCheckTargets = healthCheckTargets.filter(
        (target) => target !== null
    );

    return {
        validHealthCheckTargets,
        tcpTargets,
        udpTargets
    };
}
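These two builders separate out what handleGetConfigMessage previously did inline (see the hunk below). As a rough illustration only, a caller might combine them into one payload; buildFullNewtConfig and the shape of the returned object are assumptions for this sketch, not part of the commit (Site and ExitNode are assumed to come from "@server/db", matching the imports elsewhere in the repo):

import { Site, ExitNode } from "@server/db";
import {
    buildClientConfigurationForNewtClient,
    buildTargetConfigurationForNewtClient
} from "./buildConfiguration";

// Hypothetical aggregator: gather peer and target config for one newt site.
async function buildFullNewtConfig(site: Site, exitNode?: ExitNode) {
    // WireGuard peers plus subnet proxy targets for connected clients
    const { peers, targets } = await buildClientConfigurationForNewtClient(
        site,
        exitNode
    );
    // "internalPort:ip:port" proxy strings plus health check definitions
    const { tcpTargets, udpTargets, validHealthCheckTargets } =
        await buildTargetConfigurationForNewtClient(site.siteId);

    return {
        peers,
        targets,
        tcpTargets,
        udpTargets,
        healthCheckTargets: validHealthCheckTargets
    };
}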
@@ -156,163 +156,3 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
         excludeSender: false,
     };
 };
[... 160 removed lines: the buildClientConfigurationForNewtClient function, moved verbatim into server/routers/newt/buildConfiguration.ts above ...]
18 server/routers/newt/sync.ts (Normal file)
@@ -0,0 +1,18 @@
|
||||
import { Client, Olm } from "@server/db";
|
||||
import { buildSiteConfigurationForOlmClient } from "./buildSiteConfigurationForOlmClient";
|
||||
import { sendToClient } from "#dynamic/routers/ws";
|
||||
import logger from "@server/logger";
|
||||
|
||||
export async function sendOlmSyncMessage(olm: Olm, client: Client) {
|
||||
// NOTE: WE ARE HARDCODING THE RELAY PARAMETER TO FALSE HERE BUT IN THE REGISTER MESSAGE ITS DEFINED BY THE CLIENT
|
||||
const siteConfigurations = await buildSiteConfigurationForOlmClient(client, client.pubKey, false);
|
||||
|
||||
await sendToClient(olm.olmId, {
|
||||
type: "olm/sync",
|
||||
data: {
|
||||
sites: siteConfigurations
|
||||
}
|
||||
}).catch((error) => {
|
||||
logger.warn(`Error sending olm sync message:`, error);
|
||||
});
|
||||
}
|
||||
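For context, a hedged sketch of a possible call site: after a client's site associations change, look up its olm and push a sync. The olms table export and its clientId column are assumptions here, not shown in this commit:

import { db, olms, clients } from "@server/db";
import { eq } from "drizzle-orm";
import { sendOlmSyncMessage } from "./sync";

// Hypothetical helper: resync one client's olm after its sites change.
async function resyncClientOlm(clientId: number) {
    const [client] = await db
        .select()
        .from(clients)
        .where(eq(clients.clientId, clientId))
        .limit(1);
    const [olm] = await db
        .select()
        .from(olms)
        .where(eq(olms.clientId, clientId)) // assumed association column
        .limit(1);
    if (client && olm) {
        await sendOlmSyncMessage(olm, client);
    }
}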
@@ -7,7 +7,7 @@ import config from "@server/lib/config";

 export async function buildSiteConfigurationForOlmClient(
     client: Client,
-    publicKey: string,
+    publicKey: string | null,
     relay: boolean
 ) {
     const siteConfigurations = [];
@@ -74,7 +74,7 @@ export async function buildSiteConfigurationForOlmClient(
         .limit(1);

     // Add the peer to the exit node for this site
-    if (clientSite.endpoint) {
+    if (clientSite.endpoint && publicKey) {
         logger.info(
             `Adding peer ${publicKey} to site ${site.siteId} with endpoint ${clientSite.endpoint}`
         );
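Taken together, these two hunks let callers pass a missing key: a null publicKey (e.g. from the sync path, where client.pubKey may still be unset) now skips the add-peer step instead of registering an invalid peer on the exit node. A minimal sketch of that contract, paraphrasing the guard above; the function and values are illustrative only:

// Paraphrase of the guard: only register the peer when both pieces exist.
function shouldAddPeer(
    endpoint: string | null,
    publicKey: string | null
): boolean {
    return Boolean(endpoint && publicKey);
}

shouldAddPeer("203.0.113.7:51820", null); // false -> peer add skipped
shouldAddPeer("203.0.113.7:51820", "base64key..."); // true -> peer added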
@@ -174,6 +174,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
         return;
     }

+    // NOTE: it's important that the client here is the old client and the public key is the new key
     const siteConfigurations = await buildSiteConfigurationForOlmClient(client, publicKey, relay);

     // REMOVED THIS SO IT CREATES THE INTERFACE AND JUST WAITS FOR THE SITES
@@ -1,7 +1,18 @@
 import { Client, Olm } from "@server/db";
 import { buildSiteConfigurationForOlmClient } from "./buildSiteConfigurationForOlmClient";
 import { sendToClient } from "#dynamic/routers/ws";
 import logger from "@server/logger";

 export async function sendOlmSyncMessage(olm: Olm, client: Client) {
-    const siteConfigurations = await buildSiteConfigurationForOlmClient(client, publicKey, relay);
+    // NOTE: WE ARE HARDCODING THE RELAY PARAMETER TO FALSE HERE BUT IN THE REGISTER MESSAGE IT'S DEFINED BY THE CLIENT
+    const siteConfigurations = await buildSiteConfigurationForOlmClient(client, client.pubKey, false);
+
+    await sendToClient(olm.olmId, {
+        type: "olm/sync",
+        data: {
+            sites: siteConfigurations
+        }
+    }).catch((error) => {
+        logger.warn(`Error sending olm sync message:`, error);
+    });
+}