Merge branch 'dev' into hybrid

This commit is contained in:
Owen
2025-08-12 15:02:43 -07:00
8 changed files with 218 additions and 210 deletions

View File

@@ -516,7 +516,7 @@ export const clients = pgTable("clients", {
lastPing: varchar("lastPing"),
type: varchar("type").notNull(), // "olm"
online: boolean("online").notNull().default(false),
endpoint: varchar("endpoint"),
// endpoint: varchar("endpoint"),
lastHolePunch: integer("lastHolePunch"),
maxConnections: integer("maxConnections")
});
@@ -528,7 +528,8 @@ export const clientSites = pgTable("clientSites", {
siteId: integer("siteId")
.notNull()
.references(() => sites.siteId, { onDelete: "cascade" }),
isRelayed: boolean("isRelayed").notNull().default(false)
isRelayed: boolean("isRelayed").notNull().default(false),
endpoint: varchar("endpoint")
});
export const olms = pgTable("olms", {

View File

@@ -216,7 +216,7 @@ export const clients = sqliteTable("clients", {
lastPing: text("lastPing"),
type: text("type").notNull(), // "olm"
online: integer("online", { mode: "boolean" }).notNull().default(false),
endpoint: text("endpoint"),
// endpoint: text("endpoint"),
lastHolePunch: integer("lastHolePunch")
});
@@ -227,7 +227,8 @@ export const clientSites = sqliteTable("clientSites", {
siteId: integer("siteId")
.notNull()
.references(() => sites.siteId, { onDelete: "cascade" }),
isRelayed: integer("isRelayed", { mode: "boolean" }).notNull().default(false)
isRelayed: integer("isRelayed", { mode: "boolean" }).notNull().default(false),
endpoint: text("endpoint")
});
export const olms = sqliteTable("olms", {

View File

@@ -129,7 +129,7 @@ export async function updateClient(
`Adding ${sitesAdded.length} new sites to client ${client.clientId}`
);
for (const siteId of sitesAdded) {
if (!client.subnet || !client.pubKey || !client.endpoint) {
if (!client.subnet || !client.pubKey) {
logger.debug(
"Client subnet, pubKey or endpoint is not set"
);
@@ -140,10 +140,25 @@ export async function updateClient(
// BUT REALLY WE NEED TO TRACK THE USERS PREFERENCE THAT THEY CHOSE IN THE CLIENTS
const isRelayed = true;
// get the clientsite
const [clientSite] = await db
.select()
.from(clientSites)
.where(and(
eq(clientSites.clientId, client.clientId),
eq(clientSites.siteId, siteId)
))
.limit(1);
if (!clientSite || !clientSite.endpoint) {
logger.debug("Client site is missing or has no endpoint");
continue;
}
const site = await newtAddPeer(siteId, {
publicKey: client.pubKey,
allowedIps: [`${client.subnet.split("/")[0]}/32`], // we want to only allow from that client
endpoint: isRelayed ? "" : client.endpoint
endpoint: isRelayed ? "" : clientSite.endpoint
});
if (!site) {
@@ -255,7 +270,6 @@ export async function updateClient(
}
}
if (client.endpoint) {
// get all sites for this client and join with exit nodes with site.exitNodeId
const sitesData = await db
.select()
@@ -272,6 +286,8 @@ export async function updateClient(
let exitNodeDestinations: {
reachableAt: string;
sourceIp: string;
sourcePort: number;
destinations: PeerDestination[];
}[] = [];
@@ -282,6 +298,14 @@ export async function updateClient(
);
continue;
}
if (!site.clientSites.endpoint) {
logger.warn(
`Site ${site.sites.siteId} has no endpoint, skipping`
);
continue;
}
// find the destinations in the array
let destinations = exitNodeDestinations.find(
(d) => d.reachableAt === site.exitNodes?.reachableAt
@@ -290,6 +314,8 @@ export async function updateClient(
if (!destinations) {
destinations = {
reachableAt: site.exitNodes?.reachableAt || "",
sourceIp: site.clientSites.endpoint.split(":")[0] || "",
sourcePort: parseInt(site.clientSites.endpoint.split(":")[1]) || 0,
destinations: [
{
destinationIP:
@@ -319,8 +345,8 @@ export async function updateClient(
`Updating destinations for exit node at ${destination.reachableAt}`
);
const payload = {
sourceIp: client.endpoint?.split(":")[0] || "",
sourcePort: parseInt(client.endpoint?.split(":")[1]) || 0,
sourceIp: destination.sourceIp,
sourcePort: destination.sourcePort,
destinations: destination.destinations
};
logger.info(
@@ -351,7 +377,6 @@ export async function updateClient(
}
}
}
}
// Fetch the updated client
const [updatedClient] = await trx

View File

@@ -78,19 +78,13 @@ export async function getAllRelays(
.where(eq(clientSites.siteId, site.siteId));
for (const clientSite of clientSitesRes) {
// Get client information
const [client] = await db
.select()
.from(clients)
.where(eq(clients.clientId, clientSite.clientId));
if (!client || !client.endpoint) {
if (!clientSite.endpoint) {
continue;
}
// Add this site as a destination for the client
if (!mappings[client.endpoint]) {
mappings[client.endpoint] = { destinations: [] };
if (!mappings[clientSite.endpoint]) {
mappings[clientSite.endpoint] = { destinations: [] };
}
// Add site as a destination for this client
@@ -100,13 +94,13 @@ export async function getAllRelays(
};
// Check if this destination is already in the array to avoid duplicates
const isDuplicate = mappings[client.endpoint].destinations.some(
const isDuplicate = mappings[clientSite.endpoint].destinations.some(
dest => dest.destinationIP === destination.destinationIP &&
dest.destinationPort === destination.destinationPort
);
if (!isDuplicate) {
mappings[client.endpoint].destinations.push(destination);
mappings[clientSite.endpoint].destinations.push(destination);
}
}

View File

@@ -31,7 +31,7 @@ export const receiveBandwidth = async (
const currentTime = new Date();
const oneMinuteAgo = new Date(currentTime.getTime() - 60000); // 1 minute ago
logger.debug(`Received data: ${JSON.stringify(bandwidthData)}`);
// logger.debug(`Received data: ${JSON.stringify(bandwidthData)}`);
await db.transaction(async (trx) => {
// First, handle sites that are actively reporting bandwidth

View File

@@ -1,8 +1,17 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { clients, newts, olms, Site, sites, clientSites, exitNodes } from "@server/db";
import {
clients,
newts,
olms,
Site,
sites,
clientSites,
exitNodes,
ExitNode
} from "@server/db";
import { db } from "@server/db";
import { eq } from "drizzle-orm";
import { eq, and } from "drizzle-orm";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
@@ -19,7 +28,8 @@ const updateHolePunchSchema = z.object({
ip: z.string(),
port: z.number(),
timestamp: z.number(),
reachableAt: z.string().optional()
reachableAt: z.string().optional(),
publicKey: z.string().optional()
});
// New response type with multi-peer destination support
@@ -45,13 +55,24 @@ export async function updateHolePunch(
);
}
const { olmId, newtId, ip, port, timestamp, token, reachableAt } = parsedParams.data;
const {
olmId,
newtId,
ip,
port,
timestamp,
token,
reachableAt,
publicKey
} = parsedParams.data;
let currentSiteId: number | undefined;
let destinations: PeerDestination[] = [];
if (olmId) {
logger.debug(`Got hole punch with ip: ${ip}, port: ${port} for olmId: ${olmId}`);
logger.debug(
`Got hole punch with ip: ${ip}, port: ${port} for olmId: ${olmId}${publicKey ? ` with exit node publicKey: ${publicKey}` : ""}`
);
const { session, olm: olmSession } =
await validateOlmSessionToken(token);
@@ -62,7 +83,9 @@ export async function updateHolePunch(
}
if (olmId !== olmSession.olmId) {
logger.warn(`Olm ID mismatch: ${olmId} !== ${olmSession.olmId}`);
logger.warn(
`Olm ID mismatch: ${olmId} !== ${olmSession.olmId}`
);
return next(
createHttpError(HttpCode.UNAUTHORIZED, "Unauthorized")
);
@@ -83,12 +106,64 @@ export async function updateHolePunch(
const [client] = await db
.update(clients)
.set({
endpoint: `${ip}:${port}`,
lastHolePunch: timestamp
})
.where(eq(clients.clientId, olm.clientId))
.returning();
let exitNode: ExitNode | undefined;
if (publicKey) {
// Get the exit node by public key
[exitNode] = await db
.select()
.from(exitNodes)
.where(eq(exitNodes.publicKey, publicKey));
} else {
// FOR BACKWARDS COMPATIBILITY IF GERBIL IS STILL <= 1.1.0
[exitNode] = await db.select().from(exitNodes).limit(1);
}
if (!exitNode) {
logger.warn(`Exit node not found for publicKey: ${publicKey}`);
return next(
createHttpError(HttpCode.NOT_FOUND, "Exit node not found")
);
}
// Get sites that are on this specific exit node and connected to this client
const sitesOnExitNode = await db
.select({ siteId: sites.siteId, subnet: sites.subnet, listenPort: sites.listenPort })
.from(sites)
.innerJoin(clientSites, eq(sites.siteId, clientSites.siteId))
.where(
and(
eq(sites.exitNodeId, exitNode.exitNodeId),
eq(clientSites.clientId, olm.clientId)
)
);
// Update clientSites for each site on this exit node
for (const site of sitesOnExitNode) {
logger.debug(
`Updating site ${site.siteId} on exit node with publicKey: ${publicKey}`
);
await db
.update(clientSites)
.set({
endpoint: `${ip}:${port}`
})
.where(
and(
eq(clientSites.clientId, olm.clientId),
eq(clientSites.siteId, site.siteId)
)
);
}
logger.debug(
`Updated ${sitesOnExitNode.length} sites on exit node with publicKey: ${publicKey}`
);
if (!client) {
logger.warn(`Client not found for olm: ${olmId}`);
return next(
@@ -96,131 +171,20 @@ export async function updateHolePunch(
);
}
// // Get all sites that this client is connected to
// const clientSitePairs = await db
// .select()
// .from(clientSites)
// .where(eq(clientSites.clientId, client.clientId));
// if (clientSitePairs.length === 0) {
// logger.warn(`No sites found for client: ${client.clientId}`);
// return next(
// createHttpError(HttpCode.NOT_FOUND, "No sites found for client")
// );
// }
// // Get all sites details
// const siteIds = clientSitePairs.map(pair => pair.siteId);
// for (const siteId of siteIds) {
// const [site] = await db
// .select()
// .from(sites)
// .where(eq(sites.siteId, siteId));
// if (site && site.subnet && site.listenPort) {
// destinations.push({
// destinationIP: site.subnet.split("/")[0],
// destinationPort: site.listenPort
// });
// }
// }
// get all sites for this client and join with exit nodes with site.exitNodeId
const sitesData = await db
.select()
.from(sites)
.innerJoin(clientSites, eq(sites.siteId, clientSites.siteId))
.leftJoin(exitNodes, eq(sites.exitNodeId, exitNodes.exitNodeId))
.where(eq(clientSites.clientId, client.clientId));
let exitNodeDestinations: {
reachableAt: string;
destinations: PeerDestination[];
}[] = [];
for (const site of sitesData) {
if (!site.sites.subnet) {
logger.warn(`Site ${site.sites.siteId} has no subnet, skipping`);
continue;
}
// find the destinations in the array
let destinations = exitNodeDestinations.find(
(d) => d.reachableAt === site.exitNodes?.reachableAt
);
if (!destinations) {
destinations = {
reachableAt: site.exitNodes?.reachableAt || "",
destinations: [
{
destinationIP: site.sites.subnet.split("/")[0],
destinationPort: site.sites.listenPort || 0
}
]
};
} else {
// add to the existing destinations
destinations.destinations.push({
destinationIP: site.sites.subnet.split("/")[0],
destinationPort: site.sites.listenPort || 0
// Create a list of the destinations from the sites
for (const site of sitesOnExitNode) {
if (site.subnet && site.listenPort) {
destinations.push({
destinationIP: site.subnet.split("/")[0],
destinationPort: site.listenPort
});
}
// update it in the array
exitNodeDestinations = exitNodeDestinations.filter(
(d) => d.reachableAt !== site.exitNodes?.reachableAt
);
exitNodeDestinations.push(destinations);
}
logger.debug(JSON.stringify(exitNodeDestinations, null, 2));
for (const destination of exitNodeDestinations) {
// if its the current exit node skip it because it is replying with the same data
if (reachableAt && destination.reachableAt == reachableAt) {
logger.debug(`Skipping update for reachableAt: ${reachableAt}`);
continue;
}
try {
const response = await axios.post(
`${destination.reachableAt}/update-destinations`,
{
sourceIp: client.endpoint?.split(":")[0] || "",
sourcePort: parseInt(client.endpoint?.split(":")[1] || "0"),
destinations: destination.destinations
},
{
headers: {
"Content-Type": "application/json"
}
}
);
logger.info("Destinations updated:", {
peer: response.data.status
});
} catch (error) {
if (axios.isAxiosError(error)) {
logger.error(
`Error updating destinations (can Pangolin see Gerbil HTTP API?) for exit node at ${destination.reachableAt} (status: ${error.response?.status}): ${JSON.stringify(error.response?.data, null, 2)}`
);
} else {
logger.error(
`Error updating destinations for exit node at ${destination.reachableAt}: ${error}`
);
}
}
}
// Send the destinations back to the origin
destinations = exitNodeDestinations.find(
(d) => d.reachableAt === reachableAt
)?.destinations || [];
} else if (newtId) {
logger.debug(`Got hole punch with ip: ${ip}, port: ${port} for newtId: ${newtId}`);
logger.debug(
`Got hole punch with ip: ${ip}, port: ${port} for newtId: ${newtId}`
);
const { session, newt: newtSession } =
await validateNewtSessionToken(token);
@@ -232,7 +196,9 @@ export async function updateHolePunch(
}
if (newtId !== newtSession.newtId) {
logger.warn(`Newt ID mismatch: ${newtId} !== ${newtSession.newtId}`);
logger.warn(
`Newt ID mismatch: ${newtId} !== ${newtSession.newtId}`
);
return next(
createHttpError(HttpCode.UNAUTHORIZED, "Unauthorized")
);
@@ -261,7 +227,7 @@ export async function updateHolePunch(
})
.where(eq(sites.siteId, newt.siteId))
.returning();
if (!updatedSite || !updatedSite.subnet) {
logger.warn(`Site not found: ${newt.siteId}`);
return next(
@@ -274,7 +240,7 @@ export async function updateHolePunch(
// .select()
// .from(clientSites)
// .where(eq(clientSites.siteId, newt.siteId));
// THE NEWT IS NOT SENDING RAW WG TO THE GERBIL SO IDK IF WE REALLY NEED THIS - REMOVING
// Get client details for each client
// for (const pair of sitesClientPairs) {
@@ -282,7 +248,7 @@ export async function updateHolePunch(
// .select()
// .from(clients)
// .where(eq(clients.clientId, pair.clientId));
// if (client && client.endpoint) {
// const [host, portStr] = client.endpoint.split(':');
// if (host && portStr) {
@@ -293,27 +259,27 @@ export async function updateHolePunch(
// }
// }
// }
// If this is a newt/site, also add other sites in the same org
// if (updatedSite.orgId) {
// const orgSites = await db
// .select()
// .from(sites)
// .where(eq(sites.orgId, updatedSite.orgId));
// for (const site of orgSites) {
// // Don't add the current site to the destinations
// if (site.siteId !== currentSiteId && site.subnet && site.endpoint && site.listenPort) {
// const [host, portStr] = site.endpoint.split(':');
// if (host && portStr) {
// destinations.push({
// destinationIP: host,
// destinationPort: site.listenPort
// });
// }
// }
// }
// }
// if (updatedSite.orgId) {
// const orgSites = await db
// .select()
// .from(sites)
// .where(eq(sites.orgId, updatedSite.orgId));
// for (const site of orgSites) {
// // Don't add the current site to the destinations
// if (site.siteId !== currentSiteId && site.subnet && site.endpoint && site.listenPort) {
// const [host, portStr] = site.endpoint.split(':');
// if (host && portStr) {
// destinations.push({
// destinationIP: host,
// destinationPort: site.listenPort
// });
// }
// }
// }
// }
}
// if (destinations.length === 0) {
@@ -323,6 +289,10 @@ export async function updateHolePunch(
// return next(createHttpError(HttpCode.NOT_FOUND, "No peer destinations found"));
// }
logger.debug(
`Returning ${destinations.length} peer destinations for olmId: ${olmId} or newtId: ${newtId}: ${JSON.stringify(destinations, null, 2)}`
);
// Return the new multi-peer structure
return res.status(HttpCode.OK).send({
destinations: destinations
@@ -336,4 +306,4 @@ export async function updateHolePunch(
)
);
}
}
}

View File

@@ -102,7 +102,7 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
.from(exitNodes)
.where(eq(exitNodes.exitNodeId, site.exitNodeId))
.limit(1);
if (exitNode.reachableAt) {
if (exitNode.reachableAt && existingSite.subnet && existingSite.listenPort) {
try {
const response = await axios.post(
`${exitNode.reachableAt}/update-proxy-mapping`,
@@ -157,9 +157,6 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
if (!client.clients.subnet) {
return false;
}
if (!client.clients.endpoint) {
return false;
}
return true;
})
.map(async (client) => {
@@ -215,7 +212,7 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
allowedIps: [`${client.clients.subnet.split("/")[0]}/32`], // we want to only allow from that client
endpoint: client.clientSites.isRelayed
? ""
: client.clients.endpoint! // if its relayed it should be localhost
: client.clientSites.endpoint! // if its relayed it should be localhost
};
})
);

View File

@@ -1,14 +1,7 @@
import { db, ExitNode } from "@server/db";
import { MessageHandler } from "../ws";
import {
clients,
clientSites,
exitNodes,
Olm,
olms,
sites
} from "@server/db";
import { eq, inArray } from "drizzle-orm";
import { clients, clientSites, exitNodes, Olm, olms, sites } from "@server/db";
import { and, eq, inArray } from "drizzle-orm";
import { addPeer, deletePeer } from "../newt/peers";
import logger from "@server/logger";
@@ -30,7 +23,9 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
const clientId = olm.clientId;
const { publicKey, relay } = message.data;
logger.debug(`Olm client ID: ${clientId}, Public Key: ${publicKey}, Relay: ${relay}`);
logger.debug(
`Olm client ID: ${clientId}, Public Key: ${publicKey}, Relay: ${relay}`
);
if (!publicKey) {
logger.warn("Public key not provided");
@@ -50,22 +45,34 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
}
if (client.exitNodeId) {
// Get the exit node for this site
const [exitNode] = await db
.select()
.from(exitNodes)
.where(eq(exitNodes.exitNodeId, client.exitNodeId))
.limit(1);
// TODO: FOR NOW WE ARE JUST HOLEPUNCHING ALL EXIT NODES BUT IN THE FUTURE WE SHOULD HANDLE THIS BETTER
// Send holepunch message for each site
sendToClient(olm.olmId, {
type: "olm/wg/holepunch",
// Get the exit node
const allExitNodes = await db.select().from(exitNodes);
const exitNodesHpData = allExitNodes.map((exitNode: ExitNode) => {
return {
publicKey: exitNode.publicKey,
endpoint: exitNode.endpoint
};
});
// Send holepunch message
await sendToClient(olm.olmId, {
type: "olm/wg/holepunch/all",
data: {
serverPubKey: exitNode.publicKey,
endpoint: exitNode.endpoint,
exitNodes: exitNodesHpData
}
});
// THIS IS FOR BACKWARDS COMPATIBILITY
await sendToClient(olm.olmId, {
type: "olm/wg/holepunch/all",
data: {
serverPubKey: allExitNodes[0].publicKey,
endpoint: allExitNodes[0].endpoint
}
});
}
if (now - (client.lastHolePunch || 0) > 6) {
@@ -102,8 +109,10 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
.where(eq(clientSites.clientId, client.clientId));
// Prepare an array to store site configurations
const siteConfigurations = [];
logger.debug(`Found ${sitesData.length} sites for client ${client.clientId}`);
let siteConfigurations = [];
logger.debug(
`Found ${sitesData.length} sites for client ${client.clientId}`
);
if (sitesData.length === 0) {
sendToClient(olm.olmId, {
@@ -147,15 +156,26 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
continue;
}
const [clientSite] = await db
.select()
.from(clientSites)
.where(
and(
eq(clientSites.clientId, client.clientId),
eq(clientSites.siteId, site.siteId)
)
)
.limit(1);
// Add the peer to the exit node for this site
if (client.endpoint) {
if (clientSite.endpoint) {
logger.info(
`Adding peer ${publicKey} to site ${site.siteId} with endpoint ${client.endpoint}`
`Adding peer ${publicKey} to site ${site.siteId} with endpoint ${clientSite.endpoint}`
);
await addPeer(site.siteId, {
publicKey: publicKey,
allowedIps: [`${client.subnet.split('/')[0]}/32`], // we want to only allow from that client
endpoint: relay ? "" : client.endpoint
allowedIps: [`${client.subnet.split("/")[0]}/32`], // we want to only allow from that client
endpoint: relay ? "" : clientSite.endpoint
});
} else {
logger.warn(
@@ -188,7 +208,7 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
});
}
// REMOVED THIS SO IT CREATES THE INTERFACE AND JUST WAITS FOR THE SITES
// REMOVED THIS SO IT CREATES THE INTERFACE AND JUST WAITS FOR THE SITES
// if (siteConfigurations.length === 0) {
// logger.warn("No valid site configurations found");
// return;