Fix adding sites to client
@@ -1,6 +1,6 @@
 import { Request, Response, NextFunction } from "express";
 import { z } from "zod";
-import { db } from "@server/db";
+import { db, exitNodes, sites } from "@server/db";
 import { clients, clientSites } from "@server/db";
 import response from "@server/lib/response";
 import HttpCode from "@server/types/HttpCode";
@@ -17,6 +17,7 @@ import {
     addPeer as olmAddPeer,
     deletePeer as olmDeletePeer
 } from "../olm/peers";
+import axios from "axios";

 const updateClientParamsSchema = z
     .object({
@@ -53,6 +54,11 @@ registry.registerPath({
     responses: {}
 });

+interface PeerDestination {
+    destinationIP: string;
+    destinationPort: number;
+}
+
 export async function updateClient(
     req: Request,
     res: Response,
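The new `PeerDestination` interface is the per-site entry in the destination lists this commit later POSTs to each exit node's `/update-destinations` endpoint. A minimal sketch of that payload shape, with made-up values:

```ts
// Illustration only: the interface is copied from the hunk above, the values are invented.
interface PeerDestination {
    destinationIP: string;
    destinationPort: number;
}

const destinations: PeerDestination[] = [
    { destinationIP: "100.89.128.1", destinationPort: 51821 }
];

// Body shape sent to `${reachableAt}/update-destinations` later in this commit.
const examplePayload = {
    sourceIp: "203.0.113.10", // client endpoint host
    sourcePort: 51820, // client endpoint port
    destinations
};
```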
@@ -124,15 +130,22 @@ export async function updateClient(
         );
         for (const siteId of sitesAdded) {
             if (!client.subnet || !client.pubKey || !client.endpoint) {
-                logger.debug("Client subnet, pubKey or endpoint is not set");
+                logger.debug(
+                    "Client subnet, pubKey or endpoint is not set"
+                );
                 continue;
             }
+
+            // TODO: WE NEED TO HANDLE THIS BETTER. RIGHT NOW WE ARE JUST GUESSING BASED ON THE OTHER SITES
+            // BUT REALLY WE NEED TO TRACK THE USERS PREFERENCE THAT THEY CHOSE IN THE CLIENTS
+            const isRelayed = true;
+
             const site = await newtAddPeer(siteId, {
                 publicKey: client.pubKey,
                 allowedIps: [`${client.subnet.split("/")[0]}/32`], // we want to only allow from that client
-                endpoint: client.endpoint
+                endpoint: isRelayed ? "" : client.endpoint
             });

             if (!site) {
                 logger.debug("Failed to add peer to newt - missing site");
                 continue;
@@ -142,9 +155,45 @@ export async function updateClient(
                 logger.debug("Site endpoint or publicKey is not set");
                 continue;
             }
+
+            let endpoint;
+
+            if (isRelayed) {
+                if (!site.exitNodeId) {
+                    logger.warn(
+                        `Site ${site.siteId} has no exit node, skipping`
+                    );
+                    return null;
+                }
+
+                // get the exit node for the site
+                const [exitNode] = await db
+                    .select()
+                    .from(exitNodes)
+                    .where(eq(exitNodes.exitNodeId, site.exitNodeId))
+                    .limit(1);
+
+                if (!exitNode) {
+                    logger.warn(
+                        `Exit node not found for site ${site.siteId}`
+                    );
+                    return null;
+                }
+
+                endpoint = `${exitNode.endpoint}:21820`;
+            } else {
+                if (!endpoint) {
+                    logger.warn(
+                        `Site ${site.siteId} has no endpoint, skipping`
+                    );
+                    return null;
+                }
+                endpoint = site.endpoint;
+            }
+
             await olmAddPeer(client.clientId, {
-                siteId: siteId,
-                endpoint: site.endpoint,
+                siteId: site.siteId,
+                endpoint: endpoint,
                 publicKey: site.publicKey,
                 serverIP: site.address,
                 serverPort: site.listenPort,
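The added branch resolves which endpoint the olm peer should dial: relayed clients are pointed at the site's exit node on port 21820, while direct clients use the site's own endpoint. A standalone sketch of that decision under a simplified site shape (the helper and field names are illustrative, not from the commit):

```ts
// Sketch only: `SiteLike` and `resolveOlmEndpoint` are illustrative names.
interface SiteLike {
    siteId: number;
    endpoint: string | null; // the site's own WireGuard endpoint
    exitNodeEndpoint: string | null; // endpoint of the site's exit node, if any
}

function resolveOlmEndpoint(site: SiteLike, isRelayed: boolean): string | null {
    if (isRelayed) {
        // Relayed traffic is sent to the exit node's relay port (21820 in this diff).
        return site.exitNodeEndpoint ? `${site.exitNodeEndpoint}:21820` : null;
    }
    // Direct (hole-punched) traffic targets the site's own endpoint.
    return site.endpoint;
}
```

Returning `null` here stands in for the `logger.warn` + skip paths in the handler above.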
@@ -171,7 +220,11 @@ export async function updateClient(
                 logger.debug("Site endpoint or publicKey is not set");
                 continue;
             }
-            await olmDeletePeer(client.clientId, site.siteId, site.publicKey);
+            await olmDeletePeer(
+                client.clientId,
+                site.siteId,
+                site.publicKey
+            );
         }
     }

@@ -202,6 +255,101 @@ export async function updateClient(
             }
         }

+        if (client.endpoint) {
+            // get all sites for this client and join with exit nodes with site.exitNodeId
+            const sitesData = await db
+                .select()
+                .from(sites)
+                .innerJoin(
+                    clientSites,
+                    eq(sites.siteId, clientSites.siteId)
+                )
+                .leftJoin(
+                    exitNodes,
+                    eq(sites.exitNodeId, exitNodes.exitNodeId)
+                )
+                .where(eq(clientSites.clientId, client.clientId));
+
+            let exitNodeDestinations: {
+                reachableAt: string;
+                destinations: PeerDestination[];
+            }[] = [];
+
+            for (const site of sitesData) {
+                if (!site.sites.subnet) {
+                    logger.warn(
+                        `Site ${site.sites.siteId} has no subnet, skipping`
+                    );
+                    continue;
+                }
+                // find the destinations in the array
+                let destinations = exitNodeDestinations.find(
+                    (d) => d.reachableAt === site.exitNodes?.reachableAt
+                );
+
+                if (!destinations) {
+                    destinations = {
+                        reachableAt: site.exitNodes?.reachableAt || "",
+                        destinations: [
+                            {
+                                destinationIP:
+                                    site.sites.subnet.split("/")[0],
+                                destinationPort: site.sites.listenPort || 0
+                            }
+                        ]
+                    };
+                } else {
+                    // add to the existing destinations
+                    destinations.destinations.push({
+                        destinationIP: site.sites.subnet.split("/")[0],
+                        destinationPort: site.sites.listenPort || 0
+                    });
+                }
+
+                // update it in the array
+                exitNodeDestinations = exitNodeDestinations.filter(
+                    (d) => d.reachableAt !== site.exitNodes?.reachableAt
+                );
+                exitNodeDestinations.push(destinations);
+            }
+
+            for (const destination of exitNodeDestinations) {
+                try {
+                    logger.info(
+                        `Updating destinations for exit node at ${destination.reachableAt}`
+                    );
+                    const payload = {
+                        sourceIp: client.endpoint?.split(":")[0] || "",
+                        sourcePort: parseInt(client.endpoint?.split(":")[1]) || 0,
+                        destinations: destination.destinations
+                    };
+                    logger.info(
+                        `Payload for update-destinations: ${JSON.stringify(payload, null, 2)}`
+                    );
+                    const response = await axios.post(
+                        `${destination.reachableAt}/update-destinations`,
+                        payload,
+                        {
+                            headers: {
+                                "Content-Type": "application/json"
+                            }
+                        }
+                    );
+
+                    logger.info("Destinations updated:", {
+                        peer: response.data.status
+                    });
+                } catch (error) {
+                    if (axios.isAxiosError(error)) {
+                        throw new Error(
+                            `Error communicating with Gerbil. Make sure Pangolin can reach the Gerbil HTTP API: ${error.response?.status}`
+                        );
+                    }
+                    throw error;
+                }
+            }
+        }
+
         // Fetch the updated client
         const [updatedClient] = await trx
             .select()
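The block above walks every site attached to the client, derives a destination from the site's subnet host and listen port, and groups those destinations by the exit node's `reachableAt` address before POSTing one update per exit node. A condensed sketch of the same grouping using a `Map` (the row type and helper name are illustrative, not part of the commit):

```ts
// Sketch: group site destinations by the exit node address they are reachable through.
interface PeerDestination {
    destinationIP: string;
    destinationPort: number;
}

interface SiteRow {
    subnet: string | null; // e.g. "100.89.128.1/28"
    listenPort: number | null;
    reachableAt: string | null; // exit node HTTP address, if the join matched
}

function groupByExitNode(rows: SiteRow[]): Map<string, PeerDestination[]> {
    const grouped = new Map<string, PeerDestination[]>();
    for (const row of rows) {
        if (!row.subnet) continue; // same skip as the handler
        const key = row.reachableAt ?? "";
        const list = grouped.get(key) ?? [];
        list.push({
            destinationIP: row.subnet.split("/")[0],
            destinationPort: row.listenPort ?? 0
        });
        grouped.set(key, list);
    }
    return grouped;
}
```

Each resulting group then corresponds to one POST of `{ sourceIp, sourcePort, destinations }` to `${reachableAt}/update-destinations`, as in the handler.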
@@ -8,7 +8,7 @@ export async function addPeer(exitNodeId: number, peer: {
     publicKey: string;
     allowedIps: string[];
 }) {
-
+    logger.info(`Adding peer with public key ${peer.publicKey} to exit node ${exitNodeId}`);
     const [exitNode] = await db.select().from(exitNodes).where(eq(exitNodes.exitNodeId, exitNodeId)).limit(1);
     if (!exitNode) {
         throw new Error(`Exit node with ID ${exitNodeId} not found`);
@@ -35,6 +35,7 @@ export async function addPeer(exitNodeId: number, peer: {
 }

 export async function deletePeer(exitNodeId: number, publicKey: string) {
+    logger.info(`Deleting peer with public key ${publicKey} from exit node ${exitNodeId}`);
     const [exitNode] = await db.select().from(exitNodes).where(eq(exitNodes.exitNodeId, exitNodeId)).limit(1);
     if (!exitNode) {
         throw new Error(`Exit node with ID ${exitNodeId} not found`);
@@ -1,6 +1,6 @@
 import { Request, Response, NextFunction } from "express";
 import { z } from "zod";
-import { clients, newts, olms, Site, sites, clientSites } from "@server/db";
+import { clients, newts, olms, Site, sites, clientSites, exitNodes } from "@server/db";
 import { db } from "@server/db";
 import { eq } from "drizzle-orm";
 import HttpCode from "@server/types/HttpCode";
@@ -9,6 +9,7 @@ import logger from "@server/logger";
 import { fromError } from "zod-validation-error";
 import { validateNewtSessionToken } from "@server/auth/sessions/newt";
 import { validateOlmSessionToken } from "@server/auth/sessions/olm";
+import axios from "axios";

 // Define Zod schema for request validation
 const updateHolePunchSchema = z.object({
@@ -17,7 +18,8 @@ const updateHolePunchSchema = z.object({
     token: z.string(),
     ip: z.string(),
     port: z.number(),
-    timestamp: z.number()
+    timestamp: z.number(),
+    reachableAt: z.string().optional()
 });

 // New response type with multi-peer destination support
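The schema gains an optional `reachableAt`, which lets an exit node identify itself when it forwards a hole-punch update; the handler later uses it to avoid echoing destinations back to the sender. A hedged example of validating such a body (the `olmId`/`newtId` fields are assumed to be optional strings here, and all values are invented):

```ts
import { z } from "zod";

// Approximate shape of updateHolePunchSchema after this change; reconstructed for illustration.
const updateHolePunchSchema = z.object({
    olmId: z.string().optional(),
    newtId: z.string().optional(),
    token: z.string(),
    ip: z.string(),
    port: z.number(),
    timestamp: z.number(),
    reachableAt: z.string().optional()
});

// Example body as an exit node might report it (values are illustrative).
const parsed = updateHolePunchSchema.safeParse({
    olmId: "olm_abc123",
    token: "session-token",
    ip: "198.51.100.7",
    port: 51820,
    timestamp: Date.now(),
    reachableAt: "http://10.1.0.2:3003"
});

if (!parsed.success) {
    console.error(parsed.error.issues);
}
```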
@@ -43,7 +45,7 @@ export async function updateHolePunch(
         );
     }

-    const { olmId, newtId, ip, port, timestamp, token } = parsedParams.data;
+    const { olmId, newtId, ip, port, timestamp, token, reachableAt } = parsedParams.data;

     let currentSiteId: number | undefined;
     let destinations: PeerDestination[] = [];
@@ -94,36 +96,126 @@ export async function updateHolePunch(
             );
         }

-        // Get all sites that this client is connected to
-        const clientSitePairs = await db
-            .select()
-            .from(clientSites)
-            .where(eq(clientSites.clientId, client.clientId));
+        // // Get all sites that this client is connected to
+        // const clientSitePairs = await db
+        //     .select()
+        //     .from(clientSites)
+        //     .where(eq(clientSites.clientId, client.clientId));

-        if (clientSitePairs.length === 0) {
-            logger.warn(`No sites found for client: ${client.clientId}`);
-            return next(
-                createHttpError(HttpCode.NOT_FOUND, "No sites found for client")
-            );
-        }
+        // if (clientSitePairs.length === 0) {
+        //     logger.warn(`No sites found for client: ${client.clientId}`);
+        //     return next(
+        //         createHttpError(HttpCode.NOT_FOUND, "No sites found for client")
+        //     );
+        // }

-        // Get all sites details
-        const siteIds = clientSitePairs.map(pair => pair.siteId);
+        // // Get all sites details
+        // const siteIds = clientSitePairs.map(pair => pair.siteId);

-        for (const siteId of siteIds) {
-            const [site] = await db
-                .select()
-                .from(sites)
-                .where(eq(sites.siteId, siteId));
+        // for (const siteId of siteIds) {
+        //     const [site] = await db
+        //         .select()
+        //         .from(sites)
+        //         .where(eq(sites.siteId, siteId));

-            if (site && site.subnet && site.listenPort) {
-                destinations.push({
-                    destinationIP: site.subnet.split("/")[0],
-                    destinationPort: site.listenPort
-                });
-            }
-        }
+        //     if (site && site.subnet && site.listenPort) {
+        //         destinations.push({
+        //             destinationIP: site.subnet.split("/")[0],
+        //             destinationPort: site.listenPort
+        //         });
+        //     }
+        // }
+
+        // get all sites for this client and join with exit nodes with site.exitNodeId
+        const sitesData = await db
+            .select()
+            .from(sites)
+            .innerJoin(clientSites, eq(sites.siteId, clientSites.siteId))
+            .leftJoin(exitNodes, eq(sites.exitNodeId, exitNodes.exitNodeId))
+            .where(eq(clientSites.clientId, client.clientId));
+
+        let exitNodeDestinations: {
+            reachableAt: string;
+            destinations: PeerDestination[];
+        }[] = [];
+
+        for (const site of sitesData) {
+            if (!site.sites.subnet) {
+                logger.warn(`Site ${site.sites.siteId} has no subnet, skipping`);
+                continue;
+            }
+            // find the destinations in the array
+            let destinations = exitNodeDestinations.find(
+                (d) => d.reachableAt === site.exitNodes?.reachableAt
+            );
+
+            if (!destinations) {
+                destinations = {
+                    reachableAt: site.exitNodes?.reachableAt || "",
+                    destinations: [
+                        {
+                            destinationIP: site.sites.subnet.split("/")[0],
+                            destinationPort: site.sites.listenPort || 0
+                        }
+                    ]
+                };
+            } else {
+                // add to the existing destinations
+                destinations.destinations.push({
+                    destinationIP: site.sites.subnet.split("/")[0],
+                    destinationPort: site.sites.listenPort || 0
+                });
+            }
+
+            // update it in the array
+            exitNodeDestinations = exitNodeDestinations.filter(
+                (d) => d.reachableAt !== site.exitNodes?.reachableAt
+            );
+            exitNodeDestinations.push(destinations);
+        }
+
+        logger.debug(JSON.stringify(exitNodeDestinations, null, 2));
+
+        for (const destination of exitNodeDestinations) {
+            // if its the current exit node skip it because it is replying with the same data
+            if (reachableAt && destination.reachableAt == reachableAt) {
+                logger.debug(`Skipping update for reachableAt: ${reachableAt}`);
+                continue;
+            }
+
+            try {
+                const response = await axios.post(
+                    `${destination.reachableAt}/update-destinations`,
+                    {
+                        sourceIp: client.endpoint?.split(":")[0] || "",
+                        sourcePort: client.endpoint?.split(":")[1] || 0,
+                        destinations: destination.destinations
+                    },
+                    {
+                        headers: {
+                            "Content-Type": "application/json"
+                        }
+                    }
+                );
+
+                logger.info("Destinations updated:", {
+                    peer: response.data.status
+                });
+            } catch (error) {
+                if (axios.isAxiosError(error)) {
+                    throw new Error(
+                        `Error communicating with Gerbil. Make sure Pangolin can reach the Gerbil HTTP API: ${error.response?.status}`
+                    );
+                }
+                throw error;
+            }
+        }
+
+        // Send the desinations back to the origin
+        destinations = exitNodeDestinations.find(
+            (d) => d.reachableAt === reachableAt
+        )?.destinations || [];

     } else if (newtId) {
         logger.debug(`Got hole punch with ip: ${ip}, port: ${port} for olmId: ${olmId}`);
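Taken together, the rewritten handler groups destinations per exit node, pushes each group to every exit node except the one that sent the update, and returns the sender's own group as the response. A minimal sketch of that fan-out (function and parameter names are illustrative, and error handling is omitted):

```ts
import axios from "axios";

interface PeerDestination {
    destinationIP: string;
    destinationPort: number;
}

interface ExitNodeGroup {
    reachableAt: string;
    destinations: PeerDestination[];
}

// Sketch of the fan-out performed in updateHolePunch after grouping by exit node.
async function pushDestinations(
    groups: ExitNodeGroup[],
    clientEndpoint: string, // "host:port" as stored on the client
    requesterReachableAt?: string // reachableAt reported by the calling exit node
): Promise<PeerDestination[]> {
    const [host, port] = clientEndpoint.split(":");
    for (const group of groups) {
        // The requesting exit node already has this data; skip it.
        if (requesterReachableAt && group.reachableAt === requesterReachableAt) {
            continue;
        }
        await axios.post(`${group.reachableAt}/update-destinations`, {
            sourceIp: host || "",
            sourcePort: parseInt(port || "") || 0,
            destinations: group.destinations
        });
    }
    // Hand the requester back its own destination list.
    return (
        groups.find((g) => g.reachableAt === requesterReachableAt)?.destinations ?? []
    );
}
```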
@@ -104,6 +104,14 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
     // Prepare an array to store site configurations
     let siteConfigurations = [];
+    logger.debug(`Found ${sitesData.length} sites for client ${client.clientId}`);
+
+    if (sitesData.length === 0) {
+        sendToClient(olm.olmId, {
+            type: "olm/register/no-sites",
+            data: {}
+        });
+    }

     // Process each site
     for (const { sites: site } of sitesData) {
         if (!site.exitNodeId) {
@@ -180,11 +188,11 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
         });
     }

-    // If we have no valid site configurations, don't send a connect message
-    if (siteConfigurations.length === 0) {
-        logger.warn("No valid site configurations found");
-        return;
-    }
+    // REMOVED THIS SO IT CREATES THE INTERFACE AND JUST WAITS FOR THE SITES
+    // if (siteConfigurations.length === 0) {
+    //     logger.warn("No valid site configurations found");
+    //     return;
+    // }

     // Return connect message with all site configurations
     return {
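With the early return removed, the register handler now proceeds even when there are no valid site configurations, so the client still brings up its interface and waits; the `olm/register/no-sites` message added in the earlier hunk is what signals that state explicitly. A hedged sketch of how a client might branch on that message type (the message envelope shape is an assumption; only the type string comes from this commit):

```ts
// Sketch only: the envelope shape is assumed, not taken from the olm codebase.
interface OlmMessage {
    type: string;
    data: unknown;
}

function handleRegisterResponse(msg: OlmMessage): void {
    if (msg.type === "olm/register/no-sites") {
        // Keep the tunnel interface up and wait for sites to be attached later.
        console.log("Registered, but no sites are attached to this client yet.");
        return;
    }
    // Other register responses (e.g. connect configurations) are handled elsewhere.
}
```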
@@ -147,33 +147,33 @@ export default function Page() {
         mac: {
             "Apple Silicon (arm64)": [
                 `curl -L -o olm "https://github.com/fosrl/olm/releases/download/${version}/olm_darwin_arm64" && chmod +x ./olm`,
-                `./olm --id ${id} --secret ${secret} --endpoint ${endpoint}`
+                `sudo ./olm --id ${id} --secret ${secret} --endpoint ${endpoint}`
             ],
             "Intel x64 (amd64)": [
                 `curl -L -o olm "https://github.com/fosrl/olm/releases/download/${version}/olm_darwin_amd64" && chmod +x ./olm`,
-                `./olm --id ${id} --secret ${secret} --endpoint ${endpoint}`
+                `sudo ./olm --id ${id} --secret ${secret} --endpoint ${endpoint}`
             ]
         },
         linux: {
             amd64: [
                 `wget -O olm "https://github.com/fosrl/olm/releases/download/${version}/olm_linux_amd64" && chmod +x ./olm`,
-                `./olm --id ${id} --secret ${secret} --endpoint ${endpoint}`
+                `sudo ./olm --id ${id} --secret ${secret} --endpoint ${endpoint}`
             ],
             arm64: [
                 `wget -O olm "https://github.com/fosrl/olm/releases/download/${version}/olm_linux_arm64" && chmod +x ./olm`,
-                `./olm --id ${id} --secret ${secret} --endpoint ${endpoint}`
+                `sudo ./olm --id ${id} --secret ${secret} --endpoint ${endpoint}`
             ],
             arm32: [
                 `wget -O olm "https://github.com/fosrl/olm/releases/download/${version}/olm_linux_arm32" && chmod +x ./olm`,
-                `./olm --id ${id} --secret ${secret} --endpoint ${endpoint}`
+                `sudo ./olm --id ${id} --secret ${secret} --endpoint ${endpoint}`
             ],
             arm32v6: [
                 `wget -O olm "https://github.com/fosrl/olm/releases/download/${version}/olm_linux_arm32v6" && chmod +x ./olm`,
-                `./olm --id ${id} --secret ${secret} --endpoint ${endpoint}`
+                `sudo ./olm --id ${id} --secret ${secret} --endpoint ${endpoint}`
             ],
             riscv64: [
                 `wget -O olm "https://github.com/fosrl/olm/releases/download/${version}/olm_linux_riscv64" && chmod +x ./olm`,
-                `./olm --id ${id} --secret ${secret} --endpoint ${endpoint}`
+                `sudo ./olm --id ${id} --secret ${secret} --endpoint ${endpoint}`
             ]
         },
         windows: {
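The install snippets now run olm under sudo on macOS and Linux, presumably because creating and configuring the client's tunnel interface requires elevated privileges; the download and chmod steps are unchanged.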