Remove site kick

Owen
2026-02-09 17:23:48 -08:00
parent 7d8185e0ee
commit 431e6ffaae
11 changed files with 61 additions and 173 deletions

View File

@@ -8,77 +8,60 @@ export type LimitSet = Partial<{
}>;
export const sandboxLimitSet: LimitSet = {
[FeatureId.SITES]: { value: 1, description: "Sandbox limit" }, // 1 site up for 2 days
[FeatureId.USERS]: { value: 1, description: "Sandbox limit" },
[FeatureId.EGRESS_DATA_MB]: { value: 1000, description: "Sandbox limit" }, // 1 GB
[FeatureId.SITES]: { value: 1, description: "Sandbox limit" },
[FeatureId.DOMAINS]: { value: 0, description: "Sandbox limit" },
[FeatureId.REMOTE_EXIT_NODES]: { value: 0, description: "Sandbox limit" }
[FeatureId.REMOTE_EXIT_NODES]: { value: 0, description: "Sandbox limit" },
};
export const freeLimitSet: LimitSet = {
[FeatureId.SITES]: { value: 3, description: "Free tier limit" }, // 1 site up for 32 days
[FeatureId.USERS]: { value: 3, description: "Free tier limit" },
[FeatureId.EGRESS_DATA_MB]: {
value: 25000,
description: "Free tier limit"
}, // 25 GB
[FeatureId.DOMAINS]: { value: 3, description: "Free tier limit" },
[FeatureId.REMOTE_EXIT_NODES]: { value: 0, description: "Free tier limit" }
[FeatureId.USERS]: { value: 5, description: "Starter limit" },
[FeatureId.SITES]: { value: 5, description: "Starter limit" },
[FeatureId.DOMAINS]: { value: 5, description: "Starter limit" },
[FeatureId.REMOTE_EXIT_NODES]: { value: 1, description: "Starter limit" },
};
export const homeLabLimitSet: LimitSet = {
[FeatureId.SITES]: { value: 3, description: "Home lab limit" }, // 1 site up for 32 days
[FeatureId.USERS]: { value: 3, description: "Home lab limit" },
[FeatureId.EGRESS_DATA_MB]: {
value: 25000,
description: "Home lab limit"
}, // 25 GB
[FeatureId.DOMAINS]: { value: 3, description: "Home lab limit" },
[FeatureId.REMOTE_EXIT_NODES]: { value: 1, description: "Home lab limit" }
export const tier1LimitSet: LimitSet = {
[FeatureId.USERS]: { value: 7, description: "Home limit" },
[FeatureId.SITES]: { value: 10, description: "Home limit" },
[FeatureId.DOMAINS]: { value: 10, description: "Home limit" },
[FeatureId.REMOTE_EXIT_NODES]: { value: 1, description: "Home limit" },
};
export const tier2LimitSet: LimitSet = {
[FeatureId.SITES]: {
value: 10,
description: "Starter limit"
}, // 50 sites up for 31 days
[FeatureId.USERS]: {
value: 150,
description: "Starter limit"
value: 100,
description: "Team limit"
},
[FeatureId.SITES]: {
value: 50,
description: "Team limit"
},
[FeatureId.EGRESS_DATA_MB]: {
value: 12000000,
description: "Starter limit"
}, // 12000 GB
[FeatureId.DOMAINS]: {
value: 250,
description: "Starter limit"
value: 50,
description: "Team limit"
},
[FeatureId.REMOTE_EXIT_NODES]: {
value: 5,
description: "Starter limit"
}
value: 3,
description: "Team limit"
},
};
export const tier3LimitSet: LimitSet = {
[FeatureId.SITES]: {
value: 10,
description: "Scale limit"
}, // 50 sites up for 31 days
[FeatureId.USERS]: {
value: 150,
description: "Scale limit"
value: 500,
description: "Business limit"
},
[FeatureId.EGRESS_DATA_MB]: {
value: 12000000,
description: "Scale limit"
}, // 12000 GB
[FeatureId.DOMAINS]: {
[FeatureId.SITES]: {
value: 250,
description: "Scale limit"
description: "Business limit"
},
[FeatureId.DOMAINS]: {
value: 100,
description: "Business limit"
},
[FeatureId.REMOTE_EXIT_NODES]: {
value: 5,
description: "Scale limit"
}
value: 20,
description: "Business limit"
},
};
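A quick way to read the new sets: each entry maps a FeatureId to a value and a human-readable description, entries are optional because LimitSet is a Partial, and the static per-tier egress caps are gone. A minimal lookup sketch against the values introduced above (import paths are assumed from other files in this commit; the exact module that exports freeLimitSet is not shown in this diff):

```ts
import { FeatureId } from "@server/lib/billing";
import { freeLimitSet } from "@server/lib/billing"; // exact export path assumed

// Read one limit from the renamed "Starter" set.
const siteLimit = freeLimitSet[FeatureId.SITES];
// After this commit: { value: 5, description: "Starter limit" }

// LimitSet is a Partial, so guard against missing entries.
const maxSites = siteLimit?.value ?? 0;
```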

View File

@@ -517,7 +517,6 @@ export class UsageService {
public async checkLimitSet(
orgId: string,
kickSites = false,
featureId?: FeatureId,
usage?: Usage,
trx: Transaction | typeof db = db
@@ -591,58 +590,6 @@ export class UsageService {
break; // Exit early if any limit is exceeded
}
}
// If any limits are exceeded, disconnect all sites for this organization
if (hasExceededLimits && kickSites) {
logger.warn(
`Disconnecting all sites for org ${orgId} due to exceeded limits`
);
// Get all sites for this organization
const orgSites = await trx
.select()
.from(sites)
.where(eq(sites.orgId, orgId));
// Mark all sites as offline and send termination messages
const siteUpdates = orgSites.map((site) => site.siteId);
if (siteUpdates.length > 0) {
// Send termination messages to newt sites
for (const site of orgSites) {
if (site.type === "newt") {
const [newt] = await trx
.select()
.from(newts)
.where(eq(newts.siteId, site.siteId))
.limit(1);
if (newt) {
const payload = {
type: `newt/wg/terminate`,
data: {
reason: "Usage limits exceeded"
}
};
// Don't await to prevent blocking
await sendToClient(newt.newtId, payload).catch(
(error: any) => {
logger.error(
`Failed to send termination message to newt ${newt.newtId}:`,
error
);
}
);
}
}
}
logger.info(
`Disconnected ${orgSites.length} sites for org ${orgId} due to exceeded limits`
);
}
}
} catch (error) {
logger.error(`Error checking limits for org ${orgId}:`, error);
}
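With the kickSites flag and the disconnect block removed, checkLimitSet only reports whether a limit is exceeded; each caller decides what to reject. A minimal sketch of the new call shape, assuming the import paths used elsewhere in this commit (the helper name and the thrown error are illustrative, not from this diff):

```ts
import { usageService } from "@server/lib/billing/usageService";
import { FeatureId } from "@server/lib/billing";

// Hypothetical helper showing the post-commit signature:
// checkLimitSet(orgId, featureId?, usage?, trx?)
async function assertSiteCapacity(orgId: string): Promise<void> {
    const rejectSites = await usageService.checkLimitSet(orgId, FeatureId.SITES);
    if (rejectSites) {
        // Callers now surface an error themselves; the usage service no
        // longer terminates existing sites when limits are exceeded.
        throw new Error(`Usage limits exceeded for org ${orgId}`);
    }
}
```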

View File

@@ -13,7 +13,7 @@
import {
freeLimitSet,
homeLabLimitSet,
tier1LimitSet,
tier2LimitSet,
tier3LimitSet,
limitsService,
@@ -22,10 +22,12 @@ import {
import { usageService } from "@server/lib/billing/usageService";
import { SubscriptionType } from "./hooks/getSubType";
function getLimitSetForSubscriptionType(subType: SubscriptionType | null): LimitSet {
function getLimitSetForSubscriptionType(
subType: SubscriptionType | null
): LimitSet {
switch (subType) {
case "tier1":
return homeLabLimitSet;
return tier1LimitSet;
case "tier2":
return tier2LimitSet;
case "tier3":
@@ -48,12 +50,12 @@ export async function handleSubscriptionLifesycle(
case "active":
const activeLimitSet = getLimitSetForSubscriptionType(subType);
await limitsService.applyLimitSetToOrg(orgId, activeLimitSet);
await usageService.checkLimitSet(orgId, true);
await usageService.checkLimitSet(orgId);
break;
case "canceled":
// Subscription canceled - revert to free tier
await limitsService.applyLimitSetToOrg(orgId, freeLimitSet);
await usageService.checkLimitSet(orgId, true);
await usageService.checkLimitSet(orgId);
break;
case "past_due":
// Payment past due - keep current limits but notify customer
@@ -62,7 +64,7 @@ export async function handleSubscriptionLifesycle(
case "unpaid":
// Subscription unpaid - revert to free tier
await limitsService.applyLimitSetToOrg(orgId, freeLimitSet);
await usageService.checkLimitSet(orgId, true);
await usageService.checkLimitSet(orgId);
break;
case "incomplete":
// Payment incomplete - give them time to complete payment
@@ -70,7 +72,7 @@ export async function handleSubscriptionLifesycle(
case "incomplete_expired":
// Payment never completed - revert to free tier
await limitsService.applyLimitSetToOrg(orgId, freeLimitSet);
await usageService.checkLimitSet(orgId, true);
await usageService.checkLimitSet(orgId);
break;
default:
break;
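For reference, the renamed sets resolve through this module's helper as sketched below (the tier3 return value and the fallback case are not visible in this hunk, so they are marked as assumptions):

```ts
// Illustrative calls, as they would read inside this module:
getLimitSetForSubscriptionType("tier1"); // -> tier1LimitSet ("Home limit" entries)
getLimitSetForSubscriptionType("tier2"); // -> tier2LimitSet ("Team limit" entries)
getLimitSetForSubscriptionType("tier3"); // -> presumably tier3LimitSet (return not shown here)
getLimitSetForSubscriptionType(null);    // -> fallback not shown in this diff
```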

View File

@@ -85,7 +85,7 @@ export async function createRemoteExitNode(
if (usage) {
const rejectRemoteExitNodes = await usageService.checkLimitSet(
orgId,
false,
FeatureId.REMOTE_EXIT_NODES,
{
...usage,

View File

@@ -131,7 +131,7 @@ export async function createOrgDomain(
}
const rejectDomains = await usageService.checkLimitSet(
orgId,
false,
FeatureId.DOMAINS,
{
...usage,

View File

@@ -178,11 +178,9 @@ export async function updateSiteBandwidth(
// Process usage updates outside of site update transactions
// This separates the concerns and reduces lock contention
if (calcUsageAndLimits && (orgUsageMap.size > 0)) {
if (calcUsageAndLimits && orgUsageMap.size > 0) {
// Sort org IDs to ensure consistent lock ordering
const allOrgIds = [
...new Set([...orgUsageMap.keys()])
].sort();
const allOrgIds = [...new Set([...orgUsageMap.keys()])].sort();
for (const orgId of allOrgIds) {
try {
@@ -199,7 +197,7 @@ export async function updateSiteBandwidth(
usageService
.checkLimitSet(
orgId,
true,
FeatureId.EGRESS_DATA_MB,
bandwidthUsage
)
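The bandwidth path runs the same per-feature check for each org, iterating org IDs in sorted order so concurrent workers always take org-level locks in the same sequence. A condensed sketch of that loop under stated assumptions (the map's value type, the helper name, and the await are illustrative; whether the real code awaits the check is not shown in this hunk):

```ts
import { usageService } from "@server/lib/billing/usageService";
import { FeatureId } from "@server/lib/billing";
import type { Usage } from "@server/lib/billing"; // type's export location assumed

// Check egress usage per org in a deterministic order to avoid
// lock-order inversions between concurrent bandwidth flushes.
async function checkEgressForOrgs(orgUsageMap: Map<string, Usage>): Promise<void> {
    const allOrgIds = [...new Set([...orgUsageMap.keys()])].sort();
    for (const orgId of allOrgIds) {
        const bandwidthUsage = orgUsageMap.get(orgId)!;
        await usageService.checkLimitSet(orgId, FeatureId.EGRESS_DATA_MB, bandwidthUsage);
    }
}
```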

View File

@@ -1,17 +1,13 @@
import { db, ExitNode, exitNodeOrgs, newts, Transaction } from "@server/db";
import { db, ExitNode, newts, Transaction } from "@server/db";
import { MessageHandler } from "@server/routers/ws";
import { exitNodes, Newt, resources, sites, Target, targets } from "@server/db";
import { targetHealthCheck } from "@server/db";
import { eq, and, sql, inArray, ne } from "drizzle-orm";
import { exitNodes, Newt, sites } from "@server/db";
import { eq } from "drizzle-orm";
import { addPeer, deletePeer } from "../gerbil/peers";
import logger from "@server/logger";
import config from "@server/lib/config";
import {
findNextAvailableCidr,
getNextAvailableClientSubnet
} from "@server/lib/ip";
import { usageService } from "@server/lib/billing/usageService";
import { FeatureId } from "@server/lib/billing";
import {
selectBestExitNode,
verifyExitNodeOrgAccess
@@ -30,8 +26,6 @@ export type ExitNodePingResult = {
wasPreviouslyConnected: boolean;
};
const numTimesLimitExceededForId: Record<string, number> = {};
export const handleNewtRegisterMessage: MessageHandler = async (context) => {
const { message, client, sendToClient } = context;
const newt = client as Newt;
@@ -96,42 +90,6 @@ export const handleNewtRegisterMessage: MessageHandler = async (context) => {
fetchContainers(newt.newtId);
}
const rejectSites = await usageService.checkLimitSet(
oldSite.orgId,
false,
FeatureId.SITES
);
const rejectEgressDataMb = await usageService.checkLimitSet(
oldSite.orgId,
false,
FeatureId.EGRESS_DATA_MB
);
// Do we need to check the users and domains count limits here?
// const rejectUsers = await usageService.checkLimitSet(oldSite.orgId, false, FeatureId.USERS);
// const rejectDomains = await usageService.checkLimitSet(oldSite.orgId, false, FeatureId.DOMAINS);
// if (rejectEgressDataMb || rejectSites || rejectUsers || rejectDomains) {
if (rejectEgressDataMb || rejectSites) {
logger.info(
`Usage limits exceeded for org ${oldSite.orgId}. Rejecting newt registration.`
);
// PREVENT FURTHER REGISTRATION ATTEMPTS SO WE DON'T SPAM
// Increment the limit exceeded count for this site
numTimesLimitExceededForId[newt.newtId] =
(numTimesLimitExceededForId[newt.newtId] || 0) + 1;
if (numTimesLimitExceededForId[newt.newtId] > 15) {
logger.debug(
`Newt ${newt.newtId} has exceeded usage limits 15 times. Terminating...`
);
}
return;
}
let siteSubnet = oldSite.subnet;
let exitNodeIdToQuery = oldSite.exitNodeId;
if (exitNodeId && (oldSite.exitNodeId !== exitNodeId || !oldSite.subnet)) {

View File

@@ -140,7 +140,7 @@ export async function createSite(
}
const rejectSites = await usageService.checkLimitSet(
orgId,
false,
FeatureId.SITES,
{
...usage,

View File

@@ -94,7 +94,10 @@ export async function acceptInvite(
}
if (build == "saas") {
const usage = await usageService.getUsage(existingInvite.orgId, FeatureId.USERS);
const usage = await usageService.getUsage(
existingInvite.orgId,
FeatureId.USERS
);
if (!usage) {
return next(
createHttpError(
@@ -105,7 +108,7 @@ export async function acceptInvite(
}
const rejectUsers = await usageService.checkLimitSet(
existingInvite.orgId,
false,
FeatureId.USERS,
{
...usage,
@@ -163,7 +166,9 @@ export async function acceptInvite(
.from(userOrgs)
.where(eq(userOrgs.orgId, existingInvite.orgId));
logger.debug(`User ${existingUser[0].userId} accepted invite to org ${existingInvite.orgId}. Total users in org: ${totalUsers.length}`);
logger.debug(
`User ${existingUser[0].userId} accepted invite to org ${existingInvite.orgId}. Total users in org: ${totalUsers.length}`
);
});
if (totalUsers) {

View File

@@ -21,11 +21,7 @@ const paramsSchema = z.strictObject({
});
const bodySchema = z.strictObject({
email: z
.string()
.email()
.toLowerCase()
.optional(),
email: z.string().email().toLowerCase().optional(),
username: z.string().nonempty().toLowerCase(),
name: z.string().optional(),
type: z.enum(["internal", "oidc"]).optional(),
@@ -94,7 +90,7 @@ export async function createOrgUser(
}
const rejectUsers = await usageService.checkLimitSet(
orgId,
false,
FeatureId.USERS,
{
...usage,

View File

@@ -133,7 +133,6 @@ export async function inviteUser(
}
const rejectUsers = await usageService.checkLimitSet(
orgId,
false,
FeatureId.USERS,
{
...usage,