Merge branch 'main' into dev

Owen
2025-12-05 09:45:17 -05:00
19 changed files with 4279 additions and 3903 deletions

View File

@@ -31,6 +31,7 @@ proxy-resources:
# - owen@pangolin.net
# whitelist-users:
# - owen@pangolin.net
+# auto-login-idp: 1
headers:
- name: X-Example-Header
value: example-value

View File

@@ -35,7 +35,7 @@ services:
- 80:80 # Port for traefik because of the network_mode
traefik:
-image: traefik:v3.5
+image: traefik:v3.6
container_name: traefik
restart: unless-stopped
network_mode: service:gerbil # Ports appear on the gerbil service

View File

@@ -35,7 +35,7 @@ services:
- 80:80
{{end}}
traefik:
-image: docker.io/traefik:v3.5
+image: docker.io/traefik:v3.6
container_name: traefik
restart: unless-stopped
{{if .InstallGerbil}}

View File

@@ -1080,11 +1080,11 @@
"actionDeleteIdpOrg": "IDP-Organisationsrichtlinie löschen",
"actionListIdpOrgs": "IDP-Organisationen auflisten",
"actionUpdateIdpOrg": "IDP-Organisation aktualisieren",
"actionCreateClient": "Client anlegen",
"actionDeleteClient": "Client löschen",
"actionUpdateClient": "Client aktualisieren",
"actionCreateClient": "Kunde erstellen",
"actionDeleteClient": "Kunde löschen",
"actionUpdateClient": "Kunde aktualisieren",
"actionListClients": "Clients auflisten",
"actionGetClient": "Clients abrufen",
"actionGetClient": "Kunde holen",
"actionCreateSiteResource": "Site-Ressource erstellen",
"actionDeleteSiteResource": "Site-Ressource löschen",
"actionGetSiteResource": "Site-Ressource abrufen",
@@ -1432,14 +1432,14 @@
},
"siteRequired": "Standort ist erforderlich.",
"olmTunnel": "Olm-Tunnel",
"olmTunnelDescription": "Nutzen Sie Olm für die Kundenverbindung",
"olmTunnelDescription": "Nutzen Sie Olm für die Client-Verbindung",
"errorCreatingClient": "Fehler beim Erstellen des Clients",
"clientDefaultsNotFound": "Standardeinstellungen des Clients nicht gefunden",
"createClient": "Client erstellen",
"createClientDescription": "Erstellen Sie einen neuen Client für die Verbindung zu Ihren Standorten.",
"seeAllClients": "Alle Clients anzeigen",
"clientInformation": "Client Informationen",
"clientNamePlaceholder": "Client Name",
"clientInformation": "Client-Informationen",
"clientNamePlaceholder": "Client-Name",
"address": "Adresse",
"subnetPlaceholder": "Subnetz",
"addressDescription": "Die Adresse, die dieser Client für die Verbindung verwenden wird.",
@@ -2110,7 +2110,6 @@
"selectedResources": "Ausgewählte Ressourcen",
"enableSelected": "Ausgewählte aktivieren",
"disableSelected": "Ausgewählte deaktivieren",
"checkSelectedStatus": "Status der Auswahl überprüfen",
"credentials": "Zugangsdaten",
"savecredentials": "Zugangsdaten speichern",
"regeneratecredentials": "Re-Key",
@@ -2136,5 +2135,6 @@
"niceIdUpdateErrorDescription": "Beim Aktualisieren der Nizza-ID ist ein Fehler aufgetreten.",
"niceIdCannotBeEmpty": "Nizza-ID darf nicht leer sein",
"enterIdentifier": "Identifikator eingeben",
"identifier": "Identifier"
"identifier": "Identifier",
"checkSelectedStatus": "Status der Auswahl überprüfen"
}

View File

@@ -2089,9 +2089,9 @@
"olmUpdateAvailableInfo": "An updated version of Olm is available. Please update to the latest version for the best experience.",
"client": "Client",
"proxyProtocol": "Proxy Protocol Settings",
"proxyProtocolDescription": "Configure Proxy Protocol to preserve client IP addresses for TCP/UDP services.",
"proxyProtocolDescription": "Configure Proxy Protocol to preserve client IP addresses for TCP services.",
"enableProxyProtocol": "Enable Proxy Protocol",
"proxyProtocolInfo": "Preserve client IP addresses for TCP/UDP backends",
"proxyProtocolInfo": "Preserve client IP addresses for TCP backends",
"proxyProtocolVersion": "Proxy Protocol Version",
"version1": " Version 1 (Recommended)",
"version2": "Version 2",

messages/zh-TW.json (new file, 2099 lines)

File diff suppressed because it is too large.

package-lock.json (generated, 5434 lines changed)

File diff suppressed because it is too large.

View File

@@ -86,7 +86,7 @@
"eslint-config-next": "16.0.3",
"express": "5.1.0",
"express-rate-limit": "8.2.1",
"glob": "11.0.3",
"glob": "11.1.0",
"helmet": "8.1.0",
"http-errors": "2.0.0",
"i": "^0.3.7",
@@ -98,14 +98,14 @@
"lucide-react": "^0.552.0",
"maxmind": "5.0.1",
"moment": "2.30.1",
"next": "15.5.6",
"next": "15.5.7",
"next-intl": "^4.4.0",
"next-themes": "0.4.6",
"nextjs-toploader": "^3.9.17",
"node-cache": "5.1.2",
"node-fetch": "3.3.2",
"nodemailer": "7.0.10",
"npm": "^11.6.2",
"npm": "^11.6.4",
"nprogress": "^0.2.0",
"oslo": "1.2.1",
"pg": "^8.16.2",

View File

@@ -221,6 +221,7 @@ export async function updateProxyResources(
domainId: domain ? domain.domainId : null,
enabled: resourceEnabled,
sso: resourceData.auth?.["sso-enabled"] || false,
+skipToIdpId: resourceData.auth?.["auto-login-idp"] || null,
ssl: resourceSsl,
setHostHeader: resourceData["host-header"] || null,
tlsServerName: resourceData["tls-server-name"] || null,
@@ -610,6 +611,7 @@ export async function updateProxyResources(
domainId: domain ? domain.domainId : null,
enabled: resourceEnabled,
sso: resourceData.auth?.["sso-enabled"] || false,
+skipToIdpId: resourceData.auth?.["auto-login-idp"] || null,
setHostHeader: resourceData["host-header"] || null,
tlsServerName: resourceData["tls-server-name"] || null,
ssl: resourceSsl,

View File

@@ -59,6 +59,7 @@ export const AuthSchema = z.object({
}),
"sso-users": z.array(z.email()).optional().default([]),
"whitelist-users": z.array(z.email()).optional().default([]),
"auto-login-idp": z.int().positive().optional(),
});
export const RuleSchema = z.object({
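
The three hunks above wire up the same feature end to end: a commented auto-login-idp example in the sample config, the skipToIdpId column written by updateProxyResources, and this AuthSchema entry validating the value. A minimal standalone sketch of the validation step, assuming Zod 4 (where the z.int() shorthand used in the diff exists); the schema name and sample values here are illustrative:

import { z } from "zod";

// Hypothetical fragment mirroring the AuthSchema addition above.
const AuthSketch = z.object({
    "sso-enabled": z.boolean().optional().default(false),
    "auto-login-idp": z.int().positive().optional()
});

// A resource that skips the login page and jumps straight to IdP 1:
const auth = AuthSketch.parse({ "sso-enabled": true, "auto-login-idp": 1 });
const skipToIdpId = auth["auto-login-idp"] || null; // as updateProxyResources stores it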

View File

@@ -2,7 +2,7 @@ import path from "path";
import { fileURLToPath } from "url";
// This is a placeholder value replaced by the build process
export const APP_VERSION = "1.12.1";
export const APP_VERSION = "1.12.3";
export const __FILENAME = fileURLToPath(import.meta.url);
export const __DIRNAME = path.dirname(__FILENAME);

server/lib/lock.ts (new file, 111 lines)
View File

@@ -0,0 +1,111 @@
export class LockManager {
/**
* Acquire a distributed lock using Redis SET with NX and PX options
* @param lockKey - Unique identifier for the lock
* @param ttlMs - Time to live in milliseconds
* @returns Promise<boolean> - true if lock acquired, false otherwise
*/
async acquireLock(
lockKey: string,
ttlMs: number = 30000
): Promise<boolean> {
return true;
}
/**
* Release a lock using Lua script to ensure atomicity
* @param lockKey - Unique identifier for the lock
*/
async releaseLock(lockKey: string): Promise<void> {}
/**
* Force release a lock regardless of owner (use with caution)
* @param lockKey - Unique identifier for the lock
*/
async forceReleaseLock(lockKey: string): Promise<void> {}
/**
* Check if a lock exists and get its info
* @param lockKey - Unique identifier for the lock
* @returns Promise<{exists: boolean, ownedByMe: boolean, ttl: number}>
*/
async getLockInfo(lockKey: string): Promise<{
exists: boolean;
ownedByMe: boolean;
ttl: number;
owner?: string;
}> {
return { exists: true, ownedByMe: true, ttl: 0 };
}
/**
* Extend the TTL of an existing lock owned by this worker
* @param lockKey - Unique identifier for the lock
* @param ttlMs - New TTL in milliseconds
* @returns Promise<boolean> - true if extended successfully
*/
async extendLock(lockKey: string, ttlMs: number): Promise<boolean> {
return true;
}
/**
* Attempt to acquire lock with retries and exponential backoff
* @param lockKey - Unique identifier for the lock
* @param ttlMs - Time to live in milliseconds
* @param maxRetries - Maximum number of retry attempts
* @param baseDelayMs - Base delay between retries in milliseconds
* @returns Promise<boolean> - true if lock acquired
*/
async acquireLockWithRetry(
lockKey: string,
ttlMs: number = 30000,
maxRetries: number = 5,
baseDelayMs: number = 100
): Promise<boolean> {
return true;
}
/**
* Execute a function while holding a lock
* @param lockKey - Unique identifier for the lock
* @param fn - Function to execute while holding the lock
* @param ttlMs - Lock TTL in milliseconds
* @returns Promise<T> - Result of the executed function
*/
async withLock<T>(
lockKey: string,
fn: () => Promise<T>,
ttlMs: number = 30000
): Promise<T> {
const acquired = await this.acquireLock(lockKey, ttlMs);
if (!acquired) {
throw new Error(`Failed to acquire lock: ${lockKey}`);
}
try {
return await fn();
} finally {
await this.releaseLock(lockKey);
}
}
/**
* Clean up expired locks - Redis handles this automatically, but this method
* can be used to get statistics about locks
* @returns Promise<{activeLocksCount: number, locksOwnedByMe: number}>
*/
async getLockStatistics(): Promise<{
activeLocksCount: number;
locksOwnedByMe: number;
}> {
return { activeLocksCount: 0, locksOwnedByMe: 0 };
}
/**
* Close the Redis connection
*/
async disconnect(): Promise<void> {}
}
export const lockManager = new LockManager();
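
This community edition stub keeps the full LockManager surface while making every acquire succeed and every release a no-op, so calling code behaves the same whether or not the Redis-backed implementation below is present. A hedged usage sketch of the one method callers typically need, withLock (the function name and return value are illustrative, not from the diff):

import { lockManager } from "@server/lib/lock";

// Serialize subnet allocation per exit node, as the newt registration
// handler later in this commit does via getUniqueSubnetForSite.
async function allocateSubnet(exitNodeId: number): Promise<string> {
    return await lockManager.withLock(
        `subnet-allocation:${exitNodeId}`,
        async () => {
            // ...find and reserve a free subnet here...
            return "10.0.1.0/24"; // placeholder result
        },
        5000 // hold the lock for at most five seconds
    );
}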

server/private/lib/lock.ts (new file, 363 lines)
View File

@@ -0,0 +1,363 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { config } from "@server/lib/config";
import logger from "@server/logger";
import { redis } from "#private/lib/redis";
export class LockManager {
/**
* Acquire a distributed lock using Redis SET with NX and PX options
* @param lockKey - Unique identifier for the lock
* @param ttlMs - Time to live in milliseconds
* @returns Promise<boolean> - true if lock acquired, false otherwise
*/
async acquireLock(
lockKey: string,
ttlMs: number = 30000
): Promise<boolean> {
if (!redis || !redis.status || redis.status !== "ready") {
return true;
}
const lockValue = `${
config.getRawConfig().gerbil.exit_node_name
}:${Date.now()}`;
const redisKey = `lock:${lockKey}`;
try {
// Use SET with NX (only set if not exists) and PX (expire in milliseconds)
// This is atomic and handles both setting and expiration
const result = await redis.set(
redisKey,
lockValue,
"PX",
ttlMs,
"NX"
);
if (result === "OK") {
logger.debug(
`Lock acquired: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
);
return true;
}
// Check if the existing lock is from this worker (reentrant behavior)
const existingValue = await redis.get(redisKey);
if (
existingValue &&
existingValue.startsWith(
`${config.getRawConfig().gerbil.exit_node_name}:`
)
) {
// Extend the lock TTL since it's the same worker
await redis.pexpire(redisKey, ttlMs);
logger.debug(
`Lock extended: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
);
return true;
}
return false;
} catch (error) {
logger.error(`Failed to acquire lock ${lockKey}:`, error);
return false;
}
}
/**
* Release a lock using Lua script to ensure atomicity
* @param lockKey - Unique identifier for the lock
*/
async releaseLock(lockKey: string): Promise<void> {
if (!redis || !redis.status || redis.status !== "ready") {
return;
}
const redisKey = `lock:${lockKey}`;
// Lua script to ensure we only delete the lock if it belongs to this worker
const luaScript = `
local key = KEYS[1]
local worker_prefix = ARGV[1]
local current_value = redis.call('GET', key)
if current_value and string.find(current_value, worker_prefix, 1, true) == 1 then
return redis.call('DEL', key)
else
return 0
end
`;
try {
const result = (await redis.eval(
luaScript,
1,
redisKey,
`${config.getRawConfig().gerbil.exit_node_name}:`
)) as number;
if (result === 1) {
logger.debug(
`Lock released: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
);
} else {
logger.warn(
`Lock not released - not owned by worker: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
);
}
} catch (error) {
logger.error(`Failed to release lock ${lockKey}:`, error);
}
}
/**
* Force release a lock regardless of owner (use with caution)
* @param lockKey - Unique identifier for the lock
*/
async forceReleaseLock(lockKey: string): Promise<void> {
if (!redis || !redis.status || redis.status !== "ready") {
return;
}
const redisKey = `lock:${lockKey}`;
try {
const result = await redis.del(redisKey);
if (result === 1) {
logger.debug(`Lock force released: ${lockKey}`);
}
} catch (error) {
logger.error(`Failed to force release lock ${lockKey}:`, error);
}
}
/**
* Check if a lock exists and get its info
* @param lockKey - Unique identifier for the lock
* @returns Promise<{exists: boolean, ownedByMe: boolean, ttl: number}>
*/
async getLockInfo(lockKey: string): Promise<{
exists: boolean;
ownedByMe: boolean;
ttl: number;
owner?: string;
}> {
if (!redis || !redis.status || redis.status !== "ready") {
return { exists: false, ownedByMe: true, ttl: 0 };
}
const redisKey = `lock:${lockKey}`;
try {
const [value, ttl] = await Promise.all([
redis.get(redisKey),
redis.pttl(redisKey)
]);
const exists = value !== null;
const ownedByMe =
exists &&
value!.startsWith(`${config.getRawConfig().gerbil.exit_node_name}:`);
const owner = exists ? value!.split(":")[0] : undefined;
return {
exists,
ownedByMe,
ttl: ttl > 0 ? ttl : 0,
owner
};
} catch (error) {
logger.error(`Failed to get lock info ${lockKey}:`, error);
return { exists: false, ownedByMe: false, ttl: 0 };
}
}
/**
* Extend the TTL of an existing lock owned by this worker
* @param lockKey - Unique identifier for the lock
* @param ttlMs - New TTL in milliseconds
* @returns Promise<boolean> - true if extended successfully
*/
async extendLock(lockKey: string, ttlMs: number): Promise<boolean> {
if (!redis || !redis.status || redis.status !== "ready") {
return true;
}
const redisKey = `lock:${lockKey}`;
// Lua script to extend TTL only if lock is owned by this worker
const luaScript = `
local key = KEYS[1]
local worker_prefix = ARGV[1]
local ttl = tonumber(ARGV[2])
local current_value = redis.call('GET', key)
if current_value and string.find(current_value, worker_prefix, 1, true) == 1 then
return redis.call('PEXPIRE', key, ttl)
else
return 0
end
`;
try {
const result = (await redis.eval(
luaScript,
1,
redisKey,
`${config.getRawConfig().gerbil.exit_node_name}:`,
ttlMs.toString()
)) as number;
if (result === 1) {
logger.debug(
`Lock extended: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
} for ${ttlMs}ms`
);
return true;
}
return false;
} catch (error) {
logger.error(`Failed to extend lock ${lockKey}:`, error);
return false;
}
}
/**
* Attempt to acquire lock with retries and exponential backoff
* @param lockKey - Unique identifier for the lock
* @param ttlMs - Time to live in milliseconds
* @param maxRetries - Maximum number of retry attempts
* @param baseDelayMs - Base delay between retries in milliseconds
* @returns Promise<boolean> - true if lock acquired
*/
async acquireLockWithRetry(
lockKey: string,
ttlMs: number = 30000,
maxRetries: number = 5,
baseDelayMs: number = 100
): Promise<boolean> {
if (!redis || !redis.status || redis.status !== "ready") {
return true;
}
for (let attempt = 0; attempt <= maxRetries; attempt++) {
const acquired = await this.acquireLock(lockKey, ttlMs);
if (acquired) {
return true;
}
if (attempt < maxRetries) {
// Exponential backoff with jitter
const delay =
baseDelayMs * Math.pow(2, attempt) + Math.random() * 100;
await new Promise((resolve) => setTimeout(resolve, delay));
}
}
logger.warn(
`Failed to acquire lock ${lockKey} after ${maxRetries + 1} attempts`
);
return false;
}
/**
* Execute a function while holding a lock
* @param lockKey - Unique identifier for the lock
* @param fn - Function to execute while holding the lock
* @param ttlMs - Lock TTL in milliseconds
* @returns Promise<T> - Result of the executed function
*/
async withLock<T>(
lockKey: string,
fn: () => Promise<T>,
ttlMs: number = 30000
): Promise<T> {
if (!redis || !redis.status || redis.status !== "ready") {
return await fn();
}
const acquired = await this.acquireLock(lockKey, ttlMs);
if (!acquired) {
throw new Error(`Failed to acquire lock: ${lockKey}`);
}
try {
return await fn();
} finally {
await this.releaseLock(lockKey);
}
}
/**
* Clean up expired locks - Redis handles this automatically, but this method
* can be used to get statistics about locks
* @returns Promise<{activeLocksCount: number, locksOwnedByMe: number}>
*/
async getLockStatistics(): Promise<{
activeLocksCount: number;
locksOwnedByMe: number;
}> {
if (!redis || !redis.status || redis.status !== "ready") {
return { activeLocksCount: 0, locksOwnedByMe: 0 };
}
try {
const keys = await redis.keys("lock:*");
let locksOwnedByMe = 0;
if (keys.length > 0) {
const values = await redis.mget(...keys);
locksOwnedByMe = values.filter(
(value) =>
value &&
value.startsWith(
`${config.getRawConfig().gerbil.exit_node_name}:`
)
).length;
}
return {
activeLocksCount: keys.length,
locksOwnedByMe
};
} catch (error) {
logger.error("Failed to get lock statistics:", error);
return { activeLocksCount: 0, locksOwnedByMe: 0 };
}
}
/**
* Close the Redis connection
*/
async disconnect(): Promise<void> {
if (!redis || !redis.status || redis.status !== "ready") {
return;
}
await redis.quit();
}
}
export const lockManager = new LockManager();
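
The heart of the Redis implementation is the single atomic command SET key value PX ttl NX: the lock is created and given an expiry in one step, and a second contender simply gets null back. A minimal sketch of that race, assuming ioredis (whose set(key, value, "PX", ttl, "NX") signature matches the call in the diff):

import Redis from "ioredis";

async function demo() {
    const redis = new Redis(); // assumes a local Redis on the default port
    const key = "lock:demo";
    // The first SET wins; NX makes the second one a no-op.
    const a = await redis.set(key, `worker-a:${Date.now()}`, "PX", 5000, "NX");
    const b = await redis.set(key, `worker-b:${Date.now()}`, "PX", 5000, "NX");
    console.log(a, b); // "OK" null
    await redis.quit();
}

demo();

Prefixing the stored value with the exit node name is what lets releaseLock and extendLock verify ownership inside their Lua scripts, and what gives acquireLock its reentrant behavior for the same worker.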

View File

@@ -1742,7 +1742,12 @@ hybridRouter.post(
tls: logEntry.tls
}));
-await db.insert(requestAuditLog).values(logEntries);
+// batch them into inserts of 100 to avoid exceeding parameter limits
+const batchSize = 100;
+for (let i = 0; i < logEntries.length; i += batchSize) {
+const batch = logEntries.slice(i, i + batchSize);
+await db.insert(requestAuditLog).values(batch);
+}
return response(res, {
data: null,
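
The batching matters because every column of every row becomes one bound parameter in the generated INSERT, and drivers cap the total (65535 parameters in PostgreSQL, and as low as 999 in older SQLite builds). A generic sketch of the chunking pattern applied above; the helper name is illustrative:

// Chunk an array so each INSERT stays under the driver's bind-parameter
// limit; with on the order of a dozen columns per audit row, 100 rows per
// batch leaves ample headroom.
function chunk<T>(items: T[], size: number): T[][] {
    const out: T[][] = [];
    for (let i = 0; i < items.length; i += size) {
        out.push(items.slice(i, i + size));
    }
    return out;
}

// for (const batch of chunk(logEntries, 100)) {
//     await db.insert(requestAuditLog).values(batch);
// }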

View File

@@ -1,8 +1,8 @@
-import { db, exitNodeOrgs, newts } from "@server/db";
+import { db, ExitNode, exitNodeOrgs, newts, Transaction } from "@server/db";
import { MessageHandler } from "@server/routers/ws";
import { exitNodes, Newt, resources, sites, Target, targets } from "@server/db";
import { targetHealthCheck } from "@server/db";
-import { eq, and, sql, inArray } from "drizzle-orm";
+import { eq, and, sql, inArray, ne } from "drizzle-orm";
import { addPeer, deletePeer } from "../gerbil/peers";
import logger from "@server/logger";
import config from "@server/lib/config";
@@ -17,6 +17,7 @@ import {
verifyExitNodeOrgAccess
} from "#dynamic/lib/exitNodes";
import { fetchContainers } from "./dockerSocket";
+import { lockManager } from "#dynamic/lib/lock";
export type ExitNodePingResult = {
exitNodeId: number;
@@ -151,27 +152,8 @@ export const handleNewtRegisterMessage: MessageHandler = async (context) => {
return;
}
-const sitesQuery = await db
-.select({
-subnet: sites.subnet
-})
-.from(sites)
-.where(eq(sites.exitNodeId, exitNodeId));
+const newSubnet = await getUniqueSubnetForSite(exitNode);
-const blockSize = config.getRawConfig().gerbil.site_block_size;
-const subnets = sitesQuery
-.map((site) => site.subnet)
-.filter(
-(subnet) =>
-subnet && /^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$/.test(subnet)
-)
-.filter((subnet) => subnet !== null);
-subnets.push(exitNode.address.replace(/\/\d+$/, `/${blockSize}`));
-const newSubnet = findNextAvailableCidr(
-subnets,
-blockSize,
-exitNode.address
-);
if (!newSubnet) {
logger.error(
`No available subnets found for the new exit node id ${exitNodeId} and site id ${siteId}`
@@ -378,3 +360,39 @@ export const handleNewtRegisterMessage: MessageHandler = async (context) => {
excludeSender: false // Include sender in broadcast
};
};
+async function getUniqueSubnetForSite(
+exitNode: ExitNode,
+trx: Transaction | typeof db = db
+): Promise<string | null> {
+const lockKey = `subnet-allocation:${exitNode.exitNodeId}`;
+return await lockManager.withLock(
+lockKey,
+async () => {
+const sitesQuery = await trx
+.select({
+subnet: sites.subnet
+})
+.from(sites)
+.where(eq(sites.exitNodeId, exitNode.exitNodeId));
+const blockSize = config.getRawConfig().gerbil.site_block_size;
+const subnets = sitesQuery
+.map((site) => site.subnet)
+.filter(
+(subnet) =>
+subnet && /^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$/.test(subnet)
+)
+.filter((subnet) => subnet !== null);
+subnets.push(exitNode.address.replace(/\/\d+$/, `/${blockSize}`));
+const newSubnet = findNextAvailableCidr(
+subnets,
+blockSize,
+exitNode.address
+);
+return newSubnet;
+},
+5000 // 5 second lock TTL - subnet allocation should be quick
+);
+}
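
findNextAvailableCidr is the project's own helper; the real change here is wrapping the read-compute-assign sequence in withLock, so two Newts registering concurrently on one exit node cannot both pick the same free block. For orientation, a self-contained sketch of what a next-available-CIDR scan does (illustrative only, not the project's implementation):

// Scan /blockSize blocks inside `root` and return the first one whose
// base address no entry of `taken` falls into.
function ipToInt(ip: string): number {
    return ip.split(".").reduce((acc, o) => acc * 256 + parseInt(o, 10), 0);
}

function intToIp(n: number): string {
    return [
        Math.floor(n / 2 ** 24) % 256,
        Math.floor(n / 2 ** 16) % 256,
        Math.floor(n / 2 ** 8) % 256,
        n % 256
    ].join(".");
}

function nextAvailableCidr(
    taken: string[],
    blockSize: number,
    root: string
): string | null {
    const [rootIp, rootPrefix] = root.split("/");
    const rootSpan = 2 ** (32 - parseInt(rootPrefix, 10));
    const step = 2 ** (32 - blockSize);
    const base = Math.floor(ipToInt(rootIp) / rootSpan) * rootSpan;
    const takenBases = new Set(
        taken.map((c) => Math.floor(ipToInt(c.split("/")[0]) / step) * step)
    );
    for (let b = base; b < base + rootSpan; b += step) {
        if (!takenBases.has(b)) {
            return `${intToIp(b)}/${blockSize}`;
        }
    }
    return null;
}

For example, with taken = ["10.0.0.0/24"], blockSize = 24, and root = "10.0.0.0/16", the sketch returns "10.0.1.0/24".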

View File

@@ -329,7 +329,7 @@ async function updateHttpResource(
}
}
-let headers = null;
+let headers = resource.headers;
if (updateData.headers) {
headers = JSON.stringify(updateData.headers);
}

View File

@@ -198,6 +198,62 @@ export async function createSite(
}
}
+if (subnet && exitNodeId) {
+//make sure the subnet is in the range of the exit node if provided
+const [exitNode] = await db
+.select()
+.from(exitNodes)
+.where(eq(exitNodes.exitNodeId, exitNodeId));
+if (!exitNode) {
+return next(
+createHttpError(HttpCode.NOT_FOUND, "Exit node not found")
+);
+}
+if (!exitNode.address) {
+return next(
+createHttpError(
+HttpCode.BAD_REQUEST,
+"Exit node has no subnet defined"
+)
+);
+}
+const subnetIp = subnet.split("/")[0];
+if (!isIpInCidr(subnetIp, exitNode.address)) {
+return next(
+createHttpError(
+HttpCode.BAD_REQUEST,
+"Subnet is not in the CIDR range of the exit node address."
+)
+);
+}
+// lets also make sure there is no overlap with other sites on the exit node
+const sitesQuery = await db
+.select({
+subnet: sites.subnet
+})
+.from(sites)
+.where(
+and(
+eq(sites.exitNodeId, exitNodeId),
+eq(sites.subnet, subnet)
+)
+);
+if (sitesQuery.length > 0) {
+return next(
+createHttpError(
+HttpCode.CONFLICT,
+`Subnet ${subnet} overlaps with an existing site on this exit node. Please restart site creation.`
+)
+);
+}
+}
const niceId = await getUniqueSiteName(orgId);
let newSite: Site;
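
isIpInCidr comes from the project's helpers; the test it performs can be sketched as checking whether both addresses fall in the same prefix-aligned block (IPv4 only, matching the dotted-quad regex used elsewhere in this commit; the function name here is a hypothetical stand-in):

// Illustrative membership test: true when `ip` lies inside `cidr`.
function isIpInCidrSketch(ip: string, cidr: string): boolean {
    const toInt = (a: string) =>
        a.split(".").reduce((acc, o) => acc * 256 + parseInt(o, 10), 0);
    const [net, prefix] = cidr.split("/");
    const span = 2 ** (32 - parseInt(prefix, 10));
    return Math.floor(toInt(ip) / span) === Math.floor(toInt(net) / span);
}

Note that the duplicate check above uses exact equality on sites.subnet, so it rejects a byte-identical subnet on the same exit node rather than detecting arbitrary range overlap.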

View File

@@ -56,6 +56,10 @@ export default function LocaleSwitcher() {
{
value: "nb-NO",
label: "Norsk (Bokmål)"
},
+{
+value: "zh-TW",
+label: "繁體中文"
+}
]}
/>

View File

@@ -1,4 +1,4 @@
export type Locale = (typeof locales)[number];
-export const locales = ['en-US', 'es-ES', 'fr-FR', 'de-DE', 'nl-NL', 'it-IT', 'pl-PL', 'pt-PT', 'tr-TR', 'zh-CN', 'ko-KR', 'bg-BG', 'cs-CZ', 'ru-RU', 'nb-NO'] as const;
+export const locales = ['en-US', 'es-ES', 'fr-FR', 'de-DE', 'nl-NL', 'it-IT', 'pl-PL', 'pt-PT', 'tr-TR', 'zh-CN', 'ko-KR', 'bg-BG', 'cs-CZ', 'ru-RU', 'nb-NO', 'zh-TW'] as const;
export const defaultLocale: Locale = 'en-US';