Mirror of https://github.com/fosrl/pangolin.git (synced 2026-01-30 14:50:45 +00:00)

Comparing commits 1.11.0 ... 1.13.0-rc. — 949 commits
[Commit listing (949 commits) omitted: the export captured only bare commit SHA1s — the author, date, and message columns were empty. The substance of the comparison is in the file diffs below.]
.github/workflows/cicd.yml (vendored) — 125 lines changed

@@ -1,34 +1,62 @@
 name: CI/CD Pipeline
+
+# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
+# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.
+
+permissions:
+    contents: read
+    packages: write # for GHCR push
+    id-token: write # for Cosign Keyless (OIDC) Signing
+
+# Required secrets:
+# - DOCKER_HUB_USERNAME / DOCKER_HUB_ACCESS_TOKEN: push to Docker Hub
+# - GITHUB_TOKEN: used for GHCR login and OIDC keyless signing
+# - COSIGN_PRIVATE_KEY / COSIGN_PASSWORD / COSIGN_PUBLIC_KEY: for key-based signing
+
 on:
     push:
         tags:
             - "[0-9]+.[0-9]+.[0-9]+"
+            - "[0-9]+.[0-9]+.[0-9]+.rc.[0-9]+"
+
+concurrency:
+    group: ${{ github.ref }}
+    cancel-in-progress: true
 
 jobs:
     release:
         name: Build and Release
-        runs-on: amd64-runner
+        runs-on: [self-hosted, linux, x64]
+        # Job-level timeout to avoid runaway or stuck runs
+        timeout-minutes: 120
+        env:
+            # Target images
+            DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
+            GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
 
         steps:
            - name: Checkout code
-              uses: actions/checkout@v5
+              uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+
+           - name: Set up QEMU
+              uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
 
            - name: Set up Docker Buildx
-              uses: docker/setup-buildx-action@v3
+              uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
 
            - name: Log in to Docker Hub
-              uses: docker/login-action@v3
+              uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
              with:
                  registry: docker.io
                  username: ${{ secrets.DOCKER_HUB_USERNAME }}
                  password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
 
            - name: Extract tag name
              id: get-tag
              run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
              shell: bash
 
            - name: Install Go
-              uses: actions/setup-go@v6
+              uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
              with:
                  go-version: 1.24

@@ -37,18 +65,21 @@ jobs:
                  TAG=${{ env.TAG }}
                  sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
                  cat server/lib/consts.ts
              shell: bash
 
            - name: Pull latest Gerbil version
              id: get-gerbil-tag
              run: |
                  LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name')
                  echo "LATEST_GERBIL_TAG=$LATEST_TAG" >> $GITHUB_ENV
              shell: bash
 
            - name: Pull latest Badger version
              id: get-badger-tag
              run: |
                  LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name')
                  echo "LATEST_BADGER_TAG=$LATEST_TAG" >> $GITHUB_ENV
              shell: bash
 
            - name: Update install/main.go
              run: |

@@ -60,6 +91,7 @@ jobs:
                  sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$BADGER_VERSION\"/" install/main.go
                  echo "Updated install/main.go with Pangolin version $PANGOLIN_VERSION, Gerbil version $GERBIL_VERSION, and Badger version $BADGER_VERSION"
                  cat install/main.go
              shell: bash
 
            - name: Build installer
              working-directory: install

@@ -67,12 +99,89 @@ jobs:
                  make go-build-release
 
            - name: Upload artifacts from /install/bin
-              uses: actions/upload-artifact@v4
+              uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
              with:
                  name: install-bin
                  path: install/bin/
 
-           - name: Build and push Docker images
+           - name: Build and push Docker images (Docker Hub)
              run: |
                  TAG=${{ env.TAG }}
                  make build-release tag=$TAG
+                  echo "Built & pushed to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
              shell: bash
+
+           - name: Install skopeo + jq
+             # skopeo: copy/inspect images between registries
+             # jq: JSON parsing tool used to extract digest values
+             run: |
+                 sudo apt-get update -y
+                 sudo apt-get install -y skopeo jq
+                 skopeo --version
+             shell: bash
+
+           - name: Login to GHCR
+             run: |
+                 skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
+             shell: bash
+
+           - name: Copy tag from Docker Hub to GHCR
+             # Mirror the already-built image (all architectures) to GHCR so we can sign it
+             run: |
+                 set -euo pipefail
+                 TAG=${{ env.TAG }}
+                 echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
+                 skopeo copy --all --retry-times 3 \
+                   docker://$DOCKERHUB_IMAGE:$TAG \
+                   docker://$GHCR_IMAGE:$TAG
+             shell: bash
+
+           - name: Login to GitHub Container Registry (for cosign)
+             uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+             with:
+                 registry: ghcr.io
+                 username: ${{ github.actor }}
+                 password: ${{ secrets.GITHUB_TOKEN }}
+
+           - name: Install cosign
+             # cosign is used to sign and verify container images (key and keyless)
+             uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
+
+           - name: Dual-sign and verify (GHCR & Docker Hub)
+             # Sign each image by digest using keyless (OIDC) and key-based signing,
+             # then verify both the public key signature and the keyless OIDC signature.
+             env:
+                 TAG: ${{ env.TAG }}
+                 COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
+                 COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
+                 COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
+                 COSIGN_YES: "true"
+             run: |
+                 set -euo pipefail
+
+                 issuer="https://token.actions.githubusercontent.com"
+                 id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)
+
+                 for IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
+                   echo "Processing ${IMAGE}:${TAG}"
+
+                   DIGEST="$(skopeo inspect --retry-times 3 docker://${IMAGE}:${TAG} | jq -r '.Digest')"
+                   REF="${IMAGE}@${DIGEST}"
+                   echo "Resolved digest: ${REF}"
+
+                   echo "==> cosign sign (keyless) --recursive ${REF}"
+                   cosign sign --recursive "${REF}"
+
+                   echo "==> cosign sign (key) --recursive ${REF}"
+                   cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"
+
+                   echo "==> cosign verify (public key) ${REF}"
+                   cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text
+
+                   echo "==> cosign verify (keyless policy) ${REF}"
+                   cosign verify \
+                     --certificate-oidc-issuer "${issuer}" \
+                     --certificate-identity-regexp "${id_regex}" \
+                     "${REF}" -o text
+                 done
+             shell: bash
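A consumer can run the same two verifications locally against a published image. A minimal sketch (the tag shown is illustrative, and the cosign.pub path assumes you have obtained the project's public key out of band; the issuer and identity regexp mirror the workflow above):

    # Keyless (OIDC) verification against the GitHub Actions identity
    cosign verify \
      --certificate-oidc-issuer "https://token.actions.githubusercontent.com" \
      --certificate-identity-regexp "^https://github.com/fosrl/pangolin/.+" \
      ghcr.io/fosrl/pangolin:1.13.0 -o text

    # Key-based verification with the maintainers' published public key
    cosign verify --key cosign.pub ghcr.io/fosrl/pangolin:1.13.0 -o text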
.github/workflows/linting.yml (vendored) — 9 lines changed

@@ -1,5 +1,8 @@
 name: ESLint
 
+permissions:
+    contents: read
+
 on:
     pull_request:
         paths:

@@ -18,10 +21,10 @@ jobs:
        runs-on: ubuntu-latest
        steps:
            - name: Checkout code
-              uses: actions/checkout@v5
+              uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
 
            - name: Set up Node.js
-              uses: actions/setup-node@v5
+              uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
              with:
                  node-version: '22'

@@ -32,4 +35,4 @@ jobs:
              run: npm run set:oss
 
            - name: Run ESLint
-              run: npx eslint . --ext .js,.jsx,.ts,.tsx
\ No newline at end of file
+              run: npx eslint . --ext .js,.jsx,.ts,.tsx
.github/workflows/mirror.yaml (vendored, new file) — 132 lines

@@ -0,0 +1,132 @@
+name: Mirror & Sign (Docker Hub to GHCR)
+
+on:
+    workflow_dispatch: {}
+
+permissions:
+    contents: read
+    packages: write
+    id-token: write # for keyless OIDC
+
+env:
+    SOURCE_IMAGE: docker.io/fosrl/pangolin
+    DEST_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
+
+jobs:
+    mirror-and-dual-sign:
+        runs-on: amd64-runner
+        steps:
+            - name: Install skopeo + jq
+              run: |
+                  sudo apt-get update -y
+                  sudo apt-get install -y skopeo jq
+                  skopeo --version
+
+            - name: Install cosign
+              uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
+
+            - name: Input check
+              run: |
+                  test -n "${SOURCE_IMAGE}" || (echo "SOURCE_IMAGE is empty" && exit 1)
+                  echo "Source : ${SOURCE_IMAGE}"
+                  echo "Target : ${DEST_IMAGE}"
+
+            # Auth for skopeo (containers-auth)
+            - name: Skopeo login to GHCR
+              run: |
+                  skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
+
+            # Auth for cosign (docker-config)
+            - name: Docker login to GHCR (for cosign)
+              run: |
+                  echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u "${{ github.actor }}" --password-stdin
+
+            - name: List source tags
+              run: |
+                  set -euo pipefail
+                  skopeo list-tags --retry-times 3 docker://"${SOURCE_IMAGE}" \
+                    | jq -r '.Tags[]' | sort -u > src-tags.txt
+                  echo "Found source tags: $(wc -l < src-tags.txt)"
+                  head -n 20 src-tags.txt || true
+
+            - name: List destination tags (skip existing)
+              run: |
+                  set -euo pipefail
+                  if skopeo list-tags --retry-times 3 docker://"${DEST_IMAGE}" >/tmp/dst.json 2>/dev/null; then
+                    jq -r '.Tags[]' /tmp/dst.json | sort -u > dst-tags.txt
+                  else
+                    : > dst-tags.txt
+                  fi
+                  echo "Existing destination tags: $(wc -l < dst-tags.txt)"
+
+            - name: Mirror, dual-sign, and verify
+              env:
+                  # keyless
+                  COSIGN_YES: "true"
+                  # key-based
+                  COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
+                  COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
+                  # verify
+                  COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
+              run: |
+                  set -euo pipefail
+                  copied=0; skipped=0; v_ok=0; errs=0
+
+                  issuer="https://token.actions.githubusercontent.com"
+                  id_regex="^https://github.com/${{ github.repository }}/.+"
+
+                  while read -r tag; do
+                    [ -z "$tag" ] && continue
+
+                    if grep -Fxq "$tag" dst-tags.txt; then
+                      echo "::notice ::Skip (exists) ${DEST_IMAGE}:${tag}"
+                      skipped=$((skipped+1))
+                      continue
+                    fi
+
+                    echo "==> Copy ${SOURCE_IMAGE}:${tag} → ${DEST_IMAGE}:${tag}"
+                    if ! skopeo copy --all --retry-times 3 \
+                      docker://"${SOURCE_IMAGE}:${tag}" docker://"${DEST_IMAGE}:${tag}"; then
+                      echo "::warning title=Copy failed::${SOURCE_IMAGE}:${tag}"
+                      errs=$((errs+1)); continue
+                    fi
+                    copied=$((copied+1))
+
+                    digest="$(skopeo inspect --retry-times 3 docker://"${DEST_IMAGE}:${tag}" | jq -r '.Digest')"
+                    ref="${DEST_IMAGE}@${digest}"
+
+                    echo "==> cosign sign (keyless) --recursive ${ref}"
+                    if ! cosign sign --recursive "${ref}"; then
+                      echo "::warning title=Keyless sign failed::${ref}"
+                      errs=$((errs+1))
+                    fi
+
+                    echo "==> cosign sign (key) --recursive ${ref}"
+                    if ! cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${ref}"; then
+                      echo "::warning title=Key sign failed::${ref}"
+                      errs=$((errs+1))
+                    fi
+
+                    echo "==> cosign verify (public key) ${ref}"
+                    if ! cosign verify --key env://COSIGN_PUBLIC_KEY "${ref}" -o text; then
+                      echo "::warning title=Verify(pubkey) failed::${ref}"
+                      errs=$((errs+1))
+                    fi
+
+                    echo "==> cosign verify (keyless policy) ${ref}"
+                    if ! cosign verify \
+                      --certificate-oidc-issuer "${issuer}" \
+                      --certificate-identity-regexp "${id_regex}" \
+                      "${ref}" -o text; then
+                      echo "::warning title=Verify(keyless) failed::${ref}"
+                      errs=$((errs+1))
+                    else
+                      v_ok=$((v_ok+1))
+                    fi
+                  done < src-tags.txt
+
+                  echo "---- Summary ----"
+                  echo "Copied      : $copied"
+                  echo "Skipped     : $skipped"
+                  echo "Verified OK : $v_ok"
+                  echo "Errors      : $errs"
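To preview locally which tags this job would actually copy, the same skopeo/jq tooling can be combined with comm — a sketch assuming skopeo and jq are installed and both registries are reachable (the destination path is illustrative):

    src=docker.io/fosrl/pangolin
    dst=ghcr.io/fosrl/pangolin
    skopeo list-tags docker://"$src" | jq -r '.Tags[]' | sort -u > src-tags.txt
    skopeo list-tags docker://"$dst" | jq -r '.Tags[]' | sort -u > dst-tags.txt
    # Tags present at the source but missing at the destination:
    comm -23 src-tags.txt dst-tags.txt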
.github/workflows/stale-bot.yml (vendored) — 4 lines changed

@@ -14,7 +14,7 @@ jobs:
    stale:
        runs-on: ubuntu-latest
        steps:
-           - uses: actions/stale@v10
+           - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
             with:
                 days-before-stale: 14
                 days-before-close: 14

@@ -34,4 +34,4 @@ jobs:
                 operations-per-run: 100
                 remove-stale-when-updated: true
                 delete-branch: false
-                enable-statistics: true
\ No newline at end of file
+                enable-statistics: true
.github/workflows/test.yml (vendored) — 7 lines changed

@@ -1,5 +1,8 @@
 name: Run Tests
 
+permissions:
+    contents: read
+
 on:
     pull_request:
         branches:

@@ -11,9 +14,9 @@ jobs:
        runs-on: ubuntu-latest
 
        steps:
-           - uses: actions/checkout@v5
+           - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
 
-           - uses: actions/setup-node@v5
+           - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
             with:
                 node-version: '22'
.gitignore (vendored) — 3 lines changed

@@ -49,4 +49,5 @@ postgres/
 dynamic/
 *.mmdb
 scratch/
-tsconfig.json
\ No newline at end of file
+tsconfig.json
+hydrateSaas.ts
@@ -4,7 +4,7 @@ Contributions are welcome!
 
 Please see the contribution and local development guide on the docs page before getting started:
 
-https://docs.digpangolin.com/development/contributing
+https://docs.pangolin.net/development/contributing
 
 ### Licensing Considerations
Dockerfile — 16 lines changed

@@ -1,10 +1,12 @@
-FROM node:22-alpine AS builder
+FROM node:24-alpine AS builder
 
 WORKDIR /app
 
 ARG BUILD=oss
 ARG DATABASE=sqlite
 
+RUN apk add --no-cache curl tzdata python3 make g++
+
 # COPY package.json package-lock.json ./
 COPY package*.json ./
 RUN npm ci

@@ -12,8 +14,9 @@ RUN npm ci
 COPY . .
 
 RUN echo "export * from \"./$DATABASE\";" > server/db/index.ts
+RUN echo "export const driver: \"pg\" | \"sqlite\" = \"$DATABASE\";" >> server/db/index.ts
 
-RUN echo "export const build = \"$BUILD\" as any;" > server/build.ts
+RUN echo "export const build = \"$BUILD\" as \"saas\" | \"enterprise\" | \"oss\";" > server/build.ts
 
 # Copy the appropriate TypeScript configuration based on build type
 RUN if [ "$BUILD" = "oss" ]; then cp tsconfig.oss.json tsconfig.json; \

@@ -30,9 +33,9 @@ RUN mkdir -p dist
 RUN npm run next:build
 RUN node esbuild.mjs -e server/index.ts -o dist/server.mjs -b $BUILD
 RUN if [ "$DATABASE" = "pg" ]; then \
-    node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs; \
+        node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs; \
     else \
-    node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs; \
+        node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs; \
     fi
 
 # test to make sure the build output is there and error if not

@@ -40,12 +43,13 @@ RUN test -f dist/server.mjs
 
 RUN npm run build:cli
 
-FROM node:22-alpine AS runner
+FROM node:24-alpine AS runner
 
 WORKDIR /app
 
 # Curl used for the health checks
-RUN apk add --no-cache curl tzdata
+# Python and build tools needed for better-sqlite3 native compilation
+RUN apk add --no-cache curl tzdata python3 make g++
 
 # COPY package.json package-lock.json ./
 COPY package*.json ./
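The BUILD and DATABASE build args select the edition and the database driver baked into the image. A local single-platform build might look like this (the image tag is arbitrary; the arg values are the ones the Makefile passes):

    docker build --build-arg BUILD=oss --build-arg DATABASE=pg -t pangolin:dev .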
Makefile — 30 lines changed

@@ -44,6 +44,36 @@ build-release:
 		--tag fosrl/pangolin:ee-postgresql-$(tag) \
 		--push .
 
+build-rc:
+	@if [ -z "$(tag)" ]; then \
+		echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
+		exit 1; \
+	fi
+	docker buildx build \
+		--build-arg BUILD=oss \
+		--build-arg DATABASE=sqlite \
+		--platform linux/arm64,linux/amd64 \
+		--tag fosrl/pangolin:$(tag) \
+		--push .
+	docker buildx build \
+		--build-arg BUILD=oss \
+		--build-arg DATABASE=pg \
+		--platform linux/arm64,linux/amd64 \
+		--tag fosrl/pangolin:postgresql-$(tag) \
+		--push .
+	docker buildx build \
+		--build-arg BUILD=enterprise \
+		--build-arg DATABASE=sqlite \
+		--platform linux/arm64,linux/amd64 \
+		--tag fosrl/pangolin:ee-$(tag) \
+		--push .
+	docker buildx build \
+		--build-arg BUILD=enterprise \
+		--build-arg DATABASE=pg \
+		--platform linux/arm64,linux/amd64 \
+		--tag fosrl/pangolin:ee-postgresql-$(tag) \
+		--push .
+
 build-arm:
 	docker buildx build --platform linux/arm64 -t fosrl/pangolin:latest .
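These multi-platform --push targets require a Docker Buildx builder with emulation available for the non-native architecture. One plausible one-time setup before running them (standard Docker tooling, not part of this Makefile; the rc tag value is illustrative):

    docker buildx create --name multiarch --use
    docker run --privileged --rm tonistiigi/binfmt --install arm64   # register QEMU emulation
    make build-rc tag=1.13.0-rc.1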
README.md — 29 lines changed

@@ -1,6 +1,6 @@
 <div align="center">
   <h2>
-    <a href="https://digpangolin.com">
+    <a href="https://pangolin.net/">
       <picture>
         <source media="(prefers-color-scheme: dark)" srcset="public/logo/word_mark_white.png">
         <img alt="Pangolin Logo" src="public/logo/word_mark_black.png" width="350">

@@ -11,15 +11,15 @@
 
 <div align="center">
   <h5>
-    <a href="https://digpangolin.com">
+    <a href="https://pangolin.net/">
       Website
     </a>
     <span> | </span>
-    <a href="https://docs.digpangolin.com/">
+    <a href="https://docs.pangolin.net/">
       Documentation
     </a>
     <span> | </span>
-    <a href="mailto:contact@fossorial.io">
+    <a href="mailto:contact@pangolin.net">
       Contact Us
     </a>
   </h5>

@@ -28,7 +28,7 @@
 <div align="center">
 
 [](https://discord.gg/HCJR8Xhme4)
-[](https://digpangolin.com/slack)
+[](https://pangolin.net/slack)
 [](https://hub.docker.com/r/fosrl/pangolin)
 
 [](https://www.youtube.com/@fossorial-app)

@@ -37,7 +37,7 @@
 
 <p align="center">
   <strong>
-    Start testing Pangolin at <a href="https://pangolin.fossorial.io/auth/signup">pangolin.fossorial.io</a>
+    Start testing Pangolin at <a href="https://app.pangolin.net/auth/signup">app.pangolin.net</a>
   </strong>
 </p>

@@ -45,7 +45,10 @@ Pangolin is a self-hosted tunneled reverse proxy server with identity and contex
 
 ## Installation
 
-Check out the [quick install guide](https://docs.digpangolin.com/self-host/quick-install) for how to install and set up Pangolin.
+- Check out the [quick install guide](https://docs.pangolin.net/self-host/quick-install) for how to install and set up Pangolin.
+- Install from the [DigitalOcean marketplace](https://marketplace.digitalocean.com/apps/pangolin-ce-1?refcode=edf0480eeb81) for a one-click pre-configured installer.
 
 <img src="public/screenshots/hero.png" />
 
 ## Deployment Options

@@ -53,7 +56,7 @@
 |-----------------|--------------|
 | **Self-Host: Community Edition** | Free, open source, and licensed under AGPL-3. |
 | **Self-Host: Enterprise Edition** | Licensed under Fossorial Commercial License. Free for personal and hobbyist use, and for businesses earning under \$100K USD annually. |
-| **Pangolin Cloud** | Fully managed service with instant setup and pay-as-you-go pricing — no infrastructure required. Or, self-host your own [remote node](https://docs.digpangolin.com/manage/remote-node/nodes) and connect to our control plane. |
+| **Pangolin Cloud** | Fully managed service with instant setup and pay-as-you-go pricing — no infrastructure required. Or, self-host your own [remote node](https://docs.pangolin.net/manage/remote-node/nodes) and connect to our control plane. |
 
 ## Key Features

@@ -71,18 +74,22 @@ Pangolin packages everything you need for seamless application access and exposu
 ### Check out the docs
 
 We encourage everyone to read the full documentation first, which is
-available at [docs.digpangolin.com](https://docs.digpangolin.com). This README provides only a very brief subset of
+available at [docs.pangolin.net](https://docs.pangolin.net). This README provides only a very brief subset of
 the docs to illustrate some basic ideas.
 
 ### Sign up and try now
 
 For Pangolin's managed service, you will first need to create an account at
-[pangolin.fossorial.io](https://pangolin.fossorial.io). We have a generous free tier to get started.
+[app.pangolin.net](https://app.pangolin.net). We have a generous free tier to get started.
 
 ## Licensing
 
-Pangolin is dual licensed under the AGPL-3 and the [Fossorial Commercial License](https://digpangolin.com/fcl.html). For inquiries about commercial licensing, please contact us at [contact@fossorial.io](mailto:contact@fossorial.io).
+Pangolin is dual licensed under the AGPL-3 and the [Fossorial Commercial License](https://pangolin.net/fcl.html). For inquiries about commercial licensing, please contact us at [contact@pangolin.net](mailto:contact@pangolin.net).
 
 ## Contributions
 
 Please see [CONTRIBUTING](./CONTRIBUTING.md) in the repository for guidelines and best practices.
 
 ---
 
 WireGuard® is a registered trademark of Jason A. Donenfeld.
@@ -3,7 +3,7 @@
 If you discover a security vulnerability, please follow the steps below to responsibly disclose it to us:
 
 1. **Do not create a public GitHub issue or discussion post.** This could put the security of other users at risk.
-2. Send a detailed report to [security@fossorial.io](mailto:security@fossorial.io) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:
+2. Send a detailed report to [security@pangolin.net](mailto:security@pangolin.net) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:
 
    - Description and location of the vulnerability.
    - Potential impact of the vulnerability.
@@ -8,7 +8,7 @@ import base64
 YAML_FILE_PATH = 'blueprint.yaml'
 
 # The API endpoint and headers from the curl request
-API_URL = 'http://api.pangolin.fossorial.io/v1/org/test/blueprint'
+API_URL = 'http://api.pangolin.net/v1/org/test/blueprint'
 HEADERS = {
     'accept': '*/*',
     'Authorization': 'Bearer <your_token_here>',

@@ -28,9 +28,10 @@ proxy-resources:
       # sso-roles:
      #   - Member
      # sso-users:
-     #   - owen@fossorial.io
+     #   - owen@pangolin.net
      # whitelist-users:
-     #   - owen@fossorial.io
+     #   - owen@pangolin.net
+     # auto-login-idp: 1
      headers:
          - name: X-Example-Header
            value: example-value
@@ -5,14 +5,14 @@ meta {
 }
 
 post {
-  url: http://localhost:4000/api/v1/auth/login
+  url: http://localhost:3000/api/v1/auth/login
   body: json
   auth: none
 }
 
 body:json {
   {
-    "email": "owen@fossorial.io",
+    "email": "admin@fosrl.io",
     "password": "Password123!"
   }
 }

@@ -12,6 +12,6 @@ post {
 
 body:json {
   {
-    "email": "milo@fossorial.io"
+    "email": "milo@pangolin.net"
   }
 }

@@ -12,7 +12,7 @@ put {
 
 body:json {
   {
-    "email": "numbat@fossorial.io",
+    "email": "numbat@pangolin.net",
     "password": "Password123!"
   }
 }
bruno/Olm/createOlm.bru (new file) — 15 lines

@@ -0,0 +1,15 @@
+meta {
+  name: createOlm
+  type: http
+  seq: 1
+}
+
+put {
+  url: http://localhost:3000/api/v1/olm
+  body: none
+  auth: inherit
+}
+
+settings {
+  encodeUrl: true
+}
bruno/Olm/folder.bru (new file) — 8 lines

@@ -0,0 +1,8 @@
+meta {
+  name: Olm
+  seq: 15
+}
+
+auth {
+  mode: inherit
+}
@@ -1,6 +1,6 @@
 {
   "version": "1",
-  "name": "Pangolin Saas",
+  "name": "Pangolin",
   "type": "collection",
   "ignore": [
     "node_modules",
@@ -90,7 +90,8 @@ export const setAdminCredentials: CommandModule<{}, SetAdminCredentialsArgs> = {
             passwordHash,
             dateCreated: moment().toISOString(),
             serverAdmin: true,
-            emailVerified: true
+            emailVerified: true,
+            lastPasswordChange: new Date().getTime()
         });
 
         console.log("Server admin created");
@@ -1,5 +1,5 @@
 # To see all available options, please visit the docs:
-# https://docs.digpangolin.com/self-host/advanced/config-file
+# https://docs.pangolin.net/self-host/advanced/config-file
 
 app:
     dashboard_url: http://localhost:3002

@@ -25,4 +25,3 @@ flags:
     disable_user_create_org: true
     allow_raw_resources: true
     enable_integration_api: true
-    enable_clients: true
docker-compose.drizzle.yml (new file) — 15 lines

@@ -0,0 +1,15 @@
+services:
+    drizzle-gateway:
+        image: ghcr.io/drizzle-team/gateway:latest
+        ports:
+            - "4984:4983"
+        depends_on:
+            - db
+        environment:
+            - STORE_PATH=/app
+            - DATABASE_URL=postgresql://postgres:password@db:5432/postgres
+        volumes:
+            - drizzle-gateway-data:/app
+
+volumes:
+    drizzle-gateway-data:
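This file adds a Drizzle Gateway UI for browsing the Postgres database. One way it might be launched next to the main stack (a sketch; it assumes the db service referenced in depends_on is defined in the primary compose file):

    docker compose -f docker-compose.yml -f docker-compose.drizzle.yml up -d drizzle-gateway
    # The gateway listens on container port 4983, published on the host as 4984:
    # open http://localhost:4984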
@@ -35,7 +35,7 @@ services:
             - 80:80 # Port for traefik because of the network_mode
 
     traefik:
-        image: traefik:v3.5
+        image: traefik:v3.6
        container_name: traefik
        restart: unless-stopped
        network_mode: service:gerbil # Ports appear on the gerbil service

@@ -52,4 +52,4 @@ networks:
    default:
        driver: bridge
        name: pangolin
-        enable_ipv6: true
\ No newline at end of file
+        enable_ipv6: true
@@ -11,7 +11,7 @@ services:
             - ./config/postgres:/var/lib/postgresql/data
        ports:
            - "5432:5432" # Map host port 5432 to container port 5432
-        restart: no
+        restart: no
 
    redis:
        image: redis:latest # Use the latest Redis image
@@ -18,7 +18,11 @@ put-back:
 	mv main.go.bak main.go
 
 dev-update-versions:
-	PANGOLIN_VERSION=$$(curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name') && \
+	if [ -z "$(tag)" ]; then \
+		PANGOLIN_VERSION=$$(curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name'); \
+	else \
+		PANGOLIN_VERSION=$(tag); \
+	fi && \
 	GERBIL_VERSION=$$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name') && \
 	BADGER_VERSION=$$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name') && \
 	echo "Latest versions - Pangolin: $$PANGOLIN_VERSION, Gerbil: $$GERBIL_VERSION, Badger: $$BADGER_VERSION" && \
@@ -1,5 +1,5 @@
 # To see all available options, please visit the docs:
-# https://docs.digpangolin.com/
+# https://docs.pangolin.net/
 
 gerbil:
     start_port: 51820

@@ -14,7 +14,6 @@ app:
 domains:
     domain1:
         base_domain: "{{.BaseDomain}}"
-        cert_resolver: "letsencrypt"
 
 server:
     secret: "{{.Secret}}"

@@ -36,4 +35,4 @@ flags:
     require_email_verification: {{.EnableEmail}}
     disable_signup_without_invite: true
     disable_user_create_org: false
-    allow_raw_resources: true
\ No newline at end of file
+    allow_raw_resources: true
@@ -35,7 +35,7 @@ services:
             - 80:80
 {{end}}
     traefik:
-        image: docker.io/traefik:v3.5
+        image: docker.io/traefik:v3.6
        container_name: traefik
        restart: unless-stopped
 {{if .InstallGerbil}}

@@ -59,4 +59,4 @@ networks:
    default:
        driver: bridge
        name: pangolin
-{{if .EnableIPv6}} enable_ipv6: true{{end}}
\ No newline at end of file
+{{if .EnableIPv6}} enable_ipv6: true{{end}}
@@ -51,3 +51,12 @@ http:
         loadBalancer:
             servers:
                 - url: "http://pangolin:3000" # API/WebSocket server
+
+tcp:
+    serversTransports:
+        pp-transport-v1:
+            proxyProtocol:
+                version: 1
+        pp-transport-v2:
+            proxyProtocol:
+                version: 2
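The two named transports let raw TCP backends receive the original client address via the PROXY protocol. A minimal way a TCP service might opt in (a sketch of standard Traefik v3 dynamic configuration; the router, service, entry point, and backend address are illustrative, not from the repo):

    tcp:
        routers:
            example-router:
                entryPoints: ["tcp-ep"]
                rule: "HostSNI(`*`)"
                service: example-service
        services:
            example-service:
                loadBalancer:
                    serversTransport: pp-transport-v2  # send PROXY protocol v2 headers upstream
                    servers:
                        - address: "backend:5432"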
@@ -73,7 +73,7 @@ func installDocker() error {
 	case strings.Contains(osRelease, "ID=ubuntu"):
 		installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
 			apt-get update &&
-			apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&
+			apt-get install -y apt-transport-https ca-certificates curl &&
 			curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
 			echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
 			apt-get update &&

@@ -82,7 +82,7 @@ func installDocker() error {
 	case strings.Contains(osRelease, "ID=debian"):
 		installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
 			apt-get update &&
-			apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&
+			apt-get install -y apt-transport-https ca-certificates curl &&
 			curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
 			echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
 			apt-get update &&
@@ -3,8 +3,8 @@ module installer
 go 1.24.0
 
 require (
-	golang.org/x/term v0.36.0
+	golang.org/x/term v0.37.0
 	gopkg.in/yaml.v3 v3.0.1
 )
 
-require golang.org/x/sys v0.37.0 // indirect
+require golang.org/x/sys v0.38.0 // indirect
@@ -1,7 +1,7 @@
-golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
-golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
-golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
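Paired go.mod/go.sum bumps like these are normally produced with the standard Go module tooling rather than edited by hand — for example (run inside install/):

    go get golang.org/x/term@v0.37.0 golang.org/x/sys@v0.38.0
    go mod tidy   # rewrites go.mod and refreshes the go.sum hashes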
@@ -54,8 +54,8 @@ type Config struct {
 type SupportedContainer string
 
 const (
-	Docker SupportedContainer = "docker"
-	Podman SupportedContainer = "podman"
+	Docker    SupportedContainer = "docker"
+	Podman    SupportedContainer = "podman"
 	Undefined SupportedContainer = "undefined"
 )

@@ -160,7 +160,7 @@ func main() {
 	} else {
 		alreadyInstalled = true
 		fmt.Println("Looks like you already installed Pangolin!")
-		
+
 		// Check if MaxMind database exists and offer to update it
 		fmt.Println("\n=== MaxMind Database Update ===")
 		if _, err := os.Stat("config/GeoLite2-Country.mmdb"); err == nil {

@@ -209,8 +209,8 @@ func main() {
 
 	parsedURL, err := url.Parse(appConfig.DashboardURL)
 	if err != nil {
-	fmt.Printf("Error parsing URL: %v\n", err)
-	return
+		fmt.Printf("Error parsing URL: %v\n", err)
+		return
 	}
 
 	config.DashboardDomain = parsedURL.Hostname()

@@ -238,12 +238,11 @@ func main() {
 		}
 
 		fmt.Println("CrowdSec installed successfully!")
-			return
 		}
 	}
 
-	if !alreadyInstalled {
+	if !alreadyInstalled || config.DoCrowdsecInstall {
 		// Setup Token Section
 		fmt.Println("\n=== Setup Token ===")

@@ -360,7 +359,7 @@ func collectUserInput(reader *bufio.Reader) Config {
 		config.EmailSMTPPort = readInt(reader, "Enter SMTP port (default 587)", 587)
 		config.EmailSMTPUser = readString(reader, "Enter SMTP username", "")
 		config.EmailSMTPPass = readString(reader, "Enter SMTP password", "") // Should this be readPassword?
-		config.EmailNoReply = readString(reader, "Enter no-reply email address", "")
+		config.EmailNoReply = readString(reader, "Enter no-reply email address (often the same as SMTP username)", "")
 	}
 
 	// Validate required fields

@@ -372,13 +371,17 @@ func collectUserInput(reader *bufio.Reader) Config {
 		fmt.Println("Error: Let's Encrypt email is required")
 		os.Exit(1)
 	}
+	if config.EnableEmail && config.EmailNoReply == "" {
+		fmt.Println("Error: No-reply email address is required when email is enabled")
+		os.Exit(1)
+	}
 
 	// Advanced configuration
 
 	fmt.Println("\n=== Advanced Configuration ===")
 
 	config.EnableIPv6 = readBool(reader, "Is your server IPv6 capable?", true)
-	config.EnableGeoblocking = readBool(reader, "Do you want to download the MaxMind GeoLite2 database for geoblocking functionality?", false)
+	config.EnableGeoblocking = readBool(reader, "Do you want to download the MaxMind GeoLite2 database for geoblocking functionality?", true)
 
 	if config.DashboardDomain == "" {
 		fmt.Println("Error: Dashboard Domain name is required")

@@ -644,28 +647,28 @@ func checkPortsAvailable(port int) error {
 
 func downloadMaxMindDatabase() error {
 	fmt.Println("Downloading MaxMind GeoLite2 Country database...")
-	
+
 	// Download the GeoLite2 Country database
-	if err := run("curl", "-L", "-o", "GeoLite2-Country.tar.gz", 
+	if err := run("curl", "-L", "-o", "GeoLite2-Country.tar.gz",
 		"https://github.com/GitSquared/node-geolite2-redist/raw/refs/heads/master/redist/GeoLite2-Country.tar.gz"); err != nil {
 		return fmt.Errorf("failed to download GeoLite2 database: %v", err)
 	}
-	
+
 	// Extract the database
 	if err := run("tar", "-xzf", "GeoLite2-Country.tar.gz"); err != nil {
 		return fmt.Errorf("failed to extract GeoLite2 database: %v", err)
 	}
-	
+
 	// Find the .mmdb file and move it to the config directory
 	if err := run("bash", "-c", "mv GeoLite2-Country_*/GeoLite2-Country.mmdb config/"); err != nil {
 		return fmt.Errorf("failed to move GeoLite2 database to config directory: %v", err)
 	}
-	
+
 	// Clean up the downloaded files
 	if err := run("rm", "-rf", "GeoLite2-Country.tar.gz", "GeoLite2-Country_*"); err != nil {
 		fmt.Printf("Warning: failed to clean up temporary files: %v\n", err)
 	}
-	
+
 	fmt.Println("MaxMind GeoLite2 Country database downloaded successfully!")
 	return nil
 }
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
1045
messages/fr-FR.json
1045
messages/fr-FR.json
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
2099
messages/zh-TW.json
Normal file
2099
messages/zh-TW.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,12 +1,15 @@
+import type { NextConfig } from "next";
 import createNextIntlPlugin from "next-intl/plugin";
 
 const withNextIntl = createNextIntlPlugin();
 
-/** @type {import("next").NextConfig} */
-const nextConfig = {
+const nextConfig: NextConfig = {
     eslint: {
         ignoreDuringBuilds: true
     },
+    experimental: {
+        reactCompiler: true
+    },
     output: "standalone"
 };
9229
package-lock.json
generated
9229
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
124	package.json
@@ -22,8 +22,8 @@
        "set:oss": "echo 'export const build = \"oss\" as any;' > server/build.ts && cp tsconfig.oss.json tsconfig.json",
        "set:saas": "echo 'export const build = \"saas\" as any;' > server/build.ts && cp tsconfig.saas.json tsconfig.json",
        "set:enterprise": "echo 'export const build = \"enterprise\" as any;' > server/build.ts && cp tsconfig.enterprise.json tsconfig.json",
        "set:sqlite": "echo 'export * from \"./sqlite\";' > server/db/index.ts",
        "set:pg": "echo 'export * from \"./pg\";' > server/db/index.ts",
        "set:sqlite": "echo 'export * from \"./sqlite\";\nexport const driver: \"pg\" | \"sqlite\" = \"sqlite\";' > server/db/index.ts",
        "set:pg": "echo 'export * from \"./pg\";\nexport const driver: \"pg\" | \"sqlite\" = \"pg\";' > server/db/index.ts",
        "next:build": "next build",
        "build:sqlite": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs",
        "build:pg": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs",
@@ -32,41 +32,45 @@
        "build:cli": "node esbuild.mjs -e cli/index.ts -o dist/cli.mjs"
    },
    "dependencies": {
        "@asteasolutions/zod-to-openapi": "^7.3.4",
        "@aws-sdk/client-s3": "3.908.0",
        "@asteasolutions/zod-to-openapi": "8.1.0",
        "@faker-js/faker": "^10.1.0",
        "@headlessui/react": "^2.2.9",
        "@aws-sdk/client-s3": "3.943.0",
        "@hookform/resolvers": "5.2.2",
        "@monaco-editor/react": "^4.7.0",
        "@node-rs/argon2": "^2.0.2",
        "@oslojs/crypto": "1.0.1",
        "@oslojs/encoding": "1.1.0",
        "@radix-ui/react-avatar": "1.1.10",
        "@radix-ui/react-avatar": "1.1.11",
        "@radix-ui/react-checkbox": "1.3.3",
        "@radix-ui/react-collapsible": "1.1.12",
        "@radix-ui/react-dialog": "1.1.15",
        "@radix-ui/react-dropdown-menu": "2.1.16",
        "@radix-ui/react-icons": "1.3.2",
        "@radix-ui/react-label": "2.1.7",
        "@radix-ui/react-label": "2.1.8",
        "@radix-ui/react-popover": "1.1.15",
        "@radix-ui/react-progress": "^1.1.7",
        "@radix-ui/react-progress": "^1.1.8",
        "@radix-ui/react-radio-group": "1.3.8",
        "@radix-ui/react-scroll-area": "^1.2.10",
        "@radix-ui/react-select": "2.2.6",
        "@radix-ui/react-separator": "1.1.7",
        "@radix-ui/react-slot": "1.2.3",
        "@radix-ui/react-separator": "1.1.8",
        "@radix-ui/react-slot": "1.2.4",
        "@radix-ui/react-switch": "1.2.6",
        "@radix-ui/react-tabs": "1.1.13",
        "@radix-ui/react-toast": "1.2.15",
        "@radix-ui/react-tooltip": "^1.2.8",
        "@react-email/components": "0.5.6",
        "@react-email/components": "0.5.7",
        "@react-email/render": "^1.3.2",
        "@react-email/tailwind": "1.2.2",
        "@simplewebauthn/browser": "^13.2.2",
        "@simplewebauthn/server": "^13.2.2",
        "@tailwindcss/forms": "^0.5.10",
        "@tanstack/react-query": "^5.90.6",
        "@tanstack/react-table": "8.21.3",
        "arctic": "^3.7.0",
        "axios": "^1.12.2",
        "axios": "^1.13.2",
        "better-sqlite3": "11.7.0",
        "canvas-confetti": "1.9.3",
        "canvas-confetti": "1.9.4",
        "class-variance-authority": "^0.7.1",
        "clsx": "2.1.1",
        "cmdk": "1.1.1",
@@ -75,89 +79,103 @@
        "cookies": "^0.9.1",
        "cors": "2.8.5",
        "crypto-js": "^4.2.0",
        "drizzle-orm": "0.44.6",
        "eslint": "9.37.0",
        "eslint-config-next": "15.5.4",
        "express": "5.1.0",
        "express-rate-limit": "8.1.0",
        "glob": "11.0.3",
        "d3": "^7.9.0",
        "date-fns": "4.1.0",
        "drizzle-orm": "0.45.0",
        "eslint": "9.39.1",
        "eslint-config-next": "16.0.7",
        "express": "5.2.1",
        "express-rate-limit": "8.2.1",
        "glob": "11.1.0",
        "helmet": "8.1.0",
        "http-errors": "2.0.0",
        "http-errors": "2.0.1",
        "i": "^0.3.7",
        "input-otp": "1.4.2",
        "ioredis": "5.8.1",
        "ioredis": "5.8.2",
        "jmespath": "^0.16.0",
        "js-yaml": "4.1.0",
        "js-yaml": "4.1.1",
        "jsonwebtoken": "^9.0.2",
        "lucide-react": "^0.545.0",
        "maxmind": "5.0.0",
        "lucide-react": "^0.556.0",
        "maxmind": "5.0.1",
        "moment": "2.30.1",
        "next": "15.5.4",
        "next-intl": "^4.3.12",
        "next": "15.5.7",
        "next-intl": "^4.4.0",
        "next-themes": "0.4.6",
        "nextjs-toploader": "^3.9.17",
        "node-cache": "5.1.2",
        "node-fetch": "3.3.2",
        "nodemailer": "7.0.9",
        "npm": "^11.6.2",
        "nodemailer": "7.0.11",
        "npm": "^11.6.4",
        "nprogress": "^0.2.0",
        "oslo": "1.2.1",
        "pg": "^8.16.2",
        "posthog-node": "^5.9.5",
        "posthog-node": "^5.11.2",
        "qrcode.react": "4.2.0",
        "react": "19.2.0",
        "react-dom": "19.2.0",
        "react": "19.2.1",
        "react-day-picker": "9.11.3",
        "react-dom": "19.2.1",
        "react-easy-sort": "^1.8.0",
        "react-hook-form": "7.65.0",
        "react-hook-form": "7.68.0",
        "react-icons": "^5.5.0",
        "rebuild": "0.1.2",
        "recharts": "^2.15.4",
        "reodotdev": "^1.0.0",
        "resend": "^6.1.2",
        "resend": "^6.4.2",
        "semver": "^7.7.3",
        "stripe": "18.2.1",
        "swagger-ui-express": "^5.0.1",
        "tailwind-merge": "3.3.1",
        "topojson-client": "^3.1.0",
        "tailwind-merge": "3.4.0",
        "tw-animate-css": "^1.3.8",
        "uuid": "^13.0.0",
        "vaul": "1.1.2",
        "visionscarto-world-atlas": "^1.0.0",
        "winston": "3.18.3",
        "winston-daily-rotate-file": "5.0.0",
        "ws": "8.18.3",
        "yaml": "^2.8.1",
        "yargs": "18.0.0",
        "zod": "3.25.76",
        "zod-validation-error": "3.5.2"
        "zod": "4.1.12",
        "zod-validation-error": "5.0.0"
    },
    "devDependencies": {
        "@dotenvx/dotenvx": "1.51.0",
        "@dotenvx/dotenvx": "1.51.1",
        "@esbuild-plugins/tsconfig-paths": "0.1.2",
        "@react-email/preview-server": "4.3.0",
        "@tailwindcss/postcss": "^4.1.14",
        "@react-email/preview-server": "4.3.2",
        "@tailwindcss/postcss": "^4.1.17",
        "@tanstack/react-query-devtools": "^5.90.2",
        "@types/better-sqlite3": "7.6.12",
        "@types/cookie-parser": "1.4.9",
        "@types/cookie-parser": "1.4.10",
        "@types/cors": "2.8.19",
        "@types/crypto-js": "^4.2.2",
        "@types/express": "5.0.3",
        "@types/d3": "^7.4.3",
        "@types/express": "5.0.6",
        "@types/express-session": "^1.18.2",
        "@types/jmespath": "^0.15.2",
        "@types/js-yaml": "4.0.9",
        "@types/jsonwebtoken": "^9.0.10",
        "@types/node": "24.7.2",
        "@types/nodemailer": "7.0.2",
        "@types/pg": "8.15.5",
        "@types/react": "19.2.2",
        "@types/react-dom": "19.2.1",
        "@types/node": "24.10.1",
        "@types/nprogress": "^0.2.3",
        "@types/nodemailer": "7.0.4",
        "@types/pg": "8.15.6",
        "@types/react": "19.2.7",
        "@types/react-dom": "19.2.3",
        "@types/semver": "^7.7.1",
        "@types/swagger-ui-express": "^4.1.8",
        "@types/topojson-client": "^3.1.5",
        "@types/ws": "8.18.1",
        "@types/yargs": "17.0.33",
        "drizzle-kit": "0.31.5",
        "esbuild": "0.25.10",
        "esbuild-node-externals": "1.18.0",
        "babel-plugin-react-compiler": "^1.0.0",
        "@types/yargs": "17.0.35",
        "drizzle-kit": "0.31.8",
        "esbuild": "0.27.1",
        "esbuild-node-externals": "1.20.1",
        "postcss": "^8",
        "react-email": "4.3.0",
        "react-email": "4.3.2",
        "tailwindcss": "^4.1.4",
        "tsc-alias": "1.8.16",
        "tsx": "4.20.6",
        "tsx": "4.21.0",
        "typescript": "^5",
        "typescript-eslint": "^8.46.0"
        "typescript-eslint": "^8.46.3"
    },
    "overrides": {
        "emblor": {
@@ -165,4 +183,4 @@
            "react-dom": "19.0.0"
        }
    }
}
}
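The reworked set:sqlite / set:pg scripts now also emit a driver constant into the generated server/db/index.ts, so shared code can branch on the active database. A minimal sketch of how a consumer might use it; the bind-style example is illustrative, not from this diff:

import { driver } from "@server/db";

// `driver` is a literal ("pg" or "sqlite") baked in by set:pg / set:sqlite,
// so bundlers can eliminate the untaken branch entirely.
export function placeholderSyntax(): string {
    return driver === "pg" ? "$1" : "?"; // e.g. pick the right bind style
}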
@@ -79,6 +79,12 @@ export function createApiServer() {
    // Add request timeout middleware
    apiServer.use(requestTimeoutMiddleware(60000)); // 60 second timeout

    apiServer.use(logIncomingMiddleware);

    if (build !== "oss") {
        apiServer.use(`${prefix}/hybrid`, hybridRouter); // mounted before the global rate limiter; the hybrid router applies its own limits since some of its routes are heavily used
    }

    if (!dev) {
        apiServer.use(
            rateLimit({
@@ -101,11 +107,7 @@ export function createApiServer() {
    }

    // API routes
    apiServer.use(logIncomingMiddleware);
    apiServer.use(prefix, unauthenticated);
    if (build !== "oss") {
        apiServer.use(`${prefix}/hybrid`, hybridRouter);
    }
    apiServer.use(prefix, authenticated);

    // WebSocket routes
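The reordering matters because Express runs middleware in mount order: anything registered before the limiter is exempt from it. A standalone sketch of the pattern, with illustrative paths and limits:

import express from "express";
import rateLimit from "express-rate-limit";

const app = express();

// Heavily used router, mounted first so the global limiter below never sees it.
const hybrid = express.Router();
hybrid.get("/heartbeat", (_req, res) => {
    res.json({ ok: true });
});
app.use("/api/v1/hybrid", hybrid);

// Everything mounted after this point shares the global budget.
app.use(rateLimit({ windowMs: 60_000, limit: 300 }));
app.get("/api/v1/me", (_req, res) => {
    res.json({ user: "example" });
});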
@@ -19,6 +19,7 @@ export enum ActionsEnum {
    getSite = "getSite",
    listSites = "listSites",
    updateSite = "updateSite",
    reGenerateSecret = "reGenerateSecret",
    createResource = "createResource",
    deleteResource = "deleteResource",
    getResource = "getResource",
@@ -81,7 +82,11 @@ export enum ActionsEnum {
    listClients = "listClients",
    getClient = "getClient",
    listOrgDomains = "listOrgDomains",
    getDomain = "getDomain",
    updateOrgDomain = "updateOrgDomain",
    getDNSRecords = "getDNSRecords",
    createNewt = "createNewt",
    createOlm = "createOlm",
    createIdp = "createIdp",
    updateIdp = "updateIdp",
    deleteIdp = "deleteIdp",
@@ -116,7 +121,11 @@ export enum ActionsEnum {
    updateLoginPage = "updateLoginPage",
    getLoginPage = "getLoginPage",
    deleteLoginPage = "deleteLoginPage",
    applyBlueprint = "applyBlueprint"
    listBlueprints = "listBlueprints",
    getBlueprint = "getBlueprint",
    applyBlueprint = "applyBlueprint",
    viewLogs = "viewLogs",
    exportLogs = "exportLogs"
}

export async function checkUserActionPermission(
@@ -193,7 +202,6 @@ export async function checkUserActionPermission(
            .limit(1);

        return roleActionPermission.length > 0;

    } catch (error) {
        console.error("Error checking user action permission:", error);
        throw createHttpError(
@@ -36,12 +36,15 @@ export async function createSession(
    const sessionId = encodeHexLowerCase(
        sha256(new TextEncoder().encode(token))
    );
    const session: Session = {
        sessionId: sessionId,
        userId,
        expiresAt: new Date(Date.now() + SESSION_COOKIE_EXPIRES).getTime()
    };
    await db.insert(sessions).values(session);
    const [session] = await db
        .insert(sessions)
        .values({
            sessionId: sessionId,
            userId,
            expiresAt: new Date(Date.now() + SESSION_COOKIE_EXPIRES).getTime(),
            issuedAt: new Date().getTime()
        })
        .returning();
    return session;
}
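The rewrite leans on Drizzle's .returning(), which hands back the inserted row (including any database-generated values) without a second query. A small sketch of the pattern against a hypothetical table:

import Database from "better-sqlite3";
import { sql } from "drizzle-orm";
import { drizzle } from "drizzle-orm/better-sqlite3";
import { sqliteTable, text, integer } from "drizzle-orm/sqlite-core";

const notes = sqliteTable("notes", {
    id: integer("id").primaryKey({ autoIncrement: true }),
    body: text("body").notNull()
});

const db = drizzle(new Database(":memory:"));
db.run(sql`CREATE TABLE notes (id integer primary key autoincrement, body text not null)`);

// .returning() yields the row as stored, so the generated id comes back for free.
const [note] = await db.insert(notes).values({ body: "hello" }).returning();
console.log(note.id); // 1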
@@ -50,7 +50,8 @@ export async function createResourceSession(opts: {
        doNotExtend: opts.doNotExtend || false,
        accessTokenId: opts.accessTokenId || null,
        isRequestToken: opts.isRequestToken || false,
        userSessionId: opts.userSessionId || null
        userSessionId: opts.userSessionId || null,
        issuedAt: new Date().getTime()
    };

    await db.insert(resourceSessions).values(session);

@@ -1,9 +1,43 @@
import { Request } from "express";
import { validateSessionToken, SESSION_COOKIE_NAME } from "@server/auth/sessions/app";
import {
    validateSessionToken,
    SESSION_COOKIE_NAME
} from "@server/auth/sessions/app";

export async function verifySession(req: Request) {
export async function verifySession(req: Request, forceLogin?: boolean) {
    const res = await validateSessionToken(
        req.cookies[SESSION_COOKIE_NAME] ?? "",
        req.cookies[SESSION_COOKIE_NAME] ?? ""
    );

    if (!forceLogin) {
        return res;
    }
    if (!res.session || !res.user) {
        return {
            session: null,
            user: null
        };
    }
    if (res.session.deviceAuthUsed) {
        return {
            session: null,
            user: null
        };
    }
    if (!res.session.issuedAt) {
        return {
            session: null,
            user: null
        };
    }
    const mins = 5 * 60 * 1000; // 5 minutes in milliseconds
    const now = new Date().getTime();
    if (now - res.session.issuedAt > mins) {
        return {
            session: null,
            user: null
        };
    }

    return res;
}
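With forceLogin set, a session only passes if it exists, was not minted through device auth, and was issued within the last five minutes, so sensitive routes can demand a recent interactive login. A hedged usage sketch; the module path, route shape, and error body are illustrative:

import { Request, Response, NextFunction } from "express";
import { verifySession } from "@server/auth/sessions/verifySession"; // assumed module path

// Guard for sensitive mutations: require a fresh, interactively issued session.
export async function requireFreshLogin(
    req: Request,
    res: Response,
    next: NextFunction
) {
    const { session, user } = await verifySession(req, true);
    if (!session || !user) {
        res.status(401).json({ error: "Please log in again to continue" });
        return;
    }
    next();
}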
@@ -1,4 +1,4 @@
import { cleanup as wsCleanup } from "@server/routers/ws";
import { cleanup as wsCleanup } from "#dynamic/routers/ws";

async function cleanup() {
    await wsCleanup();
@@ -1,6 +1,7 @@
import { join } from "path";
import { readFileSync } from "fs";
import { db, resources, siteResources } from "@server/db";
import { clients, db, resources, siteResources } from "@server/db";
import { randomInt } from "crypto";
import { exitNodes, sites } from "@server/db";
import { eq, and } from "drizzle-orm";
import { __DIRNAME } from "@server/lib/consts";
@@ -15,6 +16,25 @@ if (!dev) {
}
export const names = JSON.parse(readFileSync(file, "utf-8"));

export async function getUniqueClientName(orgId: string): Promise<string> {
    let loops = 0;
    while (true) {
        if (loops > 100) {
            throw new Error("Could not generate a unique name");
        }

        const name = generateName();
        const count = await db
            .select({ niceId: clients.niceId, orgId: clients.orgId })
            .from(clients)
            .where(and(eq(clients.niceId, name), eq(clients.orgId, orgId)));
        if (count.length === 0) {
            return name;
        }
        loops++;
    }
}

export async function getUniqueSiteName(orgId: string): Promise<string> {
    let loops = 0;
    while (true) {
@@ -42,18 +62,36 @@ export async function getUniqueResourceName(orgId: string): Promise<string> {
        }

        const name = generateName();
        const count = await db
            .select({ niceId: resources.niceId, orgId: resources.orgId })
            .from(resources)
            .where(and(eq(resources.niceId, name), eq(resources.orgId, orgId)));
        if (count.length === 0) {
        const [resourceCount, siteResourceCount] = await Promise.all([
            db
                .select({ niceId: resources.niceId, orgId: resources.orgId })
                .from(resources)
                .where(
                    and(eq(resources.niceId, name), eq(resources.orgId, orgId))
                ),
            db
                .select({
                    niceId: siteResources.niceId,
                    orgId: siteResources.orgId
                })
                .from(siteResources)
                .where(
                    and(
                        eq(siteResources.niceId, name),
                        eq(siteResources.orgId, orgId)
                    )
                )
        ]);
        if (resourceCount.length === 0 && siteResourceCount.length === 0) {
            return name;
        }
        loops++;
    }
}

export async function getUniqueSiteResourceName(orgId: string): Promise<string> {
export async function getUniqueSiteResourceName(
    orgId: string
): Promise<string> {
    let loops = 0;
    while (true) {
        if (loops > 100) {
@@ -61,11 +99,27 @@ export async function getUniqueSiteResourceName(orgId: string): Promise<string>
        }

        const name = generateName();
        const count = await db
            .select({ niceId: siteResources.niceId, orgId: siteResources.orgId })
            .from(siteResources)
            .where(and(eq(siteResources.niceId, name), eq(siteResources.orgId, orgId)));
        if (count.length === 0) {
        const [resourceCount, siteResourceCount] = await Promise.all([
            db
                .select({ niceId: resources.niceId, orgId: resources.orgId })
                .from(resources)
                .where(
                    and(eq(resources.niceId, name), eq(resources.orgId, orgId))
                ),
            db
                .select({
                    niceId: siteResources.niceId,
                    orgId: siteResources.orgId
                })
                .from(siteResources)
                .where(
                    and(
                        eq(siteResources.niceId, name),
                        eq(siteResources.orgId, orgId)
                    )
                )
        ]);
        if (resourceCount.length === 0 && siteResourceCount.length === 0) {
            return name;
        }
        loops++;
@@ -74,9 +128,7 @@ export async function getUniqueSiteResourceName(orgId: string): Promise<string>

export async function getUniqueExitNodeEndpointName(): Promise<string> {
    let loops = 0;
    const count = await db
        .select()
        .from(exitNodes);
    const count = await db.select().from(exitNodes);
    while (true) {
        if (loops > 100) {
            throw new Error("Could not generate a unique name");
@@ -95,14 +147,11 @@ export async function getUniqueExitNodeEndpointName(): Promise<string> {
    }
}

export function generateName(): string {
    const name = (
        names.descriptors[
            Math.floor(Math.random() * names.descriptors.length)
        ] +
        names.descriptors[randomInt(names.descriptors.length)] +
        "-" +
        names.animals[Math.floor(Math.random() * names.animals.length)]
        names.animals[randomInt(names.animals.length)]
    )
        .toLowerCase()
        .replace(/\s/g, "-");
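Swapping Math.floor(Math.random() * n) for Node's crypto.randomInt(n) moves name generation onto the crypto random source and removes the hand-rolled index math. A small contrast, with an illustrative word list:

import { randomInt } from "crypto";

const animals = ["otter", "lynx", "heron"];

// Math.random() is non-cryptographic PRNG output.
const weak = animals[Math.floor(Math.random() * animals.length)];

// randomInt(max) draws uniformly from [0, max) using the crypto source,
// with no float rounding edge cases and no modulo-bias footgun.
const strong = animals[randomInt(animals.length)];

console.log(weak, strong);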
@@ -13,9 +13,12 @@ function createDb() {
        connection_string: process.env.POSTGRES_CONNECTION_STRING
    };
    if (process.env.POSTGRES_REPLICA_CONNECTION_STRINGS) {
        const replicas = process.env.POSTGRES_REPLICA_CONNECTION_STRINGS.split(",").map((conn) => ({
            connection_string: conn.trim()
        }));
        const replicas =
            process.env.POSTGRES_REPLICA_CONNECTION_STRINGS.split(
                ","
            ).map((conn) => ({
                connection_string: conn.trim()
            }));
        config.postgres.replicas = replicas;
    }
} else {
@@ -40,28 +43,44 @@ function createDb() {
        connectionString,
        max: poolConfig?.max_connections || 20,
        idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
        connectionTimeoutMillis: poolConfig?.connection_timeout_ms || 5000,
        connectionTimeoutMillis: poolConfig?.connection_timeout_ms || 5000
    });

    const replicas = [];

    if (!replicaConnections.length) {
        replicas.push(DrizzlePostgres(primaryPool));
        replicas.push(
            DrizzlePostgres(primaryPool, {
                logger: process.env.QUERY_LOGGING == "true"
            })
        );
    } else {
        for (const conn of replicaConnections) {
            const replicaPool = new Pool({
                connectionString: conn.connection_string,
                max: poolConfig?.max_replica_connections || 20,
                idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
                connectionTimeoutMillis: poolConfig?.connection_timeout_ms || 5000,
                connectionTimeoutMillis:
                    poolConfig?.connection_timeout_ms || 5000
            });
            replicas.push(DrizzlePostgres(replicaPool));
            replicas.push(
                DrizzlePostgres(replicaPool, {
                    logger: process.env.QUERY_LOGGING == "true"
                })
            );
        }
    }

    return withReplicas(DrizzlePostgres(primaryPool), replicas as any);
    return withReplicas(
        DrizzlePostgres(primaryPool, {
            logger: process.env.QUERY_LOGGING == "true"
        }),
        replicas as any
    );
}

export const db = createDb();
export default db;
export type Transaction = Parameters<Parameters<typeof db["transaction"]>[0]>[0];
export type Transaction = Parameters<
    Parameters<(typeof db)["transaction"]>[0]
>[0];
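withReplicas routes reads to the replica list and writes to the primary; when no replicas are configured, the primary itself is pushed into the list so the read path always has a target. A hedged sketch of the routing behavior, with placeholder connection strings:

import { drizzle } from "drizzle-orm/node-postgres";
import { withReplicas } from "drizzle-orm/pg-core";
import { Pool } from "pg";

const primary = drizzle(new Pool({ connectionString: "postgres://primary/db" }));
const replica = drizzle(new Pool({ connectionString: "postgres://replica/db" }));

const db = withReplicas(primary, [replica]);

// Reads (select) are routed to a replica; writes always hit the primary.
// db.$primary forces a read onto the primary, e.g. for read-after-write:
// await db.select().from(someTable);          // replica
// await db.insert(someTable).values({...});   // primary
// await db.$primary.select().from(someTable); // primary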
@@ -11,6 +11,7 @@ const runMigrations = async () => {
        migrationsFolder: migrationsFolder
    });
    console.log("Migrations completed successfully.");
    process.exit(0);
} catch (error) {
    console.error("Error running migrations:", error);
    process.exit(1);
@@ -6,7 +6,8 @@ import {
    integer,
    bigint,
    real,
    text
    text,
    index
} from "drizzle-orm/pg-core";
import { InferSelectModel } from "drizzle-orm";
import { domains, orgs, targets, users, exitNodes, sessions } from "./schema";
@@ -166,6 +167,7 @@ export const remoteExitNodes = pgTable("remoteExitNode", {
    secretHash: varchar("secretHash").notNull(),
    dateCreated: varchar("dateCreated").notNull(),
    version: varchar("version"),
    secondaryVersion: varchar("secondaryVersion"), // used to detect the new nodes after the transition to pangolin-node
    exitNodeId: integer("exitNodeId").references(() => exitNodes.exitNodeId, {
        onDelete: "cascade"
    })
@@ -213,6 +215,43 @@ export const sessionTransferToken = pgTable("sessionTransferToken", {
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull()
});

export const actionAuditLog = pgTable("actionAuditLog", {
    id: serial("id").primaryKey(),
    timestamp: bigint("timestamp", { mode: "number" }).notNull(), // EPOCH time in seconds
    orgId: varchar("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    actorType: varchar("actorType", { length: 50 }).notNull(),
    actor: varchar("actor", { length: 255 }).notNull(),
    actorId: varchar("actorId", { length: 255 }).notNull(),
    action: varchar("action", { length: 100 }).notNull(),
    metadata: text("metadata")
}, (table) => ([
    index("idx_actionAuditLog_timestamp").on(table.timestamp),
    index("idx_actionAuditLog_org_timestamp").on(table.orgId, table.timestamp)
]));

export const accessAuditLog = pgTable("accessAuditLog", {
    id: serial("id").primaryKey(),
    timestamp: bigint("timestamp", { mode: "number" }).notNull(), // EPOCH time in seconds
    orgId: varchar("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    actorType: varchar("actorType", { length: 50 }),
    actor: varchar("actor", { length: 255 }),
    actorId: varchar("actorId", { length: 255 }),
    resourceId: integer("resourceId"),
    ip: varchar("ip", { length: 45 }),
    type: varchar("type", { length: 100 }).notNull(),
    action: boolean("action").notNull(),
    location: text("location"),
    userAgent: text("userAgent"),
    metadata: text("metadata")
}, (table) => ([
    index("idx_identityAuditLog_timestamp").on(table.timestamp),
    index("idx_identityAuditLog_org_timestamp").on(table.orgId, table.timestamp)
]));
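Both audit tables store timestamp as epoch seconds, so writers must divide Date.now() by 1000. A hedged insert sketch; the field values and actor semantics are illustrative, not taken from this diff:

import { db, actionAuditLog } from "@server/db"; // assumed barrel exports

// Record an action; note the seconds-resolution timestamp the schema expects.
export async function logAction(orgId: string, userId: string, action: string) {
    await db.insert(actionAuditLog).values({
        timestamp: Math.floor(Date.now() / 1000),
        orgId,
        actorType: "user",
        actor: userId, // display identifier; exact semantics assumed
        actorId: userId,
        action,
        metadata: JSON.stringify({ source: "example" })
    });
}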
export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;
export type Certificate = InferSelectModel<typeof certificates>;
@@ -230,3 +269,5 @@ export type RemoteExitNodeSession = InferSelectModel<
>;
export type ExitNodeOrg = InferSelectModel<typeof exitNodeOrgs>;
export type LoginPage = InferSelectModel<typeof loginPage>;
export type ActionAuditLog = InferSelectModel<typeof actionAuditLog>;
export type AccessAuditLog = InferSelectModel<typeof accessAuditLog>;
@@ -6,10 +6,12 @@ import {
    integer,
    bigint,
    real,
    text
    text,
    index
} from "drizzle-orm/pg-core";
import { InferSelectModel } from "drizzle-orm";
import { randomUUID } from "crypto";

export const domains = pgTable("domains", {
    domainId: varchar("domainId").primaryKey(),
@@ -18,15 +20,41 @@ export const domains = pgTable("domains", {
    type: varchar("type"), // "ns", "cname", "wildcard"
    verified: boolean("verified").notNull().default(false),
    failed: boolean("failed").notNull().default(false),
    tries: integer("tries").notNull().default(0)
    tries: integer("tries").notNull().default(0),
    certResolver: varchar("certResolver"),
    customCertResolver: varchar("customCertResolver"),
    preferWildcardCert: boolean("preferWildcardCert")
});

export const dnsRecords = pgTable("dnsRecords", {
    id: serial("id").primaryKey(),
    domainId: varchar("domainId")
        .notNull()
        .references(() => domains.domainId, { onDelete: "cascade" }),
    recordType: varchar("recordType").notNull(), // "NS" | "CNAME" | "A" | "TXT"
    baseDomain: varchar("baseDomain"),
    value: varchar("value").notNull(),
    verified: boolean("verified").notNull().default(false)
});

export const orgs = pgTable("orgs", {
    orgId: varchar("orgId").primaryKey(),
    name: varchar("name").notNull(),
    subnet: varchar("subnet"),
    utilitySubnet: varchar("utilitySubnet"), // the subnet for utility addresses
    createdAt: text("createdAt"),
    settings: text("settings") // JSON blob of org-specific settings
    requireTwoFactor: boolean("requireTwoFactor"),
    maxSessionLengthHours: integer("maxSessionLengthHours"),
    passwordExpiryDays: integer("passwordExpiryDays"),
    settingsLogRetentionDaysRequest: integer("settingsLogRetentionDaysRequest") // 0 = don't keep logs, -1 = keep forever, 9001 = keep until the end of the following year
        .notNull()
        .default(7),
    settingsLogRetentionDaysAccess: integer("settingsLogRetentionDaysAccess") // 0 = don't keep logs, -1 = keep forever, 9001 = keep until the end of the following year
        .notNull()
        .default(0),
    settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // 0 = don't keep logs, -1 = keep forever, 9001 = keep until the end of the following year
        .notNull()
        .default(0)
});
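The retention columns encode three magic values alongside plain day counts. A hypothetical helper showing one reading of that encoding; the interpretation of 9001 ("end of the following year") is an assumption drawn only from the schema comment:

// Returns the epoch-seconds cutoff below which rows may be pruned,
// or null when logs are kept forever. All names here are illustrative.
function retentionCutoffEpochSeconds(days: number, now = new Date()): number | null {
    if (days === -1) return null; // keep forever: no cutoff
    if (days === 0) return Math.floor(now.getTime() / 1000); // keep nothing
    if (days === 9001) {
        // A log from year Y survives through Dec 31 of Y+1, so only rows
        // older than Jan 1 of last year are prunable (assumed semantics).
        return Math.floor(Date.UTC(now.getUTCFullYear() - 1, 0, 1) / 1000);
    }
    return Math.floor(now.getTime() / 1000) - days * 86_400;
}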
export const orgDomains = pgTable("orgDomains", {
@@ -62,8 +90,7 @@ export const sites = pgTable("sites", {
    publicKey: varchar("publicKey"),
    lastHolePunch: bigint("lastHolePunch", { mode: "number" }),
    listenPort: integer("listenPort"),
    dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true),
    remoteSubnets: text("remoteSubnets") // comma-separated list of subnets that this site can access
    dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true)
});

export const resources = pgTable("resources", {
@@ -100,9 +127,11 @@ export const resources = pgTable("resources", {
    setHostHeader: varchar("setHostHeader"),
    enableProxy: boolean("enableProxy").default(true),
    skipToIdpId: integer("skipToIdpId").references(() => idp.idpId, {
        onDelete: "cascade"
        onDelete: "set null"
    }),
    headers: text("headers") // comma-separated list of headers to add to the request
    headers: text("headers"), // comma-separated list of headers to add to the request
    proxyProtocol: boolean("proxyProtocol").notNull().default(false),
    proxyProtocolVersion: integer("proxyProtocolVersion").default(1)
});

export const targets = pgTable("targets", {
@@ -147,7 +176,8 @@ export const targetHealthCheck = pgTable("targetHealthCheck", {
    hcFollowRedirects: boolean("hcFollowRedirects").default(true),
    hcMethod: varchar("hcMethod").default("GET"),
    hcStatus: integer("hcStatus"), // http code
    hcHealth: text("hcHealth").default("unknown") // "unknown", "healthy", "unhealthy"
    hcHealth: text("hcHealth").default("unknown"), // "unknown", "healthy", "unhealthy"
    hcTlsServerName: text("hcTlsServerName")
});

export const exitNodes = pgTable("exitNodes", {
@@ -176,11 +206,41 @@ export const siteResources = pgTable("siteResources", {
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    niceId: varchar("niceId").notNull(),
    name: varchar("name").notNull(),
    protocol: varchar("protocol").notNull(),
    proxyPort: integer("proxyPort").notNull(),
    destinationPort: integer("destinationPort").notNull(),
    destinationIp: varchar("destinationIp").notNull(),
    enabled: boolean("enabled").notNull().default(true)
    mode: varchar("mode").notNull(), // "host" | "cidr" | "port"
    protocol: varchar("protocol"), // only for port mode
    proxyPort: integer("proxyPort"), // only for port mode
    destinationPort: integer("destinationPort"), // only for port mode
    destination: varchar("destination").notNull(), // ip, cidr, hostname; validate against the mode
    enabled: boolean("enabled").notNull().default(true),
    alias: varchar("alias"),
    aliasAddress: varchar("aliasAddress")
});
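The schema comment says destination must be validated against mode, since the port-only columns are now nullable. A hypothetical validator sketching those rules; the exact checks are assumptions, not code from this diff:

// "host" -> destination is a hostname or single IP
// "cidr" -> destination is a CIDR block, no ports involved
// "port" -> destination plus protocol/proxyPort/destinationPort are all required
function validateSiteResource(r: {
    mode: "host" | "cidr" | "port";
    destination: string;
    protocol?: string | null;
    proxyPort?: number | null;
    destinationPort?: number | null;
}): string | null {
    if (r.mode === "cidr" && !/^.+\/\d{1,3}$/.test(r.destination)) {
        return "cidr mode requires an address/prefix destination";
    }
    if (r.mode === "port" && (!r.protocol || !r.proxyPort || !r.destinationPort)) {
        return "port mode requires protocol, proxyPort and destinationPort";
    }
    return null; // valid
}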
export const clientSiteResources = pgTable("clientSiteResources", {
    clientId: integer("clientId")
        .notNull()
        .references(() => clients.clientId, { onDelete: "cascade" }),
    siteResourceId: integer("siteResourceId")
        .notNull()
        .references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});

export const roleSiteResources = pgTable("roleSiteResources", {
    roleId: integer("roleId")
        .notNull()
        .references(() => roles.roleId, { onDelete: "cascade" }),
    siteResourceId: integer("siteResourceId")
        .notNull()
        .references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});

export const userSiteResources = pgTable("userSiteResources", {
    userId: varchar("userId")
        .notNull()
        .references(() => users.userId, { onDelete: "cascade" }),
    siteResourceId: integer("siteResourceId")
        .notNull()
        .references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});

export const users = pgTable("user", {
@@ -200,7 +260,8 @@ export const users = pgTable("user", {
    dateCreated: varchar("dateCreated").notNull(),
    termsAcceptedTimestamp: varchar("termsAcceptedTimestamp"),
    termsVersion: varchar("termsVersion"),
    serverAdmin: boolean("serverAdmin").notNull().default(false)
    serverAdmin: boolean("serverAdmin").notNull().default(false),
    lastPasswordChange: bigint("lastPasswordChange", { mode: "number" })
});

export const newts = pgTable("newt", {
@@ -226,7 +287,9 @@ export const sessions = pgTable("session", {
    userId: varchar("userId")
        .notNull()
        .references(() => users.userId, { onDelete: "cascade" }),
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull()
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull(),
    issuedAt: bigint("issuedAt", { mode: "number" }),
    deviceAuthUsed: boolean("deviceAuthUsed").notNull().default(false)
});

export const newtSessions = pgTable("newtSession", {
@@ -443,7 +506,8 @@ export const resourceSessions = pgTable("resourceSessions", {
        {
            onDelete: "cascade"
        }
    )
    ),
    issuedAt: bigint("issuedAt", { mode: "number" })
});

export const resourceWhitelist = pgTable("resourceWhitelist", {
@@ -567,7 +631,7 @@ export const idpOrg = pgTable("idpOrg", {
});

export const clients = pgTable("clients", {
    clientId: serial("id").primaryKey(),
    clientId: serial("clientId").primaryKey(),
    orgId: varchar("orgId")
        .references(() => orgs.orgId, {
            onDelete: "cascade"
@@ -576,6 +640,12 @@ export const clients = pgTable("clients", {
    exitNodeId: integer("exitNode").references(() => exitNodes.exitNodeId, {
        onDelete: "set null"
    }),
    userId: text("userId").references(() => users.userId, {
        // optionally tied to a user; in that case the client is deleted when the user is
        onDelete: "cascade"
    }),
    niceId: varchar("niceId").notNull(),
    olmId: text("olmId"), // optionally lock it to a specific olm
    name: varchar("name").notNull(),
    pubKey: varchar("pubKey"),
    subnet: varchar("subnet").notNull(),
@@ -590,23 +660,40 @@ export const clients = pgTable("clients", {
    maxConnections: integer("maxConnections")
});

export const clientSites = pgTable("clientSites", {
    clientId: integer("clientId")
        .notNull()
        .references(() => clients.clientId, { onDelete: "cascade" }),
    siteId: integer("siteId")
        .notNull()
        .references(() => sites.siteId, { onDelete: "cascade" }),
    isRelayed: boolean("isRelayed").notNull().default(false),
    endpoint: varchar("endpoint")
});
export const clientSitesAssociationsCache = pgTable(
    "clientSitesAssociationsCache",
    {
        clientId: integer("clientId") // not a foreign key, so after a client is deleted the rebuild function can still see the row, delete it, and send the message
            .notNull(),
        siteId: integer("siteId").notNull(),
        isRelayed: boolean("isRelayed").notNull().default(false),
        endpoint: varchar("endpoint"),
        publicKey: varchar("publicKey") // acts as the session's public key for hole punching so we can track when it changes
    }
);

export const clientSiteResourcesAssociationsCache = pgTable(
    "clientSiteResourcesAssociationsCache",
    {
        clientId: integer("clientId") // not a foreign key, so after a client is deleted the rebuild function can still see the row, delete it, and send the message
            .notNull(),
        siteResourceId: integer("siteResourceId").notNull()
    }
);

export const olms = pgTable("olms", {
    olmId: varchar("id").primaryKey(),
    secretHash: varchar("secretHash").notNull(),
    dateCreated: varchar("dateCreated").notNull(),
    version: text("version"),
    agent: text("agent"),
    name: varchar("name"),
    clientId: integer("clientId").references(() => clients.clientId, {
        // switched depending on the current org it wants to connect to
        onDelete: "set null"
    }),
    userId: text("userId").references(() => users.userId, {
        // optionally tied to a user; in that case the olm is deleted when the user is
        onDelete: "cascade"
    })
});
@@ -671,6 +758,72 @@ export const setupTokens = pgTable("setupTokens", {
    dateUsed: varchar("dateUsed")
});

// Blueprint runs
export const blueprints = pgTable("blueprints", {
    blueprintId: serial("blueprintId").primaryKey(),
    orgId: text("orgId")
        .references(() => orgs.orgId, {
            onDelete: "cascade"
        })
        .notNull(),
    name: varchar("name").notNull(),
    source: varchar("source").notNull(),
    createdAt: integer("createdAt").notNull(),
    succeeded: boolean("succeeded").notNull(),
    contents: text("contents").notNull(),
    message: text("message")
});
export const requestAuditLog = pgTable(
    "requestAuditLog",
    {
        id: serial("id").primaryKey(),
        timestamp: integer("timestamp").notNull(), // EPOCH time in seconds
        orgId: text("orgId").references(() => orgs.orgId, {
            onDelete: "cascade"
        }),
        action: boolean("action").notNull(),
        reason: integer("reason").notNull(),
        actorType: text("actorType"),
        actor: text("actor"),
        actorId: text("actorId"),
        resourceId: integer("resourceId"),
        ip: text("ip"),
        location: text("location"),
        userAgent: text("userAgent"),
        metadata: text("metadata"),
        headers: text("headers"), // JSON blob
        query: text("query"), // JSON blob
        originalRequestURL: text("originalRequestURL"),
        scheme: text("scheme"),
        host: text("host"),
        path: text("path"),
        method: text("method"),
        tls: boolean("tls")
    },
    (table) => [
        index("idx_requestAuditLog_timestamp").on(table.timestamp),
        index("idx_requestAuditLog_org_timestamp").on(
            table.orgId,
            table.timestamp
        )
    ]
);

export const deviceWebAuthCodes = pgTable("deviceWebAuthCodes", {
    codeId: serial("codeId").primaryKey(),
    code: text("code").notNull().unique(),
    ip: text("ip"),
    city: text("city"),
    deviceName: text("deviceName"),
    applicationName: text("applicationName").notNull(),
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull(),
    createdAt: bigint("createdAt", { mode: "number" }).notNull(),
    verified: boolean("verified").notNull().default(false),
    userId: varchar("userId").references(() => users.userId, {
        onDelete: "cascade"
    })
});
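A hedged sketch of minting a device-auth code for this table; the code length, digit charset, millisecond epochs, and the 10-minute TTL are all assumptions for illustration, not taken from the diff:

import { randomInt } from "crypto";
import { db, deviceWebAuthCodes } from "@server/db"; // assumed barrel exports

// Generate a user-typable 8-digit code and store it with an expiry.
export async function createDeviceAuthCode(applicationName: string, ip?: string) {
    const code = Array.from({ length: 8 }, () => randomInt(10)).join("");
    const now = Date.now();
    const [row] = await db
        .insert(deviceWebAuthCodes)
        .values({
            code,
            ip: ip ?? null,
            applicationName,
            createdAt: now, // assumed millisecond epoch, like the session tables
            expiresAt: now + 10 * 60 * 1000 // assumed 10-minute TTL
        })
        .returning();
    return row;
}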
export type Org = InferSelectModel<typeof orgs>;
export type User = InferSelectModel<typeof users>;
export type Site = InferSelectModel<typeof sites>;
@@ -711,7 +864,7 @@ export type ApiKey = InferSelectModel<typeof apiKeys>;
export type ApiKeyAction = InferSelectModel<typeof apiKeyActions>;
export type ApiKeyOrg = InferSelectModel<typeof apiKeyOrg>;
export type Client = InferSelectModel<typeof clients>;
export type ClientSite = InferSelectModel<typeof clientSites>;
export type ClientSite = InferSelectModel<typeof clientSitesAssociationsCache>;
export type Olm = InferSelectModel<typeof olms>;
export type OlmSession = InferSelectModel<typeof olmSessions>;
export type UserClient = InferSelectModel<typeof userClients>;
@@ -722,3 +875,9 @@ export type SetupToken = InferSelectModel<typeof setupTokens>;
export type HostMeta = InferSelectModel<typeof hostMeta>;
export type TargetHealthCheck = InferSelectModel<typeof targetHealthCheck>;
export type IdpOidcConfig = InferSelectModel<typeof idpOidcConfig>;
export type Blueprint = InferSelectModel<typeof blueprints>;
export type LicenseKey = InferSelectModel<typeof licenseKey>;
export type SecurityKey = InferSelectModel<typeof securityKeys>;
export type WebauthnChallenge = InferSelectModel<typeof webauthnChallenge>;
export type DeviceWebAuthCode = InferSelectModel<typeof deviceWebAuthCodes>;
export type RequestAuditLog = InferSelectModel<typeof requestAuditLog>;
@@ -1,4 +1,4 @@
import { db, loginPage, LoginPage, loginPageOrg } from "@server/db";
import { db, loginPage, LoginPage, loginPageOrg, Org, orgs } from "@server/db";
import {
    Resource,
    ResourcePassword,
@@ -23,6 +23,7 @@ export type ResourceWithAuth = {
    pincode: ResourcePincode | null;
    password: ResourcePassword | null;
    headerAuth: ResourceHeaderAuth | null;
    org: Org;
};

export type UserSessionWithUser = {
@@ -51,6 +52,10 @@ export async function getResourceByDomain(
            resourceHeaderAuth,
            eq(resourceHeaderAuth.resourceId, resources.resourceId)
        )
        .innerJoin(
            orgs,
            eq(orgs.orgId, resources.orgId)
        )
        .where(eq(resources.fullDomain, domain))
        .limit(1);

@@ -62,7 +67,8 @@ export async function getResourceByDomain(
        resource: result.resources,
        pincode: result.resourcePincode,
        password: result.resourcePassword,
        headerAuth: result.resourceHeaderAuth
        headerAuth: result.resourceHeaderAuth,
        org: result.orgs
    };
}
@@ -13,12 +13,16 @@ bootstrapVolume();

function createDb() {
    const sqlite = new Database(location);
    return DrizzleSqlite(sqlite, { schema });
    return DrizzleSqlite(sqlite, {
        schema
    });
}

export const db = createDb();
export default db;
export type Transaction = Parameters<Parameters<typeof db["transaction"]>[0]>[0];
export type Transaction = Parameters<
    Parameters<(typeof db)["transaction"]>[0]
>[0];

function checkFileExists(filePath: string): boolean {
    try {
@@ -2,10 +2,12 @@ import {
    sqliteTable,
    integer,
    text,
    real
    real,
    index
} from "drizzle-orm/sqlite-core";
import { InferSelectModel } from "drizzle-orm";
import { domains, orgs, targets, users, exitNodes, sessions } from "./schema";

export const certificates = sqliteTable("certificates", {
    certId: integer("certId").primaryKey({ autoIncrement: true }),
@@ -160,6 +162,7 @@ export const remoteExitNodes = sqliteTable("remoteExitNode", {
    secretHash: text("secretHash").notNull(),
    dateCreated: text("dateCreated").notNull(),
    version: text("version"),
    secondaryVersion: text("secondaryVersion"), // used to detect the new nodes after the transition to pangolin-node
    exitNodeId: integer("exitNodeId").references(() => exitNodes.exitNodeId, {
        onDelete: "cascade"
    })
@@ -207,6 +210,43 @@ export const sessionTransferToken = sqliteTable("sessionTransferToken", {
    expiresAt: integer("expiresAt").notNull()
});

export const actionAuditLog = sqliteTable("actionAuditLog", {
    id: integer("id").primaryKey({ autoIncrement: true }),
    timestamp: integer("timestamp").notNull(), // EPOCH time in seconds
    orgId: text("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    actorType: text("actorType").notNull(),
    actor: text("actor").notNull(),
    actorId: text("actorId").notNull(),
    action: text("action").notNull(),
    metadata: text("metadata")
}, (table) => ([
    index("idx_actionAuditLog_timestamp").on(table.timestamp),
    index("idx_actionAuditLog_org_timestamp").on(table.orgId, table.timestamp)
]));

export const accessAuditLog = sqliteTable("accessAuditLog", {
    id: integer("id").primaryKey({ autoIncrement: true }),
    timestamp: integer("timestamp").notNull(), // EPOCH time in seconds
    orgId: text("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    actorType: text("actorType"),
    actor: text("actor"),
    actorId: text("actorId"),
    resourceId: integer("resourceId"),
    ip: text("ip"),
    location: text("location"),
    type: text("type").notNull(),
    action: integer("action", { mode: "boolean" }).notNull(),
    userAgent: text("userAgent"),
    metadata: text("metadata")
}, (table) => ([
    index("idx_identityAuditLog_timestamp").on(table.timestamp),
    index("idx_identityAuditLog_org_timestamp").on(table.orgId, table.timestamp)
]));

export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;
export type Certificate = InferSelectModel<typeof certificates>;
@@ -224,3 +264,5 @@ export type RemoteExitNodeSession = InferSelectModel<
>;
export type ExitNodeOrg = InferSelectModel<typeof exitNodeOrgs>;
export type LoginPage = InferSelectModel<typeof loginPage>;
export type ActionAuditLog = InferSelectModel<typeof actionAuditLog>;
export type AccessAuditLog = InferSelectModel<typeof accessAuditLog>;
@@ -1,6 +1,7 @@
import { randomUUID } from "crypto";
import { InferSelectModel } from "drizzle-orm";
import { sqliteTable, text, integer } from "drizzle-orm/sqlite-core";
import { sqliteTable, text, integer, index } from "drizzle-orm/sqlite-core";

export const domains = sqliteTable("domains", {
    domainId: text("domainId").primaryKey(),
@@ -11,15 +12,41 @@ export const domains = sqliteTable("domains", {
    type: text("type"), // "ns", "cname", "wildcard"
    verified: integer("verified", { mode: "boolean" }).notNull().default(false),
    failed: integer("failed", { mode: "boolean" }).notNull().default(false),
    tries: integer("tries").notNull().default(0)
    tries: integer("tries").notNull().default(0),
    certResolver: text("certResolver"),
    preferWildcardCert: integer("preferWildcardCert", { mode: "boolean" })
});

export const dnsRecords = sqliteTable("dnsRecords", {
    id: integer("id").primaryKey({ autoIncrement: true }),
    domainId: text("domainId")
        .notNull()
        .references(() => domains.domainId, { onDelete: "cascade" }),

    recordType: text("recordType").notNull(), // "NS" | "CNAME" | "A" | "TXT"
    baseDomain: text("baseDomain"),
    value: text("value").notNull(),
    verified: integer("verified", { mode: "boolean" }).notNull().default(false)
});

export const orgs = sqliteTable("orgs", {
    orgId: text("orgId").primaryKey(),
    name: text("name").notNull(),
    subnet: text("subnet"),
    utilitySubnet: text("utilitySubnet"), // the subnet for utility addresses
    createdAt: text("createdAt"),
    settings: text("settings") // JSON blob of org-specific settings
    requireTwoFactor: integer("requireTwoFactor", { mode: "boolean" }),
    maxSessionLengthHours: integer("maxSessionLengthHours"), // hours
    passwordExpiryDays: integer("passwordExpiryDays"), // days
    settingsLogRetentionDaysRequest: integer("settingsLogRetentionDaysRequest") // 0 = don't keep logs, -1 = keep forever, 9001 = keep until the end of the following year
        .notNull()
        .default(7),
    settingsLogRetentionDaysAccess: integer("settingsLogRetentionDaysAccess") // 0 = don't keep logs, -1 = keep forever, 9001 = keep until the end of the following year
        .notNull()
        .default(0),
    settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // 0 = don't keep logs, -1 = keep forever, 9001 = keep until the end of the following year
        .notNull()
        .default(0)
});

export const userDomains = sqliteTable("userDomains", {
@@ -68,8 +95,7 @@ export const sites = sqliteTable("sites", {
    listenPort: integer("listenPort"),
    dockerSocketEnabled: integer("dockerSocketEnabled", { mode: "boolean" })
        .notNull()
        .default(true),
    remoteSubnets: text("remoteSubnets") // comma-separated list of subnets that this site can access
        .default(true)
});

export const resources = sqliteTable("resources", {
@@ -112,9 +138,13 @@ export const resources = sqliteTable("resources", {
    setHostHeader: text("setHostHeader"),
    enableProxy: integer("enableProxy", { mode: "boolean" }).default(true),
    skipToIdpId: integer("skipToIdpId").references(() => idp.idpId, {
        onDelete: "cascade"
        onDelete: "set null"
    }),
    headers: text("headers") // comma-separated list of headers to add to the request
    headers: text("headers"), // comma-separated list of headers to add to the request
    proxyProtocol: integer("proxyProtocol", { mode: "boolean" })
        .notNull()
        .default(false),
    proxyProtocolVersion: integer("proxyProtocolVersion").default(1)
});

export const targets = sqliteTable("targets", {
@@ -142,11 +172,15 @@ export const targets = sqliteTable("targets", {
});

export const targetHealthCheck = sqliteTable("targetHealthCheck", {
    targetHealthCheckId: integer("targetHealthCheckId").primaryKey({ autoIncrement: true }),
    targetHealthCheckId: integer("targetHealthCheckId").primaryKey({
        autoIncrement: true
    }),
    targetId: integer("targetId")
        .notNull()
        .references(() => targets.targetId, { onDelete: "cascade" }),
    hcEnabled: integer("hcEnabled", { mode: "boolean" }).notNull().default(false),
    hcEnabled: integer("hcEnabled", { mode: "boolean" })
        .notNull()
        .default(false),
    hcPath: text("hcPath"),
    hcScheme: text("hcScheme"),
    hcMode: text("hcMode").default("http"),
@@ -156,10 +190,13 @@ export const targetHealthCheck = sqliteTable("targetHealthCheck", {
    hcUnhealthyInterval: integer("hcUnhealthyInterval").default(30), // in seconds
    hcTimeout: integer("hcTimeout").default(5), // in seconds
    hcHeaders: text("hcHeaders"),
    hcFollowRedirects: integer("hcFollowRedirects", { mode: "boolean" }).default(true),
    hcFollowRedirects: integer("hcFollowRedirects", {
        mode: "boolean"
    }).default(true),
    hcMethod: text("hcMethod").default("GET"),
    hcStatus: integer("hcStatus"), // http code
    hcHealth: text("hcHealth").default("unknown") // "unknown", "healthy", "unhealthy"
    hcHealth: text("hcHealth").default("unknown"), // "unknown", "healthy", "unhealthy"
    hcTlsServerName: text("hcTlsServerName")
});

export const exitNodes = sqliteTable("exitNodes", {
@@ -190,11 +227,41 @@ export const siteResources = sqliteTable("siteResources", {
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    niceId: text("niceId").notNull(),
    name: text("name").notNull(),
    protocol: text("protocol").notNull(),
    proxyPort: integer("proxyPort").notNull(),
    destinationPort: integer("destinationPort").notNull(),
    destinationIp: text("destinationIp").notNull(),
    enabled: integer("enabled", { mode: "boolean" }).notNull().default(true)
    mode: text("mode").notNull(), // "host" | "cidr" | "port"
    protocol: text("protocol"), // only for port mode
    proxyPort: integer("proxyPort"), // only for port mode
    destinationPort: integer("destinationPort"), // only for port mode
    destination: text("destination").notNull(), // ip, cidr, hostname
    enabled: integer("enabled", { mode: "boolean" }).notNull().default(true),
    alias: text("alias"),
    aliasAddress: text("aliasAddress")
});

export const clientSiteResources = sqliteTable("clientSiteResources", {
    clientId: integer("clientId")
        .notNull()
        .references(() => clients.clientId, { onDelete: "cascade" }),
    siteResourceId: integer("siteResourceId")
        .notNull()
        .references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});

export const roleSiteResources = sqliteTable("roleSiteResources", {
    roleId: integer("roleId")
        .notNull()
        .references(() => roles.roleId, { onDelete: "cascade" }),
    siteResourceId: integer("siteResourceId")
        .notNull()
        .references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});

export const userSiteResources = sqliteTable("userSiteResources", {
    userId: text("userId")
        .notNull()
        .references(() => users.userId, { onDelete: "cascade" }),
    siteResourceId: integer("siteResourceId")
        .notNull()
        .references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});

export const users = sqliteTable("user", {
@@ -222,7 +289,8 @@ export const users = sqliteTable("user", {
    termsVersion: text("termsVersion"),
    serverAdmin: integer("serverAdmin", { mode: "boolean" })
        .notNull()
        .default(false)
        .default(false),
    lastPasswordChange: integer("lastPasswordChange")
});

export const securityKeys = sqliteTable("webauthnCredentials", {
@@ -269,7 +337,7 @@ export const newts = sqliteTable("newt", {
});

export const clients = sqliteTable("clients", {
    clientId: integer("id").primaryKey({ autoIncrement: true }),
    clientId: integer("clientId").primaryKey({ autoIncrement: true }),
    orgId: text("orgId")
        .references(() => orgs.orgId, {
            onDelete: "cascade"
@@ -278,8 +346,14 @@ export const clients = sqliteTable("clients", {
    exitNodeId: integer("exitNode").references(() => exitNodes.exitNodeId, {
        onDelete: "set null"
    }),
    userId: text("userId").references(() => users.userId, {
        // optionally tied to a user; in that case the client is deleted when the user is
        onDelete: "cascade"
    }),
    niceId: text("niceId").notNull(),
    name: text("name").notNull(),
    pubKey: text("pubKey"),
    olmId: text("olmId"), // optionally lock it to a specific olm
    subnet: text("subnet").notNull(),
    megabytesIn: integer("bytesIn"),
    megabytesOut: integer("bytesOut"),
@@ -291,25 +365,42 @@ export const clients = sqliteTable("clients", {
    lastHolePunch: integer("lastHolePunch")
});

export const clientSites = sqliteTable("clientSites", {
    clientId: integer("clientId")
        .notNull()
        .references(() => clients.clientId, { onDelete: "cascade" }),
    siteId: integer("siteId")
        .notNull()
        .references(() => sites.siteId, { onDelete: "cascade" }),
    isRelayed: integer("isRelayed", { mode: "boolean" })
        .notNull()
        .default(false),
    endpoint: text("endpoint")
});
export const clientSitesAssociationsCache = sqliteTable(
    "clientSitesAssociationsCache",
    {
        clientId: integer("clientId") // not a foreign key, so after a client is deleted the rebuild function can still see the row, delete it, and send the message
            .notNull(),
        siteId: integer("siteId").notNull(),
        isRelayed: integer("isRelayed", { mode: "boolean" })
            .notNull()
            .default(false),
        endpoint: text("endpoint"),
        publicKey: text("publicKey") // acts as the session's public key for hole punching so we can track when it changes
    }
);

export const clientSiteResourcesAssociationsCache = sqliteTable(
    "clientSiteResourcesAssociationsCache",
    {
        clientId: integer("clientId") // not a foreign key, so after a client is deleted the rebuild function can still see the row, delete it, and send the message
            .notNull(),
        siteResourceId: integer("siteResourceId").notNull()
    }
);

export const olms = sqliteTable("olms", {
    olmId: text("id").primaryKey(),
    secretHash: text("secretHash").notNull(),
    dateCreated: text("dateCreated").notNull(),
    version: text("version"),
    agent: text("agent"),
    name: text("name"),
    clientId: integer("clientId").references(() => clients.clientId, {
        // switched depending on the current org it wants to connect to
        onDelete: "set null"
    }),
    userId: text("userId").references(() => users.userId, {
        // optionally tied to a user; in that case the olm is deleted when the user is
        onDelete: "cascade"
    })
});
@@ -327,7 +418,11 @@ export const sessions = sqliteTable("session", {
    userId: text("userId")
        .notNull()
        .references(() => users.userId, { onDelete: "cascade" }),
    expiresAt: integer("expiresAt").notNull()
    expiresAt: integer("expiresAt").notNull(),
    issuedAt: integer("issuedAt"),
    deviceAuthUsed: integer("deviceAuthUsed", { mode: "boolean" })
        .notNull()
        .default(false)
});
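In the SQLite schema, flags like deviceAuthUsed are declared integer(..., { mode: "boolean" }): the column stores 0/1 on disk, but Drizzle exposes it as a real boolean in TypeScript. A tiny sketch of the round-trip, with an illustrative table:

import { sqliteTable, integer, text } from "drizzle-orm/sqlite-core";

// `mode: "boolean"` stores 0/1 in SQLite but types the field as boolean in TS.
const flags = sqliteTable("flags", {
    name: text("name").primaryKey(),
    enabled: integer("enabled", { mode: "boolean" }).notNull().default(false)
});

type Flag = typeof flags.$inferSelect;
const f: Flag = { name: "demo", enabled: true }; // boolean, not 0/1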
export const newtSessions = sqliteTable("newtSession", {
|
||||
@@ -577,7 +672,8 @@ export const resourceSessions = sqliteTable("resourceSessions", {
|
||||
{
|
||||
onDelete: "cascade"
|
||||
}
|
||||
)
|
||||
),
|
||||
issuedAt: integer("issuedAt")
|
||||
});
|
||||
|
||||
export const resourceWhitelist = sqliteTable("resourceWhitelist", {
|
||||
@@ -710,6 +806,74 @@ export const idpOrg = sqliteTable("idpOrg", {
|
||||
orgMapping: text("orgMapping")
|
||||
});
|
||||
|
||||
// Blueprint runs
|
||||
export const blueprints = sqliteTable("blueprints", {
|
||||
blueprintId: integer("blueprintId").primaryKey({
|
||||
autoIncrement: true
|
||||
}),
|
||||
orgId: text("orgId")
|
||||
.references(() => orgs.orgId, {
|
||||
onDelete: "cascade"
|
||||
})
|
||||
.notNull(),
|
||||
name: text("name").notNull(),
|
||||
source: text("source").notNull(),
|
||||
createdAt: integer("createdAt").notNull(),
|
||||
succeeded: integer("succeeded", { mode: "boolean" }).notNull(),
|
||||
contents: text("contents").notNull(),
|
||||
message: text("message")
|
||||
});
|
||||
export const requestAuditLog = sqliteTable(
    "requestAuditLog",
    {
        id: integer("id").primaryKey({ autoIncrement: true }),
        timestamp: integer("timestamp").notNull(), // this is EPOCH time in seconds
        orgId: text("orgId").references(() => orgs.orgId, {
            onDelete: "cascade"
        }),
        action: integer("action", { mode: "boolean" }).notNull(),
        reason: integer("reason").notNull(),
        actorType: text("actorType"),
        actor: text("actor"),
        actorId: text("actorId"),
        resourceId: integer("resourceId"),
        ip: text("ip"),
        location: text("location"),
        userAgent: text("userAgent"),
        metadata: text("metadata"),
        headers: text("headers"), // JSON blob
        query: text("query"), // JSON blob
        originalRequestURL: text("originalRequestURL"),
        scheme: text("scheme"),
        host: text("host"),
        path: text("path"),
        method: text("method"),
        tls: integer("tls", { mode: "boolean" })
    },
    (table) => [
        index("idx_requestAuditLog_timestamp").on(table.timestamp),
        index("idx_requestAuditLog_org_timestamp").on(
            table.orgId,
            table.timestamp
        )
    ]
);
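Only `timestamp`, `action`, and `reason` are non-null besides the id, so a minimal write looks like the sketch below (values are illustrative; the real middleware presumably fills in actor, IP, and header context):

import { db, requestAuditLog } from "@server/db"; // assumed exports

// Illustrative: record a request decision using the table's epoch-seconds convention.
async function logRequest(orgId: string, allowed: boolean, reason: number) {
    await db.insert(requestAuditLog).values({
        timestamp: Math.floor(Date.now() / 1000),
        orgId,
        action: allowed,
        reason
    });
}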

export const deviceWebAuthCodes = sqliteTable("deviceWebAuthCodes", {
    codeId: integer("codeId").primaryKey({ autoIncrement: true }),
    code: text("code").notNull().unique(),
    ip: text("ip"),
    city: text("city"),
    deviceName: text("deviceName"),
    applicationName: text("applicationName").notNull(),
    expiresAt: integer("expiresAt").notNull(),
    createdAt: integer("createdAt").notNull(),
    verified: integer("verified", { mode: "boolean" }).notNull().default(false),
    userId: text("userId").references(() => users.userId, {
        onDelete: "cascade"
    })
});
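A hedged sketch of issuing one of these codes; the generator and the 10-minute TTL are assumptions, only the column requirements come from the schema:

import { randomBytes } from "crypto";
import { db, deviceWebAuthCodes } from "@server/db"; // assumed exports

// Illustrative: create an unverified device auth code that expires in 10 minutes.
async function issueDeviceCode(applicationName: string) {
    const now = Math.floor(Date.now() / 1000);
    const code = randomBytes(16).toString("hex"); // generation scheme is an assumption
    await db.insert(deviceWebAuthCodes).values({
        code,
        applicationName,
        createdAt: now,
        expiresAt: now + 10 * 60
    });
    return code;
}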

export type Org = InferSelectModel<typeof orgs>;
export type User = InferSelectModel<typeof users>;
export type Site = InferSelectModel<typeof sites>;
@@ -746,8 +910,9 @@ export type ResourceWhitelist = InferSelectModel<typeof resourceWhitelist>;
export type VersionMigration = InferSelectModel<typeof versionMigrations>;
export type ResourceRule = InferSelectModel<typeof resourceRules>;
export type Domain = InferSelectModel<typeof domains>;
export type DnsRecord = InferSelectModel<typeof dnsRecords>;
export type Client = InferSelectModel<typeof clients>;
export type ClientSite = InferSelectModel<typeof clientSites>;
export type ClientSite = InferSelectModel<typeof clientSitesAssociationsCache>;
export type RoleClient = InferSelectModel<typeof roleClients>;
export type UserClient = InferSelectModel<typeof userClients>;
export type SupporterKey = InferSelectModel<typeof supporterKey>;
@@ -761,3 +926,9 @@ export type SetupToken = InferSelectModel<typeof setupTokens>;
export type HostMeta = InferSelectModel<typeof hostMeta>;
export type TargetHealthCheck = InferSelectModel<typeof targetHealthCheck>;
export type IdpOidcConfig = InferSelectModel<typeof idpOidcConfig>;
export type Blueprint = InferSelectModel<typeof blueprints>;
export type LicenseKey = InferSelectModel<typeof licenseKey>;
export type SecurityKey = InferSelectModel<typeof securityKeys>;
export type WebauthnChallenge = InferSelectModel<typeof webauthnChallenge>;
export type RequestAuditLog = InferSelectModel<typeof requestAuditLog>;
export type DeviceWebAuthCode = InferSelectModel<typeof deviceWebAuthCodes>;

server/emails/templates/SupportEmail.tsx (new file, 56 lines)
@@ -0,0 +1,56 @@
import React from "react";
import { Body, Head, Html, Preview, Tailwind } from "@react-email/components";
import { themeColors } from "./lib/theme";
import {
    EmailContainer,
    EmailGreeting,
    EmailLetterHead,
    EmailText
} from "./components/Email";

interface SupportEmailProps {
    email: string;
    username: string;
    subject: string;
    body: string;
}

export const SupportEmail = ({
    username,
    email,
    body,
    subject
}: SupportEmailProps) => {
    const previewText = subject;

    return (
        <Html>
            <Head />
            <Preview>{previewText}</Preview>
            <Tailwind config={themeColors}>
                <Body className="font-sans bg-gray-50">
                    <EmailContainer>
                        <EmailLetterHead />

                        <EmailGreeting>Hi support,</EmailGreeting>

                        <EmailText>
                            You have received a new support request from{" "}
                            <strong>{username}</strong> ({email}).
                        </EmailText>

                        <EmailText>
                            <strong>Subject:</strong> {subject}
                        </EmailText>

                        <EmailText>
                            <strong>Message:</strong> {body}
                        </EmailText>
                    </EmailContainer>
                </Body>
            </Tailwind>
        </Html>
    );
};

export default SupportEmail;
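Templates like this are normally rendered to an HTML string before handing off to the mailer. A minimal sketch, assuming the project renders via @react-email/render (not confirmed by this diff):

import { render } from "@react-email/render"; // assumed dependency
import SupportEmail from "./SupportEmail";

// Illustrative: produce the HTML body for the support mailbox.
const html = await render(
    SupportEmail({
        username: "jane",
        email: "jane@example.com",
        subject: "Cannot reach my site",
        body: "Details of the problem..."
    })
);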
@@ -88,7 +88,7 @@ export const WelcomeQuickStart = ({
                            To learn how to use Newt, including more
                            installation methods, visit the{" "}
                            <a
                                href="https://docs.digpangolin.com/manage/sites/install-site"
                                href="https://docs.pangolin.net/manage/sites/install-site"
                                className="underline"
                            >
                                docs

@@ -89,7 +89,7 @@ export function EmailFooter({ children }: { children: React.ReactNode }) {
            <p className="text-xs text-gray-400 mt-4">
                For any questions or support, please contact us at:
                <br />
                support@fossorial.io
                support@pangolin.net
            </p>
            <p className="text-xs text-gray-300 text-center mt-4">
                © {new Date().getFullYear()} Fossorial, Inc. All

@@ -5,21 +5,24 @@ import { runSetupFunctions } from "./setup";
import { createApiServer } from "./apiServer";
import { createNextServer } from "./nextServer";
import { createInternalServer } from "./internalServer";
import { createIntegrationApiServer } from "./integrationApiServer";
import {
    ApiKey,
    ApiKeyOrg,
    RemoteExitNode,
    Session,
    SiteResource,
    User,
    UserOrg
} from "@server/db";
import { createIntegrationApiServer } from "./integrationApiServer";
import config from "@server/lib/config";
import { setHostMeta } from "@server/lib/hostMeta";
import { initTelemetryClient } from "./lib/telemetry.js";
import { TraefikConfigManager } from "./lib/traefik/TraefikConfigManager.js";
import { initTelemetryClient } from "@server/lib/telemetry";
import { TraefikConfigManager } from "@server/lib/traefik/TraefikConfigManager";
import { initCleanup } from "#dynamic/cleanup";
import license from "#dynamic/license/license";
import { initLogCleanupInterval } from "@server/lib/cleanupLogs";
import { fetchServerIp } from "@server/lib/serverIpService";

async function startServers() {
    await setHostMeta();
@@ -31,14 +34,17 @@ async function startServers() {

    await runSetupFunctions();

    await fetchServerIp();

    initTelemetryClient();

    initLogCleanupInterval();

    // Start all servers
    const apiServer = createApiServer();
    const internalServer = createInternalServer();

    let nextServer;
    nextServer = await createNextServer();
    const nextServer = await createNextServer();
    if (config.getRawConfig().traefik.file_mode) {
        const monitor = new TraefikConfigManager();
        await monitor.start();
@@ -72,6 +78,8 @@ declare global {
            userOrgId?: string;
            userOrgIds?: string[];
            remoteExitNode?: RemoteExitNode;
            siteResource?: SiteResource;
            orgPolicyAllowed?: boolean;
        }
    }
}

@@ -1,8 +1,8 @@
export async function getOrgTierData(
    orgId: string
): Promise<{ tier: string | null; active: boolean }> {
    let tier = null;
    let active = false;
    const tier = null;
    const active = false;

    return { tier, active };
}

@@ -1,5 +1,4 @@
import { eq, sql, and } from "drizzle-orm";
import NodeCache from "node-cache";
import { v4 as uuidv4 } from "uuid";
import { PutObjectCommand } from "@aws-sdk/client-s3";
import * as fs from "fs/promises";
@@ -20,6 +19,7 @@ import logger from "@server/logger";
import { sendToClient } from "#dynamic/routers/ws";
import { build } from "@server/build";
import { s3Client } from "@server/lib/s3";
import cache from "@server/lib/cache";

interface StripeEvent {
    identifier?: string;
@@ -43,7 +43,6 @@ export function noop() {
}

export class UsageService {
    private cache: NodeCache;
    private bucketName: string | undefined;
    private currentEventFile: string | null = null;
    private currentFileStartTime: number = 0;
@@ -51,7 +50,6 @@ export class UsageService {
    private uploadingFiles: Set<string> = new Set();

    constructor() {
        this.cache = new NodeCache({ stdTTL: 300 }); // 5 minute TTL
        if (noop()) {
            return;
        }
@@ -399,7 +397,7 @@ export class UsageService {
        featureId: FeatureId
    ): Promise<string | null> {
        const cacheKey = `customer_${orgId}_${featureId}`;
        const cached = this.cache.get<string>(cacheKey);
        const cached = cache.get<string>(cacheKey);

        if (cached) {
            return cached;
@@ -422,7 +420,7 @@ export class UsageService {
        const customerId = customer.customerId;

        // Cache the result
        this.cache.set(cacheKey, customerId);
        cache.set(cacheKey, customerId, 300); // 5 minute TTL

        return customerId;
    } catch (error) {
@@ -612,7 +610,8 @@ export class UsageService {

    public async getUsage(
        orgId: string,
        featureId: FeatureId
        featureId: FeatureId,
        trx: Transaction | typeof db = db
    ): Promise<Usage | null> {
        if (noop()) {
            return null;
@@ -621,7 +620,7 @@ export class UsageService {
        const usageId = `${orgId}-${featureId}`;

        try {
            const [result] = await db
            const [result] = await trx
                .select()
                .from(usage)
                .where(eq(usage.usageId, usageId))
@@ -635,7 +634,7 @@ export class UsageService {
            const meterId = getFeatureMeterId(featureId);

            try {
                const [newUsage] = await db
                const [newUsage] = await trx
                    .insert(usage)
                    .values({
                        usageId,
@@ -652,7 +651,7 @@ export class UsageService {
                return newUsage;
            } else {
                // Record was created by another process, fetch it
                const [existingUsage] = await db
                const [existingUsage] = await trx
                    .select()
                    .from(usage)
                    .where(eq(usage.usageId, usageId))
@@ -665,7 +664,7 @@ export class UsageService {
                    `Insert failed for ${orgId}/${featureId}, attempting to fetch existing record:`,
                    insertError
                );
                const [existingUsage] = await db
                const [existingUsage] = await trx
                    .select()
                    .from(usage)
                    .where(eq(usage.usageId, usageId))
@@ -699,10 +698,6 @@ export class UsageService {
        await this.uploadFileToS3();
    }

    public clearCache(): void {
        this.cache.flushAll();
    }

    /**
     * Scan the events directory for files older than 1 minute and upload them if not empty.
     */
@@ -812,7 +807,8 @@ export class UsageService {
        orgId: string,
        kickSites = false,
        featureId?: FeatureId,
        usage?: Usage
        usage?: Usage,
        trx: Transaction | typeof db = db
    ): Promise<boolean> {
        if (noop()) {
            return false;
@@ -825,7 +821,7 @@ export class UsageService {
        let orgLimits: Limit[] = [];
        if (featureId) {
            // Get all limits set for this organization
            orgLimits = await db
            orgLimits = await trx
                .select()
                .from(limits)
                .where(
@@ -836,7 +832,7 @@ export class UsageService {
                );
        } else {
            // Get all limits set for this organization
            orgLimits = await db
            orgLimits = await trx
                .select()
                .from(limits)
                .where(eq(limits.orgId, orgId));
@@ -855,7 +851,8 @@ export class UsageService {
        } else {
            currentUsage = await this.getUsage(
                orgId,
                limit.featureId as FeatureId
                limit.featureId as FeatureId,
                trx
            );
        }

@@ -890,7 +887,7 @@ export class UsageService {
        );

        // Get all sites for this organization
        const orgSites = await db
        const orgSites = await trx
            .select()
            .from(sites)
            .where(eq(sites.orgId, orgId));
@@ -902,7 +899,7 @@ export class UsageService {
        // Send termination messages to newt sites
        for (const site of orgSites) {
            if (site.type === "newt") {
                const [newt] = await db
                const [newt] = await trx
                    .select()
                    .from(newts)
                    .where(eq(newts.siteId, site.siteId))
@@ -917,7 +914,7 @@ export class UsageService {
                };

                // Don't await to prevent blocking
                sendToClient(newt.newtId, payload).catch(
                await sendToClient(newt.newtId, payload).catch(
                    (error: any) => {
                        logger.error(
                            `Failed to send termination message to newt ${newt.newtId}:`,

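The new `trx: Transaction | typeof db = db` parameter threaded through these queries lets callers run usage reads inside an enclosing transaction while existing call sites keep working unchanged. A hedged usage sketch (the `usageService` singleton and the FeatureId import path are assumptions, not shown in this diff):

import { db } from "@server/db";
import { usageService, FeatureId } from "@server/lib/usageService"; // assumed exports

// Illustrative: read usage atomically alongside other work in one transaction.
await db.transaction(async (trx) => {
    const current = await usageService.getUsage("org_123", FeatureId.Sites, trx);
    // ...make related writes with the same trx so reads and writes stay consistent...
});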
@@ -1,22 +1,36 @@
import { db, newts, Target } from "@server/db";
import { db, newts, blueprints, Blueprint } from "@server/db";
import { Config, ConfigSchema } from "./types";
import { ProxyResourcesResults, updateProxyResources } from "./proxyResources";
import { fromError } from "zod-validation-error";
import logger from "@server/logger";
import { resources, targets, sites } from "@server/db";
import { eq, and, asc, or, ne, count, isNotNull } from "drizzle-orm";
import { sites } from "@server/db";
import { eq, and, isNotNull } from "drizzle-orm";
import { addTargets as addProxyTargets } from "@server/routers/newt/targets";
import { addTargets as addClientTargets } from "@server/routers/client/targets";
import {
    ClientResourcesResults,
    updateClientResources
} from "./clientResources";
import { BlueprintSource } from "@server/routers/blueprints/types";
import { stringify as stringifyYaml } from "yaml";
import { faker } from "@faker-js/faker";
import { handleMessagingForUpdatedSiteResource } from "@server/routers/siteResource";

export async function applyBlueprint(
    orgId: string,
    configData: unknown,
    siteId?: number
): Promise<void> {
type ApplyBlueprintArgs = {
    orgId: string;
    configData: unknown;
    name?: string;
    siteId?: number;
    source?: BlueprintSource;
};

export async function applyBlueprint({
    orgId,
    configData,
    siteId,
    name,
    source = "API"
}: ApplyBlueprintArgs): Promise<Blueprint> {
    // Validate the input data
    const validationResult = ConfigSchema.safeParse(configData);
    if (!validationResult.success) {
@@ -24,6 +38,9 @@ export async function applyBlueprint(
    }

    const config: Config = validationResult.data;
    let blueprintSucceeded: boolean = false;
    let blueprintMessage: string;
    let error: any | null = null;

    try {
        let proxyResourcesResults: ProxyResourcesResults = [];
@@ -41,22 +58,63 @@ export async function applyBlueprint(
                trx,
                siteId
            );
        });

        logger.debug(
            `Successfully updated proxy resources for org ${orgId}: ${JSON.stringify(proxyResourcesResults)}`
        );
            logger.debug(
                `Successfully updated proxy resources for org ${orgId}: ${JSON.stringify(proxyResourcesResults)}`
            );

        // We need to update the targets on the newts from the successfully updated information
        for (const result of proxyResourcesResults) {
            for (const target of result.targetsToUpdate) {
                const [site] = await db
            // We need to update the targets on the newts from the successfully updated information
            for (const result of proxyResourcesResults) {
                for (const target of result.targetsToUpdate) {
                    const [site] = await trx
                        .select()
                        .from(sites)
                        .innerJoin(newts, eq(sites.siteId, newts.siteId))
                        .where(
                            and(
                                eq(sites.siteId, target.siteId),
                                eq(sites.orgId, orgId),
                                eq(sites.type, "newt"),
                                isNotNull(sites.pubKey)
                            )
                        )
                        .limit(1);

                    if (site) {
                        logger.debug(
                            `Updating target ${target.targetId} on site ${site.sites.siteId}`
                        );

                        // see if you can find a matching target health check from the healthchecksToUpdate array
                        const matchingHealthcheck =
                            result.healthchecksToUpdate.find(
                                (hc) => hc.targetId === target.targetId
                            );

                        await addProxyTargets(
                            site.newt.newtId,
                            [target],
                            matchingHealthcheck ? [matchingHealthcheck] : [],
                            result.proxyResource.protocol,
                            result.proxyResource.proxyPort
                        );
                    }
                }
            }

            logger.debug(
                `Successfully updated client resources for org ${orgId}: ${JSON.stringify(clientResourcesResults)}`
            );

            // We need to update the targets on the newts from the successfully updated information
            for (const result of clientResourcesResults) {
                const [site] = await trx
                    .select()
                    .from(sites)
                    .innerJoin(newts, eq(sites.siteId, newts.siteId))
                    .where(
                        and(
                            eq(sites.siteId, target.siteId),
                            eq(sites.siteId, result.newSiteResource.siteId),
                            eq(sites.orgId, orgId),
                            eq(sites.type, "newt"),
                            isNotNull(sites.pubKey)
@@ -64,114 +122,67 @@ export async function applyBlueprint(
                        )
                    )
                    .limit(1);

                if (site) {
                if (!site) {
                    logger.debug(
                        `Updating target ${target.targetId} on site ${site.sites.siteId}`
                    );

                    // see if you can find a matching target health check from the healthchecksToUpdate array
                    const matchingHealthcheck =
                        result.healthchecksToUpdate.find(
                            (hc) => hc.targetId === target.targetId
                        );

                    await addProxyTargets(
                        site.newt.newtId,
                        [target],
                        matchingHealthcheck ? [matchingHealthcheck] : [],
                        result.proxyResource.protocol,
                        result.proxyResource.proxyPort
                        `No newt site found for client resource ${result.newSiteResource.siteResourceId}, skipping target update`
                    );
                    continue;
                }
            }
        }

        logger.debug(
            `Successfully updated client resources for org ${orgId}: ${JSON.stringify(clientResourcesResults)}`
        );

        // We need to update the targets on the newts from the successfully updated information
        for (const result of clientResourcesResults) {
            const [site] = await db
                .select()
                .from(sites)
                .innerJoin(newts, eq(sites.siteId, newts.siteId))
                .where(
                    and(
                        eq(sites.siteId, result.resource.siteId),
                        eq(sites.orgId, orgId),
                        eq(sites.type, "newt"),
                        isNotNull(sites.pubKey)
                    )
                )
                .limit(1);

            if (site) {
                logger.debug(
                    `Updating client resource ${result.resource.siteResourceId} on site ${site.sites.siteId}`
                    `Updating client resource ${result.newSiteResource.siteResourceId} on site ${site.sites.siteId}`
                );

                await addClientTargets(
                    site.newt.newtId,
                    result.resource.destinationIp,
                    result.resource.destinationPort,
                    result.resource.protocol,
                    result.resource.proxyPort
                await handleMessagingForUpdatedSiteResource(
                    result.oldSiteResource,
                    result.newSiteResource,
                    { siteId: site.sites.siteId, orgId: site.sites.orgId },
                    trx
                );

                // await addClientTargets(
                //     site.newt.newtId,
                //     result.resource.destination,
                //     result.resource.destinationPort,
                //     result.resource.protocol,
                //     result.resource.proxyPort
                // );
            }
        }
    } catch (error) {
        logger.error(`Failed to update database from config: ${error}`);
        throw error;
    }
}
        });

        // await updateDatabaseFromConfig("org_i21aifypnlyxur2", {
        //     resources: {
        //         "resource-nice-id": {
        //             name: "this is my resource",
        //             protocol: "http",
        //             "full-domain": "level1.test.example.com",
        //             "host-header": "example.com",
        //             "tls-server-name": "example.com",
        //             auth: {
        //                 pincode: 123456,
        //                 password: "sadfasdfadsf",
        //                 "sso-enabled": true,
        //                 "sso-roles": ["Member"],
        //                 "sso-users": ["owen@fossorial.io"],
        //                 "whitelist-users": ["owen@fossorial.io"]
        //             },
        //             targets: [
        //                 {
        //                     site: "glossy-plains-viscacha-rat",
        //                     hostname: "localhost",
        //                     method: "http",
        //                     port: 8000,
        //                     healthcheck: {
        //                         port: 8000,
        //                         hostname: "localhost"
        //                     }
        //                 },
        //                 {
        //                     site: "glossy-plains-viscacha-rat",
        //                     hostname: "localhost",
        //                     method: "http",
        //                     port: 8001
        //                 }
        //             ]
        //         },
        //         "resource-nice-id2": {
        //             name: "http server",
        //             protocol: "tcp",
        //             "proxy-port": 3000,
        //             targets: [
        //                 {
        //                     site: "glossy-plains-viscacha-rat",
        //                     hostname: "localhost",
        //                     port: 3000,
        //                 }
        //             ]
        //         }
        //     }
        // });
        blueprintSucceeded = true;
        blueprintMessage = "Blueprint applied successfully";
    } catch (err) {
        blueprintSucceeded = false;
        blueprintMessage = `Blueprint applied with errors: ${err}`;
        logger.error(blueprintMessage);
        error = err;
    }

    let blueprint: Blueprint | null = null;
    await db.transaction(async (trx) => {
        const newBlueprint = await trx
            .insert(blueprints)
            .values({
                orgId,
                name:
                    name ??
                    `${faker.word.adjective()} ${faker.word.adjective()} ${faker.word.noun()}`,
                contents: stringifyYaml(configData),
                createdAt: Math.floor(Date.now() / 1000),
                succeeded: blueprintSucceeded,
                message: blueprintMessage,
                source
            })
            .returning();

        blueprint = newBlueprint[0];
    });

    if (!blueprint || (source !== "UI" && !blueprintSucceeded)) {
        // ^^^^^^^^^^^^^^^ The UI considers a failed blueprint as a valid response
        throw error ?? "Unknown Server Error";
    }

    return blueprint;
}

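A hedged sketch of driving the reworked entry point from an API handler; the YAML parsing step and import path are assumptions, only the argument object shape and return type come from the code above:

import { parse as parseYaml } from "yaml";
import { applyBlueprint } from "./applyBlueprint"; // assumed path

// Illustrative: apply a user-submitted YAML blueprint and surface the run record.
async function applyFromApi(orgId: string, yamlText: string) {
    const run = await applyBlueprint({
        orgId,
        configData: parseYaml(yamlText),
        name: "my blueprint",
        source: "API" // the default, shown here for clarity
    });
    return { succeeded: run.succeeded, message: run.message };
}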
@@ -29,15 +29,29 @@ export async function applyNewtDockerBlueprint(

    logger.debug(`Received Docker blueprint: ${JSON.stringify(blueprint)}`);

    // make sure this is not an empty object
    if (isEmptyObject(blueprint)) {
        return;
    }

    if (isEmptyObject(blueprint["proxy-resources"]) && isEmptyObject(blueprint["client-resources"])) {
        return;
    }

    // Update the blueprint in the database
    await applyBlueprint(site.orgId, blueprint, site.siteId);
    await applyBlueprint({
        orgId: site.orgId,
        configData: blueprint,
        siteId: site.siteId,
        source: "NEWT"
    });
} catch (error) {
    logger.error(`Failed to update database from config: ${error}`);
    await sendToClient(newtId, {
        type: "newt/blueprint/results",
        data: {
            success: false,
            message: `Failed to update database from config: ${error}`
            message: `Failed to apply blueprint from config: ${error}`
        }
    });
    return;
@@ -51,3 +65,10 @@ export async function applyNewtDockerBlueprint(
    }
});
}

function isEmptyObject(obj: any) {
    if (obj === null || obj === undefined) {
        return true;
    }
    return Object.keys(obj).length === 0 && obj.constructor === Object;
}

@@ -1,17 +1,23 @@
import {
    clients,
    clientSiteResources,
    roles,
    roleSiteResources,
    SiteResource,
    siteResources,
    Transaction,
    userOrgs,
    users,
    userSiteResources
} from "@server/db";
import { sites } from "@server/db";
import { eq, and } from "drizzle-orm";
import {
    Config,
} from "./types";
import { eq, and, ne, inArray } from "drizzle-orm";
import { Config } from "./types";
import logger from "@server/logger";

export type ClientResourcesResults = {
    resource: SiteResource;
    newSiteResource: SiteResource;
    oldSiteResource?: SiteResource;
}[];

export async function updateClientResources(
@@ -69,16 +75,22 @@ export async function updateClientResources(
        }

        if (existingResource) {
            if (existingResource.siteId !== site.siteId) {
                throw new Error(
                    `You can not change the site of an existing client resource (${resourceNiceId}). Please delete and recreate it instead.`
                );
            }

            // Update existing resource
            const [updatedResource] = await trx
                .update(siteResources)
                .set({
                    name: resourceData.name || resourceNiceId,
                    siteId: site.siteId,
                    proxyPort: resourceData["proxy-port"]!,
                    destinationIp: resourceData.hostname,
                    destinationPort: resourceData["internal-port"],
                    protocol: resourceData.protocol
                    mode: resourceData.mode,
                    destination: resourceData.destination,
                    enabled: true, // hardcoded for now
                    // enabled: resourceData.enabled ?? true,
                    alias: resourceData.alias || null
                })
                .where(
                    eq(
@@ -88,7 +100,110 @@ export async function updateClientResources(
                    )
                )
                .returning();

            results.push({ resource: updatedResource });
            const siteResourceId = existingResource.siteResourceId;
            const orgId = existingResource.orgId;

            await trx
                .delete(clientSiteResources)
                .where(eq(clientSiteResources.siteResourceId, siteResourceId));

            if (resourceData.machines.length > 0) {
                // get clientIds from niceIds
                const clientsToUpdate = await trx
                    .select()
                    .from(clients)
                    .where(
                        and(
                            inArray(clients.niceId, resourceData.machines),
                            eq(clients.orgId, orgId)
                        )
                    );

                const clientIds = clientsToUpdate.map(
                    (client) => client.clientId
                );

                await trx.insert(clientSiteResources).values(
                    clientIds.map((clientId) => ({
                        clientId,
                        siteResourceId
                    }))
                );
            }

            await trx
                .delete(userSiteResources)
                .where(eq(userSiteResources.siteResourceId, siteResourceId));

            if (resourceData.users.length > 0) {
                // get userIds from username
                const usersToUpdate = await trx
                    .select()
                    .from(users)
                    .innerJoin(userOrgs, eq(users.userId, userOrgs.userId))
                    .where(
                        and(
                            inArray(users.username, resourceData.users),
                            eq(userOrgs.orgId, orgId)
                        )
                    );

                const userIds = usersToUpdate.map((user) => user.user.userId);

                await trx
                    .insert(userSiteResources)
                    .values(
                        userIds.map((userId) => ({ userId, siteResourceId }))
                    );
            }

            // Get all admin role IDs for this org to exclude from deletion
            const adminRoles = await trx
                .select()
                .from(roles)
                .where(and(eq(roles.isAdmin, true), eq(roles.orgId, orgId)));
            const adminRoleIds = adminRoles.map((role) => role.roleId);

            if (adminRoleIds.length > 0) {
                await trx.delete(roleSiteResources).where(
                    and(
                        eq(roleSiteResources.siteResourceId, siteResourceId),
                        ne(roleSiteResources.roleId, adminRoleIds[0]) // delete all but the admin role
                    )
                );
            } else {
                await trx
                    .delete(roleSiteResources)
                    .where(
                        eq(roleSiteResources.siteResourceId, siteResourceId)
                    );
            }

            if (resourceData.roles.length > 0) {
                // Re-add specified roles but we need to get the roleIds from the role name in the array
                const rolesToUpdate = await trx
                    .select()
                    .from(roles)
                    .where(
                        and(
                            eq(roles.orgId, orgId),
                            inArray(roles.name, resourceData.roles)
                        )
                    );

                const roleIds = rolesToUpdate.map((role) => role.roleId);

                await trx
                    .insert(roleSiteResources)
                    .values(
                        roleIds.map((roleId) => ({ roleId, siteResourceId }))
                    );
            }

            results.push({
                newSiteResource: updatedResource,
                oldSiteResource: existingResource
            });
        } else {
            // Create new resource
            const [newResource] = await trx
@@ -98,18 +213,103 @@ export async function updateClientResources(
                    siteId: site.siteId,
                    niceId: resourceNiceId,
                    name: resourceData.name || resourceNiceId,
                    proxyPort: resourceData["proxy-port"]!,
                    destinationIp: resourceData.hostname,
                    destinationPort: resourceData["internal-port"],
                    protocol: resourceData.protocol
                    mode: resourceData.mode,
                    destination: resourceData.destination,
                    enabled: true, // hardcoded for now
                    // enabled: resourceData.enabled ?? true,
                    alias: resourceData.alias || null
                })
                .returning();

            const siteResourceId = newResource.siteResourceId;

            const [adminRole] = await trx
                .select()
                .from(roles)
                .where(and(eq(roles.isAdmin, true), eq(roles.orgId, orgId)))
                .limit(1);

            if (!adminRole) {
                throw new Error(`Admin role not found for org ${orgId}`);
            }

            await trx.insert(roleSiteResources).values({
                roleId: adminRole.roleId,
                siteResourceId: siteResourceId
            });

            if (resourceData.roles.length > 0) {
                // get roleIds from role names
                const rolesToUpdate = await trx
                    .select()
                    .from(roles)
                    .where(
                        and(
                            eq(roles.orgId, orgId),
                            inArray(roles.name, resourceData.roles)
                        )
                    );

                const roleIds = rolesToUpdate.map((role) => role.roleId);

                await trx
                    .insert(roleSiteResources)
                    .values(
                        roleIds.map((roleId) => ({ roleId, siteResourceId }))
                    );
            }

            if (resourceData.users.length > 0) {
                // get userIds from username
                const usersToUpdate = await trx
                    .select()
                    .from(users)
                    .innerJoin(userOrgs, eq(users.userId, userOrgs.userId))
                    .where(
                        and(
                            inArray(users.username, resourceData.users),
                            eq(userOrgs.orgId, orgId)
                        )
                    );

                const userIds = usersToUpdate.map((user) => user.user.userId);

                await trx
                    .insert(userSiteResources)
                    .values(
                        userIds.map((userId) => ({ userId, siteResourceId }))
                    );
            }

            if (resourceData.machines.length > 0) {
                // get clientIds from niceIds
                const clientsToUpdate = await trx
                    .select()
                    .from(clients)
                    .where(
                        and(
                            inArray(clients.niceId, resourceData.machines),
                            eq(clients.orgId, orgId)
                        )
                    );

                const clientIds = clientsToUpdate.map(
                    (client) => client.clientId
                );

                await trx.insert(clientSiteResources).values(
                    clientIds.map((clientId) => ({
                        clientId,
                        siteResourceId
                    }))
                );
            }

            logger.info(
                `Created new client resource ${newResource.name} (${newResource.siteResourceId}) for org ${orgId}`
            );

            results.push({ resource: newResource });
            results.push({ newSiteResource: newResource });
        }
    }


@@ -87,8 +87,8 @@ export function convertValue(value: string): any {
// "resources.resource-nice-id.auth.password": "sadfasdfadsf",
// "resources.resource-nice-id.auth.sso-enabled": "true",
// "resources.resource-nice-id.auth.sso-roles[0]": "Member",
// "resources.resource-nice-id.auth.sso-users[0]": "owen@fossorial.io",
// "resources.resource-nice-id.auth.whitelist-users[0]": "owen@fossorial.io",
// "resources.resource-nice-id.auth.sso-users[0]": "owen@pangolin.net",
// "resources.resource-nice-id.auth.whitelist-users[0]": "owen@pangolin.net",
// "resources.resource-nice-id.targets[0].hostname": "localhost",
// "resources.resource-nice-id.targets[0].method": "http",
// "resources.resource-nice-id.targets[0].port": "8000",

@@ -30,6 +30,7 @@ import { pickPort } from "@server/routers/target/helpers";
import { resourcePassword } from "@server/db";
import { hashPassword } from "@server/auth/password";
import { isValidCIDR, isValidIP, isValidUrlGlobPattern } from "../validators";
import { get } from "http";

export type ProxyResourcesResults = {
    proxyResource: Resource;
@@ -114,7 +115,12 @@ export async function updateProxyResources(
                    internalPort: internalPortToCreate,
                    path: targetData.path,
                    pathMatchType: targetData["path-match"],
                    rewritePath: targetData.rewritePath,
                    rewritePath:
                        targetData.rewritePath ||
                        targetData["rewrite-path"] ||
                        (targetData["rewrite-match"] === "stripPrefix"
                            ? "/"
                            : undefined),
                    rewritePathType: targetData["rewrite-match"],
                    priority: targetData.priority
                })
@@ -139,10 +145,14 @@ export async function updateProxyResources(
                    hcHostname: healthcheckData?.hostname,
                    hcPort: healthcheckData?.port,
                    hcInterval: healthcheckData?.interval,
                    hcUnhealthyInterval: healthcheckData?.unhealthyInterval,
                    hcUnhealthyInterval:
                        healthcheckData?.unhealthyInterval ||
                        healthcheckData?.["unhealthy-interval"],
                    hcTimeout: healthcheckData?.timeout,
                    hcHeaders: hcHeaders,
                    hcFollowRedirects: healthcheckData?.followRedirects,
                    hcFollowRedirects:
                        healthcheckData?.followRedirects ||
                        healthcheckData?.["follow-redirects"],
                    hcMethod: healthcheckData?.method,
                    hcStatus: healthcheckData?.status,
                    hcHealth: "unknown"
@@ -211,6 +221,8 @@ export async function updateProxyResources(
                    domainId: domain ? domain.domainId : null,
                    enabled: resourceEnabled,
                    sso: resourceData.auth?.["sso-enabled"] || false,
                    skipToIdpId:
                        resourceData.auth?.["auto-login-idp"] || null,
                    ssl: resourceSsl,
                    setHostHeader: resourceData["host-header"] || null,
                    tlsServerName: resourceData["tls-server-name"] || null,
@@ -392,7 +404,12 @@ export async function updateProxyResources(
                        enabled: targetData.enabled,
                        path: targetData.path,
                        pathMatchType: targetData["path-match"],
                        rewritePath: targetData.rewritePath,
                        rewritePath:
                            targetData.rewritePath ||
                            targetData["rewrite-path"] ||
                            (targetData["rewrite-match"] === "stripPrefix"
                                ? "/"
                                : undefined),
                        rewritePathType: targetData["rewrite-match"],
                        priority: targetData.priority
                    })
@@ -452,10 +469,13 @@ export async function updateProxyResources(
                        hcPort: healthcheckData?.port,
                        hcInterval: healthcheckData?.interval,
                        hcUnhealthyInterval:
                            healthcheckData?.unhealthyInterval,
                            healthcheckData?.unhealthyInterval ||
                            healthcheckData?.["unhealthy-interval"],
                        hcTimeout: healthcheckData?.timeout,
                        hcHeaders: hcHeaders,
                        hcFollowRedirects: healthcheckData?.followRedirects,
                        hcFollowRedirects:
                            healthcheckData?.followRedirects ||
                            healthcheckData?.["follow-redirects"],
                        hcMethod: healthcheckData?.method,
                        hcStatus: healthcheckData?.status
                    })
@@ -527,7 +547,8 @@ export async function updateProxyResources(
                    if (
                        existingRule.action !== getRuleAction(rule.action) ||
                        existingRule.match !== rule.match.toUpperCase() ||
                        existingRule.value !== rule.value
                        existingRule.value !==
                            getRuleValue(rule.match.toUpperCase(), rule.value)
                    ) {
                        validateRule(rule);
                        await trx
@@ -535,7 +556,10 @@ export async function updateProxyResources(
                            .set({
                                action: getRuleAction(rule.action),
                                match: rule.match.toUpperCase(),
                                value: rule.value
                                value: getRuleValue(
                                    rule.match.toUpperCase(),
                                    rule.value
                                )
                            })
                            .where(
                                eq(resourceRules.ruleId, existingRule.ruleId)
@@ -547,7 +571,10 @@ export async function updateProxyResources(
                            resourceId: existingResource.resourceId,
                            action: getRuleAction(rule.action),
                            match: rule.match.toUpperCase(),
                            value: rule.value,
                            value: getRuleValue(
                                rule.match.toUpperCase(),
                                rule.value
                            ),
                            priority: index + 1 // start priorities at 1
                        });
                    }
@@ -592,6 +619,7 @@ export async function updateProxyResources(
                domainId: domain ? domain.domainId : null,
                enabled: resourceEnabled,
                sso: resourceData.auth?.["sso-enabled"] || false,
                skipToIdpId: resourceData.auth?.["auto-login-idp"] || null,
                setHostHeader: resourceData["host-header"] || null,
                tlsServerName: resourceData["tls-server-name"] || null,
                ssl: resourceSsl,
@@ -705,7 +733,7 @@ export async function updateProxyResources(
                    resourceId: newResource.resourceId,
                    action: getRuleAction(rule.action),
                    match: rule.match.toUpperCase(),
                    value: rule.value,
                    value: getRuleValue(rule.match.toUpperCase(), rule.value),
                    priority: index + 1 // start priorities at 1
                });
            }
@@ -735,6 +763,14 @@ function getRuleAction(input: string) {
    return action;
}

function getRuleValue(match: string, value: string) {
    // if the match is a country, uppercase the value
    if (match == "COUNTRY") {
        return value.toUpperCase();
    }
    return value;
}

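In other words, country values are case-normalized while every other match type passes through untouched; the function is module-private, so these calls are illustrative of its behavior only:

getRuleValue("COUNTRY", "us"); // => "US"
getRuleValue("CIDR", "10.0.0.0/8"); // => "10.0.0.0/8" (unchanged)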
function validateRule(rule: any) {
    if (rule.match === "cidr") {
        if (!isValidCIDR(rule.value)) {
@@ -763,10 +799,6 @@ async function syncRoleResources(
        .where(eq(roleResources.resourceId, resourceId));

    for (const roleName of ssoRoles) {
        if (roleName === "Admin") {
            continue; // never add admin access
        }

        const [role] = await trx
            .select()
            .from(roles)
@@ -777,6 +809,10 @@ async function syncRoleResources(
            throw new Error(`Role not found: ${roleName} in org ${orgId}`);
        }

        if (role.isAdmin) {
            continue; // never add admin access
        }

        const existingRoleResource = existingRoleResources.find(
            (rr) => rr.roleId === role.roleId
        );
@@ -824,16 +860,16 @@ async function syncUserResources(
        .from(userResources)
        .where(eq(userResources.resourceId, resourceId));

    for (const email of ssoUsers) {
    for (const username of ssoUsers) {
        const [user] = await trx
            .select()
            .from(users)
            .innerJoin(userOrgs, eq(users.userId, userOrgs.userId))
            .where(and(eq(users.email, email), eq(userOrgs.orgId, orgId)))
            .where(and(eq(users.username, username), eq(userOrgs.orgId, orgId)))
            .limit(1);

        if (!user) {
            throw new Error(`User not found: ${email} in org ${orgId}`);
            throw new Error(`User not found: ${username} in org ${orgId}`);
        }

        const existingUserResource = existingUserResources.find(
@@ -861,7 +897,11 @@ async function syncUserResources(
            )
            .limit(1);

        if (user && user.user.email && !ssoUsers.includes(user.user.email)) {
        if (
            user &&
            user.user.username &&
            !ssoUsers.includes(user.user.username)
        ) {
            await trx
                .delete(userResources)
                .where(
@@ -1046,7 +1086,7 @@ async function getDomainId(

    // remove the base domain of the domain
    let subdomain = null;
    if (domainSelection.type == "ns") {
    if (domainSelection.type == "ns" || domainSelection.type == "wildcard") {
        if (fullDomain != baseDomain) {
            subdomain = fullDomain.replace(`.${baseDomain}`, "");
        }

@@ -7,18 +7,24 @@ export const SiteSchema = z.object({

export const TargetHealthCheckSchema = z.object({
    hostname: z.string(),
    port: z.number().int().min(1).max(65535),
    port: z.int().min(1).max(65535),
    enabled: z.boolean().optional().default(true),
    path: z.string().optional(),
    path: z.string().optional().default("/"),
    scheme: z.string().optional(),
    mode: z.string().default("http"),
    interval: z.number().int().default(30),
    unhealthyInterval: z.number().int().default(30),
    timeout: z.number().int().default(5),
    headers: z.array(z.object({ name: z.string(), value: z.string() })).nullable().optional().default(null),
    followRedirects: z.boolean().default(true),
    interval: z.int().default(30),
    "unhealthy-interval": z.int().default(30),
    unhealthyInterval: z.int().optional(), // deprecated alias
    timeout: z.int().default(5),
    headers: z
        .array(z.object({ name: z.string(), value: z.string() }))
        .nullable()
        .optional()
        .default(null),
    "follow-redirects": z.boolean().default(true),
    followRedirects: z.boolean().optional(), // deprecated alias
    method: z.string().default("GET"),
    status: z.number().int().optional()
    status: z.int().optional()
});

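With the kebab-case keys now canonical and the camelCase names kept as optional deprecated aliases, both spellings still parse, as sketched below:

// Both spellings validate; the kebab-case key is the one carrying the default.
TargetHealthCheckSchema.parse({
    hostname: "localhost",
    port: 8080,
    "unhealthy-interval": 60 // canonical key
});
TargetHealthCheckSchema.parse({
    hostname: "localhost",
    port: 8080,
    unhealthyInterval: 60 // deprecated alias, still accepted
});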
// Schema for individual target within a resource
@@ -26,15 +32,19 @@ export const TargetSchema = z.object({
    site: z.string().optional(),
    method: z.enum(["http", "https", "h2c"]).optional(),
    hostname: z.string(),
    port: z.number().int().min(1).max(65535),
    port: z.int().min(1).max(65535),
    enabled: z.boolean().optional().default(true),
    "internal-port": z.number().int().min(1).max(65535).optional(),
    "internal-port": z.int().min(1).max(65535).optional(),
    path: z.string().optional(),
    "path-match": z.enum(["exact", "prefix", "regex"]).optional().nullable(),
    healthcheck: TargetHealthCheckSchema.optional(),
    rewritePath: z.string().optional(),
    "rewrite-match": z.enum(["exact", "prefix", "regex", "stripPrefix"]).optional().nullable(),
    priority: z.number().int().min(1).max(1000).optional().default(100)
    rewritePath: z.string().optional(), // deprecated alias
    "rewrite-path": z.string().optional(),
    "rewrite-match": z
        .enum(["exact", "prefix", "regex", "stripPrefix"])
        .optional()
        .nullable(),
    priority: z.int().min(1).max(1000).optional().default(100)
});
export type TargetData = z.infer<typeof TargetSchema>;

@@ -42,20 +52,23 @@ export const AuthSchema = z.object({
    // pincode has to have 6 digits
    pincode: z.number().min(100000).max(999999).optional(),
    password: z.string().min(1).optional(),
    "basic-auth": z.object({
        user: z.string().min(1),
        password: z.string().min(1)
    }).optional(),
    "basic-auth": z
        .object({
            user: z.string().min(1),
            password: z.string().min(1)
        })
        .optional(),
    "sso-enabled": z.boolean().optional().default(false),
    "sso-roles": z
        .array(z.string())
        .optional()
        .default([])
        .refine((roles) => !roles.includes("Admin"), {
            message: "Admin role cannot be included in sso-roles"
            error: "Admin role cannot be included in sso-roles"
        }),
    "sso-users": z.array(z.string().email()).optional().default([]),
    "whitelist-users": z.array(z.string().email()).optional().default([]),
    "sso-users": z.array(z.email()).optional().default([]),
    "whitelist-users": z.array(z.email()).optional().default([]),
    "auto-login-idp": z.int().positive().optional()
});

export const RuleSchema = z.object({
@@ -76,7 +89,7 @@ export const ResourceSchema = z
        protocol: z.enum(["http", "tcp", "udp"]).optional(),
        ssl: z.boolean().optional(),
        "full-domain": z.string().optional(),
        "proxy-port": z.number().int().min(1).max(65535).optional(),
        "proxy-port": z.int().min(1).max(65535).optional(),
        enabled: z.boolean().optional(),
        targets: z.array(TargetSchema.nullable()).optional().default([]),
        auth: AuthSchema.optional(),
@@ -97,9 +110,8 @@ export const ResourceSchema = z
            );
        },
        {
            message:
                "Resource must either be targets-only (only 'targets' field) or have both 'name' and 'protocol' fields at a minimum",
            path: ["name", "protocol"]
            path: ["name", "protocol"],
            error: "Resource must either be targets-only (only 'targets' field) or have both 'name' and 'protocol' fields at a minimum"
        }
    )
    .refine(
@@ -114,6 +126,19 @@ export const ResourceSchema = z
                (target) => target == null || target.method !== undefined
            );
        }
        return true;
        },
        {
            path: ["targets"],
            error: "When protocol is 'http', all targets must have a 'method' field"
        }
    )
    .refine(
        (resource) => {
            if (isTargetsOnlyResource(resource)) {
                return true;
            }

            // If protocol is tcp or udp, no target should have method field
            if (resource.protocol === "tcp" || resource.protocol === "udp") {
                return resource.targets.every(
@@ -122,19 +147,9 @@ export const ResourceSchema = z
            }
            return true;
        },
        (resource) => {
            if (resource.protocol === "http") {
                return {
                    message:
                        "When protocol is 'http', all targets must have a 'method' field",
                    path: ["targets"]
                };
            }
            return {
                message:
                    "When protocol is 'tcp' or 'udp', targets must not have a 'method' field",
                path: ["targets"]
            };
        {
            path: ["targets"],
            error: "When protocol is 'tcp' or 'udp', targets must not have a 'method' field"
        }
    )
    .refine(
@@ -153,9 +168,8 @@ export const ResourceSchema = z
            return true;
        },
        {
            message:
                "When protocol is 'http', a 'full-domain' must be provided",
            path: ["full-domain"]
            path: ["full-domain"],
            error: "When protocol is 'http', a 'full-domain' must be provided"
        }
    )
    .refine(
@@ -171,9 +185,8 @@ export const ResourceSchema = z
            return true;
        },
        {
            message:
                "When protocol is 'tcp' or 'udp', 'proxy-port' must be provided",
            path: ["proxy-port", "exit-node"]
            path: ["proxy-port", "exit-node"],
            error: "When protocol is 'tcp' or 'udp', 'proxy-port' must be provided"
        }
    )
    .refine(
@@ -190,9 +203,8 @@ export const ResourceSchema = z
            return true;
        },
        {
            message:
                "When protocol is 'tcp' or 'udp', 'auth' must not be provided",
            path: ["auth"]
            path: ["auth"],
            error: "When protocol is 'tcp' or 'udp', 'auth' must not be provided"
        }
    );

@@ -200,188 +212,219 @@ export function isTargetsOnlyResource(resource: any): boolean {
    return Object.keys(resource).length === 1 && resource.targets;
}

export const ClientResourceSchema = z.object({
    name: z.string().min(2).max(100),
    site: z.string().min(2).max(100).optional(),
    protocol: z.enum(["tcp", "udp"]),
    "proxy-port": z.number().min(1).max(65535),
    "hostname": z.string().min(1).max(255),
    "internal-port": z.number().min(1).max(65535),
    enabled: z.boolean().optional().default(true)
});
export const ClientResourceSchema = z
    .object({
        name: z.string().min(1).max(255),
        mode: z.enum(["host", "cidr"]),
        site: z.string(),
        // protocol: z.enum(["tcp", "udp"]).optional(),
        // proxyPort: z.int().positive().optional(),
        // destinationPort: z.int().positive().optional(),
        destination: z.string().min(1),
        // enabled: z.boolean().default(true),
        alias: z
            .string()
            .regex(
                /^(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$/,
                "Alias must be a fully qualified domain name (e.g., example.com)"
            )
            .optional(),
        roles: z
            .array(z.string())
            .optional()
            .default([])
            .refine((roles) => !roles.includes("Admin"), {
                error: "Admin role cannot be included in roles"
            }),
        users: z.array(z.email()).optional().default([]),
        machines: z.array(z.string()).optional().default([])
    })
    .refine(
        (data) => {
            if (data.mode === "host") {
                // Check if it's a valid IP address using zod (v4 or v6)
                const isValidIP = z
                    .union([z.ipv4(), z.ipv6()])
                    .safeParse(data.destination).success;

                if (isValidIP) {
                    return true;
                }

                // Check if it's a valid domain (hostname pattern, TLD not required)
                const domainRegex =
                    /^(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)*[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$/;
                const isValidDomain = domainRegex.test(data.destination);
                const isValidAlias = data.alias && domainRegex.test(data.alias);

                return isValidDomain && isValidAlias; // require the alias to be set in the case of domain
            }
            return true;
        },
        {
            message:
                "Destination must be a valid IP address or valid domain AND alias is required"
        }
    )
    .refine(
        (data) => {
            if (data.mode === "cidr") {
                // Check if it's a valid CIDR (v4 or v6)
                const isValidCIDR = z
                    .union([z.cidrv4(), z.cidrv6()])
                    .safeParse(data.destination).success;
                return isValidCIDR;
            }
            return true;
        },
        {
            message: "Destination must be a valid CIDR notation for cidr mode"
        }
    );

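Two hedged examples that satisfy the reworked schema (names are illustrative): host mode with an IP destination needs no alias, while cidr mode requires CIDR notation:

// Valid: host mode pointing at an IP (alias optional for IPs).
ClientResourceSchema.parse({
    name: "internal-db",
    mode: "host",
    site: "my-site",
    destination: "10.1.2.3"
});

// Valid: cidr mode must use CIDR notation for the destination.
ClientResourceSchema.parse({
    name: "lan-range",
    mode: "cidr",
    site: "my-site",
    destination: "10.1.0.0/16"
});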
// Schema for the entire configuration object
export const ConfigSchema = z
    .object({
        "proxy-resources": z.record(z.string(), ResourceSchema).optional().default({}),
        "client-resources": z.record(z.string(), ClientResourceSchema).optional().default({}),
        sites: z.record(z.string(), SiteSchema).optional().default({})
        "proxy-resources": z
            .record(z.string(), ResourceSchema)
            .optional()
            .prefault({}),
        "public-resources": z
            .record(z.string(), ResourceSchema)
            .optional()
            .prefault({}),
        "client-resources": z
            .record(z.string(), ClientResourceSchema)
            .optional()
            .prefault({}),
        "private-resources": z
            .record(z.string(), ClientResourceSchema)
            .optional()
            .prefault({}),
        sites: z.record(z.string(), SiteSchema).optional().prefault({})
    })
    .refine(
    .transform((data) => {
        // Merge public-resources into proxy-resources
        if (data["public-resources"]) {
            data["proxy-resources"] = {
                ...data["proxy-resources"],
                ...data["public-resources"]
            };
            delete (data as any)["public-resources"];
        }

        // Merge private-resources into client-resources
        if (data["private-resources"]) {
            data["client-resources"] = {
                ...data["client-resources"],
                ...data["private-resources"]
            };
            delete (data as any)["private-resources"];
        }

        return data as {
            "proxy-resources": Record<string, z.infer<typeof ResourceSchema>>;
            "client-resources": Record<string, z.infer<typeof ClientResourceSchema>>;
            sites: Record<string, z.infer<typeof SiteSchema>>;
        };
    })
    .superRefine((config, ctx) => {
        // Enforce the full-domain uniqueness across resources in the same stack
        (config) => {
            // Extract all full-domain values with their resource keys
            const fullDomainMap = new Map<string, string[]>();
        const fullDomainMap = new Map<string, string[]>();

            Object.entries(config["proxy-resources"]).forEach(
                ([resourceKey, resource]) => {
                    const fullDomain = resource["full-domain"];
                    if (fullDomain) {
                        // Only process if full-domain is defined
                        if (!fullDomainMap.has(fullDomain)) {
                            fullDomainMap.set(fullDomain, []);
                        }
                        fullDomainMap.get(fullDomain)!.push(resourceKey);
        Object.entries(config["proxy-resources"]).forEach(
            ([resourceKey, resource]) => {
                const fullDomain = resource["full-domain"];
                if (fullDomain) {
                    // Only process if full-domain is defined
                    if (!fullDomainMap.has(fullDomain)) {
                        fullDomainMap.set(fullDomain, []);
                    }
                    fullDomainMap.get(fullDomain)!.push(resourceKey);
                }
            );
            }
        );

            // Find duplicates
            const duplicates = Array.from(fullDomainMap.entries()).filter(
                ([_, resourceKeys]) => resourceKeys.length > 1
            );
        const fullDomainDuplicates = Array.from(fullDomainMap.entries())
            .filter(([_, resourceKeys]) => resourceKeys.length > 1)
            .map(
                ([fullDomain, resourceKeys]) =>
                    `'${fullDomain}' used by resources: ${resourceKeys.join(", ")}`
            )
            .join("; ");

            return duplicates.length === 0;
        },
        (config) => {
            // Extract duplicates for error message
            const fullDomainMap = new Map<string, string[]>();

            Object.entries(config["proxy-resources"]).forEach(
                ([resourceKey, resource]) => {
                    const fullDomain = resource["full-domain"];
                    if (fullDomain) {
                        // Only process if full-domain is defined
                        if (!fullDomainMap.has(fullDomain)) {
                            fullDomainMap.set(fullDomain, []);
                        }
                        fullDomainMap.get(fullDomain)!.push(resourceKey);
                    }
                }
            );

            const duplicates = Array.from(fullDomainMap.entries())
                .filter(([_, resourceKeys]) => resourceKeys.length > 1)
                .map(
                    ([fullDomain, resourceKeys]) =>
                        `'${fullDomain}' used by resources: ${resourceKeys.join(", ")}`
                )
                .join("; ");

            return {
                message: `Duplicate 'full-domain' values found: ${duplicates}`,
                path: ["resources"]
            };
        if (fullDomainDuplicates.length !== 0) {
            ctx.addIssue({
                code: z.ZodIssueCode.custom,
                path: ["proxy-resources"],
                message: `Duplicate 'full-domain' values found: ${fullDomainDuplicates}`
            });
        }
    )
    .refine(
        // Enforce proxy-port uniqueness within proxy-resources
        (config) => {
            const proxyPortMap = new Map<number, string[]>();

            Object.entries(config["proxy-resources"]).forEach(
                ([resourceKey, resource]) => {
                    const proxyPort = resource["proxy-port"];
                    if (proxyPort !== undefined) {
                        if (!proxyPortMap.has(proxyPort)) {
                            proxyPortMap.set(proxyPort, []);
                        }
                        proxyPortMap.get(proxyPort)!.push(resourceKey);
        // Enforce proxy-port uniqueness within proxy-resources per protocol
        const protocolPortMap = new Map<string, string[]>();

        Object.entries(config["proxy-resources"]).forEach(
            ([resourceKey, resource]) => {
                const proxyPort = resource["proxy-port"];
                const protocol = resource.protocol;
                if (proxyPort !== undefined && protocol !== undefined) {
                    const key = `${protocol}:${proxyPort}`;
                    if (!protocolPortMap.has(key)) {
                        protocolPortMap.set(key, []);
                    }
                    protocolPortMap.get(key)!.push(resourceKey);
                }
            );
            }
        );

            // Find duplicates
            const duplicates = Array.from(proxyPortMap.entries()).filter(
                ([_, resourceKeys]) => resourceKeys.length > 1
            );
        const portDuplicates = Array.from(protocolPortMap.entries())
            .filter(([_, resourceKeys]) => resourceKeys.length > 1)
            .map(([protocolPort, resourceKeys]) => {
                const [protocol, port] = protocolPort.split(":");
                return `${protocol.toUpperCase()} port ${port} used by proxy-resources: ${resourceKeys.join(", ")}`;
            })
            .join("; ");

            return duplicates.length === 0;
        },
        (config) => {
            // Extract duplicates for error message
            const proxyPortMap = new Map<number, string[]>();

            Object.entries(config["proxy-resources"]).forEach(
                ([resourceKey, resource]) => {
                    const proxyPort = resource["proxy-port"];
                    if (proxyPort !== undefined) {
                        if (!proxyPortMap.has(proxyPort)) {
                            proxyPortMap.set(proxyPort, []);
                        }
                        proxyPortMap.get(proxyPort)!.push(resourceKey);
                    }
                }
            );

            const duplicates = Array.from(proxyPortMap.entries())
                .filter(([_, resourceKeys]) => resourceKeys.length > 1)
                .map(
                    ([proxyPort, resourceKeys]) =>
                        `port ${proxyPort} used by proxy-resources: ${resourceKeys.join(", ")}`
                )
                .join("; ");

            return {
                message: `Duplicate 'proxy-port' values found in proxy-resources: ${duplicates}`,
                path: ["proxy-resources"]
            };
        if (portDuplicates.length !== 0) {
            ctx.addIssue({
                code: z.ZodIssueCode.custom,
                path: ["proxy-resources"],
                message: `Duplicate 'proxy-port' values found in proxy-resources: ${portDuplicates}`
            });
        }
    )
    .refine(
        // Enforce proxy-port uniqueness within client-resources
        (config) => {
            const proxyPortMap = new Map<number, string[]>();

            Object.entries(config["client-resources"]).forEach(
                ([resourceKey, resource]) => {
                    const proxyPort = resource["proxy-port"];
                    if (proxyPort !== undefined) {
                        if (!proxyPortMap.has(proxyPort)) {
                            proxyPortMap.set(proxyPort, []);
                        }
                        proxyPortMap.get(proxyPort)!.push(resourceKey);
        // Enforce alias uniqueness within client-resources
        const aliasMap = new Map<string, string[]>();

        Object.entries(config["client-resources"]).forEach(
|
||||
([resourceKey, resource]) => {
|
||||
const alias = resource.alias;
|
||||
if (alias !== undefined) {
|
||||
if (!aliasMap.has(alias)) {
|
||||
aliasMap.set(alias, []);
|
||||
}
|
||||
aliasMap.get(alias)!.push(resourceKey);
|
||||
}
|
||||
);
|
||||
}
|
||||
);
|
||||
|
||||
// Find duplicates
|
||||
const duplicates = Array.from(proxyPortMap.entries()).filter(
|
||||
([_, resourceKeys]) => resourceKeys.length > 1
|
||||
);
|
||||
const aliasDuplicates = Array.from(aliasMap.entries())
|
||||
.filter(([_, resourceKeys]) => resourceKeys.length > 1)
|
||||
.map(
|
||||
([alias, resourceKeys]) =>
|
||||
`alias '${alias}' used by client-resources: ${resourceKeys.join(", ")}`
|
||||
)
|
||||
.join("; ");
|
||||
|
||||
return duplicates.length === 0;
|
||||
},
|
||||
(config) => {
|
||||
// Extract duplicates for error message
|
||||
const proxyPortMap = new Map<number, string[]>();
|
||||
|
||||
Object.entries(config["client-resources"]).forEach(
|
||||
([resourceKey, resource]) => {
|
||||
const proxyPort = resource["proxy-port"];
|
||||
if (proxyPort !== undefined) {
|
||||
if (!proxyPortMap.has(proxyPort)) {
|
||||
proxyPortMap.set(proxyPort, []);
|
||||
}
|
||||
proxyPortMap.get(proxyPort)!.push(resourceKey);
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
const duplicates = Array.from(proxyPortMap.entries())
|
||||
.filter(([_, resourceKeys]) => resourceKeys.length > 1)
|
||||
.map(
|
||||
([proxyPort, resourceKeys]) =>
|
||||
`port ${proxyPort} used by client-resources: ${resourceKeys.join(", ")}`
|
||||
)
|
||||
.join("; ");
|
||||
|
||||
return {
|
||||
message: `Duplicate 'proxy-port' values found in client-resources: ${duplicates}`,
|
||||
path: ["client-resources"]
|
||||
};
|
||||
if (aliasDuplicates.length !== 0) {
|
||||
ctx.addIssue({
|
||||
code: z.ZodIssueCode.custom,
|
||||
path: ["client-resources"],
|
||||
message: `Duplicate 'alias' values found in client-resources: ${aliasDuplicates}`
|
||||
});
|
||||
}
|
||||
);
|
||||
});
|
||||
|
||||
// Type inference from the schema
|
||||
export type Site = z.infer<typeof SiteSchema>;
|
||||
|
||||
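The `.superRefine` rewrite above replaces three separate `.refine(predicate, errorFn)` pairs, each of which had to rebuild its duplicate map twice, with a single pass that reports every violation through `ctx.addIssue`. A minimal standalone sketch of the same pattern, with a hypothetical schema and field names (not part of the diff):

import { z } from "zod";

// Hypothetical two-level schema used only to illustrate the pattern.
const stackSchema = z
    .object({
        resources: z.record(
            z.string(),
            z.object({ "full-domain": z.string().optional() })
        )
    })
    .superRefine((config, ctx) => {
        // Collect resource keys per full-domain in one pass.
        const seen = new Map<string, string[]>();
        for (const [key, r] of Object.entries(config.resources)) {
            const domain = r["full-domain"];
            if (domain) {
                seen.set(domain, [...(seen.get(domain) ?? []), key]);
            }
        }
        const dups = Array.from(seen.entries()).filter(([, keys]) => keys.length > 1);
        if (dups.length > 0) {
            ctx.addIssue({
                code: z.ZodIssueCode.custom,
                path: ["resources"],
                message: `Duplicate 'full-domain' values: ${dups.map(([d]) => d).join(", ")}`
            });
        }
    });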
server/lib/cache.ts (new file, +5)
@@ -0,0 +1,5 @@
import NodeCache from "node-cache";

export const cache = new NodeCache({ stdTTL: 3600, checkperiod: 120 });

export default cache;
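With `stdTTL: 3600` every entry expires an hour after it is set, and `checkperiod: 120` makes node-cache sweep expired keys every two minutes. A usage sketch (key names are illustrative):

import cache from "@server/lib/cache";

// Cached for 3600 s by the stdTTL default; a per-key TTL can override it.
cache.set("org:acme:stats", { sites: 3 });
cache.set("org:acme:token", "abc123", 60); // expires after 60 s instead

const stats = cache.get<{ sites: number }>("org:acme:stats"); // undefined once expired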
server/lib/calculateUserClientsForOrgs.ts (new file, +290)
@@ -0,0 +1,290 @@
import {
    clients,
    db,
    olms,
    orgs,
    roleClients,
    roles,
    userClients,
    userOrgs,
    Transaction
} from "@server/db";
import { eq, and, notInArray } from "drizzle-orm";
import { listExitNodes } from "#dynamic/lib/exitNodes";
import { getNextAvailableClientSubnet } from "@server/lib/ip";
import logger from "@server/logger";
import { rebuildClientAssociationsFromClient } from "./rebuildClientAssociations";
import { sendTerminateClient } from "@server/routers/client/terminate";
import { getUniqueClientName } from "@server/db/names";

export async function calculateUserClientsForOrgs(
    userId: string,
    trx?: Transaction
): Promise<void> {
    const execute = async (transaction: Transaction) => {
        // Get all OLMs for this user
        const userOlms = await transaction
            .select()
            .from(olms)
            .where(eq(olms.userId, userId));

        if (userOlms.length === 0) {
            // No OLMs for this user, but we should still clean up any orphaned clients
            await cleanupOrphanedClients(userId, transaction);
            return;
        }

        // Get all user orgs
        const allUserOrgs = await transaction
            .select()
            .from(userOrgs)
            .where(eq(userOrgs.userId, userId));

        const userOrgIds = allUserOrgs.map((uo) => uo.orgId);

        // For each OLM, ensure there's a client in each org the user is in
        for (const olm of userOlms) {
            for (const userOrg of allUserOrgs) {
                const orgId = userOrg.orgId;

                const [org] = await transaction
                    .select()
                    .from(orgs)
                    .where(eq(orgs.orgId, orgId));

                if (!org) {
                    logger.warn(
                        `Skipping org ${orgId} for OLM ${olm.olmId} (user ${userId}): org not found`
                    );
                    continue;
                }

                if (!org.subnet) {
                    logger.warn(
                        `Skipping org ${orgId} for OLM ${olm.olmId} (user ${userId}): org has no subnet configured`
                    );
                    continue;
                }

                // Get admin role for this org (needed for access grants)
                const [adminRole] = await transaction
                    .select()
                    .from(roles)
                    .where(and(eq(roles.isAdmin, true), eq(roles.orgId, orgId)))
                    .limit(1);

                if (!adminRole) {
                    logger.warn(
                        `Skipping org ${orgId} for OLM ${olm.olmId} (user ${userId}): no admin role found`
                    );
                    continue;
                }

                // Check if a client already exists for this OLM+user+org combination
                const [existingClient] = await transaction
                    .select()
                    .from(clients)
                    .where(
                        and(
                            eq(clients.userId, userId),
                            eq(clients.orgId, orgId),
                            eq(clients.olmId, olm.olmId)
                        )
                    )
                    .limit(1);

                if (existingClient) {
                    // Ensure admin role has access to the client
                    const [existingRoleClient] = await transaction
                        .select()
                        .from(roleClients)
                        .where(
                            and(
                                eq(roleClients.roleId, adminRole.roleId),
                                eq(
                                    roleClients.clientId,
                                    existingClient.clientId
                                )
                            )
                        )
                        .limit(1);

                    if (!existingRoleClient) {
                        await transaction.insert(roleClients).values({
                            roleId: adminRole.roleId,
                            clientId: existingClient.clientId
                        });
                        logger.debug(
                            `Granted admin role access to existing client ${existingClient.clientId} for OLM ${olm.olmId} in org ${orgId} (user ${userId})`
                        );
                    }

                    // Ensure user has access to the client
                    const [existingUserClient] = await transaction
                        .select()
                        .from(userClients)
                        .where(
                            and(
                                eq(userClients.userId, userId),
                                eq(
                                    userClients.clientId,
                                    existingClient.clientId
                                )
                            )
                        )
                        .limit(1);

                    if (!existingUserClient) {
                        await transaction.insert(userClients).values({
                            userId,
                            clientId: existingClient.clientId
                        });
                        logger.debug(
                            `Granted user access to existing client ${existingClient.clientId} for OLM ${olm.olmId} in org ${orgId} (user ${userId})`
                        );
                    }

                    logger.debug(
                        `Client already exists for OLM ${olm.olmId} in org ${orgId} (user ${userId}), skipping creation`
                    );
                    continue;
                }

                // Get exit nodes for this org
                const exitNodesList = await listExitNodes(orgId);

                if (exitNodesList.length === 0) {
                    logger.warn(
                        `Skipping org ${orgId} for OLM ${olm.olmId} (user ${userId}): no exit nodes found`
                    );
                    continue;
                }

                const randomExitNode =
                    exitNodesList[
                        Math.floor(Math.random() * exitNodesList.length)
                    ];

                // Get next available subnet
                const newSubnet = await getNextAvailableClientSubnet(orgId);
                if (!newSubnet) {
                    logger.warn(
                        `Skipping org ${orgId} for OLM ${olm.olmId} (user ${userId}): no available subnet found`
                    );
                    continue;
                }

                const subnet = newSubnet.split("/")[0];
                const updatedSubnet = `${subnet}/${org.subnet.split("/")[1]}`;

                const niceId = await getUniqueClientName(orgId);

                // Create the client
                const [newClient] = await transaction
                    .insert(clients)
                    .values({
                        userId,
                        orgId: userOrg.orgId,
                        exitNodeId: randomExitNode.exitNodeId,
                        name: olm.name || "User Client",
                        subnet: updatedSubnet,
                        olmId: olm.olmId,
                        type: "olm",
                        niceId
                    })
                    .returning();

                await rebuildClientAssociationsFromClient(
                    newClient,
                    transaction
                );

                // Grant admin role access to the client
                await transaction.insert(roleClients).values({
                    roleId: adminRole.roleId,
                    clientId: newClient.clientId
                });

                // Grant user access to the client
                await transaction.insert(userClients).values({
                    userId,
                    clientId: newClient.clientId
                });

                logger.debug(
                    `Created client for OLM ${olm.olmId} in org ${orgId} (user ${userId}) with access granted to admin role and user`
                );
            }
        }

        // Clean up clients in orgs the user is no longer in
        await cleanupOrphanedClients(userId, transaction, userOrgIds);
    };

    if (trx) {
        // Use provided transaction
        await execute(trx);
    } else {
        // Create new transaction
        await db.transaction(async (transaction) => {
            await execute(transaction);
        });
    }
}

async function cleanupOrphanedClients(
    userId: string,
    trx: Transaction,
    userOrgIds: string[] = []
): Promise<void> {
    // Find all OLM clients for this user that should be deleted
    // If userOrgIds is empty, delete all OLM clients (user has no orgs)
    // If userOrgIds has values, delete clients in orgs they're not in
    const clientsToDelete = await trx
        .select({ clientId: clients.clientId })
        .from(clients)
        .where(
            userOrgIds.length > 0
                ? and(
                      eq(clients.userId, userId),
                      notInArray(clients.orgId, userOrgIds)
                  )
                : and(eq(clients.userId, userId))
        );

    if (clientsToDelete.length > 0) {
        const deletedClients = await trx
            .delete(clients)
            .where(
                userOrgIds.length > 0
                    ? and(
                          eq(clients.userId, userId),
                          notInArray(clients.orgId, userOrgIds)
                      )
                    : and(eq(clients.userId, userId))
            )
            .returning();

        // Rebuild associations for each deleted client to clean up related data
        for (const deletedClient of deletedClients) {
            await rebuildClientAssociationsFromClient(deletedClient, trx);

            if (deletedClient.olmId) {
                await sendTerminateClient(
                    deletedClient.clientId,
                    deletedClient.olmId
                );
            }
        }

        if (userOrgIds.length === 0) {
            logger.debug(
                `Deleted all ${clientsToDelete.length} OLM client(s) for user ${userId} (user has no orgs)`
            );
        } else {
            logger.debug(
                `Deleted ${clientsToDelete.length} orphaned OLM client(s) for user ${userId} in orgs they're no longer in`
            );
        }
    }
}
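`calculateUserClientsForOrgs` uses a pattern worth noting: the body is written against a `Transaction`, and the optional `trx` parameter decides whether to join the caller's transaction or open a fresh one. A reduced sketch of that wrapper, assuming the same drizzle `db`/`Transaction` types:

import { db, Transaction } from "@server/db";

async function withTransaction(
    work: (tx: Transaction) => Promise<void>,
    trx?: Transaction
): Promise<void> {
    if (trx) {
        // Join the caller's transaction so all writes commit or roll back together.
        await work(trx);
    } else {
        // No outer transaction: open one just for this unit of work.
        await db.transaction(async (tx) => {
            await work(tx);
        });
    }
}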
server/lib/checkOrgAccessPolicy.ts (new file, +41)
@@ -0,0 +1,41 @@
import { Org, ResourceSession, Session, User } from "@server/db";

export type CheckOrgAccessPolicyProps = {
    orgId?: string;
    org?: Org;
    userId?: string;
    user?: User;
    sessionId?: string;
    session?: Session;
};

export type CheckOrgAccessPolicyResult = {
    allowed: boolean;
    error?: string;
    policies?: {
        requiredTwoFactor?: boolean;
        maxSessionLength?: {
            compliant: boolean;
            maxSessionLengthHours: number;
            sessionAgeHours: number;
        };
        passwordAge?: {
            compliant: boolean;
            maxPasswordAgeDays: number;
            passwordAgeDays: number;
        };
    };
};

export async function enforceResourceSessionLength(
    resourceSession: ResourceSession,
    org: Org
): Promise<{ valid: boolean; error?: string }> {
    return { valid: true };
}

export async function checkOrgAccessPolicy(
    props: CheckOrgAccessPolicyProps
): Promise<CheckOrgAccessPolicyResult> {
    return { allowed: true };
}
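Both exports here are stubs in this build (they always allow), but the result type already defines the contract. A hedged sketch of how a caller might consume it (the guard function and error handling are illustrative, not part of the diff):

import { checkOrgAccessPolicy } from "@server/lib/checkOrgAccessPolicy";

// Hypothetical guard; orgId/userId come from the request context.
async function assertOrgAccess(orgId: string, userId: string): Promise<void> {
    const result = await checkOrgAccessPolicy({ orgId, userId });
    if (!result.allowed) {
        throw new Error(result.error ?? "Organization access policy not satisfied");
    }
    if (result.policies?.maxSessionLength && !result.policies.maxSessionLength.compliant) {
        throw new Error("Session exceeds the organization's maximum session length");
    }
}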
server/lib/cleanupLogs.test.ts (new file, +201)
@@ -0,0 +1,201 @@
import { assertEquals } from "@test/assert";

// Helper to create a timestamp from a date string (UTC)
function dateToTimestamp(dateStr: string): number {
    return Math.floor(new Date(dateStr).getTime() / 1000);
}

// Testable version of calculateCutoffTimestamp that accepts a "now" timestamp
// This matches the logic in cleanupLogs.ts but allows injecting the current time
function calculateCutoffTimestampWithNow(retentionDays: number, nowTimestamp: number): number {
    if (retentionDays === 9001) {
        // Special case: data is erased at the end of the year following the year it was generated
        // This means we delete logs from 2 years ago or older (logs from year Y are deleted after Dec 31 of year Y+1)
        const currentYear = new Date(nowTimestamp * 1000).getUTCFullYear();
        // Cutoff is the start of the year before last (Jan 1, currentYear - 1 at 00:00:00)
        // Any logs before this date are from 2+ years ago and should be deleted
        const cutoffDate = new Date(Date.UTC(currentYear - 1, 0, 1, 0, 0, 0));
        return Math.floor(cutoffDate.getTime() / 1000);
    } else {
        return nowTimestamp - retentionDays * 24 * 60 * 60;
    }
}

function testCalculateCutoffTimestamp() {
    console.log("Running calculateCutoffTimestamp tests...");

    // Test 1: Normal retention days (e.g., 30 days)
    {
        const now = dateToTimestamp("2025-12-06T12:00:00Z");
        const result = calculateCutoffTimestampWithNow(30, now);
        const expected = now - (30 * 24 * 60 * 60);
        assertEquals(result, expected, "30 days retention calculation failed");
    }

    // Test 2: Normal retention days (e.g., 90 days)
    {
        const now = dateToTimestamp("2025-06-15T00:00:00Z");
        const result = calculateCutoffTimestampWithNow(90, now);
        const expected = now - (90 * 24 * 60 * 60);
        assertEquals(result, expected, "90 days retention calculation failed");
    }

    // Test 3: Special case 9001 - December 2025 (before Dec 31)
    // Data from 2024 should NOT be deleted yet (must wait until after Dec 31, 2025)
    // Data from 2023 and earlier should be deleted
    // Cutoff should be Jan 1, 2024 (start of currentYear - 1)
    {
        const now = dateToTimestamp("2025-12-06T12:00:00Z");
        const result = calculateCutoffTimestampWithNow(9001, now);
        const expected = dateToTimestamp("2024-01-01T00:00:00Z");
        assertEquals(result, expected, "9001 retention (Dec 2025) - should cutoff at Jan 1, 2024");
    }

    // Test 4: Special case 9001 - January 2026
    // Data from 2024 should now be deleted (Dec 31, 2025 has passed)
    // Cutoff should be Jan 1, 2025 (start of currentYear - 1)
    {
        const now = dateToTimestamp("2026-01-15T12:00:00Z");
        const result = calculateCutoffTimestampWithNow(9001, now);
        const expected = dateToTimestamp("2025-01-01T00:00:00Z");
        assertEquals(result, expected, "9001 retention (Jan 2026) - should cutoff at Jan 1, 2025");
    }

    // Test 5: Special case 9001 - December 31, 2025 at 23:59:59 UTC
    // Still in 2025, so data from 2024 should NOT be deleted yet
    // Cutoff should be Jan 1, 2024
    {
        const now = dateToTimestamp("2025-12-31T23:59:59Z");
        const result = calculateCutoffTimestampWithNow(9001, now);
        const expected = dateToTimestamp("2024-01-01T00:00:00Z");
        assertEquals(result, expected, "9001 retention (Dec 31, 2025 23:59:59) - should cutoff at Jan 1, 2024");
    }

    // Test 6: Special case 9001 - January 1, 2026 at 00:00:01 UTC
    // Now in 2026, so data from 2024 should be deleted
    // Cutoff should be Jan 1, 2025
    {
        const now = dateToTimestamp("2026-01-01T00:00:01Z");
        const result = calculateCutoffTimestampWithNow(9001, now);
        const expected = dateToTimestamp("2025-01-01T00:00:00Z");
        assertEquals(result, expected, "9001 retention (Jan 1, 2026 00:00:01) - should cutoff at Jan 1, 2025");
    }

    // Test 7: Special case 9001 - Mid year 2025
    // Cutoff should still be Jan 1, 2024
    {
        const now = dateToTimestamp("2025-06-15T12:00:00Z");
        const result = calculateCutoffTimestampWithNow(9001, now);
        const expected = dateToTimestamp("2024-01-01T00:00:00Z");
        assertEquals(result, expected, "9001 retention (mid 2025) - should cutoff at Jan 1, 2024");
    }

    // Test 8: Special case 9001 - Early 2024
    // Cutoff should be Jan 1, 2023
    {
        const now = dateToTimestamp("2024-02-01T12:00:00Z");
        const result = calculateCutoffTimestampWithNow(9001, now);
        const expected = dateToTimestamp("2023-01-01T00:00:00Z");
        assertEquals(result, expected, "9001 retention (early 2024) - should cutoff at Jan 1, 2023");
    }

    // Test 9: 1 day retention
    {
        const now = dateToTimestamp("2025-12-06T12:00:00Z");
        const result = calculateCutoffTimestampWithNow(1, now);
        const expected = now - (1 * 24 * 60 * 60);
        assertEquals(result, expected, "1 day retention calculation failed");
    }

    // Test 10: 365 days retention (1 year)
    {
        const now = dateToTimestamp("2025-12-06T12:00:00Z");
        const result = calculateCutoffTimestampWithNow(365, now);
        const expected = now - (365 * 24 * 60 * 60);
        assertEquals(result, expected, "365 days retention calculation failed");
    }

    // Test 11: Verify 9001 deletes logs correctly across year boundary
    // If we're in 2025, logs from Dec 31, 2023 (timestamp) should be DELETED (before cutoff)
    // But logs from Jan 1, 2024 (timestamp) should be KEPT (at or after cutoff)
    {
        const now = dateToTimestamp("2025-12-06T12:00:00Z");
        const cutoff = calculateCutoffTimestampWithNow(9001, now);
        const logFromDec2023 = dateToTimestamp("2023-12-31T23:59:59Z");
        const logFromJan2024 = dateToTimestamp("2024-01-01T00:00:00Z");

        // Log from Dec 2023 should be before cutoff (deleted)
        assertEquals(logFromDec2023 < cutoff, true, "Log from Dec 2023 should be deleted");
        // Log from Jan 2024 should be at or after cutoff (kept)
        assertEquals(logFromJan2024 >= cutoff, true, "Log from Jan 2024 should be kept");
    }

    // Test 12: Verify 9001 in 2026 - logs from 2024 should now be deleted
    {
        const now = dateToTimestamp("2026-03-15T12:00:00Z");
        const cutoff = calculateCutoffTimestampWithNow(9001, now);
        const logFromDec2024 = dateToTimestamp("2024-12-31T23:59:59Z");
        const logFromJan2025 = dateToTimestamp("2025-01-01T00:00:00Z");

        // Log from Dec 2024 should be before cutoff (deleted)
        assertEquals(logFromDec2024 < cutoff, true, "Log from Dec 2024 should be deleted in 2026");
        // Log from Jan 2025 should be at or after cutoff (kept)
        assertEquals(logFromJan2025 >= cutoff, true, "Log from Jan 2025 should be kept in 2026");
    }

    // Test 13: Edge case - exactly at year boundary for 9001
    // On Jan 1, 2025 00:00:00 UTC, cutoff should be Jan 1, 2024
    {
        const now = dateToTimestamp("2025-01-01T00:00:00Z");
        const result = calculateCutoffTimestampWithNow(9001, now);
        const expected = dateToTimestamp("2024-01-01T00:00:00Z");
        assertEquals(result, expected, "9001 retention (Jan 1, 2025 00:00:00) - should cutoff at Jan 1, 2024");
    }

    // Test 14: Verify data from 2024 is kept throughout 2025 when using 9001
    // Example: Log created on July 15, 2024 should be kept until Dec 31, 2025
    {
        // Running in June 2025
        const nowJune2025 = dateToTimestamp("2025-06-15T12:00:00Z");
        const cutoffJune2025 = calculateCutoffTimestampWithNow(9001, nowJune2025);
        const logFromJuly2024 = dateToTimestamp("2024-07-15T12:00:00Z");

        // Log from July 2024 should be KEPT in June 2025
        assertEquals(logFromJuly2024 >= cutoffJune2025, true, "Log from July 2024 should be kept in June 2025");

        // Running in January 2026
        const nowJan2026 = dateToTimestamp("2026-01-15T12:00:00Z");
        const cutoffJan2026 = calculateCutoffTimestampWithNow(9001, nowJan2026);

        // Log from July 2024 should be DELETED in January 2026
        assertEquals(logFromJuly2024 < cutoffJan2026, true, "Log from July 2024 should be deleted in Jan 2026");
    }

    // Test 15: Verify the exact requirement - data from 2024 must be purged on December 31, 2025
    // On Dec 31, 2025 (still 2025), data from 2024 should still exist
    // On Jan 1, 2026 (now 2026), data from 2024 can be deleted
    {
        const logFromMid2024 = dateToTimestamp("2024-06-15T12:00:00Z");

        // Dec 31, 2025 23:59:59 - still 2025, log should be kept
        const nowDec31_2025 = dateToTimestamp("2025-12-31T23:59:59Z");
        const cutoffDec31 = calculateCutoffTimestampWithNow(9001, nowDec31_2025);
        assertEquals(logFromMid2024 >= cutoffDec31, true, "Log from mid-2024 should be kept on Dec 31, 2025");

        // Jan 1, 2026 00:00:00 - now 2026, log can be deleted
        const nowJan1_2026 = dateToTimestamp("2026-01-01T00:00:00Z");
        const cutoffJan1 = calculateCutoffTimestampWithNow(9001, nowJan1_2026);
        assertEquals(logFromMid2024 < cutoffJan1, true, "Log from mid-2024 should be deleted on Jan 1, 2026");
    }

    console.log("All calculateCutoffTimestamp tests passed!");
}

// Run all tests
try {
    testCalculateCutoffTimestamp();
    console.log("All tests passed successfully!");
} catch (error) {
    console.error("Test failed:", error);
    process.exit(1);
}
server/lib/cleanupLogs.ts (new file, +76)
@@ -0,0 +1,76 @@
import { db, orgs } from "@server/db";
import { cleanUpOldLogs as cleanUpOldAccessLogs } from "#dynamic/lib/logAccessAudit";
import { cleanUpOldLogs as cleanUpOldActionLogs } from "#dynamic/middlewares/logActionAudit";
import { cleanUpOldLogs as cleanUpOldRequestLogs } from "@server/routers/badger/logRequestAudit";
import { gt, or } from "drizzle-orm";

export function initLogCleanupInterval() {
    return setInterval(
        async () => {
            const orgsToClean = await db
                .select({
                    orgId: orgs.orgId,
                    settingsLogRetentionDaysAction:
                        orgs.settingsLogRetentionDaysAction,
                    settingsLogRetentionDaysAccess:
                        orgs.settingsLogRetentionDaysAccess,
                    settingsLogRetentionDaysRequest:
                        orgs.settingsLogRetentionDaysRequest
                })
                .from(orgs)
                .where(
                    or(
                        gt(orgs.settingsLogRetentionDaysAction, 0),
                        gt(orgs.settingsLogRetentionDaysAccess, 0),
                        gt(orgs.settingsLogRetentionDaysRequest, 0)
                    )
                );

            for (const org of orgsToClean) {
                const {
                    orgId,
                    settingsLogRetentionDaysAction,
                    settingsLogRetentionDaysAccess,
                    settingsLogRetentionDaysRequest
                } = org;

                if (settingsLogRetentionDaysAction > 0) {
                    await cleanUpOldActionLogs(
                        orgId,
                        settingsLogRetentionDaysAction
                    );
                }

                if (settingsLogRetentionDaysAccess > 0) {
                    await cleanUpOldAccessLogs(
                        orgId,
                        settingsLogRetentionDaysAccess
                    );
                }

                if (settingsLogRetentionDaysRequest > 0) {
                    await cleanUpOldRequestLogs(
                        orgId,
                        settingsLogRetentionDaysRequest
                    );
                }
            }
        },
        3 * 60 * 60 * 1000
    ); // every 3 hours
}

export function calculateCutoffTimestamp(retentionDays: number): number {
    const now = Math.floor(Date.now() / 1000);
    if (retentionDays === 9001) {
        // Special case: data is erased at the end of the year following the year it was generated
        // This means we delete logs from 2 years ago or older (logs from year Y are deleted after Dec 31 of year Y+1)
        const currentYear = new Date().getFullYear();
        // Cutoff is the start of the year before last (Jan 1, currentYear - 1 at 00:00:00)
        // Any logs before this date are from 2+ years ago and should be deleted
        const cutoffDate = new Date(Date.UTC(currentYear - 1, 0, 1, 0, 0, 0));
        return Math.floor(cutoffDate.getTime() / 1000);
    } else {
        return now - retentionDays * 24 * 60 * 60;
    }
}
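The `9001` sentinel implements "erase at the end of the year following the year the data was generated": the cutoff is always January 1 of last year, so a log written any time in year Y survives all of year Y+1 and is swept on the first cleanup run of Y+2. A worked example (dates are illustrative):

import { calculateCutoffTimestamp } from "@server/lib/cleanupLogs";

// Suppose "now" is 2025-06-15. currentYear = 2025, so the cutoff is
// Date.UTC(2024, 0, 1) -> 2024-01-01T00:00:00Z.
// A log from 2023-12-31 falls before the cutoff and is deleted;
// a log from 2024-07-15 falls after it and survives until Jan 1, 2026.
const cutoff = calculateCutoffTimestamp(9001);
const kept = Math.floor(Date.UTC(2024, 6, 15) / 1000) >= cutoff; // true during 2025

One difference from the test helper above: the production function uses the server's local `getFullYear()` rather than UTC, so behavior exactly at the year boundary depends on the server's timezone.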
@@ -85,7 +85,13 @@ export class Config {
            ? "true"
            : "false";

-        process.env.FLAGS_ENABLE_CLIENTS = parsedConfig.flags?.enable_clients
+        process.env.PRODUCT_UPDATES_NOTIFICATION_ENABLED = parsedConfig.app
+            .notifications.product_updates
            ? "true"
            : "false";

+        process.env.NEW_RELEASES_NOTIFICATION_ENABLED = parsedConfig.app
+            .notifications.new_releases
+            ? "true"
+            : "false";
+
@@ -158,7 +164,7 @@ export class Config {

        try {
            const response = await fetch(
-                "https://api.fossorial.io/api/v1/license/validate",
+                `https://api.fossorial.io/api/v1/license/validate`,
                {
                    method: "POST",
                    headers: {
@@ -2,7 +2,7 @@ import path from "path";
import { fileURLToPath } from "url";

// This is a placeholder value replaced by the build process
-export const APP_VERSION = "1.11.0";
+export const APP_VERSION = "1.13.0-rc.0";

export const __FILENAME = fileURLToPath(import.meta.url);
export const __DIRNAME = path.dirname(__FILENAME);
@@ -18,6 +18,7 @@ import { defaultRoleAllowedActions } from "@server/routers/role";
import { FeatureId, limitsService, sandboxLimitSet } from "@server/lib/billing";
import { createCustomer } from "#dynamic/lib/billing";
import { usageService } from "@server/lib/billing/usageService";
+import config from "@server/lib/config";

export async function createUserAccountOrg(
    userId: string,
@@ -76,6 +77,8 @@ export async function createUserAccountOrg(
        .from(domains)
        .where(eq(domains.configManaged, true));

+    const utilitySubnet = config.getRawConfig().orgs.utility_subnet_group;
+
    const newOrg = await trx
        .insert(orgs)
        .values({
@@ -83,6 +86,7 @@ export async function createUserAccountOrg(
            name,
            // subnet
            subnet: "100.90.128.0/24", // TODO: this should not be hardcoded - or can it be the same in all orgs?
+            utilitySubnet: utilitySubnet,
            createdAt: new Date().toISOString()
        })
        .returning();
@@ -1,4 +1,4 @@
-import { db, exitNodes } from "@server/db";
+import { db, exitNodes, Transaction } from "@server/db";
import logger from "@server/logger";
import { ExitNodePingResult } from "@server/routers/newt";
import { eq } from "drizzle-orm";
@@ -59,7 +59,11 @@ export function selectBestExitNode(
    return pingResults[0];
}

-export async function checkExitNodeOrg(exitNodeId: number, orgId: string) {
+export async function checkExitNodeOrg(
+    exitNodeId: number,
+    orgId: string,
+    trx?: Transaction | typeof db
+): Promise<boolean> {
    return false;
}
@@ -6,7 +6,7 @@ export async function getCountryCodeForIp(
): Promise<string | undefined> {
    try {
        if (!maxmindLookup) {
-            logger.warn(
+            logger.debug(
                "MaxMind DB path not configured, cannot perform GeoIP lookup"
            );
            return;
server/lib/ip.ts (170 changes)
@@ -1,7 +1,15 @@
-import { db } from "@server/db";
+import {
+    clientSitesAssociationsCache,
+    db,
+    SiteResource,
+    siteResources,
+    Transaction
+} from "@server/db";
import { clients, orgs, sites } from "@server/db";
import { and, eq, isNotNull } from "drizzle-orm";
import config from "@server/lib/config";
import z from "zod";
import logger from "@server/logger";

interface IPRange {
    start: bigint;
@@ -279,6 +287,56 @@ export async function getNextAvailableClientSubnet(
    return subnet;
}

+export async function getNextAvailableAliasAddress(
+    orgId: string
+): Promise<string> {
+    const [org] = await db.select().from(orgs).where(eq(orgs.orgId, orgId));
+
+    if (!org) {
+        throw new Error(`Organization with ID ${orgId} not found`);
+    }
+
+    if (!org.subnet) {
+        throw new Error(`Organization with ID ${orgId} has no subnet defined`);
+    }
+
+    if (!org.utilitySubnet) {
+        throw new Error(
+            `Organization with ID ${orgId} has no utility subnet defined`
+        );
+    }
+
+    const existingAddresses = await db
+        .select({
+            aliasAddress: siteResources.aliasAddress
+        })
+        .from(siteResources)
+        .where(
+            and(
+                isNotNull(siteResources.aliasAddress),
+                eq(siteResources.orgId, orgId)
+            )
+        );
+
+    const addresses = [
+        ...existingAddresses.map(
+            (site) => `${site.aliasAddress?.split("/")[0]}/32`
+        ),
+        // reserve a /29 for the dns server and other stuff
+        `${org.utilitySubnet.split("/")[0]}/29`
+    ].filter((address) => address !== null) as string[];
+
+    let subnet = findNextAvailableCidr(addresses, 32, org.utilitySubnet);
+    if (!subnet) {
+        throw new Error("No available subnets remaining in space");
+    }
+
+    // remove the cidr
+    subnet = subnet.split("/")[0];
+
+    return subnet;
+}
+
export async function getNextAvailableOrgSubnet(): Promise<string> {
    const existingAddresses = await db
        .select({
@@ -300,3 +358,113 @@ export async function getNextAvailableOrgSubnet(): Promise<string> {

    return subnet;
}
+
+export function generateRemoteSubnets(allSiteResources: SiteResource[]): string[] {
+    const remoteSubnets = allSiteResources
+        .filter((sr) => {
+            if (sr.mode === "cidr") return true;
+            if (sr.mode === "host") {
+                // check if its a valid IP using zod
+                const ipSchema = z.union([z.ipv4(), z.ipv6()]);
+                const parseResult = ipSchema.safeParse(sr.destination);
+                return parseResult.success;
+            }
+            return false;
+        })
+        .map((sr) => {
+            if (sr.mode === "cidr") return sr.destination;
+            if (sr.mode === "host") {
+                return `${sr.destination}/32`;
+            }
+            return ""; // This should never be reached due to filtering, but satisfies TypeScript
+        })
+        .filter((subnet) => subnet !== ""); // Remove empty strings just to be safe
+    // remove duplicates
+    return Array.from(new Set(remoteSubnets));
+}
+
+export type Alias = { alias: string | null; aliasAddress: string | null };
+
+export function generateAliasConfig(allSiteResources: SiteResource[]): Alias[] {
+    let aliasConfigs = allSiteResources
+        .filter((sr) => sr.alias && sr.aliasAddress && sr.mode == "host")
+        .map((sr) => ({
+            alias: sr.alias,
+            aliasAddress: sr.aliasAddress
+        }));
+    return aliasConfigs;
+}
+
+export type SubnetProxyTarget = {
+    sourcePrefix: string; // must be a cidr
+    destPrefix: string; // must be a cidr
+    rewriteTo?: string; // must be a cidr
+    portRange?: {
+        min: number;
+        max: number;
+    }[];
+};
+
+export function generateSubnetProxyTargets(
+    siteResource: SiteResource,
+    clients: {
+        clientId: number;
+        pubKey: string | null;
+        subnet: string | null;
+    }[]
+): SubnetProxyTarget[] {
+    const targets: SubnetProxyTarget[] = [];
+
+    if (clients.length === 0) {
+        logger.debug(
+            `No clients have access to site resource ${siteResource.siteResourceId}, skipping target generation.`
+        );
+        return [];
+    }
+
+    for (const clientSite of clients) {
+        if (!clientSite.subnet) {
+            logger.debug(
+                `Client ${clientSite.clientId} has no subnet, skipping for site resource ${siteResource.siteResourceId}.`
+            );
+            continue;
+        }
+
+        const clientPrefix = `${clientSite.subnet.split("/")[0]}/32`;
+
+        if (siteResource.mode == "host") {
+            let destination = siteResource.destination;
+            // check if this is a valid ip
+            const ipSchema = z.union([z.ipv4(), z.ipv6()]);
+            if (ipSchema.safeParse(destination).success) {
+                destination = `${destination}/32`;
+            }
+
+            targets.push({
+                sourcePrefix: clientPrefix,
+                destPrefix: destination
+            });
+
+            if (siteResource.alias && siteResource.aliasAddress) {
+                // also push a match for the alias address
+                targets.push({
+                    sourcePrefix: clientPrefix,
+                    destPrefix: `${siteResource.aliasAddress}/32`,
+                    rewriteTo: destination
+                });
+            }
+        } else if (siteResource.mode == "cidr") {
+            targets.push({
+                sourcePrefix: clientPrefix,
+                destPrefix: siteResource.destination
+            });
+        }
+    }
+
+    // print a nice representation of the targets
+    // logger.debug(
+    //     `Generated subnet proxy targets for: ${JSON.stringify(targets, null, 2)}`
+    // );
+
+    return targets;
+}
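For a host-mode resource with an alias, `generateSubnetProxyTargets` emits two entries per client: a direct match on the destination and an alias match that rewrites to it. An illustrative input/output pair (all addresses invented):

// Given a client at 100.90.128.5/24 and a host-mode resource with
//   destination "10.0.0.10", alias "db.internal", aliasAddress "100.96.128.9",
// the generated targets would be:
const exampleTargets = [
    { sourcePrefix: "100.90.128.5/32", destPrefix: "10.0.0.10/32" },
    {
        sourcePrefix: "100.90.128.5/32",
        destPrefix: "100.96.128.9/32",
        rewriteTo: "10.0.0.10/32" // traffic sent to the alias address is rewritten to the real host
    }
];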
server/lib/lock.ts (new file, +111)
@@ -0,0 +1,111 @@
export class LockManager {
    /**
     * Acquire a distributed lock using Redis SET with NX and PX options
     * @param lockKey - Unique identifier for the lock
     * @param ttlMs - Time to live in milliseconds
     * @returns Promise<boolean> - true if lock acquired, false otherwise
     */
    async acquireLock(
        lockKey: string,
        ttlMs: number = 30000
    ): Promise<boolean> {
        return true;
    }

    /**
     * Release a lock using Lua script to ensure atomicity
     * @param lockKey - Unique identifier for the lock
     */
    async releaseLock(lockKey: string): Promise<void> {}

    /**
     * Force release a lock regardless of owner (use with caution)
     * @param lockKey - Unique identifier for the lock
     */
    async forceReleaseLock(lockKey: string): Promise<void> {}

    /**
     * Check if a lock exists and get its info
     * @param lockKey - Unique identifier for the lock
     * @returns Promise<{exists: boolean, ownedByMe: boolean, ttl: number}>
     */
    async getLockInfo(lockKey: string): Promise<{
        exists: boolean;
        ownedByMe: boolean;
        ttl: number;
        owner?: string;
    }> {
        return { exists: true, ownedByMe: true, ttl: 0 };
    }

    /**
     * Extend the TTL of an existing lock owned by this worker
     * @param lockKey - Unique identifier for the lock
     * @param ttlMs - New TTL in milliseconds
     * @returns Promise<boolean> - true if extended successfully
     */
    async extendLock(lockKey: string, ttlMs: number): Promise<boolean> {
        return true;
    }

    /**
     * Attempt to acquire lock with retries and exponential backoff
     * @param lockKey - Unique identifier for the lock
     * @param ttlMs - Time to live in milliseconds
     * @param maxRetries - Maximum number of retry attempts
     * @param baseDelayMs - Base delay between retries in milliseconds
     * @returns Promise<boolean> - true if lock acquired
     */
    async acquireLockWithRetry(
        lockKey: string,
        ttlMs: number = 30000,
        maxRetries: number = 5,
        baseDelayMs: number = 100
    ): Promise<boolean> {
        return true;
    }

    /**
     * Execute a function while holding a lock
     * @param lockKey - Unique identifier for the lock
     * @param fn - Function to execute while holding the lock
     * @param ttlMs - Lock TTL in milliseconds
     * @returns Promise<T> - Result of the executed function
     */
    async withLock<T>(
        lockKey: string,
        fn: () => Promise<T>,
        ttlMs: number = 30000
    ): Promise<T> {
        const acquired = await this.acquireLock(lockKey, ttlMs);

        if (!acquired) {
            throw new Error(`Failed to acquire lock: ${lockKey}`);
        }

        try {
            return await fn();
        } finally {
            await this.releaseLock(lockKey);
        }
    }

    /**
     * Clean up expired locks - Redis handles this automatically, but this method
     * can be used to get statistics about locks
     * @returns Promise<{activeLocksCount: number, locksOwnedByMe: number}>
     */
    async getLockStatistics(): Promise<{
        activeLocksCount: number;
        locksOwnedByMe: number;
    }> {
        return { activeLocksCount: 0, locksOwnedByMe: 0 };
    }

    /**
     * Close the Redis connection
     */
    async disconnect(): Promise<void> {}
}

export const lockManager = new LockManager();
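In this build the lock methods are no-op stubs that always succeed, but `withLock` already gives callers the acquire/run/release discipline (release happens in a `finally`, so the lock is freed even when the callback throws). Usage sketch, with an invented lock key and assuming `userId` is in scope:

import { lockManager } from "@server/lib/lock";

// Serialize client recalculation for one user across workers.
await lockManager.withLock(
    `user-clients:${userId}`, // hypothetical key naming scheme
    async () => {
        await calculateUserClientsForOrgs(userId);
    },
    15000 // hold the lock for at most 15 s
);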
server/lib/logAccessAudit.ts (new file, +17)
@@ -0,0 +1,17 @@
export async function cleanUpOldLogs(orgId: string, retentionDays: number) {
    return;
}

export async function logAccessAudit(data: {
    action: boolean;
    type: string;
    orgId: string;
    resourceId?: number;
    user?: { username: string; userId: string };
    apiKey?: { name: string | null; apiKeyId: string };
    metadata?: any;
    userAgent?: string;
    requestIp?: string;
}) {
    return;
}
@@ -14,10 +14,8 @@ export const configSchema = z
    .object({
        app: z
            .object({
-                dashboard_url: z
-                    .string()
-                    .url()
-                    .pipe(z.string().url())
+                dashboard_url: z.url()
+                    .pipe(z.url())
                    .transform((url) => url.toLowerCase())
                    .optional(),
                log_level: z
@@ -31,7 +29,14 @@ export const configSchema = z
                        anonymous_usage: z.boolean().optional().default(true)
                    })
                    .optional()
-                    .default({})
+                    .prefault({}),
+                notifications: z
+                    .object({
+                        product_updates: z.boolean().optional().default(true),
+                        new_releases: z.boolean().optional().default(true)
+                    })
+                    .optional()
+                    .prefault({})
            })
            .optional()
            .default({
@@ -40,6 +45,10 @@ export const configSchema = z
                log_failed_attempts: false,
                telemetry: {
                    anonymous_usage: true
                },
+                notifications: {
+                    product_updates: true,
+                    new_releases: true
+                }
            }),
        domains: z
@@ -50,7 +59,7 @@ export const configSchema = z
                    .string()
                    .nonempty("base_domain must not be empty")
                    .transform((url) => url.toLowerCase()),
-                cert_resolver: z.string().optional().default("letsencrypt"),
+                cert_resolver: z.string().optional(), // null falls back to traefik.cert_resolver
                prefer_wildcard_cert: z.boolean().optional().default(false)
            })
        )
@@ -96,7 +105,7 @@ export const configSchema = z
                    token: z.string().optional().default("P-Access-Token")
                })
                .optional()
-                .default({}),
+                .prefault({}),
            resource_session_request_param: z
                .string()
                .optional()
@@ -121,7 +130,7 @@ export const configSchema = z
                    credentials: z.boolean().optional()
                })
                .optional(),
-            trust_proxy: z.number().int().gte(0).optional().default(1),
+            trust_proxy: z.int().gte(0).optional().default(1),
            secret: z.string().pipe(z.string().min(8)).optional(),
            maxmind_db_path: z.string().optional()
        })
@@ -178,7 +187,7 @@ export const configSchema = z
                    .default(5000)
            })
            .optional()
-            .default({})
+            .prefault({})
        })
        .optional(),
        traefik: z
@@ -204,10 +213,14 @@ export const configSchema = z
                .optional()
                .default(["newt", "wireguard", "local"]),
            allow_raw_resources: z.boolean().optional().default(true),
-            file_mode: z.boolean().optional().default(false)
+            file_mode: z.boolean().optional().default(false),
+            pp_transport_prefix: z
+                .string()
+                .optional()
+                .default("pp-transport-v")
        })
        .optional()
-        .default({}),
+        .prefault({}),
        gerbil: z
            .object({
                exit_node_name: z.string().optional(),
@@ -216,6 +229,11 @@ export const configSchema = z
                    .default(51820)
                    .transform(stoi)
                    .pipe(portSchema),
+                clients_start_port: portSchema
+                    .optional()
+                    .default(21820)
+                    .transform(stoi)
+                    .pipe(portSchema),
                base_endpoint: z
                    .string()
                    .optional()
@@ -232,16 +250,18 @@ export const configSchema = z
                    .default(30)
            })
            .optional()
-            .default({}),
+            .prefault({}),
        orgs: z
            .object({
                block_size: z.number().positive().gt(0).optional().default(24),
-                subnet_group: z.string().optional().default("100.90.128.0/24")
+                subnet_group: z.string().optional().default("100.90.128.0/24"),
+                utility_subnet_group: z.string().optional().default("100.96.128.0/24") //just hardcode this for now as well
            })
            .optional()
            .default({
                block_size: 24,
-                subnet_group: "100.90.128.0/24"
+                subnet_group: "100.90.128.0/24",
+                utility_subnet_group: "100.96.128.0/24"
            }),
        rate_limits: z
            .object({
@@ -261,7 +281,7 @@ export const configSchema = z
                    .default(500)
            })
            .optional()
-            .default({}),
+            .prefault({}),
            auth: z
                .object({
                    window_minutes: z
@@ -278,10 +298,10 @@ export const configSchema = z
                        .default(500)
                })
                .optional()
-                .default({})
+                .prefault({})
        })
        .optional()
-        .default({}),
+        .prefault({}),
        email: z
            .object({
                smtp_host: z.string().optional(),
@@ -293,7 +313,7 @@ export const configSchema = z
                    .transform(getEnvOrYaml("EMAIL_SMTP_PASS")),
                smtp_secure: z.boolean().optional(),
                smtp_tls_reject_unauthorized: z.boolean().optional(),
-                no_reply: z.string().email().optional()
+                no_reply: z.email().optional()
            })
            .optional(),
        flags: z
@@ -305,8 +325,7 @@ export const configSchema = z
                enable_integration_api: z.boolean().optional(),
                disable_local_sites: z.boolean().optional(),
                disable_basic_wireguard_sites: z.boolean().optional(),
-                disable_config_managed_domains: z.boolean().optional(),
-                enable_clients: z.boolean().optional().default(true)
+                disable_config_managed_domains: z.boolean().optional()
            })
            .optional(),
        dns: z
@@ -314,14 +333,18 @@ export const configSchema = z
                nameservers: z
                    .array(z.string().optional().optional())
                    .optional()
-                    .default(["ns1.fossorial.io", "ns2.fossorial.io"]),
-                cname_extension: z.string().optional().default("fossorial.io")
+                    .default([
+                        "ns1.pangolin.net",
+                        "ns2.pangolin.net",
+                        "ns3.pangolin.net"
+                    ]),
+                cname_extension: z
+                    .string()
+                    .optional()
+                    .default("cname.pangolin.net")
            })
            .optional()
-            .default({
-                nameservers: ["ns1.fossorial.io", "ns2.fossorial.io"],
-                cname_extension: "fossorial.io"
-            })
+            .prefault({})
    })
    .refine(
        (data) => {
@@ -336,7 +359,7 @@ export const configSchema = z
            return true;
        },
        {
-            message: "At least one domain must be defined"
+            error: "At least one domain must be defined"
        }
    )
    .refine(
@@ -351,7 +374,7 @@ export const configSchema = z
            );
        },
        {
-            message: "Server secret must be defined"
+            error: "Server secret must be defined"
        }
    )
    .refine(
@@ -363,7 +386,7 @@ export const configSchema = z
            );
        },
        {
-            message: "Dashboard URL must be defined"
+            error: "Dashboard URL must be defined"
        }
    );

@@ -392,7 +415,7 @@ export function readConfigFile() {

    if (!environment) {
        throw new Error(
-            "No configuration file found. Please create one. https://docs.digpangolin.com/self-host/advanced/config-file"
+            "No configuration file found. Please create one. https://docs.pangolin.net/self-host/advanced/config-file"
        );
    }
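The recurring `.default({})` → `.prefault({})` change tracks the Zod 4 migration: as documented for Zod 4, `.prefault` feeds the fallback value through the schema before parsing, so an object's inner defaults still apply to it, whereas `.default` short-circuits and returns the fallback as-is. A small sketch of the difference (field names illustrative):

import { z } from "zod";

const section = z.object({ retries: z.number().default(3) });

// .prefault({}) parses {} through the object schema, so inner defaults fire:
z.object({ http: section.prefault({}) }).parse({}); // { http: { retries: 3 } }

// With section.optional().default({}), the fallback {} would be returned
// unparsed, and the inner retries default would be skipped.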
server/lib/rebuildClientAssociations.ts (new file, +1243)
File diff suppressed because it is too large

@@ -1,7 +1,8 @@
export enum AudienceIds {
-    General = "",
-    Subscribed = "",
-    Churned = ""
+    SignUps = "",
+    Subscribed = "",
+    Churned = "",
+    Newsletter = ""
}

let resend;
@@ -12,4 +13,4 @@ export async function moveEmailToAudience(
    audienceId: AudienceIds
) {
    return;
}
server/lib/serverIpService.ts (new file, +29)
@@ -0,0 +1,29 @@
import logger from "@server/logger";
import axios from "axios";

let serverIp: string | null = null;

const services = [
    "https://checkip.amazonaws.com",
    "https://ifconfig.io/ip",
    "https://api.ipify.org",
];

export async function fetchServerIp() {
    for (const url of services) {
        try {
            const response = await axios.get(url, { timeout: 5000 });
            serverIp = response.data.trim();
            logger.debug("Detected public IP: " + serverIp);
            return;
        } catch (err: any) {
            console.warn(`Failed to fetch server IP from ${url}: ${err.message || err.code}`);
        }
    }

    console.error("All attempts to fetch server IP failed.");
}

export function getServerIp() {
    return serverIp;
}
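The service tries each provider in order and caches the first answer in module state, so `getServerIp` is synchronous afterwards. Typical startup wiring (the call site is illustrative):

import { fetchServerIp, getServerIp } from "@server/lib/serverIpService";

// Resolve once at boot; later callers read the cached value.
await fetchServerIp();
const publicIp = getServerIp(); // null if every provider failed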
@@ -4,7 +4,7 @@ import { getHostMeta } from "./hostMeta";
import logger from "@server/logger";
import { apiKeys, db, roles } from "@server/db";
import { sites, users, orgs, resources, clients, idp } from "@server/db";
-import { eq, count, notInArray } from "drizzle-orm";
+import { eq, count, notInArray, and } from "drizzle-orm";
import { APP_VERSION } from "./consts";
import crypto from "crypto";
import { UserType } from "@server/types/UserTypes";
@@ -33,7 +33,7 @@ class TelemetryClient {
        this.client = new PostHog(
            "phc_QYuATSSZt6onzssWcYJbXLzQwnunIpdGGDTYhzK3VjX",
            {
-                host: "https://digpangolin.com/relay-O7yI"
+                host: "https://pangolin.net/relay-O7yI"
            }
        );
@@ -48,11 +48,11 @@ class TelemetryClient {
        this.startAnalyticsInterval();

        logger.info(
-            "Pangolin now gathers anonymous usage data to help us better understand how the software is used and guide future improvements and feature development. You can find more details, including instructions for opting out of this anonymous data collection, at: https://docs.digpangolin.com/telemetry"
+            "Pangolin now gathers anonymous usage data to help us better understand how the software is used and guide future improvements and feature development. You can find more details, including instructions for opting out of this anonymous data collection, at: https://docs.pangolin.net/telemetry"
        );
    } else if (!this.enabled) {
        logger.info(
-            "Analytics usage statistics collection is disabled. If you enable this, you can help us make Pangolin better for everyone. Learn more at: https://docs.digpangolin.com/telemetry"
+            "Analytics usage statistics collection is disabled. If you enable this, you can help us make Pangolin better for everyone. Learn more at: https://docs.pangolin.net/telemetry"
        );
    }
}
@@ -113,7 +113,12 @@ class TelemetryClient {
        const [customRoles] = await db
            .select({ count: count() })
            .from(roles)
-            .where(notInArray(roles.name, ["Admin", "Member"]));
+            .where(
+                and(
+                    eq(roles.isAdmin, false),
+                    notInArray(roles.name, ["Member"])
+                )
+            );

        const adminUsers = await db
            .select({ email: users.email })
@@ -188,7 +193,7 @@ class TelemetryClient {
                license_tier: licenseStatus.tier || "unknown"
            }
        };
-        logger.debug("Sending enterprise startup telemtry payload:", {
+        logger.debug("Sending enterprise startup telemetry payload:", {
            payload
        });
        // this.client.capture(payload);
@@ -200,10 +205,7 @@ class TelemetryClient {
            event: "supporter_status",
            properties: {
                valid: stats.supporterStatus.valid,
-                tier: stats.supporterStatus.tier,
-                github_username: stats.supporterStatus.githubUsername
-                    ? this.anon(stats.supporterStatus.githubUsername)
-                    : "None"
+                tier: stats.supporterStatus.tier
            }
        });
    }
@@ -217,21 +219,6 @@ class TelemetryClient {
                install_timestamp: hostMeta.createdAt
            }
        });
-
-        for (const email of stats.adminUsers) {
-            // There should only be on admin user, but just in case
-            if (email) {
-                this.client.capture({
-                    distinctId: this.anon(email),
-                    event: "admin_user",
-                    properties: {
-                        host_id: hostMeta.hostMetaId,
-                        app_version: stats.appVersion,
-                        hashed_email: this.anon(email)
-                    }
-                });
-            }
-        }
    }

    private async collectAndSendAnalytics() {
@@ -262,19 +249,38 @@ class TelemetryClient {
                num_clients: stats.numClients,
                num_identity_providers: stats.numIdentityProviders,
                num_sites_online: stats.numSitesOnline,
-                resources: stats.resources.map((r) => ({
-                    name: this.anon(r.name),
-                    sso_enabled: r.sso,
-                    protocol: r.protocol,
-                    http_enabled: r.http
-                })),
-                sites: stats.sites.map((s) => ({
-                    site_name: this.anon(s.siteName),
-                    megabytes_in: s.megabytesIn,
-                    megabytes_out: s.megabytesOut,
-                    type: s.type,
-                    online: s.online
-                })),
+                num_resources_sso_enabled: stats.resources.filter(
+                    (r) => r.sso
+                ).length,
+                num_resources_non_http: stats.resources.filter(
+                    (r) => !r.http
+                ).length,
+                num_newt_sites: stats.sites.filter((s) => s.type === "newt")
+                    .length,
+                num_local_sites: stats.sites.filter(
+                    (s) => s.type === "local"
+                ).length,
+                num_wg_sites: stats.sites.filter(
+                    (s) => s.type === "wireguard"
+                ).length,
+                avg_megabytes_in:
+                    stats.sites.length > 0
+                        ? Math.round(
+                              stats.sites.reduce(
+                                  (sum, s) => sum + (s.megabytesIn ?? 0),
+                                  0
+                              ) / stats.sites.length
+                          )
+                        : 0,
+                avg_megabytes_out:
+                    stats.sites.length > 0
+                        ? Math.round(
+                              stats.sites.reduce(
+                                  (sum, s) => sum + (s.megabytesOut ?? 0),
+                                  0
+                              ) / stats.sites.length
+                          )
+                        : 0,
                num_api_keys: stats.numApiKeys,
                num_custom_roles: stats.numCustomRoles
            }
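The analytics change above is a privacy reduction: instead of shipping one record per resource and site (even with anonymized names), only counts and averages leave the host. The average is a plain arithmetic mean over all sites, with nulls counted as zero, e.g.:

// Three sites with megabytesIn of 10, 20 and null:
// avg_megabytes_in = Math.round((10 + 20 + 0) / 3) = 10.
const exampleSites = [{ megabytesIn: 10 }, { megabytesIn: 20 }, { megabytesIn: null }];
const avg = Math.round(
    exampleSites.reduce((sum, s) => sum + (s.megabytesIn ?? 0), 0) / exampleSites.length
);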
@@ -142,8 +142,24 @@ export class TraefikConfigManager {
|
||||
const wildcardExists = await this.fileExists(wildcardPath);
|
||||
|
||||
let lastModified: Date | null = null;
|
||||
const expiresAt: Date | null = null;
|
||||
let expiresAt: number | null = null;
|
||||
let wildcard = false;
|
||||
const expiresAtPath = path.join(domainDir, ".expires_at");
|
||||
const expiresAtExists = await this.fileExists(expiresAtPath);
|
||||
|
||||
if (expiresAtExists) {
|
||||
try {
|
||||
const expiresAtStr = fs
|
||||
.readFileSync(expiresAtPath, "utf8")
|
||||
.trim();
|
||||
expiresAt = parseInt(expiresAtStr, 10);
|
||||
if (isNaN(expiresAt)) {
|
||||
expiresAt = null;
|
||||
}
|
||||
} catch {
|
||||
expiresAt = null;
|
||||
}
|
||||
}
|
||||
|
||||
if (lastUpdateExists) {
|
||||
try {
|
||||
@@ -179,7 +195,7 @@ export class TraefikConfigManager {

         state.set(domain, {
             exists: certExists && keyExists,
-            lastModified,
+            lastModified: lastModified ? Math.floor(lastModified.getTime() / 1000) : null,
             expiresAt,
             wildcard
         });
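Normalizing `lastModified` to Unix seconds keeps it directly comparable with the seconds-based `expiresAt` parsed above; the conversion is the usual one:

// Date -> Unix seconds, matching the representation used for expiresAt
const toEpochSeconds = (d: Date): number => Math.floor(d.getTime() / 1000);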
@@ -259,9 +275,9 @@ export class TraefikConfigManager {

             // Check if certificate is expiring soon (within 30 days)
             if (localState.expiresAt) {
-                const daysUntilExpiry =
-                    (localState.expiresAt - Math.floor(Date.now() / 1000)) /
-                    (1000 * 60 * 60 * 24);
+                const nowInSeconds = Math.floor(Date.now() / 1000);
+                const secondsUntilExpiry = localState.expiresAt - nowInSeconds;
+                const daysUntilExpiry = secondsUntilExpiry / (60 * 60 * 24);
                 if (daysUntilExpiry < 30) {
                     logger.info(
                         `Fetching certificates due to upcoming expiry for ${domain} (${Math.round(daysUntilExpiry)} days remaining)`
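The old expression divided a difference measured in seconds by milliseconds per day, so daysUntilExpiry came out roughly a thousand times too small and the 30-day branch fired on essentially every pass. A quick unit check of the fix:

// A certificate expiring in 60 days:
const SECONDS_PER_DAY = 60 * 60 * 24; // 86_400
const secondsUntilExpiry = 60 * SECONDS_PER_DAY;
console.log(secondsUntilExpiry / SECONDS_PER_DAY);          // 60   (new divisor)
console.log(secondsUntilExpiry / (1000 * SECONDS_PER_DAY)); // 0.06 (old divisor)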
@@ -309,10 +325,7 @@ export class TraefikConfigManager {
             this.lastActiveDomains = new Set(domains);
         }

-        if (
-            process.env.USE_PANGOLIN_DNS === "true" &&
-            build != "oss"
-        ) {
+        if (process.env.USE_PANGOLIN_DNS === "true" && build != "oss") {
             // Scan current local certificate state
             this.lastLocalCertificateState =
                 await this.scanLocalCertificateState();
@@ -450,7 +463,8 @@ export class TraefikConfigManager {
             currentExitNode,
             config.getRawConfig().traefik.site_types,
             build == "oss", // filter out the namespace domains in open source
-            build != "oss" // generate the login pages on the cloud and hybrid
+            build != "oss", // generate the login pages on the cloud and hybrid
+            build == "saas" ? false : config.getRawConfig().traefik.allow_raw_resources // don't allow raw resources on saas, otherwise use config
         );

         const domains = new Set<string>();
@@ -502,6 +516,25 @@ export class TraefikConfigManager {
             };
         }

+        // tcp:
+        //     serversTransports:
+        //         pp-transport-v1:
+        //             proxyProtocol:
+        //                 version: 1
+        //         pp-transport-v2:
+        //             proxyProtocol:
+        //                 version: 2
+
+        if (build != "saas") {
+            // add the serversTransports section if not present
+            if (traefikConfig.tcp && !traefikConfig.tcp.serversTransports) {
+                traefikConfig.tcp.serversTransports = {
+                    "pp-transport-v1": { proxyProtocol: { version: 1 } },
+                    "pp-transport-v2": { proxyProtocol: { version: 2 } }
+                };
+            }
+        }
+
         return { domains, traefikConfig };
     } catch (error) {
         // pull data out of the axios error to log
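The guard only injects the transports when a tcp section exists and the operator has not defined serversTransports themselves, so hand-maintained blocks are never clobbered. The same logic as a standalone sketch (type and function names are illustrative, not from the diff):

type TcpSection = { serversTransports?: Record<string, unknown> };

// Inject default PROXY-protocol transports only when none are present.
function ensureProxyProtocolTransports(tcp?: TcpSection): void {
    if (tcp && !tcp.serversTransports) {
        tcp.serversTransports = {
            "pp-transport-v1": { proxyProtocol: { version: 1 } },
            "pp-transport-v2": { proxyProtocol: { version: 2 } }
        };
    }
}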
@@ -753,6 +786,16 @@ export class TraefikConfigManager {
                 "utf8"
             );

+            // Store the certificate expiry time
+            if (cert.expiresAt) {
+                const expiresAtPath = path.join(domainDir, ".expires_at");
+                fs.writeFileSync(
+                    expiresAtPath,
+                    cert.expiresAt.toString(),
+                    "utf8"
+                );
+            }
+
             logger.info(
                 `Certificate updated for domain: ${cert.domain}${cert.wildcard ? " (wildcard)" : ""}`
             );
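Together with the parser in the scan path, the marker file is a small persistence round trip: renewal writes epoch seconds, the next scan reads them back. A compact sketch, with an illustrative directory and timestamp:

import fs from "fs";
import path from "path";

const domainDir = "/var/certificates/example.com"; // illustrative path
const marker = path.join(domainDir, ".expires_at");

fs.writeFileSync(marker, String(1767225600), "utf8"); // on renewal: store epoch seconds
const expiresAt = parseInt(fs.readFileSync(marker, "utf8").trim(), 10); // on scan: read back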
@@ -1,4 +1,4 @@
-import { db, targetHealthCheck } from "@server/db";
+import { db, targetHealthCheck, domains } from "@server/db";
 import {
     and,
     eq,
@@ -23,7 +23,8 @@ export async function getTraefikConfig(
     exitNodeId: number,
     siteTypes: string[],
     filterOutNamespaceDomains = false,
-    generateLoginPageRouters = false
+    generateLoginPageRouters = false,
+    allowRawResources = true
 ): Promise<any> {
     // Define extended target type with site information
     type TargetWithSite = Target & {
@@ -56,6 +57,8 @@ export async function getTraefikConfig(
             setHostHeader: resources.setHostHeader,
             enableProxy: resources.enableProxy,
             headers: resources.headers,
+            proxyProtocol: resources.proxyProtocol,
+            proxyProtocolVersion: resources.proxyProtocolVersion,
             // Target fields
             targetId: targets.targetId,
             targetEnabled: targets.enabled,
@@ -75,11 +78,15 @@ export async function getTraefikConfig(
             siteType: sites.type,
             siteOnline: sites.online,
             subnet: sites.subnet,
-            exitNodeId: sites.exitNodeId
+            exitNodeId: sites.exitNodeId,
+            // Domain cert resolver fields
+            domainCertResolver: domains.certResolver,
+            preferWildcardCert: domains.preferWildcardCert
         })
         .from(sites)
         .innerJoin(targets, eq(targets.siteId, sites.siteId))
         .innerJoin(resources, eq(resources.resourceId, targets.resourceId))
+        .leftJoin(domains, eq(domains.domainId, resources.domainId))
         .leftJoin(
             targetHealthCheck,
             eq(targetHealthCheck.targetId, targets.targetId)
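domains is attached with a left join rather than an inner join, so resources without a matching domain row still appear in the result, just with the two new columns as null; the resolver fallback later in the file relies on exactly that. Per row, the relevant slice of the shape is (reduced, illustrative type):

type JoinedCertFields = {
    domainCertResolver: string | null;  // null when no domain row matched
    preferWildcardCert: boolean | null; // null -> fall back to the global default
};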
@@ -92,7 +99,8 @@ export async function getTraefikConfig(
                 eq(sites.exitNodeId, exitNodeId),
                 and(
                     isNull(sites.exitNodeId),
-                    sql`(${siteTypes.includes("local") ? 1 : 0} = 1)` // only allow local sites if "local" is in siteTypes
+                    sql`(${siteTypes.includes("local") ? 1 : 0} = 1)`, // only allow local sites if "local" is in siteTypes
+                    eq(sites.type, "local")
                 )
             ),
             or(
@@ -100,7 +108,7 @@ export async function getTraefikConfig(
                 isNull(targetHealthCheck.hcHealth) // Include targets with no health check record
             ),
             inArray(sites.type, siteTypes),
-            config.getRawConfig().traefik.allow_raw_resources
+            allowRawResources
                 ? isNotNull(resources.http) // ignore the http check if allow_raw_resources is true
                 : eq(resources.http, true)
         )
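Taking allowRawResources as a parameter instead of reading global config inside the query lets each caller pick a policy; the TraefikConfigManager change earlier passes false on saas builds and the config value otherwise. The filter reduces to this illustrative predicate (not a helper in the diff):

// Which resources survive the http filter:
function resourcePasses(http: boolean | null, allowRawResources: boolean): boolean {
    // raw (non-http) resources are only kept when the caller allows them
    return allowRawResources ? http !== null : http === true;
}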
@@ -163,11 +171,16 @@ export async function getTraefikConfig(
                 enableProxy: row.enableProxy,
                 targets: [],
                 headers: row.headers,
+                proxyProtocol: row.proxyProtocol,
+                proxyProtocolVersion: row.proxyProtocolVersion ?? 1,
                 path: row.path, // the targets will all have the same path
                 pathMatchType: row.pathMatchType, // the targets will all have the same pathMatchType
                 rewritePath: row.rewritePath,
                 rewritePathType: row.rewritePathType,
-                priority: priority // may be null, we fall back later
+                priority: priority,
+                // Store domain cert resolver fields
+                domainCertResolver: row.domainCertResolver,
+                preferWildcardCert: row.preferWildcardCert
             });
         }
@@ -246,21 +259,35 @@ export async function getTraefikConfig(
                 wildCard = resource.fullDomain;
             }

-            const configDomain = config.getDomain(resource.domainId);
+            const globalDefaultResolver =
+                config.getRawConfig().traefik.cert_resolver;
+            const globalDefaultPreferWildcard =
+                config.getRawConfig().traefik.prefer_wildcard_cert;

-            let certResolver: string, preferWildcardCert: boolean;
-            if (!configDomain) {
-                certResolver = config.getRawConfig().traefik.cert_resolver;
-                preferWildcardCert =
-                    config.getRawConfig().traefik.prefer_wildcard_cert;
+            const domainCertResolver = resource.domainCertResolver;
+            const preferWildcardCert = resource.preferWildcardCert;
+
+            let resolverName: string | undefined;
+            let preferWildcard: boolean | undefined;
+            // Handle both letsencrypt & custom cases
+            if (domainCertResolver) {
+                resolverName = domainCertResolver.trim();
             } else {
-                certResolver = configDomain.cert_resolver;
-                preferWildcardCert = configDomain.prefer_wildcard_cert;
+                resolverName = globalDefaultResolver;
             }
+
+            if (
+                preferWildcardCert !== undefined &&
+                preferWildcardCert !== null
+            ) {
+                preferWildcard = preferWildcardCert;
+            } else {
+                preferWildcard = globalDefaultPreferWildcard;
+            }

             const tls = {
-                certResolver: certResolver,
-                ...(preferWildcardCert
+                certResolver: resolverName,
+                ...(preferWildcard
                     ? {
                           domains: [
                               {
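Net effect: per-domain database values override the global traefik config for both the resolver name and the wildcard preference, with null/undefined meaning "no override". As a pure-function sketch (helper name illustrative; the empty-string edge case follows the truthiness check above):

function resolveTlsSettings(
    domainCertResolver: string | null,
    preferWildcardCert: boolean | null | undefined,
    globalResolver: string,
    globalPreferWildcard: boolean
) {
    return {
        certResolver: domainCertResolver ? domainCertResolver.trim() : globalResolver,
        preferWildcard: preferWildcardCert ?? globalPreferWildcard
    };
}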
@@ -318,9 +345,9 @@ export async function getTraefikConfig(
                     routerMiddlewares.push(rewriteMiddlewareName);
                 }

-                logger.debug(
-                    `Created path rewrite middleware ${rewriteMiddlewareName}: ${resource.pathMatchType}(${resource.path}) -> ${resource.rewritePathType}(${resource.rewritePath})`
-                );
+                // logger.debug(
+                //     `Created path rewrite middleware ${rewriteMiddlewareName}: ${resource.pathMatchType}(${resource.path}) -> ${resource.rewritePathType}(${resource.rewritePath})`
+                // );
             } catch (error) {
                 logger.error(
                     `Failed to create path rewrite middleware for resource ${resource.resourceId}: ${error}`
@@ -561,6 +588,8 @@ export async function getTraefikConfig(
             ...(protocol === "tcp" ? { rule: "HostSNI(`*`)" } : {})
         };

+        const ppPrefix = config.getRawConfig().traefik.pp_transport_prefix;
+
         config_output[protocol].services[serviceName] = {
             loadBalancer: {
                 servers: (() => {
@@ -614,6 +643,11 @@ export async function getTraefikConfig(
                         }
                     });
                 })(),
+                ...(resource.proxyProtocol && protocol == "tcp"
+                    ? {
+                          serversTransport: `${ppPrefix}${resource.proxyProtocolVersion || 1}@file` // TODO: does @file here cause issues?
+                      }
+                    : {}),
                 ...(resource.stickySession
                     ? {
                           sticky: {
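For the reference string to resolve, pp_transport_prefix has to line up with the transport names injected in TraefikConfigManager. Assuming a prefix of "pp-transport-v" (an assumption; the config value is not shown in this diff), version 2 yields:

const ppPrefix = "pp-transport-v"; // assumed value of traefik.pp_transport_prefix
const version = 2;                 // resource.proxyProtocolVersion || 1
const ref = `${ppPrefix}${version}@file`; // "pp-transport-v2@file", matching the injected transport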
Some files were not shown because too many files have changed in this diff.