Mirror of https://github.com/fosrl/pangolin.git (synced 2026-01-30 06:40:46 +00:00)

Compare commits: site-targe... → 1.12.0 (1268 Commits)
[Commit list: this comparison spans 1268 commits (abbreviated SHAs 31b66cd911 through 22545cac8b). The compare view's Author and Date columns were empty in this capture and commit messages were not included, so only the bare SHA column survived.]
@@ -28,4 +28,7 @@ LICENSE
CONTRIBUTING.md
dist
.git
config/
migrations/
config/
build.ts
tsconfig.json
47  .github/DISCUSSION_TEMPLATE/feature-requests.yml  (vendored, new file)

@@ -0,0 +1,47 @@
body:
  - type: textarea
    attributes:
      label: Summary
      description: A clear and concise summary of the requested feature.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Motivation
      description: |
        Why is this feature important?
        Explain the problem this feature would solve or what use case it would enable.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Proposed Solution
      description: |
        How would you like to see this feature implemented?
        Provide as much detail as possible about the desired behavior, configuration, or changes.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Alternatives Considered
      description: Describe any alternative solutions or workarounds you've thought about.
    validations:
      required: false

  - type: textarea
    attributes:
      label: Additional Context
      description: Add any other context, mockups, or screenshots about the feature request here.
    validations:
      required: false

  - type: markdown
    attributes:
      value: |
        Before submitting, please:
        - Check if there is an existing issue for this feature.
        - Clearly explain the benefit and use case.
        - Be as specific as possible to help contributors evaluate and implement.
51  .github/ISSUE_TEMPLATE/1.bug_report.yml  (vendored, new file)

@@ -0,0 +1,51 @@
name: Bug Report
description: Create a bug report
labels: []
body:
  - type: textarea
    attributes:
      label: Describe the Bug
      description: A clear and concise description of what the bug is.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Environment
      description: Please fill out the relevant details below for your environment.
      value: |
        - OS Type & Version: (e.g., Ubuntu 22.04)
        - Pangolin Version:
        - Gerbil Version:
        - Traefik Version:
        - Newt Version:
        - Olm Version: (if applicable)
    validations:
      required: true

  - type: textarea
    attributes:
      label: To Reproduce
      description: |
        Steps to reproduce the behavior, please provide a clear description of how to reproduce the issue, based on the linked minimal reproduction. Screenshots can be provided in the issue body below.

        If using code blocks, make sure syntax highlighting is correct and double-check that the rendered preview is not broken.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Expected Behavior
      description: A clear and concise description of what you expected to happen.
    validations:
      required: true

  - type: markdown
    attributes:
      value: |
        Before posting the issue go through the steps you've written down to make sure the steps provided are detailed and clear.

  - type: markdown
    attributes:
      value: |
        Contributors should be able to follow the steps provided in order to reproduce the bug.
8  .github/ISSUE_TEMPLATE/config.yml  (vendored, new file)

@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: Need help or have questions?
    url: https://github.com/orgs/fosrl/discussions
    about: Ask questions, get help, and discuss with other community members
  - name: Request a Feature
    url: https://github.com/orgs/fosrl/discussions/new?category=feature-requests
    about: Feature requests should be opened as discussions so others can upvote and comment
120  .github/workflows/cicd.yml  (vendored)

@@ -1,34 +1,62 @@
name: CI/CD Pipeline

# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.

permissions:
  contents: read
  packages: write # for GHCR push
  id-token: write # for Cosign Keyless (OIDC) Signing

# Required secrets:
# - DOCKER_HUB_USERNAME / DOCKER_HUB_ACCESS_TOKEN: push to Docker Hub
# - GITHUB_TOKEN: used for GHCR login and OIDC keyless signing
# - COSIGN_PRIVATE_KEY / COSIGN_PASSWORD / COSIGN_PUBLIC_KEY: for key-based signing

on:
  push:
    tags:
-     - "*"
+     - "[0-9]+.[0-9]+.[0-9]+"
+     - "[0-9]+.[0-9]+.[0-9]+.rc.[0-9]+"

concurrency:
  group: ${{ github.ref }}
  cancel-in-progress: true

jobs:
  release:
    name: Build and Release
-   runs-on: ubuntu-latest
+   runs-on: [self-hosted, linux, x64]
    # Job-level timeout to avoid runaway or stuck runs
    timeout-minutes: 120
    env:
      # Target images
      DOCKERHUB_IMAGE: docker.io/${{ secrets.DOCKER_HUB_USERNAME }}/${{ github.event.repository.name }}
      GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}

    steps:
      - name: Checkout code
-       uses: actions/checkout@v4
+       uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Set up QEMU
        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0

      - name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v3
+       uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Log in to Docker Hub
-       uses: docker/login-action@v3
+       uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: docker.io
          username: ${{ secrets.DOCKER_HUB_USERNAME }}
          password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

      - name: Extract tag name
        id: get-tag
        run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
        shell: bash

      - name: Install Go
-       uses: actions/setup-go@v5
+       uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
        with:
          go-version: 1.24

@@ -37,18 +65,21 @@ jobs:
          TAG=${{ env.TAG }}
          sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
          cat server/lib/consts.ts
        shell: bash

      - name: Pull latest Gerbil version
        id: get-gerbil-tag
        run: |
          LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name')
          echo "LATEST_GERBIL_TAG=$LATEST_TAG" >> $GITHUB_ENV
        shell: bash

      - name: Pull latest Badger version
        id: get-badger-tag
        run: |
          LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name')
          echo "LATEST_BADGER_TAG=$LATEST_TAG" >> $GITHUB_ENV
        shell: bash

      - name: Update install/main.go
        run: |

@@ -60,6 +91,7 @@ jobs:
          sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$BADGER_VERSION\"/" install/main.go
          echo "Updated install/main.go with Pangolin version $PANGOLIN_VERSION, Gerbil version $GERBIL_VERSION, and Badger version $BADGER_VERSION"
          cat install/main.go
        shell: bash

      - name: Build installer
        working-directory: install

@@ -67,12 +99,82 @@ jobs:
          make go-build-release

      - name: Upload artifacts from /install/bin
-       uses: actions/upload-artifact@v4
+       uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
        with:
          name: install-bin
          path: install/bin/

-     - name: Build and push Docker images
+     - name: Build and push Docker images (Docker Hub)
        run: |
          TAG=${{ env.TAG }}
          make build-release tag=$TAG
          echo "Built & pushed to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
        shell: bash

      - name: Install skopeo + jq
        # skopeo: copy/inspect images between registries
        # jq: JSON parsing tool used to extract digest values
        run: |
          sudo apt-get update -y
          sudo apt-get install -y skopeo jq
          skopeo --version
        shell: bash

      - name: Login to GHCR
        run: |
          skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
        shell: bash

      - name: Copy tag from Docker Hub to GHCR
        # Mirror the already-built image (all architectures) to GHCR so we can sign it
        run: |
          set -euo pipefail
          TAG=${{ env.TAG }}
          echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
          skopeo copy --all --retry-times 3 \
            docker://$DOCKERHUB_IMAGE:$TAG \
            docker://$GHCR_IMAGE:$TAG
        shell: bash

      - name: Install cosign
        # cosign is used to sign and verify container images (key and keyless)
        uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0

      - name: Dual-sign and verify (GHCR & Docker Hub)
        # Sign each image by digest using keyless (OIDC) and key-based signing,
        # then verify both the public key signature and the keyless OIDC signature.
        env:
          TAG: ${{ env.TAG }}
          COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
          COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
          COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
          COSIGN_YES: "true"
        run: |
          set -euo pipefail

          issuer="https://token.actions.githubusercontent.com"
          id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)

          for IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
            echo "Processing ${IMAGE}:${TAG}"

            DIGEST="$(skopeo inspect --retry-times 3 docker://${IMAGE}:${TAG} | jq -r '.Digest')"
            REF="${IMAGE}@${DIGEST}"
            echo "Resolved digest: ${REF}"

            echo "==> cosign sign (keyless) --recursive ${REF}"
            cosign sign --recursive "${REF}"

            echo "==> cosign sign (key) --recursive ${REF}"
            cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"

            echo "==> cosign verify (public key) ${REF}"
            cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text

            echo "==> cosign verify (keyless policy) ${REF}"
            cosign verify \
              --certificate-oidc-issuer "${issuer}" \
              --certificate-identity-regexp "${id_regex}" \
              "${REF}" -o text
          done
        shell: bash
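For context on the dual-sign step above: a consumer of the published images can run the same two verifications locally. This is a minimal sketch, assuming the project's Cosign public key has been saved locally as `cosign.pub` and using `1.12.0` as an example tag; the issuer and identity pattern are the ones the workflow itself checks.

```bash
# Key-based verification (cosign.pub is an assumed local filename, not
# something this workflow publishes or names).
cosign verify --key cosign.pub docker.io/fosrl/pangolin:1.12.0 -o text

# Keyless (OIDC) verification using the same policy the workflow applies.
cosign verify \
    --certificate-oidc-issuer "https://token.actions.githubusercontent.com" \
    --certificate-identity-regexp "^https://github.com/fosrl/pangolin/.+" \
    docker.io/fosrl/pangolin:1.12.0 -o text
```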
16  .github/workflows/linting.yml  (vendored)

@@ -1,5 +1,8 @@
name: ESLint

permissions:
  contents: read

on:
  pull_request:
    paths:

@@ -18,17 +21,18 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
-       uses: actions/checkout@v4
+       uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Set up Node.js
-       uses: actions/setup-node@v4
+       uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
        with:
          node-version: '22'

      - name: Install dependencies
-       run: |
-         npm ci
+       run: npm ci

      - name: Create build file
        run: npm run set:oss

      - name: Run ESLint
-       run: |
-         npx eslint . --ext .js,.jsx,.ts,.tsx
+       run: npx eslint . --ext .js,.jsx,.ts,.tsx
132  .github/workflows/mirror.yaml  (vendored, new file)

@@ -0,0 +1,132 @@
name: Mirror & Sign (Docker Hub to GHCR)

on:
  workflow_dispatch: {}

permissions:
  contents: read
  packages: write
  id-token: write # for keyless OIDC

env:
  SOURCE_IMAGE: docker.io/fosrl/pangolin
  DEST_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}

jobs:
  mirror-and-dual-sign:
    runs-on: amd64-runner
    steps:
      - name: Install skopeo + jq
        run: |
          sudo apt-get update -y
          sudo apt-get install -y skopeo jq
          skopeo --version

      - name: Install cosign
        uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0

      - name: Input check
        run: |
          test -n "${SOURCE_IMAGE}" || (echo "SOURCE_IMAGE is empty" && exit 1)
          echo "Source : ${SOURCE_IMAGE}"
          echo "Target : ${DEST_IMAGE}"

      # Auth for skopeo (containers-auth)
      - name: Skopeo login to GHCR
        run: |
          skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"

      # Auth for cosign (docker-config)
      - name: Docker login to GHCR (for cosign)
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u "${{ github.actor }}" --password-stdin

      - name: List source tags
        run: |
          set -euo pipefail
          skopeo list-tags --retry-times 3 docker://"${SOURCE_IMAGE}" \
            | jq -r '.Tags[]' | sort -u > src-tags.txt
          echo "Found source tags: $(wc -l < src-tags.txt)"
          head -n 20 src-tags.txt || true

      - name: List destination tags (skip existing)
        run: |
          set -euo pipefail
          if skopeo list-tags --retry-times 3 docker://"${DEST_IMAGE}" >/tmp/dst.json 2>/dev/null; then
            jq -r '.Tags[]' /tmp/dst.json | sort -u > dst-tags.txt
          else
            : > dst-tags.txt
          fi
          echo "Existing destination tags: $(wc -l < dst-tags.txt)"

      - name: Mirror, dual-sign, and verify
        env:
          # keyless
          COSIGN_YES: "true"
          # key-based
          COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
          COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
          # verify
          COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
        run: |
          set -euo pipefail
          copied=0; skipped=0; v_ok=0; errs=0

          issuer="https://token.actions.githubusercontent.com"
          id_regex="^https://github.com/${{ github.repository }}/.+"

          while read -r tag; do
            [ -z "$tag" ] && continue

            if grep -Fxq "$tag" dst-tags.txt; then
              echo "::notice ::Skip (exists) ${DEST_IMAGE}:${tag}"
              skipped=$((skipped+1))
              continue
            fi

            echo "==> Copy ${SOURCE_IMAGE}:${tag} → ${DEST_IMAGE}:${tag}"
            if ! skopeo copy --all --retry-times 3 \
              docker://"${SOURCE_IMAGE}:${tag}" docker://"${DEST_IMAGE}:${tag}"; then
              echo "::warning title=Copy failed::${SOURCE_IMAGE}:${tag}"
              errs=$((errs+1)); continue
            fi
            copied=$((copied+1))

            digest="$(skopeo inspect --retry-times 3 docker://"${DEST_IMAGE}:${tag}" | jq -r '.Digest')"
            ref="${DEST_IMAGE}@${digest}"

            echo "==> cosign sign (keyless) --recursive ${ref}"
            if ! cosign sign --recursive "${ref}"; then
              echo "::warning title=Keyless sign failed::${ref}"
              errs=$((errs+1))
            fi

            echo "==> cosign sign (key) --recursive ${ref}"
            if ! cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${ref}"; then
              echo "::warning title=Key sign failed::${ref}"
              errs=$((errs+1))
            fi

            echo "==> cosign verify (public key) ${ref}"
            if ! cosign verify --key env://COSIGN_PUBLIC_KEY "${ref}" -o text; then
              echo "::warning title=Verify(pubkey) failed::${ref}"
              errs=$((errs+1))
            fi

            echo "==> cosign verify (keyless policy) ${ref}"
            if ! cosign verify \
              --certificate-oidc-issuer "${issuer}" \
              --certificate-identity-regexp "${id_regex}" \
              "${ref}" -o text; then
              echo "::warning title=Verify(keyless) failed::${ref}"
              errs=$((errs+1))
            else
              v_ok=$((v_ok+1))
            fi
          done < src-tags.txt

          echo "---- Summary ----"
          echo "Copied : $copied"
          echo "Skipped : $skipped"
          echo "Verified OK : $v_ok"
          echo "Errors : $errs"
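Because the mirror workflow only runs on `workflow_dispatch`, it has to be started by hand. A minimal sketch using the GitHub CLI, assuming push access to the repository (the workflow takes no inputs, so none are passed):

```bash
# Kick off a mirror run manually.
gh workflow run mirror.yaml --repo fosrl/pangolin

# Check the most recent run of that workflow.
gh run list --repo fosrl/pangolin --workflow mirror.yaml --limit 1
```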
4  .github/workflows/stale-bot.yml  (vendored)

@@ -14,7 +14,7 @@ jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
-     - uses: actions/stale@v9
+     - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
        with:
          days-before-stale: 14
          days-before-close: 14

@@ -34,4 +34,4 @@ jobs:
          operations-per-run: 100
          remove-stale-when-updated: true
          delete-branch: false
-         enable-statistics: true
+         enable-statistics: true
15  .github/workflows/test.yml  (vendored)

@@ -1,5 +1,8 @@
name: Run Tests

permissions:
  contents: read

on:
  pull_request:
    branches:

@@ -11,9 +14,9 @@ jobs:
    runs-on: ubuntu-latest

    steps:
-     - uses: actions/checkout@v4
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

-     - uses: actions/setup-node@v4
+     - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
        with:
          node-version: '22'

@@ -24,7 +27,10 @@ jobs:
        run: npm ci

      - name: Create database index.ts
-       run: echo 'export * from "./sqlite";' > server/db/index.ts
+       run: npm run set:sqlite

      - name: Create build file
        run: npm run set:oss

      - name: Generate database migrations
        run: npm run db:sqlite:generate

@@ -32,6 +38,9 @@ jobs:
      - name: Apply database migrations
        run: npm run db:sqlite:push

      - name: Test with tsc
        run: npx tsc --noEmit

      - name: Start app in background
        run: nohup npm run dev &
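The same checks can be reproduced on a local checkout before opening a pull request. A rough sketch using the npm scripts this workflow invokes (Node 22, per the setup-node step):

```bash
npm ci                      # install dependencies
npm run set:sqlite          # select the sqlite database driver
npm run set:oss             # create the build file
npm run db:sqlite:generate  # generate database migrations
npm run db:sqlite:push      # apply database migrations
npx tsc --noEmit            # type-check
npm run dev                 # start the app (CI backgrounds it with nohup)
```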
15  .gitignore  (vendored)

@@ -26,6 +26,14 @@ next-env.d.ts
migrations
tsconfig.tsbuildinfo
config/config.yml
config/config.saas.yml
config/config.oss.yml
config/config.enterprise.yml
config/privateConfig.yml
- config/postgres
+ config/postgres*
config/openapi.yaml
config/key
dist
.dist
installer

@@ -34,4 +42,11 @@ bin
.secrets
test_event.json
.idea/
public/branding
server/db/index.ts
server/build.ts
postgres/
dynamic/
*.mmdb
scratch/
tsconfig.json
@@ -4,7 +4,7 @@ Contributions are welcome!

Please see the contribution and local development guide on the docs page before getting started:

- https://docs.digpangolin.com/development/contributing
+ https://docs.pangolin.net/development/contributing

### Licensing Considerations
66  Dockerfile  (new file)

@@ -0,0 +1,66 @@
FROM node:22-alpine AS builder

WORKDIR /app

ARG BUILD=oss
ARG DATABASE=sqlite

# COPY package.json package-lock.json ./
COPY package*.json ./
RUN npm ci

COPY . .

RUN echo "export * from \"./$DATABASE\";" > server/db/index.ts

RUN echo "export const build = \"$BUILD\" as any;" > server/build.ts

# Copy the appropriate TypeScript configuration based on build type
RUN if [ "$BUILD" = "oss" ]; then cp tsconfig.oss.json tsconfig.json; \
    elif [ "$BUILD" = "saas" ]; then cp tsconfig.saas.json tsconfig.json; \
    elif [ "$BUILD" = "enterprise" ]; then cp tsconfig.enterprise.json tsconfig.json; \
    fi

# if the build is oss then remove the server/private directory
RUN if [ "$BUILD" = "oss" ]; then rm -rf server/private; fi

RUN if [ "$DATABASE" = "pg" ]; then npx drizzle-kit generate --dialect postgresql --schema ./server/db/pg/schema --out init; else npx drizzle-kit generate --dialect $DATABASE --schema ./server/db/$DATABASE/schema --out init; fi

RUN mkdir -p dist
RUN npm run next:build
RUN node esbuild.mjs -e server/index.ts -o dist/server.mjs -b $BUILD
RUN if [ "$DATABASE" = "pg" ]; then \
        node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs; \
    else \
        node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs; \
    fi

# test to make sure the build output is there and error if not
RUN test -f dist/server.mjs

RUN npm run build:cli

FROM node:22-alpine AS runner

WORKDIR /app

# Curl used for the health checks
RUN apk add --no-cache curl tzdata

# COPY package.json package-lock.json ./
COPY package*.json ./

RUN npm ci --omit=dev && npm cache clean --force

COPY --from=builder /app/.next/standalone ./
COPY --from=builder /app/.next/static ./.next/static
COPY --from=builder /app/dist ./dist
COPY --from=builder /app/init ./dist/init

COPY ./cli/wrapper.sh /usr/local/bin/pangctl
RUN chmod +x /usr/local/bin/pangctl ./dist/cli.mjs

COPY server/db/names.json ./dist/names.json
COPY public ./public

CMD ["npm", "run", "start"]
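The two build arguments select the edition and the database driver at image build time. A minimal sketch of a local build and run using the defaults above; the `pangolin:local` tag is illustrative, and the run command mirrors the ports and config mount used by the Makefile's `test` target.

```bash
# Build the OSS/sqlite variant locally (BUILD and DATABASE are the
# Dockerfile's own ARGs).
docker build --build-arg BUILD=oss --build-arg DATABASE=sqlite -t pangolin:local .

# Run it with the same ports and config volume as the Makefile "test" target.
docker run -it -p 3000:3000 -p 3001:3001 -p 3002:3002 \
    -v ./config:/app/config pangolin:local
```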
@@ -1,41 +0,0 @@
FROM node:22-alpine AS builder

WORKDIR /app

# COPY package.json package-lock.json ./
COPY package*.json ./
RUN npm ci

COPY . .

RUN echo 'export * from "./pg";' > server/db/index.ts

RUN npx drizzle-kit generate --dialect postgresql --schema ./server/db/pg/schema.ts --out init

RUN npm run build:pg
RUN npm run build:cli

FROM node:22-alpine AS runner

WORKDIR /app

# Curl used for the health checks
RUN apk add --no-cache curl

# COPY package.json package-lock.json ./
COPY package*.json ./
RUN npm ci --omit=dev && npm cache clean --force

COPY --from=builder /app/.next/standalone ./
COPY --from=builder /app/.next/static ./.next/static
COPY --from=builder /app/dist ./dist
COPY --from=builder /app/init ./dist/init

COPY ./cli/wrapper.sh /usr/local/bin/pangctl
RUN chmod +x /usr/local/bin/pangctl ./dist/cli.mjs

COPY server/db/names.json ./dist/names.json

COPY public ./public

CMD ["npm", "run", "start:pg"]
@@ -1,41 +0,0 @@
FROM node:22-alpine AS builder

WORKDIR /app

# COPY package.json package-lock.json ./
COPY package*.json ./
RUN npm ci

COPY . .

RUN echo 'export * from "./sqlite";' > server/db/index.ts

RUN npx drizzle-kit generate --dialect sqlite --schema ./server/db/sqlite/schema.ts --out init

RUN npm run build:sqlite
RUN npm run build:cli

FROM node:22-alpine AS runner

WORKDIR /app

# Curl used for the health checks
RUN apk add --no-cache curl

# COPY package.json package-lock.json ./
COPY package*.json ./
RUN npm ci --omit=dev && npm cache clean --force

COPY --from=builder /app/.next/standalone ./
COPY --from=builder /app/.next/static ./.next/static
COPY --from=builder /app/dist ./dist
COPY --from=builder /app/init ./dist/init

COPY ./cli/wrapper.sh /usr/local/bin/pangctl
RUN chmod +x /usr/local/bin/pangctl ./dist/cli.mjs

COPY server/db/names.json ./dist/names.json

COPY public ./public

CMD ["npm", "run", "start:sqlite"]
31  LICENSE

@@ -1,3 +1,34 @@
Copyright (c) 2025 Fossorial, Inc.

Portions of this software are licensed as follows:

* All files that include a header specifying they are licensed under the
  "Fossorial Commercial License" are governed by the Fossorial Commercial
  License terms. The specific terms applicable to each customer depend on the
  commercial license tier agreed upon in writing with Fossorial, Inc.
  Unauthorized use, copying, modification, or distribution is strictly
  prohibited.

* All files that include a header specifying they are licensed under the GNU
  Affero General Public License, Version 3 ("AGPL-3"), are governed by the
  AGPL-3 terms. A full copy of the AGPL-3 license is provided below. However,
  these files are also available under the Fossorial Commercial License if a
  separate commercial license agreement has been executed between the customer
  and Fossorial, Inc.

* All files without a license header are, by default, licensed under the GNU
  Affero General Public License, Version 3 (AGPL-3). These files may also be
  made available under the Fossorial Commercial License upon agreement with
  Fossorial, Inc.

* All third-party components included in this repository are licensed under
  their respective original licenses, as provided by their authors.

Please consult the header of each individual file to determine the applicable
license. For AGPL-3 licensed files, dual-licensing under the Fossorial
Commercial License is available subject to written agreement with Fossorial,
Inc.

GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
76  Makefile

@@ -1,14 +1,78 @@
.PHONY: build build-pg build-release build-arm build-x86 test clean

+ major_tag := $(shell echo $(tag) | cut -d. -f1)
+ minor_tag := $(shell echo $(tag) | cut -d. -f1,2)

build-release:
    @if [ -z "$(tag)" ]; then \
        echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
        exit 1; \
    fi
-   docker buildx build --platform linux/arm64,linux/amd64 -t fosrl/pangolin:latest -f Dockerfile.sqlite --push .
-   docker buildx build --platform linux/arm64,linux/amd64 -t fosrl/pangolin:$(tag) -f Dockerfile.sqlite --push .
-   docker buildx build --platform linux/arm64,linux/amd64 -t fosrl/pangolin:postgresql-latest -f Dockerfile.pg --push .
-   docker buildx build --platform linux/arm64,linux/amd64 -t fosrl/pangolin:postgresql-$(tag) -f Dockerfile.pg --push .
+   docker buildx build \
+       --build-arg BUILD=oss \
+       --build-arg DATABASE=sqlite \
+       --platform linux/arm64,linux/amd64 \
+       --tag fosrl/pangolin:latest \
+       --tag fosrl/pangolin:$(major_tag) \
+       --tag fosrl/pangolin:$(minor_tag) \
+       --tag fosrl/pangolin:$(tag) \
+       --push .
+   docker buildx build \
+       --build-arg BUILD=oss \
+       --build-arg DATABASE=pg \
+       --platform linux/arm64,linux/amd64 \
+       --tag fosrl/pangolin:postgresql-latest \
+       --tag fosrl/pangolin:postgresql-$(major_tag) \
+       --tag fosrl/pangolin:postgresql-$(minor_tag) \
+       --tag fosrl/pangolin:postgresql-$(tag) \
+       --push .
+   docker buildx build \
+       --build-arg BUILD=enterprise \
+       --build-arg DATABASE=sqlite \
+       --platform linux/arm64,linux/amd64 \
+       --tag fosrl/pangolin:ee-latest \
+       --tag fosrl/pangolin:ee-$(major_tag) \
+       --tag fosrl/pangolin:ee-$(minor_tag) \
+       --tag fosrl/pangolin:ee-$(tag) \
+       --push .
+   docker buildx build \
+       --build-arg BUILD=enterprise \
+       --build-arg DATABASE=pg \
+       --platform linux/arm64,linux/amd64 \
+       --tag fosrl/pangolin:ee-postgresql-latest \
+       --tag fosrl/pangolin:ee-postgresql-$(major_tag) \
+       --tag fosrl/pangolin:ee-postgresql-$(minor_tag) \
+       --tag fosrl/pangolin:ee-postgresql-$(tag) \
+       --push .

+ build-rc:
+   @if [ -z "$(tag)" ]; then \
+       echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
+       exit 1; \
+   fi
+   docker buildx build \
+       --build-arg BUILD=oss \
+       --build-arg DATABASE=sqlite \
+       --platform linux/arm64,linux/amd64 \
+       --tag fosrl/pangolin:$(tag) \
+       --push .
+   docker buildx build \
+       --build-arg BUILD=oss \
+       --build-arg DATABASE=pg \
+       --platform linux/arm64,linux/amd64 \
+       --tag fosrl/pangolin:postgresql-$(tag) \
+       --push .
+   docker buildx build \
+       --build-arg BUILD=enterprise \
+       --build-arg DATABASE=sqlite \
+       --platform linux/arm64,linux/amd64 \
+       --tag fosrl/pangolin:ee-$(tag) \
+       --push .
+   docker buildx build \
+       --build-arg BUILD=enterprise \
+       --build-arg DATABASE=pg \
+       --platform linux/arm64,linux/amd64 \
+       --tag fosrl/pangolin:ee-postgresql-$(tag) \
+       --push .

build-arm:
    docker buildx build --platform linux/arm64 -t fosrl/pangolin:latest .

@@ -17,10 +81,10 @@ build-x86:
    docker buildx build --platform linux/amd64 -t fosrl/pangolin:latest .

build-sqlite:
-   docker build -t fosrl/pangolin:latest -f Dockerfile.sqlite .
+   docker build --build-arg DATABASE=sqlite -t fosrl/pangolin:latest .

build-pg:
-   docker build -t fosrl/pangolin:postgresql-latest -f Dockerfile.pg .
+   docker build --build-arg DATABASE=pg -t fosrl/pangolin:postgresql-latest .

test:
    docker run -it -p 3000:3000 -p 3001:3001 -p 3002:3002 -v ./config:/app/config fosrl/pangolin:latest
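Typical invocations of the targets above, assuming a configured buildx builder and push access to the fosrl/pangolin repositories; the tag values are examples only (the `build-rc` tag follows the `x.y.z.rc.n` pattern that the CI/CD workflow triggers on).

```bash
# Full release: pushes latest, major, minor, and exact tags for every variant.
make build-release tag=1.12.0

# Release candidate: pushes only the exact tag, no latest/major/minor tags.
make build-rc tag=1.12.0.rc.1
```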
141  README.md

@@ -1,146 +1,91 @@
<div align="center">
  <h2>
-     <picture>
-         <source media="(prefers-color-scheme: dark)" srcset="public/logo/word_mark_white.png">
-         <img alt="Pangolin Logo" src="public/logo/word_mark_black.png" width="250">
+     <a href="https://pangolin.net/">
+         <picture>
+             <source media="(prefers-color-scheme: dark)" srcset="public/logo/word_mark_white.png">
+             <img alt="Pangolin Logo" src="public/logo/word_mark_black.png" width="350">
          </picture>
+     </a>
  </h2>
</div>

<h4 align="center">Secure gateway to your private networks</h4>
<div align="center">

_Pangolin tunnels your services to the internet so you can access anything from anywhere._

</div>

<div align="center">
  <h5>
-     <a href="https://digpangolin.com">
+     <a href="https://pangolin.net/">
          Website
      </a>
      <span> | </span>
-     <a href="https://docs.digpangolin.com/self-host/quick-install">
-         Install Guide
+     <a href="https://docs.pangolin.net/">
+         Documentation
      </a>
      <span> | </span>
-     <a href="mailto:numbat@fossorial.io">
+     <a href="mailto:contact@pangolin.net">
          Contact Us
      </a>
  </h5>
</div>

<div align="center">

- [](https://discord.gg/HCJR8Xhme4)
+ [](https://pangolin.net/slack)
[](https://hub.docker.com/r/fosrl/pangolin)

+ [](https://discord.gg/HCJR8Xhme4)
- [](https://www.youtube.com/@fossorial-app)
+ [](https://www.youtube.com/@fossorial-app)

</div>

<p align="center">
  <strong>
-     Start testing Pangolin at <a href="https://pangolin.fossorial.io/auth/signup">pangolin.fossorial.io</a>
+     Start testing Pangolin at <a href="https://app.pangolin.net/auth/signup">app.pangolin.net</a>
  </strong>
</p>

- Pangolin is a self-hosted tunneled reverse proxy server with identity and access control, designed to securely expose private resources on distributed networks. Acting as a central hub, it connects isolated networks — even those behind restrictive firewalls — through encrypted tunnels, enabling easy access to remote services without opening ports.
+ Pangolin is a self-hosted tunneled reverse proxy server with identity and context aware access control, designed to easily expose and protect applications running anywhere. Pangolin acts as a central hub and connects isolated networks — even those behind restrictive firewalls — through encrypted tunnels, enabling easy access to remote services without opening ports or requiring a VPN.

<img src="public/screenshots/hero.png" alt="Preview"/>

## Installation



- Check out the [quick install guide](https://docs.pangolin.net/self-host/quick-install) for how to install and set up Pangolin.
- Install from the [DigitalOcean marketplace](https://marketplace.digitalocean.com/apps/pangolin-ce-1?refcode=edf0480eeb81) for a one-click pre-configured installer.

## Key Features

### Reverse Proxy Through WireGuard Tunnel

- Expose private resources on your network **without opening ports** (firewall punching).
- Secure and easy to configure private connectivity via a custom **user space WireGuard client**, [Newt](https://github.com/fosrl/newt).
- Built-in support for any WireGuard client.
- Automated **SSL certificates** (https) via [LetsEncrypt](https://letsencrypt.org/).
- Support for HTTP/HTTPS and **raw TCP/UDP services**.
- Load balancing.
- Extend functionality with existing [Traefik](https://github.com/traefik/traefik) plugins, such as [CrowdSec](https://plugins.traefik.io/plugins/6335346ca4caa9ddeffda116/crowdsec-bouncer-traefik-plugin) and [Geoblock](https://github.com/PascalMinder/geoblock).
- **Automatically install and configure Crowdsec via Pangolin's installer script.**
- Attach as many sites to the central server as you wish.

### Identity & Access Management

- Centralized authentication system using platform SSO. **Users will only have to manage one login.**
- **Define access control rules for IPs, IP ranges, and URL paths per resource.**
- TOTP with backup codes for two-factor authentication.
- Create organizations, each with multiple sites, users, and roles.
- **Role-based access control** to manage resource access permissions.
- Additional authentication options include:
|
||||
- Email whitelisting with **one-time passcodes.**
|
||||
- **Temporary, self-destructing share links.**
|
||||
- Resource specific pin codes.
|
||||
- Resource specific passwords.
|
||||
- Passkeys
|
||||
- External identity provider (IdP) support with OAuth2/OIDC, such as Authentik, Keycloak, Okta, and others.
|
||||
- Auto-provision users and roles from your IdP.
|
||||
|
||||
<img src="public/auth-diagram1.png" alt="Auth and diagram"/>
|
||||
|
||||
## Use Cases
|
||||
|
||||
### Manage Access to Internal Apps
|
||||
|
||||
- Grant users access to your apps from anywhere using just a web browser. No client software required.
|
||||
|
||||
### Developers and DevOps
|
||||
|
||||
- Expose and test internal tools and dashboards like **Grafana**. Bring localhost or private IPs online for easy access.
|
||||
|
||||
### Secure API Gateway
|
||||
|
||||
- One application load balancer across multiple clouds and on-premises.
|
||||
|
||||
### IoT and Edge Devices
|
||||
|
||||
- Easily expose **IoT devices**, **edge servers**, or **Raspberry Pi** to the internet for field equipment monitoring.
|
||||
|
||||
<img src="public/screenshots/sites.png" alt="Sites"/>
|
||||
<img src="public/screenshots/hero.png" />
|
||||
|
||||
## Deployment Options
|
||||
|
||||
### Fully Self Hosted
|
||||
| <img width=500 /> | Description |
|
||||
|-----------------|--------------|
|
||||
| **Self-Host: Community Edition** | Free, open source, and licensed under AGPL-3. |
|
||||
| **Self-Host: Enterprise Edition** | Licensed under Fossorial Commercial License. Free for personal and hobbyist use, and for businesses earning under \$100K USD annually. |
|
||||
| **Pangolin Cloud** | Fully managed service with instant setup and pay-as-you-go pricing — no infrastructure required. Or, self-host your own [remote node](https://docs.pangolin.net/manage/remote-node/nodes) and connect to our control plane. |
|
||||
|
||||
Host the full application on your own server or on the cloud with a VPS. Take a look at the [documentation](https://docs.digpangolin.com/self-host/quick-install) to get started.
|
||||
## Key Features
|
||||
|
||||
> Many of our users have had a great experience with [RackNerd](https://my.racknerd.com/aff.php?aff=13788). Depending on promotions, you can get a [**VPS with 1 vCPU, 1GB RAM, and ~20GB SSD for just around $12/year**](https://my.racknerd.com/aff.php?aff=13788&pid=912). That's a great deal!
|
||||
Pangolin packages everything you need for seamless application access and exposure into one cohesive platform.
|
||||
|
||||
### Pangolin Cloud
|
||||
| <img width=500 /> | <img width=500 /> |
|
||||
|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------|
|
||||
| **Manage applications in one place**<br /><br /> Pangolin provides a unified dashboard where you can monitor, configure, and secure all of your services regardless of where they are hosted. | <img src="public/screenshots/hero.png" width=500 /><tr></tr> |
|
||||
| **Reverse proxy across networks anywhere**<br /><br />Route traffic via tunnels to any private network. Pangolin works like a reverse proxy that spans multiple networks and handles routing, load balancing, health checking, and more to the right services on the other end. | <img src="public/screenshots/sites.png" width=500 /><tr></tr> |
|
||||
| **Enforce identity and context aware rules**<br /><br />Protect your applications with identity and context aware rules such as SSO, OIDC, PIN, password, temporary share links, geolocation, IP, and more. | <img src="public/auth-diagram1.png" width=500 /><tr></tr> |
|
||||
| **Quickly connect Pangolin sites**<br /><br />Pangolin's lightweight [Newt](https://github.com/fosrl/newt) client runs in userspace and can run anywhere. Use it as a site connector to route traffic to backends across all of your environments. | <img src="public/clip.gif" width=500 /><tr></tr> |
|
||||
|
||||
Easy to use with simple [pay as you go pricing](https://digpangolin.com/pricing). [Check it out here](https://pangolin.fossorial.io/auth/signup).
|
||||
## Get Started
|
||||
|
||||
- Everything you get with self hosted Pangolin, but fully managed for you.
|
||||
### Check out the docs
|
||||
|
||||
### Hybrid & High Availability
|
||||
We encourage everyone to read the full documentation first, which is
|
||||
available at [docs.pangolin.net](https://docs.pangolin.net). This README provides only a very brief subset of
|
||||
the docs to illustrate some basic ideas.
|
||||
|
||||
Managed control plane, your infrastructure
|
||||
### Sign up and try now
|
||||
|
||||
- We manage the database and control plane.
|
||||
- You self-host a lightweight exit node.
|
||||
- Traffic flows through your infra.
|
||||
- We coordinate failover between your nodes, or to the Cloud, when things go wrong.
|
||||
|
||||
If interested, [contact us](mailto:numbat@fossorial.io).
|
||||
|
||||
### Full Enterprise On-Premises
|
||||
|
||||
[Contact us](mailto:numbat@fossorial.io) for fully distributed, enterprise deployments on your infrastructure, controlled by your team.
|
||||
|
||||
## Project Development / Roadmap
|
||||
|
||||
We want to hear your feature requests! Add them to the [discussion board](https://github.com/orgs/fosrl/discussions/categories/feature-requests).
|
||||
For Pangolin's managed service, you will first need to create an account at
|
||||
[app.pangolin.net](https://app.pangolin.net). We have a generous free tier to get started.
|
||||
|
||||
## Licensing
|
||||
|
||||
Pangolin is dual licensed under the AGPL-3 and the Fossorial Commercial license. For inquiries about commercial licensing, please contact us at [numbat@fossorial.io](mailto:numbat@fossorial.io).
|
||||
Pangolin is dual licensed under the AGPL-3 and the [Fossorial Commercial License](https://pangolin.net/fcl.html). For inquiries about commercial licensing, please contact us at [contact@pangolin.net](mailto:contact@pangolin.net).
|
||||
|
||||
## Contributions
|
||||
|
||||
Looking for something to contribute? Take a look at issues marked with [help wanted](https://github.com/fosrl/pangolin/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22help%20wanted%22). Also take a look through the feature requests in Discussions - many are available, and some are marked as a good first issue.
|
||||
|
||||
Please see [CONTRIBUTING](./CONTRIBUTING.md) in the repository for guidelines and best practices.
|
||||
|
||||
Please post bug reports and other functional issues in the [Issues](https://github.com/fosrl/pangolin/issues) section of the repository.
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
If you discover a security vulnerability, please follow the steps below to responsibly disclose it to us:
|
||||
|
||||
1. **Do not create a public GitHub issue or discussion post.** This could put the security of other users at risk.
|
||||
2. Send a detailed report to [security@fossorial.io](mailto:security@fossorial.io) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:
|
||||
2. Send a detailed report to [security@pangolin.net](mailto:security@pangolin.net) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:
|
||||
|
||||
- Description and location of the vulnerability.
|
||||
- Potential impact of the vulnerability.
|
||||
|
||||
72
blueprint.py
Normal file
@@ -0,0 +1,72 @@
|
||||
import requests
|
||||
import yaml
|
||||
import json
|
||||
import base64
|
||||
|
||||
# The file path for the YAML file to be read
|
||||
# You can change this to the path of your YAML file
|
||||
YAML_FILE_PATH = 'blueprint.yaml'
|
||||
|
||||
# The API endpoint and headers from the curl request
|
||||
API_URL = 'http://api.pangolin.net/v1/org/test/blueprint'
|
||||
HEADERS = {
|
||||
'accept': '*/*',
|
||||
'Authorization': 'Bearer <your_token_here>',
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
|
||||
def convert_and_send(file_path, url, headers):
|
||||
"""
|
||||
Reads a YAML file, converts its content to a JSON payload,
|
||||
and sends it via a PUT request to a specified URL.
|
||||
"""
|
||||
try:
|
||||
# Read the YAML file content
|
||||
with open(file_path, 'r') as file:
|
||||
yaml_content = file.read()
|
||||
|
||||
# Parse the YAML string to a Python dictionary
|
||||
# This will be used to ensure the YAML is valid before sending
|
||||
parsed_yaml = yaml.safe_load(yaml_content)
|
||||
|
||||
# convert the parsed YAML to a JSON string
|
||||
json_payload = json.dumps(parsed_yaml)
|
||||
print("Converted JSON payload:")
|
||||
print(json_payload)
|
||||
|
||||
# Encode the JSON string to Base64
|
||||
encoded_json = base64.b64encode(json_payload.encode('utf-8')).decode('utf-8')
|
||||
|
||||
# Create the final payload with the base64 encoded data
|
||||
final_payload = {
|
||||
"blueprint": encoded_json
|
||||
}
|
||||
|
||||
print("Sending the following Base64 encoded JSON payload:")
|
||||
print(final_payload)
|
||||
print("-" * 20)
|
||||
|
||||
# Make the PUT request with the base64 encoded payload
|
||||
response = requests.put(url, headers=headers, json=final_payload)
|
||||
|
||||
# Print the API response for debugging
|
||||
print(f"API Response Status Code: {response.status_code}")
|
||||
print("API Response Content:")
|
||||
print(response.text)
|
||||
|
||||
# Raise an exception for bad status codes (4xx or 5xx)
|
||||
response.raise_for_status()
|
||||
|
||||
except FileNotFoundError:
|
||||
print(f"Error: The file '{file_path}' was not found.")
|
||||
except yaml.YAMLError as e:
|
||||
print(f"Error parsing YAML file: {e}")
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"An error occurred during the API request: {e}")
|
||||
except Exception as e:
|
||||
print(f"An unexpected error occurred: {e}")
|
||||
|
||||
# Run the function
|
||||
if __name__ == "__main__":
|
||||
convert_and_send(YAML_FILE_PATH, API_URL, HEADERS)
|
||||
|
||||
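For convenience, the hard-coded YAML_FILE_PATH, API_URL, and token in the script above could be supplied on the command line instead. A minimal sketch, assuming the script is saved as blueprint.py and importable; the argument names below are illustrative, not part of any existing CLI:

import argparse
from blueprint import convert_and_send  # the function defined in blueprint.py above

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Push a blueprint YAML to a Pangolin org")
    parser.add_argument("yaml_file", help="Path to the blueprint YAML file")
    parser.add_argument("--url", required=True, help="Blueprint endpoint, e.g. the /v1/org/<org>/blueprint URL")
    parser.add_argument("--token", required=True, help="API bearer token")
    args = parser.parse_args()

    headers = {
        "accept": "*/*",
        "Authorization": f"Bearer {args.token}",
        "Content-Type": "application/json",
    }
    convert_and_send(args.yaml_file, args.url, headers)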
69
blueprint.yaml
Normal file
@@ -0,0 +1,69 @@
|
||||
client-resources:
|
||||
client-resource-nice-id-uno:
|
||||
name: this is my resource
|
||||
protocol: tcp
|
||||
proxy-port: 3001
|
||||
hostname: localhost
|
||||
internal-port: 3000
|
||||
site: lively-yosemite-toad
|
||||
client-resource-nice-id-duce:
|
||||
name: this is my resource
|
||||
protocol: udp
|
||||
proxy-port: 3000
|
||||
hostname: localhost
|
||||
internal-port: 3000
|
||||
site: lively-yosemite-toad
|
||||
|
||||
proxy-resources:
|
||||
resource-nice-id-uno:
|
||||
name: this is my resource
|
||||
protocol: http
|
||||
full-domain: duce.test.example.com
|
||||
host-header: example.com
|
||||
tls-server-name: example.com
|
||||
# auth:
|
||||
# pincode: 123456
|
||||
# password: sadfasdfadsf
|
||||
# sso-enabled: true
|
||||
# sso-roles:
|
||||
# - Member
|
||||
# sso-users:
|
||||
# - owen@pangolin.net
|
||||
# whitelist-users:
|
||||
# - owen@pangolin.net
|
||||
headers:
|
||||
- name: X-Example-Header
|
||||
value: example-value
|
||||
- name: X-Another-Header
|
||||
value: another-value
|
||||
rules:
|
||||
- action: allow
|
||||
match: ip
|
||||
value: 1.1.1.1
|
||||
- action: deny
|
||||
match: cidr
|
||||
value: 2.2.2.2/32
|
||||
- action: pass
|
||||
match: path
|
||||
value: /admin
|
||||
targets:
|
||||
- site: lively-yosemite-toad
|
||||
path: /path
|
||||
pathMatchType: prefix
|
||||
hostname: localhost
|
||||
method: http
|
||||
port: 8000
|
||||
- site: slim-alpine-chipmunk
|
||||
hostname: localhost
|
||||
path: /yoman
|
||||
pathMatchType: exact
|
||||
method: http
|
||||
port: 8001
|
||||
resource-nice-id-duce:
|
||||
name: this is other resource
|
||||
protocol: tcp
|
||||
proxy-port: 3000
|
||||
targets:
|
||||
- site: lively-yosemite-toad
|
||||
hostname: localhost
|
||||
port: 3000
|
||||
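Before pushing a file like this with blueprint.py above, a light local sanity check can catch obvious mistakes. A minimal sketch; the required-field choices here are assumptions based on the sample above, not a definitive schema:

import sys
import yaml

def check_blueprint(path):
    """Return a list of human-readable problems found in a blueprint YAML file."""
    problems = []
    with open(path) as f:
        data = yaml.safe_load(f) or {}

    for rid, res in (data.get("proxy-resources") or {}).items():
        protocol = res.get("protocol")
        if protocol not in ("http", "tcp", "udp"):
            problems.append(f"{rid}: unexpected protocol {protocol!r}")
        if protocol in ("tcp", "udp") and "proxy-port" not in res:
            problems.append(f"{rid}: tcp/udp resources need a proxy-port")
        if protocol == "http" and "full-domain" not in res:
            problems.append(f"{rid}: http resources need a full-domain")
        if not res.get("targets"):
            problems.append(f"{rid}: no targets defined")
    return problems

if __name__ == "__main__":
    issues = check_blueprint(sys.argv[1] if len(sys.argv) > 1 else "blueprint.yaml")
    for issue in issues:
        print(issue)
    sys.exit(1 if issues else 0)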
17
bruno/API Keys/Create API Key.bru
Normal file
@@ -0,0 +1,17 @@
|
||||
meta {
|
||||
name: Create API Key
|
||||
type: http
|
||||
seq: 1
|
||||
}
|
||||
|
||||
put {
|
||||
url: http://localhost:3000/api/v1/api-key
|
||||
body: json
|
||||
auth: inherit
|
||||
}
|
||||
|
||||
body:json {
|
||||
{
|
||||
"isRoot": true
|
||||
}
|
||||
}
|
||||
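Outside of Bruno, this is just an authenticated PUT with a small JSON body. A minimal sketch with Python's requests; the login step and credentials are placeholders, and it assumes the cookie-based session that the collection's Login request establishes (adjust host/port and auth to match your deployment):

import requests

BASE_URL = "http://localhost:3000/api/v1"  # same host the Bruno requests use

with requests.Session() as session:
    # Log in first so the session cookie is sent with later calls (placeholder credentials).
    session.post(f"{BASE_URL}/auth/login", json={
        "email": "admin@example.com",
        "password": "changeme",
    }).raise_for_status()

    # Equivalent of the "Create API Key" Bruno request above.
    resp = session.put(f"{BASE_URL}/api-key", json={"isRoot": True})
    resp.raise_for_status()
    print(resp.json())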
11
bruno/API Keys/Delete API Key.bru
Normal file
@@ -0,0 +1,11 @@
|
||||
meta {
|
||||
name: Delete API Key
|
||||
type: http
|
||||
seq: 2
|
||||
}
|
||||
|
||||
delete {
|
||||
url: http://localhost:3000/api/v1/api-key/dm47aacqxxn3ubj
|
||||
body: none
|
||||
auth: inherit
|
||||
}
|
||||
11
bruno/API Keys/List API Key Actions.bru
Normal file
@@ -0,0 +1,11 @@
|
||||
meta {
|
||||
name: List API Key Actions
|
||||
type: http
|
||||
seq: 6
|
||||
}
|
||||
|
||||
get {
|
||||
url: http://localhost:3000/api/v1/api-key/ex0izu2c37fjz9x/actions
|
||||
body: none
|
||||
auth: inherit
|
||||
}
|
||||
11
bruno/API Keys/List Org API Keys.bru
Normal file
@@ -0,0 +1,11 @@
|
||||
meta {
|
||||
name: List Org API Keys
|
||||
type: http
|
||||
seq: 4
|
||||
}
|
||||
|
||||
get {
|
||||
url: http://localhost:3000/api/v1/org/home-lab/api-keys
|
||||
body: none
|
||||
auth: inherit
|
||||
}
|
||||
11
bruno/API Keys/List Root API Keys.bru
Normal file
@@ -0,0 +1,11 @@
|
||||
meta {
|
||||
name: List Root API Keys
|
||||
type: http
|
||||
seq: 3
|
||||
}
|
||||
|
||||
get {
|
||||
url: http://localhost:3000/api/v1/root/api-keys
|
||||
body: none
|
||||
auth: inherit
|
||||
}
|
||||
17
bruno/API Keys/Set API Key Actions.bru
Normal file
@@ -0,0 +1,17 @@
|
||||
meta {
|
||||
name: Set API Key Actions
|
||||
type: http
|
||||
seq: 5
|
||||
}
|
||||
|
||||
post {
|
||||
url: http://localhost:3000/api/v1/api-key/ex0izu2c37fjz9x/actions
|
||||
body: json
|
||||
auth: inherit
|
||||
}
|
||||
|
||||
body:json {
|
||||
{
|
||||
"actionIds": ["listSites"]
|
||||
}
|
||||
}
|
||||
17
bruno/API Keys/Set API Key Orgs.bru
Normal file
@@ -0,0 +1,17 @@
|
||||
meta {
|
||||
name: Set API Key Orgs
|
||||
type: http
|
||||
seq: 7
|
||||
}
|
||||
|
||||
post {
|
||||
url: http://localhost:3000/api/v1/api-key/ex0izu2c37fjz9x/orgs
|
||||
body: json
|
||||
auth: inherit
|
||||
}
|
||||
|
||||
body:json {
|
||||
{
|
||||
"orgIds": ["home-lab"]
|
||||
}
|
||||
}
|
||||
3
bruno/API Keys/folder.bru
Normal file
@@ -0,0 +1,3 @@
|
||||
meta {
|
||||
name: API Keys
|
||||
}
|
||||
@@ -5,14 +5,14 @@ meta {
|
||||
}
|
||||
|
||||
post {
|
||||
url: http://localhost:3000/api/v1/auth/login
|
||||
url: http://localhost:4000/api/v1/auth/login
|
||||
body: json
|
||||
auth: none
|
||||
}
|
||||
|
||||
body:json {
|
||||
{
|
||||
"email": "admin@fosrl.io",
|
||||
"email": "owen@pangolin.net",
|
||||
"password": "Password123!"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ meta {
|
||||
}
|
||||
|
||||
post {
|
||||
url: http://localhost:3000/api/v1/auth/logout
|
||||
url: http://localhost:4000/api/v1/auth/logout
|
||||
body: none
|
||||
auth: none
|
||||
}
|
||||
|
||||
@@ -12,6 +12,6 @@ post {
|
||||
|
||||
body:json {
|
||||
{
|
||||
"email": "milo@fossorial.io"
|
||||
"email": "milo@pangolin.net"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ put {
|
||||
|
||||
body:json {
|
||||
{
|
||||
"email": "numbat@fossorial.io",
|
||||
"email": "numbat@pangolin.net",
|
||||
"password": "Password123!"
|
||||
}
|
||||
}
|
||||
|
||||
22
bruno/IDP/Create OIDC Provider.bru
Normal file
@@ -0,0 +1,22 @@
|
||||
meta {
|
||||
name: Create OIDC Provider
|
||||
type: http
|
||||
seq: 1
|
||||
}
|
||||
|
||||
put {
|
||||
url: http://localhost:3000/api/v1/org/home-lab/idp/oidc
|
||||
body: json
|
||||
auth: inherit
|
||||
}
|
||||
|
||||
body:json {
|
||||
{
|
||||
"clientId": "JJoSvHCZcxnXT2sn6CObj6a21MuKNRXs3kN5wbys",
|
||||
"clientSecret": "2SlGL2wOGgMEWLI9yUuMAeFxre7qSNJVnXMzyepdNzH1qlxYnC4lKhhQ6a157YQEkYH3vm40KK4RCqbYiF8QIweuPGagPX3oGxEj2exwutoXFfOhtq4hHybQKoFq01Z3",
|
||||
"authUrl": "http://localhost:9000/application/o/authorize/",
|
||||
"tokenUrl": "http://localhost:9000/application/o/token/",
|
||||
"scopes": ["email", "openid", "profile"],
|
||||
"userIdentifier": "email"
|
||||
}
|
||||
}
|
||||
11
bruno/IDP/Generate OIDC URL.bru
Normal file
@@ -0,0 +1,11 @@
|
||||
meta {
|
||||
name: Generate OIDC URL
|
||||
type: http
|
||||
seq: 2
|
||||
}
|
||||
|
||||
get {
|
||||
url: http://localhost:3000/api/v1
|
||||
body: none
|
||||
auth: inherit
|
||||
}
|
||||
3
bruno/IDP/folder.bru
Normal file
@@ -0,0 +1,3 @@
|
||||
meta {
|
||||
name: IDP
|
||||
}
|
||||
11
bruno/Internal/Traefik Config.bru
Normal file
@@ -0,0 +1,11 @@
|
||||
meta {
|
||||
name: Traefik Config
|
||||
type: http
|
||||
seq: 1
|
||||
}
|
||||
|
||||
get {
|
||||
url: http://localhost:3001/api/v1/traefik-config
|
||||
body: none
|
||||
auth: inherit
|
||||
}
|
||||
3
bruno/Internal/folder.bru
Normal file
@@ -0,0 +1,3 @@
|
||||
meta {
|
||||
name: Internal
|
||||
}
|
||||
11
bruno/Remote Exit Node/createRemoteExitNode.bru
Normal file
@@ -0,0 +1,11 @@
|
||||
meta {
|
||||
name: createRemoteExitNode
|
||||
type: http
|
||||
seq: 1
|
||||
}
|
||||
|
||||
put {
|
||||
url: http://localhost:4000/api/v1/org/org_i21aifypnlyxur2/remote-exit-node
|
||||
body: none
|
||||
auth: none
|
||||
}
|
||||
11
bruno/Test.bru
Normal file
@@ -0,0 +1,11 @@
|
||||
meta {
|
||||
name: Test
|
||||
type: http
|
||||
seq: 2
|
||||
}
|
||||
|
||||
get {
|
||||
url: http://localhost:3000/api/v1
|
||||
body: none
|
||||
auth: inherit
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"version": "1",
|
||||
"name": "Pangolin",
|
||||
"name": "Pangolin Saas",
|
||||
"type": "collection",
|
||||
"ignore": [
|
||||
"node_modules",
|
||||
|
||||
@@ -32,7 +32,8 @@ export const setAdminCredentials: CommandModule<{}, SetAdminCredentialsArgs> = {
|
||||
},
|
||||
handler: async (argv: { email: string; password: string }) => {
|
||||
try {
|
||||
let { email, password } = argv;
|
||||
const { password } = argv;
|
||||
let { email } = argv;
|
||||
email = email.trim().toLowerCase();
|
||||
|
||||
const parsed = passwordSchema.safeParse(password);
|
||||
@@ -89,7 +90,8 @@ export const setAdminCredentials: CommandModule<{}, SetAdminCredentialsArgs> = {
|
||||
passwordHash,
|
||||
dateCreated: moment().toISOString(),
|
||||
serverAdmin: true,
|
||||
emailVerified: true
|
||||
emailVerified: true,
|
||||
lastPasswordChange: new Date().getTime()
|
||||
});
|
||||
|
||||
console.log("Server admin created");
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# To see all available options, please visit the docs:
|
||||
# https://docs.digpangolin.com/self-host/advanced/config-file
|
||||
# https://docs.pangolin.net/self-host/advanced/config-file
|
||||
|
||||
app:
|
||||
dashboard_url: http://localhost:3002
|
||||
|
||||
Binary file not shown.
46
config/traefik/dynamic_config.yml
Normal file
@@ -0,0 +1,46 @@
|
||||
http:
|
||||
middlewares:
|
||||
redirect-to-https:
|
||||
redirectScheme:
|
||||
scheme: https
|
||||
|
||||
routers:
|
||||
# HTTP to HTTPS redirect router
|
||||
main-app-router-redirect:
|
||||
rule: "Host(`{{.DashboardDomain}}`)"
|
||||
service: next-service
|
||||
entryPoints:
|
||||
- web
|
||||
middlewares:
|
||||
- redirect-to-https
|
||||
|
||||
# Next.js router (handles everything except API and WebSocket paths)
|
||||
next-router:
|
||||
rule: "Host(`{{.DashboardDomain}}`)"
|
||||
service: next-service
|
||||
priority: 10
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
|
||||
# API router (handles /api/v1 paths)
|
||||
api-router:
|
||||
rule: "Host(`{{.DashboardDomain}}`) && PathPrefix(`/api/v1`)"
|
||||
service: api-service
|
||||
priority: 100
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
|
||||
services:
|
||||
next-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://pangolin:3002" # Next.js server
|
||||
|
||||
api-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://pangolin:3000" # API/WebSocket server
|
||||
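The routing intent of this file is that requests for /api/v1 on the dashboard domain hit the API service (priority 100) while everything else falls through to the Next.js service (priority 10). A quick way to eyeball that mapping is to load the file and print the routers in priority order — a minimal sketch, assuming it is run from the repository root:

import yaml

with open("config/traefik/dynamic_config.yml") as f:
    cfg = yaml.safe_load(f)

routers = cfg.get("http", {}).get("routers", {})
# Highest priority first, so the winning router for /api/v1 prints at the top.
for name, router in sorted(routers.items(), key=lambda kv: kv[1].get("priority", 0), reverse=True):
    print(f"{name}: rule={router.get('rule')} service={router.get('service')} priority={router.get('priority', 'default')}")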
34
config/traefik/traefik_config.yml
Normal file
@@ -0,0 +1,34 @@
|
||||
api:
|
||||
insecure: true
|
||||
dashboard: true
|
||||
|
||||
providers:
|
||||
file:
|
||||
directory: "/var/dynamic"
|
||||
watch: true
|
||||
|
||||
experimental:
|
||||
plugins:
|
||||
badger:
|
||||
moduleName: "github.com/fosrl/badger"
|
||||
version: "v1.2.0"
|
||||
|
||||
log:
|
||||
level: "DEBUG"
|
||||
format: "common"
|
||||
maxSize: 100
|
||||
maxBackups: 3
|
||||
maxAge: 3
|
||||
compress: true
|
||||
|
||||
entryPoints:
|
||||
web:
|
||||
address: ":80"
|
||||
websecure:
|
||||
address: ":9443"
|
||||
transport:
|
||||
respondingTimeouts:
|
||||
readTimeout: "30m"
|
||||
|
||||
serversTransport:
|
||||
insecureSkipVerify: true
|
||||
15
docker-compose.drizzle.yml
Normal file
@@ -0,0 +1,15 @@
|
||||
services:
|
||||
drizzle-gateway:
|
||||
image: ghcr.io/drizzle-team/gateway:latest
|
||||
ports:
|
||||
- "4984:4983"
|
||||
depends_on:
|
||||
- db
|
||||
environment:
|
||||
- STORE_PATH=/app
|
||||
- DATABASE_URL=postgresql://postgres:password@db:5432/postgres
|
||||
volumes:
|
||||
- drizzle-gateway-data:/app
|
||||
|
||||
volumes:
|
||||
drizzle-gateway-data:
|
||||
@@ -20,10 +20,9 @@ services:
|
||||
pangolin:
|
||||
condition: service_healthy
|
||||
command:
|
||||
- --reachableAt=http://gerbil:3003
|
||||
- --reachableAt=http://gerbil:3004
|
||||
- --generateAndSaveKeyTo=/var/config/key
|
||||
- --remoteConfig=http://pangolin:3001/api/v1/gerbil/get-config
|
||||
- --reportBandwidthTo=http://pangolin:3001/api/v1/gerbil/receive-bandwidth
|
||||
- --remoteConfig=http://pangolin:3001/api/v1/
|
||||
volumes:
|
||||
- ./config/:/var/config
|
||||
cap_add:
|
||||
|
||||
@@ -7,6 +7,15 @@ services:
|
||||
POSTGRES_DB: postgres # Default database name
|
||||
POSTGRES_USER: postgres # Default user
|
||||
POSTGRES_PASSWORD: password # Default password (change for production!)
|
||||
volumes:
|
||||
- ./config/postgres:/var/lib/postgresql/data
|
||||
ports:
|
||||
- "5432:5432" # Map host port 5432 to container port 5432
|
||||
restart: no
|
||||
restart: no
|
||||
|
||||
redis:
|
||||
image: redis:latest # Use the latest Redis image
|
||||
container_name: dev_redis # Name your Redis container
|
||||
ports:
|
||||
- "6379:6379" # Map host port 6379 to container port 6379
|
||||
restart: no
|
||||
@@ -13,7 +13,6 @@ services:
|
||||
environment:
|
||||
- NODE_ENV=development
|
||||
- ENVIRONMENT=dev
|
||||
- DB_TYPE=pg
|
||||
volumes:
|
||||
# Mount source code for hot reload
|
||||
- ./src:/app/src
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
import { defineConfig } from "drizzle-kit";
|
||||
import path from "path";
|
||||
|
||||
const schema = [
|
||||
path.join("server", "db", "pg", "schema"),
|
||||
];
|
||||
|
||||
export default defineConfig({
|
||||
dialect: "postgresql",
|
||||
schema: [path.join("server", "db", "pg", "schema.ts")],
|
||||
schema: schema,
|
||||
out: path.join("server", "migrations"),
|
||||
verbose: true,
|
||||
dbCredentials: {
|
||||
|
||||
@@ -2,9 +2,13 @@ import { APP_PATH } from "@server/lib/consts";
|
||||
import { defineConfig } from "drizzle-kit";
|
||||
import path from "path";
|
||||
|
||||
const schema = [
|
||||
path.join("server", "db", "sqlite", "schema"),
|
||||
];
|
||||
|
||||
export default defineConfig({
|
||||
dialect: "sqlite",
|
||||
schema: path.join("server", "db", "sqlite", "schema.ts"),
|
||||
schema: schema,
|
||||
out: path.join("server", "migrations"),
|
||||
verbose: true,
|
||||
dbCredentials: {
|
||||
|
||||
214
esbuild.mjs
@@ -2,8 +2,9 @@ import esbuild from "esbuild";
|
||||
import yargs from "yargs";
|
||||
import { hideBin } from "yargs/helpers";
|
||||
import { nodeExternalsPlugin } from "esbuild-node-externals";
|
||||
import path from "path";
|
||||
import fs from "fs";
|
||||
// import { glob } from "glob";
|
||||
// import path from "path";
|
||||
|
||||
const banner = `
|
||||
// patch __dirname
|
||||
@@ -18,7 +19,7 @@ const require = topLevelCreateRequire(import.meta.url);
|
||||
`;
|
||||
|
||||
const argv = yargs(hideBin(process.argv))
|
||||
.usage("Usage: $0 -entry [string] -out [string]")
|
||||
.usage("Usage: $0 -entry [string] -out [string] -build [string]")
|
||||
.option("entry", {
|
||||
alias: "e",
|
||||
describe: "Entry point file",
|
||||
@@ -31,6 +32,13 @@ const argv = yargs(hideBin(process.argv))
|
||||
type: "string",
|
||||
demandOption: true,
|
||||
})
|
||||
.option("build", {
|
||||
alias: "b",
|
||||
describe: "Build type (oss, saas, enterprise)",
|
||||
type: "string",
|
||||
choices: ["oss", "saas", "enterprise"],
|
||||
default: "oss",
|
||||
})
|
||||
.help()
|
||||
.alias("help", "h").argv;
|
||||
|
||||
@@ -46,27 +54,223 @@ function getPackagePaths() {
|
||||
return ["package.json"];
|
||||
}
|
||||
|
||||
// Plugin to guard against bad imports from #private
|
||||
function privateImportGuardPlugin() {
|
||||
return {
|
||||
name: "private-import-guard",
|
||||
setup(build) {
|
||||
const violations = [];
|
||||
|
||||
build.onResolve({ filter: /^#private\// }, (args) => {
|
||||
const importingFile = args.importer;
|
||||
|
||||
// Check if the importing file is NOT in server/private
|
||||
const normalizedImporter = path.normalize(importingFile);
|
||||
const isInServerPrivate = normalizedImporter.includes(path.normalize("server/private"));
|
||||
|
||||
if (!isInServerPrivate) {
|
||||
const violation = {
|
||||
file: importingFile,
|
||||
importPath: args.path,
|
||||
resolveDir: args.resolveDir
|
||||
};
|
||||
violations.push(violation);
|
||||
|
||||
console.log(`PRIVATE IMPORT VIOLATION:`);
|
||||
console.log(` File: ${importingFile}`);
|
||||
console.log(` Import: ${args.path}`);
|
||||
console.log(` Resolve dir: ${args.resolveDir || 'N/A'}`);
|
||||
console.log('');
|
||||
}
|
||||
|
||||
// Return null to let the default resolver handle it
|
||||
return null;
|
||||
});
|
||||
|
||||
build.onEnd((result) => {
|
||||
if (violations.length > 0) {
|
||||
console.log(`\nSUMMARY: Found ${violations.length} private import violation(s):`);
|
||||
violations.forEach((v, i) => {
|
||||
console.log(` ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`);
|
||||
});
|
||||
console.log('');
|
||||
|
||||
result.errors.push({
|
||||
text: `Private import violations detected: ${violations.length} violation(s) found`,
|
||||
location: null,
|
||||
notes: violations.map(v => ({
|
||||
text: `${path.relative(process.cwd(), v.file)} imports ${v.importPath}`,
|
||||
location: null
|
||||
}))
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Plugin to guard against bad imports from #private
|
||||
function dynamicImportGuardPlugin() {
|
||||
return {
|
||||
name: "dynamic-import-guard",
|
||||
setup(build) {
|
||||
const violations = [];
|
||||
|
||||
build.onResolve({ filter: /^#dynamic\// }, (args) => {
|
||||
const importingFile = args.importer;
|
||||
|
||||
// Check if the importing file is NOT in server/private
|
||||
const normalizedImporter = path.normalize(importingFile);
|
||||
const isInServerPrivate = normalizedImporter.includes(path.normalize("server/private"));
|
||||
|
||||
if (isInServerPrivate) {
|
||||
const violation = {
|
||||
file: importingFile,
|
||||
importPath: args.path,
|
||||
resolveDir: args.resolveDir
|
||||
};
|
||||
violations.push(violation);
|
||||
|
||||
console.log(`DYNAMIC IMPORT VIOLATION:`);
|
||||
console.log(` File: ${importingFile}`);
|
||||
console.log(` Import: ${args.path}`);
|
||||
console.log(` Resolve dir: ${args.resolveDir || 'N/A'}`);
|
||||
console.log('');
|
||||
}
|
||||
|
||||
// Return null to let the default resolver handle it
|
||||
return null;
|
||||
});
|
||||
|
||||
build.onEnd((result) => {
|
||||
if (violations.length > 0) {
|
||||
console.log(`\nSUMMARY: Found ${violations.length} dynamic import violation(s):`);
|
||||
violations.forEach((v, i) => {
|
||||
console.log(` ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`);
|
||||
});
|
||||
console.log('');
|
||||
|
||||
result.errors.push({
|
||||
text: `Dynamic import violations detected: ${violations.length} violation(s) found`,
|
||||
location: null,
|
||||
notes: violations.map(v => ({
|
||||
text: `${path.relative(process.cwd(), v.file)} imports ${v.importPath}`,
|
||||
location: null
|
||||
}))
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Plugin to dynamically switch imports based on build type
|
||||
function dynamicImportSwitcherPlugin(buildValue) {
|
||||
return {
|
||||
name: "dynamic-import-switcher",
|
||||
setup(build) {
|
||||
const switches = [];
|
||||
|
||||
build.onStart(() => {
|
||||
console.log(`Dynamic import switcher using build type: ${buildValue}`);
|
||||
});
|
||||
|
||||
build.onResolve({ filter: /^#dynamic\// }, (args) => {
|
||||
// Extract the path after #dynamic/
|
||||
const dynamicPath = args.path.replace(/^#dynamic\//, '');
|
||||
|
||||
// Determine the replacement based on build type
|
||||
let replacement;
|
||||
if (buildValue === "oss") {
|
||||
replacement = `#open/${dynamicPath}`;
|
||||
} else if (buildValue === "saas" || buildValue === "enterprise") {
|
||||
replacement = `#closed/${dynamicPath}`; // We use #closed here so that the route guards don't complain after it's been changed, but this is the same as #private
|
||||
} else {
|
||||
console.warn(`Unknown build type '${buildValue}', defaulting to #open/`);
|
||||
replacement = `#open/${dynamicPath}`;
|
||||
}
|
||||
|
||||
const switchInfo = {
|
||||
file: args.importer,
|
||||
originalPath: args.path,
|
||||
replacementPath: replacement,
|
||||
buildType: buildValue
|
||||
};
|
||||
switches.push(switchInfo);
|
||||
|
||||
console.log(`DYNAMIC IMPORT SWITCH:`);
|
||||
console.log(` File: ${args.importer}`);
|
||||
console.log(` Original: ${args.path}`);
|
||||
console.log(` Switched to: ${replacement} (build: ${buildValue})`);
|
||||
console.log('');
|
||||
|
||||
// Rewrite the import path and let the normal resolution continue
|
||||
return build.resolve(replacement, {
|
||||
importer: args.importer,
|
||||
namespace: args.namespace,
|
||||
resolveDir: args.resolveDir,
|
||||
kind: args.kind
|
||||
});
|
||||
});
|
||||
|
||||
build.onEnd((result) => {
|
||||
if (switches.length > 0) {
|
||||
console.log(`\nDYNAMIC IMPORT SUMMARY: Switched ${switches.length} import(s) for build type '${buildValue}':`);
|
||||
switches.forEach((s, i) => {
|
||||
console.log(` ${i + 1}. ${path.relative(process.cwd(), s.file)}`);
|
||||
console.log(` ${s.originalPath} → ${s.replacementPath}`);
|
||||
});
|
||||
console.log('');
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
esbuild
|
||||
.build({
|
||||
entryPoints: [argv.entry],
|
||||
bundle: true,
|
||||
outfile: argv.out,
|
||||
format: "esm",
|
||||
minify: true,
|
||||
minify: false,
|
||||
banner: {
|
||||
js: banner,
|
||||
},
|
||||
platform: "node",
|
||||
external: ["body-parser"],
|
||||
plugins: [
|
||||
privateImportGuardPlugin(),
|
||||
dynamicImportGuardPlugin(),
|
||||
dynamicImportSwitcherPlugin(argv.build),
|
||||
nodeExternalsPlugin({
|
||||
packagePath: getPackagePaths(),
|
||||
}),
|
||||
],
|
||||
sourcemap: true,
|
||||
sourcemap: "inline",
|
||||
target: "node22",
|
||||
})
|
||||
.then(() => {
|
||||
.then((result) => {
|
||||
// Check if there were any errors in the build result
|
||||
if (result.errors && result.errors.length > 0) {
|
||||
console.error(`Build failed with ${result.errors.length} error(s):`);
|
||||
result.errors.forEach((error, i) => {
|
||||
console.error(`${i + 1}. ${error.text}`);
|
||||
if (error.notes) {
|
||||
error.notes.forEach(note => {
|
||||
console.error(` - ${note.text}`);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// remove the output file if it was created
|
||||
if (fs.existsSync(argv.out)) {
|
||||
fs.unlinkSync(argv.out);
|
||||
}
|
||||
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
console.log("Build completed successfully");
|
||||
})
|
||||
.catch((error) => {
|
||||
|
||||
@@ -18,7 +18,11 @@ put-back:
|
||||
mv main.go.bak main.go
|
||||
|
||||
dev-update-versions:
|
||||
PANGOLIN_VERSION=$$(curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name') && \
|
||||
if [ -z "$(tag)" ]; then \
|
||||
PANGOLIN_VERSION=$$(curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name'); \
|
||||
else \
|
||||
PANGOLIN_VERSION=$(tag); \
|
||||
fi && \
|
||||
GERBIL_VERSION=$$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name') && \
|
||||
BADGER_VERSION=$$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name') && \
|
||||
echo "Latest versions - Pangolin: $$PANGOLIN_VERSION, Gerbil: $$GERBIL_VERSION, Badger: $$BADGER_VERSION" && \
|
||||
|
||||
@@ -37,15 +37,28 @@ type DynamicConfig struct {
|
||||
} `yaml:"http"`
|
||||
}
|
||||
|
||||
// ConfigValues holds the extracted configuration values
|
||||
type ConfigValues struct {
|
||||
// TraefikConfigValues holds the extracted configuration values
|
||||
type TraefikConfigValues struct {
|
||||
DashboardDomain string
|
||||
LetsEncryptEmail string
|
||||
BadgerVersion string
|
||||
}
|
||||
|
||||
// AppConfig represents the app section of the config.yml
|
||||
type AppConfig struct {
|
||||
App struct {
|
||||
DashboardURL string `yaml:"dashboard_url"`
|
||||
LogLevel string `yaml:"log_level"`
|
||||
} `yaml:"app"`
|
||||
}
|
||||
|
||||
type AppConfigValues struct {
|
||||
DashboardURL string
|
||||
LogLevel string
|
||||
}
|
||||
|
||||
// ReadTraefikConfig reads and extracts values from Traefik configuration files
|
||||
func ReadTraefikConfig(mainConfigPath, dynamicConfigPath string) (*ConfigValues, error) {
|
||||
func ReadTraefikConfig(mainConfigPath string) (*TraefikConfigValues, error) {
|
||||
// Read main config file
|
||||
mainConfigData, err := os.ReadFile(mainConfigPath)
|
||||
if err != nil {
|
||||
@@ -57,48 +70,33 @@ func ReadTraefikConfig(mainConfigPath, dynamicConfigPath string) (*ConfigValues,
|
||||
return nil, fmt.Errorf("error parsing main config file: %w", err)
|
||||
}
|
||||
|
||||
// Read dynamic config file
|
||||
dynamicConfigData, err := os.ReadFile(dynamicConfigPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading dynamic config file: %w", err)
|
||||
}
|
||||
|
||||
var dynamicConfig DynamicConfig
|
||||
if err := yaml.Unmarshal(dynamicConfigData, &dynamicConfig); err != nil {
|
||||
return nil, fmt.Errorf("error parsing dynamic config file: %w", err)
|
||||
}
|
||||
|
||||
// Extract values
|
||||
values := &ConfigValues{
|
||||
values := &TraefikConfigValues{
|
||||
BadgerVersion: mainConfig.Experimental.Plugins.Badger.Version,
|
||||
LetsEncryptEmail: mainConfig.CertificatesResolvers.LetsEncrypt.Acme.Email,
|
||||
}
|
||||
|
||||
// Extract DashboardDomain from router rules
|
||||
// Look for it in the main router rules
|
||||
for _, router := range dynamicConfig.HTTP.Routers {
|
||||
if router.Rule != "" {
|
||||
// Extract domain from Host(`mydomain.com`)
|
||||
if domain := extractDomainFromRule(router.Rule); domain != "" {
|
||||
values.DashboardDomain = domain
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// extractDomainFromRule extracts the domain from a router rule
|
||||
func extractDomainFromRule(rule string) string {
|
||||
// Look for the Host(`mydomain.com`) pattern
|
||||
if start := findPattern(rule, "Host(`"); start != -1 {
|
||||
end := findPattern(rule[start:], "`)")
|
||||
if end != -1 {
|
||||
return rule[start+6 : start+end]
|
||||
}
|
||||
func ReadAppConfig(configPath string) (*AppConfigValues, error) {
|
||||
// Read config file
|
||||
configData, err := os.ReadFile(configPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading config file: %w", err)
|
||||
}
|
||||
return ""
|
||||
|
||||
var appConfig AppConfig
|
||||
if err := yaml.Unmarshal(configData, &appConfig); err != nil {
|
||||
return nil, fmt.Errorf("error parsing config file: %w", err)
|
||||
}
|
||||
|
||||
values := &AppConfigValues{
|
||||
DashboardURL: appConfig.App.DashboardURL,
|
||||
LogLevel: appConfig.App.LogLevel,
|
||||
}
|
||||
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// findPattern finds the start of a pattern in a string
|
||||
|
||||
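The old ReadTraefikConfig path derived DashboardDomain by pulling the domain out of a Traefik rule of the form Host(`example.com`), while the new ReadAppConfig reads dashboard_url from config.yml directly. For reference, the rule-parsing step amounts to the following — a Python sketch of the pattern, not part of the installer:

import re

def extract_domain_from_rule(rule):
    """Return the domain inside Host(`...`), or "" if the rule has no Host matcher."""
    match = re.search(r"Host\(`([^`]+)`\)", rule)
    return match.group(1) if match else ""

assert extract_domain_from_rule("Host(`pangolin.example.com`)") == "pangolin.example.com"
assert extract_domain_from_rule("PathPrefix(`/api/v1`)") == ""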
@@ -1,14 +1,19 @@
|
||||
# To see all available options, please visit the docs:
|
||||
# https://docs.digpangolin.com/self-host/dns-and-networking
|
||||
# https://docs.pangolin.net/
|
||||
|
||||
gerbil:
|
||||
start_port: 51820
|
||||
base_endpoint: "{{.DashboardDomain}}"
|
||||
|
||||
app:
|
||||
dashboard_url: "https://{{.DashboardDomain}}"
|
||||
log_level: "info"
|
||||
telemetry:
|
||||
anonymous_usage: true
|
||||
|
||||
domains:
|
||||
domain1:
|
||||
base_domain: "{{.BaseDomain}}"
|
||||
cert_resolver: "letsencrypt"
|
||||
|
||||
server:
|
||||
secret: "{{.Secret}}"
|
||||
@@ -17,11 +22,7 @@ server:
|
||||
methods: ["GET", "POST", "PUT", "DELETE", "PATCH"]
|
||||
allowed_headers: ["X-CSRF-Token", "Content-Type"]
|
||||
credentials: false
|
||||
|
||||
gerbil:
|
||||
start_port: 51820
|
||||
base_endpoint: "{{.DashboardDomain}}"
|
||||
|
||||
{{if .EnableGeoblocking}}maxmind_db_path: "./config/GeoLite2-Country.mmdb"{{end}}
|
||||
{{if .EnableEmail}}
|
||||
email:
|
||||
smtp_host: "{{.EmailSMTPHost}}"
|
||||
@@ -30,7 +31,6 @@ email:
|
||||
smtp_pass: "{{.EmailSMTPPass}}"
|
||||
no_reply: "{{.EmailNoReply}}"
|
||||
{{end}}
|
||||
|
||||
flags:
|
||||
require_email_verification: {{.EnableEmail}}
|
||||
disable_signup_without_invite: true
|
||||
|
||||
@@ -20,10 +20,9 @@ services:
|
||||
pangolin:
|
||||
condition: service_healthy
|
||||
command:
|
||||
- --reachableAt=http://gerbil:3003
|
||||
- --reachableAt=http://gerbil:3004
|
||||
- --generateAndSaveKeyTo=/var/config/key
|
||||
- --remoteConfig=http://pangolin:3001/api/v1/gerbil/get-config
|
||||
- --reportBandwidthTo=http://pangolin:3001/api/v1/gerbil/receive-bandwidth
|
||||
- --remoteConfig=http://pangolin:3001/api/v1/
|
||||
volumes:
|
||||
- ./config/:/var/config
|
||||
cap_add:
|
||||
@@ -32,8 +31,8 @@ services:
|
||||
ports:
|
||||
- 51820:51820/udp
|
||||
- 21820:21820/udp
|
||||
- 443:443 # Port for traefik because of the network_mode
|
||||
- 80:80 # Port for traefik because of the network_mode
|
||||
- 443:443
|
||||
- 80:80
|
||||
{{end}}
|
||||
traefik:
|
||||
image: docker.io/traefik:v3.5
|
||||
@@ -60,4 +59,4 @@ networks:
|
||||
default:
|
||||
driver: bridge
|
||||
name: pangolin
|
||||
{{if .EnableIPv6}} enable_ipv6: true{{end}}
|
||||
{{if .EnableIPv6}} enable_ipv6: true{{end}}
|
||||
@@ -51,3 +51,12 @@ http:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://pangolin:3000" # API/WebSocket server
|
||||
|
||||
tcp:
|
||||
serversTransports:
|
||||
pp-transport-v1:
|
||||
proxyProtocol:
|
||||
version: 1
|
||||
pp-transport-v2:
|
||||
proxyProtocol:
|
||||
version: 2
|
||||
@@ -46,3 +46,6 @@ entryPoints:
|
||||
|
||||
serversTransport:
|
||||
insecureSkipVerify: true
|
||||
|
||||
ping:
|
||||
entryPoint: "web"
|
||||
332
install/containers.go
Normal file
@@ -0,0 +1,332 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func waitForContainer(containerName string, containerType SupportedContainer) error {
|
||||
maxAttempts := 30
|
||||
retryInterval := time.Second * 2
|
||||
|
||||
for attempt := 0; attempt < maxAttempts; attempt++ {
|
||||
// Check if container is running
|
||||
cmd := exec.Command(string(containerType), "container", "inspect", "-f", "{{.State.Running}}", containerName)
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
// If the container doesn't exist or there's another error, wait and retry
|
||||
time.Sleep(retryInterval)
|
||||
continue
|
||||
}
|
||||
|
||||
isRunning := strings.TrimSpace(out.String()) == "true"
|
||||
if isRunning {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Container exists but isn't running yet, wait and retry
|
||||
time.Sleep(retryInterval)
|
||||
}
|
||||
|
||||
return fmt.Errorf("container %s did not start within %v seconds", containerName, maxAttempts*int(retryInterval.Seconds()))
|
||||
}
|
||||
|
||||
func installDocker() error {
|
||||
// Detect Linux distribution
|
||||
cmd := exec.Command("cat", "/etc/os-release")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to detect Linux distribution: %v", err)
|
||||
}
|
||||
osRelease := string(output)
|
||||
|
||||
// Detect system architecture
|
||||
archCmd := exec.Command("uname", "-m")
|
||||
archOutput, err := archCmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to detect system architecture: %v", err)
|
||||
}
|
||||
arch := strings.TrimSpace(string(archOutput))
|
||||
|
||||
// Map architecture to Docker's architecture naming
|
||||
var dockerArch string
|
||||
switch arch {
|
||||
case "x86_64":
|
||||
dockerArch = "amd64"
|
||||
case "aarch64":
|
||||
dockerArch = "arm64"
|
||||
default:
|
||||
return fmt.Errorf("unsupported architecture: %s", arch)
|
||||
}
|
||||
|
||||
var installCmd *exec.Cmd
|
||||
switch {
|
||||
case strings.Contains(osRelease, "ID=ubuntu"):
|
||||
installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
|
||||
apt-get update &&
|
||||
apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
|
||||
echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
|
||||
apt-get update &&
|
||||
apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
|
||||
`, dockerArch))
|
||||
case strings.Contains(osRelease, "ID=debian"):
|
||||
installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
|
||||
apt-get update &&
|
||||
apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&
|
||||
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
|
||||
echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
|
||||
apt-get update &&
|
||||
apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
|
||||
`, dockerArch))
|
||||
case strings.Contains(osRelease, "ID=fedora"):
|
||||
// Detect Fedora version to handle DNF 5 changes
|
||||
versionCmd := exec.Command("bash", "-c", "grep VERSION_ID /etc/os-release | cut -d'=' -f2 | tr -d '\"'")
|
||||
versionOutput, err := versionCmd.Output()
|
||||
var fedoraVersion int
|
||||
if err == nil {
|
||||
if v, parseErr := strconv.Atoi(strings.TrimSpace(string(versionOutput))); parseErr == nil {
|
||||
fedoraVersion = v
|
||||
}
|
||||
}
|
||||
|
||||
// Use appropriate DNF syntax based on version
|
||||
var repoCmd string
|
||||
if fedoraVersion >= 41 {
|
||||
// DNF 5 syntax for Fedora 41+
|
||||
repoCmd = "dnf config-manager addrepo --from-repofile=https://download.docker.com/linux/fedora/docker-ce.repo"
|
||||
} else {
|
||||
// DNF 4 syntax for Fedora < 41
|
||||
repoCmd = "dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo"
|
||||
}
|
||||
|
||||
installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
|
||||
dnf -y install dnf-plugins-core &&
|
||||
%s &&
|
||||
dnf install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
|
||||
`, repoCmd))
|
||||
case strings.Contains(osRelease, "ID=opensuse") || strings.Contains(osRelease, "ID=\"opensuse-"):
|
||||
installCmd = exec.Command("bash", "-c", `
|
||||
zypper install -y docker docker-compose &&
|
||||
systemctl enable docker
|
||||
`)
|
||||
case strings.Contains(osRelease, "ID=rhel") || strings.Contains(osRelease, "ID=\"rhel"):
|
||||
installCmd = exec.Command("bash", "-c", `
|
||||
dnf remove -y runc &&
|
||||
dnf -y install yum-utils &&
|
||||
dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo &&
|
||||
dnf install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin &&
|
||||
systemctl enable docker
|
||||
`)
|
||||
case strings.Contains(osRelease, "ID=amzn"):
|
||||
installCmd = exec.Command("bash", "-c", `
|
||||
yum update -y &&
|
||||
yum install -y docker &&
|
||||
systemctl enable docker &&
|
||||
usermod -a -G docker ec2-user
|
||||
`)
|
||||
default:
|
||||
return fmt.Errorf("unsupported Linux distribution")
|
||||
}
|
||||
|
||||
installCmd.Stdout = os.Stdout
|
||||
installCmd.Stderr = os.Stderr
|
||||
return installCmd.Run()
|
||||
}
|
||||
|
||||
func startDockerService() error {
|
||||
if runtime.GOOS == "linux" {
|
||||
cmd := exec.Command("systemctl", "enable", "--now", "docker")
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
} else if runtime.GOOS == "darwin" {
|
||||
// On macOS, Docker is usually started via the Docker Desktop application
|
||||
fmt.Println("Please start Docker Desktop manually on macOS.")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unsupported operating system for starting Docker service")
|
||||
}
|
||||
|
||||
func isDockerInstalled() bool {
|
||||
return isContainerInstalled("docker")
|
||||
}
|
||||
|
||||
func isPodmanInstalled() bool {
|
||||
return isContainerInstalled("podman") && isContainerInstalled("podman-compose")
|
||||
}
|
||||
|
||||
func isContainerInstalled(container string) bool {
|
||||
cmd := exec.Command(container, "--version")
|
||||
if err := cmd.Run(); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func isUserInDockerGroup() bool {
|
||||
if runtime.GOOS == "darwin" {
|
||||
// Docker group is not applicable on macOS
|
||||
// So we assume that the user can run Docker commands
|
||||
return true
|
||||
}
|
||||
|
||||
if os.Geteuid() == 0 {
|
||||
return true // Root user can run Docker commands anyway
|
||||
}
|
||||
|
||||
// Check if the current user is in the docker group
|
||||
if dockerGroup, err := user.LookupGroup("docker"); err == nil {
|
||||
if currentUser, err := user.Current(); err == nil {
|
||||
if currentUserGroupIds, err := currentUser.GroupIds(); err == nil {
|
||||
for _, groupId := range currentUserGroupIds {
|
||||
if groupId == dockerGroup.Gid {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Eventually, if any of the checks fail, we assume the user cannot run Docker commands
|
||||
return false
|
||||
}
|
||||
|
||||
// isDockerRunning checks if the Docker daemon is running by using the `docker info` command.
|
||||
func isDockerRunning() bool {
|
||||
cmd := exec.Command("docker", "info")
|
||||
if err := cmd.Run(); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// executeDockerComposeCommandWithArgs executes the appropriate docker command with arguments supplied
|
||||
func executeDockerComposeCommandWithArgs(args ...string) error {
|
||||
var cmd *exec.Cmd
|
||||
var useNewStyle bool
|
||||
|
||||
if !isDockerInstalled() {
|
||||
return fmt.Errorf("docker is not installed")
|
||||
}
|
||||
|
||||
checkCmd := exec.Command("docker", "compose", "version")
|
||||
if err := checkCmd.Run(); err == nil {
|
||||
useNewStyle = true
|
||||
} else {
|
||||
checkCmd = exec.Command("docker-compose", "version")
|
||||
if err := checkCmd.Run(); err == nil {
|
||||
useNewStyle = false
|
||||
} else {
|
||||
return fmt.Errorf("neither 'docker compose' nor 'docker-compose' command is available")
|
||||
}
|
||||
}
|
||||
|
||||
if useNewStyle {
|
||||
cmd = exec.Command("docker", append([]string{"compose"}, args...)...)
|
||||
} else {
|
||||
cmd = exec.Command("docker-compose", args...)
|
||||
}
|
||||
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
// pullContainers pulls the containers using the appropriate command.
|
||||
func pullContainers(containerType SupportedContainer) error {
|
||||
fmt.Println("Pulling the container images...")
|
||||
if containerType == Podman {
|
||||
if err := run("podman-compose", "-f", "docker-compose.yml", "pull"); err != nil {
|
||||
return fmt.Errorf("failed to pull the containers: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if containerType == Docker {
|
||||
if err := executeDockerComposeCommandWithArgs("-f", "docker-compose.yml", "pull", "--policy", "always"); err != nil {
|
||||
return fmt.Errorf("failed to pull the containers: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Unsupported container type: %s", containerType)
|
||||
}
|
||||
|
||||
// startContainers starts the containers using the appropriate command.
|
||||
func startContainers(containerType SupportedContainer) error {
|
||||
fmt.Println("Starting containers...")
|
||||
|
||||
if containerType == Podman {
|
||||
if err := run("podman-compose", "-f", "docker-compose.yml", "up", "-d", "--force-recreate"); err != nil {
|
||||
return fmt.Errorf("failed start containers: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if containerType == Docker {
|
||||
if err := executeDockerComposeCommandWithArgs("-f", "docker-compose.yml", "up", "-d", "--force-recreate"); err != nil {
|
||||
return fmt.Errorf("failed to start containers: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Unsupported container type: %s", containerType)
|
||||
}
|
||||
|
||||
// stopContainers stops the containers using the appropriate command.
|
||||
func stopContainers(containerType SupportedContainer) error {
|
||||
fmt.Println("Stopping containers...")
|
||||
if containerType == Podman {
|
||||
if err := run("podman-compose", "-f", "docker-compose.yml", "down"); err != nil {
|
||||
return fmt.Errorf("failed to stop containers: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if containerType == Docker {
|
||||
if err := executeDockerComposeCommandWithArgs("-f", "docker-compose.yml", "down"); err != nil {
|
||||
return fmt.Errorf("failed to stop containers: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Unsupported container type: %s", containerType)
|
||||
}
|
||||
|
||||
// restartContainer restarts a specific container using the appropriate command.
|
||||
func restartContainer(container string, containerType SupportedContainer) error {
|
||||
fmt.Println("Restarting containers...")
|
||||
if containerType == Podman {
|
||||
if err := run("podman-compose", "-f", "docker-compose.yml", "restart"); err != nil {
|
||||
return fmt.Errorf("failed to stop the container \"%s\": %v", container, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if containerType == Docker {
|
||||
if err := executeDockerComposeCommandWithArgs("-f", "docker-compose.yml", "restart", container); err != nil {
|
||||
return fmt.Errorf("failed to stop the container \"%s\": %v", container, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Unsupported container type: %s", containerType)
|
||||
}
|
||||
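waitForContainer above polls `docker container inspect -f '{{.State.Running}}'` (or the podman equivalent) until the container reports running or the attempts run out. The same polling loop, sketched in Python purely for illustration — it is not part of the installer:

import subprocess
import time

def wait_for_container(name, engine="docker", max_attempts=30, retry_interval=2.0):
    """Poll `<engine> container inspect` until the container is running or attempts run out."""
    for _ in range(max_attempts):
        result = subprocess.run(
            [engine, "container", "inspect", "-f", "{{.State.Running}}", name],
            capture_output=True, text=True,
        )
        if result.returncode == 0 and result.stdout.strip() == "true":
            return True
        time.sleep(retry_interval)  # container missing or not running yet; retry
    return False

# e.g. wait_for_container("pangolin")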
180
install/get-installer.sh
Normal file
@@ -0,0 +1,180 @@
#!/bin/bash

# Get installer - Cross-platform installation script
# Usage: curl -fsSL https://raw.githubusercontent.com/fosrl/installer/refs/heads/main/get-installer.sh | bash

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# GitHub repository info
REPO="fosrl/pangolin"
GITHUB_API_URL="https://api.github.com/repos/${REPO}/releases/latest"

# Function to print colored output
print_status() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Function to get latest version from GitHub API
get_latest_version() {
    local latest_info

    if command -v curl >/dev/null 2>&1; then
        latest_info=$(curl -fsSL "$GITHUB_API_URL" 2>/dev/null)
    elif command -v wget >/dev/null 2>&1; then
        latest_info=$(wget -qO- "$GITHUB_API_URL" 2>/dev/null)
    else
        print_error "Neither curl nor wget is available. Please install one of them." >&2
        exit 1
    fi

    if [ -z "$latest_info" ]; then
        print_error "Failed to fetch latest version information" >&2
        exit 1
    fi

    # Extract version from JSON response (works without jq)
    local version=$(echo "$latest_info" | grep '"tag_name"' | head -1 | sed 's/.*"tag_name": *"\([^"]*\)".*/\1/')

    if [ -z "$version" ]; then
        print_error "Could not parse version from GitHub API response" >&2
        exit 1
    fi

    # Remove 'v' prefix if present
    version=$(echo "$version" | sed 's/^v//')

    echo "$version"
}

# Detect OS and architecture
detect_platform() {
    local os arch

    # Detect OS - only support Linux
    case "$(uname -s)" in
        Linux*) os="linux" ;;
        *)
            print_error "Unsupported operating system: $(uname -s). Only Linux is supported."
            exit 1
            ;;
    esac

    # Detect architecture - only support amd64 and arm64
    case "$(uname -m)" in
        x86_64|amd64) arch="amd64" ;;
        arm64|aarch64) arch="arm64" ;;
        *)
            print_error "Unsupported architecture: $(uname -m). Only amd64 and arm64 are supported on Linux."
            exit 1
            ;;
    esac

    echo "${os}_${arch}"
}

# Get installation directory
get_install_dir() {
    # Install to the current directory
    local install_dir="$(pwd)"
    if [ ! -d "$install_dir" ]; then
        print_error "Installation directory does not exist: $install_dir"
        exit 1
    fi
    echo "$install_dir"
}

# Download and install installer
install_installer() {
    local platform="$1"
    local install_dir="$2"
    local binary_name="installer_${platform}"

    local download_url="${BASE_URL}/${binary_name}"
    local temp_file="/tmp/installer"
    local final_path="${install_dir}/installer"

    print_status "Downloading installer from ${download_url}"

    # Download the binary
    if command -v curl >/dev/null 2>&1; then
        curl -fsSL "$download_url" -o "$temp_file"
    elif command -v wget >/dev/null 2>&1; then
        wget -q "$download_url" -O "$temp_file"
    else
        print_error "Neither curl nor wget is available. Please install one of them."
        exit 1
    fi

    # Create install directory if it doesn't exist
    mkdir -p "$install_dir"

    # Move binary to install directory
    mv "$temp_file" "$final_path"

    # Make executable
    chmod +x "$final_path"

    print_status "Installer downloaded to ${final_path}"
}

# Verify installation
verify_installation() {
    local install_dir="$1"
    local installer_path="${install_dir}/installer"

    if [ -f "$installer_path" ] && [ -x "$installer_path" ]; then
        print_status "Installation successful!"
        return 0
    else
        print_error "Installation failed. Binary not found or not executable."
        return 1
    fi
}

# Main installation process
main() {
    print_status "Installing latest version of installer..."

    # Get latest version
    print_status "Fetching latest version from GitHub..."
    VERSION=$(get_latest_version)
    print_status "Latest version: v${VERSION}"

    # Set base URL with the fetched version
    BASE_URL="https://github.com/${REPO}/releases/download/${VERSION}"

    # Detect platform
    PLATFORM=$(detect_platform)
    print_status "Detected platform: ${PLATFORM}"

    # Get install directory
    INSTALL_DIR=$(get_install_dir)
    print_status "Install directory: ${INSTALL_DIR}"

    # Install installer
    install_installer "$PLATFORM" "$INSTALL_DIR"

    # Verify installation
    if verify_installation "$INSTALL_DIR"; then
        print_status "Installer is ready to use!"
    else
        exit 1
    fi
}

# Run main function
main "$@"
install/go.mod
@@ -1,10 +1,10 @@
module installer

go 1.24
go 1.24.0

require (
golang.org/x/term v0.33.0
golang.org/x/term v0.36.0
gopkg.in/yaml.v3 v3.0.1
)

require golang.org/x/sys v0.34.0 // indirect
require golang.org/x/sys v0.37.0 // indirect

install/go.sum
@@ -1,7 +1,7 @@
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
install/input.go (new file, 74 lines)
@@ -0,0 +1,74 @@
package main

import (
    "bufio"
    "fmt"
    "strings"
    "syscall"

    "golang.org/x/term"
)

func readString(reader *bufio.Reader, prompt string, defaultValue string) string {
    if defaultValue != "" {
        fmt.Printf("%s (default: %s): ", prompt, defaultValue)
    } else {
        fmt.Print(prompt + ": ")
    }
    input, _ := reader.ReadString('\n')
    input = strings.TrimSpace(input)
    if input == "" {
        return defaultValue
    }
    return input
}

func readStringNoDefault(reader *bufio.Reader, prompt string) string {
    fmt.Print(prompt + ": ")
    input, _ := reader.ReadString('\n')
    return strings.TrimSpace(input)
}

func readPassword(prompt string, reader *bufio.Reader) string {
    if term.IsTerminal(int(syscall.Stdin)) {
        fmt.Print(prompt + ": ")
        // Read password without echo if we're in a terminal
        password, err := term.ReadPassword(int(syscall.Stdin))
        fmt.Println() // Add a newline since ReadPassword doesn't add one
        if err != nil {
            return ""
        }
        input := strings.TrimSpace(string(password))
        if input == "" {
            return readPassword(prompt, reader)
        }
        return input
    } else {
        // Fallback to reading from stdin if not in a terminal
        return readString(reader, prompt, "")
    }
}

func readBool(reader *bufio.Reader, prompt string, defaultValue bool) bool {
    defaultStr := "no"
    if defaultValue {
        defaultStr = "yes"
    }
    input := readString(reader, prompt+" (yes/no)", defaultStr)
    return strings.ToLower(input) == "yes"
}

func readBoolNoDefault(reader *bufio.Reader, prompt string) bool {
    input := readStringNoDefault(reader, prompt+" (yes/no)")
    return strings.ToLower(input) == "yes"
}

func readInt(reader *bufio.Reader, prompt string, defaultValue int) int {
    input := readString(reader, prompt, fmt.Sprintf("%d", defaultValue))
    if input == "" {
        return defaultValue
    }
    value := defaultValue
    fmt.Sscanf(input, "%d", &value)
    return value
}
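As a point of reference, a minimal sketch (not part of the commit) of how these prompt helpers are combined. The function name and the SMTP prompts are assumptions for illustration, while the domain and IPv6 prompts mirror collectUserInput in install/main.go; the sketch assumes it sits in the same package main, which already imports bufio, fmt, and os.

// examplePrompts is a hypothetical demonstration of the helpers above.
func examplePrompts() {
	reader := bufio.NewReader(os.Stdin)
	baseDomain := readString(reader, "Enter your base domain (no subdomain e.g. example.com)", "")
	enableIPv6 := readBool(reader, "Is your server IPv6 capable?", true)
	smtpPort := readInt(reader, "Enter SMTP port", 587)         // illustrative prompt
	smtpPassword := readPassword("Enter SMTP password", reader) // illustrative prompt
	fmt.Println(baseDomain, enableIPv6, smtpPort, smtpPassword)
}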
install/main.go (859 lines)
@@ -2,25 +2,21 @@ package main

import (
"bufio"
"bytes"
"embed"
"fmt"
"io"
"io/fs"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"text/template"
"time"
"net"

"golang.org/x/term"
)

// DO NOT EDIT THIS FUNCTION; IT MATCHED BY REGEX IN CICD
@@ -51,6 +47,7 @@ type Config struct {
InstallGerbil bool
TraefikBouncerKey string
DoCrowdsecInstall bool
EnableGeoblocking bool
Secret string
}

@@ -59,6 +56,7 @@ type SupportedContainer string
const (
Docker SupportedContainer = "docker"
Podman SupportedContainer = "podman"
Undefined SupportedContainer = "undefined"
)

func main() {
@@ -67,28 +65,209 @@ func main() {

fmt.Println("Welcome to the Pangolin installer!")
fmt.Println("This installer will help you set up Pangolin on your server.")
fmt.Println("")
fmt.Println("Please make sure you have the following prerequisites:")
fmt.Println("\nPlease make sure you have the following prerequisites:")
fmt.Println("- Open TCP ports 80 and 443 and UDP ports 51820 and 21820 on your VPS and firewall.")
fmt.Println("- Point your domain to the VPS IP with A records.")
fmt.Println("")
fmt.Println("https://docs.digpangolin.com/self-host/dns-and-networking")
fmt.Println("")
fmt.Println("Lets get started!")
fmt.Println("")
fmt.Println("\nLets get started!")

if os.Geteuid() == 0 { // WE NEED TO BE SUDO TO CHECK THIS
for _, p := range []int{80, 443} {
if err := checkPortsAvailable(p); err != nil {
fmt.Fprintln(os.Stderr, err)

fmt.Printf("Please close any services on ports 80/443 in order to run the installation smoothly")
fmt.Printf("Please close any services on ports 80/443 in order to run the installation smoothly. If you already have the Pangolin stack running, shut them down before proceeding.\n")
os.Exit(1)
}
}
}

reader := bufio.NewReader(os.Stdin)

var config Config
var alreadyInstalled = false

// check if there is already a config file
if _, err := os.Stat("config/config.yml"); err != nil {
config = collectUserInput(reader)

loadVersions(&config)
config.DoCrowdsecInstall = false
config.Secret = generateRandomSecretKey()

fmt.Println("\n=== Generating Configuration Files ===")

if err := createConfigFiles(config); err != nil {
fmt.Printf("Error creating config files: %v\n", err)
os.Exit(1)
}

moveFile("config/docker-compose.yml", "docker-compose.yml")

fmt.Println("\nConfiguration files created successfully!")

// Download MaxMind database if requested
if config.EnableGeoblocking {
fmt.Println("\n=== Downloading MaxMind Database ===")
if err := downloadMaxMindDatabase(); err != nil {
fmt.Printf("Error downloading MaxMind database: %v\n", err)
fmt.Println("You can download it manually later if needed.")
}
}

fmt.Println("\n=== Starting installation ===")

if readBool(reader, "Would you like to install and start the containers?", true) {

config.InstallationContainerType = podmanOrDocker(reader)

if !isDockerInstalled() && runtime.GOOS == "linux" && config.InstallationContainerType == Docker {
if readBool(reader, "Docker is not installed. Would you like to install it?", true) {
installDocker()
// try to start docker service but ignore errors
if err := startDockerService(); err != nil {
fmt.Println("Error starting Docker service:", err)
} else {
fmt.Println("Docker service started successfully!")
}
// wait 10 seconds for docker to start checking if docker is running every 2 seconds
fmt.Println("Waiting for Docker to start...")
for i := 0; i < 5; i++ {
if isDockerRunning() {
fmt.Println("Docker is running!")
break
}
fmt.Println("Docker is not running yet, waiting...")
time.Sleep(2 * time.Second)
}
if !isDockerRunning() {
fmt.Println("Docker is still not running after 10 seconds. Please check the installation.")
os.Exit(1)
}
fmt.Println("Docker installed successfully!")
}
}

if err := pullContainers(config.InstallationContainerType); err != nil {
fmt.Println("Error: ", err)
return
}

if err := startContainers(config.InstallationContainerType); err != nil {
fmt.Println("Error: ", err)
return
}
}

} else {
alreadyInstalled = true
fmt.Println("Looks like you already installed Pangolin!")

// Check if MaxMind database exists and offer to update it
fmt.Println("\n=== MaxMind Database Update ===")
if _, err := os.Stat("config/GeoLite2-Country.mmdb"); err == nil {
fmt.Println("MaxMind GeoLite2 Country database found.")
if readBool(reader, "Would you like to update the MaxMind database to the latest version?", false) {
if err := downloadMaxMindDatabase(); err != nil {
fmt.Printf("Error updating MaxMind database: %v\n", err)
fmt.Println("You can try updating it manually later if needed.")
}
}
} else {
fmt.Println("MaxMind GeoLite2 Country database not found.")
if readBool(reader, "Would you like to download the MaxMind GeoLite2 database for geoblocking functionality?", false) {
if err := downloadMaxMindDatabase(); err != nil {
fmt.Printf("Error downloading MaxMind database: %v\n", err)
fmt.Println("You can try downloading it manually later if needed.")
}
// Now you need to update your config file accordingly to enable geoblocking
fmt.Println("Please remember to update your config/config.yml file to enable geoblocking! \n")
// add maxmind_db_path: "./config/GeoLite2-Country.mmdb" under server
fmt.Println("Add the following line under the 'server' section:")
fmt.Println(" maxmind_db_path: \"./config/GeoLite2-Country.mmdb\"")
}
}
}

if !checkIsCrowdsecInstalledInCompose() {
fmt.Println("\n=== CrowdSec Install ===")
// check if crowdsec is installed
if readBool(reader, "Would you like to install CrowdSec?", false) {
fmt.Println("This installer constitutes a minimal viable CrowdSec deployment. CrowdSec will add extra complexity to your Pangolin installation and may not work to the best of its abilities out of the box. Users are expected to implement configuration adjustments on their own to achieve the best security posture. Consult the CrowdSec documentation for detailed configuration instructions.")

// BUG: crowdsec installation will be skipped if the user chooses to install on the first installation.
if readBool(reader, "Are you willing to manage CrowdSec?", false) {
if config.DashboardDomain == "" {
traefikConfig, err := ReadTraefikConfig("config/traefik/traefik_config.yml")
if err != nil {
fmt.Printf("Error reading config: %v\n", err)
return
}
appConfig, err := ReadAppConfig("config/config.yml")
if err != nil {
fmt.Printf("Error reading config: %v\n", err)
return
}

parsedURL, err := url.Parse(appConfig.DashboardURL)
if err != nil {
fmt.Printf("Error parsing URL: %v\n", err)
return
}

config.DashboardDomain = parsedURL.Hostname()
config.LetsEncryptEmail = traefikConfig.LetsEncryptEmail
config.BadgerVersion = traefikConfig.BadgerVersion

// print the values and check if they are right
fmt.Println("Detected values:")
fmt.Printf("Dashboard Domain: %s\n", config.DashboardDomain)
fmt.Printf("Let's Encrypt Email: %s\n", config.LetsEncryptEmail)
fmt.Printf("Badger Version: %s\n", config.BadgerVersion)

if !readBool(reader, "Are these values correct?", true) {
config = collectUserInput(reader)
}
}

config.InstallationContainerType = podmanOrDocker(reader)

config.DoCrowdsecInstall = true
err := installCrowdsec(config)
if err != nil {
fmt.Printf("Error installing CrowdSec: %v\n", err)
return
}

fmt.Println("CrowdSec installed successfully!")
return
}
}
}

if !alreadyInstalled {
// Setup Token Section
fmt.Println("\n=== Setup Token ===")

// Check if containers were started during this installation
containersStarted := false
if (isDockerInstalled() && config.InstallationContainerType == Docker) ||
(isPodmanInstalled() && config.InstallationContainerType == Podman) {
// Try to fetch and display the token if containers are running
containersStarted = true
printSetupToken(config.InstallationContainerType, config.DashboardDomain)
}

// If containers weren't started or token wasn't found, show instructions
if !containersStarted {
showSetupTokenInstructions(config.InstallationContainerType, config.DashboardDomain)
}
}

fmt.Println("\nInstallation complete!")

fmt.Printf("\nTo complete the initial setup, please visit:\nhttps://%s/auth/initial-setup\n", config.DashboardDomain)
}

func podmanOrDocker(reader *bufio.Reader) SupportedContainer {
inputContainer := readString(reader, "Would you like to run Pangolin as Docker or Podman containers?", "docker")

chosenContainer := Docker
@@ -152,200 +331,7 @@ func main() {
os.Exit(1)
}

var config Config
config.InstallationContainerType = chosenContainer

// check if there is already a config file
if _, err := os.Stat("config/config.yml"); err != nil {
config = collectUserInput(reader)

loadVersions(&config)
config.DoCrowdsecInstall = false
config.Secret = generateRandomSecretKey()

if err := createConfigFiles(config); err != nil {
fmt.Printf("Error creating config files: %v\n", err)
os.Exit(1)
}

moveFile("config/docker-compose.yml", "docker-compose.yml")

if !isDockerInstalled() && runtime.GOOS == "linux" && chosenContainer == Docker {
if readBool(reader, "Docker is not installed. Would you like to install it?", true) {
installDocker()
// try to start docker service but ignore errors
if err := startDockerService(); err != nil {
fmt.Println("Error starting Docker service:", err)
} else {
fmt.Println("Docker service started successfully!")
}
// wait 10 seconds for docker to start checking if docker is running every 2 seconds
fmt.Println("Waiting for Docker to start...")
for i := 0; i < 5; i++ {
if isDockerRunning() {
fmt.Println("Docker is running!")
break
}
fmt.Println("Docker is not running yet, waiting...")
time.Sleep(2 * time.Second)
}
if !isDockerRunning() {
fmt.Println("Docker is still not running after 10 seconds. Please check the installation.")
os.Exit(1)
}
fmt.Println("Docker installed successfully!")
}
}

fmt.Println("\n=== Starting installation ===")

if (isDockerInstalled() && chosenContainer == Docker) ||
(isPodmanInstalled() && chosenContainer == Podman) {
if readBool(reader, "Would you like to install and start the containers?", true) {
if err := pullContainers(chosenContainer); err != nil {
fmt.Println("Error: ", err)
return
}

if err := startContainers(chosenContainer); err != nil {
fmt.Println("Error: ", err)
return
}
}
}
} else {
fmt.Println("Looks like you already installed, so I am going to do the setup...")

// Read existing config to get DashboardDomain
traefikConfig, err := ReadTraefikConfig("config/traefik/traefik_config.yml", "config/traefik/dynamic_config.yml")
if err != nil {
fmt.Printf("Warning: Could not read existing config: %v\n", err)
fmt.Println("You may need to manually enter your domain information.")
config = collectUserInput(reader)
} else {
config.DashboardDomain = traefikConfig.DashboardDomain
config.LetsEncryptEmail = traefikConfig.LetsEncryptEmail
config.BadgerVersion = traefikConfig.BadgerVersion

// Show detected values and allow user to confirm or re-enter
fmt.Println("Detected existing configuration:")
fmt.Printf("Dashboard Domain: %s\n", config.DashboardDomain)
fmt.Printf("Let's Encrypt Email: %s\n", config.LetsEncryptEmail)
fmt.Printf("Badger Version: %s\n", config.BadgerVersion)

if !readBool(reader, "Are these values correct?", true) {
config = collectUserInput(reader)
}
}
}

if !checkIsCrowdsecInstalledInCompose() {
fmt.Println("\n=== CrowdSec Install ===")
// check if crowdsec is installed
if readBool(reader, "Would you like to install CrowdSec?", false) {
fmt.Println("This installer constitutes a minimal viable CrowdSec deployment. CrowdSec will add extra complexity to your Pangolin installation and may not work to the best of its abilities out of the box. Users are expected to implement configuration adjustments on their own to achieve the best security posture. Consult the CrowdSec documentation for detailed configuration instructions.")

// BUG: crowdsec installation will be skipped if the user chooses to install on the first installation.
if readBool(reader, "Are you willing to manage CrowdSec?", false) {
if config.DashboardDomain == "" {
traefikConfig, err := ReadTraefikConfig("config/traefik/traefik_config.yml", "config/traefik/dynamic_config.yml")
if err != nil {
fmt.Printf("Error reading config: %v\n", err)
return
}
config.DashboardDomain = traefikConfig.DashboardDomain
config.LetsEncryptEmail = traefikConfig.LetsEncryptEmail
config.BadgerVersion = traefikConfig.BadgerVersion

// print the values and check if they are right
fmt.Println("Detected values:")
fmt.Printf("Dashboard Domain: %s\n", config.DashboardDomain)
fmt.Printf("Let's Encrypt Email: %s\n", config.LetsEncryptEmail)
fmt.Printf("Badger Version: %s\n", config.BadgerVersion)

if !readBool(reader, "Are these values correct?", true) {
config = collectUserInput(reader)
}
}

config.DoCrowdsecInstall = true
installCrowdsec(config)
}
}
}

// Setup Token Section
fmt.Println("\n=== Setup Token ===")

// Check if containers were started during this installation
containersStarted := false
if (isDockerInstalled() && chosenContainer == Docker) ||
(isPodmanInstalled() && chosenContainer == Podman) {
// Try to fetch and display the token if containers are running
containersStarted = true
printSetupToken(chosenContainer, config.DashboardDomain)
}

// If containers weren't started or token wasn't found, show instructions
if !containersStarted {
showSetupTokenInstructions(chosenContainer, config.DashboardDomain)
}

fmt.Println("Installation complete!")
fmt.Printf("\nTo complete the initial setup, please visit:\nhttps://%s/auth/initial-setup\n", config.DashboardDomain)
}

func readString(reader *bufio.Reader, prompt string, defaultValue string) string {
if defaultValue != "" {
fmt.Printf("%s (default: %s): ", prompt, defaultValue)
} else {
fmt.Print(prompt + ": ")
}
input, _ := reader.ReadString('\n')
input = strings.TrimSpace(input)
if input == "" {
return defaultValue
}
return input
}

func readPassword(prompt string, reader *bufio.Reader) string {
if term.IsTerminal(int(syscall.Stdin)) {
fmt.Print(prompt + ": ")
// Read password without echo if we're in a terminal
password, err := term.ReadPassword(int(syscall.Stdin))
fmt.Println() // Add a newline since ReadPassword doesn't add one
if err != nil {
return ""
}
input := strings.TrimSpace(string(password))
if input == "" {
return readPassword(prompt, reader)
}
return input
} else {
// Fallback to reading from stdin if not in a terminal
return readString(reader, prompt, "")
}
}

func readBool(reader *bufio.Reader, prompt string, defaultValue bool) bool {
defaultStr := "no"
if defaultValue {
defaultStr = "yes"
}
input := readString(reader, prompt+" (yes/no)", defaultStr)
return strings.ToLower(input) == "yes"
}

func readInt(reader *bufio.Reader, prompt string, defaultValue int) int {
input := readString(reader, prompt, fmt.Sprintf("%d", defaultValue))
if input == "" {
return defaultValue
}
value := defaultValue
fmt.Sscanf(input, "%d", &value)
return value
return chosenContainer
}

func collectUserInput(reader *bufio.Reader) Config {
@@ -353,8 +339,9 @@ func collectUserInput(reader *bufio.Reader) Config {

// Basic configuration
fmt.Println("\n=== Basic Configuration ===")

config.BaseDomain = readString(reader, "Enter your base domain (no subdomain e.g. example.com)", "")

// Set default dashboard domain after base domain is collected
defaultDashboardDomain := ""
if config.BaseDomain != "" {
@@ -363,7 +350,6 @@
config.DashboardDomain = readString(reader, "Enter the domain for the Pangolin dashboard", defaultDashboardDomain)
config.LetsEncryptEmail = readString(reader, "Enter email for Let's Encrypt certificates", "")
config.InstallGerbil = readBool(reader, "Do you want to use Gerbil to allow tunneled connections", true)
config.EnableIPv6 = readBool(reader, "Is your server IPv6 capable?", true)

// Email configuration
fmt.Println("\n=== Email Configuration ===")
@@ -382,15 +368,23 @@
fmt.Println("Error: Domain name is required")
os.Exit(1)
}
if config.DashboardDomain == "" {
fmt.Println("Error: Dashboard Domain name is required")
os.Exit(1)
}
if config.LetsEncryptEmail == "" {
fmt.Println("Error: Let's Encrypt email is required")
os.Exit(1)
}

// Advanced configuration

fmt.Println("\n=== Advanced Configuration ===")

config.EnableIPv6 = readBool(reader, "Is your server IPv6 capable?", true)
config.EnableGeoblocking = readBool(reader, "Do you want to download the MaxMind GeoLite2 database for geoblocking functionality?", true)

if config.DashboardDomain == "" {
fmt.Println("Error: Dashboard Domain name is required")
os.Exit(1)
}

return config
}

@@ -470,297 +464,6 @@ func createConfigFiles(config Config) error {
return nil
}

func installDocker() error {
// Detect Linux distribution
cmd := exec.Command("cat", "/etc/os-release")
output, err := cmd.Output()
if err != nil {
return fmt.Errorf("failed to detect Linux distribution: %v", err)
}
osRelease := string(output)

// Detect system architecture
archCmd := exec.Command("uname", "-m")
archOutput, err := archCmd.Output()
if err != nil {
return fmt.Errorf("failed to detect system architecture: %v", err)
}
arch := strings.TrimSpace(string(archOutput))

// Map architecture to Docker's architecture naming
var dockerArch string
switch arch {
case "x86_64":
dockerArch = "amd64"
case "aarch64":
dockerArch = "arm64"
default:
return fmt.Errorf("unsupported architecture: %s", arch)
}

var installCmd *exec.Cmd
switch {
case strings.Contains(osRelease, "ID=ubuntu"):
installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
apt-get update &&
apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
apt-get update &&
apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
`, dockerArch))
case strings.Contains(osRelease, "ID=debian"):
installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
apt-get update &&
apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
apt-get update &&
apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
`, dockerArch))
case strings.Contains(osRelease, "ID=fedora"):
// Detect Fedora version to handle DNF 5 changes
versionCmd := exec.Command("bash", "-c", "grep VERSION_ID /etc/os-release | cut -d'=' -f2 | tr -d '\"'")
versionOutput, err := versionCmd.Output()
var fedoraVersion int
if err == nil {
if v, parseErr := strconv.Atoi(strings.TrimSpace(string(versionOutput))); parseErr == nil {
fedoraVersion = v
}
}

// Use appropriate DNF syntax based on version
var repoCmd string
if fedoraVersion >= 41 {
// DNF 5 syntax for Fedora 41+
repoCmd = "dnf config-manager addrepo --from-repofile=https://download.docker.com/linux/fedora/docker-ce.repo"
} else {
// DNF 4 syntax for Fedora < 41
repoCmd = "dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo"
}

installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
dnf -y install dnf-plugins-core &&
%s &&
dnf install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
`, repoCmd))
case strings.Contains(osRelease, "ID=opensuse") || strings.Contains(osRelease, "ID=\"opensuse-"):
installCmd = exec.Command("bash", "-c", `
zypper install -y docker docker-compose &&
systemctl enable docker
`)
case strings.Contains(osRelease, "ID=rhel") || strings.Contains(osRelease, "ID=\"rhel"):
installCmd = exec.Command("bash", "-c", `
dnf remove -y runc &&
dnf -y install yum-utils &&
dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo &&
dnf install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin &&
systemctl enable docker
`)
case strings.Contains(osRelease, "ID=amzn"):
installCmd = exec.Command("bash", "-c", `
yum update -y &&
yum install -y docker &&
systemctl enable docker &&
usermod -a -G docker ec2-user
`)
default:
return fmt.Errorf("unsupported Linux distribution")
}

installCmd.Stdout = os.Stdout
installCmd.Stderr = os.Stderr
return installCmd.Run()
}

func startDockerService() error {
if runtime.GOOS == "linux" {
cmd := exec.Command("systemctl", "enable", "--now", "docker")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
} else if runtime.GOOS == "darwin" {
// On macOS, Docker is usually started via the Docker Desktop application
fmt.Println("Please start Docker Desktop manually on macOS.")
return nil
}
return fmt.Errorf("unsupported operating system for starting Docker service")
}

func isDockerInstalled() bool {
return isContainerInstalled("docker")
}

func isPodmanInstalled() bool {
return isContainerInstalled("podman") && isContainerInstalled("podman-compose")
}

func isContainerInstalled(container string) bool {
cmd := exec.Command(container, "--version")
if err := cmd.Run(); err != nil {
return false
}
return true
}

func isUserInDockerGroup() bool {
if runtime.GOOS == "darwin" {
// Docker group is not applicable on macOS
// So we assume that the user can run Docker commands
return true
}

if os.Geteuid() == 0 {
return true // Root user can run Docker commands anyway
}

// Check if the current user is in the docker group
if dockerGroup, err := user.LookupGroup("docker"); err == nil {
if currentUser, err := user.Current(); err == nil {
if currentUserGroupIds, err := currentUser.GroupIds(); err == nil {
for _, groupId := range currentUserGroupIds {
if groupId == dockerGroup.Gid {
return true
}
}
}
}
}

// Eventually, if any of the checks fail, we assume the user cannot run Docker commands
return false
}

// isDockerRunning checks if the Docker daemon is running by using the `docker info` command.
func isDockerRunning() bool {
cmd := exec.Command("docker", "info")
if err := cmd.Run(); err != nil {
return false
}
return true
}

// executeDockerComposeCommandWithArgs executes the appropriate docker command with arguments supplied
func executeDockerComposeCommandWithArgs(args ...string) error {
var cmd *exec.Cmd
var useNewStyle bool

if !isDockerInstalled() {
return fmt.Errorf("docker is not installed")
}

checkCmd := exec.Command("docker", "compose", "version")
if err := checkCmd.Run(); err == nil {
useNewStyle = true
} else {
checkCmd = exec.Command("docker-compose", "version")
if err := checkCmd.Run(); err == nil {
useNewStyle = false
} else {
return fmt.Errorf("neither 'docker compose' nor 'docker-compose' command is available")
}
}

if useNewStyle {
cmd = exec.Command("docker", append([]string{"compose"}, args...)...)
} else {
cmd = exec.Command("docker-compose", args...)
}

cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}

// pullContainers pulls the containers using the appropriate command.
func pullContainers(containerType SupportedContainer) error {
fmt.Println("Pulling the container images...")
if containerType == Podman {
if err := run("podman-compose", "-f", "docker-compose.yml", "pull"); err != nil {
return fmt.Errorf("failed to pull the containers: %v", err)
}

return nil
}

if containerType == Docker {
if err := executeDockerComposeCommandWithArgs("-f", "docker-compose.yml", "pull", "--policy", "always"); err != nil {
return fmt.Errorf("failed to pull the containers: %v", err)
}

return nil
}

return fmt.Errorf("Unsupported container type: %s", containerType)
}

// startContainers starts the containers using the appropriate command.
func startContainers(containerType SupportedContainer) error {
fmt.Println("Starting containers...")

if containerType == Podman {
if err := run("podman-compose", "-f", "docker-compose.yml", "up", "-d", "--force-recreate"); err != nil {
return fmt.Errorf("failed start containers: %v", err)
}

return nil
}

if containerType == Docker {
if err := executeDockerComposeCommandWithArgs("-f", "docker-compose.yml", "up", "-d", "--force-recreate"); err != nil {
return fmt.Errorf("failed to start containers: %v", err)
}

return nil
}

return fmt.Errorf("Unsupported container type: %s", containerType)
}

// stopContainers stops the containers using the appropriate command.
func stopContainers(containerType SupportedContainer) error {
fmt.Println("Stopping containers...")
if containerType == Podman {
if err := run("podman-compose", "-f", "docker-compose.yml", "down"); err != nil {
return fmt.Errorf("failed to stop containers: %v", err)
}

return nil
}

if containerType == Docker {
if err := executeDockerComposeCommandWithArgs("-f", "docker-compose.yml", "down"); err != nil {
return fmt.Errorf("failed to stop containers: %v", err)
}

return nil
}

return fmt.Errorf("Unsupported container type: %s", containerType)
}

// restartContainer restarts a specific container using the appropriate command.
func restartContainer(container string, containerType SupportedContainer) error {
fmt.Println("Restarting containers...")
if containerType == Podman {
if err := run("podman-compose", "-f", "docker-compose.yml", "restart"); err != nil {
return fmt.Errorf("failed to stop the container \"%s\": %v", container, err)
}

return nil
}

if containerType == Docker {
if err := executeDockerComposeCommandWithArgs("-f", "docker-compose.yml", "restart", container); err != nil {
return fmt.Errorf("failed to stop the container \"%s\": %v", container, err)
}

return nil
}

return fmt.Errorf("Unsupported container type: %s", containerType)
}

func copyFile(src, dst string) error {
source, err := os.Open(src)
if err != nil {
@@ -786,37 +489,9 @@ func moveFile(src, dst string) error {
return os.Remove(src)
}

func waitForContainer(containerName string, containerType SupportedContainer) error {
maxAttempts := 30
retryInterval := time.Second * 2

for attempt := 0; attempt < maxAttempts; attempt++ {
// Check if container is running
cmd := exec.Command(string(containerType), "container", "inspect", "-f", "{{.State.Running}}", containerName)
var out bytes.Buffer
cmd.Stdout = &out

if err := cmd.Run(); err != nil {
// If the container doesn't exist or there's another error, wait and retry
time.Sleep(retryInterval)
continue
}

isRunning := strings.TrimSpace(out.String()) == "true"
if isRunning {
return nil
}

// Container exists but isn't running yet, wait and retry
time.Sleep(retryInterval)
}

return fmt.Errorf("container %s did not start within %v seconds", containerName, maxAttempts*int(retryInterval.Seconds()))
}

func printSetupToken(containerType SupportedContainer, dashboardDomain string) {
fmt.Println("Waiting for Pangolin to generate setup token...")

// Wait for Pangolin to be healthy
if err := waitForContainer("pangolin", containerType); err != nil {
fmt.Println("Warning: Pangolin container did not become healthy in time.")
@@ -851,12 +526,12 @@ func printSetupToken(containerType SupportedContainer, dashboardDomain string) {
tokenStart := strings.Index(trimmedLine, "Token:")
if tokenStart != -1 {
token := strings.TrimSpace(trimmedLine[tokenStart+6:])
fmt.Printf("Setup token: %s\n", token)
fmt.Println("")
fmt.Println("This token is required to register the first admin account in the web UI at:")
fmt.Printf("https://%s/auth/initial-setup\n", dashboardDomain)
fmt.Println("")
fmt.Println("Save this token securely. It will be invalid after the first admin is created.")
fmt.Printf("Setup token: %s\n", token)
fmt.Println("")
fmt.Println("This token is required to register the first admin account in the web UI at:")
fmt.Printf("https://%s/auth/initial-setup\n", dashboardDomain)
fmt.Println("")
fmt.Println("Save this token securely. It will be invalid after the first admin is created.")
return
}
}
@@ -870,28 +545,30 @@ func showSetupTokenInstructions(containerType SupportedContainer, dashboardDomai
fmt.Println("\n=== Setup Token Instructions ===")
fmt.Println("To get your setup token, you need to:")
fmt.Println("")
fmt.Println("1. Start the containers:")
fmt.Println("1. Start the containers")
if containerType == Docker {
fmt.Println(" docker-compose up -d")
} else {
fmt.Println(" docker compose up -d")
} else if containerType == Podman {
fmt.Println(" podman-compose up -d")
} else {
}
fmt.Println("")
fmt.Println("2. Wait for the Pangolin container to start and generate the token")
fmt.Println("")
fmt.Println("3. Check the container logs for the setup token:")
fmt.Println("3. Check the container logs for the setup token")
if containerType == Docker {
fmt.Println(" docker logs pangolin | grep -A 2 -B 2 'SETUP TOKEN'")
} else {
} else if containerType == Podman {
fmt.Println(" podman logs pangolin | grep -A 2 -B 2 'SETUP TOKEN'")
} else {
}
fmt.Println("")
fmt.Println("4. Look for output like:")
fmt.Println("4. Look for output like")
fmt.Println(" === SETUP TOKEN GENERATED ===")
fmt.Println(" Token: [your-token-here]")
fmt.Println(" Use this token on the initial setup page")
fmt.Println("")
fmt.Println("5. Use the token to complete initial setup at:")
fmt.Println("5. Use the token to complete initial setup at")
fmt.Printf(" https://%s/auth/initial-setup\n", dashboardDomain)
fmt.Println("")
fmt.Println("The setup token is required to register the first admin account.")
@@ -913,6 +590,32 @@ func generateRandomSecretKey() string {
return string(b)
}

func getPublicIP() string {
client := &http.Client{
Timeout: 10 * time.Second,
}

resp, err := client.Get("https://ifconfig.io/ip")
if err != nil {
return ""
}
defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
if err != nil {
return ""
}

ip := strings.TrimSpace(string(body))

// Validate that it's a valid IP address
if net.ParseIP(ip) != nil {
return ip
}

return ""
}

// Run external commands with stdio/stderr attached.
func run(name string, args ...string) error {
cmd := exec.Command(name, args...)
@@ -922,19 +625,47 @@
}

func checkPortsAvailable(port int) error {
addr := fmt.Sprintf(":%d", port)
ln, err := net.Listen("tcp", addr)
if err != nil {
return fmt.Errorf(
"ERROR: port %d is occupied or cannot be bound: %w\n\n",
port, err,
)
}
if closeErr := ln.Close(); closeErr != nil {
fmt.Fprintf(os.Stderr,
"WARNING: failed to close test listener on port %d: %v\n",
port, closeErr,
)
}
return nil
addr := fmt.Sprintf(":%d", port)
ln, err := net.Listen("tcp", addr)
if err != nil {
return fmt.Errorf(
"ERROR: port %d is occupied or cannot be bound: %w\n\n",
port, err,
)
}
if closeErr := ln.Close(); closeErr != nil {
fmt.Fprintf(os.Stderr,
"WARNING: failed to close test listener on port %d: %v\n",
port, closeErr,
)
}
return nil
}

func downloadMaxMindDatabase() error {
fmt.Println("Downloading MaxMind GeoLite2 Country database...")

// Download the GeoLite2 Country database
if err := run("curl", "-L", "-o", "GeoLite2-Country.tar.gz",
"https://github.com/GitSquared/node-geolite2-redist/raw/refs/heads/master/redist/GeoLite2-Country.tar.gz"); err != nil {
return fmt.Errorf("failed to download GeoLite2 database: %v", err)
}

// Extract the database
if err := run("tar", "-xzf", "GeoLite2-Country.tar.gz"); err != nil {
return fmt.Errorf("failed to extract GeoLite2 database: %v", err)
}

// Find the .mmdb file and move it to the config directory
if err := run("bash", "-c", "mv GeoLite2-Country_*/GeoLite2-Country.mmdb config/"); err != nil {
return fmt.Errorf("failed to move GeoLite2 database to config directory: %v", err)
}

// Clean up the downloaded files
if err := run("rm", "-rf", "GeoLite2-Country.tar.gz", "GeoLite2-Country_*"); err != nil {
fmt.Printf("Warning: failed to clean up temporary files: %v\n", err)
}

fmt.Println("MaxMind GeoLite2 Country database downloaded successfully!")
return nil
}
messages/bg-BG.json (3378 lines): file diff suppressed because it is too large
messages/cs-CZ.json (3244 lines): file diff suppressed because it is too large
File diff suppressed because it is too large
@@ -47,9 +47,8 @@
|
||||
"edit": "Edit",
|
||||
"siteConfirmDelete": "Confirm Delete Site",
|
||||
"siteDelete": "Delete Site",
|
||||
"siteMessageRemove": "Once removed, the site will no longer be accessible. All resources and targets associated with the site will also be removed.",
|
||||
"siteMessageConfirm": "To confirm, please type the name of the site below.",
|
||||
"siteQuestionRemove": "Are you sure you want to remove the site {selectedSite} from the organization?",
|
||||
"siteMessageRemove": "Once removed the site will no longer be accessible. All targets associated with the site will also be removed.",
|
||||
"siteQuestionRemove": "Are you sure you want to remove the site from the organization?",
|
||||
"siteManageSites": "Manage Sites",
|
||||
"siteDescription": "Allow connectivity to your network through secure tunnels",
|
||||
"siteCreate": "Create Site",
|
||||
@@ -94,7 +93,9 @@
|
||||
"siteNewtTunnelDescription": "Easiest way to create an entrypoint into your network. No extra setup.",
|
||||
"siteWg": "Basic WireGuard",
|
||||
"siteWgDescription": "Use any WireGuard client to establish a tunnel. Manual NAT setup required.",
|
||||
"siteWgDescriptionSaas": "Use any WireGuard client to establish a tunnel. Manual NAT setup required.",
|
||||
"siteLocalDescription": "Local resources only. No tunneling.",
|
||||
"siteLocalDescriptionSaas": "Local resources only. No tunneling. Only available on remote nodes.",
|
||||
"siteSeeAll": "See All Sites",
|
||||
"siteTunnelDescription": "Determine how you want to connect to your site",
|
||||
"siteNewtCredentials": "Newt Credentials",
|
||||
@@ -152,12 +153,11 @@
|
||||
"protected": "Protected",
|
||||
"notProtected": "Not Protected",
|
||||
"resourceMessageRemove": "Once removed, the resource will no longer be accessible. All targets associated with the resource will also be removed.",
|
||||
"resourceMessageConfirm": "To confirm, please type the name of the resource below.",
|
||||
"resourceQuestionRemove": "Are you sure you want to remove the resource {selectedResource} from the organization?",
|
||||
"resourceQuestionRemove": "Are you sure you want to remove the resource from the organization?",
|
||||
"resourceHTTP": "HTTPS Resource",
|
||||
"resourceHTTPDescription": "Proxy requests to your app over HTTPS using a subdomain or base domain.",
|
||||
"resourceRaw": "Raw TCP/UDP Resource",
|
||||
"resourceRawDescription": "Proxy requests to your app over TCP/UDP using a port number.",
|
||||
"resourceRawDescription": "Proxy requests to your app over TCP/UDP using a port number. This only works when sites are connected to nodes.",
|
||||
"resourceCreate": "Create Resource",
|
||||
"resourceCreateDescription": "Follow the steps below to create a new resource",
|
||||
"resourceSeeAll": "See All Resources",
|
||||
@@ -166,6 +166,9 @@
|
||||
"siteSelect": "Select site",
|
||||
"siteSearch": "Search site",
|
||||
"siteNotFound": "No site found.",
|
||||
"selectCountry": "Select country",
|
||||
"searchCountries": "Search countries...",
|
||||
"noCountryFound": "No country found.",
|
||||
"siteSelectionDescription": "This site will provide connectivity to the target.",
|
||||
"resourceType": "Resource Type",
|
||||
"resourceTypeDescription": "Determine how you want to access your resource",
|
||||
@@ -203,7 +206,8 @@
|
||||
"resourceSetting": "{resourceName} Settings",
|
||||
"alwaysAllow": "Always Allow",
|
||||
"alwaysDeny": "Always Deny",
|
||||
"orgSettingsDescription": "Configure your organization's general settings",
|
||||
"passToAuth": "Pass to Auth",
|
||||
"orgSettingsDescription": "Configure your organization's settings",
|
||||
"orgGeneralSettings": "Organization Settings",
|
||||
"orgGeneralSettingsDescription": "Manage your organization details and configuration",
|
||||
"saveGeneralSettings": "Save General Settings",
|
||||
@@ -214,7 +218,7 @@
|
||||
"orgDeleteConfirm": "Confirm Delete Organization",
|
||||
"orgMessageRemove": "This action is irreversible and will delete all associated data.",
|
||||
"orgMessageConfirm": "To confirm, please type the name of the organization below.",
|
||||
"orgQuestionRemove": "Are you sure you want to remove the organization {selectedOrg}?",
|
||||
"orgQuestionRemove": "Are you sure you want to remove the organization?",
|
||||
"orgUpdated": "Organization updated",
|
||||
"orgUpdatedDescription": "The organization has been updated.",
|
||||
"orgErrorUpdate": "Failed to update organization",
|
||||
@@ -281,9 +285,8 @@
|
||||
"apiKeysAdd": "Generate API Key",
|
||||
"apiKeysErrorDelete": "Error deleting API key",
|
||||
"apiKeysErrorDeleteMessage": "Error deleting API key",
|
||||
"apiKeysQuestionRemove": "Are you sure you want to remove the API key {selectedApiKey} from the organization?",
|
||||
"apiKeysQuestionRemove": "Are you sure you want to remove the API key from the organization?",
|
||||
"apiKeysMessageRemove": "Once removed, the API key will no longer be able to be used.",
|
||||
"apiKeysMessageConfirm": "To confirm, please type the name of the API key below.",
|
||||
"apiKeysDeleteConfirm": "Confirm Delete API Key",
|
||||
"apiKeysDelete": "Delete API Key",
|
||||
"apiKeysManage": "Manage API Keys",
|
||||
@@ -299,8 +302,7 @@
|
||||
"userDeleteConfirm": "Confirm Delete User",
|
||||
"userDeleteServer": "Delete User from Server",
|
||||
"userMessageRemove": "The user will be removed from all organizations and be completely removed from the server.",
|
||||
"userMessageConfirm": "To confirm, please type the name of the user below.",
|
||||
"userQuestionRemove": "Are you sure you want to permanently delete {selectedUser} from the server?",
|
||||
"userQuestionRemove": "Are you sure you want to permanently delete user from the server?",
|
||||
"licenseKey": "License Key",
|
||||
"valid": "Valid",
|
||||
"numberOfSites": "Number of Sites",
|
||||
@@ -333,7 +335,7 @@
|
||||
"fossorialLicense": "View Fossorial Commercial License & Subscription Terms",
|
||||
"licenseMessageRemove": "This will remove the license key and all associated permissions granted by it.",
|
||||
"licenseMessageConfirm": "To confirm, please type the license key below.",
|
||||
"licenseQuestionRemove": "Are you sure you want to delete the license key {selectedKey} ?",
|
||||
"licenseQuestionRemove": "Are you sure you want to delete the license key ?",
|
||||
"licenseKeyDelete": "Delete License Key",
|
||||
"licenseKeyDeleteConfirm": "Confirm Delete License Key",
|
||||
"licenseTitle": "Manage License Status",
|
||||
@@ -366,7 +368,7 @@
|
||||
"inviteRemoveErrorDescription": "An error occurred while removing the invitation.",
|
||||
"inviteRemoved": "Invitation removed",
|
||||
"inviteRemovedDescription": "The invitation for {email} has been removed.",
|
||||
"inviteQuestionRemove": "Are you sure you want to remove the invitation {email}?",
|
||||
"inviteQuestionRemove": "Are you sure you want to remove the invitation?",
|
||||
"inviteMessageRemove": "Once removed, this invitation will no longer be valid. You can always re-invite the user later.",
|
||||
"inviteMessageConfirm": "To confirm, please type the email address of the invitation below.",
|
||||
"inviteQuestionRegenerate": "Are you sure you want to regenerate the invitation for {email}? This will revoke the previous invitation.",
|
||||
@@ -392,9 +394,8 @@
|
||||
"userErrorOrgRemoveDescription": "An error occurred while removing the user.",
|
||||
"userOrgRemoved": "User removed",
|
||||
"userOrgRemovedDescription": "The user {email} has been removed from the organization.",
|
||||
"userQuestionOrgRemove": "Are you sure you want to remove {email} from the organization?",
|
||||
"userQuestionOrgRemove": "Are you sure you want to remove this user from the organization?",
|
||||
"userMessageOrgRemove": "Once removed, this user will no longer have access to the organization. You can always re-invite them later, but they will need to accept the invitation again.",
|
||||
"userMessageOrgConfirm": "To confirm, please type the name of the of the user below.",
|
||||
"userRemoveOrgConfirm": "Confirm Remove User",
|
||||
"userRemoveOrg": "Remove User from Organization",
|
||||
"users": "Users",
|
||||
@@ -451,6 +452,8 @@
|
||||
"accessRoleErrorAddDescription": "An error occurred while adding user to the role.",
|
||||
"userSaved": "User saved",
|
||||
"userSavedDescription": "The user has been updated.",
|
||||
"autoProvisioned": "Auto Provisioned",
|
||||
"autoProvisionedDescription": "Allow this user to be automatically managed by identity provider",
|
||||
"accessControlsDescription": "Manage what this user can access and do in the organization",
|
||||
"accessControlsSubmit": "Save Access Controls",
|
||||
"roles": "Roles",
|
||||
@@ -460,7 +463,10 @@
|
||||
"createdAt": "Created At",
|
||||
"proxyErrorInvalidHeader": "Invalid custom Host Header value. Use domain name format, or save empty to unset custom Host Header.",
|
||||
"proxyErrorTls": "Invalid TLS Server Name. Use domain name format, or save empty to remove the TLS Server Name.",
|
||||
"proxyEnableSSL": "Enable SSL (https)",
|
||||
"proxyEnableSSL": "Enable SSL",
|
||||
"proxyEnableSSLDescription": "Enable SSL/TLS encryption for secure HTTPS connections to your targets.",
|
||||
"target": "Target",
|
||||
"configureTarget": "Configure Targets",
|
||||
"targetErrorFetch": "Failed to fetch targets",
|
||||
"targetErrorFetchDescription": "An error occurred while fetching targets",
|
||||
"siteErrorFetch": "Failed to fetch resource",
|
||||
@@ -487,7 +493,7 @@
|
||||
"targetTlsSettings": "Secure Connection Configuration",
|
||||
"targetTlsSettingsDescription": "Configure SSL/TLS settings for your resource",
|
||||
"targetTlsSettingsAdvanced": "Advanced TLS Settings",
|
||||
"targetTlsSni": "TLS Server Name (SNI)",
|
||||
"targetTlsSni": "TLS Server Name",
|
||||
"targetTlsSniDescription": "The TLS Server Name to use for SNI. Leave empty to use the default.",
|
||||
"targetTlsSubmit": "Save Settings",
|
||||
"targets": "Targets Configuration",
|
||||
@@ -496,9 +502,21 @@
|
||||
"targetStickySessionsDescription": "Keep connections on the same backend target for their entire session.",
|
||||
"methodSelect": "Select method",
|
||||
"targetSubmit": "Add Target",
|
||||
"targetNoOne": "No targets. Add a target using the form.",
|
||||
"targetNoOne": "This resource doesn't have any targets. Add a target to configure where to send requests to your backend.",
|
||||
"targetNoOneDescription": "Adding more than one target above will enable load balancing.",
|
||||
"targetsSubmit": "Save Targets",
|
||||
"addTarget": "Add Target",
|
||||
"targetErrorInvalidIp": "Invalid IP address",
|
||||
"targetErrorInvalidIpDescription": "Please enter a valid IP address or hostname",
|
||||
"targetErrorInvalidPort": "Invalid port",
|
||||
"targetErrorInvalidPortDescription": "Please enter a valid port number",
|
||||
"targetErrorNoSite": "No site selected",
|
||||
"targetErrorNoSiteDescription": "Please select a site for the target",
|
||||
"targetCreated": "Target created",
|
||||
"targetCreatedDescription": "Target has been created successfully",
|
||||
"targetErrorCreate": "Failed to create target",
|
||||
"targetErrorCreateDescription": "An error occurred while creating the target",
|
||||
"save": "Save",
|
||||
"proxyAdditional": "Additional Proxy Settings",
|
||||
"proxyAdditionalDescription": "Configure how your resource handles proxy settings",
|
||||
"proxyCustomHeader": "Custom Host Header",
|
||||
@@ -508,6 +526,7 @@
|
||||
"ipAddressErrorInvalidFormat": "Invalid IP address format",
|
||||
"ipAddressErrorInvalidOctet": "Invalid IP address octet",
|
||||
"path": "Path",
|
||||
"matchPath": "Match Path",
|
||||
"ipAddressRange": "IP Range",
|
||||
"rulesErrorFetch": "Failed to fetch rules",
|
||||
"rulesErrorFetchDescription": "An error occurred while fetching rules",
|
||||
@@ -543,6 +562,7 @@
|
||||
"rulesActions": "Actions",
|
||||
"rulesActionAlwaysAllow": "Always Allow: Bypass all authentication methods",
|
||||
"rulesActionAlwaysDeny": "Always Deny: Block all requests; no authentication can be attempted",
|
||||
"rulesActionPassToAuth": "Pass to Auth: Allow authentication methods to be attempted",
|
||||
"rulesMatchCriteria": "Matching Criteria",
|
||||
"rulesMatchCriteriaIpAddress": "Match a specific IP address",
|
||||
"rulesMatchCriteriaIpAddressRange": "Match a range of IP addresses in CIDR notation",
|
||||
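To make the "CIDR notation" criterion above concrete, here is a minimal TypeScript sketch of checking a source IPv4 address against a rule range. It is illustrative only (IPv4, no validation) and is an assumption, not the project's actual rule engine.

```typescript
// Illustrative sketch: match a source IP against a rule given in CIDR notation.
function ipv4ToInt(ip: string): number {
    // "10.0.1.25" -> 167772441
    return ip.split(".").reduce((acc, octet) => (acc << 8) + parseInt(octet, 10), 0) >>> 0;
}

function matchesCidr(ip: string, cidr: string): boolean {
    const [network, prefixStr] = cidr.split("/");
    const prefix = parseInt(prefixStr, 10);
    const mask = prefix === 0 ? 0 : (~0 << (32 - prefix)) >>> 0;
    return (ipv4ToInt(ip) & mask) === (ipv4ToInt(network) & mask);
}

console.log(matchesCidr("10.0.1.25", "10.0.0.0/16"));  // true
console.log(matchesCidr("192.168.1.5", "10.0.0.0/16")); // false
```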
@@ -705,7 +725,7 @@
|
||||
"pangolinServerAdmin": "Server Admin - Pangolin",
|
||||
"licenseTierProfessional": "Professional License",
|
||||
"licenseTierEnterprise": "Enterprise License",
|
||||
"licenseTierCommercial": "Commercial License",
|
||||
"licenseTierPersonal": "Personal License",
|
||||
"licensed": "Licensed",
|
||||
"yes": "Yes",
|
||||
"no": "No",
|
||||
@@ -717,7 +737,7 @@
|
||||
"idpManageDescription": "View and manage identity providers in the system",
|
||||
"idpDeletedDescription": "Identity provider deleted successfully",
|
||||
"idpOidc": "OAuth2/OIDC",
|
||||
"idpQuestionRemove": "Are you sure you want to permanently delete the identity provider {name}?",
|
||||
"idpQuestionRemove": "Are you sure you want to permanently delete the identity provider?",
|
||||
"idpMessageRemove": "This will remove the identity provider and all associated configurations. Users who authenticate through this provider will no longer be able to log in.",
|
||||
"idpMessageConfirm": "To confirm, please type the name of the identity provider below.",
|
||||
"idpConfirmDelete": "Confirm Delete Identity Provider",
|
||||
@@ -740,7 +760,7 @@
|
||||
"idpDisplayName": "A display name for this identity provider",
|
||||
"idpAutoProvisionUsers": "Auto Provision Users",
|
||||
"idpAutoProvisionUsersDescription": "When enabled, users will be automatically created in the system upon first login with the ability to map users to roles and organizations.",
|
||||
"licenseBadge": "Professional",
|
||||
"licenseBadge": "EE",
|
||||
"idpType": "Provider Type",
|
||||
"idpTypeDescription": "Select the type of identity provider you want to configure",
|
||||
"idpOidcConfigure": "OAuth2/OIDC Configuration",
|
||||
@@ -891,6 +911,18 @@
|
||||
"passwordResetCodeDescription": "Check your email for the reset code.",
|
||||
"passwordNew": "New Password",
|
||||
"passwordNewConfirm": "Confirm New Password",
|
||||
"changePassword": "Change Password",
|
||||
"changePasswordDescription": "Update your account password",
|
||||
"oldPassword": "Current Password",
|
||||
"newPassword": "New Password",
|
||||
"confirmNewPassword": "Confirm New Password",
|
||||
"changePasswordError": "Failed to change password",
|
||||
"changePasswordErrorDescription": "An error occurred while changing your password",
|
||||
"changePasswordSuccess": "Password Changed Successfully",
|
||||
"changePasswordSuccessDescription": "Your password has been updated successfully",
|
||||
"passwordExpiryRequired": "Password Expiry Required",
|
||||
"passwordExpiryDescription": "This organization requires you to change your password every {maxDays} days.",
|
||||
"changePasswordNow": "Change Password Now",
|
||||
"pincodeAuth": "Authenticator Code",
|
||||
"pincodeSubmit2": "Submit Code",
|
||||
"passwordResetSubmit": "Request Reset",
|
||||
@@ -971,12 +1003,15 @@
|
||||
"logoutError": "Error logging out",
|
||||
"signingAs": "Signed in as",
|
||||
"serverAdmin": "Server Admin",
|
||||
"managedSelfhosted": "Managed Self-Hosted",
|
||||
"otpEnable": "Enable Two-factor",
|
||||
"otpDisable": "Disable Two-factor",
|
||||
"logout": "Log Out",
|
||||
"licenseTierProfessionalRequired": "Professional Edition Required",
|
||||
"licenseTierProfessionalRequiredDescription": "This feature is only available in the Professional Edition.",
|
||||
"actionGetOrg": "Get Organization",
|
||||
"updateOrgUser": "Update Org User",
|
||||
"createOrgUser": "Create Org User",
|
||||
"actionUpdateOrg": "Update Organization",
|
||||
"actionUpdateUser": "Update User",
|
||||
"actionGetUser": "Get User",
|
||||
@@ -986,6 +1021,7 @@
|
||||
"actionDeleteSite": "Delete Site",
|
||||
"actionGetSite": "Get Site",
|
||||
"actionListSites": "List Sites",
|
||||
"actionApplyBlueprint": "Apply Blueprint",
|
||||
"setupToken": "Setup Token",
|
||||
"setupTokenDescription": "Enter the setup token from the server console.",
|
||||
"setupTokenRequired": "Setup token is required",
|
||||
@@ -1049,6 +1085,12 @@
|
||||
"actionUpdateClient": "Update Client",
|
||||
"actionListClients": "List Clients",
|
||||
"actionGetClient": "Get Client",
|
||||
"actionCreateSiteResource": "Create Site Resource",
|
||||
"actionDeleteSiteResource": "Delete Site Resource",
|
||||
"actionGetSiteResource": "Get Site Resource",
|
||||
"actionListSiteResources": "List Site Resources",
|
||||
"actionUpdateSiteResource": "Update Site Resource",
|
||||
"actionListInvitations": "List Invitations",
|
||||
"noneSelected": "None selected",
|
||||
"orgNotFound2": "No organizations found.",
|
||||
"searchProgress": "Search...",
|
||||
@@ -1064,7 +1106,6 @@
|
||||
"navbar": "Navigation Menu",
|
||||
"navbarDescription": "Main navigation menu for the application",
|
||||
"navbarDocsLink": "Documentation",
|
||||
"commercialEdition": "Commercial Edition",
|
||||
"otpErrorEnable": "Unable to enable 2FA",
|
||||
"otpErrorEnableDescription": "An error occurred while enabling 2FA",
|
||||
"otpSetupCheckCode": "Please enter a 6-digit code",
|
||||
@@ -1120,10 +1161,29 @@
|
||||
"sidebarAllUsers": "All Users",
|
||||
"sidebarIdentityProviders": "Identity Providers",
|
||||
"sidebarLicense": "License",
|
||||
"sidebarClients": "Clients (Beta)",
|
||||
"sidebarClients": "Clients",
|
||||
"sidebarDomains": "Domains",
|
||||
"enableDockerSocket": "Enable Docker Socket",
|
||||
"enableDockerSocketDescription": "Enable Docker Socket discovery for populating container information. Socket path must be provided to Newt.",
|
||||
"sidebarBluePrints": "Blueprints",
|
||||
"blueprints": "Blueprints",
|
||||
"blueprintsDescription": "Blueprints are declarative YAML configurations that define your resources and their settings",
|
||||
"blueprintAdd": "Add Blueprint",
|
||||
"blueprintGoBack": "See all Blueprints",
|
||||
"blueprintCreate": "Create Blueprint",
|
||||
"blueprintCreateDescription2": "Follow the steps below to create and apply a new blueprint",
|
||||
"blueprintDetails": "Blueprint details",
|
||||
"blueprintDetailsDescription": "See the blueprint run details",
|
||||
"blueprintInfo": "Blueprint Information",
|
||||
"message": "Message",
|
||||
"blueprintContentsDescription": "Define the YAML content describing your infrastructure",
|
||||
"blueprintErrorCreateDescription": "An error occurred when applying the blueprint",
|
||||
"blueprintErrorCreate": "Error creating blueprint",
|
||||
"searchBlueprintProgress": "Search blueprints...",
|
||||
"appliedAt": "Applied At",
|
||||
"source": "Source",
|
||||
"contents": "Contents",
|
||||
"parsedContents": "Parsed Contents",
|
||||
"enableDockerSocket": "Enable Docker Blueprint",
|
||||
"enableDockerSocketDescription": "Enable Docker Socket label scraping for blueprint labels. Socket path must be provided to Newt.",
|
||||
"enableDockerSocketLink": "Learn More",
|
||||
"viewDockerContainers": "View Docker Containers",
|
||||
"containersIn": "Containers in {siteName}",
|
||||
@@ -1177,9 +1237,8 @@
|
||||
"domainCreate": "Create Domain",
|
||||
"domainCreatedDescription": "Domain created successfully",
|
||||
"domainDeletedDescription": "Domain deleted successfully",
|
||||
"domainQuestionRemove": "Are you sure you want to remove the domain {domain} from your account?",
|
||||
"domainQuestionRemove": "Are you sure you want to remove the domain from your account?",
|
||||
"domainMessageRemove": "Once removed, the domain will no longer be associated with your account.",
|
||||
"domainMessageConfirm": "To confirm, please type the domain name below.",
|
||||
"domainConfirmDelete": "Confirm Delete Domain",
|
||||
"domainDelete": "Delete Domain",
|
||||
"domain": "Domain",
|
||||
@@ -1223,7 +1282,7 @@
|
||||
"newtUpdateAvailable": "Update Available",
|
||||
"newtUpdateAvailableInfo": "A new version of Newt is available. Please update to the latest version for the best experience.",
|
||||
"domainPickerEnterDomain": "Domain",
|
||||
"domainPickerPlaceholder": "myapp.example.com, api.v1.mydomain.com, or just myapp",
|
||||
"domainPickerPlaceholder": "myapp.example.com",
|
||||
"domainPickerDescription": "Enter the full domain of the resource to see available options.",
|
||||
"domainPickerDescriptionSaas": "Enter a full domain, subdomain, or just a name to see available options",
|
||||
"domainPickerTabAll": "All",
|
||||
@@ -1238,6 +1297,48 @@
|
||||
"domainPickerSubdomain": "Subdomain: {subdomain}",
|
||||
"domainPickerNamespace": "Namespace: {namespace}",
|
||||
"domainPickerShowMore": "Show More",
|
||||
"regionSelectorTitle": "Select Region",
|
||||
"regionSelectorInfo": "Selecting a region helps us provide better performance for your location. You do not have to be in the same region as your server.",
|
||||
"regionSelectorPlaceholder": "Choose a region",
|
||||
"regionSelectorComingSoon": "Coming Soon",
|
||||
"billingLoadingSubscription": "Loading subscription...",
|
||||
"billingFreeTier": "Free Tier",
|
||||
"billingWarningOverLimit": "Warning: You have exceeded one or more usage limits. Your sites will not connect until you modify your subscription or adjust your usage.",
|
||||
"billingUsageLimitsOverview": "Usage Limits Overview",
|
||||
"billingMonitorUsage": "Monitor your usage against configured limits. If you need limits increased please contact us support@pangolin.net.",
|
||||
"billingDataUsage": "Data Usage",
|
||||
"billingOnlineTime": "Site Online Time",
|
||||
"billingUsers": "Active Users",
|
||||
"billingDomains": "Active Domains",
|
||||
"billingRemoteExitNodes": "Active Self-hosted Nodes",
|
||||
"billingNoLimitConfigured": "No limit configured",
|
||||
"billingEstimatedPeriod": "Estimated Billing Period",
|
||||
"billingIncludedUsage": "Included Usage",
|
||||
"billingIncludedUsageDescription": "Usage included with your current subscription plan",
|
||||
"billingFreeTierIncludedUsage": "Free tier usage allowances",
|
||||
"billingIncluded": "included",
|
||||
"billingEstimatedTotal": "Estimated Total:",
|
||||
"billingNotes": "Notes",
|
||||
"billingEstimateNote": "This is an estimate based on your current usage.",
|
||||
"billingActualChargesMayVary": "Actual charges may vary.",
|
||||
"billingBilledAtEnd": "You will be billed at the end of the billing period.",
|
||||
"billingModifySubscription": "Modify Subscription",
|
||||
"billingStartSubscription": "Start Subscription",
|
||||
"billingRecurringCharge": "Recurring Charge",
|
||||
"billingManageSubscriptionSettings": "Manage your subscription settings and preferences",
|
||||
"billingNoActiveSubscription": "You don't have an active subscription. Start your subscription to increase usage limits.",
|
||||
"billingFailedToLoadSubscription": "Failed to load subscription",
|
||||
"billingFailedToLoadUsage": "Failed to load usage",
|
||||
"billingFailedToGetCheckoutUrl": "Failed to get checkout URL",
|
||||
"billingPleaseTryAgainLater": "Please try again later.",
|
||||
"billingCheckoutError": "Checkout Error",
|
||||
"billingFailedToGetPortalUrl": "Failed to get portal URL",
|
||||
"billingPortalError": "Portal Error",
|
||||
"billingDataUsageInfo": "You're charged for all data transferred through your secure tunnels when connected to the cloud. This includes both incoming and outgoing traffic across all your sites. When you reach your limit, your sites will disconnect until you upgrade your plan or reduce usage. Data is not charged when using nodes.",
|
||||
"billingOnlineTimeInfo": "You're charged based on how long your sites stay connected to the cloud. For example, 44,640 minutes equals one site running 24/7 for a full month. When you reach your limit, your sites will disconnect until you upgrade your plan or reduce usage. Time is not charged when using nodes.",
|
||||
"billingUsersInfo": "You're charged for each user in your organization. Billing is calculated daily based on the number of active user accounts in your org.",
|
||||
"billingDomainInfo": "You're charged for each domain in your organization. Billing is calculated daily based on the number of active domain accounts in your org.",
|
||||
"billingRemoteExitNodesInfo": "You're charged for each managed Node in your organization. Billing is calculated daily based on the number of active managed Nodes in your org.",
|
||||
"domainNotFound": "Domain Not Found",
|
||||
"domainNotFoundDescription": "This resource is disabled because the domain no longer exists our system. Please set a new domain for this resource.",
|
||||
"failed": "Failed",
|
||||
@@ -1270,8 +1371,20 @@
|
||||
"securityKeyUnknownError": "There was a problem using your security key. Please try again.",
|
||||
"twoFactorRequired": "Two-factor authentication is required to register a security key.",
|
||||
"twoFactor": "Two-Factor Authentication",
|
||||
"twoFactorAuthentication": "Two-Factor Authentication",
|
||||
"twoFactorDescription": "This organization requires two-factor authentication.",
|
||||
"enableTwoFactor": "Enable Two-Factor Authentication",
|
||||
"organizationSecurityPolicy": "Organization Security Policy",
|
||||
"organizationSecurityPolicyDescription": "This organization has security requirements that must be met before you can access it",
|
||||
"securityRequirements": "Security Requirements",
|
||||
"allRequirementsMet": "All requirements have been met",
|
||||
"completeRequirementsToContinue": "Complete the requirements below to continue accessing this organization",
|
||||
"youCanNowAccessOrganization": "You can now access this organization",
|
||||
"reauthenticationRequired": "Session Length",
|
||||
"reauthenticationDescription": "This organization requires you to log in every {maxDays} days.",
|
||||
"reauthenticationDescriptionHours": "This organization requires you to log in every {maxHours} hours.",
|
||||
"reauthenticateNow": "Log In Again",
|
||||
"adminEnabled2FaOnYourAccount": "Your administrator has enabled two-factor authentication for {email}. Please complete the setup process to continue.",
|
||||
"continueToApplication": "Continue to Application",
|
||||
"securityKeyAdd": "Add Security Key",
|
||||
"securityKeyRegisterTitle": "Register New Security Key",
|
||||
"securityKeyRegisterDescription": "Connect your security key and enter a name to identify it",
|
||||
@@ -1301,6 +1414,7 @@
|
||||
"createDomainDnsPropagationDescription": "DNS changes may take some time to propagate across the internet. This can take anywhere from a few minutes to 48 hours, depending on your DNS provider and TTL settings.",
|
||||
"resourcePortRequired": "Port number is required for non-HTTP resources",
|
||||
"resourcePortNotAllowed": "Port number should not be set for HTTP resources",
|
||||
"billingPricingCalculatorLink": "Pricing Calculator",
|
||||
"signUpTerms": {
|
||||
"IAgreeToThe": "I agree to the",
|
||||
"termsOfService": "terms of service",
|
||||
@@ -1348,7 +1462,43 @@
|
||||
"externalProxyEnabled": "External Proxy Enabled",
|
||||
"addNewTarget": "Add New Target",
|
||||
"targetsList": "Targets List",
|
||||
"advancedMode": "Advanced Mode",
|
||||
"targetErrorDuplicateTargetFound": "Duplicate target found",
|
||||
"healthCheckHealthy": "Healthy",
|
||||
"healthCheckUnhealthy": "Unhealthy",
|
||||
"healthCheckUnknown": "Unknown",
|
||||
"healthCheck": "Health Check",
|
||||
"configureHealthCheck": "Configure Health Check",
|
||||
"configureHealthCheckDescription": "Set up health monitoring for {target}",
|
||||
"enableHealthChecks": "Enable Health Checks",
|
||||
"enableHealthChecksDescription": "Monitor the health of this target. You can monitor a different endpoint than the target if required.",
|
||||
"healthScheme": "Method",
|
||||
"healthSelectScheme": "Select Method",
|
||||
"healthCheckPath": "Path",
|
||||
"healthHostname": "IP / Host",
|
||||
"healthPort": "Port",
|
||||
"healthCheckPathDescription": "The path to check for health status.",
|
||||
"healthyIntervalSeconds": "Healthy Interval",
|
||||
"unhealthyIntervalSeconds": "Unhealthy Interval",
|
||||
"IntervalSeconds": "Healthy Interval",
|
||||
"timeoutSeconds": "Timeout",
|
||||
"timeIsInSeconds": "Time is in seconds",
|
||||
"retryAttempts": "Retry Attempts",
|
||||
"expectedResponseCodes": "Expected Response Codes",
|
||||
"expectedResponseCodesDescription": "HTTP status code that indicates healthy status. If left blank, 200-300 is considered healthy.",
|
||||
"customHeaders": "Custom Headers",
|
||||
"customHeadersDescription": "Headers new line separated: Header-Name: value",
|
||||
"headersValidationError": "Headers must be in the format: Header-Name: value",
|
||||
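The custom header format ("Header-Name: value" per line) and the 200-300 default described above can be illustrated with a small TypeScript sketch. The function names and the exact validation regex are assumptions for illustration, not Pangolin's implementation.

```typescript
// Hedged sketch: parse the newline-separated "Header-Name: value" custom headers field.
function parseCustomHeaders(input: string): Record<string, string> {
    const headers: Record<string, string> = {};
    for (const line of input.split("\n")) {
        const trimmed = line.trim();
        if (!trimmed) continue;
        const match = trimmed.match(/^([A-Za-z0-9-]+):\s*(.+)$/);
        if (!match) {
            throw new Error("Headers must be in the format: Header-Name: value");
        }
        headers[match[1]] = match[2];
    }
    return headers;
}

// A response code is treated as healthy when it falls in the expected set,
// defaulting to 200-300 when no codes are configured (per the description above).
function isHealthyStatus(status: number, expected?: number[]): boolean {
    return expected && expected.length > 0
        ? expected.includes(status)
        : status >= 200 && status <= 300;
}

console.log(parseCustomHeaders("Authorization: Bearer token\nX-Probe: pangolin"));
console.log(isHealthyStatus(204)); // true (falls in the default 200-300 range)
```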
"saveHealthCheck": "Save Health Check",
|
||||
"healthCheckSaved": "Health Check Saved",
|
||||
"healthCheckSavedDescription": "Health check configuration has been saved successfully",
|
||||
"healthCheckError": "Health Check Error",
|
||||
"healthCheckErrorDescription": "An error occurred while saving the health check configuration",
|
||||
"healthCheckPathRequired": "Health check path is required",
|
||||
"healthCheckMethodRequired": "HTTP method is required",
|
||||
"healthCheckIntervalMin": "Check interval must be at least 5 seconds",
|
||||
"healthCheckTimeoutMin": "Timeout must be at least 1 second",
|
||||
"healthCheckRetryMin": "Retry attempts must be at least 1",
|
||||
"httpMethod": "HTTP Method",
|
||||
"selectHttpMethod": "Select HTTP method",
|
||||
"domainPickerSubdomainLabel": "Subdomain",
|
||||
@@ -1362,6 +1512,7 @@
|
||||
"domainPickerEnterSubdomainToSearch": "Enter a subdomain to search and select from available free domains.",
|
||||
"domainPickerFreeDomains": "Free Domains",
|
||||
"domainPickerSearchForAvailableDomains": "Search for available domains",
|
||||
"domainPickerNotWorkSelfHosted": "Note: Free provided domains are not available for self-hosted instances right now.",
|
||||
"resourceDomain": "Domain",
|
||||
"resourceEditDomain": "Edit Domain",
|
||||
"siteName": "Site Name",
|
||||
@@ -1381,8 +1532,6 @@
|
||||
"editInternalResourceDialogProtocol": "Protocol",
|
||||
"editInternalResourceDialogSitePort": "Site Port",
|
||||
"editInternalResourceDialogTargetConfiguration": "Target Configuration",
|
||||
"editInternalResourceDialogDestinationIP": "Destination IP",
|
||||
"editInternalResourceDialogDestinationPort": "Destination Port",
|
||||
"editInternalResourceDialogCancel": "Cancel",
|
||||
"editInternalResourceDialogSaveResource": "Save Resource",
|
||||
"editInternalResourceDialogSuccess": "Success",
|
||||
@@ -1413,9 +1562,7 @@
|
||||
"createInternalResourceDialogSitePort": "Site Port",
|
||||
"createInternalResourceDialogSitePortDescription": "Use this port to access the resource on the site when connected with a client.",
|
||||
"createInternalResourceDialogTargetConfiguration": "Target Configuration",
|
||||
"createInternalResourceDialogDestinationIP": "Destination IP",
|
||||
"createInternalResourceDialogDestinationIPDescription": "The IP address of the resource on the site's network.",
|
||||
"createInternalResourceDialogDestinationPort": "Destination Port",
|
||||
"createInternalResourceDialogDestinationIPDescription": "The IP or hostname address of the resource on the site's network.",
|
||||
"createInternalResourceDialogDestinationPortDescription": "The port on the destination IP where the resource is accessible.",
|
||||
"createInternalResourceDialogCancel": "Cancel",
|
||||
"createInternalResourceDialogCreateResource": "Create Resource",
|
||||
@@ -1435,7 +1582,7 @@
|
||||
"siteAcceptClientConnections": "Accept Client Connections",
|
||||
"siteAcceptClientConnectionsDescription": "Allow other devices to connect through this Newt instance as a gateway using clients.",
|
||||
"siteAddress": "Site Address",
|
||||
"siteAddressDescription": "Specify the IP address of the host for clients to connect to.",
|
||||
"siteAddressDescription": "Specify the IP address of the host for clients to connect to. This is the internal address of the site in the Pangolin network for clients to address. Must fall within the Org subnet.",
|
||||
"autoLoginExternalIdp": "Auto Login with External IDP",
|
||||
"autoLoginExternalIdpDescription": "Immediately redirect the user to the external IDP for authentication.",
|
||||
"selectIdp": "Select IDP",
|
||||
@@ -1447,5 +1594,492 @@
|
||||
"autoLoginRedirecting": "Redirecting to login...",
|
||||
"autoLoginError": "Auto Login Error",
|
||||
"autoLoginErrorNoRedirectUrl": "No redirect URL received from the identity provider.",
|
||||
"autoLoginErrorGeneratingUrl": "Failed to generate authentication URL."
|
||||
"autoLoginErrorGeneratingUrl": "Failed to generate authentication URL.",
|
||||
"remoteExitNodeManageRemoteExitNodes": "Remote Nodes",
|
||||
"remoteExitNodeDescription": "Self-host one or more remote nodes to extend your network connectivity and reduce reliance on the cloud",
|
||||
"remoteExitNodes": "Nodes",
|
||||
"searchRemoteExitNodes": "Search nodes...",
|
||||
"remoteExitNodeAdd": "Add Node",
|
||||
"remoteExitNodeErrorDelete": "Error deleting node",
|
||||
"remoteExitNodeQuestionRemove": "Are you sure you want to remove the node from the organization?",
|
||||
"remoteExitNodeMessageRemove": "Once removed, the node will no longer be accessible.",
|
||||
"remoteExitNodeConfirmDelete": "Confirm Delete Node",
|
||||
"remoteExitNodeDelete": "Delete Node",
|
||||
"sidebarRemoteExitNodes": "Remote Nodes",
|
||||
"remoteExitNodeCreate": {
|
||||
"title": "Create Node",
|
||||
"description": "Create a new node to extend your network connectivity",
|
||||
"viewAllButton": "View All Nodes",
|
||||
"strategy": {
|
||||
"title": "Creation Strategy",
|
||||
"description": "Choose this to manually configure your node or generate new credentials.",
|
||||
"adopt": {
|
||||
"title": "Adopt Node",
|
||||
"description": "Choose this if you already have the credentials for the node."
|
||||
},
|
||||
"generate": {
|
||||
"title": "Generate Keys",
|
||||
"description": "Choose this if you want to generate new keys for the node"
|
||||
}
|
||||
},
|
||||
"adopt": {
|
||||
"title": "Adopt Existing Node",
|
||||
"description": "Enter the credentials of the existing node you want to adopt",
|
||||
"nodeIdLabel": "Node ID",
|
||||
"nodeIdDescription": "The ID of the existing node you want to adopt",
|
||||
"secretLabel": "Secret",
|
||||
"secretDescription": "The secret key of the existing node",
|
||||
"submitButton": "Adopt Node"
|
||||
},
|
||||
"generate": {
|
||||
"title": "Generated Credentials",
|
||||
"description": "Use these generated credentials to configure your node",
|
||||
"nodeIdTitle": "Node ID",
|
||||
"secretTitle": "Secret",
|
||||
"saveCredentialsTitle": "Add Credentials to Config",
|
||||
"saveCredentialsDescription": "Add these credentials to your self-hosted Pangolin node configuration file to complete the connection.",
|
||||
"submitButton": "Create Node"
|
||||
},
|
||||
"validation": {
|
||||
"adoptRequired": "Node ID and Secret are required when adopting an existing node"
|
||||
},
|
||||
"errors": {
|
||||
"loadDefaultsFailed": "Failed to load defaults",
|
||||
"defaultsNotLoaded": "Defaults not loaded",
|
||||
"createFailed": "Failed to create node"
|
||||
},
|
||||
"success": {
|
||||
"created": "Node created successfully"
|
||||
}
|
||||
},
|
||||
"remoteExitNodeSelection": "Node Selection",
|
||||
"remoteExitNodeSelectionDescription": "Select a node to route traffic through for this local site",
|
||||
"remoteExitNodeRequired": "A node must be selected for local sites",
|
||||
"noRemoteExitNodesAvailable": "No Nodes Available",
|
||||
"noRemoteExitNodesAvailableDescription": "No nodes are available for this organization. Create a node first to use local sites.",
|
||||
"exitNode": "Exit Node",
|
||||
"country": "Country",
|
||||
"rulesMatchCountry": "Currently based on source IP",
|
||||
"managedSelfHosted": {
|
||||
"title": "Managed Self-Hosted",
|
||||
"description": "More reliable and low-maintenance self-hosted Pangolin server with extra bells and whistles",
|
||||
"introTitle": "Managed Self-Hosted Pangolin",
|
||||
"introDescription": "is a deployment option designed for people who want simplicity and extra reliability while still keeping their data private and self-hosted.",
|
||||
"introDetail": "With this option, you still run your own Pangolin node — your tunnels, SSL termination, and traffic all stay on your server. The difference is that management and monitoring are handled through our cloud dashboard, which unlocks a number of benefits:",
|
||||
"benefitSimplerOperations": {
|
||||
"title": "Simpler operations",
|
||||
"description": "No need to run your own mail server or set up complex alerting. You'll get health checks and downtime alerts out of the box."
|
||||
},
|
||||
"benefitAutomaticUpdates": {
|
||||
"title": "Automatic updates",
|
||||
"description": "The cloud dashboard evolves quickly, so you get new features and bug fixes without having to manually pull new containers every time."
|
||||
},
|
||||
"benefitLessMaintenance": {
|
||||
"title": "Less maintenance",
|
||||
"description": "No database migrations, backups, or extra infrastructure to manage. We handle that in the cloud."
|
||||
},
|
||||
"benefitCloudFailover": {
|
||||
"title": "Cloud failover",
|
||||
"description": "If your node goes down, your tunnels can temporarily fail over to our cloud points of presence until you bring it back online."
|
||||
},
|
||||
"benefitHighAvailability": {
|
||||
"title": "High availability (PoPs)",
|
||||
"description": "You can also attach multiple nodes to your account for redundancy and better performance."
|
||||
},
|
||||
"benefitFutureEnhancements": {
|
||||
"title": "Future enhancements",
|
||||
"description": "We're planning to add more analytics, alerting, and management tools to make your deployment even more robust."
|
||||
},
|
||||
"docsAlert": {
|
||||
"text": "Learn more about the Managed Self-Hosted option in our",
|
||||
"documentation": "documentation"
|
||||
},
|
||||
"convertButton": "Convert This Node to Managed Self-Hosted"
|
||||
},
|
||||
"internationaldomaindetected": "International Domain Detected",
|
||||
"willbestoredas": "Will be stored as:",
|
||||
"roleMappingDescription": "Determine how roles are assigned to users when they sign in when Auto Provision is enabled.",
|
||||
"selectRole": "Select a Role",
|
||||
"roleMappingExpression": "Expression",
|
||||
"selectRolePlaceholder": "Choose a role",
|
||||
"selectRoleDescription": "Select a role to assign to all users from this identity provider",
|
||||
"roleMappingExpressionDescription": "Enter a JMESPath expression to extract role information from the ID token",
|
||||
"idpTenantIdRequired": "Tenant ID is required",
|
||||
"invalidValue": "Invalid value",
|
||||
"idpTypeLabel": "Identity Provider Type",
|
||||
"roleMappingExpressionPlaceholder": "e.g., contains(groups, 'admin') && 'Admin' || 'Member'",
|
||||
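Since the jmespath package appears in the project's dependencies, the placeholder expression above can be evaluated roughly as follows. The claim shape (a groups array in the ID token) and the surrounding code are assumptions for illustration, not the dashboard's actual role-mapping logic.

```typescript
// Minimal sketch: evaluate a JMESPath role-mapping expression against ID token claims.
import jmespath from "jmespath";

const idTokenClaims = {
    email: "user@example.com",
    groups: ["developers", "admin"] // hypothetical claim shape
};

const expression = "contains(groups, 'admin') && 'Admin' || 'Member'";

// Resolves to "Admin" for the claims above, "Member" otherwise.
const mappedRole = jmespath.search(idTokenClaims, expression);
console.log(mappedRole);
```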
"idpGoogleConfiguration": "Google Configuration",
|
||||
"idpGoogleConfigurationDescription": "Configure your Google OAuth2 credentials",
|
||||
"idpGoogleClientIdDescription": "Your Google OAuth2 Client ID",
|
||||
"idpGoogleClientSecretDescription": "Your Google OAuth2 Client Secret",
|
||||
"idpAzureConfiguration": "Azure Entra ID Configuration",
|
||||
"idpAzureConfigurationDescription": "Configure your Azure Entra ID OAuth2 credentials",
|
||||
"idpTenantId": "Tenant ID",
|
||||
"idpTenantIdPlaceholder": "your-tenant-id",
|
||||
"idpAzureTenantIdDescription": "Your Azure tenant ID (found in Azure Active Directory overview)",
|
||||
"idpAzureClientIdDescription": "Your Azure App Registration Client ID",
|
||||
"idpAzureClientSecretDescription": "Your Azure App Registration Client Secret",
|
||||
"idpGoogleTitle": "Google",
|
||||
"idpGoogleAlt": "Google",
|
||||
"idpAzureTitle": "Azure Entra ID",
|
||||
"idpAzureAlt": "Azure",
|
||||
"idpGoogleConfigurationTitle": "Google Configuration",
|
||||
"idpAzureConfigurationTitle": "Azure Entra ID Configuration",
|
||||
"idpTenantIdLabel": "Tenant ID",
|
||||
"idpAzureClientIdDescription2": "Your Azure App Registration Client ID",
|
||||
"idpAzureClientSecretDescription2": "Your Azure App Registration Client Secret",
|
||||
"idpGoogleDescription": "Google OAuth2/OIDC provider",
|
||||
"idpAzureDescription": "Microsoft Azure OAuth2/OIDC provider",
|
||||
"subnet": "Subnet",
|
||||
"subnetDescription": "The subnet for this organization's network configuration.",
|
||||
"authPage": "Auth Page",
|
||||
"authPageDescription": "Configure the auth page for your organization",
|
||||
"authPageDomain": "Auth Page Domain",
|
||||
"noDomainSet": "No domain set",
|
||||
"changeDomain": "Change Domain",
|
||||
"selectDomain": "Select Domain",
|
||||
"restartCertificate": "Restart Certificate",
|
||||
"editAuthPageDomain": "Edit Auth Page Domain",
|
||||
"setAuthPageDomain": "Set Auth Page Domain",
|
||||
"failedToFetchCertificate": "Failed to fetch certificate",
|
||||
"failedToRestartCertificate": "Failed to restart certificate",
|
||||
"addDomainToEnableCustomAuthPages": "Add a domain to enable custom authentication pages for your organization",
|
||||
"selectDomainForOrgAuthPage": "Select a domain for the organization's authentication page",
|
||||
"domainPickerProvidedDomain": "Provided Domain",
|
||||
"domainPickerFreeProvidedDomain": "Free Provided Domain",
|
||||
"domainPickerVerified": "Verified",
|
||||
"domainPickerUnverified": "Unverified",
|
||||
"domainPickerInvalidSubdomainStructure": "This subdomain contains invalid characters or structure. It will be sanitized automatically when you save.",
|
||||
"domainPickerError": "Error",
|
||||
"domainPickerErrorLoadDomains": "Failed to load organization domains",
|
||||
"domainPickerErrorCheckAvailability": "Failed to check domain availability",
|
||||
"domainPickerInvalidSubdomain": "Invalid subdomain",
|
||||
"domainPickerInvalidSubdomainRemoved": "The input \"{sub}\" was removed because it's not valid.",
|
||||
"domainPickerInvalidSubdomainCannotMakeValid": "\"{sub}\" could not be made valid for {domain}.",
|
||||
"domainPickerSubdomainSanitized": "Subdomain sanitized",
|
||||
"domainPickerSubdomainCorrected": "\"{sub}\" was corrected to \"{sanitized}\"",
|
||||
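A hedged sketch of what the "Subdomain sanitized" and "could not be made valid" messages above might correspond to. The exact rules the dashboard applies are not shown in this diff, so this only assumes standard DNS label constraints; the function is hypothetical.

```typescript
// Illustrative sanitizer: lowercase, replace invalid characters, trim hyphens,
// and reject the result if nothing valid remains.
function sanitizeSubdomain(sub: string): string | null {
    const sanitized = sub
        .toLowerCase()
        .replace(/[^a-z0-9-]+/g, "-") // replace invalid characters with hyphens
        .replace(/^-+|-+$/g, "");     // labels cannot start or end with a hyphen
    return sanitized.length > 0 && sanitized.length <= 63 ? sanitized : null;
}

console.log(sanitizeSubdomain("My App!")); // "my-app"
console.log(sanitizeSubdomain("---"));     // null (could not be made valid)
```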
"orgAuthSignInTitle": "Sign in to your organization",
|
||||
"orgAuthChooseIdpDescription": "Choose your identity provider to continue",
|
||||
"orgAuthNoIdpConfigured": "This organization doesn't have any identity providers configured. You can log in with your Pangolin identity instead.",
|
||||
"orgAuthSignInWithPangolin": "Sign in with Pangolin",
|
||||
"subscriptionRequiredToUse": "A subscription is required to use this feature.",
|
||||
"idpDisabled": "Identity providers are disabled.",
|
||||
"orgAuthPageDisabled": "Organization auth page is disabled.",
|
||||
"domainRestartedDescription": "Domain verification restarted successfully",
|
||||
"resourceAddEntrypointsEditFile": "Edit file: config/traefik/traefik_config.yml",
|
||||
"resourceExposePortsEditFile": "Edit file: docker-compose.yml",
|
||||
"emailVerificationRequired": "Email verification is required. Please log in again via {dashboardUrl}/auth/login complete this step. Then, come back here.",
|
||||
"twoFactorSetupRequired": "Two-factor authentication setup is required. Please log in again via {dashboardUrl}/auth/login complete this step. Then, come back here.",
|
||||
"additionalSecurityRequired": "Additional Security Required",
|
||||
"organizationRequiresAdditionalSteps": "This organization requires additional security steps before you can access resources.",
|
||||
"completeTheseSteps": "Complete these steps",
|
||||
"enableTwoFactorAuthentication": "Enable two-factor authentication",
|
||||
"completeSecuritySteps": "Complete Security Steps",
|
||||
"securitySettings": "Security Settings",
|
||||
"securitySettingsDescription": "Configure security policies for your organization",
|
||||
"requireTwoFactorForAllUsers": "Require Two-Factor Authentication for All Users",
|
||||
"requireTwoFactorDescription": "When enabled, all internal users in this organization must have two-factor authentication enabled to access the organization.",
|
||||
"requireTwoFactorDisabledDescription": "This feature requires a valid license (Enterprise) or active subscription (SaaS)",
|
||||
"requireTwoFactorCannotEnableDescription": "You must enable two-factor authentication for your account before enforcing it for all users",
|
||||
"maxSessionLength": "Maximum Session Length",
|
||||
"maxSessionLengthDescription": "Set the maximum duration for user sessions. After this time, users will need to re-authenticate.",
|
||||
"maxSessionLengthDisabledDescription": "This feature requires a valid license (Enterprise) or active subscription (SaaS)",
|
||||
"selectSessionLength": "Select session length",
|
||||
"unenforced": "Unenforced",
|
||||
"1Hour": "1 hour",
|
||||
"3Hours": "3 hours",
|
||||
"6Hours": "6 hours",
|
||||
"12Hours": "12 hours",
|
||||
"1DaySession": "1 day",
|
||||
"3Days": "3 days",
|
||||
"7Days": "7 days",
|
||||
"14Days": "14 days",
|
||||
"30DaysSession": "30 days",
|
||||
"90DaysSession": "90 days",
|
||||
"180DaysSession": "180 days",
|
||||
"passwordExpiryDays": "Password Expiry",
|
||||
"editPasswordExpiryDescription": "Set the number of days before users are required to change their password.",
|
||||
"selectPasswordExpiry": "Select password expiry",
|
||||
"30Days": "30 days",
|
||||
"1Day": "1 day",
|
||||
"60Days": "60 days",
|
||||
"90Days": "90 days",
|
||||
"180Days": "180 days",
|
||||
"1Year": "1 year",
|
||||
"subscriptionBadge": "Subscription Required",
|
||||
"securityPolicyChangeWarning": "Security Policy Change Warning",
|
||||
"securityPolicyChangeDescription": "You are about to change security policy settings. After saving, you may need to reauthenticate to comply with these policy updates. All users who are not compliant will also need to reauthenticate.",
|
||||
"securityPolicyChangeConfirmMessage": "I confirm",
|
||||
"securityPolicyChangeWarningText": "This will affect all users in the organization",
|
||||
"authPageErrorUpdateMessage": "An error occurred while updating the auth page settings",
|
||||
"authPageErrorUpdate": "Unable to update auth page",
|
||||
"authPageUpdated": "Auth page updated successfully",
|
||||
"healthCheckNotAvailable": "Local",
|
||||
"rewritePath": "Rewrite Path",
|
||||
"rewritePathDescription": "Optionally rewrite the path before forwarding to the target.",
|
||||
"continueToApplication": "Continue to application",
|
||||
"checkingInvite": "Checking Invite",
|
||||
"setResourceHeaderAuth": "setResourceHeaderAuth",
|
||||
"resourceHeaderAuthRemove": "Remove Header Auth",
|
||||
"resourceHeaderAuthRemoveDescription": "Header authentication removed successfully.",
|
||||
"resourceErrorHeaderAuthRemove": "Failed to remove Header Authentication",
|
||||
"resourceErrorHeaderAuthRemoveDescription": "Could not remove header authentication for the resource.",
|
||||
"resourceHeaderAuthProtectionEnabled": "Header Authentication Enabled",
|
||||
"resourceHeaderAuthProtectionDisabled": "Header Authentication Disabled",
|
||||
"headerAuthRemove": "Remove Header Auth",
|
||||
"headerAuthAdd": "Add Header Auth",
|
||||
"resourceErrorHeaderAuthSetup": "Failed to set Header Authentication",
|
||||
"resourceErrorHeaderAuthSetupDescription": "Could not set header authentication for the resource.",
|
||||
"resourceHeaderAuthSetup": "Header Authentication set successfully",
|
||||
"resourceHeaderAuthSetupDescription": "Header authentication has been successfully set.",
|
||||
"resourceHeaderAuthSetupTitle": "Set Header Authentication",
|
||||
"resourceHeaderAuthSetupTitleDescription": "Set the basic auth credentials (username and password) to protect this resource with HTTP Header Authentication. Access it using the format https://username:password@resource.example.com",
|
||||
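The username:password@host URL format mentioned above is standard HTTP Basic authentication, which is equivalent to sending an Authorization header with the base64-encoded "username:password" pair. A minimal Node (18+) sketch is below; the credentials and resource hostname are placeholders, not values from this project.

```typescript
// Sketch: call a header-auth protected resource with an explicit Basic Authorization header.
import { Buffer } from "node:buffer";

const username = "alice";   // hypothetical credentials
const password = "s3cret";
const token = Buffer.from(`${username}:${password}`).toString("base64");

const response = await fetch("https://resource.example.com/", {
    headers: { Authorization: `Basic ${token}` }
});
console.log(response.status);
```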
"resourceHeaderAuthSubmit": "Set Header Authentication",
|
||||
"actionSetResourceHeaderAuth": "Set Header Authentication",
|
||||
"enterpriseEdition": "Enterprise Edition",
|
||||
"unlicensed": "Unlicensed",
|
||||
"beta": "Beta",
|
||||
"manageClients": "Manage Clients",
|
||||
"manageClientsDescription": "Clients are devices that can connect to your sites",
|
||||
"licenseTableValidUntil": "Valid Until",
|
||||
"saasLicenseKeysSettingsTitle": "Enterprise Licenses",
|
||||
"saasLicenseKeysSettingsDescription": "Generate and manage Enterprise license keys for self-hosted Pangolin instances",
|
||||
"sidebarEnterpriseLicenses": "Licenses",
|
||||
"generateLicenseKey": "Generate License Key",
|
||||
"generateLicenseKeyForm": {
|
||||
"validation": {
|
||||
"emailRequired": "Please enter a valid email address",
|
||||
"useCaseTypeRequired": "Please select a use case type",
|
||||
"firstNameRequired": "First name is required",
|
||||
"lastNameRequired": "Last name is required",
|
||||
"primaryUseRequired": "Please describe your primary use",
|
||||
"jobTitleRequiredBusiness": "Job title is required for business use",
|
||||
"industryRequiredBusiness": "Industry is required for business use",
|
||||
"stateProvinceRegionRequired": "State/Province/Region is required",
|
||||
"postalZipCodeRequired": "Postal/ZIP Code is required",
|
||||
"companyNameRequiredBusiness": "Company name is required for business use",
|
||||
"countryOfResidenceRequiredBusiness": "Country of residence is required for business use",
|
||||
"countryRequiredPersonal": "Country is required for personal use",
|
||||
"agreeToTermsRequired": "You must agree to the terms",
|
||||
"complianceConfirmationRequired": "You must confirm compliance with the Fossorial Commercial License"
|
||||
},
|
||||
"useCaseOptions": {
|
||||
"personal": {
|
||||
"title": "Personal Use",
|
||||
"description": "For individual, non-commercial use such as learning, personal projects, or experimentation."
|
||||
},
|
||||
"business": {
|
||||
"title": "Business Use",
|
||||
"description": "For use within organizations, companies, or commercial or revenue-generating activities."
|
||||
}
|
||||
},
|
||||
"steps": {
|
||||
"emailLicenseType": {
|
||||
"title": "Email & License Type",
|
||||
"description": "Enter your email and choose your license type"
|
||||
},
|
||||
"personalInformation": {
|
||||
"title": "Personal Information",
|
||||
"description": "Tell us about yourself"
|
||||
},
|
||||
"contactInformation": {
|
||||
"title": "Contact Information",
|
||||
"description": "Your contact details"
|
||||
},
|
||||
"termsGenerate": {
|
||||
"title": "Terms & Generate",
|
||||
"description": "Review and accept terms to generate your license"
|
||||
}
|
||||
},
|
||||
"alerts": {
|
||||
"commercialUseDisclosure": {
|
||||
"title": "Usage Disclosure",
|
||||
"description": "Select the license tier that accurately reflects your intended use. The Personal License permits free use of the Software for individual, non-commercial or small-scale commercial activities with annual gross revenue under $100,000 USD. Any use beyond these limits — including use within a business, organization, or other revenue-generating environment — requires a valid Enterprise License and payment of the applicable licensing fee. All users, whether Personal or Enterprise, must comply with the Fossorial Commercial License Terms."
|
||||
},
|
||||
"trialPeriodInformation": {
|
||||
"title": "Trial Period Information",
|
||||
"description": "This License Key enables Enterprise features for a 7-day evaluation period. Continued access to Paid Features beyond the evaluation period requires activation under a valid Personal or Enterprise License. For Enterprise licensing, contact sales@pangolin.net."
|
||||
}
|
||||
},
|
||||
"form": {
|
||||
"useCaseQuestion": "Are you using Pangolin for personal or business use?",
|
||||
"firstName": "First Name",
|
||||
"lastName": "Last Name",
|
||||
"jobTitle": "Job Title",
|
||||
"primaryUseQuestion": "What do you primarily plan to use Pangolin for?",
|
||||
"industryQuestion": "What is your industry?",
|
||||
"prospectiveUsersQuestion": "How many prospective users do you expect to have?",
|
||||
"prospectiveSitesQuestion": "How many prospective sites (tunnels) do you expect to have?",
|
||||
"companyName": "Company name",
|
||||
"countryOfResidence": "Country of residence",
|
||||
"stateProvinceRegion": "State / Province / Region",
|
||||
"postalZipCode": "Postal / ZIP Code",
|
||||
"companyWebsite": "Company website",
|
||||
"companyPhoneNumber": "Company phone number",
|
||||
"country": "Country",
|
||||
"phoneNumberOptional": "Phone number (optional)",
|
||||
"complianceConfirmation": "I confirm that the information I provided is accurate and that I am in compliance with the Fossorial Commercial License. Reporting inaccurate information or misidentifying use of the product is a violation of the license and may result in your key getting revoked."
|
||||
},
|
||||
"buttons": {
|
||||
"close": "Close",
|
||||
"previous": "Previous",
|
||||
"next": "Next",
|
||||
"generateLicenseKey": "Generate License Key"
|
||||
},
|
||||
"toasts": {
|
||||
"success": {
|
||||
"title": "License key generated successfully",
|
||||
"description": "Your license key has been generated and is ready to use."
|
||||
},
|
||||
"error": {
|
||||
"title": "Failed to generate license key",
|
||||
"description": "An error occurred while generating the license key."
|
||||
}
|
||||
}
|
||||
},
|
||||
"priority": "Priority",
|
||||
"priorityDescription": "Higher priority routes are evaluated first. Priority = 100 means automatic ordering (system decides). Use another number to enforce manual priority.",
|
||||
"instanceName": "Instance Name",
|
||||
"pathMatchModalTitle": "Configure Path Matching",
|
||||
"pathMatchModalDescription": "Set up how incoming requests should be matched based on their path.",
|
||||
"pathMatchType": "Match Type",
|
||||
"pathMatchPrefix": "Prefix",
|
||||
"pathMatchExact": "Exact",
|
||||
"pathMatchRegex": "Regex",
|
||||
"pathMatchValue": "Path Value",
|
||||
"clear": "Clear",
|
||||
"saveChanges": "Save Changes",
|
||||
"pathMatchRegexPlaceholder": "^/api/.*",
|
||||
"pathMatchDefaultPlaceholder": "/path",
|
||||
"pathMatchPrefixHelp": "Example: /api matches /api, /api/users, etc.",
|
||||
"pathMatchExactHelp": "Example: /api matches only /api",
|
||||
"pathMatchRegexHelp": "Example: ^/api/.* matches /api/anything",
|
||||
"pathRewriteModalTitle": "Configure Path Rewriting",
|
||||
"pathRewriteModalDescription": "Transform the matched path before forwarding to the target.",
|
||||
"pathRewriteType": "Rewrite Type",
|
||||
"pathRewritePrefixOption": "Prefix - Replace prefix",
|
||||
"pathRewriteExactOption": "Exact - Replace entire path",
|
||||
"pathRewriteRegexOption": "Regex - Pattern replacement",
|
||||
"pathRewriteStripPrefixOption": "Strip Prefix - Remove prefix",
|
||||
"pathRewriteValue": "Rewrite Value",
|
||||
"pathRewriteRegexPlaceholder": "/new/$1",
|
||||
"pathRewriteDefaultPlaceholder": "/new-path",
|
||||
"pathRewritePrefixHelp": "Replace the matched prefix with this value",
|
||||
"pathRewriteExactHelp": "Replace the entire path with this value when the path matches exactly",
|
||||
"pathRewriteRegexHelp": "Use capture groups like $1, $2 for replacement",
|
||||
"pathRewriteStripPrefixHelp": "Leave empty to strip prefix or provide new prefix",
|
||||
"pathRewritePrefix": "Prefix",
|
||||
"pathRewriteExact": "Exact",
|
||||
"pathRewriteRegex": "Regex",
|
||||
"pathRewriteStrip": "Strip",
|
||||
"pathRewriteStripLabel": "strip",
|
||||
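A short TypeScript sketch tying together the match types and rewrite types listed above. It illustrates the described semantics (prefix, exact, regex with $1 capture groups, strip prefix) and is an assumption, not the proxy's actual routing code.

```typescript
type MatchType = "prefix" | "exact" | "regex";
type RewriteType = "prefix" | "exact" | "regex" | "stripPrefix";

// Decide whether an incoming path matches the configured rule.
function matchPath(path: string, type: MatchType, value: string): boolean {
    switch (type) {
        case "prefix":
            return path === value || path.startsWith(value.endsWith("/") ? value : value + "/");
        case "exact":
            return path === value;
        case "regex":
            return new RegExp(value).test(path);
    }
}

// Transform the matched path before forwarding to the target.
function rewritePath(path: string, matchValue: string, type: RewriteType, rewriteValue: string): string {
    switch (type) {
        case "prefix":
            return rewriteValue + path.slice(matchValue.length);
        case "exact":
            return rewriteValue;
        case "regex":
            return path.replace(new RegExp(matchValue), rewriteValue); // $1, $2 capture groups
        case "stripPrefix":
            return rewriteValue + path.slice(matchValue.length) || "/";
    }
}

// /api/users matches the prefix /api; a regex rewrite maps it to /v2/users.
console.log(matchPath("/api/users", "prefix", "/api"));                 // true
console.log(rewritePath("/api/users", "^/api/(.*)", "regex", "/v2/$1")); // "/v2/users"
```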
"sidebarEnableEnterpriseLicense": "Enable Enterprise License",
|
||||
"cannotbeUndone": "This can not be undone.",
|
||||
"toConfirm": "to confirm",
|
||||
"deleteClientQuestion": "Are you sure you want to remove the client from the site and organization?",
|
||||
"clientMessageRemove": "Once removed, the client will no longer be able to connect to the site.",
|
||||
"sidebarLogs": "Logs",
|
||||
"request": "Request",
|
||||
"logs": "Logs",
|
||||
"logsSettingsDescription": "Monitor logs collected from this orginization",
|
||||
"searchLogs": "Search logs...",
|
||||
"action": "Action",
|
||||
"actor": "Actor",
|
||||
"timestamp": "Timestamp",
|
||||
"accessLogs": "Access Logs",
|
||||
"exportCsv": "Export CSV",
|
||||
"actorId": "Actor ID",
|
||||
"allowedByRule": "Allowed by Rule",
|
||||
"allowedNoAuth": "Allowed No Auth",
|
||||
"validAccessToken": "Valid Access Token",
|
||||
"validHeaderAuth": "Valid header auth",
|
||||
"validPincode": "Valid Pincode",
|
||||
"validPassword": "Valid Password",
|
||||
"validEmail": "Valid email",
|
||||
"validSSO": "Valid SSO",
|
||||
"resourceBlocked": "Resource Blocked",
|
||||
"droppedByRule": "Dropped by Rule",
|
||||
"noSessions": "No Sessions",
|
||||
"temporaryRequestToken": "Temporary Request Token",
|
||||
"noMoreAuthMethods": "No Valid Auth",
|
||||
"ip": "IP",
|
||||
"reason": "Reason",
|
||||
"requestLogs": "Request Logs",
|
||||
"host": "Host",
|
||||
"location": "Location",
|
||||
"actionLogs": "Action Logs",
|
||||
"sidebarLogsRequest": "Request Logs",
|
||||
"sidebarLogsAccess": "Access Logs",
|
||||
"sidebarLogsAction": "Action Logs",
|
||||
"logRetention": "Log Retention",
|
||||
"logRetentionDescription": "Manage how long different types of logs are retained for this organization or disable them",
|
||||
"requestLogsDescription": "View detailed request logs for resources in this organization",
|
||||
"logRetentionRequestLabel": "Request Log Retention",
|
||||
"logRetentionRequestDescription": "How long to retain request logs",
|
||||
"logRetentionAccessLabel": "Access Log Retention",
|
||||
"logRetentionAccessDescription": "How long to retain access logs",
|
||||
"logRetentionActionLabel": "Action Log Retention",
|
||||
"logRetentionActionDescription": "How long to retain action logs",
|
||||
"logRetentionDisabled": "Disabled",
|
||||
"logRetention3Days": "3 days",
|
||||
"logRetention7Days": "7 days",
|
||||
"logRetention14Days": "14 days",
|
||||
"logRetention30Days": "30 days",
|
||||
"logRetention90Days": "90 days",
|
||||
"logRetentionForever": "Forever",
|
||||
"actionLogsDescription": "View a history of actions performed in this organization",
|
||||
"accessLogsDescription": "View access auth requests for resources in this organization",
|
||||
"licenseRequiredToUse": "An Enterprise license is required to use this feature.",
|
||||
"certResolver": "Certificate Resolver",
|
||||
"certResolverDescription": "Select the certificate resolver to use for this resource.",
|
||||
"selectCertResolver": "Select Certificate Resolver",
|
||||
"enterCustomResolver": "Enter Custom Resolver",
|
||||
"preferWildcardCert": "Prefer Wildcard Certificate",
|
||||
"unverified": "Unverified",
|
||||
"domainSetting": "Domain Settings",
|
||||
"domainSettingDescription": "Configure settings for your domain",
|
||||
"preferWildcardCertDescription": "Attempt to generate a wildcard certificate (require a properly configured certificate resolver).",
|
||||
"recordName": "Record Name",
|
||||
"auto": "Auto",
|
||||
"TTL": "TTL",
|
||||
"howToAddRecords": "How to Add Records",
|
||||
"dnsRecord": "DNS Records",
|
||||
"required": "Required",
|
||||
"domainSettingsUpdated": "Domain settings updated successfully",
|
||||
"orgOrDomainIdMissing": "Organization or Domain ID is missing",
|
||||
"loadingDNSRecords": "Loading DNS records...",
|
||||
"olmUpdateAvailableInfo": "An updated version of Olm is available. Please update to the latest version for the best experience.",
|
||||
"client": "Client",
|
||||
"proxyProtocol": "Proxy Protocol Settings",
|
||||
"proxyProtocolDescription": "Configure Proxy Protocol to preserve client IP addresses for TCP/UDP services.",
|
||||
"enableProxyProtocol": "Enable Proxy Protocol",
|
||||
"proxyProtocolInfo": "Preserve client IP addresses for TCP/UDP backends",
|
||||
"proxyProtocolVersion": "Proxy Protocol Version",
|
||||
"version1": " Version 1 (Recommended)",
|
||||
"version2": "Version 2",
|
||||
"versionDescription": "Version 1 is text-based and widely supported. Version 2 is binary and more efficient but less compatible. Make sure servers transport is added to dynamic config.",
|
||||
"warning": "Warning",
|
||||
"proxyProtocolWarning": "Your backend application must be configured to accept Proxy Protocol connections. If your backend doesn't support Proxy Protocol, enabling this will break all connections so only enable this if you know what you're doing. Make sure to configure your backend to trust Proxy Protocol headers from Traefik.",
|
||||
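For reference, the text-based Proxy Protocol version 1 header that a backend receives before the real payload looks like the sketch below (per the HAProxy PROXY protocol specification). The addresses and ports are placeholders, and this is not Pangolin's implementation; version 2 carries the same information in a binary format.

```typescript
// Sketch: build the single-line Proxy Protocol v1 header prepended to a TCP connection.
function buildProxyProtocolV1Header(
    srcIp: string,
    dstIp: string,
    srcPort: number,
    dstPort: number
): string {
    // e.g. "PROXY TCP4 203.0.113.7 10.0.0.5 51234 443\r\n"
    return `PROXY TCP4 ${srcIp} ${dstIp} ${srcPort} ${dstPort}\r\n`;
}

console.log(JSON.stringify(buildProxyProtocolV1Header("203.0.113.7", "10.0.0.5", 51234, 443)));
```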
"restarting": "Restarting...",
|
||||
"manual": "Manual",
|
||||
"messageSupport": "Message Support",
|
||||
"supportNotAvailableTitle": "Support Not Available",
|
||||
"supportNotAvailableDescription": "Support is not available right now. You can send an email to support@pangolin.net.",
|
||||
"supportRequestSentTitle": "Support Request Sent",
|
||||
"supportRequestSentDescription": "Your message has been sent successfully.",
|
||||
"supportRequestFailedTitle": "Failed to Send Request",
|
||||
"supportRequestFailedDescription": "An error occurred while sending your support request.",
|
||||
"supportSubjectRequired": "Subject is required",
|
||||
"supportSubjectMaxLength": "Subject must be 255 characters or less",
|
||||
"supportMessageRequired": "Message is required",
|
||||
"supportReplyTo": "Reply To",
|
||||
"supportSubject": "Subject",
|
||||
"supportSubjectPlaceholder": "Enter subject",
|
||||
"supportMessage": "Message",
|
||||
"supportMessagePlaceholder": "Enter your message",
|
||||
"supportSending": "Sending...",
|
||||
"supportSend": "Send",
|
||||
"supportMessageSent": "Message Sent!",
|
||||
"supportWillContact": "We'll be in touch shortly!",
|
||||
"selectLogRetention": "Select log retention"
|
||||
}
|
||||
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
1094 messages/pt-PT.json (file diff suppressed because it is too large)
1120 messages/ru-RU.json (file diff suppressed because it is too large)
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -7,7 +7,8 @@ const nextConfig = {
|
||||
eslint: {
|
||||
ignoreDuringBuilds: true
|
||||
},
|
||||
output: "standalone"
|
||||
output: "standalone",
|
||||
|
||||
};
|
||||
|
||||
export default withNextIntl(nextConfig);
|
||||
|
||||
9242 package-lock.json (generated; file diff suppressed because it is too large)
132 package.json
@@ -19,48 +19,55 @@
|
||||
"db:sqlite:studio": "drizzle-kit studio --config=./drizzle.sqlite.config.ts",
|
||||
"db:pg:studio": "drizzle-kit studio --config=./drizzle.pg.config.ts",
|
||||
"db:clear-migrations": "rm -rf server/migrations",
|
||||
"set:oss": "echo 'export const build = \"oss\" as any;' > server/build.ts && cp tsconfig.oss.json tsconfig.json",
|
||||
"set:saas": "echo 'export const build = \"saas\" as any;' > server/build.ts && cp tsconfig.saas.json tsconfig.json",
|
||||
"set:enterprise": "echo 'export const build = \"enterprise\" as any;' > server/build.ts && cp tsconfig.enterprise.json tsconfig.json",
|
||||
"set:sqlite": "echo 'export * from \"./sqlite\";' > server/db/index.ts",
|
||||
"set:pg": "echo 'export * from \"./pg\";' > server/db/index.ts",
|
||||
"next:build": "next build",
|
||||
"build:sqlite": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs",
|
||||
"build:pg": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs",
|
||||
"start:sqlite": "DB_TYPE=sqlite NODE_OPTIONS=--enable-source-maps NODE_ENV=development ENVIRONMENT=prod sh -c 'node dist/migrations.mjs && node dist/server.mjs'",
|
||||
"start:pg": "DB_TYPE=pg NODE_OPTIONS=--enable-source-maps NODE_ENV=development ENVIRONMENT=prod sh -c 'node dist/migrations.mjs && node dist/server.mjs'",
|
||||
"start": "ENVIRONMENT=prod node dist/migrations.mjs && ENVIRONMENT=prod NODE_ENV=development node --enable-source-maps dist/server.mjs",
|
||||
"email": "email dev --dir server/emails/templates --port 3005",
|
||||
"build:cli": "node esbuild.mjs -e cli/index.ts -o dist/cli.mjs"
|
||||
},
|
||||
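Read together, the new set:* and build/start scripts above suggest (this is an inference from the script names, not documented usage) that a self-hosted SQLite build would be produced with something like `npm run set:oss`, `npm run set:sqlite`, `npm run build:sqlite`, and then served with `npm run start:sqlite`, while the PostgreSQL variant swaps in the corresponding :pg scripts.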
"dependencies": {
|
||||
"@asteasolutions/zod-to-openapi": "^7.3.4",
|
||||
"@hookform/resolvers": "3.9.1",
|
||||
"@aws-sdk/client-s3": "3.908.0",
|
||||
"@hookform/resolvers": "5.2.2",
|
||||
"@monaco-editor/react": "^4.7.0",
|
||||
"@node-rs/argon2": "^2.0.2",
|
||||
"@oslojs/crypto": "1.0.1",
|
||||
"@oslojs/encoding": "1.1.0",
|
||||
"@radix-ui/react-avatar": "1.1.10",
|
||||
"@radix-ui/react-checkbox": "1.3.2",
|
||||
"@radix-ui/react-collapsible": "1.1.11",
|
||||
"@radix-ui/react-dialog": "1.1.14",
|
||||
"@radix-ui/react-dropdown-menu": "2.1.15",
|
||||
"@radix-ui/react-checkbox": "1.3.3",
|
||||
"@radix-ui/react-collapsible": "1.1.12",
|
||||
"@radix-ui/react-dialog": "1.1.15",
|
||||
"@radix-ui/react-dropdown-menu": "2.1.16",
|
||||
"@radix-ui/react-icons": "1.3.2",
|
||||
"@radix-ui/react-label": "2.1.7",
|
||||
"@radix-ui/react-popover": "1.1.14",
|
||||
"@radix-ui/react-popover": "1.1.15",
|
||||
"@radix-ui/react-progress": "^1.1.7",
|
||||
"@radix-ui/react-radio-group": "1.3.7",
|
||||
"@radix-ui/react-scroll-area": "^1.2.9",
|
||||
"@radix-ui/react-select": "2.2.5",
|
||||
"@radix-ui/react-radio-group": "1.3.8",
|
||||
"@radix-ui/react-scroll-area": "^1.2.10",
|
||||
"@radix-ui/react-select": "2.2.6",
|
||||
"@radix-ui/react-separator": "1.1.7",
|
||||
"@radix-ui/react-slot": "1.2.3",
|
||||
"@radix-ui/react-switch": "1.2.5",
|
||||
"@radix-ui/react-tabs": "1.1.12",
|
||||
"@radix-ui/react-toast": "1.2.14",
|
||||
"@radix-ui/react-tooltip": "^1.2.7",
|
||||
"@react-email/components": "0.5.0",
|
||||
"@react-email/render": "^1.2.0",
|
||||
"@radix-ui/react-switch": "1.2.6",
|
||||
"@radix-ui/react-tabs": "1.1.13",
|
||||
"@radix-ui/react-toast": "1.2.15",
|
||||
"@radix-ui/react-tooltip": "^1.2.8",
|
||||
"@react-email/components": "0.5.7",
|
||||
"@react-email/render": "^1.3.2",
|
||||
"@react-email/tailwind": "1.2.2",
|
||||
"@simplewebauthn/browser": "^13.1.0",
|
||||
"@simplewebauthn/server": "^9.0.3",
|
||||
"@simplewebauthn/browser": "^13.2.2",
|
||||
"@simplewebauthn/server": "^13.2.2",
|
||||
"@tailwindcss/forms": "^0.5.10",
|
||||
"@tanstack/react-table": "8.21.3",
|
||||
"arctic": "^3.7.0",
|
||||
"axios": "1.11.0",
|
||||
"axios": "^1.12.2",
|
||||
"better-sqlite3": "11.7.0",
|
||||
"canvas-confetti": "1.9.3",
|
||||
"canvas-confetti": "1.9.4",
|
||||
"class-variance-authority": "^0.7.1",
|
||||
"clsx": "2.1.1",
|
||||
"cmdk": "1.1.1",
|
||||
@@ -69,83 +76,96 @@
|
||||
"cookies": "^0.9.1",
|
||||
"cors": "2.8.5",
|
||||
"crypto-js": "^4.2.0",
|
||||
"drizzle-orm": "0.44.4",
|
||||
"eslint": "9.33.0",
|
||||
"eslint-config-next": "15.4.6",
|
||||
"date-fns": "4.1.0",
|
||||
"drizzle-orm": "0.44.7",
|
||||
"eslint": "9.37.0",
|
||||
"eslint-config-next": "15.5.6",
|
||||
"express": "5.1.0",
|
||||
"express-rate-limit": "7.5.1",
|
||||
"express-rate-limit": "8.1.0",
|
||||
"glob": "11.0.3",
|
||||
"helmet": "8.1.0",
|
||||
"http-errors": "2.0.0",
|
||||
"i": "^0.3.7",
|
||||
"input-otp": "1.4.2",
|
||||
"ioredis": "5.8.2",
|
||||
"jmespath": "^0.16.0",
|
||||
"js-yaml": "4.1.0",
|
||||
"jsonwebtoken": "^9.0.2",
|
||||
"lucide-react": "0.539.0",
|
||||
"lucide-react": "^0.545.0",
|
||||
"maxmind": "5.0.0",
|
||||
"moment": "2.30.1",
|
||||
"next": "15.4.6",
|
||||
"next-intl": "^4.3.4",
|
||||
"next": "15.5.6",
|
||||
"next-intl": "^4.3.12",
|
||||
"next-themes": "0.4.6",
|
||||
"nextjs-toploader": "^3.9.17",
|
||||
"node-cache": "5.1.2",
|
||||
"node-fetch": "3.3.2",
|
||||
"nodemailer": "7.0.5",
|
||||
"npm": "^11.5.2",
|
||||
"nodemailer": "7.0.10",
|
||||
"npm": "^11.6.2",
|
||||
"nprogress": "^0.2.0",
|
||||
"oslo": "1.2.1",
|
||||
"pg": "^8.16.2",
|
||||
"posthog-node": "^5.7.0",
|
||||
"posthog-node": "^5.10.4",
|
||||
"qrcode.react": "4.2.0",
|
||||
"react": "19.1.1",
|
||||
"react-dom": "19.1.1",
|
||||
"react-easy-sort": "^1.6.0",
|
||||
"react-hook-form": "7.62.0",
|
||||
"react": "19.2.0",
|
||||
"react-day-picker": "9.11.1",
|
||||
"react-dom": "19.2.0",
|
||||
"react-easy-sort": "^1.8.0",
|
||||
"react-hook-form": "7.65.0",
|
||||
"react-icons": "^5.5.0",
|
||||
"rebuild": "0.1.2",
|
||||
"semver": "^7.7.2",
|
||||
"reodotdev": "^1.0.0",
|
||||
"resend": "^6.1.2",
|
||||
"semver": "^7.7.3",
|
||||
"stripe": "18.2.1",
|
||||
"swagger-ui-express": "^5.0.1",
|
||||
"tailwind-merge": "3.3.1",
|
||||
"tw-animate-css": "^1.3.6",
|
||||
"uuid": "^11.1.0",
|
||||
"tw-animate-css": "^1.3.8",
|
||||
"uuid": "^13.0.0",
|
||||
"vaul": "1.1.2",
|
||||
"winston": "3.17.0",
|
||||
"winston": "3.18.3",
|
||||
"winston-daily-rotate-file": "5.0.0",
|
||||
"ws": "8.18.3",
|
||||
"yaml": "^2.8.1",
|
||||
"yargs": "18.0.0",
|
||||
"zod": "3.25.76",
|
||||
"zod-validation-error": "3.5.2"
|
||||
"zod-validation-error": "3.5.2",
|
||||
"@faker-js/faker": "^10.1.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@dotenvx/dotenvx": "1.48.4",
|
||||
"@dotenvx/dotenvx": "1.51.0",
|
||||
"@esbuild-plugins/tsconfig-paths": "0.1.2",
|
||||
"@tailwindcss/postcss": "^4.1.10",
|
||||
"@react-email/preview-server": "4.3.2",
|
||||
"@tailwindcss/postcss": "^4.1.16",
|
||||
"@types/better-sqlite3": "7.6.12",
|
||||
"@types/cookie-parser": "1.4.9",
|
||||
"@types/cookie-parser": "1.4.10",
|
||||
"@types/cors": "2.8.19",
|
||||
"@types/crypto-js": "^4.2.2",
|
||||
"@types/express": "5.0.3",
|
||||
"@types/express": "5.0.5",
|
||||
"@types/express-session": "^1.18.2",
|
||||
"@types/jmespath": "^0.15.2",
|
||||
"@types/js-yaml": "4.0.9",
|
||||
"@types/jsonwebtoken": "^9.0.10",
|
||||
"@types/node": "^24",
|
||||
"@types/nodemailer": "6.4.17",
|
||||
"@types/pg": "8.15.5",
|
||||
"@types/react": "19.1.10",
|
||||
"@types/react-dom": "19.1.7",
|
||||
"@types/semver": "^7.7.0",
|
||||
"@types/nprogress": "^0.2.3",
|
||||
"@types/node": "24.9.2",
|
||||
"@types/nodemailer": "7.0.3",
|
||||
"@types/pg": "8.15.6",
|
||||
"@types/react": "19.2.2",
|
||||
"@types/react-dom": "19.2.2",
|
||||
"@types/semver": "^7.7.1",
|
||||
"@types/swagger-ui-express": "^4.1.8",
|
||||
"@types/ws": "8.18.1",
|
||||
"@types/yargs": "17.0.33",
|
||||
"drizzle-kit": "0.31.4",
|
||||
"esbuild": "0.25.9",
|
||||
"@types/yargs": "17.0.34",
|
||||
"drizzle-kit": "0.31.6",
|
||||
"esbuild": "0.25.11",
|
||||
"esbuild-node-externals": "1.18.0",
|
||||
"postcss": "^8",
|
||||
"react-email": "4.2.8",
|
||||
"react-email": "4.3.2",
|
||||
"tailwindcss": "^4.1.4",
|
||||
"tsc-alias": "1.8.16",
|
||||
"tsx": "4.20.4",
|
||||
"tsx": "4.20.6",
|
||||
"typescript": "^5",
|
||||
"typescript-eslint": "^8.39.1"
|
||||
"typescript-eslint": "^8.46.2"
|
||||
},
|
||||
"overrides": {
|
||||
"emblor": {
|
||||
|
||||
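The `set:*` scripts above drive the build flavor and the database backend by generating two tiny modules before compilation. A minimal sketch of what they produce and how downstream code branches on the result (the generated one-liners are taken verbatim from the scripts; the guarded body is a hypothetical placeholder):

// server/build.ts, as written by "set:oss" / "set:saas" / "set:enterprise":
export const build = "oss" as any;

// server/db/index.ts, as written by "set:sqlite" / "set:pg":
export * from "./sqlite";

// Consumers then branch on the flavor at runtime, as server/index.ts does below:
import { build } from "./build";
if (build !== "oss") {
    // mount saas/enterprise-only routers here (illustrative only)
}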
BIN public/idp/azure.png (new file, 65 KiB), binary file not shown.
BIN public/idp/google.png (new file, 46 KiB), binary file not shown.
Two existing binary images updated, not shown: 34 KiB -> 13 KiB and 33 KiB -> 13 KiB.
@@ -7,16 +7,21 @@ import {
    errorHandlerMiddleware,
    notFoundMiddleware
} from "@server/middlewares";
import { authenticated, unauthenticated } from "@server/routers/external";
import { router as wsRouter, handleWSUpgrade } from "@server/routers/ws";
import { authenticated, unauthenticated } from "#dynamic/routers/external";
import { router as wsRouter, handleWSUpgrade } from "#dynamic/routers/ws";
import { logIncomingMiddleware } from "./middlewares/logIncoming";
import { csrfProtectionMiddleware } from "./middlewares/csrfProtection";
import helmet from "helmet";
import rateLimit from "express-rate-limit";
import { build } from "./build";
import rateLimit, { ipKeyGenerator } from "express-rate-limit";
import createHttpError from "http-errors";
import HttpCode from "./types/HttpCode";
import requestTimeoutMiddleware from "./middlewares/requestTimeout";
import { createStore } from "./lib/rateLimitStore";
import { createStore } from "#dynamic/lib/rateLimitStore";
import { stripDuplicateSesions } from "./middlewares/stripDuplicateSessions";
import { corsWithLoginPageSupport } from "@server/lib/corsWithLoginPage";
import { hybridRouter } from "#dynamic/routers/hybrid";
import { billingWebhookHandler } from "#dynamic/routers/billing/webhooks";

const dev = config.isDev;
const externalPort = config.getRawConfig().server.external_port;
@@ -30,8 +35,15 @@ export function createApiServer() {
        apiServer.set("trust proxy", trustProxy);
    }

    const corsConfig = config.getRawConfig().server.cors;
    if (build == "saas") {
        apiServer.post(
            `${prefix}/billing/webhooks`,
            express.raw({ type: "application/json" }),
            billingWebhookHandler
        );
    }

    const corsConfig = config.getRawConfig().server.cors;
    const options = {
        ...(corsConfig?.origins
            ? { origin: corsConfig.origins }
@@ -47,15 +59,20 @@
        credentials: !(corsConfig?.credentials === false)
    };

    logger.debug("Using CORS options", options);

    apiServer.use(cors(options));
    if (build == "oss" || !corsConfig) {
        logger.debug("Using CORS options", options);
        apiServer.use(cors(options));
    } else if (corsConfig) {
        // Use the custom CORS middleware with loginPage support
        apiServer.use(corsWithLoginPageSupport(corsConfig));
    }

    if (!dev) {
        apiServer.use(helmet());
        apiServer.use(csrfProtectionMiddleware);
    }

    apiServer.use(stripDuplicateSesions);
    apiServer.use(cookieParser());
    apiServer.use(express.json());

@@ -70,7 +87,8 @@ export function createApiServer() {
            60 *
            1000,
        max: config.getRawConfig().rate_limits.global.max_requests,
        keyGenerator: (req) => `apiServerGlobal:${req.ip}:${req.path}`,
        keyGenerator: (req) =>
            `apiServerGlobal:${ipKeyGenerator(req.ip || "")}:${req.path}`,
        handler: (req, res, next) => {
            const message = `Rate limit exceeded. You can make ${config.getRawConfig().rate_limits.global.max_requests} requests every ${config.getRawConfig().rate_limits.global.window_minutes} minute(s).`;
            return next(
@@ -85,6 +103,9 @@ export function createApiServer() {
    // API routes
    apiServer.use(logIncomingMiddleware);
    apiServer.use(prefix, unauthenticated);
    if (build !== "oss") {
        apiServer.use(`${prefix}/hybrid`, hybridRouter);
    }
    apiServer.use(prefix, authenticated);

    // WebSocket routes
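The switch from `req.ip` to `ipKeyGenerator(req.ip || "")` matters for IPv6 clients: express-rate-limit 8 (the version pinned in package.json above) ships this helper to bucket IPv6 addresses by subnet so a client cannot dodge the limit by rotating through addresses. A minimal standalone sketch of the same keying scheme; the window and cap are illustrative values, not the project's configured rate_limits:

import express from "express";
import rateLimit, { ipKeyGenerator } from "express-rate-limit";

const app = express();

app.use(
    rateLimit({
        windowMs: 60 * 1000, // 1 minute window (example value)
        max: 500, // example cap, not the configured max_requests
        // Group IPv6 clients by subnet instead of individual address,
        // then scope the counter per path, mirroring the keyGenerator above.
        keyGenerator: (req) =>
            `apiServerGlobal:${ipKeyGenerator(req.ip || "")}:${req.path}`
    })
);

app.get("/ping", (_req, res) => { res.send("pong"); });
app.listen(3000);

Note also the ordering in the diff: the billing webhook route is mounted with express.raw() before express.json() runs, so the handler still sees the untouched payload for signature verification.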
@@ -60,6 +60,7 @@ export enum ActionsEnum {
    getUser = "getUser",
    setResourcePassword = "setResourcePassword",
    setResourcePincode = "setResourcePincode",
    setResourceHeaderAuth = "setResourceHeaderAuth",
    setResourceWhitelist = "setResourceWhitelist",
    getResourceWhitelist = "getResourceWhitelist",
    generateAccessToken = "generateAccessToken",
@@ -80,6 +81,9 @@ export enum ActionsEnum {
    listClients = "listClients",
    getClient = "getClient",
    listOrgDomains = "listOrgDomains",
    getDomain = "getDomain",
    updateOrgDomain = "updateOrgDomain",
    getDNSRecords = "getDNSRecords",
    createNewt = "createNewt",
    createIdp = "createIdp",
    updateIdp = "updateIdp",
@@ -98,9 +102,28 @@ export enum ActionsEnum {
    listApiKeyActions = "listApiKeyActions",
    listApiKeys = "listApiKeys",
    getApiKey = "getApiKey",
    getCertificate = "getCertificate",
    restartCertificate = "restartCertificate",
    billing = "billing",
    createOrgDomain = "createOrgDomain",
    deleteOrgDomain = "deleteOrgDomain",
    restartOrgDomain = "restartOrgDomain"
    restartOrgDomain = "restartOrgDomain",
    sendUsageNotification = "sendUsageNotification",
    createRemoteExitNode = "createRemoteExitNode",
    updateRemoteExitNode = "updateRemoteExitNode",
    getRemoteExitNode = "getRemoteExitNode",
    listRemoteExitNode = "listRemoteExitNode",
    deleteRemoteExitNode = "deleteRemoteExitNode",
    updateOrgUser = "updateOrgUser",
    createLoginPage = "createLoginPage",
    updateLoginPage = "updateLoginPage",
    getLoginPage = "getLoginPage",
    deleteLoginPage = "deleteLoginPage",
    listBlueprints = "listBlueprints",
    getBlueprint = "getBlueprint",
    applyBlueprint = "applyBlueprint",
    viewLogs = "viewLogs",
    exportLogs = "exportLogs"
}

export async function checkUserActionPermission(
@@ -177,8 +200,6 @@ export async function checkUserActionPermission(
            .limit(1);

        return roleActionPermission.length > 0;

        return false;
    } catch (error) {
        console.error("Error checking user action permission:", error);
        throw createHttpError(
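The updated permission check now returns as soon as the `.limit(1)` query finds a matching row, dropping the unreachable `return false`. The same existence-check pattern in isolation; a sketch only, and `roleActions` here is a stand-in name rather than a confirmed export of the real schema:

import { db } from "@server/db";
import { roleActions } from "@server/db"; // hypothetical table export for illustration
import { and, eq } from "drizzle-orm";

async function roleHasAction(roleId: number, actionId: string): Promise<boolean> {
    const rows = await db
        .select()
        .from(roleActions)
        .where(and(eq(roleActions.roleId, roleId), eq(roleActions.actionId, actionId)))
        .limit(1);
    return rows.length > 0; // mirrors checkUserActionPermission's early return
}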
@@ -3,13 +3,7 @@ import {
    encodeHexLowerCase
} from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";
import {
    resourceSessions,
    Session,
    sessions,
    User,
    users
} from "@server/db";
import { resourceSessions, Session, sessions, User, users } from "@server/db";
import { db } from "@server/db";
import { eq, inArray } from "drizzle-orm";
import config from "@server/lib/config";
@@ -24,8 +18,9 @@ export const SESSION_COOKIE_EXPIRES =
    60 *
    60 *
    config.getRawConfig().server.dashboard_session_length_hours;
export const COOKIE_DOMAIN =
    "." + new URL(config.getRawConfig().app.dashboard_url).hostname;
export const COOKIE_DOMAIN = config.getRawConfig().app.dashboard_url
    ? new URL(config.getRawConfig().app.dashboard_url!).hostname
    : undefined;

export function generateSessionToken(): string {
    const bytes = new Uint8Array(20);
@@ -44,7 +39,8 @@ export async function createSession(
    const session: Session = {
        sessionId: sessionId,
        userId,
        expiresAt: new Date(Date.now() + SESSION_COOKIE_EXPIRES).getTime()
        expiresAt: new Date(Date.now() + SESSION_COOKIE_EXPIRES).getTime(),
        issuedAt: new Date().getTime()
    };
    await db.insert(sessions).values(session);
    return session;
@@ -98,8 +94,8 @@ export async function invalidateSession(sessionId: string): Promise<void> {
    try {
        await db.transaction(async (trx) => {
            await trx
                .delete(resourceSessions)
                .where(eq(resourceSessions.userSessionId, sessionId));
                .delete(resourceSessions)
                .where(eq(resourceSessions.userSessionId, sessionId));
            await trx.delete(sessions).where(eq(sessions.sessionId, sessionId));
        });
    } catch (e) {
@@ -111,9 +107,9 @@ export async function invalidateAllSessions(userId: string): Promise<void> {
    try {
        await db.transaction(async (trx) => {
            const userSessions = await trx
                .select()
                .from(sessions)
                .where(eq(sessions.userId, userId));
                .select()
                .from(sessions)
                .where(eq(sessions.userId, userId));
            await trx.delete(resourceSessions).where(
                inArray(
                    resourceSessions.userSessionId,
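The COOKIE_DOMAIN change drops the leading dot and tolerates a missing dashboard_url. A quick illustration of what the new expression yields; the URL is an example value, not real configuration:

const dashboardUrl: string | undefined = "https://pangolin.example.com"; // example only

const COOKIE_DOMAIN = dashboardUrl ? new URL(dashboardUrl).hostname : undefined;
// -> "pangolin.example.com"; the old "." + hostname form would have produced
//    ".pangolin.example.com", i.e. a cookie shared across all subdomains.
// When dashboard_url is unset, COOKIE_DOMAIN is undefined instead of throwing
// inside new URL().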
@@ -50,7 +50,8 @@ export async function createResourceSession(opts: {
        doNotExtend: opts.doNotExtend || false,
        accessTokenId: opts.accessTokenId || null,
        isRequestToken: opts.isRequestToken || false,
        userSessionId: opts.userSessionId || null
        userSessionId: opts.userSessionId || null,
        issuedAt: new Date().getTime()
    };

    await db.insert(resourceSessions).values(session);
@@ -173,14 +174,14 @@ export function serializeResourceSessionCookie(
    const now = new Date().getTime();
    if (!isHttp) {
        if (expiresAt === undefined) {
            return `${cookieName}_s.${now}=${token}; HttpOnly; SameSite=Lax; Path=/; Secure; Domain=${"." + domain}`;
            return `${cookieName}_s.${now}=${token}; HttpOnly; SameSite=Lax; Path=/; Secure; Domain=${domain}`;
        }
        return `${cookieName}_s.${now}=${token}; HttpOnly; SameSite=Lax; Expires=${expiresAt.toUTCString()}; Path=/; Secure; Domain=${"." + domain}`;
        return `${cookieName}_s.${now}=${token}; HttpOnly; SameSite=Lax; Expires=${expiresAt.toUTCString()}; Path=/; Secure; Domain=${domain}`;
    } else {
        if (expiresAt === undefined) {
            return `${cookieName}.${now}=${token}; HttpOnly; SameSite=Lax; Path=/; Domain=${"." + domain}`;
            return `${cookieName}.${now}=${token}; HttpOnly; SameSite=Lax; Path=/; Domain=${domain}`;
        }
        return `${cookieName}.${now}=${token}; HttpOnly; SameSite=Lax; Expires=${expiresAt.toUTCString()}; Path=/; Domain=${"." + domain}`;
        return `${cookieName}.${now}=${token}; HttpOnly; SameSite=Lax; Expires=${expiresAt.toUTCString()}; Path=/; Domain=${domain}`;
    }
}
@@ -190,9 +191,9 @@ export function createBlankResourceSessionTokenCookie(
    isHttp: boolean = false
): string {
    if (!isHttp) {
        return `${cookieName}_s=; HttpOnly; SameSite=Lax; Max-Age=0; Path=/; Secure; Domain=${"." + domain}`;
        return `${cookieName}_s=; HttpOnly; SameSite=Lax; Max-Age=0; Path=/; Secure; Domain=${domain}`;
    } else {
        return `${cookieName}=; HttpOnly; SameSite=Lax; Max-Age=0; Path=/; Domain=${"." + domain}`;
        return `${cookieName}=; HttpOnly; SameSite=Lax; Max-Age=0; Path=/; Domain=${domain}`;
    }
}
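Since serializeResourceSessionCookie now interpolates the domain exactly as given instead of prefixing a dot, the caller decides the cookie scope. Example output for illustrative inputs (the cookie name and domain here are made up):

const cookieName = "p_resource_session"; // example name, not the real constant
const token = "abc123";
const domain = "app.example.com";
const now = Date.now();

const header = `${cookieName}_s.${now}=${token}; HttpOnly; SameSite=Lax; Path=/; Secure; Domain=${domain}`;
// e.g. "p_resource_session_s.1700000000000=abc123; HttpOnly; SameSite=Lax; Path=/; Secure; Domain=app.example.com"
console.log(header);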
@@ -1 +0,0 @@
export const build = "oss" as any;
server/cleanup.ts (new file, 13 lines)
@@ -0,0 +1,13 @@
import { cleanup as wsCleanup } from "@server/routers/ws";

async function cleanup() {
    await wsCleanup();

    process.exit(0);
}

export async function initCleanup() {
    // Handle process termination
    process.on("SIGTERM", () => cleanup());
    process.on("SIGINT", () => cleanup());
}
server/db/countries.ts (new file, 1014 lines): diff suppressed because it is too large.
server/db/maxmind.ts (new file, 13 lines)
@@ -0,0 +1,13 @@
import maxmind, { CountryResponse, Reader } from "maxmind";
import config from "@server/lib/config";

let maxmindLookup: Reader<CountryResponse> | null;
if (config.getRawConfig().server.maxmind_db_path) {
    maxmindLookup = await maxmind.open<CountryResponse>(
        config.getRawConfig().server.maxmind_db_path!
    );
} else {
    maxmindLookup = null;
}

export { maxmindLookup };
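Because the reader is opened once at module load via top-level await, callers can use it synchronously. A hedged usage sketch based on the maxmind package's Reader API; the import path mirrors the file location and the IP is an example:

import { maxmindLookup } from "@server/db/maxmind";

function countryFor(ip: string): string | undefined {
    // Reader.get() returns null for addresses not present in the database,
    // and maxmindLookup itself is null when no maxmind_db_path is configured.
    return maxmindLookup?.get(ip)?.country?.iso_code;
}

console.log(countryFor("203.0.113.10")); // e.g. undefined, or "US"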
@@ -1,6 +1,6 @@
import { join } from "path";
import { readFileSync } from "fs";
import { db } from "@server/db";
import { db, resources, siteResources } from "@server/db";
import { exitNodes, sites } from "@server/db";
import { eq, and } from "drizzle-orm";
import { __DIRNAME } from "@server/lib/consts";
@@ -34,6 +34,44 @@ export async function getUniqueSiteName(orgId: string): Promise<string> {
    }
}

export async function getUniqueResourceName(orgId: string): Promise<string> {
    let loops = 0;
    while (true) {
        if (loops > 100) {
            throw new Error("Could not generate a unique name");
        }

        const name = generateName();
        const count = await db
            .select({ niceId: resources.niceId, orgId: resources.orgId })
            .from(resources)
            .where(and(eq(resources.niceId, name), eq(resources.orgId, orgId)));
        if (count.length === 0) {
            return name;
        }
        loops++;
    }
}

export async function getUniqueSiteResourceName(orgId: string): Promise<string> {
    let loops = 0;
    while (true) {
        if (loops > 100) {
            throw new Error("Could not generate a unique name");
        }

        const name = generateName();
        const count = await db
            .select({ niceId: siteResources.niceId, orgId: siteResources.orgId })
            .from(siteResources)
            .where(and(eq(siteResources.niceId, name), eq(siteResources.orgId, orgId)));
        if (count.length === 0) {
            return name;
        }
        loops++;
    }
}

export async function getUniqueExitNodeEndpointName(): Promise<string> {
    let loops = 0;
    const count = await db
@@ -7,9 +7,22 @@ function createDb() {
    const config = readConfigFile();

    if (!config.postgres) {
        throw new Error(
            "Postgres configuration is missing in the configuration file."
        );
        // check the environment variables for postgres config
        if (process.env.POSTGRES_CONNECTION_STRING) {
            config.postgres = {
                connection_string: process.env.POSTGRES_CONNECTION_STRING
            };
            if (process.env.POSTGRES_REPLICA_CONNECTION_STRINGS) {
                const replicas = process.env.POSTGRES_REPLICA_CONNECTION_STRINGS.split(",").map((conn) => ({
                    connection_string: conn.trim()
                }));
                config.postgres.replicas = replicas;
            }
        } else {
            throw new Error(
                "Postgres configuration is missing in the configuration file."
            );
        }
    }

    const connectionString = config.postgres?.connection_string;
@@ -22,11 +35,12 @@ function createDb() {
    }

    // Create connection pools instead of individual connections
    const poolConfig = config.postgres.pool;
    const primaryPool = new Pool({
        connectionString,
        max: 20,
        idleTimeoutMillis: 30000,
        connectionTimeoutMillis: 2000,
        max: poolConfig?.max_connections || 20,
        idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
        connectionTimeoutMillis: poolConfig?.connection_timeout_ms || 5000,
    });

    const replicas = [];
@@ -37,9 +51,9 @@ function createDb() {
    for (const conn of replicaConnections) {
        const replicaPool = new Pool({
            connectionString: conn.connection_string,
            max: 10,
            idleTimeoutMillis: 30000,
            connectionTimeoutMillis: 2000,
            max: poolConfig?.max_replica_connections || 20,
            idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
            connectionTimeoutMillis: poolConfig?.connection_timeout_ms || 5000,
        });
        replicas.push(DrizzlePostgres(replicaPool));
    }
@@ -50,3 +64,4 @@ function createDb() {

export const db = createDb();
export default db;
export type Transaction = Parameters<Parameters<typeof db["transaction"]>[0]>[0];
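With the new fallback, a Postgres deployment can be configured entirely through environment variables, while the pool block in the config file tunes both primary and replica pools. A sketch of the two input shapes this createDb() accepts; all connection strings and numbers are placeholders:

// Environment-variable form:
//   POSTGRES_CONNECTION_STRING=postgres://user:pass@primary:5432/pangolin
//   POSTGRES_REPLICA_CONNECTION_STRINGS=postgres://user:pass@replica1:5432/pangolin,postgres://user:pass@replica2:5432/pangolin

// Config-file form (field names as read by createDb above):
const postgresConfig = {
    connection_string: "postgres://user:pass@primary:5432/pangolin",
    replicas: [
        { connection_string: "postgres://user:pass@replica1:5432/pangolin" }
    ],
    pool: {
        max_connections: 20,          // primary pool size (default 20)
        max_replica_connections: 20,  // per-replica pool size (default 20)
        idle_timeout_ms: 30000,
        connection_timeout_ms: 5000
    }
};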
@@ -1,2 +1,3 @@
export * from "./driver";
export * from "./schema";
export * from "./schema/schema";
export * from "./schema/privateSchema";
server/db/pg/schema/privateSchema.ts (new file, 273 lines)
@@ -0,0 +1,273 @@
import {
    pgTable,
    serial,
    varchar,
    boolean,
    integer,
    bigint,
    real,
    text,
    index
} from "drizzle-orm/pg-core";
import { InferSelectModel } from "drizzle-orm";
import { domains, orgs, targets, users, exitNodes, sessions } from "./schema";

export const certificates = pgTable("certificates", {
    certId: serial("certId").primaryKey(),
    domain: varchar("domain", { length: 255 }).notNull().unique(),
    domainId: varchar("domainId").references(() => domains.domainId, {
        onDelete: "cascade"
    }),
    wildcard: boolean("wildcard").default(false),
    status: varchar("status", { length: 50 }).notNull().default("pending"), // pending, requested, valid, expired, failed
    expiresAt: bigint("expiresAt", { mode: "number" }),
    lastRenewalAttempt: bigint("lastRenewalAttempt", { mode: "number" }),
    createdAt: bigint("createdAt", { mode: "number" }).notNull(),
    updatedAt: bigint("updatedAt", { mode: "number" }).notNull(),
    orderId: varchar("orderId", { length: 500 }),
    errorMessage: text("errorMessage"),
    renewalCount: integer("renewalCount").default(0),
    certFile: text("certFile"),
    keyFile: text("keyFile")
});

export const dnsChallenge = pgTable("dnsChallenges", {
    dnsChallengeId: serial("dnsChallengeId").primaryKey(),
    domain: varchar("domain", { length: 255 }).notNull(),
    token: varchar("token", { length: 255 }).notNull(),
    keyAuthorization: varchar("keyAuthorization", { length: 1000 }).notNull(),
    createdAt: bigint("createdAt", { mode: "number" }).notNull(),
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull(),
    completed: boolean("completed").default(false)
});

export const account = pgTable("account", {
    accountId: serial("accountId").primaryKey(),
    userId: varchar("userId")
        .notNull()
        .references(() => users.userId, { onDelete: "cascade" })
});

export const customers = pgTable("customers", {
    customerId: varchar("customerId", { length: 255 }).primaryKey().notNull(),
    orgId: varchar("orgId", { length: 255 })
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    // accountId: integer("accountId")
    //     .references(() => account.accountId, { onDelete: "cascade" }), // Optional, if using accounts
    email: varchar("email", { length: 255 }),
    name: varchar("name", { length: 255 }),
    phone: varchar("phone", { length: 50 }),
    address: text("address"),
    createdAt: bigint("createdAt", { mode: "number" }).notNull(),
    updatedAt: bigint("updatedAt", { mode: "number" }).notNull()
});

export const subscriptions = pgTable("subscriptions", {
    subscriptionId: varchar("subscriptionId", { length: 255 })
        .primaryKey()
        .notNull(),
    customerId: varchar("customerId", { length: 255 })
        .notNull()
        .references(() => customers.customerId, { onDelete: "cascade" }),
    status: varchar("status", { length: 50 }).notNull().default("active"), // active, past_due, canceled, unpaid
    canceledAt: bigint("canceledAt", { mode: "number" }),
    createdAt: bigint("createdAt", { mode: "number" }).notNull(),
    updatedAt: bigint("updatedAt", { mode: "number" }),
    billingCycleAnchor: bigint("billingCycleAnchor", { mode: "number" })
});

export const subscriptionItems = pgTable("subscriptionItems", {
    subscriptionItemId: serial("subscriptionItemId").primaryKey(),
    subscriptionId: varchar("subscriptionId", { length: 255 })
        .notNull()
        .references(() => subscriptions.subscriptionId, {
            onDelete: "cascade"
        }),
    planId: varchar("planId", { length: 255 }).notNull(),
    priceId: varchar("priceId", { length: 255 }),
    meterId: varchar("meterId", { length: 255 }),
    unitAmount: real("unitAmount"),
    tiers: text("tiers"),
    interval: varchar("interval", { length: 50 }),
    currentPeriodStart: bigint("currentPeriodStart", { mode: "number" }),
    currentPeriodEnd: bigint("currentPeriodEnd", { mode: "number" }),
    name: varchar("name", { length: 255 })
});

export const accountDomains = pgTable("accountDomains", {
    accountId: integer("accountId")
        .notNull()
        .references(() => account.accountId, { onDelete: "cascade" }),
    domainId: varchar("domainId")
        .notNull()
        .references(() => domains.domainId, { onDelete: "cascade" })
});

export const usage = pgTable("usage", {
    usageId: varchar("usageId", { length: 255 }).primaryKey(),
    featureId: varchar("featureId", { length: 255 }).notNull(),
    orgId: varchar("orgId")
        .references(() => orgs.orgId, { onDelete: "cascade" })
        .notNull(),
    meterId: varchar("meterId", { length: 255 }),
    instantaneousValue: real("instantaneousValue"),
    latestValue: real("latestValue").notNull(),
    previousValue: real("previousValue"),
    updatedAt: bigint("updatedAt", { mode: "number" }).notNull(),
    rolledOverAt: bigint("rolledOverAt", { mode: "number" }),
    nextRolloverAt: bigint("nextRolloverAt", { mode: "number" })
});

export const limits = pgTable("limits", {
    limitId: varchar("limitId", { length: 255 }).primaryKey(),
    featureId: varchar("featureId", { length: 255 }).notNull(),
    orgId: varchar("orgId")
        .references(() => orgs.orgId, {
            onDelete: "cascade"
        })
        .notNull(),
    value: real("value"),
    description: text("description")
});

export const usageNotifications = pgTable("usageNotifications", {
    notificationId: serial("notificationId").primaryKey(),
    orgId: varchar("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    featureId: varchar("featureId", { length: 255 }).notNull(),
    limitId: varchar("limitId", { length: 255 }).notNull(),
    notificationType: varchar("notificationType", { length: 50 }).notNull(),
    sentAt: bigint("sentAt", { mode: "number" }).notNull()
});

export const domainNamespaces = pgTable("domainNamespaces", {
    domainNamespaceId: varchar("domainNamespaceId", {
        length: 255
    }).primaryKey(),
    domainId: varchar("domainId")
        .references(() => domains.domainId, {
            onDelete: "set null"
        })
        .notNull()
});

export const exitNodeOrgs = pgTable("exitNodeOrgs", {
    exitNodeId: integer("exitNodeId")
        .notNull()
        .references(() => exitNodes.exitNodeId, { onDelete: "cascade" }),
    orgId: text("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" })
});

export const remoteExitNodes = pgTable("remoteExitNode", {
    remoteExitNodeId: varchar("id").primaryKey(),
    secretHash: varchar("secretHash").notNull(),
    dateCreated: varchar("dateCreated").notNull(),
    version: varchar("version"),
    secondaryVersion: varchar("secondaryVersion"), // This is to detect the new nodes after the transition to pangolin-node
    exitNodeId: integer("exitNodeId").references(() => exitNodes.exitNodeId, {
        onDelete: "cascade"
    })
});

export const remoteExitNodeSessions = pgTable("remoteExitNodeSession", {
    sessionId: varchar("id").primaryKey(),
    remoteExitNodeId: varchar("remoteExitNodeId")
        .notNull()
        .references(() => remoteExitNodes.remoteExitNodeId, {
            onDelete: "cascade"
        }),
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull()
});

export const loginPage = pgTable("loginPage", {
    loginPageId: serial("loginPageId").primaryKey(),
    subdomain: varchar("subdomain"),
    fullDomain: varchar("fullDomain"),
    exitNodeId: integer("exitNodeId").references(() => exitNodes.exitNodeId, {
        onDelete: "set null"
    }),
    domainId: varchar("domainId").references(() => domains.domainId, {
        onDelete: "set null"
    })
});

export const loginPageOrg = pgTable("loginPageOrg", {
    loginPageId: integer("loginPageId")
        .notNull()
        .references(() => loginPage.loginPageId, { onDelete: "cascade" }),
    orgId: varchar("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" })
});

export const sessionTransferToken = pgTable("sessionTransferToken", {
    token: varchar("token").primaryKey(),
    sessionId: varchar("sessionId")
        .notNull()
        .references(() => sessions.sessionId, {
            onDelete: "cascade"
        }),
    encryptedSession: text("encryptedSession").notNull(),
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull()
});

export const actionAuditLog = pgTable("actionAuditLog", {
    id: serial("id").primaryKey(),
    timestamp: bigint("timestamp", { mode: "number" }).notNull(), // this is EPOCH time in seconds
    orgId: varchar("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    actorType: varchar("actorType", { length: 50 }).notNull(),
    actor: varchar("actor", { length: 255 }).notNull(),
    actorId: varchar("actorId", { length: 255 }).notNull(),
    action: varchar("action", { length: 100 }).notNull(),
    metadata: text("metadata")
}, (table) => ([
    index("idx_actionAuditLog_timestamp").on(table.timestamp),
    index("idx_actionAuditLog_org_timestamp").on(table.orgId, table.timestamp)
]));

export const accessAuditLog = pgTable("accessAuditLog", {
    id: serial("id").primaryKey(),
    timestamp: bigint("timestamp", { mode: "number" }).notNull(), // this is EPOCH time in seconds
    orgId: varchar("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    actorType: varchar("actorType", { length: 50 }),
    actor: varchar("actor", { length: 255 }),
    actorId: varchar("actorId", { length: 255 }),
    resourceId: integer("resourceId"),
    ip: varchar("ip", { length: 45 }),
    type: varchar("type", { length: 100 }).notNull(),
    action: boolean("action").notNull(),
    location: text("location"),
    userAgent: text("userAgent"),
    metadata: text("metadata")
}, (table) => ([
    index("idx_identityAuditLog_timestamp").on(table.timestamp),
    index("idx_identityAuditLog_org_timestamp").on(table.orgId, table.timestamp)
]));

export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;
export type Certificate = InferSelectModel<typeof certificates>;
export type DnsChallenge = InferSelectModel<typeof dnsChallenge>;
export type Customer = InferSelectModel<typeof customers>;
export type Subscription = InferSelectModel<typeof subscriptions>;
export type SubscriptionItem = InferSelectModel<typeof subscriptionItems>;
export type Usage = InferSelectModel<typeof usage>;
export type UsageLimit = InferSelectModel<typeof limits>;
export type AccountDomain = InferSelectModel<typeof accountDomains>;
export type UsageNotification = InferSelectModel<typeof usageNotifications>;
export type RemoteExitNode = InferSelectModel<typeof remoteExitNodes>;
export type RemoteExitNodeSession = InferSelectModel<
    typeof remoteExitNodeSessions
>;
export type ExitNodeOrg = InferSelectModel<typeof exitNodeOrgs>;
export type LoginPage = InferSelectModel<typeof loginPage>;
export type ActionAuditLog = InferSelectModel<typeof actionAuditLog>;
export type AccessAuditLog = InferSelectModel<typeof accessAuditLog>;
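A hypothetical write against the actionAuditLog table defined above, to show the expected units and shapes; identifiers and metadata are made up, and the import assumes these tables are re-exported through the @server/db barrel as the pg/index.ts change suggests:

import { db, actionAuditLog } from "@server/db";

await db.insert(actionAuditLog).values({
    timestamp: Math.floor(Date.now() / 1000), // EPOCH seconds, per the column comment
    orgId: "org_abc123",
    actorType: "user",
    actor: "admin@example.com",
    actorId: "usr_123",
    action: "createResource",
    metadata: JSON.stringify({ resourceId: 42 })
});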
@@ -6,9 +6,11 @@ import {
    integer,
    bigint,
    real,
    text
    text,
    index
} from "drizzle-orm/pg-core";
import { InferSelectModel } from "drizzle-orm";
import { randomUUID } from "crypto";

export const domains = pgTable("domains", {
    domainId: varchar("domainId").primaryKey(),
@@ -17,14 +19,40 @@ export const domains = pgTable("domains", {
    type: varchar("type"), // "ns", "cname", "wildcard"
    verified: boolean("verified").notNull().default(false),
    failed: boolean("failed").notNull().default(false),
    tries: integer("tries").notNull().default(0)
    tries: integer("tries").notNull().default(0),
    certResolver: varchar("certResolver"),
    customCertResolver: varchar("customCertResolver"),
    preferWildcardCert: boolean("preferWildcardCert")
});

export const dnsRecords = pgTable("dnsRecords", {
    id: serial("id").primaryKey(),
    domainId: varchar("domainId")
        .notNull()
        .references(() => domains.domainId, { onDelete: "cascade" }),
    recordType: varchar("recordType").notNull(), // "NS" | "CNAME" | "A" | "TXT"
    baseDomain: varchar("baseDomain"),
    value: varchar("value").notNull(),
    verified: boolean("verified").notNull().default(false)
});

export const orgs = pgTable("orgs", {
    orgId: varchar("orgId").primaryKey(),
    name: varchar("name").notNull(),
    subnet: varchar("subnet"),
    createdAt: text("createdAt")
    createdAt: text("createdAt"),
    requireTwoFactor: boolean("requireTwoFactor"),
    maxSessionLengthHours: integer("maxSessionLengthHours"),
    passwordExpiryDays: integer("passwordExpiryDays"),
    settingsLogRetentionDaysRequest: integer("settingsLogRetentionDaysRequest") // where 0 = dont keep logs and -1 = keep forever
        .notNull()
        .default(7),
    settingsLogRetentionDaysAccess: integer("settingsLogRetentionDaysAccess")
        .notNull()
        .default(0),
    settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction")
        .notNull()
        .default(0)
});

export const orgDomains = pgTable("orgDomains", {
@@ -66,11 +94,16 @@ export const sites = pgTable("sites", {

export const resources = pgTable("resources", {
    resourceId: serial("resourceId").primaryKey(),
    resourceGuid: varchar("resourceGuid", { length: 36 })
        .unique()
        .notNull()
        .$defaultFn(() => randomUUID()),
    orgId: varchar("orgId")
        .references(() => orgs.orgId, {
            onDelete: "cascade"
        })
        .notNull(),
    niceId: text("niceId").notNull(),
    name: varchar("name").notNull(),
    subdomain: varchar("subdomain"),
    fullDomain: varchar("fullDomain"),
@@ -93,8 +126,11 @@ export const resources = pgTable("resources", {
    setHostHeader: varchar("setHostHeader"),
    enableProxy: boolean("enableProxy").default(true),
    skipToIdpId: integer("skipToIdpId").references(() => idp.idpId, {
        onDelete: "cascade"
        onDelete: "set null"
    }),
    headers: text("headers"), // comma-separated list of headers to add to the request
    proxyProtocol: boolean("proxyProtocol").notNull().default(false),
    proxyProtocolVersion: integer("proxyProtocolVersion").default(1)
});

export const targets = pgTable("targets", {
@@ -113,7 +149,33 @@ export const targets = pgTable("targets", {
    method: varchar("method"),
    port: integer("port").notNull(),
    internalPort: integer("internalPort"),
    enabled: boolean("enabled").notNull().default(true)
    enabled: boolean("enabled").notNull().default(true),
    path: text("path"),
    pathMatchType: text("pathMatchType"), // exact, prefix, regex
    rewritePath: text("rewritePath"), // if set, rewrites the path to this value before sending to the target
    rewritePathType: text("rewritePathType"), // exact, prefix, regex, stripPrefix
    priority: integer("priority").notNull().default(100)
});

export const targetHealthCheck = pgTable("targetHealthCheck", {
    targetHealthCheckId: serial("targetHealthCheckId").primaryKey(),
    targetId: integer("targetId")
        .notNull()
        .references(() => targets.targetId, { onDelete: "cascade" }),
    hcEnabled: boolean("hcEnabled").notNull().default(false),
    hcPath: varchar("hcPath"),
    hcScheme: varchar("hcScheme"),
    hcMode: varchar("hcMode").default("http"),
    hcHostname: varchar("hcHostname"),
    hcPort: integer("hcPort"),
    hcInterval: integer("hcInterval").default(30), // in seconds
    hcUnhealthyInterval: integer("hcUnhealthyInterval").default(30), // in seconds
    hcTimeout: integer("hcTimeout").default(5), // in seconds
    hcHeaders: varchar("hcHeaders"),
    hcFollowRedirects: boolean("hcFollowRedirects").default(true),
    hcMethod: varchar("hcMethod").default("GET"),
    hcStatus: integer("hcStatus"), // http code
    hcHealth: text("hcHealth").default("unknown") // "unknown", "healthy", "unhealthy"
});

export const exitNodes = pgTable("exitNodes", {
@@ -124,10 +186,15 @@ export const exitNodes = pgTable("exitNodes", {
    publicKey: varchar("publicKey").notNull(),
    listenPort: integer("listenPort").notNull(),
    reachableAt: varchar("reachableAt"),
    maxConnections: integer("maxConnections")
    maxConnections: integer("maxConnections"),
    online: boolean("online").notNull().default(false),
    lastPing: integer("lastPing"),
    type: text("type").default("gerbil"), // gerbil, remoteExitNode
    region: varchar("region")
});

export const siteResources = pgTable("siteResources", { // this is for the clients
export const siteResources = pgTable("siteResources", {
    // this is for the clients
    siteResourceId: serial("siteResourceId").primaryKey(),
    siteId: integer("siteId")
        .notNull()
@@ -135,12 +202,13 @@ export const siteResources = pgTable("siteResources", { // this is for the clien
    orgId: varchar("orgId")
        .notNull()
        .references(() => orgs.orgId, { onDelete: "cascade" }),
    niceId: varchar("niceId").notNull(),
    name: varchar("name").notNull(),
    protocol: varchar("protocol").notNull(),
    proxyPort: integer("proxyPort").notNull(),
    destinationPort: integer("destinationPort").notNull(),
    destinationIp: varchar("destinationIp").notNull(),
    enabled: boolean("enabled").notNull().default(true),
    enabled: boolean("enabled").notNull().default(true)
});

export const users = pgTable("user", {
@@ -160,7 +228,8 @@ export const users = pgTable("user", {
    dateCreated: varchar("dateCreated").notNull(),
    termsAcceptedTimestamp: varchar("termsAcceptedTimestamp"),
    termsVersion: varchar("termsVersion"),
    serverAdmin: boolean("serverAdmin").notNull().default(false)
    serverAdmin: boolean("serverAdmin").notNull().default(false),
    lastPasswordChange: bigint("lastPasswordChange", { mode: "number" })
});

export const newts = pgTable("newt", {
@@ -186,7 +255,8 @@ export const sessions = pgTable("session", {
    userId: varchar("userId")
        .notNull()
        .references(() => users.userId, { onDelete: "cascade" }),
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull()
    expiresAt: bigint("expiresAt", { mode: "number" }).notNull(),
    issuedAt: bigint("issuedAt", { mode: "number" })
});

export const newtSessions = pgTable("newtSession", {
@@ -209,7 +279,8 @@ export const userOrgs = pgTable("userOrgs", {
    roleId: integer("roleId")
        .notNull()
        .references(() => roles.roleId),
    isOwner: boolean("isOwner").notNull().default(false)
    isOwner: boolean("isOwner").notNull().default(false),
    autoProvisioned: boolean("autoProvisioned").default(false)
});

export const emailVerificationCodes = pgTable("emailVerificationCodes", {
@@ -340,6 +411,14 @@ export const resourcePassword = pgTable("resourcePassword", {
    passwordHash: varchar("passwordHash").notNull()
});

export const resourceHeaderAuth = pgTable("resourceHeaderAuth", {
    headerAuthId: serial("headerAuthId").primaryKey(),
    resourceId: integer("resourceId")
        .notNull()
        .references(() => resources.resourceId, { onDelete: "cascade" }),
    headerAuthHash: varchar("headerAuthHash").notNull()
});

export const resourceAccessToken = pgTable("resourceAccessToken", {
    accessTokenId: varchar("accessTokenId").primaryKey(),
    orgId: varchar("orgId")
@@ -394,7 +473,8 @@ export const resourceSessions = pgTable("resourceSessions", {
        {
            onDelete: "cascade"
        }
    )
    ),
    issuedAt: bigint("issuedAt", { mode: "number" })
});

export const resourceWhitelist = pgTable("resourceWhitelist", {
@@ -427,7 +507,7 @@ export const resourceRules = pgTable("resourceRules", {
        .references(() => resources.resourceId, { onDelete: "cascade" }),
    enabled: boolean("enabled").notNull().default(true),
    priority: integer("priority").notNull(),
    action: varchar("action").notNull(), // ACCEPT, DROP
    action: varchar("action").notNull(), // ACCEPT, DROP, PASS
    match: varchar("match").notNull(), // CIDR, PATH, IP
    value: varchar("value").notNull()
});
@@ -455,6 +535,7 @@ export const idpOidcConfig = pgTable("idpOidcConfig", {
    idpId: integer("idpId")
        .notNull()
        .references(() => idp.idpId, { onDelete: "cascade" }),
    variant: varchar("variant").notNull().default("oidc"),
    clientId: varchar("clientId").notNull(),
    clientSecret: varchar("clientSecret").notNull(),
    authUrl: varchar("authUrl").notNull(),
@@ -621,6 +702,57 @@ export const setupTokens = pgTable("setupTokens", {
    dateUsed: varchar("dateUsed")
});

// Blueprint runs
export const blueprints = pgTable("blueprints", {
    blueprintId: serial("blueprintId").primaryKey(),
    orgId: text("orgId")
        .references(() => orgs.orgId, {
            onDelete: "cascade"
        })
        .notNull(),
    name: varchar("name").notNull(),
    source: varchar("source").notNull(),
    createdAt: integer("createdAt").notNull(),
    succeeded: boolean("succeeded").notNull(),
    contents: text("contents").notNull(),
    message: text("message")
});
export const requestAuditLog = pgTable(
    "requestAuditLog",
    {
        id: serial("id").primaryKey(),
        timestamp: integer("timestamp").notNull(), // this is EPOCH time in seconds
        orgId: text("orgId").references(() => orgs.orgId, {
            onDelete: "cascade"
        }),
        action: boolean("action").notNull(),
        reason: integer("reason").notNull(),
        actorType: text("actorType"),
        actor: text("actor"),
        actorId: text("actorId"),
        resourceId: integer("resourceId"),
        ip: text("ip"),
        location: text("location"),
        userAgent: text("userAgent"),
        metadata: text("metadata"),
        headers: text("headers"), // JSON blob
        query: text("query"), // JSON blob
        originalRequestURL: text("originalRequestURL"),
        scheme: text("scheme"),
        host: text("host"),
        path: text("path"),
        method: text("method"),
        tls: boolean("tls")
    },
    (table) => [
        index("idx_requestAuditLog_timestamp").on(table.timestamp),
        index("idx_requestAuditLog_org_timestamp").on(
            table.orgId,
            table.timestamp
        )
    ]
);

export type Org = InferSelectModel<typeof orgs>;
export type User = InferSelectModel<typeof users>;
export type Site = InferSelectModel<typeof sites>;
@@ -648,6 +780,7 @@ export type UserOrg = InferSelectModel<typeof userOrgs>;
export type ResourceSession = InferSelectModel<typeof resourceSessions>;
export type ResourcePincode = InferSelectModel<typeof resourcePincode>;
export type ResourcePassword = InferSelectModel<typeof resourcePassword>;
export type ResourceHeaderAuth = InferSelectModel<typeof resourceHeaderAuth>;
export type ResourceOtp = InferSelectModel<typeof resourceOtp>;
export type ResourceAccessToken = InferSelectModel<typeof resourceAccessToken>;
export type ResourceWhitelist = InferSelectModel<typeof resourceWhitelist>;
@@ -668,3 +801,11 @@ export type RoleClient = InferSelectModel<typeof roleClients>;
export type OrgDomains = InferSelectModel<typeof orgDomains>;
export type SiteResource = InferSelectModel<typeof siteResources>;
export type SetupToken = InferSelectModel<typeof setupTokens>;
export type HostMeta = InferSelectModel<typeof hostMeta>;
export type TargetHealthCheck = InferSelectModel<typeof targetHealthCheck>;
export type IdpOidcConfig = InferSelectModel<typeof idpOidcConfig>;
export type Blueprint = InferSelectModel<typeof blueprints>;
export type LicenseKey = InferSelectModel<typeof licenseKey>;
export type SecurityKey = InferSelectModel<typeof securityKeys>;
export type WebauthnChallenge = InferSelectModel<typeof webauthnChallenge>;
export type RequestAuditLog = InferSelectModel<typeof requestAuditLog>;
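The targets table now carries path-based routing fields. An illustrative shape showing how the new columns compose; the values are invented, the option lists come from the column comments above, and this is a plain object rather than a real insert since other required columns are omitted:

const examplePathTarget = {
    resourceId: 1,                  // hypothetical existing resource
    ip: "10.0.0.5",
    port: 8080,
    method: "http",
    enabled: true,
    path: "/api",
    pathMatchType: "prefix",        // exact | prefix | regex
    rewritePath: "/",               // applied before the request reaches the target
    rewritePathType: "stripPrefix", // exact | prefix | regex | stripPrefix
    priority: 100                   // schema default is 100
};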
server/db/queries/verifySessionQueries.ts (new file, 187 lines)
@@ -0,0 +1,187 @@
import { db, loginPage, LoginPage, loginPageOrg, Org, orgs } from "@server/db";
import {
    Resource,
    ResourcePassword,
    ResourcePincode,
    ResourceRule,
    resourcePassword,
    resourcePincode,
    resourceHeaderAuth,
    ResourceHeaderAuth,
    resourceRules,
    resources,
    roleResources,
    sessions,
    userOrgs,
    userResources,
    users
} from "@server/db";
import { and, eq } from "drizzle-orm";

export type ResourceWithAuth = {
    resource: Resource | null;
    pincode: ResourcePincode | null;
    password: ResourcePassword | null;
    headerAuth: ResourceHeaderAuth | null;
    org: Org;
};

export type UserSessionWithUser = {
    session: any;
    user: any;
};

/**
 * Get resource by domain with pincode and password information
 */
export async function getResourceByDomain(
    domain: string
): Promise<ResourceWithAuth | null> {
    const [result] = await db
        .select()
        .from(resources)
        .leftJoin(
            resourcePincode,
            eq(resourcePincode.resourceId, resources.resourceId)
        )
        .leftJoin(
            resourcePassword,
            eq(resourcePassword.resourceId, resources.resourceId)
        )
        .leftJoin(
            resourceHeaderAuth,
            eq(resourceHeaderAuth.resourceId, resources.resourceId)
        )
        .innerJoin(
            orgs,
            eq(orgs.orgId, resources.orgId)
        )
        .where(eq(resources.fullDomain, domain))
        .limit(1);

    if (!result) {
        return null;
    }

    return {
        resource: result.resources,
        pincode: result.resourcePincode,
        password: result.resourcePassword,
        headerAuth: result.resourceHeaderAuth,
        org: result.orgs
    };
}

/**
 * Get user session with user information
 */
export async function getUserSessionWithUser(
    userSessionId: string
): Promise<UserSessionWithUser | null> {
    const [res] = await db
        .select()
        .from(sessions)
        .leftJoin(users, eq(users.userId, sessions.userId))
        .where(eq(sessions.sessionId, userSessionId));

    if (!res) {
        return null;
    }

    return {
        session: res.session,
        user: res.user
    };
}

/**
 * Get user organization role
 */
export async function getUserOrgRole(userId: string, orgId: string) {
    const userOrgRole = await db
        .select()
        .from(userOrgs)
        .where(and(eq(userOrgs.userId, userId), eq(userOrgs.orgId, orgId)))
        .limit(1);

    return userOrgRole.length > 0 ? userOrgRole[0] : null;
}

/**
 * Check if role has access to resource
 */
export async function getRoleResourceAccess(
    resourceId: number,
    roleId: number
) {
    const roleResourceAccess = await db
        .select()
        .from(roleResources)
        .where(
            and(
                eq(roleResources.resourceId, resourceId),
                eq(roleResources.roleId, roleId)
            )
        )
        .limit(1);

    return roleResourceAccess.length > 0 ? roleResourceAccess[0] : null;
}

/**
 * Check if user has direct access to resource
 */
export async function getUserResourceAccess(
    userId: string,
    resourceId: number
) {
    const userResourceAccess = await db
        .select()
        .from(userResources)
        .where(
            and(
                eq(userResources.userId, userId),
                eq(userResources.resourceId, resourceId)
            )
        )
        .limit(1);

    return userResourceAccess.length > 0 ? userResourceAccess[0] : null;
}

/**
 * Get resource rules for a given resource
 */
export async function getResourceRules(
    resourceId: number
): Promise<ResourceRule[]> {
    const rules = await db
        .select()
        .from(resourceRules)
        .where(eq(resourceRules.resourceId, resourceId));

    return rules;
}

/**
 * Get organization login page
 */
export async function getOrgLoginPage(
    orgId: string
): Promise<LoginPage | null> {
    const [result] = await db
        .select()
        .from(loginPageOrg)
        .where(eq(loginPageOrg.orgId, orgId))
        .innerJoin(
            loginPage,
            eq(loginPageOrg.loginPageId, loginPage.loginPageId)
        )
        .limit(1);

    if (!result) {
        return null;
    }

    return result?.loginPage;
}
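A hypothetical caller of these helpers, sketching how a verify-session style flow might combine them; the domain and import path are examples, and error handling is omitted:

import {
    getResourceByDomain,
    getResourceRules,
    getUserSessionWithUser
} from "@server/db/queries/verifySessionQueries";

async function describeAccess(domain: string, userSessionId?: string) {
    const info = await getResourceByDomain(domain);
    if (!info?.resource) return { found: false };

    // A resource is gated if any of the three auth mechanisms is configured.
    const requiresAuth =
        Boolean(info.password) || Boolean(info.pincode) || Boolean(info.headerAuth);
    const rules = await getResourceRules(info.resource.resourceId);
    const session = userSessionId
        ? await getUserSessionWithUser(userSessionId)
        : null;

    return {
        found: true,
        requiresAuth,
        ruleCount: rules.length,
        user: session?.user ?? null
    };
}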
Some files were not shown because too many files have changed in this diff.