Compare commits: service-st...fix-module

584 commits
@@ -1,9 +0,0 @@
name: checks
on:
  pull_request:
jobs:
  checks-impure:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4
      - run: nix run .#impure-checks

2  .github/workflows/repo-sync.yml  vendored
@@ -10,7 +10,7 @@ jobs:
    if: github.repository_owner == 'clan-lol'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@v5
        with:
          persist-credentials: false
      - uses: actions/create-github-app-token@v2

1  .gitignore  vendored
@@ -39,7 +39,6 @@ select
# Generated files
pkgs/clan-app/ui/api/API.json
pkgs/clan-app/ui/api/API.ts
pkgs/clan-app/ui/api/Inventory.ts
pkgs/clan-app/ui/api/modules_schemas.json
pkgs/clan-app/ui/api/schema.json
pkgs/clan-app/ui/.fonts

20  CODEOWNERS
@@ -0,0 +1,20 @@
clanServices/.* @pinpox @kenji

lib/test/container-test-driver/.* @DavHau @mic92
lib/modules/inventory/.* @hsjobeki
lib/modules/inventoryClass/.* @hsjobeki

pkgs/clan-app/ui/.* @hsjobeki @brianmcgee
pkgs/clan-app/clan_app/.* @qubasa @hsjobeki

pkgs/clan-cli/clan_cli/.* @lassulus @mic92 @kenji
pkgs/clan-cli/clan_cli/(secrets|vars)/.* @DavHau @lassulus

pkgs/clan-cli/clan_lib/log_machines/.* @Qubasa
pkgs/clan-cli/clan_lib/ssh/.* @Qubasa @Mic92 @lassulus
pkgs/clan-cli/clan_lib/tags/.* @hsjobeki
pkgs/clan-cli/clan_lib/persist/.* @hsjobeki
pkgs/clan-cli/clan_lib/flake/.* @lassulus

pkgs/clan-cli/api.py @hsjobeki
pkgs/clan-cli/openapi.py @hsjobeki

@@ -8,7 +8,7 @@ Our mission is simple: to democratize computing by providing tools that empower

## Features of Clan

- **Full-Stack System Deployment:** Utilize Clan’s toolkit alongside Nix's reliability to build and manage systems effortlessly.
- **Full-Stack System Deployment:** Utilize Clan's toolkit alongside Nix's reliability to build and manage systems effortlessly.
- **Overlay Networks:** Secure, private communication channels between devices.
- **Virtual Machine Integration:** Seamless operation of VM applications within the main operating system.
- **Robust Backup Management:** Long-term, self-hosted data preservation.
@@ -36,7 +36,6 @@ in
|
||||
++ filter pathExists [
|
||||
./devshell/flake-module.nix
|
||||
./flash/flake-module.nix
|
||||
./impure/flake-module.nix
|
||||
./installation/flake-module.nix
|
||||
./update/flake-module.nix
|
||||
./morph/flake-module.nix
|
||||
@@ -139,33 +138,6 @@ in
|
||||
nixosTests
|
||||
// flakeOutputs
|
||||
// {
|
||||
# TODO: Automatically provide this check to downstream users to check their modules
|
||||
clan-modules-json-compatible =
|
||||
let
|
||||
allSchemas = lib.mapAttrs (
|
||||
_n: m:
|
||||
let
|
||||
schema =
|
||||
(self.clanLib.evalService {
|
||||
modules = [ m ];
|
||||
prefix = [
|
||||
"checks"
|
||||
system
|
||||
];
|
||||
}).config.result.api.schema;
|
||||
in
|
||||
schema
|
||||
) self.clan.modules;
|
||||
in
|
||||
pkgs.runCommand "combined-result"
|
||||
{
|
||||
schemaFile = builtins.toFile "schemas.json" (builtins.toJSON allSchemas);
|
||||
}
|
||||
''
|
||||
mkdir -p $out
|
||||
cat $schemaFile > $out/allSchemas.json
|
||||
'';
|
||||
|
||||
clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
|
||||
cp -r ${privateInputs.clan-core-for-checks} $out
|
||||
chmod -R +w $out
|
||||
|
||||
@@ -55,7 +55,8 @@
|
||||
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
|
||||
in
|
||||
{
|
||||
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
|
||||
# Skip flash test on aarch64-linux for now as it's too slow
|
||||
checks = lib.optionalAttrs (pkgs.stdenv.isLinux && pkgs.hostPlatform.system != "aarch64-linux") {
|
||||
nixos-test-flash = self.clanLib.test.baseTest {
|
||||
name = "flash";
|
||||
nodes.target = {
|
||||
|
||||
@@ -1,51 +0,0 @@
|
||||
{
|
||||
perSystem =
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
self',
|
||||
...
|
||||
}:
|
||||
{
|
||||
# a script that executes all other checks
|
||||
packages.impure-checks = pkgs.writeShellScriptBin "impure-checks" ''
|
||||
#!${pkgs.bash}/bin/bash
|
||||
set -euo pipefail
|
||||
|
||||
unset CLAN_DIR
|
||||
|
||||
export PATH="${
|
||||
lib.makeBinPath (
|
||||
[
|
||||
pkgs.gitMinimal
|
||||
pkgs.nix
|
||||
pkgs.coreutils
|
||||
pkgs.rsync # needed to have rsync installed on the dummy ssh server
|
||||
]
|
||||
++ self'.packages.clan-cli-full.runtimeDependencies
|
||||
)
|
||||
}"
|
||||
ROOT=$(git rev-parse --show-toplevel)
|
||||
cd "$ROOT/pkgs/clan-cli"
|
||||
|
||||
# Set up custom git configuration for tests
|
||||
export GIT_CONFIG_GLOBAL=$(mktemp)
|
||||
git config --file "$GIT_CONFIG_GLOBAL" user.name "Test User"
|
||||
git config --file "$GIT_CONFIG_GLOBAL" user.email "test@example.com"
|
||||
export GIT_CONFIG_SYSTEM=/dev/null
|
||||
|
||||
# this disables dynamic dependency loading in clan-cli
|
||||
export CLAN_NO_DYNAMIC_DEPS=1
|
||||
|
||||
jobs=$(nproc)
|
||||
# Spawning worker in pytest is relatively slow, so we limit the number of jobs to 13
|
||||
# (current number of impure tests)
|
||||
jobs="$((jobs > 13 ? 13 : jobs))"
|
||||
|
||||
nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -n $jobs -m impure ./clan_cli $@"
|
||||
|
||||
# Clean up temporary git config
|
||||
rm -f "$GIT_CONFIG_GLOBAL"
|
||||
'';
|
||||
};
|
||||
}
|
||||
@@ -232,6 +232,7 @@
|
||||
"-i", ssh_conn.ssh_key,
|
||||
"--option", "store", os.environ['CLAN_TEST_STORE'],
|
||||
"--update-hardware-config", "nixos-facter",
|
||||
"--no-persist-state",
|
||||
]
|
||||
|
||||
subprocess.run(clan_cmd, check=True)
|
||||
@@ -241,7 +242,7 @@
|
||||
target.shutdown()
|
||||
except BrokenPipeError:
|
||||
# qemu has already exited
|
||||
pass
|
||||
target.connected = False
|
||||
|
||||
# Create a new machine instance that boots from the installed system
|
||||
installed_machine = create_test_machine(target, "${pkgs.qemu_test}", name="after_install")
|
||||
@@ -275,7 +276,7 @@
|
||||
"${self.checks.x86_64-linux.clan-core-for-checks}",
|
||||
"${closureInfo}"
|
||||
)
|
||||
|
||||
|
||||
# Set up SSH connection
|
||||
ssh_conn = setup_ssh_connection(
|
||||
target,
|
||||
@@ -301,7 +302,8 @@
|
||||
"test-install-machine-without-system",
|
||||
"-i", ssh_conn.ssh_key,
|
||||
"--option", "store", os.environ['CLAN_TEST_STORE'],
|
||||
f"nonrootuser@localhost:{ssh_conn.host_port}"
|
||||
"--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
|
||||
"--yes"
|
||||
]
|
||||
|
||||
result = subprocess.run(clan_cmd, capture_output=True, cwd=flake_dir)
|
||||
@@ -325,7 +327,9 @@
|
||||
"test-install-machine-without-system",
|
||||
"-i", ssh_conn.ssh_key,
|
||||
"--option", "store", os.environ['CLAN_TEST_STORE'],
|
||||
f"nonrootuser@localhost:{ssh_conn.host_port}"
|
||||
"--target-host",
|
||||
f"nonrootuser@localhost:{ssh_conn.host_port}",
|
||||
"--yes"
|
||||
]
|
||||
|
||||
result = subprocess.run(clan_cmd, capture_output=True, cwd=flake_dir)
|
||||
|
||||
68  clanServices/coredns/README.md  Normal file
@@ -0,0 +1,68 @@
This module enables hosting clan-internal services easily, which can be resolved
inside your VPN. This allows defining a custom top-level domain (e.g. `.clan`)
and exposing endpoints from a machine to others, which will be
accessible under `http://<service>.clan` in your browser.

The service consists of two roles:

- A `server` role: This is the DNS-server that will be queried when trying to
  resolve clan-internal services. It defines the top-level domain.
- A `default` role: This does two things. First, it sets up the nameservers so
  that clan-internal queries are resolved via the `server` machine, while
  external queries are resolved as normal via DHCP. Second, it allows exposing
  services (see example below).

## Example Usage

Here the machine `dnsserver` is designated as internal DNS-server for the TLD
`.foo`. `server01` will host an application that shall be reachable at
`http://one.foo` and `server02` is going to be reachable at `http://two.foo`.
`client` is any other machine that is part of the clan but does not host any
services.

When `client` tries to resolve `http://one.foo`, the DNS query will be
routed to `dnsserver`, which will answer with `192.168.1.3`. If it tries to
resolve some external domain (e.g. `https://clan.lol`), the query will not be
routed to `dnsserver` but resolved as before, via the nameservers advertised by
DHCP.

```nix
inventory = {

  machines = {
    dnsserver = { }; # 192.168.1.2
    server01 = { }; # 192.168.1.3
    server02 = { }; # 192.168.1.4
    client = { }; # 192.168.1.5
  };

  instances = {
    coredns = {

      module.name = "@clan/coredns";
      module.input = "self";

      # Add the default role to all machines, including `client`
      roles.default.tags.all = { };

      # DNS server
      roles.server.machines."dnsserver".settings = {
        ip = "192.168.1.2";
        tld = "foo";
      };

      # First service
      roles.default.machines."server01".settings = {
        ip = "192.168.1.3";
        services = [ "one" ];
      };

      # Second service
      roles.default.machines."server02".settings = {
        ip = "192.168.1.4";
        services = [ "two" ];
      };
    };
  };
};
```
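To sanity-check an instance like the one above from any clan machine, you can query the `server` host directly; a minimal sketch, assuming the example addresses and `foo` TLD shown here (the module's VM test later in this changeset performs the same `dig` checks):

```bash
# Ask the clan DNS server directly for the exposed services (IPs from the example).
dig +short @192.168.1.2 one.foo   # expected: 192.168.1.3
dig +short @192.168.1.2 two.foo   # expected: 192.168.1.4

# On a machine with the `default` role, normal resolution should work too,
# since unbound forwards queries for the clan TLD to the server:
getent hosts one.foo
curl http://one.foo
```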
157
clanServices/coredns/default.nix
Normal file
157
clanServices/coredns/default.nix
Normal file
@@ -0,0 +1,157 @@
|
||||
{ ... }:
|
||||
{
|
||||
_class = "clan.service";
|
||||
manifest.name = "coredns";
|
||||
manifest.description = "Clan-internal DNS and service exposure";
|
||||
manifest.categories = [ "Network" ];
|
||||
manifest.readme = builtins.readFile ./README.md;
|
||||
|
||||
roles.server = {
|
||||
|
||||
interface =
|
||||
{ lib, ... }:
|
||||
{
|
||||
options.tld = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "clan";
|
||||
description = ''
|
||||
Top-level domain for this instance. All services below this will be
|
||||
resolved internally.
|
||||
'';
|
||||
};
|
||||
|
||||
options.ip = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
# TODO: Set a default
|
||||
description = "IP for the DNS to listen on";
|
||||
};
|
||||
};
|
||||
|
||||
perInstance =
|
||||
{
|
||||
roles,
|
||||
settings,
|
||||
...
|
||||
}:
|
||||
{
|
||||
nixosModule =
|
||||
{
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{
|
||||
|
||||
networking.firewall.allowedTCPPorts = [ 53 ];
|
||||
networking.firewall.allowedUDPPorts = [ 53 ];
|
||||
|
||||
services.coredns =
|
||||
let
|
||||
|
||||
# Get all service entries for one host
|
||||
hostServiceEntries =
|
||||
host:
|
||||
lib.strings.concatStringsSep "\n" (
|
||||
map (
|
||||
service: "${service} IN A ${roles.default.machines.${host}.settings.ip} ; ${host}"
|
||||
) roles.default.machines.${host}.settings.services
|
||||
);
|
||||
|
||||
zonefile = pkgs.writeTextFile {
|
||||
name = "db.${settings.tld}";
|
||||
text = ''
|
||||
$TTL 3600
|
||||
@ IN SOA ns.${settings.tld}. admin.${settings.tld}. 1 7200 3600 1209600 3600
|
||||
IN NS ns.${settings.tld}.
|
||||
ns IN A ${settings.ip} ; DNS server
|
||||
|
||||
''
|
||||
+ (lib.strings.concatStringsSep "\n" (
|
||||
map (host: hostServiceEntries host) (lib.attrNames roles.default.machines)
|
||||
));
|
||||
};
|
||||
|
||||
in
|
||||
{
|
||||
enable = true;
|
||||
config = ''
|
||||
. {
|
||||
forward . 1.1.1.1
|
||||
cache 30
|
||||
}
|
||||
|
||||
${settings.tld} {
|
||||
file ${zonefile}
|
||||
}
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
roles.default = {
|
||||
interface =
|
||||
{ lib, ... }:
|
||||
{
|
||||
options.services = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = [ ];
|
||||
description = ''
|
||||
Service endpoints this host exposes (without TLD). Each entry will
|
||||
be resolved to <entry>.<tld> using the configured top-level domain.
|
||||
'';
|
||||
};
|
||||
|
||||
options.ip = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
# TODO: Set a default
|
||||
description = "IP on which the services will listen";
|
||||
};
|
||||
};
|
||||
|
||||
perInstance =
|
||||
{ roles, ... }:
|
||||
{
|
||||
nixosModule =
|
||||
{ lib, ... }:
|
||||
{
|
||||
|
||||
networking.nameservers = map (m: "127.0.0.1:5353#${roles.server.machines.${m}.settings.tld}") (
|
||||
lib.attrNames roles.server.machines
|
||||
);
|
||||
|
||||
services.resolved.domains = map (m: "~${roles.server.machines.${m}.settings.tld}") (
|
||||
lib.attrNames roles.server.machines
|
||||
);
|
||||
|
||||
services.unbound = {
|
||||
enable = true;
|
||||
settings = {
|
||||
server = {
|
||||
port = 5353;
|
||||
verbosity = 2;
|
||||
interface = [ "127.0.0.1" ];
|
||||
access-control = [ "127.0.0.0/8 allow" ];
|
||||
do-not-query-localhost = "no";
|
||||
domain-insecure = map (m: "${roles.server.machines.${m}.settings.tld}.") (
|
||||
lib.attrNames roles.server.machines
|
||||
);
|
||||
};
|
||||
|
||||
# Default: forward everything else to DHCP-provided resolvers
|
||||
forward-zone = [
|
||||
{
|
||||
name = ".";
|
||||
forward-addr = "127.0.0.53@53"; # Forward to systemd-resolved
|
||||
}
|
||||
];
|
||||
stub-zone = map (m: {
|
||||
name = "${roles.server.machines.${m}.settings.tld}.";
|
||||
stub-addr = "${roles.server.machines.${m}.settings.ip}";
|
||||
}) (lib.attrNames roles.server.machines);
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -3,14 +3,16 @@ let
|
||||
module = lib.modules.importApply ./default.nix { };
|
||||
in
|
||||
{
|
||||
clan.modules.state-version = module;
|
||||
clan.modules = {
|
||||
coredns = module;
|
||||
};
|
||||
perSystem =
|
||||
{ ... }:
|
||||
{
|
||||
clan.nixosTests.state-version = {
|
||||
clan.nixosTests.coredns = {
|
||||
imports = [ ./tests/vm/default.nix ];
|
||||
|
||||
clan.modules."@clan/state-version" = module;
|
||||
clan.modules."@clan/coredns" = module;
|
||||
};
|
||||
};
|
||||
}
|
||||
113
clanServices/coredns/tests/vm/default.nix
Normal file
113
clanServices/coredns/tests/vm/default.nix
Normal file
@@ -0,0 +1,113 @@
|
||||
{
|
||||
...
|
||||
}:
|
||||
{
|
||||
name = "coredns";
|
||||
|
||||
clan = {
|
||||
directory = ./.;
|
||||
test.useContainers = true;
|
||||
inventory = {
|
||||
|
||||
machines = {
|
||||
dns = { }; # 192.168.1.2
|
||||
server01 = { }; # 192.168.1.3
|
||||
server02 = { }; # 192.168.1.4
|
||||
client = { }; # 192.168.1.1
|
||||
};
|
||||
|
||||
instances = {
|
||||
coredns = {
|
||||
|
||||
module.name = "@clan/coredns";
|
||||
module.input = "self";
|
||||
|
||||
roles.default.tags.all = { };
|
||||
|
||||
# First service
|
||||
roles.default.machines."server01".settings = {
|
||||
ip = "192.168.1.3";
|
||||
services = [ "one" ];
|
||||
};
|
||||
|
||||
# Second service
|
||||
roles.default.machines."server02".settings = {
|
||||
ip = "192.168.1.4";
|
||||
services = [ "two" ];
|
||||
};
|
||||
|
||||
# DNS server
|
||||
roles.server.machines."dns".settings = {
|
||||
ip = "192.168.1.2";
|
||||
tld = "foo";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
nodes = {
|
||||
dns =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = [ pkgs.net-tools ];
|
||||
};
|
||||
|
||||
client =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = [ pkgs.net-tools ];
|
||||
};
|
||||
|
||||
server01 = {
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
virtualHosts."one.foo" = {
|
||||
locations."/" = {
|
||||
return = "200 'test server response one'";
|
||||
extraConfig = "add_header Content-Type text/plain;";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
server02 = {
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
virtualHosts."two.foo" = {
|
||||
locations."/" = {
|
||||
return = "200 'test server response two'";
|
||||
extraConfig = "add_header Content-Type text/plain;";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
import json
|
||||
start_all()
|
||||
|
||||
machines = [server01, server02, dns, client]
|
||||
|
||||
for m in machines:
|
||||
m.systemctl("start network-online.target")
|
||||
|
||||
for m in machines:
|
||||
m.wait_for_unit("network-online.target")
|
||||
|
||||
# import time
|
||||
# time.sleep(2333333)
|
||||
|
||||
# This should work, but is broken in tests I think? Instead we dig directly
|
||||
|
||||
# client.succeed("curl -k -v http://one.foo")
|
||||
# client.succeed("curl -k -v http://two.foo")
|
||||
|
||||
answer = client.succeed("dig @192.168.1.2 one.foo")
|
||||
assert "192.168.1.3" in answer, "IP not found"
|
||||
|
||||
answer = client.succeed("dig @192.168.1.2 two.foo")
|
||||
assert "192.168.1.4" in answer, "IP not found"
|
||||
|
||||
'';
|
||||
}
|
||||
@@ -4,22 +4,66 @@
|
||||
{
|
||||
|
||||
nixosModule =
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
config,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
jsonpath = "/tmp/telegraf.json";
|
||||
auth_user = "prometheus";
|
||||
in
|
||||
{
|
||||
|
||||
networking.firewall.interfaces = lib.mkIf (settings.allowAllInterfaces == false) (
|
||||
builtins.listToAttrs (
|
||||
map (name: {
|
||||
inherit name;
|
||||
value.allowedTCPPorts = [ 9273 ];
|
||||
value.allowedTCPPorts = [
|
||||
9273
|
||||
9990
|
||||
];
|
||||
}) settings.interfaces
|
||||
)
|
||||
);
|
||||
|
||||
networking.firewall.allowedTCPPorts = lib.mkIf (settings.allowAllInterfaces == true) [ 9273 ];
|
||||
networking.firewall.allowedTCPPorts = lib.mkIf (settings.allowAllInterfaces == true) [
|
||||
9273
|
||||
9990
|
||||
];
|
||||
|
||||
clan.core.vars.generators."telegraf" = {
|
||||
|
||||
files.password.restartUnits = [ "telegraf.service" ];
|
||||
files.password-env.restartUnits = [ "telegraf.service" ];
|
||||
files.miniserve-auth.restartUnits = [ "telegraf.service" ];
|
||||
|
||||
runtimeInputs = [
|
||||
pkgs.coreutils
|
||||
pkgs.xkcdpass
|
||||
pkgs.mkpasswd
|
||||
];
|
||||
|
||||
script = ''
|
||||
PASSWORD=$(xkcdpass --numwords 4 --delimiter - --count 1 | tr -d "\n")
|
||||
echo "BASIC_AUTH_PWD=$PASSWORD" > "$out"/password-env
|
||||
echo "${auth_user}:$PASSWORD" > "$out"/miniserve-auth
|
||||
echo "$PASSWORD" | tr -d "\n" > "$out"/password
|
||||
'';
|
||||
};
|
||||
|
||||
systemd.services.telegraf-json = {
|
||||
enable = true;
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
script = "${pkgs.miniserve}/bin/miniserve -p 9990 ${jsonpath} --auth-file ${config.clan.core.vars.generators.telegraf.files.miniserve-auth.path}";
|
||||
};
|
||||
|
||||
services.telegraf = {
|
||||
enable = true;
|
||||
environmentFiles = [
|
||||
(builtins.toString config.clan.core.vars.generators.telegraf.files.password-env.path)
|
||||
];
|
||||
extraConfig = {
|
||||
agent.interval = "60s";
|
||||
inputs = {
|
||||
@@ -33,22 +77,34 @@
|
||||
|
||||
exec =
|
||||
let
|
||||
currentSystemScript = pkgs.writeShellScript "current-system" ''
|
||||
printf "current_system,path=%s present=0\n" $(readlink /run/current-system)
|
||||
nixosSystems = pkgs.writeShellScript "current-system" ''
|
||||
printf "nixos_systems,current_system=%s,booted_system=%s,current_kernel=%s,booted_kernel=%s present=0\n" \
|
||||
"$(readlink /run/current-system)" "$(readlink /run/booted-system)" \
|
||||
"$(basename $(echo /run/current-system/kernel-modules/lib/modules/*))" \
|
||||
"$(basename $(echo /run/booted-system/kernel-modules/lib/modules/*))"
|
||||
'';
|
||||
in
|
||||
[
|
||||
{
|
||||
# Expose the path to current-system as metric. We use
|
||||
# this to check if the machine is up-to-date.
|
||||
commands = [ currentSystemScript ];
|
||||
commands = [ nixosSystems ];
|
||||
data_format = "influx";
|
||||
}
|
||||
];
|
||||
};
|
||||
# sadly there doesn't seem to exist a telegraf http_client output plugin
|
||||
outputs.prometheus_client = {
|
||||
listen = ":9273";
|
||||
metric_version = 2;
|
||||
basic_username = "${auth_user}";
|
||||
basic_password = "$${BASIC_AUTH_PWD}";
|
||||
};
|
||||
|
||||
outputs.file = {
|
||||
files = [ jsonpath ];
|
||||
data_format = "json";
|
||||
json_timestamp_units = "1s";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@@ -1,37 +0,0 @@
This service generates the `system.stateVersion` of the nixos installation
automatically.

Possible values:
[system.stateVersion](https://search.nixos.org/options?channel=unstable&show=system.stateVersion&from=0&size=50&sort=relevance&type=packages&query=stateVersion)

## Usage

The following configuration will set `stateVersion` for all machines:

```
inventory.instances = {
  state-version = {
    module = {
      name = "state-version";
      input = "clan";
    };
    roles.default.tags.all = { };
  };
};
```

## Migration

If you are already setting `system.stateVersion`, either let the automatic
generation happen, or trigger the generation manually for the machine. The
service will take the specified version, if one is already supplied through the
config.

To manually generate the version for a specified machine run:

```
clan vars generate [MACHINE]
```

If the setting was already set, you can then remove `system.stateVersion` from
your machine configuration. For new machines, just import the service as shown
above.
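After triggering the generator you can check what was captured for a machine; a sketch, assuming the `vars/per-machine/<machine>/<generator>/` layout used elsewhere in this changeset and a hypothetical machine name `mymachine`:

```bash
# "state-version" is the generator name; the generated "version" file is non-secret,
# so it ends up in the repo under the machine's vars directory.
clan vars generate mymachine
grep -r . vars/per-machine/mymachine/state-version/   # should show e.g. 25.11
```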
@@ -1,50 +0,0 @@
|
||||
{ ... }:
|
||||
{
|
||||
_class = "clan.service";
|
||||
manifest.name = "clan-core/state-version";
|
||||
manifest.description = "Automatically generate the state version of the nixos installation.";
|
||||
manifest.categories = [ "System" ];
|
||||
manifest.readme = builtins.readFile ./README.md;
|
||||
|
||||
roles.default = {
|
||||
|
||||
perInstance =
|
||||
{ ... }:
|
||||
{
|
||||
nixosModule =
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
var = config.clan.core.vars.generators.state-version.files.version or { };
|
||||
in
|
||||
{
|
||||
|
||||
warnings = [
|
||||
''
|
||||
The clan.state-version service is deprecated and will be
|
||||
removed on 2025-07-15 in favor of a nix option.
|
||||
|
||||
Please migrate your configuration to use `clan.core.settings.state-version.enable = true` instead.
|
||||
''
|
||||
];
|
||||
|
||||
system.stateVersion = lib.mkDefault (lib.removeSuffix "\n" var.value);
|
||||
|
||||
clan.core.vars.generators.state-version = {
|
||||
files.version = {
|
||||
secret = false;
|
||||
value = lib.mkDefault config.system.nixos.release;
|
||||
};
|
||||
runtimeInputs = [ ];
|
||||
script = ''
|
||||
echo -n ${config.system.stateVersion} > "$out"/version
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
}
|
||||
@@ -1,22 +0,0 @@
|
||||
{ lib, ... }:
|
||||
{
|
||||
name = "service-state-version";
|
||||
|
||||
clan = {
|
||||
directory = ./.;
|
||||
inventory = {
|
||||
machines.server = { };
|
||||
instances.default = {
|
||||
module.name = "@clan/state-version";
|
||||
module.input = "self";
|
||||
roles.default.machines."server" = { };
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
nodes.server = { };
|
||||
|
||||
testScript = lib.mkDefault ''
|
||||
start_all()
|
||||
'';
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
25.11
|
||||
@@ -17,6 +17,20 @@
    };
  };

  # Deploy user Carol on all machines. Prompt only once and use the
  # same password on all machines. (`share = true`)
  user-carol = {
    module = {
      name = "users";
      input = "clan";
    };
    roles.default.tags.all = { };
    roles.default.settings = {
      user = "carol";
      share = true;
    };
  };

  # Deploy user bob only on his laptop. Prompt for a password.
  user-bob = {
    module = {
@@ -29,3 +43,44 @@
  };
}
```

## Migration from `root-password` module

The deprecated `clan.root-password` module has been replaced by the `users` module. Here's how to migrate:

### 1. Update your flake configuration

Replace the `root-password` module import with a `users` service instance:

```nix
# OLD - Remove this from your nixosModules:
imports = [
  self.inputs.clan-core.clanModules.root-password
];

# NEW - Add to inventory.instances or machines/flake-module.nix:
instances = {
  users-root = {
    module.name = "users";
    module.input = "clan-core";
    roles.default.tags.nixos = { };
    roles.default.settings = {
      user = "root";
      prompt = false; # Set to true if you want to be prompted
      groups = [ ];
    };
  };
};
```

### 2. Migrate vars

The vars structure has changed from `root-password` to `user-password-root`:

```bash
# For each machine, rename the vars directories:
cd vars/per-machine/<machine-name>/
mv root-password user-password-root
mv user-password-root/password-hash user-password-root/user-password-hash
mv user-password-root/password user-password-root/user-password
```
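If the clan has more than a couple of machines, the same renames can be scripted; a minimal sketch (hypothetical helper, assuming the `vars/per-machine/<machine-name>/` layout shown above):

```bash
# Repeats the per-machine renames above for every machine directory.
# Review the result with `git status` before committing.
for machine_dir in vars/per-machine/*/; do
  if [ -d "${machine_dir}root-password" ]; then
    mv "${machine_dir}root-password" "${machine_dir}user-password-root"
    mv "${machine_dir}user-password-root/password-hash" "${machine_dir}user-password-root/user-password-hash"
    mv "${machine_dir}user-password-root/password" "${machine_dir}user-password-root/user-password"
  fi
done
```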
@@ -59,6 +59,17 @@
|
||||
- "input" - Allows the user to access input devices.
|
||||
'';
|
||||
};
|
||||
share = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
example = true;
|
||||
description = ''
|
||||
Whether the user should have the same password on all machines.
|
||||
|
||||
By default, you will be prompted for a new password for every host.
|
||||
Unless `generate` is set to `true`.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@@ -82,7 +93,6 @@
|
||||
};
|
||||
|
||||
clan.core.vars.generators."user-password-${settings.user}" = {
|
||||
|
||||
files.user-password-hash.neededFor = "users";
|
||||
files.user-password-hash.restartUnits = lib.optional (config.services.userborn.enable) "userborn.service";
|
||||
files.user-password.deploy = false;
|
||||
@@ -107,6 +117,8 @@
|
||||
pkgs.mkpasswd
|
||||
];
|
||||
|
||||
share = settings.share;
|
||||
|
||||
script =
|
||||
(
|
||||
if settings.prompt then
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
IPv6 address allocator for WireGuard networks.
|
||||
"""IPv6 address allocator for WireGuard networks.
|
||||
|
||||
Network layout:
|
||||
- Base network: /40 ULA prefix (fd00::/8 + 32 bits from hash)
|
||||
@@ -13,6 +12,11 @@ import ipaddress
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Constants for argument count validation
|
||||
MIN_ARGS_BASE = 4
|
||||
MIN_ARGS_CONTROLLER = 5
|
||||
MIN_ARGS_PEER = 5
|
||||
|
||||
|
||||
def hash_string(s: str) -> str:
|
||||
"""Generate SHA256 hash of string."""
|
||||
@@ -20,8 +24,7 @@ def hash_string(s: str) -> str:
|
||||
|
||||
|
||||
def generate_ula_prefix(instance_name: str) -> ipaddress.IPv6Network:
|
||||
"""
|
||||
Generate a /40 ULA prefix from instance name.
|
||||
"""Generate a /40 ULA prefix from instance name.
|
||||
|
||||
Format: fd{32-bit hash}/40
|
||||
This gives us fd00:0000:0000::/40 through fdff:ffff:ff00::/40
|
||||
@@ -41,15 +44,14 @@ def generate_ula_prefix(instance_name: str) -> ipaddress.IPv6Network:
|
||||
prefix = f"fd{prefix_bits:08x}"
|
||||
prefix_formatted = f"{prefix[:4]}:{prefix[4:8]}::/40"
|
||||
|
||||
network = ipaddress.IPv6Network(prefix_formatted)
|
||||
return network
|
||||
return ipaddress.IPv6Network(prefix_formatted)
|
||||
|
||||
|
||||
def generate_controller_subnet(
|
||||
base_network: ipaddress.IPv6Network, controller_name: str
|
||||
base_network: ipaddress.IPv6Network,
|
||||
controller_name: str,
|
||||
) -> ipaddress.IPv6Network:
|
||||
"""
|
||||
Generate a /56 subnet for a controller from the base /40 network.
|
||||
"""Generate a /56 subnet for a controller from the base /40 network.
|
||||
|
||||
We have 16 bits (40 to 56) to allocate controller subnets.
|
||||
This allows for 65,536 possible controller subnets.
|
||||
@@ -62,14 +64,11 @@ def generate_controller_subnet(
|
||||
# The controller subnet is at base_prefix:controller_id::/56
|
||||
base_int = int(base_network.network_address)
|
||||
controller_subnet_int = base_int | (controller_id << (128 - 56))
|
||||
controller_subnet = ipaddress.IPv6Network((controller_subnet_int, 56))
|
||||
|
||||
return controller_subnet
|
||||
return ipaddress.IPv6Network((controller_subnet_int, 56))
|
||||
|
||||
|
||||
def generate_peer_suffix(peer_name: str) -> str:
|
||||
"""
|
||||
Generate a unique 64-bit host suffix for a peer.
|
||||
"""Generate a unique 64-bit host suffix for a peer.
|
||||
|
||||
This suffix will be used in all controller subnets to create unique addresses.
|
||||
Format: :xxxx:xxxx:xxxx:xxxx (64 bits)
|
||||
@@ -79,14 +78,13 @@ def generate_peer_suffix(peer_name: str) -> str:
|
||||
suffix_bits = h[:16]
|
||||
|
||||
# Format as IPv6 suffix without leading colon
|
||||
suffix = f"{suffix_bits[0:4]}:{suffix_bits[4:8]}:{suffix_bits[8:12]}:{suffix_bits[12:16]}"
|
||||
return suffix
|
||||
return f"{suffix_bits[0:4]}:{suffix_bits[4:8]}:{suffix_bits[8:12]}:{suffix_bits[12:16]}"
|
||||
|
||||
|
||||
def main() -> None:
|
||||
if len(sys.argv) < 4:
|
||||
if len(sys.argv) < MIN_ARGS_BASE:
|
||||
print(
|
||||
"Usage: ipv6_allocator.py <output_dir> <instance_name> <controller|peer> <machine_name>"
|
||||
"Usage: ipv6_allocator.py <output_dir> <instance_name> <controller|peer> <machine_name>",
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
@@ -98,7 +96,7 @@ def main() -> None:
|
||||
base_network = generate_ula_prefix(instance_name)
|
||||
|
||||
if node_type == "controller":
|
||||
if len(sys.argv) < 5:
|
||||
if len(sys.argv) < MIN_ARGS_CONTROLLER:
|
||||
print("Controller name required")
|
||||
sys.exit(1)
|
||||
|
||||
@@ -114,7 +112,7 @@ def main() -> None:
|
||||
(output_dir / "prefix").write_text(prefix_str)
|
||||
|
||||
elif node_type == "peer":
|
||||
if len(sys.argv) < 5:
|
||||
if len(sys.argv) < MIN_ARGS_PEER:
|
||||
print("Peer name required")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
24
devFlake/flake.lock
generated
24
devFlake/flake.lock
generated
@@ -3,10 +3,10 @@
|
||||
"clan-core-for-checks": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1755093452,
|
||||
"narHash": "sha256-NKBss7QtNnOqYVyJmYCgaCvYZK0mpQTQc9fLgE1mGyk=",
|
||||
"lastModified": 1756166884,
|
||||
"narHash": "sha256-skg4rwpbCjhpLlrv/Pndd43FoEgrJz98WARtGLhCSzo=",
|
||||
"ref": "main",
|
||||
"rev": "7e97734797f0c6bd3c2d3a51cf54a2a6b371c222",
|
||||
"rev": "f7414d7e6e58709af27b6fe16eb530278e81eaaf",
|
||||
"shallow": true,
|
||||
"type": "git",
|
||||
"url": "https://git.clan.lol/clan/clan-core"
|
||||
@@ -84,11 +84,11 @@
|
||||
},
|
||||
"nixpkgs-dev": {
|
||||
"locked": {
|
||||
"lastModified": 1755166611,
|
||||
"narHash": "sha256-sk8pK8kWz4IE4ErAjKE1d8tMChY6VQR32U4yS68FIog=",
|
||||
"lastModified": 1756578978,
|
||||
"narHash": "sha256-dLgwMLIMyHlSeIDsoT2OcZBkuruIbjhIAv1sGANwtes=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "1a341e3c908f4a3105e737bd13af0318dc06fbe3",
|
||||
"rev": "a85a50bef870537a9705f64ed75e54d1f4bf9c23",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -107,11 +107,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1754869408,
|
||||
"narHash": "sha256-G1zNuxiCDfqNQVoL9j5v+ZYfUER7AI158ev98/JC8LI=",
|
||||
"lastModified": 1755555503,
|
||||
"narHash": "sha256-WiOO7GUOsJ4/DoMy2IC5InnqRDSo2U11la48vCCIjjY=",
|
||||
"owner": "NuschtOS",
|
||||
"repo": "search",
|
||||
"rev": "2f5478267557a0f7a70d953b6c0867a5b4282739",
|
||||
"rev": "6f3efef888b92e6520f10eae15b86ff537e1d2ea",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -165,11 +165,11 @@
|
||||
"nixpkgs": []
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1754847726,
|
||||
"narHash": "sha256-2vX8QjO5lRsDbNYvN9hVHXLU6oMl+V/PsmIiJREG4rE=",
|
||||
"lastModified": 1755934250,
|
||||
"narHash": "sha256-CsDojnMgYsfshQw3t4zjRUkmMmUdZGthl16bXVWgRYU=",
|
||||
"owner": "numtide",
|
||||
"repo": "treefmt-nix",
|
||||
"rev": "7d81f6fb2e19bf84f1c65135d1060d829fae2408",
|
||||
"rev": "74e1a52d5bd9430312f8d1b8b0354c92c17453e5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
@@ -33,7 +33,6 @@
|
||||
self'.packages.tea-create-pr
|
||||
self'.packages.merge-after-ci
|
||||
self'.packages.pending-reviews
|
||||
self'.packages.agit
|
||||
# treefmt with config defined in ./flake-parts/formatting.nix
|
||||
config.treefmt.build.wrapper
|
||||
];
|
||||
@@ -46,7 +45,7 @@
|
||||
ln -sfT ${inputs.nix-select} "$PRJ_ROOT/pkgs/clan-cli/clan_lib/select"
|
||||
|
||||
# Generate classes.py from schemas
|
||||
${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.clan-schema-abstract}/schema.json $PRJ_ROOT/pkgs/clan-cli/clan_lib/nix_models/clan.py
|
||||
${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.clanSchemaJson}/schema.json $PRJ_ROOT/pkgs/clan-cli/clan_lib/nix_models/clan.py
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
2
docs/.gitignore
vendored
2
docs/.gitignore
vendored
@@ -1,5 +1,5 @@
|
||||
/site/reference
|
||||
/site/static
|
||||
/site/options-page
|
||||
/site/options
|
||||
/site/openapi.json
|
||||
!/site/static/extra.css
|
||||
|
||||
@@ -1,13 +1,11 @@
|
||||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
let
|
||||
suffix = config.clan.core.vars.generators.disk-id.files.diskId.value;
|
||||
mirrorBoot = idx: {
|
||||
# suffix is to prevent disk name collisions
|
||||
name = idx + suffix;
|
||||
name = idx;
|
||||
type = "disk";
|
||||
device = "/dev/disk/by-id/${idx}";
|
||||
content = {
|
||||
|
||||
@@ -1,13 +1,11 @@
|
||||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
let
|
||||
suffix = config.clan.core.vars.generators.disk-id.files.diskId.value;
|
||||
mirrorBoot = idx: {
|
||||
# suffix is to prevent disk name collisions
|
||||
name = idx + suffix;
|
||||
name = idx;
|
||||
type = "disk";
|
||||
device = "/dev/disk/by-id/${idx}";
|
||||
content = {
|
||||
|
||||
@@ -2,11 +2,11 @@ site_name: Clan Documentation
|
||||
site_url: https://docs.clan.lol
|
||||
repo_url: https://git.clan.lol/clan/clan-core/
|
||||
repo_name: "_>"
|
||||
edit_uri: _edit/main/docs/docs/
|
||||
edit_uri: _edit/main/docs/site/
|
||||
|
||||
validation:
|
||||
omitted_files: warn
|
||||
absolute_links: warn
|
||||
absolute_links: ignore
|
||||
unrecognized_links: warn
|
||||
|
||||
markdown_extensions:
|
||||
@@ -59,14 +59,15 @@ nav:
|
||||
- Configure Disk Config: guides/getting-started/choose-disk.md
|
||||
- Update Machine: guides/getting-started/update.md
|
||||
- Continuous Integration: guides/getting-started/flake-check.md
|
||||
- Using Services: guides/clanServices.md
|
||||
- Convert Existing NixOS Config: guides/getting-started/convert-flake.md
|
||||
- ClanServices: guides/clanServices.md
|
||||
- Backup & Restore: guides/backups.md
|
||||
- Disk Encryption: guides/disk-encryption.md
|
||||
- Age Plugins: guides/age-plugins.md
|
||||
- Secrets management: guides/secrets.md
|
||||
- Target Host: guides/target-host.md
|
||||
- Networking: guides/networking.md
|
||||
- Zerotier VPN: guides/mesh-vpn.md
|
||||
- Secure Boot: guides/secure-boot.md
|
||||
- How to disable Secure Boot: guides/secure-boot.md
|
||||
- Flake-parts: guides/flake-parts.md
|
||||
- macOS: guides/macos.md
|
||||
- Contributing:
|
||||
@@ -77,8 +78,7 @@ nav:
|
||||
- Writing a Service Module: guides/services/community.md
|
||||
- Writing a Disko Template: guides/disko-templates/community.md
|
||||
- Migrations:
|
||||
- Migrate existing Flakes: guides/migrations/migration-guide.md
|
||||
- Migrate inventory Services: guides/migrations/migrate-inventory-services.md
|
||||
- Migrate from clan modules to services: guides/migrations/migrate-inventory-services.md
|
||||
- Facts Vars Migration: guides/migrations/migration-facts-vars.md
|
||||
- Disk id: guides/migrations/disk-id.md
|
||||
- Concepts:
|
||||
@@ -88,12 +88,13 @@ nav:
|
||||
- Templates: concepts/templates.md
|
||||
- Reference:
|
||||
- Overview: reference/index.md
|
||||
- Clan Options: options.md
|
||||
- Browse Options: "/options"
|
||||
- Services:
|
||||
- Overview:
|
||||
- reference/clanServices/index.md
|
||||
- reference/clanServices/admin.md
|
||||
- reference/clanServices/borgbackup.md
|
||||
- reference/clanServices/coredns.md
|
||||
- reference/clanServices/data-mesher.md
|
||||
- reference/clanServices/dyndns.md
|
||||
- reference/clanServices/emergency-access.md
|
||||
@@ -106,7 +107,6 @@ nav:
|
||||
- reference/clanServices/monitoring.md
|
||||
- reference/clanServices/packages.md
|
||||
- reference/clanServices/sshd.md
|
||||
- reference/clanServices/state-version.md
|
||||
- reference/clanServices/syncthing.md
|
||||
- reference/clanServices/trusted-nix-caches.md
|
||||
- reference/clanServices/users.md
|
||||
@@ -155,6 +155,7 @@ nav:
|
||||
- 05-deployment-parameters: decisions/05-deployment-parameters.md
|
||||
- Template: decisions/_template.md
|
||||
- Glossary: reference/glossary.md
|
||||
- Browse Options: "/options"
|
||||
|
||||
docs_dir: site
|
||||
site_dir: out
|
||||
@@ -172,6 +173,7 @@ theme:
|
||||
- content.code.annotate
|
||||
- content.code.copy
|
||||
- content.tabs.link
|
||||
- content.action.edit
|
||||
icon:
|
||||
repo: fontawesome/brands/git
|
||||
custom_dir: overrides
|
||||
|
||||
@@ -54,9 +54,9 @@ pkgs.stdenv.mkDerivation {
|
||||
chmod -R +w ./site/reference
|
||||
echo "Generated API documentation in './site/reference/' "
|
||||
|
||||
rm -r ./site/options-page || true
|
||||
cp -r ${docs-options} ./site/options-page
|
||||
chmod -R +w ./site/options-page
|
||||
rm -rf ./site/options
|
||||
cp -r ${docs-options} ./site/options
|
||||
chmod -R +w ./site/options
|
||||
|
||||
mkdir -p ./site/static/asciinema-player
|
||||
ln -snf ${asciinema-player-js} ./site/static/asciinema-player/asciinema-player.min.js
|
||||
|
||||
@@ -40,6 +40,7 @@ writeShellScriptBin "deploy-docs" ''
|
||||
|
||||
rsync \
|
||||
--checksum \
|
||||
--delete \
|
||||
-e "ssh -o StrictHostKeyChecking=no $sshExtraArgs" \
|
||||
-a ${docs}/ \
|
||||
www@clan.lol:/var/www/docs.clan.lol
|
||||
|
||||
@@ -18,27 +18,8 @@
|
||||
inherit (self) clanModules;
|
||||
clan-core = self;
|
||||
inherit pkgs;
|
||||
evalClanModules = self.clanLib.evalClan.evalClanModules;
|
||||
modulesRolesOptions = self.clanLib.evalClan.evalClanModulesWithRoles {
|
||||
allModules = self.clanModules;
|
||||
inherit pkgs;
|
||||
clan-core = self;
|
||||
};
|
||||
};
|
||||
|
||||
# Frontmatter for clanModules
|
||||
clanModulesFrontmatter =
|
||||
let
|
||||
docs = pkgs.nixosOptionsDoc {
|
||||
options = self.clanLib.modules.frontmatterOptions;
|
||||
transformOptions = self.clanLib.docs.stripStorePathsFromDeclarations;
|
||||
};
|
||||
in
|
||||
docs.optionsJSON;
|
||||
|
||||
# Options available when imported via ` inventory.${moduleName}....${rolesName} `
|
||||
clanModulesViaRoles = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModulesViaRoles);
|
||||
|
||||
# clan service options
|
||||
clanModulesViaService = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModulesViaService);
|
||||
|
||||
@@ -88,12 +69,10 @@
|
||||
}
|
||||
}
|
||||
export CLAN_CORE_DOCS=${jsonDocs.clanCore}/share/doc/nixos/options.json
|
||||
|
||||
# A file that contains the links to all clanModule docs
|
||||
export CLAN_MODULES_VIA_ROLES=${clanModulesViaRoles}
|
||||
export CLAN_MODULES_VIA_SERVICE=${clanModulesViaService}
|
||||
export CLAN_SERVICE_INTERFACE=${self'.legacyPackages.clan-service-module-interface}/share/doc/nixos/options.json
|
||||
# Frontmatter format for clanModules
|
||||
export CLAN_MODULES_FRONTMATTER_DOCS=${clanModulesFrontmatter}/share/doc/nixos/options.json
|
||||
|
||||
export BUILD_CLAN_PATH=${buildClanOptions}/share/doc/nixos/options.json
|
||||
|
||||
@@ -107,7 +86,6 @@
|
||||
legacyPackages = {
|
||||
inherit
|
||||
jsonDocs
|
||||
clanModulesViaRoles
|
||||
clanModulesViaService
|
||||
;
|
||||
};
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
{
|
||||
modulesRolesOptions,
|
||||
nixosOptionsDoc,
|
||||
evalClanModules,
|
||||
lib,
|
||||
pkgs,
|
||||
clan-core,
|
||||
@@ -10,21 +8,36 @@
|
||||
let
|
||||
inherit (clan-core.clanLib.docs) stripStorePathsFromDeclarations;
|
||||
transformOptions = stripStorePathsFromDeclarations;
|
||||
|
||||
nixosConfigurationWithClan =
|
||||
let
|
||||
evaled = lib.evalModules {
|
||||
class = "nixos";
|
||||
modules = [
|
||||
# Basemodule
|
||||
(
|
||||
{ config, ... }:
|
||||
{
|
||||
imports = (import (pkgs.path + "/nixos/modules/module-list.nix"));
|
||||
nixpkgs.pkgs = pkgs;
|
||||
clan.core.name = "dummy";
|
||||
system.stateVersion = config.system.nixos.release;
|
||||
# Set this to work around a bug where `clan.core.settings.machine.name`
|
||||
# is forced due to `networking.interfaces` being forced
|
||||
# somewhere in the nixpkgs options
|
||||
facter.detected.dhcp.enable = lib.mkForce false;
|
||||
}
|
||||
)
|
||||
{
|
||||
clan.core.settings.directory = clan-core;
|
||||
}
|
||||
clan-core.nixosModules.clanCore
|
||||
];
|
||||
};
|
||||
in
|
||||
evaled;
|
||||
in
|
||||
{
|
||||
|
||||
clanModulesViaRoles = lib.mapAttrs (
|
||||
_moduleName: rolesOptions:
|
||||
lib.mapAttrs (
|
||||
_roleName: options:
|
||||
(nixosOptionsDoc {
|
||||
inherit options;
|
||||
warningsAreErrors = true;
|
||||
inherit transformOptions;
|
||||
}).optionsJSON
|
||||
) rolesOptions
|
||||
) modulesRolesOptions;
|
||||
|
||||
# Test with:
|
||||
# nix build .\#legacyPackages.x86_64-linux.clanModulesViaService
|
||||
clanModulesViaService = lib.mapAttrs (
|
||||
@@ -38,7 +51,6 @@ in
|
||||
{
|
||||
roles = lib.mapAttrs (
|
||||
_roleName: role:
|
||||
|
||||
(nixosOptionsDoc {
|
||||
transformOptions =
|
||||
opt:
|
||||
@@ -54,20 +66,13 @@ in
|
||||
warningsAreErrors = true;
|
||||
}).optionsJSON
|
||||
) evaluatedService.config.roles;
|
||||
|
||||
manifest = evaluatedService.config.manifest;
|
||||
|
||||
}
|
||||
) clan-core.clan.modules;
|
||||
|
||||
clanCore =
|
||||
(nixosOptionsDoc {
|
||||
options =
|
||||
((evalClanModules {
|
||||
modules = [ ];
|
||||
inherit pkgs clan-core;
|
||||
}).options
|
||||
).clan.core or { };
|
||||
options = nixosConfigurationWithClan.options.clan.core;
|
||||
warningsAreErrors = true;
|
||||
inherit transformOptions;
|
||||
}).optionsJSON;
|
||||
|
||||
@@ -25,7 +25,7 @@
|
||||
|
||||
serviceModules = self.clan.modules;
|
||||
|
||||
baseHref = "/options-page/";
|
||||
baseHref = "/options/";
|
||||
|
||||
getRoles =
|
||||
module:
|
||||
@@ -126,7 +126,7 @@
|
||||
nestedSettingsOption = mkOption {
|
||||
type = types.raw;
|
||||
description = ''
|
||||
See [instances.${name}.roles.${roleName}.settings](${baseHref}?option_scope=0&option=instances.${name}.roles.${roleName}.settings)
|
||||
See [instances.${name}.roles.${roleName}.settings](${baseHref}?option_scope=0&option=inventory.instances.${name}.roles.${roleName}.settings)
|
||||
'';
|
||||
};
|
||||
settingsOption = mkOption {
|
||||
@@ -161,6 +161,42 @@
|
||||
}
|
||||
];
|
||||
|
||||
baseModule =
|
||||
# Module
|
||||
{ config, ... }:
|
||||
{
|
||||
imports = (import (pkgs.path + "/nixos/modules/module-list.nix"));
|
||||
nixpkgs.pkgs = pkgs;
|
||||
clan.core.name = "dummy";
|
||||
system.stateVersion = config.system.nixos.release;
|
||||
# Set this to work around a bug where `clan.core.settings.machine.name`
|
||||
# is forced due to `networking.interfaces` being forced
|
||||
# somewhere in the nixpkgs options
|
||||
facter.detected.dhcp.enable = lib.mkForce false;
|
||||
};
|
||||
|
||||
evalClanModules =
|
||||
let
|
||||
evaled = lib.evalModules {
|
||||
class = "nixos";
|
||||
modules = [
|
||||
baseModule
|
||||
{
|
||||
clan.core.settings.directory = self;
|
||||
}
|
||||
self.nixosModules.clanCore
|
||||
];
|
||||
};
|
||||
in
|
||||
evaled;
|
||||
|
||||
coreOptions =
|
||||
(pkgs.nixosOptionsDoc {
|
||||
options = (evalClanModules.options).clan.core or { };
|
||||
warningsAreErrors = true;
|
||||
transformOptions = self.clanLib.docs.stripStorePathsFromDeclarations;
|
||||
}).optionsJSON;
|
||||
|
||||
in
|
||||
{
|
||||
# Uncomment for debugging
|
||||
@@ -175,10 +211,17 @@
|
||||
# scopes = mapAttrsToList mkScope serviceModules;
|
||||
scopes = [
|
||||
{
|
||||
name = "Clan";
|
||||
inherit baseHref;
|
||||
name = "Flake Options (clan.nix file)";
|
||||
modules = docModules;
|
||||
urlPrefix = "https://git.clan.lol/clan/clan-core/src/branch/main/";
|
||||
}
|
||||
{
|
||||
name = "Machine Options (clan.core NixOS options)";
|
||||
optionsJSON = "${coreOptions}/share/doc/nixos/options.json";
|
||||
urlPrefix = "https://git.clan.lol/clan/clan-core/src/branch/main/";
|
||||
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
"""Module for rendering NixOS options documentation from JSON format."""
|
||||
|
||||
# Options are available in the following format:
|
||||
# https://github.com/nixos/nixpkgs/blob/master/nixos/lib/make-options-doc/default.nix
|
||||
#
|
||||
@@ -32,30 +34,21 @@ from typing import Any
|
||||
from clan_lib.errors import ClanError
|
||||
from clan_lib.services.modules import (
|
||||
CategoryInfo,
|
||||
Frontmatter,
|
||||
extract_frontmatter,
|
||||
get_roles,
|
||||
ModuleManifest,
|
||||
)
|
||||
|
||||
# Get environment variables
|
||||
CLAN_CORE_PATH = Path(os.environ["CLAN_CORE_PATH"])
|
||||
CLAN_CORE_DOCS = Path(os.environ["CLAN_CORE_DOCS"])
|
||||
CLAN_MODULES_FRONTMATTER_DOCS = os.environ.get("CLAN_MODULES_FRONTMATTER_DOCS")
|
||||
BUILD_CLAN_PATH = os.environ.get("BUILD_CLAN_PATH")
|
||||
|
||||
## Clan modules ##
|
||||
# Some modules can be imported via nix natively
|
||||
CLAN_MODULES_VIA_NIX = os.environ.get("CLAN_MODULES_VIA_NIX")
|
||||
# Some modules can be imported via inventory
|
||||
CLAN_MODULES_VIA_ROLES = os.environ.get("CLAN_MODULES_VIA_ROLES")
|
||||
|
||||
# Options how to author clan.modules
|
||||
# perInstance, perMachine, ...
|
||||
CLAN_SERVICE_INTERFACE = os.environ.get("CLAN_SERVICE_INTERFACE")
|
||||
|
||||
CLAN_MODULES_VIA_SERVICE = os.environ.get("CLAN_MODULES_VIA_SERVICE")
|
||||
|
||||
OUT = os.environ.get("out")
|
||||
OUT = os.environ.get("out") # noqa: SIM112
|
||||
|
||||
|
||||
def sanitize(text: str) -> str:
|
||||
@@ -75,8 +68,7 @@ def render_option_header(name: str) -> str:
|
||||
|
||||
|
||||
def join_lines_with_indentation(lines: list[str], indent: int = 4) -> str:
|
||||
"""
|
||||
Joins multiple lines with a specified number of whitespace characters as indentation.
|
||||
"""Joins multiple lines with a specified number of whitespace characters as indentation.
|
||||
|
||||
Args:
|
||||
lines (list of str): The lines of text to join.
|
||||
@@ -84,6 +76,7 @@ def join_lines_with_indentation(lines: list[str], indent: int = 4) -> str:
|
||||
|
||||
Returns:
|
||||
str: The indented and concatenated string.
|
||||
|
||||
"""
|
||||
# Create the indentation string (e.g., four spaces)
|
||||
indent_str = " " * indent
|
||||
@@ -170,7 +163,10 @@ def render_option(
|
||||
|
||||
|
||||
def print_options(
|
||||
options_file: str, head: str, no_options: str, replace_prefix: str | None = None
|
||||
options_file: str,
|
||||
head: str,
|
||||
no_options: str,
|
||||
replace_prefix: str | None = None,
|
||||
) -> str:
|
||||
res = ""
|
||||
with (Path(options_file) / "share/doc/nixos/options.json").open() as f:
|
||||
@@ -179,32 +175,16 @@ def print_options(
|
||||
res += head if len(options.items()) else no_options
|
||||
for option_name, info in options.items():
|
||||
if replace_prefix:
|
||||
option_name = option_name.replace(replace_prefix + ".", "")
|
||||
display_name = option_name.replace(replace_prefix + ".", "")
|
||||
else:
|
||||
display_name = option_name
|
||||
|
||||
res += render_option(option_name, info, 4)
|
||||
res += render_option(display_name, info, 4)
|
||||
return res
|
||||
|
||||
|
||||
def module_header(module_name: str, has_inventory_feature: bool = False) -> str:
|
||||
indicator = " 🔹" if has_inventory_feature else ""
|
||||
return f"# {module_name}{indicator}\n\n"
|
||||
|
||||
|
||||
def module_nix_usage(module_name: str) -> str:
|
||||
return f"""## Usage via Nix
|
||||
|
||||
**This module can also be imported directly in your NixOS configuration, although it is recommended to use the [inventory](../../concepts/inventory.md) interface if available.**
|
||||
|
||||
Some modules are considered 'low-level' or 'expert modules' and are not available via the inventory interface.
|
||||
|
||||
```nix
|
||||
{{config, lib, inputs, ...}}: {{
|
||||
imports = [ inputs.clan-core.clanModules.{module_name} ];
|
||||
# ...
|
||||
}}
|
||||
```
|
||||
|
||||
"""
|
||||
def module_header(module_name: str) -> str:
|
||||
return f"# {module_name}\n\n"
|
||||
|
||||
|
||||
clan_core_descr = """
|
||||
@@ -223,68 +203,6 @@ The following options are available for this module.
|
||||
"""
|
||||
|
||||
|
||||
def produce_clan_modules_frontmatter_docs() -> None:
|
||||
if not CLAN_MODULES_FRONTMATTER_DOCS:
|
||||
msg = f"Environment variables are not set correctly: $CLAN_CORE_DOCS={CLAN_CORE_DOCS}"
|
||||
raise ClanError(msg)
|
||||
|
||||
if not OUT:
|
||||
msg = f"Environment variables are not set correctly: $out={OUT}"
|
||||
raise ClanError(msg)
|
||||
|
||||
with Path(CLAN_MODULES_FRONTMATTER_DOCS).open() as f:
|
||||
options: dict[str, dict[str, Any]] = json.load(f)
|
||||
|
||||
# header
|
||||
output = """# Frontmatter
|
||||
|
||||
Every clan module has a `frontmatter` section within its readme. It provides
|
||||
machine readable metadata about the module.
|
||||
|
||||
!!! example
|
||||
|
||||
The used format is `TOML`
|
||||
|
||||
The content is separated by `---` and the frontmatter must be placed at the very top of the `README.md` file.
|
||||
|
||||
```toml
|
||||
---
|
||||
description = "A description of the module"
|
||||
categories = ["category1", "category2"]
|
||||
|
||||
[constraints]
|
||||
roles.client.max = 10
|
||||
roles.server.min = 1
|
||||
---
|
||||
# Readme content
|
||||
...
|
||||
```
|
||||
|
||||
"""
|
||||
|
||||
output += """## Overview
|
||||
|
||||
This provides an overview of the available attributes of the `frontmatter`
|
||||
within the `README.md` of a clan module.
|
||||
|
||||
"""
|
||||
# for option_name, info in options.items():
|
||||
# if option_name == "_module.args":
|
||||
# continue
|
||||
# output += render_option(option_name, info)
|
||||
root = options_to_tree(options, debug=True)
|
||||
for option in root.suboptions:
|
||||
output += options_docs_from_tree(option, init_level=2)
|
||||
|
||||
outfile = Path(OUT) / "clanModules/frontmatter/index.md"
|
||||
outfile.parent.mkdir(
|
||||
parents=True,
|
||||
exist_ok=True,
|
||||
)
|
||||
with outfile.open("w") as of:
|
||||
of.write(output)
|
||||
|
||||
|
||||
def produce_clan_core_docs() -> None:
|
||||
if not CLAN_CORE_DOCS:
|
||||
msg = f"Environment variables are not set correctly: $CLAN_CORE_DOCS={CLAN_CORE_DOCS}"
|
||||
@@ -324,7 +242,7 @@ def produce_clan_core_docs() -> None:
|
||||
for submodule_name, split_options in split.items():
|
||||
outfile = f"{module_name}/{submodule_name}.md"
|
||||
print(
|
||||
f"[clan_core.{submodule_name}] Rendering option of: {submodule_name}... {outfile}"
|
||||
f"[clan_core.{submodule_name}] Rendering option of: {submodule_name}... {outfile}",
|
||||
)
|
||||
init_level = 1
|
||||
root = options_to_tree(split_options, debug=True)
|
||||
@@ -359,56 +277,9 @@ def produce_clan_core_docs() -> None:
|
||||
of.write(output)
|
||||
|
||||
|
||||
def render_roles(roles: list[str] | None, module_name: str) -> str:
|
||||
if roles:
|
||||
roles_list = "\n".join([f"- `{r}`" for r in roles])
|
||||
return (
|
||||
f"""
|
||||
### Roles
|
||||
|
||||
This module can be used via predefined roles
|
||||
|
||||
{roles_list}
|
||||
"""
|
||||
"""
|
||||
Every role has its own configuration options, which are each listed below.
|
||||
|
||||
For more information, see the [inventory guide](../../concepts/inventory.md).
|
||||
|
||||
??? Example
|
||||
For example the `admin` module adds the following options globally to all machines where it is used.
|
||||
|
||||
`clan.admin.allowedkeys`
|
||||
|
||||
```nix
|
||||
clan-core.lib.clan {
|
||||
inventory.services = {
|
||||
admin.me = {
|
||||
roles.default.machines = [ "jon" ];
|
||||
config.allowedkeys = [ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQD..." ];
|
||||
};
|
||||
};
|
||||
};
|
||||
```
|
||||
"""
|
||||
)
|
||||
return ""
|
||||
|
||||
|
||||
clan_modules_descr = """
|
||||
Clan modules are [NixOS modules](https://wiki.nixos.org/wiki/NixOS_modules)
|
||||
which have been enhanced with additional features provided by Clan, with
|
||||
certain option types restricted to enable configuration through a graphical
|
||||
interface.
|
||||
|
||||
!!! note "🔹"
|
||||
Modules with this indicator support the [inventory](../../concepts/inventory.md) feature.
|
||||
|
||||
"""
|
||||
|
||||
|
||||
def render_categories(
|
||||
categories: list[str], categories_info: dict[str, CategoryInfo]
|
||||
categories: list[str],
|
||||
categories_info: dict[str, CategoryInfo],
|
||||
) -> str:
|
||||
res = """<div style="display: grid; grid-template-columns: repeat(3, 1fr); gap: 10px;">"""
|
||||
for cat in categories:
|
||||
@@ -473,10 +344,10 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
|
||||
# output += f"`clan.modules.{module_name}`\n"
|
||||
output += f"*{module_info['manifest']['description']}*\n"
|
||||
|
||||
fm = Frontmatter("")
|
||||
# output += "## Categories\n\n"
|
||||
output += render_categories(
|
||||
module_info["manifest"]["categories"], fm.categories_info
|
||||
module_info["manifest"]["categories"],
|
||||
ModuleManifest.categories_info(),
|
||||
)
|
||||
|
||||
output += f"{module_info['manifest']['readme']}\n"
|
||||
@@ -485,7 +356,7 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
|
||||
|
||||
output += f"The {module_name} module has the following roles:\n\n"
|
||||
|
||||
for role_name, _ in module_info["roles"].items():
|
||||
for role_name in module_info["roles"]:
|
||||
output += f"- {role_name}\n"
|
||||
|
||||
for role_name, role_filename in module_info["roles"].items():
|
||||
@@ -505,183 +376,8 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
|
||||
of.write(output)
|
||||
|
||||
|
||||
def produce_clan_modules_docs() -> None:
|
||||
if not CLAN_MODULES_VIA_NIX:
|
||||
msg = f"Environment variables are not set correctly: $CLAN_MODULES_VIA_NIX={CLAN_MODULES_VIA_NIX}"
|
||||
raise ClanError(msg)
|
||||
|
||||
if not CLAN_MODULES_VIA_ROLES:
|
||||
msg = f"Environment variables are not set correctly: $CLAN_MODULES_VIA_ROLES={CLAN_MODULES_VIA_ROLES}"
|
||||
raise ClanError(msg)
|
||||
|
||||
if not CLAN_CORE_PATH:
|
||||
msg = f"Environment variables are not set correctly: $CLAN_CORE_PATH={CLAN_CORE_PATH}"
|
||||
raise ClanError(msg)
|
||||
|
||||
if not OUT:
|
||||
msg = f"Environment variables are not set correctly: $out={OUT}"
|
||||
raise ClanError(msg)
|
||||
|
||||
modules_index = "# Modules Overview\n\n"
|
||||
modules_index += clan_modules_descr
|
||||
modules_index += "## Overview\n\n"
|
||||
modules_index += '<div class="grid cards" markdown>\n\n'
|
||||
|
||||
with Path(CLAN_MODULES_VIA_ROLES).open() as f2:
|
||||
role_links: dict[str, dict[str, str]] = json.load(f2)
|
||||
|
||||
with Path(CLAN_MODULES_VIA_NIX).open() as f:
|
||||
links: dict[str, str] = json.load(f)
|
||||
|
||||
for module_name, options_file in links.items():
|
||||
print(f"Rendering ClanModule: {module_name}")
|
||||
readme_file = CLAN_CORE_PATH / "clanModules" / module_name / "README.md"
|
||||
with readme_file.open() as f:
|
||||
readme = f.read()
|
||||
frontmatter: Frontmatter
|
||||
frontmatter, readme_content = extract_frontmatter(readme, str(readme_file))
|
||||
|
||||
# skip if experimental feature enabled
|
||||
if "experimental" in frontmatter.features:
|
||||
print(f"Skipping {module_name}: Experimental feature")
|
||||
continue
|
||||
|
||||
modules_index += build_option_card(module_name, frontmatter)
|
||||
|
||||
##### Print module documentation #####
|
||||
|
||||
# 1. Header
|
||||
output = module_header(module_name, "inventory" in frontmatter.features)
|
||||
|
||||
# 2. Description from README.md
|
||||
if frontmatter.description:
|
||||
output += f"*{frontmatter.description}*\n\n"
|
||||
|
||||
# 2. Deprecation note if the module is deprecated
|
||||
if "deprecated" in frontmatter.features:
|
||||
output += f"""
|
||||
!!! Warning "Deprecated"
|
||||
The `{module_name}` module is deprecated.
|
||||
|
||||
Use 'clanServices/{module_name}' or a similar successor instead
|
||||
"""
|
||||
else:
|
||||
output += f"""
|
||||
!!! Warning "Will be deprecated"
|
||||
The `{module_name}` module might eventually be migrated to 'clanServices'.
|
||||
|
||||
See: [clanServices](../../guides/clanServices.md)
|
||||
"""
|
||||
|
||||
# 3. Categories from README.md
|
||||
output += "## Categories\n\n"
|
||||
output += render_categories(frontmatter.categories, frontmatter.categories_info)
|
||||
output += "\n---\n\n"
|
||||
|
||||
# 3. README.md content
|
||||
output += f"{readme_content}\n"
|
||||
|
||||
# 4. Usage
|
||||
##### Print usage via Inventory #####
|
||||
|
||||
# get_roles(str) -> list[str] | None
|
||||
# if not isinstance(options_file, str):
|
||||
roles = get_roles(CLAN_CORE_PATH / "clanModules" / module_name)
|
||||
if roles:
|
||||
# Render inventory usage
|
||||
output += """## Usage via Inventory\n\n"""
|
||||
output += render_roles(roles, module_name)
|
||||
for role in roles:
|
||||
role_options_file = role_links[module_name][role]
|
||||
# Abort if the options file is not found
|
||||
if not isinstance(role_options_file, str):
|
||||
print(
|
||||
f"Error: module: {module_name} in role: {role} - options file not found, Got {role_options_file}"
|
||||
)
|
||||
exit(1)
|
||||
|
||||
no_options = f"""### Options of `{role}` role
|
||||
|
||||
**The `{module_name}` `{role}` role doesn't offer or require any options to be set.**
|
||||
"""
|
||||
|
||||
heading = f"""### Options of `{role}` role
|
||||
|
||||
The following options are available when using the `{role}` role.
|
||||
"""
|
||||
output += print_options(
|
||||
role_options_file,
|
||||
heading,
|
||||
no_options,
|
||||
replace_prefix=f"clan.{module_name}",
|
||||
)
|
||||
else:
|
||||
# No roles means no inventory usage
|
||||
output += """## Usage via Inventory
|
||||
|
||||
**This module cannot be used via the inventory interface.**
|
||||
"""
|
||||
|
||||
##### Print usage via Nix / nixos #####
|
||||
if not isinstance(options_file, str):
|
||||
print(
|
||||
f"Skipping {module_name}: Cannot be used via import clanModules.{module_name}"
|
||||
)
|
||||
output += """## Usage via Nix
|
||||
|
||||
**This module cannot be imported directly in your nixos configuration.**
|
||||
|
||||
"""
|
||||
else:
|
||||
output += module_nix_usage(module_name)
|
||||
no_options = "** This module doesnt require any options to be set.**"
|
||||
output += print_options(options_file, options_head, no_options)
|
||||
|
||||
outfile = Path(OUT) / f"clanModules/{module_name}.md"
|
||||
outfile.parent.mkdir(
|
||||
parents=True,
|
||||
exist_ok=True,
|
||||
)
|
||||
with outfile.open("w") as of:
|
||||
of.write(output)
|
||||
|
||||
modules_index += "</div>"
|
||||
modules_index += "\n"
|
||||
modules_outfile = Path(OUT) / "clanModules/index.md"
|
||||
|
||||
with modules_outfile.open("w") as of:
|
||||
of.write(modules_index)
|
||||
|
||||
|
||||
def build_option_card(module_name: str, frontmatter: Frontmatter) -> str:
|
||||
"""
|
||||
Build the overview index card for each reference target option.
|
||||
"""
|
||||
|
||||
def indent_all(text: str, indent_size: int = 4) -> str:
|
||||
"""
|
||||
Indent all lines in a string.
|
||||
"""
|
||||
indent = " " * indent_size
|
||||
lines = text.split("\n")
|
||||
indented_text = indent + ("\n" + indent).join(lines)
|
||||
return indented_text
|
||||
|
||||
def to_md_li(module_name: str, frontmatter: Frontmatter) -> str:
|
||||
md_li = (
|
||||
f"""- **[{module_name}](./{"-".join(module_name.split(" "))}.md)**\n\n"""
|
||||
)
|
||||
md_li += f"""{indent_all("---", 4)}\n\n"""
|
||||
fmd = f"\n{frontmatter.description.strip()}" if frontmatter.description else ""
|
||||
md_li += f"""{indent_all(fmd, 4)}"""
|
||||
return md_li
|
||||
|
||||
return f"{to_md_li(module_name, frontmatter)}\n\n"
|
||||
|
||||
|
||||
def split_options_by_root(options: dict[str, Any]) -> dict[str, dict[str, Any]]:
|
||||
"""
|
||||
Split the flat dictionary of options into a dict of which each entry will construct complete option trees.
|
||||
"""Split the flat dictionary of options into a dict of which each entry will construct complete option trees.
|
||||
{
|
||||
"a": { Data }
|
||||
"a.b": { Data }
|
||||
@@ -765,9 +461,7 @@ def option_short_name(option_name: str) -> str:
|
||||
|
||||
|
||||
def options_to_tree(options: dict[str, Any], debug: bool = False) -> Option:
|
||||
"""
|
||||
Convert the options dictionary to a tree structure.
|
||||
"""
|
||||
"""Convert the options dictionary to a tree structure."""
|
||||
|
||||
# Helper function to create nested structure
|
||||
def add_to_tree(path_parts: list[str], info: Any, current_node: Option) -> None:
|
||||
@@ -819,22 +513,24 @@ def options_to_tree(options: dict[str, Any], debug: bool = False) -> Option:
|
||||
|
||||
|
||||
def options_docs_from_tree(
|
||||
root: Option, init_level: int = 1, prefix: list[str] | None = None
|
||||
root: Option,
|
||||
init_level: int = 1,
|
||||
prefix: list[str] | None = None,
|
||||
) -> str:
|
||||
"""
|
||||
Render the options from the tree structure.
|
||||
"""Eender the options from the tree structure.
|
||||
|
||||
Args:
|
||||
root (Option): The root option node.
|
||||
init_level (int): The initial level of indentation.
|
||||
prefix (list str): Will be printed as common prefix of all attribute names.
|
||||
|
||||
"""
|
||||
|
||||
def render_tree(option: Option, level: int = init_level) -> str:
|
||||
output = ""
|
||||
|
||||
should_render = not option.name.startswith("<") and not option.name.startswith(
|
||||
"_"
|
||||
"_",
|
||||
)
|
||||
if should_render:
|
||||
# short_name = option_short_name(option.name)
|
||||
@@ -855,16 +551,11 @@ def options_docs_from_tree(
|
||||
|
||||
return output
|
||||
|
||||
md = render_tree(root)
|
||||
return md
|
||||
return render_tree(root)
|
||||
|
||||
|
||||
if __name__ == "__main__": #
|
||||
if __name__ == "__main__":
|
||||
produce_clan_core_docs()
|
||||
|
||||
produce_clan_service_author_docs()
|
||||
|
||||
# produce_clan_modules_docs()
|
||||
produce_clan_service_docs()
|
||||
|
||||
# produce_clan_modules_frontmatter_docs()
|
||||
|
||||
@@ -1,15 +1,33 @@
|
||||
# Auto-included Files
|
||||
|
||||
Clan automatically imports the following files from a directory and registers them.
|
||||
Clan automatically imports specific files from each machine directory and registers them, reducing the need for manual configuration.
|
||||
|
||||
## Machine registration
|
||||
## Machine Registration
|
||||
|
||||
Every folder `machines/{machineName}` will be registered automatically as a Clan machine.
|
||||
Every folder under `machines/{machineName}` is automatically registered as a Clan machine.
|
||||
|
||||
!!! info "Automatically loaded files"
|
||||
!!! info "Files loaded automatically for each machine"
|
||||
|
||||
The following files are loaded automatically for each Clan machine:
|
||||
The following files are detected and imported for every Clan machine:
|
||||
|
||||
- [x] `machines/{machineName}/configuration.nix`
|
||||
- [x] `machines/{machineName}/hardware-configuration.nix`
|
||||
- [x] `machines/{machineName}/facter.json` Automatically configured, for further information see [nixos-facter](https://clan.lol/blog/nixos-facter/)
|
||||
- [x] `machines/{machineName}/disko.nix` Automatically loaded, for further information see the [disko docs](https://github.com/nix-community/disko/blob/master/docs/quickstart.md).
|
||||
- [x] `machines/{machineName}/configuration.nix`
|
||||
Main configuration file for the machine.
|
||||
|
||||
- [x] `machines/{machineName}/hardware-configuration.nix`
|
||||
Hardware-specific configuration generated by NixOS.
|
||||
|
||||
- [x] `machines/{machineName}/facter.json`
|
||||
Contains system facts. Automatically generated — see [nixos-facter](https://clan.lol/blog/nixos-facter/) for details.
|
||||
|
||||
- [x] `machines/{machineName}/disko.nix`
|
||||
Disk layout configuration. See the [disko quickstart](https://github.com/nix-community/disko/blob/master/docs/quickstart.md) for more info.
|
||||
|
||||
## Other Auto-included Files
|
||||
|
||||
* **`inventory.json`**
|
||||
Managed by Clan's API.
|
||||
Merges with `clan.inventory` to extend the inventory.
|
||||
|
||||
* **`.clan-flake`**
|
||||
Sentinel file to be used to locate the root of a Clan repository.
|
||||
Falls back to `.git`, `.hg`, `.svn`, or `flake.nix` if not found.
|
||||
|
||||
@@ -1,16 +1,22 @@
|
||||
# Using `clanServices`
|
||||
# Using the Inventory
|
||||
|
||||
Clan’s `clanServices` system is a composable way to define and deploy services across machines.
|
||||
Clan's inventory system is a composable way to define and deploy services across
|
||||
machines.
|
||||
|
||||
This guide shows how to **instantiate** a `clanService`, explains how service definitions are structured in your inventory, and how to pick or create services from modules exposed by flakes.
|
||||
This guide shows how to **instantiate** a `clanService`, explains how service
|
||||
definitions are structured in your inventory, and how to pick or create services
|
||||
from modules exposed by flakes.
|
||||
|
||||
The term **Multi-host-modules** was introduced previously in the [nixus repository](https://github.com/infinisil/nixus) and represents a similar concept.
|
||||
The term **Multi-host-modules** was introduced previously in the [nixus
|
||||
repository](https://github.com/infinisil/nixus) and represents a similar
|
||||
concept.
|
||||
|
||||
---
|
||||
______________________________________________________________________
|
||||
|
||||
## Overview
|
||||
|
||||
Services are used in `inventory.instances`, and then they attach to *roles* and *machines* — meaning you decide which machines run which part of the service.
|
||||
Services are used in `inventory.instances`, and assigned to *roles* and
|
||||
*machines* -- meaning you decide which machines run which part of the service.
|
||||
|
||||
For example:
|
||||
|
||||
@@ -18,119 +24,138 @@ For example:
|
||||
inventory.instances = {
|
||||
borgbackup = {
|
||||
roles.client.machines."laptop" = {};
|
||||
roles.client.machines."server1" = {};
|
||||
roles.client.machines."workstation" = {};
|
||||
|
||||
roles.server.machines."backup-box" = {};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
This says: “Run borgbackup as a *client* on my *laptop* and *server1*, and as a *server* on *backup-box*.”
|
||||
This says: "Run borgbackup as a *client* on my *laptop* and *workstation*, and
|
||||
as a *server* on *backup-box*". `client` and `server` are roles defined by the
|
||||
`borgbackup` service.
|
||||
|
||||
## Module source specification
|
||||
|
||||
Each instance includes a reference to a **module specification** — this is how Clan knows which service module to use and where it came from.
|
||||
Usually one would just use `imports`, but we needed to make the `module source` configurable via the Python API.
|
||||
By default it is not required to specify the `module`, in which case it defaults to the pre-provided services of clan-core.
|
||||
Each instance includes a reference to a **module specification** -- this is how
|
||||
Clan knows which service module to use and where it came from.
|
||||
|
||||
---
|
||||
|
||||
## Override Example
|
||||
It is not required to specify the `module.input` parameter, in which case it
|
||||
defaults to the pre-provided services of clan-core. In a similar fashion, the
|
||||
`module.name` parameter can also be omitted; it will default to the name of the
|
||||
instance.
|
||||
|
||||
Example of instantiating a `borgbackup` service using `clan-core`:
|
||||
|
||||
```nix
|
||||
inventory.instances = {
|
||||
# Instance Name: Different name for this 'borgbackup' instance
|
||||
borgbackup = {
|
||||
# Since this is instances."borgbackup" the whole `module = { ... }` below is equivalent and optional.
|
||||
module = {
|
||||
name = "borgbackup"; # <-- Name of the module (optional)
|
||||
input = "clan-core"; # <-- The flake input where the service is defined (optional)
|
||||
};
|
||||
|
||||
borgbackup = { # <- Instance name
|
||||
|
||||
# This can be partially/fully specified,
|
||||
# - If the instance name is not the name of the module
|
||||
# - If the input is not clan-core
|
||||
# module = {
|
||||
# name = "borgbackup"; # Name of the module (optional)
|
||||
# input = "clan-core"; # The flake input where the service is defined (optional)
|
||||
# };
|
||||
|
||||
# Participation of the machines is defined via roles
|
||||
# Right side needs to be an attribute set. Its purpose will become clear later
|
||||
roles.client.machines."machine-a" = {};
|
||||
roles.server.machines."backup-host" = {};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
If you used `clan-core` as an input attribute for your flake:
|
||||
## Module Settings
|
||||
|
||||
Each role might expose configurable options. See clan's [clanServices
|
||||
reference](../reference/clanServices/index.md) for all available options.
|
||||
|
||||
Settings can be set per-machine or per-role. The latter is applied to all
|
||||
machines that are assigned to that role.
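A minimal sketch of both forms, reusing the `borgbackup` option names from the configuration example further below (the exact merge behaviour of per-role and per-machine settings is an assumption here):

```nix
inventory.instances = {
  borgbackup = {
    # Role-level settings: apply to every machine assigned the "client" role
    roles.client.settings.backupFolders = [ /home ];

    # Machine-level settings: apply only to "laptop"
    roles.client.machines."laptop".settings.backupFolders = [ /home /var ];

    roles.server.machines."backup-box" = { };
  };
}
```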
|
||||
|
||||
|
||||
```nix
|
||||
# ↓ module.input = "clan-core"
|
||||
inputs.clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
|
||||
```
|
||||
|
||||
## Simplified Example
|
||||
|
||||
If only one instance is needed for a service and the service is a clan core service, the `module` definition can be omitted.
|
||||
|
||||
```nix
|
||||
# Simplified way of specifying a single instance
|
||||
inventory.instances = {
|
||||
# instance name is `borgbackup` -> clan core module `borgbackup` will be loaded.
|
||||
borgbackup = {
|
||||
# Participation of the machines is defined via roles
|
||||
# Right side needs to be an attribute set. Its purpose will become clear later
|
||||
roles.client.machines."machine-a" = {};
|
||||
roles.server.machines."backup-host" = {};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration Example
|
||||
|
||||
Each role might expose configurable options
|
||||
|
||||
See clan's [clanServices reference](../reference/clanServices/index.md) for available options
|
||||
|
||||
```nix
|
||||
inventory.instances = {
|
||||
borgbackup-example = {
|
||||
module = {
|
||||
name = "borgbackup";
|
||||
input = "clan-core";
|
||||
};
|
||||
# Settings for 'machine-a'
|
||||
roles.client.machines."machine-a" = {
|
||||
# 'client' -Settings of 'machine-a'
|
||||
settings = {
|
||||
backupFolders = [
|
||||
/home
|
||||
/var
|
||||
];
|
||||
};
|
||||
# ---------------------------
|
||||
};
|
||||
roles.server.machines."backup-host" = {};
|
||||
|
||||
# Settings for all machines of the role "server"
|
||||
roles.server.settings = {
|
||||
directory = "/var/lib/borgbackup";
|
||||
};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
## Tags
|
||||
|
||||
Multiple members can be defined using tags as follows
|
||||
Tags can be used to assign multiple machines to a role at once. They can be thought of as a grouping mechanism.
|
||||
|
||||
For example, using the `all` tag for services that you want configured on all
|
||||
your machines is a common pattern.
|
||||
|
||||
The following example could be used to back up all your machines to a common
|
||||
backup server:
|
||||
|
||||
```nix
|
||||
inventory.instances = {
|
||||
borgbackup-example = {
|
||||
module = {
|
||||
name = "borgbackup";
|
||||
input = "clan-core";
|
||||
};
|
||||
#
|
||||
# The 'all' -tag targets all machines
|
||||
roles.client.tags."all" = {};
|
||||
# ---------------------------
|
||||
borgbackup = {
|
||||
# "All" machines are assigned to the borgbackup 'client' role
|
||||
roles.client.tags = [ "all" ];
|
||||
|
||||
# But only one specific machine (backup-host) is assigned to the 'server' role
|
||||
roles.server.machines."backup-host" = {};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
## Sharing additional Nix configuration
|
||||
|
||||
Sometimes you need to add custom NixOS configuration alongside your clan
|
||||
services. The `extraModules` option allows you to include additional NixOS
|
||||
configuration that is applied for every machine assigned to that role.
|
||||
|
||||
There are multiple valid syntaxes for specifying modules:
|
||||
|
||||
```nix
|
||||
inventory.instances = {
|
||||
borgbackup = {
|
||||
roles.client = {
|
||||
# Direct module reference
|
||||
extraModules = [ ../nixosModules/borgbackup.nix ];
|
||||
|
||||
# Or using self (needs to be json serializable)
|
||||
# See next example, for a workaround.
|
||||
extraModules = [ self.nixosModules.borgbackup ];
|
||||
|
||||
# Or inline module definition, (needs to be json compatible)
|
||||
extraModules = [
|
||||
{
|
||||
# Your module configuration here
|
||||
# ...
|
||||
#
|
||||
# If the module needs to contain non-serializable expressions:
|
||||
imports = [ ./path/to/non-serializable.nix ];
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
## Picking a clanService
|
||||
|
||||
You can use services exposed by Clan’s core module library, `clan-core`.
|
||||
You can use services exposed by Clan's core module library, `clan-core`.
|
||||
|
||||
🔗 See: [List of Available Services in clan-core](../reference/clanServices/index.md)
|
||||
|
||||
@@ -142,18 +167,19 @@ You can also author your own `clanService` modules.
|
||||
|
||||
You might expose your service module from your flake — this makes it easy for other people to also use your module in their clan.
|
||||
|
||||
---
|
||||
______________________________________________________________________
|
||||
|
||||
## 💡 Tips for Working with clanServices
|
||||
|
||||
* You can add multiple inputs to your flake (`clan-core`, `your-org-modules`, etc.) to mix and match services.
|
||||
* Each service instance is isolated by its key in `inventory.instances`, allowing you to deploy multiple versions or roles of the same service type.
|
||||
* Roles can target different machines or be scoped dynamically.
|
||||
- You can add multiple inputs to your flake (`clan-core`, `your-org-modules`, etc.) to mix and match services.
|
||||
- Each service instance is isolated by its key in `inventory.instances`, allowing you to deploy multiple versions or roles of the same service type.
|
||||
- Roles can target different machines or be scoped dynamically.
|
||||
|
||||
---
|
||||
______________________________________________________________________
|
||||
|
||||
## What’s Next?
|
||||
## What's Next?
|
||||
|
||||
- [Author your own clanService →](../guides/services/community.md)
|
||||
- [Migrate from clanModules →](../guides/migrations/migrate-inventory-services.md)
|
||||
|
||||
* [Author your own clanService →](../guides/services/community.md)
|
||||
* [Migrate from clanModules →](../guides/migrations/migrate-inventory-services.md)
|
||||
<!-- TODO: * [Understand the architecture →](../explanation/clan-architecture.md) -->
|
||||
|
||||
@@ -90,13 +90,10 @@ export CLAN_DEBUG_COMMANDS=1
|
||||
These options help you pinpoint the source and context of print messages and debug logs during development.
|
||||
|
||||
|
||||
|
||||
## Analyzing Performance
|
||||
|
||||
To understand what's causing slow performance, set the environment variable `export CLAN_CLI_PERF=1`. When you complete a clan command, you'll see a summary of various performance metrics, helping you identify what's taking up time.
|
||||
|
||||
|
||||
|
||||
## See all possible packages and tests
|
||||
|
||||
To quickly show all possible packages and tests execute:
|
||||
@@ -155,28 +152,16 @@ To test the CLI locally in a development environment and set breakpoints for deb
|
||||
|
||||
## Test Locally in a Nix Sandbox
|
||||
|
||||
To run tests in a Nix sandbox, you have two options depending on whether your test functions have been marked as impure or not:
|
||||
|
||||
### Running Tests Marked as Impure
|
||||
|
||||
If your test functions need to execute `nix build` and have been marked as impure because you can't execute `nix build` inside a Nix sandbox, use the following command:
|
||||
To run tests in a Nix sandbox:
|
||||
|
||||
```bash
|
||||
nix run .#impure-checks -L
|
||||
nix build .#checks.x86_64-linux.clan-pytest-with-core
|
||||
```
|
||||
|
||||
This command will run the impure test functions.
|
||||
|
||||
### Running Pure Tests
|
||||
|
||||
For test functions that have not been marked as impure and don't require executing `nix build`, you can use the following command:
|
||||
|
||||
```bash
|
||||
nix build .#checks.x86_64-linux.clan-pytest --rebuild
|
||||
nix build .#checks.x86_64-linux.clan-pytest-without-core
|
||||
```
|
||||
|
||||
This command will run all pure test functions.
|
||||
|
||||
### Inspecting the Nix Sandbox
|
||||
|
||||
If you need to inspect the Nix sandbox while running tests, follow these steps:
|
||||
|
||||
@@ -27,7 +27,7 @@ inputs = {
|
||||
|
||||
## Import the Clan flake-parts Module
|
||||
|
||||
After updating your flake inputs, the next step is to import the Clan flake-parts module. This will make the [Clan options](../options.md) available within `mkFlake`.
|
||||
After updating your flake inputs, the next step is to import the Clan flake-parts module. This will make the [Clan options](/options) available within `mkFlake`.
|
||||
|
||||
```nix
|
||||
{
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
Machines can be added using the following methods:
|
||||
|
||||
- Editing nix expressions in flake.nix (i.e. via `clan-core.lib.clan`)
|
||||
- Editing machines/`machine_name`/configuration.nix (automatically included if it exists)
|
||||
- `clan machines create` (imperative)
|
||||
- Create a file `machines/{machine_name}/configuration.nix` (See: [File Autoincludes](../../concepts/autoincludes.md))
|
||||
- Imperative via cli command: `clan machines create`
|
||||
- Editing Nix expressions in `flake.nix`; see [`clan-core.lib.clan`](/options/?scope=Flake Options (clan.nix file)) (a minimal sketch follows below)
|
||||
|
||||
See the complete [list](../../concepts/autoincludes.md) of auto-loaded files.
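As a minimal sketch of the `flake.nix` route (the machine name and module body are hypothetical):

```nix
# Hypothetical machine "jon" declared inline via clan-core.lib.clan
clan-core.lib.clan {
  # ... other clan settings ...
  machines.jon = { pkgs, ... }: {
    environment.systemPackages = [ pkgs.git ];
  };
}
```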
|
||||
|
||||
@@ -39,7 +39,6 @@ See the complete [list](../../concepts/autoincludes.md) of auto-loaded files.
|
||||
The imperative command might create a machine folder in `machines/jon`
|
||||
and might persist information in `inventory.json`.
|
||||
|
||||
|
||||
### Configuring a machine
|
||||
|
||||
!!! Note
|
||||
|
||||
@@ -1,18 +1,20 @@
|
||||
# Migrate existing NixOS configurations
|
||||
# Convert existing NixOS configurations
|
||||
|
||||
This guide will help you migrate your existing NixOS configurations into Clan.
|
||||
This guide will help you convert your existing NixOS configurations into a Clan.
|
||||
|
||||
!!! Warning
|
||||
Migrating instead of starting new can be trickier and might lead to bugs or
|
||||
unexpected issues. We recommend following the [Getting Started](../getting-started/index.md) guide first. Once you have a working setup, you can easily transfer your NixOS configurations over.
|
||||
unexpected issues. We recommend reading the [Getting Started](./index.md) guide first.
|
||||
|
||||
Once you have a working setup and understand the concepts, transferring your NixOS configurations over is easy.
|
||||
|
||||
## Back up your existing configuration
|
||||
|
||||
## Back up your existing configuration!
|
||||
Before you start, it is strongly recommended to back up your existing
|
||||
configuration in any form you see fit. If you use version control to manage
|
||||
your configuration changes, it is also a good idea to follow the migration
|
||||
guide in a separate branch until everything works as expected.
|
||||
|
||||
|
||||
## Starting Point
|
||||
|
||||
We assume you are already using NixOS flakes to manage your configuration. If
|
||||
@@ -43,10 +45,9 @@ have two hosts: **berlin** and **cologne**.
|
||||
}
|
||||
```
|
||||
|
||||
## Add clan-core Input
|
||||
## 1. Add `clan-core` to `inputs`
|
||||
|
||||
Add `clan-core` to your flake as input. It will provide everything we need to
|
||||
manage your configurations with clan.
|
||||
Add `clan-core` to your flake as input.
|
||||
|
||||
```nix
|
||||
inputs.clan-core = {
|
||||
@@ -56,7 +57,7 @@ inputs.clan-core = {
|
||||
}
|
||||
```
|
||||
|
||||
## Update Outputs
|
||||
## 2. Update Outputs
|
||||
|
||||
To be able to access our newly added dependency, it has to be added to the
|
||||
output parameters.
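A minimal sketch of that change (the other arguments shown here are illustrative and depend on your existing inputs):

```nix
{
  # Before: outputs = { self, nixpkgs, ... }:
  # After:  clan-core is added to the output function's arguments
  outputs = { self, nixpkgs, clan-core, ... }: {
    # ... existing outputs ...
  };
}
```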
|
||||
@@ -103,26 +104,23 @@ For the provided flake example, your flake should now look like this:
|
||||
};
|
||||
in
|
||||
{
|
||||
nixosConfigurations = clan.nixosConfigurations;
|
||||
|
||||
inherit (clan) clanInternals;
|
||||
|
||||
clan = {
|
||||
inherit (clan) templates;
|
||||
};
|
||||
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
|
||||
clan = clan.config;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
Et voilà! Your existing hosts are now part of a clan. Existing Nix tooling
|
||||
✅ Et voilà! Your existing hosts are now part of a clan.
|
||||
|
||||
Existing Nix tooling
|
||||
should still work as normal. To check that you didn't make any errors, run `nix
|
||||
flake show` and verify both hosts are still recognized as if nothing had
|
||||
changed. You should also see the new `clanInternals` output.
|
||||
changed. You should also see the new `clan` output.
|
||||
|
||||
```
|
||||
❯ nix flake show
|
||||
git+file:///my-nixos-config
|
||||
├───clanInternals: unknown
|
||||
├───clan: unknown
|
||||
└───nixosConfigurations
|
||||
├───berlin: NixOS configuration
|
||||
└───cologne: NixOS configuration
|
||||
@@ -131,7 +129,7 @@ git+file:///my-nixos-config
|
||||
Of course you can also rebuild your configuration using `nixos-rebuild` and
|
||||
verify everything still works.
|
||||
|
||||
## Add Clan CLI devShell
|
||||
## 3. Add `clan-cli` to your `devShells`
|
||||
|
||||
At this point Clan is set up, but you can't use the CLI yet. To do so, it is
|
||||
recommended to expose it via a `devShell` in your flake. It is also possible to
|
||||
@@ -163,8 +161,8 @@ cologne
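The full example is elided by the hunk above; a minimal sketch of such a devShell (the `clan-cli` attribute path and the system are assumptions):

```nix
{
  devShells.x86_64-linux.default = nixpkgs.legacyPackages.x86_64-linux.mkShell {
    packages = [
      # Assumed attribute path for the Clan CLI exposed by the clan-core input
      clan-core.packages.x86_64-linux.clan-cli
    ];
  };
}
```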
|
||||
|
||||
## Specify Targets
|
||||
|
||||
Clan needs to know where it can reach your hosts. For each of your hosts, set
|
||||
`clan.core.networking.targetHost` to its address or hostname.
|
||||
Clan needs to know where it can reach your hosts. For testing purposes, set
|
||||
`clan.core.networking.targetHost` to the machine's address or hostname.
|
||||
|
||||
```nix
|
||||
# machines/berlin/configuration.nix
|
||||
@@ -173,6 +171,8 @@ Clan needs to know where it can reach your hosts. For each of your hosts, set
|
||||
}
|
||||
```
|
||||
|
||||
See our guide on for properly [configuring machines networking](../networking.md)
|
||||
|
||||
## Next Steps
|
||||
|
||||
You are now fully set up. Use the CLI to manage your hosts or proceed to
|
||||
@@ -1,110 +1,129 @@
|
||||
# :material-clock-fast: Getting Started
|
||||
|
||||
Ready to create your own Clan and manage a fleet of machines? Follow these simple steps to get started.
|
||||
Ready to manage your fleet of machines?
|
||||
|
||||
This guide walks you through setting up your own declarative infrastructure using clan, git and flakes. By the end of this, you will have one or more machines integrated and installed. You can then import your existing NixOS configuration into this setup if you wish.
|
||||
We will create a declarative infrastructure using **clan**, **git**, and **nix flakes**.
|
||||
|
||||
The following steps are meant to be executed on the machine on which to administer the infrastructure.
|
||||
|
||||
In order to get started you should have at least one machine with either physical or ssh access available as an installation target. Your local machine can also be used as an installation target if it is already running NixOS.
|
||||
You'll finish with a centrally managed fleet, ready to import your existing NixOS configuration.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
=== "**Linux**"
|
||||
Make sure you have the following:
|
||||
|
||||
Clan requires Nix to be installed on your system. Run the following command to install Nix:
|
||||
* 💻 **Administration Machine**: Run the setup commands from this machine.
|
||||
* 🛠️ **Nix**: The Nix package manager, installed on your administration machine.
|
||||
|
||||
??? info "**How to install Nix (Linux / MacOS / NixOS)**"
|
||||
|
||||
**On Linux or macOS:**
|
||||
|
||||
1. Run the recommended installer:
|
||||
```shellSession
|
||||
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
|
||||
```
|
||||
|
||||
2. After installation, ensure flakes are enabled by adding this line to `~/.config/nix/nix.conf`:
|
||||
```
|
||||
experimental-features = nix-command flakes
|
||||
```
|
||||
|
||||
**On NixOS:**
|
||||
|
||||
Nix is already installed. You only need to enable flakes for your user in your `configuration.nix`:
|
||||
|
||||
```nix
|
||||
{
|
||||
nix.settings.experimental-features = [ "nix-command" "flakes" ];
|
||||
}
|
||||
```
|
||||
Then, run `nixos-rebuild switch` to apply the changes.
|
||||
|
||||
* 🎯 **Target Machine(s)**: A remote machine with SSH, or your local machine (if NixOS).
|
||||
|
||||
## Create a New Clan
|
||||
|
||||
1. Navigate to your desired directory:
|
||||
|
||||
```shellSession
|
||||
cd <your-directory>
|
||||
```
|
||||
|
||||
2. Create a new clan flake:
|
||||
|
||||
**Note:** This creates a new directory in your current location
|
||||
|
||||
```shellSession
|
||||
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
|
||||
nix run https://git.clan.lol/clan/clan-core/archive/main.tar.gz#clan-cli --refresh -- flakes create
|
||||
```
|
||||
|
||||
If you have previously installed Nix, make sure `experimental-features = nix-command flakes` is present in `~/.config/nix/nix.conf` or `/etc/nix/nix.conf`. If this is not the case, please add it to `~/.config/nix/nix.conf`.
|
||||
3. Enter a **name** in the prompt:
|
||||
|
||||
=== "**NixOS**"
|
||||
|
||||
If you run NixOS the `nix` binary is already installed.
|
||||
|
||||
You will also need to enable the `nix-command` and `flakes` experimental features in your `configuration.nix`:
|
||||
|
||||
```nix
|
||||
{ nix.settings.experimental-features = [ "nix-command" "flakes" ]; }
|
||||
```terminalSession
|
||||
Enter a name for the new clan: my-clan
|
||||
```
|
||||
|
||||
=== "**macOS**"
|
||||
## Project Structure
|
||||
|
||||
Clan requires Nix to be installed on your system. Run the following command to install Nix:
|
||||
Your new directory, `my-clan`, should contain the following structure:
|
||||
|
||||
```shellSession
|
||||
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
|
||||
```
|
||||
```
|
||||
my-clan/
|
||||
├── clan.nix
|
||||
├── flake.lock
|
||||
├── flake.nix
|
||||
├── modules/
|
||||
└── sops/
|
||||
```
|
||||
|
||||
If you have previously installed Nix, make sure `experimental-features = nix-command flakes` is present in `~/.config/nix/nix.conf` or `/etc/nix/nix.conf`. If this is not the case, please add it to `~/.config/nix/nix.conf`.
|
||||
!!! note "Templates"
|
||||
This is the structure for the `default` template.
|
||||
|
||||
## Create a new clan
|
||||
Use `clan templates list` and `clan templates --help` for available templates & more. Keep in mind that the exact files may change as templates evolve.
|
||||
|
||||
Initialize a new clan flake
|
||||
|
||||
## Activate the Environment
|
||||
|
||||
To get started, `cd` into your new project directory.
|
||||
|
||||
```shellSession
|
||||
nix run https://git.clan.lol/clan/clan-core/archive/main.tar.gz#clan-cli --refresh -- flakes create
|
||||
cd my-clan
|
||||
```
|
||||
|
||||
This should prompt for a *name*:
|
||||
|
||||
```terminalSession
|
||||
Enter a name for the new clan: my-clan
|
||||
```
|
||||
|
||||
Enter a *name*, confirm with *enter*. A directory with that name will be created and initialized.
|
||||
|
||||
!!! Note
|
||||
This command uses the `default` template
|
||||
|
||||
See `clan templates list` and the `--help` reference for how to use other templates.
|
||||
|
||||
## Explore the Project Structure
|
||||
|
||||
Take a look at all project files:
|
||||
For example, you might see something like:
|
||||
|
||||
```{ .console .no-copy }
|
||||
$ cd my-clan
|
||||
$ ls
|
||||
clan.nix flake.lock flake.nix modules sops
|
||||
```
|
||||
|
||||
|
||||
|
||||
Don’t worry if your output looks different — Clan templates evolve over time.
|
||||
|
||||
To interact with your newly created clan, you need to load the `clan` CLI package into your environment by running:
|
||||
Now, activate the environment using one of the following methods.
|
||||
|
||||
=== "Automatic (direnv, recommended)"
|
||||
- prerequisite: [install nix-direnv](https://github.com/nix-community/nix-direnv)
|
||||
**Prerequisite**: You must have [nix-direnv](https://github.com/nix-community/nix-direnv) installed.
|
||||
|
||||
Run `direnv allow` to automatically load the environment whenever you enter this directory.
|
||||
```shellSession
|
||||
direnv allow
|
||||
```
|
||||
|
||||
=== "Manual (nix develop)"
|
||||
Run `nix develop` to load the environment for your current shell session.
|
||||
|
||||
```shellSession
|
||||
nix develop
|
||||
```
|
||||
|
||||
verify that you can run `clan` commands:
|
||||
## Verify the Setup
|
||||
|
||||
Once your environment is active, verify that the clan command is available by running:
|
||||
|
||||
```shellSession
|
||||
clan show
|
||||
```
|
||||
|
||||
You should see something like this:
|
||||
You should see the default metadata for your new clan:
|
||||
|
||||
```shellSession
|
||||
Name: __CHANGE_ME__
|
||||
Description: None
|
||||
```
|
||||
|
||||
To change the name of your clan edit `meta.name` in the `clan.nix` or `flake.nix` file
|
||||
This confirms your setup is working correctly.
|
||||
|
||||
You can now change the default name by editing the `meta.name` field in your `clan.nix` file.
|
||||
|
||||
```{.nix title="clan.nix" hl_lines="3"}
|
||||
{
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
|
||||
# Update Your Machines
|
||||
# Update Machines
|
||||
|
||||
Clan CLI enables you to remotely update your machines over SSH. This requires setting up a target address for each target machine.
|
||||
The Clan command line interface enables you to update machines remotely over SSH.
|
||||
In this guide we will teach you how to set a `targetHost` in Nix,
|
||||
and how to define a remote builder for your machine closures.
|
||||
|
||||
### Setting `targetHost`
|
||||
|
||||
In your Nix files, set the `targetHost` to the reachable IP address of your new machine. This eliminates the need to specify `--target-host` with every command.
|
||||
## Setting `targetHost`
|
||||
|
||||
Set the machine’s `targetHost` to the reachable IP address of the new machine.
|
||||
This eliminates the need to specify `--target-host` in CLI commands.
|
||||
|
||||
```{.nix title="clan.nix" hl_lines="9"}
|
||||
{
|
||||
@@ -23,15 +26,42 @@ inventory.machines = {
|
||||
# [...]
|
||||
}
|
||||
```
|
||||
|
||||
The use of `root@` in the target address implies SSH access as the `root` user.
|
||||
Ensure that the root login is secured and only used when necessary.
|
||||
|
||||
## Multiple Target Hosts
|
||||
|
||||
### Setting a Build Host
|
||||
You can now experiment with a new interface that allows you to define multiple `targetHost` addresses for different VPNs. Learn more and try it out in our [networking guide](../networking.md).
|
||||
|
||||
If the machine does not have enough resources to run the NixOS evaluation or build itself,
|
||||
it is also possible to specify a build host instead.
|
||||
During an update, the cli will ssh into the build host and run `nixos-rebuild` from there.
|
||||
## Updating Machine Configurations
|
||||
|
||||
Execute the following command to update the specified machine:
|
||||
|
||||
```bash
|
||||
clan machines update jon
|
||||
```
|
||||
|
||||
All machines can be updated simultaneously by omitting the machine name:
|
||||
|
||||
```bash
|
||||
clan machines update
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
The following options are only needed for special cases, such as limited resources, mixed environments, or private flakes.
|
||||
|
||||
### Setting `buildHost`
|
||||
|
||||
If the machine does not have enough resources to run the NixOS **evaluation** or **build** itself,
|
||||
it is also possible to specify a `buildHost` instead.
|
||||
During an update, clan will ssh into the `buildHost` and run `nixos-rebuild` from there.
|
||||
|
||||
!!! Note
|
||||
The `buildHost` option should be set directly within your machine’s Nix configuration, **not** under `inventory.machines`.
|
||||
|
||||
|
||||
```{.nix hl_lines="5" .no-copy}
|
||||
@@ -45,7 +75,11 @@ buildClan {
|
||||
};
|
||||
```
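The hunk above elides the highlighted lines; a minimal sketch of setting a build host in the machine's Nix configuration (addresses are illustrative, and `clan.core.networking.buildHost` is assumed to be the relevant option):

```nix
# machines/berlin/configuration.nix
{
  # Where the closure is deployed to
  clan.core.networking.targetHost = "root@berlin.example.com";
  # Where evaluation/build happens instead of on the target machine (assumed option)
  clan.core.networking.buildHost = "root@builder.example.com";
}
```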
|
||||
|
||||
You can also override the build host via the command line:
|
||||
### Overriding configuration with CLI flags
|
||||
|
||||
`buildHost` / `targetHost`, and other network settings can be temporarily overridden for a single command:
|
||||
|
||||
For the full list of flags, refer to the [Clan CLI](../../reference/cli/index.md).
|
||||
|
||||
```bash
|
||||
# Build on a remote host
|
||||
@@ -56,23 +90,9 @@ clan machines update jon --build-host local
|
||||
```
|
||||
|
||||
!!! Note
|
||||
Make sure that the CPU architecture is the same for the buildHost as for the targetHost.
|
||||
Example:
|
||||
If you want to deploy to a macOS machine, your architecture is an ARM64-Darwin, that means you need a second macOS machine to build it.
|
||||
Make sure the CPU architecture of the `buildHost` matches that of the `targetHost`
|
||||
|
||||
### Updating Machine Configurations
|
||||
|
||||
Execute the following command to update the specified machine:
|
||||
|
||||
```bash
|
||||
clan machines update jon
|
||||
```
|
||||
|
||||
You can also update all configured machines simultaneously by omitting the machine name:
|
||||
|
||||
```bash
|
||||
clan machines update
|
||||
```
|
||||
For example, if deploying to a macOS machine with an ARM64-Darwin architecture, you need a second macOS machine with the same architecture to build it.
|
||||
|
||||
|
||||
### Excluding a machine from `clan machine update`
|
||||
@@ -96,14 +116,15 @@ This is useful for machines that are not always online or are not part of the re
|
||||
### Uploading Flake Inputs
|
||||
|
||||
When updating remote machines, flake inputs are usually fetched by the build host.
|
||||
However, if your flake inputs require authentication (e.g., private repositories),
|
||||
you can use the `--upload-inputs` flag to upload all inputs from your local machine:
|
||||
However, some flake inputs may require authentication (e.g., private repositories).
|
||||
|
||||
Use the `--upload-inputs` flag to upload all inputs from your local machine:
|
||||
|
||||
```bash
|
||||
clan machines update jon --upload-inputs
|
||||
```
|
||||
|
||||
This is particularly useful when:
|
||||
- Your flake references private Git repositories
|
||||
- Authentication credentials are only available on your local machine
|
||||
- The flake references private Git repositories
|
||||
- Authentication credentials are only available on the local machine
|
||||
- The build host doesn't have access to certain network resources
|
||||
|
||||
@@ -254,7 +254,7 @@ The following table shows the migration status of each deprecated clanModule:
|
||||
| `data-mesher` | ✅ [Migrated](../../reference/clanServices/data-mesher.md) | |
|
||||
| `deltachat` | ❌ Removed | |
|
||||
| `disk-id` | ❌ Removed | |
|
||||
| `dyndns` | [Being Migrated](https://git.clan.lol/clan/clan-core/pulls/4390) | |
|
||||
| `dyndns` | ✅ [Migrated](../../reference/clanServices/dyndns.md) | |
|
||||
| `ergochat` | ❌ Removed | |
|
||||
| `garage` | ✅ [Migrated](../../reference/clanServices/garage.md) | |
|
||||
| `golem-provider` | ❌ Removed | |
|
||||
@@ -263,18 +263,18 @@ The following table shows the migration status of each deprecated clanModule:
|
||||
| `iwd` | ❌ Removed | Use [wifi service](../../reference/clanServices/wifi.md) instead |
|
||||
| `localbackup` | ✅ [Migrated](../../reference/clanServices/localbackup.md) | |
|
||||
| `localsend` | ❌ Removed | |
|
||||
| `machine-id` | ❌ Removed | Now an [option](../../reference/clan.core/settings.md) |
|
||||
| `machine-id` | ✅ [Migrated](../../reference/clan.core/settings.md) | Now an [option](../../reference/clan.core/settings.md) |
|
||||
| `matrix-synapse` | ✅ [Migrated](../../reference/clanServices/matrix-synapse.md) | |
|
||||
| `moonlight` | ❌ Removed | |
|
||||
| `mumble` | ❌ Removed | |
|
||||
| `mycelium` | ✅ [Migrated](../../reference/clanServices/mycelium.md) | |
|
||||
| `nginx` | ❌ Removed | |
|
||||
| `packages` | ✅ [Migrated](../../reference/clanServices/packages.md) | |
|
||||
| `postgresql` | ❌ Removed | Now an [option](../../reference/clan.core/settings.md) |
|
||||
| `root-password` | ✅ [Migrated](../../reference/clanServices/users.md) | |
|
||||
| `postgresql` | ✅ [Migrated](../../reference/clan.core/settings.md) | Now an [option](../../reference/clan.core/settings.md) |
|
||||
| `root-password` | ✅ [Migrated](../../reference/clanServices/users.md) | See [migration guide](../../reference/clanServices/users.md#migration-from-root-password-module) |
|
||||
| `single-disk` | ❌ Removed | |
|
||||
| `sshd` | ✅ [Migrated](../../reference/clanServices/sshd.md) | |
|
||||
| `state-version` | ✅ [Migrated](../../reference/clanServices/state-version.md) | |
|
||||
| `state-version` | ✅ [Migrated](../../reference/clan.core/settings.md) | Now an [option](../../reference/clan.core/settings.md) |
|
||||
| `static-hosts` | ❌ Removed | |
|
||||
| `sunshine` | ❌ Removed | |
|
||||
| `syncthing-static-peers` | ❌ Removed | |
|
||||
|
||||
docs/site/guides/networking.md (new file, 184 lines)
@@ -0,0 +1,184 @@
|
||||
# Connecting to Your Machines
|
||||
|
||||
Clan provides automatic networking with fallback mechanisms to reliably connect to your machines.
|
||||
|
||||
## Option 1: Automatic Networking with Fallback (Recommended)
|
||||
|
||||
Clan's networking module automatically manages connections through various network technologies with intelligent fallback. When you run `clan ssh` or `clan machines update`, Clan tries each configured network by priority until one succeeds.
|
||||
|
||||
### Basic Setup with Internet Service
|
||||
|
||||
For machines with public IPs or DNS names, use the `internet` service to configure direct SSH while keeping fallback options:
|
||||
|
||||
```{.nix title="flake.nix" hl_lines="7-10 14-16"}
|
||||
{
|
||||
outputs = { self, clan-core, ... }:
|
||||
let
|
||||
clan = clan-core.lib.clan {
|
||||
inventory.instances = {
|
||||
# Direct SSH with fallback support
|
||||
internet = {
|
||||
roles.default.machines.server1 = {
|
||||
settings.address = "server1.example.com";
|
||||
};
|
||||
roles.default.machines.server2 = {
|
||||
settings.address = "192.168.1.100";
|
||||
};
|
||||
};
|
||||
|
||||
# Fallback: Secure connections via Tor
|
||||
tor = {
|
||||
roles.server.tags.nixos = { };
|
||||
};
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
inherit (clan.config) nixosConfigurations;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### Advanced Setup with Multiple Networks
|
||||
|
||||
```{.nix title="flake.nix" hl_lines="7-10 13-16 19-21"}
|
||||
{
|
||||
outputs = { self, clan-core, ... }:
|
||||
let
|
||||
clan = clan-core.lib.clan {
|
||||
inventory.instances = {
|
||||
# Priority 1: Try direct connection first
|
||||
internet = {
|
||||
roles.default.machines.publicserver = {
|
||||
settings.address = "public.example.com";
|
||||
};
|
||||
};
|
||||
|
||||
# Priority 2: VPN for internal machines
|
||||
zerotier = {
|
||||
roles.controller.machines."controller" = { };
|
||||
roles.peer.tags.nixos = { };
|
||||
};
|
||||
|
||||
# Priority 3: Tor as universal fallback
|
||||
tor = {
|
||||
roles.server.tags.nixos = { };
|
||||
};
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
inherit (clan.config) nixosConfigurations;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### How It Works
|
||||
|
||||
Clan automatically tries networks in order of priority:
|
||||
1. Direct internet connections (if configured)
|
||||
2. VPN networks (ZeroTier, Tailscale, etc.)
|
||||
3. Tor hidden services
|
||||
4. Any other configured networks
|
||||
|
||||
If one network fails, Clan automatically tries the next.
|
||||
|
||||
### Useful Commands
|
||||
|
||||
```bash
|
||||
# View all configured networks and their status
|
||||
clan network list
|
||||
|
||||
# Test connectivity through all networks
|
||||
clan network ping machine1
|
||||
|
||||
# Show complete network topology
|
||||
clan network overview
|
||||
```
|
||||
|
||||
## Option 2: Manual targetHost (Bypasses Fallback!)
|
||||
|
||||
!!! warning
|
||||
Setting `targetHost` directly **disables all automatic networking and fallback**. Only use this if you need complete control and don't want Clan's intelligent connection management.
|
||||
|
||||
### Using Inventory (For Static Addresses)
|
||||
|
||||
Use inventory-level `targetHost` when the address is **static** and doesn't depend on NixOS configuration:
|
||||
|
||||
```{.nix title="flake.nix" hl_lines="8"}
|
||||
{
|
||||
outputs = { self, clan-core, ... }:
|
||||
let
|
||||
clan = clan-core.lib.clan {
|
||||
inventory.machines.server = {
|
||||
# WARNING: This bypasses all networking modules!
|
||||
# Use for: Static IPs, DNS names, known hostnames
|
||||
deploy.targetHost = "root@192.168.1.100";
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
inherit (clan.config) nixosConfigurations;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
**When to use inventory-level:**
|
||||
- Static IP addresses: `"root@192.168.1.100"`
|
||||
- DNS names: `"user@server.example.com"`
|
||||
- Any address that doesn't change based on machine configuration
|
||||
|
||||
### Using NixOS Configuration (For Dynamic Addresses)
|
||||
|
||||
Use machine-level `targetHost` when you need to **interpolate values from the NixOS configuration**:
|
||||
|
||||
```{.nix title="flake.nix" hl_lines="7"}
|
||||
{
|
||||
outputs = { self, clan-core, ... }:
|
||||
let
|
||||
clan = clan-core.lib.clan {
|
||||
machines.server = { config, ... }: {
|
||||
# WARNING: This also bypasses all networking modules!
|
||||
# REQUIRED for: Addresses that depend on NixOS config
|
||||
clan.core.networking.targetHost = "root@${config.networking.hostName}.local";
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
inherit (clan.config) nixosConfigurations;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
**When to use machine-level (NixOS config):**
|
||||
- Using hostName from config: `"root@${config.networking.hostName}.local"`
|
||||
- Building from multiple config values: `"${config.users.users.deploy.name}@${config.networking.hostName}"`
|
||||
- Any address that depends on evaluated NixOS configuration
|
||||
|
||||
!!! info "Key Difference"
|
||||
**Inventory-level** (`deploy.targetHost`) is evaluated immediately and works with static strings.
|
||||
**Machine-level** (`clan.core.networking.targetHost`) is evaluated after NixOS configuration and can access `config.*` values.
|
||||
|
||||
## Quick Decision Guide
|
||||
|
||||
| Scenario | Recommended Approach | Why |
|
||||
|----------|---------------------|-----|
|
||||
| Public servers | `internet` service | Keeps fallback options |
|
||||
| Mixed infrastructure | Multiple networks | Automatic failover |
|
||||
| Machines behind NAT | ZeroTier/Tor | NAT traversal with fallback |
|
||||
| Testing/debugging | Manual targetHost | Full control, no magic |
|
||||
| Single static machine | Manual targetHost | Simple, no overhead |
|
||||
|
||||
## Command-Line Override
|
||||
|
||||
The `--target-host` flag bypasses ALL networking configuration:
|
||||
|
||||
```bash
|
||||
# Emergency access - ignores all networking config
|
||||
clan machines update server --target-host root@backup-ip.com
|
||||
|
||||
# Direct SSH - no fallback attempted
|
||||
clan ssh laptop --target-host user@10.0.0.5
|
||||
```
|
||||
|
||||
Use this for debugging or emergency access when automatic networking isn't working.
|
||||
@@ -255,11 +255,50 @@ outputs = inputs: flake-parts.lib.mkFlake { inherit inputs; } ({self, lib, ...}:
|
||||
})
|
||||
```
|
||||
|
||||
The benefit of this approach is that downstream users can override the value of `myClan` by using `mkForce` or other priority modifiers.
|
||||
The benefit of this approach is that downstream users can override the value of
|
||||
`myClan` by using `mkForce` or other priority modifiers.
|
||||
|
||||
## Example: A machine-type service
|
||||
|
||||
Users often have different types of machines. These could be any classification
|
||||
you like, for example "servers" and "desktops". Having such distinctions allows
|
||||
reusing parts of your configuration that should be applied to a class of
|
||||
machines. Since this is such a common pattern, here is how to write such a
|
||||
service.
|
||||
|
||||
For this example we have two roles: `server` and `desktop`. Additionally, we
|
||||
can use the `perMachine` section to add configuration to all machines regardless
|
||||
of their type.
|
||||
|
||||
```nix title="machine-type.nix"
|
||||
{
|
||||
_class = "clan.service";
|
||||
manifest.name = "machine-type";
|
||||
|
||||
roles.server.perInstance.nixosModule = ./server.nix;
|
||||
roles.desktop.perInstance.nixosModule = ./desktop.nix;
|
||||
|
||||
perMachine.nixosModule = {
|
||||
# Configuration for all machines (any type)
|
||||
};
|
||||
}
|
||||
|
||||
```
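
The role modules referenced above (`./server.nix`, `./desktop.nix`) are ordinary NixOS modules. A minimal sketch of what `server.nix` could contain, with purely illustrative options:

```nix title="server.nix"
{ lib, ... }:
{
  # Applied to every machine that has the `server` role.
  # The options below are placeholders; use whatever your servers need.
  services.openssh.enable = true;
  documentation.nixos.enable = lib.mkDefault false;
}
```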
|
||||
|
||||
In the inventory we then assign machines to a type, e.g. by using tags:
|
||||
|
||||
```nix title="flake.nix"
|
||||
instances.machine-type = {
|
||||
module.input = "self";
|
||||
module.name = "@pinpox/machine-type";
|
||||
roles.desktop.tags.desktop = { };
|
||||
roles.server.tags.server = { };
|
||||
};
|
||||
```
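
For the tag-based role assignment above to match anything, the machines themselves need the corresponding tags. A sketch, assuming the usual `inventory.machines` layout and hypothetical machine names:

```nix title="flake.nix"
inventory.machines = {
  # Machine names are examples; the tag names must match the roles above.
  jon.tags = [ "desktop" ];
  server1.tags = [ "server" ];
};
```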
|
||||
|
||||
---
|
||||
|
||||
## Further
|
||||
## Further Reading
|
||||
|
||||
- [Reference Documentation for Service Authors](../../reference/clanServices/clan-service-author-interface.md)
|
||||
- [Migration Guide from ClanModules to ClanServices](../../guides/migrations/migrate-inventory-services.md)
|
||||
|
||||
@@ -1,84 +0,0 @@
|
||||
# How to Set `targetHost` for a Machine
|
||||
|
||||
The `targetHost` defines where the machine can be reached for operations like SSH or deployment. You can set it in two ways, depending on your use case.
|
||||
|
||||
---
|
||||
|
||||
## ✅ Option 1: Use the Inventory (Recommended for Static Hosts)
|
||||
|
||||
If the hostname is **static**, like `server.example.com`, set it in the **inventory**:
|
||||
|
||||
```{.nix title="flake.nix" hl_lines="8"}
|
||||
{
|
||||
# elided
|
||||
outputs =
|
||||
{ self, clan-core, ... }:
|
||||
let
|
||||
# Sometimes this attribute set is defined in clan.nix
|
||||
clan = clan-core.lib.clan {
|
||||
inventory.machines.jon = {
|
||||
deploy.targetHost = "root@server.example.com";
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
|
||||
# elided
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
This is fast, simple, and explicit, and doesn’t require evaluating the NixOS config. It can also be displayed in the clan-cli or clan-app.
|
||||
|
||||
---
|
||||
|
||||
## ✅ Option 2: Use NixOS (Only for Dynamic Hosts)
|
||||
|
||||
If your target host depends on a **dynamic expression** (like using the machine’s evaluated FQDN), set it inside the NixOS module:
|
||||
|
||||
```{.nix title="flake.nix" hl_lines="8"}
|
||||
{
|
||||
# elided
|
||||
outputs =
|
||||
{ self, clan-core, ... }:
|
||||
let
|
||||
# Sometimes this attribute set is defined in clan.nix
|
||||
clan = clan-core.lib.clan {
|
||||
machines.jon = {config, ...}: {
|
||||
clan.core.networking.targetHost = "jon@${config.networking.fqdn}";
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
|
||||
# elided
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
Use this **only if the value cannot be made static**, because it’s slower and won't be displayed in the clan-cli or clan-app yet.
|
||||
|
||||
---
|
||||
|
||||
## 📝 TL;DR
|
||||
|
||||
| Use Case | Use Inventory? | Example |
|
||||
| ------------------------- | -------------- | -------------------------------- |
|
||||
| Static hostname | ✅ Yes | `root@server.example.com` |
|
||||
| Dynamic config expression | ❌ No | `jon@${config.networking.fqdn}` |
|
||||
|
||||
---
|
||||
|
||||
## 🚀 Coming Soon: Unified Networking Module
|
||||
|
||||
We’re working on a new networking module that will automatically do all of this for you.
|
||||
|
||||
- Easier to use
|
||||
- Sane defaults: You’ll always be able to reach the machine — no need to worry about hostnames.
|
||||
- ✨ Migration from **either method** will be supported and simple.
|
||||
|
||||
## Summary
|
||||
|
||||
- Ask: *Does this hostname dynamically change based on NixOS config?*
|
||||
- If **no**, use the inventory.
|
||||
- If **yes**, then use NixOS config.
|
||||
@@ -1,6 +0,0 @@
|
||||
---
|
||||
template: options.html
|
||||
---
|
||||
|
||||
|
||||
<iframe src="/options-page/" height="1000" width="100%"></iframe>
|
||||
@@ -4,7 +4,7 @@ This section of the site provides an overview of available options and commands
|
||||
|
||||
---
|
||||
|
||||
- [Clan Configuration Option](../options.md) - for defining a Clan
|
||||
- [Clan Configuration Option](/options) - for defining a Clan
|
||||
- Learn how to use the [Clan CLI](./cli/index.md)
|
||||
- Explore available [services](./clanServices/index.md)
|
||||
- [NixOS Configuration Options](./clan.core/index.md) - Additional options available on a NixOS machine.
|
||||
|
||||
46
flake.lock
generated
@@ -13,11 +13,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1753067306,
|
||||
"narHash": "sha256-jyoEbaXa8/MwVQ+PajUdT63y3gYhgD9o7snO/SLaikw=",
|
||||
"rev": "18dfd42bdb2cfff510b8c74206005f733e38d8b9",
|
||||
"lastModified": 1756091210,
|
||||
"narHash": "sha256-oEUEAZnLbNHi8ti4jY8x10yWcIkYoFc5XD+2hjmOS04=",
|
||||
"rev": "eb831bca21476fa8f6df26cb39e076842634700d",
|
||||
"type": "tarball",
|
||||
"url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/18dfd42bdb2cfff510b8c74206005f733e38d8b9.tar.gz"
|
||||
"url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/eb831bca21476fa8f6df26cb39e076842634700d.tar.gz"
|
||||
},
|
||||
"original": {
|
||||
"type": "tarball",
|
||||
@@ -31,11 +31,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1754971456,
|
||||
"narHash": "sha256-p04ZnIBGzerSyiY2dNGmookCldhldWAu03y0s3P8CB0=",
|
||||
"lastModified": 1756115622,
|
||||
"narHash": "sha256-iv8xVtmLMNLWFcDM/HcAPLRGONyTRpzL9NS09RnryRM=",
|
||||
"owner": "nix-community",
|
||||
"repo": "disko",
|
||||
"rev": "8246829f2e675a46919718f9a64b71afe3bfb22d",
|
||||
"rev": "bafad29f89e83b2d861b493aa23034ea16595560",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -71,11 +71,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1751313918,
|
||||
"narHash": "sha256-HsJM3XLa43WpG+665aGEh8iS8AfEwOIQWk3Mke3e7nk=",
|
||||
"lastModified": 1755825449,
|
||||
"narHash": "sha256-XkiN4NM9Xdy59h69Pc+Vg4PxkSm9EWl6u7k6D5FZ5cM=",
|
||||
"owner": "nix-darwin",
|
||||
"repo": "nix-darwin",
|
||||
"rev": "e04a388232d9a6ba56967ce5b53a8a6f713cdfcf",
|
||||
"rev": "8df64f819698c1fee0c2969696f54a843b2231e8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -86,11 +86,11 @@
|
||||
},
|
||||
"nix-select": {
|
||||
"locked": {
|
||||
"lastModified": 1745005516,
|
||||
"narHash": "sha256-IVaoOGDIvAa/8I0sdiiZuKptDldrkDWUNf/+ezIRhyc=",
|
||||
"rev": "69d8bf596194c5c35a4e90dd02c52aa530caddf8",
|
||||
"lastModified": 1755887746,
|
||||
"narHash": "sha256-lzWbpHKX0WAn/jJDoCijIDss3rqYIPawe46GDaE6U3g=",
|
||||
"rev": "92c2574c5e113281591be01e89bb9ddb31d19156",
|
||||
"type": "tarball",
|
||||
"url": "https://git.clan.lol/api/v1/repos/clan/nix-select/archive/69d8bf596194c5c35a4e90dd02c52aa530caddf8.tar.gz"
|
||||
"url": "https://git.clan.lol/api/v1/repos/clan/nix-select/archive/92c2574c5e113281591be01e89bb9ddb31d19156.tar.gz"
|
||||
},
|
||||
"original": {
|
||||
"type": "tarball",
|
||||
@@ -99,11 +99,11 @@
|
||||
},
|
||||
"nixos-facter-modules": {
|
||||
"locked": {
|
||||
"lastModified": 1750412875,
|
||||
"narHash": "sha256-uP9Xxw5XcFwjX9lNoYRpybOnIIe1BHfZu5vJnnPg3Jc=",
|
||||
"lastModified": 1756291602,
|
||||
"narHash": "sha256-FYhiArSzcx60OwoH3JBp5Ho1D5HEwmZx6WoquauDv3g=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixos-facter-modules",
|
||||
"rev": "14df13c84552a7d1f33c1cd18336128fbc43f920",
|
||||
"rev": "5c37cee817c94f50710ab11c25de572bc3604bd5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -115,10 +115,10 @@
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 315532800,
|
||||
"narHash": "sha256-2ILJtWugqmMyZnaWnHh+5yyw8RZWbKu9rVdeWmrBVhY=",
|
||||
"rev": "a595dde4d0d31606e19dcec73db02279db59d201",
|
||||
"narHash": "sha256-h8Sx4S+/0FpodZji6W9lHzwY5BcuUG85Aj3GfhvGC2o=",
|
||||
"rev": "a650b5d0de99158323597f048667c4d914243224",
|
||||
"type": "tarball",
|
||||
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre844295.a595dde4d0d3/nixexprs.tar.xz"
|
||||
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre845298.a650b5d0de99/nixexprs.tar.xz"
|
||||
},
|
||||
"original": {
|
||||
"type": "tarball",
|
||||
@@ -181,11 +181,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1754847726,
|
||||
"narHash": "sha256-2vX8QjO5lRsDbNYvN9hVHXLU6oMl+V/PsmIiJREG4rE=",
|
||||
"lastModified": 1755934250,
|
||||
"narHash": "sha256-CsDojnMgYsfshQw3t4zjRUkmMmUdZGthl16bXVWgRYU=",
|
||||
"owner": "numtide",
|
||||
"repo": "treefmt-nix",
|
||||
"rev": "7d81f6fb2e19bf84f1c65135d1060d829fae2408",
|
||||
"rev": "74e1a52d5bd9430312f8d1b8b0354c92c17453e5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
@@ -67,7 +67,6 @@
|
||||
clan = {
|
||||
meta.name = "clan-core";
|
||||
inventory = {
|
||||
services = { };
|
||||
machines = {
|
||||
"test-darwin-machine" = {
|
||||
machineClass = "darwin";
|
||||
@@ -97,6 +96,7 @@
|
||||
./nixosModules/flake-module.nix
|
||||
./pkgs/flake-module.nix
|
||||
./templates/flake-module.nix
|
||||
./pkgs/clan-cli/clan_cli/tests/flake-module.nix
|
||||
]
|
||||
++ [
|
||||
(if pathExists ./flakeModules/clan.nix then import ./flakeModules/clan.nix inputs.self else { })
|
||||
|
||||
@@ -33,7 +33,6 @@ lib.fix (
|
||||
evalService = clanLib.callLib ./modules/inventory/distributed-service/evalService.nix { };
|
||||
# ------------------------------------
|
||||
# ClanLib functions
|
||||
evalClan = clanLib.callLib ./modules/inventory/eval-clan-modules { };
|
||||
inventory = clanLib.callLib ./modules/inventory { };
|
||||
modules = clanLib.callLib ./modules/inventory/frontmatter { };
|
||||
test = clanLib.callLib ./test { };
|
||||
|
||||
@@ -328,7 +328,7 @@ rec {
|
||||
# To get the type of a Deferred modules we need to know the interface of the place where it is evaluated.
|
||||
# i.e. in case of a clan.service this is the interface of the service which dynamically changes depending on the service
|
||||
# We assign "type" = []
|
||||
# This means any value is valid — or like TypeScript’s unknown.
|
||||
# This means any value is valid — or like TypeScript's unknown.
|
||||
# We can assign the type later, when we know the exact interface.
|
||||
# tsType = "unknown" is a type that we preload for json2ts, such that it gets the correct type in typescript
|
||||
(option.type.name == "deferredModule")
|
||||
|
||||
@@ -245,6 +245,8 @@ in
|
||||
in
|
||||
{ config, ... }:
|
||||
{
|
||||
staticModules = clan-core.clan.modules;
|
||||
|
||||
distributedServices = clanLib.inventory.mapInstances {
|
||||
inherit (clanConfig) inventory exportsModule;
|
||||
inherit flakeInputs directory;
|
||||
|
||||
@@ -639,7 +639,7 @@ in
|
||||
|
||||
Exports are used to share and expose information between instances.
|
||||
|
||||
Define exports in the [`perInstance`](#perInstance) or [`perMachine`](#perMachine) scope.
|
||||
Define exports in the [`perInstance`](#roles.perInstance) or [`perMachine`](#perMachine) scope.
|
||||
|
||||
Accessing the exports:
|
||||
|
||||
|
||||
@@ -1,108 +0,0 @@
|
||||
{
|
||||
lib,
|
||||
clanLib,
|
||||
}:
|
||||
let
|
||||
baseModule =
|
||||
{ pkgs }:
|
||||
# Module
|
||||
{ config, ... }:
|
||||
{
|
||||
imports = (import (pkgs.path + "/nixos/modules/module-list.nix"));
|
||||
nixpkgs.pkgs = pkgs;
|
||||
clan.core.name = "dummy";
|
||||
system.stateVersion = config.system.nixos.release;
|
||||
# Set this to work around a bug where `clan.core.settings.machine.name`
|
||||
# is forced due to `networking.interfaces` being forced
|
||||
# somewhere in the nixpkgs options
|
||||
facter.detected.dhcp.enable = lib.mkForce false;
|
||||
};
|
||||
|
||||
# This function takes a list of module names and evaluates them
|
||||
# [ module ] -> { config, options, ... }
|
||||
evalClanModulesLegacy =
|
||||
{
|
||||
modules,
|
||||
pkgs,
|
||||
clan-core,
|
||||
}:
|
||||
let
|
||||
evaled = lib.evalModules {
|
||||
class = "nixos";
|
||||
modules = [
|
||||
(baseModule { inherit pkgs; })
|
||||
{
|
||||
clan.core.settings.directory = clan-core;
|
||||
}
|
||||
clan-core.nixosModules.clanCore
|
||||
]
|
||||
++ modules;
|
||||
};
|
||||
in
|
||||
# lib.warn ''
|
||||
# doesn't respect role specific interfaces.
|
||||
|
||||
# The following {module}/default.nix file trying to be imported.
|
||||
|
||||
# Modules: ${builtins.toJSON modulenames}
|
||||
|
||||
# This might result in incomplete or incorrect interfaces.
|
||||
|
||||
# FIX: Use evalClanModuleWithRole instead.
|
||||
# ''
|
||||
evaled;
|
||||
|
||||
/*
|
||||
This function takes a list of module names and evaluates them
|
||||
Returns a set of interfaces as described below:
|
||||
|
||||
Fn :: { ${moduleName} = Module; } -> {
|
||||
${moduleName} :: {
|
||||
${roleName}: JSONSchema
|
||||
}
|
||||
}
|
||||
*/
|
||||
evalClanModulesWithRoles =
|
||||
{
|
||||
allModules,
|
||||
clan-core,
|
||||
pkgs,
|
||||
}:
|
||||
let
|
||||
res = builtins.mapAttrs (
|
||||
moduleName: module:
|
||||
let
|
||||
frontmatter = clanLib.modules.getFrontmatter allModules.${moduleName} moduleName;
|
||||
roles =
|
||||
if builtins.elem "inventory" frontmatter.features or [ ] then
|
||||
assert lib.isPath module;
|
||||
clan-core.clanLib.modules.getRoles "Documentation: inventory.modules" allModules moduleName
|
||||
else
|
||||
[ ];
|
||||
in
|
||||
lib.listToAttrs (
|
||||
lib.map (role: {
|
||||
name = role;
|
||||
value =
|
||||
(lib.evalModules {
|
||||
class = "nixos";
|
||||
modules = [
|
||||
(baseModule { inherit pkgs; })
|
||||
clan-core.nixosModules.clanCore
|
||||
{
|
||||
clan.core.settings.directory = clan-core;
|
||||
}
|
||||
# Role interface
|
||||
(module + "/roles/${role}.nix")
|
||||
];
|
||||
}).options.clan.${moduleName} or { };
|
||||
}) roles
|
||||
)
|
||||
) allModules;
|
||||
in
|
||||
res;
|
||||
in
|
||||
{
|
||||
evalClanModules = evalClanModulesLegacy;
|
||||
inherit evalClanModulesWithRoles;
|
||||
}
|
||||
@@ -1,12 +1,8 @@
|
||||
{
|
||||
self,
|
||||
inputs,
|
||||
options,
|
||||
...
|
||||
}:
|
||||
let
|
||||
inputOverrides = self.clanLib.flake-inputs.getOverrides inputs;
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
./distributed-service/flake-module.nix
|
||||
@@ -15,16 +11,13 @@ in
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
config,
|
||||
system,
|
||||
self',
|
||||
...
|
||||
}:
|
||||
{
|
||||
devShells.inventory-schema = pkgs.mkShell {
|
||||
name = "clan-inventory-schema";
|
||||
inputsFrom = with config.checks; [
|
||||
eval-lib-inventory
|
||||
inputsFrom = [
|
||||
self'.devShells.default
|
||||
];
|
||||
};
|
||||
@@ -51,41 +44,5 @@ in
|
||||
warningsAreErrors = true;
|
||||
transformOptions = self.clanLib.docs.stripStorePathsFromDeclarations;
|
||||
}).optionsJSON;
|
||||
|
||||
# Run: nix-unit --extra-experimental-features flakes --flake .#legacyPackages.x86_64-linux.evalTests
|
||||
legacyPackages.evalTests-inventory = import ./tests {
|
||||
inherit lib;
|
||||
clan-core = self;
|
||||
inherit (self) clanLib;
|
||||
inherit (self.inputs) nix-darwin;
|
||||
};
|
||||
|
||||
checks = {
|
||||
eval-lib-inventory = pkgs.runCommand "tests" { nativeBuildInputs = [ pkgs.nix-unit ]; } ''
|
||||
export HOME="$(realpath .)"
|
||||
export NIX_ABORT_ON_WARN=1
|
||||
nix-unit --eval-store "$HOME" \
|
||||
--extra-experimental-features flakes \
|
||||
--show-trace \
|
||||
${inputOverrides} \
|
||||
--flake ${
|
||||
lib.fileset.toSource {
|
||||
root = ../../..;
|
||||
fileset = lib.fileset.unions [
|
||||
../../../flake.nix
|
||||
../../../flake.lock
|
||||
(lib.fileset.fileFilter (file: file.name == "flake-module.nix") ../../..)
|
||||
../../../flakeModules
|
||||
../../../lib
|
||||
../../../nixosModules/clanCore
|
||||
../../../machines
|
||||
../../../inventory.json
|
||||
];
|
||||
}
|
||||
}#legacyPackages.${system}.evalTests-inventory
|
||||
|
||||
touch $out
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
@@ -3,51 +3,6 @@ let
|
||||
# Trim the .nix extension from a filename
|
||||
trimExtension = name: builtins.substring 0 (builtins.stringLength name - 4) name;
|
||||
|
||||
jsonWithoutHeader = clanLib.jsonschema {
|
||||
includeDefaults = true;
|
||||
header = { };
|
||||
};
|
||||
|
||||
getModulesSchema =
|
||||
{
|
||||
modules,
|
||||
clan-core,
|
||||
pkgs,
|
||||
}:
|
||||
lib.mapAttrs
|
||||
(
|
||||
_moduleName: rolesOptions:
|
||||
lib.mapAttrs (_roleName: options: jsonWithoutHeader.parseOptions options { }) rolesOptions
|
||||
)
|
||||
(
|
||||
clanLib.evalClan.evalClanModulesWithRoles {
|
||||
allModules = modules;
|
||||
inherit pkgs clan-core;
|
||||
}
|
||||
);
|
||||
|
||||
evalFrontmatter =
|
||||
{
|
||||
moduleName,
|
||||
instanceName,
|
||||
resolvedRoles,
|
||||
allModules,
|
||||
}:
|
||||
lib.evalModules {
|
||||
modules = [
|
||||
(getFrontmatter allModules.${moduleName} moduleName)
|
||||
./interface.nix
|
||||
{
|
||||
constraints.imports = [
|
||||
(lib.modules.importApply ../constraints {
|
||||
inherit moduleName resolvedRoles instanceName;
|
||||
allRoles = getRoles "inventory.modules" allModules moduleName;
|
||||
})
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
# For Documentation purposes only
|
||||
frontmatterOptions =
|
||||
(lib.evalModules {
|
||||
@@ -119,17 +74,12 @@ let
|
||||
builtins.readDir (checkedPath)
|
||||
)
|
||||
);
|
||||
|
||||
checkConstraints = args: (evalFrontmatter args).config.constraints.assertions;
|
||||
getFrontmatter = _modulepath: _modulename: "clanModules are removed!";
|
||||
in
|
||||
{
|
||||
inherit
|
||||
frontmatterOptions
|
||||
getModulesSchema
|
||||
getFrontmatter
|
||||
|
||||
checkConstraints
|
||||
getRoles
|
||||
;
|
||||
}
|
||||
|
||||
@@ -1,29 +1,14 @@
|
||||
{
|
||||
self,
|
||||
self',
|
||||
lib,
|
||||
pkgs,
|
||||
flakeOptions,
|
||||
...
|
||||
}:
|
||||
let
|
||||
|
||||
modulesSchema = self.clanLib.modules.getModulesSchema {
|
||||
modules = self.clanModules;
|
||||
inherit pkgs;
|
||||
clan-core = self;
|
||||
};
|
||||
|
||||
jsonLib = self.clanLib.jsonschema { inherit includeDefaults; };
|
||||
includeDefaults = true;
|
||||
|
||||
frontMatterSchema = jsonLib.parseOptions self.clanLib.modules.frontmatterOptions { };
|
||||
|
||||
inventorySchema = jsonLib.parseModule ({
|
||||
imports = [ ../../inventoryClass/interface.nix ];
|
||||
_module.args = { inherit (self) clanLib; };
|
||||
});
|
||||
|
||||
opts = (flakeOptions.flake.type.getSubOptions [ "flake" ]);
|
||||
clanOpts = opts.clan.type.getSubOptions [ "clan" ];
|
||||
include = [
|
||||
@@ -36,21 +21,14 @@ let
|
||||
"secrets"
|
||||
"templates"
|
||||
];
|
||||
clanSchema = jsonLib.parseOptions (lib.filterAttrs (n: _v: lib.elem n include) clanOpts) { };
|
||||
clanSchemaNix = jsonLib.parseOptions (lib.filterAttrs (n: _v: lib.elem n include) clanOpts) { };
|
||||
|
||||
renderSchema = pkgs.writers.writePython3Bin "render-schema" {
|
||||
flakeIgnore = [
|
||||
"F401"
|
||||
"E501"
|
||||
];
|
||||
} ./render_schema.py;
|
||||
|
||||
clan-schema-abstract = pkgs.stdenv.mkDerivation {
|
||||
clanSchemaJson = pkgs.stdenv.mkDerivation {
|
||||
name = "clan-schema-files";
|
||||
buildInputs = [ pkgs.cue ];
|
||||
src = ./.;
|
||||
buildPhase = ''
|
||||
export SCHEMA=${builtins.toFile "clan-schema.json" (builtins.toJSON clanSchema)}
|
||||
export SCHEMA=${builtins.toFile "clan-schema.json" (builtins.toJSON clanSchemaNix)}
|
||||
cp $SCHEMA schema.json
|
||||
# Also generate a CUE schema version that is derived from the JSON schema
|
||||
cue import -f -p compose -l '#Root:' schema.json
|
||||
@@ -63,29 +41,7 @@ in
|
||||
{
|
||||
inherit
|
||||
flakeOptions
|
||||
frontMatterSchema
|
||||
clanSchema
|
||||
inventorySchema
|
||||
modulesSchema
|
||||
renderSchema
|
||||
clan-schema-abstract
|
||||
clanSchemaNix
|
||||
clanSchemaJson
|
||||
;
|
||||
|
||||
# Inventory schema, with the modules schema added per role
|
||||
inventory =
|
||||
pkgs.runCommand "rendered"
|
||||
{
|
||||
buildInputs = [
|
||||
pkgs.python3
|
||||
self'.packages.clan-cli
|
||||
];
|
||||
}
|
||||
''
|
||||
export INVENTORY_SCHEMA_PATH=${builtins.toFile "inventory-schema.json" (builtins.toJSON inventorySchema)}
|
||||
export MODULES_SCHEMA_PATH=${builtins.toFile "modules-schema.json" (builtins.toJSON modulesSchema)}
|
||||
|
||||
mkdir $out
|
||||
# The python script will place the schemas in the output directory
|
||||
exec python3 ${renderSchema}/bin/render-schema
|
||||
'';
|
||||
}
|
||||
|
||||
@@ -1,162 +0,0 @@
|
||||
"""
|
||||
Python script to join the abstract inventory schema, with the concrete clan modules
|
||||
Inventory has slots which are 'Any' type.
|
||||
We don't want to evaluate the clanModules interface in Nix when evaluating the inventory
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from clan_lib.errors import ClanError
|
||||
|
||||
# Get environment variables
|
||||
INVENTORY_SCHEMA_PATH = Path(os.environ["INVENTORY_SCHEMA_PATH"])
|
||||
|
||||
# { [moduleName] :: { [roleName] :: SCHEMA }}
|
||||
MODULES_SCHEMA_PATH = Path(os.environ["MODULES_SCHEMA_PATH"])
|
||||
|
||||
OUT = os.environ.get("out")
|
||||
|
||||
if not INVENTORY_SCHEMA_PATH:
|
||||
msg = f"Environment variables are not set correctly: INVENTORY_SCHEMA_PATH={INVENTORY_SCHEMA_PATH}."
|
||||
raise ClanError(msg)
|
||||
|
||||
if not MODULES_SCHEMA_PATH:
|
||||
msg = f"Environment variables are not set correctly: MODULES_SCHEMA_PATH={MODULES_SCHEMA_PATH}."
|
||||
raise ClanError(msg)
|
||||
|
||||
if not OUT:
|
||||
msg = f"Environment variables are not set correctly: OUT={OUT}."
|
||||
raise ClanError(msg)
|
||||
|
||||
|
||||
def service_roles_to_schema(
|
||||
schema: dict[str, Any],
|
||||
service_name: str,
|
||||
roles: list[str],
|
||||
roles_schemas: dict[str, dict[str, Any]],
|
||||
# Original service properties: {'config': Schema, 'machines': Schema, 'meta': Schema, 'extraModules': Schema, ...?}
|
||||
orig: dict[str, Any],
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Add roles to the service schema
|
||||
"""
|
||||
# collect all the roles for the service, to form a type union
|
||||
all_roles_schema: list[dict[str, Any]] = []
|
||||
for role_name, role_schema in roles_schemas.items():
|
||||
role_schema["title"] = f"{module_name}-config-role-{role_name}"
|
||||
all_roles_schema.append(role_schema)
|
||||
|
||||
role_schema = {}
|
||||
for role in roles:
|
||||
role_schema[role] = {
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
**orig["roles"]["additionalProperties"]["properties"],
|
||||
"config": {
|
||||
**roles_schemas.get(role, {}),
|
||||
"title": f"{service_name}-config-role-{role}",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"additionalProperties": False,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
machines_schema = {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
**orig["machines"]["additionalProperties"]["properties"],
|
||||
"config": {
|
||||
"title": f"{service_name}-config",
|
||||
"oneOf": all_roles_schema,
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"additionalProperties": False,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
services["properties"][service_name] = {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
# Original inventory schema
|
||||
**orig,
|
||||
# Inject the roles schemas
|
||||
"roles": {
|
||||
"title": f"{service_name}-roles",
|
||||
"type": "object",
|
||||
"properties": role_schema,
|
||||
"additionalProperties": False,
|
||||
},
|
||||
"machines": machines_schema,
|
||||
"config": {
|
||||
"title": f"{service_name}-config",
|
||||
"oneOf": all_roles_schema,
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"additionalProperties": False,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return schema
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
print("Joining inventory schema with modules schema")
|
||||
print(f"Inventory schema path: {INVENTORY_SCHEMA_PATH}")
|
||||
print(f"Modules schema path: {MODULES_SCHEMA_PATH}")
|
||||
|
||||
modules_schema = {}
|
||||
with Path.open(MODULES_SCHEMA_PATH) as f:
|
||||
modules_schema = json.load(f)
|
||||
|
||||
inventory_schema = {}
|
||||
with Path.open(INVENTORY_SCHEMA_PATH) as f:
|
||||
inventory_schema = json.load(f)
|
||||
|
||||
services = inventory_schema["properties"]["services"]
|
||||
original_service_props = services["additionalProperties"]["additionalProperties"][
|
||||
"properties"
|
||||
].copy()
|
||||
# Init the outer services schema
|
||||
# Properties (service names) will be filled in the next step
|
||||
services = {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
# Service names
|
||||
},
|
||||
"additionalProperties": False,
|
||||
}
|
||||
|
||||
for module_name, roles_schemas in modules_schema.items():
|
||||
# Add the roles schemas to the service schema
|
||||
roles = list(roles_schemas.keys())
|
||||
if roles:
|
||||
services = service_roles_to_schema(
|
||||
services,
|
||||
module_name,
|
||||
roles,
|
||||
roles_schemas,
|
||||
original_service_props,
|
||||
)
|
||||
|
||||
inventory_schema["properties"]["services"] = services
|
||||
|
||||
outpath = Path(OUT)
|
||||
with (outpath / "schema.json").open("w") as f:
|
||||
json.dump(inventory_schema, f, indent=2)
|
||||
|
||||
with (outpath / "modules_schemas.json").open("w") as f:
|
||||
json.dump(modules_schema, f, indent=2)
|
||||
@@ -1,90 +0,0 @@
|
||||
{
|
||||
clan-core,
|
||||
nix-darwin,
|
||||
lib,
|
||||
clanLib,
|
||||
}:
|
||||
let
|
||||
# TODO: Unify these tests with clan tests
|
||||
clan =
|
||||
m:
|
||||
lib.evalModules {
|
||||
specialArgs = { inherit clan-core nix-darwin clanLib; };
|
||||
modules = [
|
||||
clan-core.modules.clan.default
|
||||
{
|
||||
self = { };
|
||||
}
|
||||
m
|
||||
];
|
||||
};
|
||||
in
|
||||
{
|
||||
test_inventory_a =
|
||||
let
|
||||
eval = clan {
|
||||
inventory = {
|
||||
machines = {
|
||||
A = { };
|
||||
};
|
||||
services = {
|
||||
legacyModule = { };
|
||||
};
|
||||
modules = {
|
||||
legacyModule = ./legacyModule;
|
||||
};
|
||||
};
|
||||
directory = ./.;
|
||||
};
|
||||
in
|
||||
{
|
||||
inherit eval;
|
||||
expr = {
|
||||
legacyModule = lib.filterAttrs (
|
||||
name: _: name == "isClanModule"
|
||||
) eval.config.clanInternals.inventoryClass.machines.A.compiledServices.legacyModule;
|
||||
};
|
||||
expected = {
|
||||
legacyModule = {
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
test_inventory_empty =
|
||||
let
|
||||
eval = clan {
|
||||
inventory = { };
|
||||
directory = ./.;
|
||||
};
|
||||
in
|
||||
{
|
||||
# Empty inventory should return an empty module
|
||||
expr = eval.config.clanInternals.inventoryClass.machines;
|
||||
expected = { };
|
||||
};
|
||||
|
||||
test_inventory_module_doesnt_exist =
|
||||
let
|
||||
eval = clan {
|
||||
directory = ./.;
|
||||
inventory = {
|
||||
services = {
|
||||
fanatasy.instance_1 = {
|
||||
roles.default.machines = [ "machine_1" ];
|
||||
};
|
||||
};
|
||||
machines = {
|
||||
"machine_1" = { };
|
||||
};
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
inherit eval;
|
||||
expr = eval.config.clanInternals.inventoryClass.machines.machine_1.machineImports;
|
||||
expectedError = {
|
||||
type = "ThrownError";
|
||||
msg = "ClanModule not found*";
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,4 +0,0 @@
|
||||
---
|
||||
features = [ "inventory" ]
|
||||
---
|
||||
Description
|
||||
@@ -1,9 +0,0 @@
|
||||
{
|
||||
lib,
|
||||
clan-core,
|
||||
...
|
||||
}:
|
||||
{
|
||||
# Just some random stuff
|
||||
options.test = lib.mapAttrs clan-core;
|
||||
}
|
||||
@@ -1,78 +0,0 @@
|
||||
# Integrity validation of the inventory
|
||||
{ config, lib, ... }:
|
||||
{
|
||||
# Assertion must be of type
|
||||
# { assertion :: bool, message :: string, severity :: "error" | "warning" }
|
||||
imports = [
|
||||
# Check that each machine used in a service is defined in the top-level machines
|
||||
{
|
||||
assertions = lib.foldlAttrs (
|
||||
ass1: serviceName: c:
|
||||
ass1
|
||||
++ lib.foldlAttrs (
|
||||
ass2: instanceName: instanceConfig:
|
||||
let
|
||||
topLevelMachines = lib.attrNames config.machines;
|
||||
# All machines must be defined in the top-level machines
|
||||
assertions = lib.foldlAttrs (
|
||||
assertions: roleName: role:
|
||||
assertions
|
||||
++ builtins.filter (a: !a.assertion) (
|
||||
builtins.map (m: {
|
||||
assertion = builtins.elem m topLevelMachines;
|
||||
message = ''
|
||||
Machine '${m}' is not defined in the inventory. This might still work, if the machine is defined via nix.
|
||||
|
||||
Defined in service: '${serviceName}' instance: '${instanceName}' role: '${roleName}'.
|
||||
|
||||
Inventory machines:
|
||||
${builtins.concatStringsSep "\n" (map (n: "'${n}'") topLevelMachines)}
|
||||
'';
|
||||
severity = "warning";
|
||||
}) role.machines
|
||||
)
|
||||
) [ ] instanceConfig.roles;
|
||||
in
|
||||
ass2 ++ assertions
|
||||
) [ ] c
|
||||
) [ ] config.services;
|
||||
}
|
||||
# Check that each tag used in a role is defined in at least one machines tags
|
||||
{
|
||||
assertions = lib.foldlAttrs (
|
||||
ass1: serviceName: c:
|
||||
ass1
|
||||
++ lib.foldlAttrs (
|
||||
ass2: instanceName: instanceConfig:
|
||||
let
|
||||
allTags = lib.foldlAttrs (
|
||||
tags: _machineName: machine:
|
||||
tags ++ machine.tags
|
||||
) [ ] config.machines;
|
||||
# All machines must be defined in the top-level machines
|
||||
assertions = lib.foldlAttrs (
|
||||
assertions: roleName: role:
|
||||
assertions
|
||||
++ builtins.filter (a: !a.assertion) (
|
||||
builtins.map (m: {
|
||||
assertion = builtins.elem m allTags;
|
||||
message = ''
|
||||
Tag '${m}' is not defined in the inventory.
|
||||
|
||||
Defined in service: '${serviceName}' instance: '${instanceName}' role: '${roleName}'.
|
||||
|
||||
Available tags:
|
||||
${builtins.concatStringsSep "\n" (map (n: "'${n}'") allTags)}
|
||||
'';
|
||||
severity = "error";
|
||||
}) role.tags
|
||||
)
|
||||
) [ ] instanceConfig.roles;
|
||||
in
|
||||
ass2 ++ assertions
|
||||
) [ ] c
|
||||
) [ ] config.services;
|
||||
}
|
||||
];
|
||||
|
||||
}
|
||||
@@ -1,268 +1,5 @@
|
||||
{
|
||||
lib,
|
||||
config,
|
||||
clanLib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
inherit (config) inventory directory;
|
||||
resolveTags =
|
||||
# Inventory, { machines :: [string], tags :: [string] }
|
||||
{
|
||||
serviceName,
|
||||
instanceName,
|
||||
roleName,
|
||||
inventory,
|
||||
members,
|
||||
}:
|
||||
{
|
||||
machines =
|
||||
members.machines or [ ]
|
||||
++ (builtins.foldl' (
|
||||
acc: tag:
|
||||
let
|
||||
# For error printing
|
||||
availableTags = lib.foldlAttrs (
|
||||
acc: _: v:
|
||||
v.tags or [ ] ++ acc
|
||||
) [ ] (inventory.machines);
|
||||
|
||||
tagMembers = builtins.attrNames (
|
||||
lib.filterAttrs (_n: v: builtins.elem tag v.tags or [ ]) inventory.machines
|
||||
);
|
||||
in
|
||||
if tagMembers == [ ] then
|
||||
lib.warn ''
|
||||
inventory.services.${serviceName}.${instanceName}: - ${roleName} tags: no machine with tag '${tag}' found.
|
||||
Available tags: ${builtins.toJSON (lib.unique availableTags)}
|
||||
'' [ ]
|
||||
else
|
||||
acc ++ tagMembers
|
||||
) [ ] members.tags or [ ]);
|
||||
};
|
||||
|
||||
checkService =
|
||||
modulepath: serviceName:
|
||||
builtins.elem "inventory" (clanLib.modules.getFrontmatter modulepath serviceName).features or [ ];
|
||||
|
||||
compileMachine =
|
||||
{ machineConfig }:
|
||||
{
|
||||
machineImports = [
|
||||
(lib.optionalAttrs (machineConfig.deploy.targetHost or null != null) {
|
||||
config.clan.core.networking.targetHost = lib.mkForce machineConfig.deploy.targetHost;
|
||||
})
|
||||
(lib.optionalAttrs (machineConfig.deploy.buildHost or null != null) {
|
||||
config.clan.core.networking.buildHost = lib.mkForce machineConfig.deploy.buildHost;
|
||||
})
|
||||
];
|
||||
assertions = { };
|
||||
};
|
||||
|
||||
resolveImports =
|
||||
{
|
||||
supportedRoles,
|
||||
resolvedRolesPerInstance,
|
||||
serviceConfigs,
|
||||
serviceName,
|
||||
machineName,
|
||||
getRoleFile,
|
||||
}:
|
||||
(lib.foldlAttrs (
|
||||
# : [ Modules ] -> String -> ServiceConfig -> [ Modules ]
|
||||
acc2: instanceName: serviceConfig:
|
||||
let
|
||||
resolvedRoles = resolvedRolesPerInstance.${instanceName};
|
||||
|
||||
isInService = builtins.any (members: builtins.elem machineName members.machines) (
|
||||
builtins.attrValues resolvedRoles
|
||||
);
|
||||
|
||||
# all roles where the machine is present
|
||||
machineRoles = builtins.attrNames (
|
||||
lib.filterAttrs (_role: roleConfig: builtins.elem machineName roleConfig.machines) resolvedRoles
|
||||
);
|
||||
|
||||
machineServiceConfig = (serviceConfig.machines.${machineName} or { }).config or { };
|
||||
globalConfig = serviceConfig.config or { };
|
||||
|
||||
globalExtraModules = serviceConfig.extraModules or [ ];
|
||||
machineExtraModules = serviceConfig.machines.${machineName}.extraModules or [ ];
|
||||
roleServiceExtraModules = builtins.foldl' (
|
||||
acc: role: acc ++ serviceConfig.roles.${role}.extraModules or [ ]
|
||||
) [ ] machineRoles;
|
||||
|
||||
# TODO: maybe optimize this don't lookup the role in inverse roles. Imports are not lazy
|
||||
roleModules = builtins.map (
|
||||
role:
|
||||
if builtins.elem role supportedRoles && inventory.modules ? ${serviceName} then
|
||||
getRoleFile role
|
||||
else
|
||||
throw "Module ${serviceName} doesn't have role: '${role}'. Role: ${
|
||||
inventory.modules.${serviceName}
|
||||
}/roles/${role}.nix not found."
|
||||
) machineRoles;
|
||||
|
||||
roleServiceConfigs = builtins.filter (m: m != { }) (
|
||||
builtins.map (role: serviceConfig.roles.${role}.config or { }) machineRoles
|
||||
);
|
||||
|
||||
extraModules = map (s: if builtins.typeOf s == "string" then "${directory}/${s}" else s) (
|
||||
globalExtraModules ++ machineExtraModules ++ roleServiceExtraModules
|
||||
);
|
||||
|
||||
features =
|
||||
(clanLib.modules.getFrontmatter inventory.modules.${serviceName} serviceName).features or [ ];
|
||||
deprecationWarning = lib.optionalAttrs (builtins.elem "deprecated" features) {
|
||||
warnings = [
|
||||
''
|
||||
The '${serviceName}' module has been migrated from `inventory.services` to `inventory.instances`
|
||||
See https://docs.clan.lol/guides/clanServices/ for usage.
|
||||
''
|
||||
];
|
||||
};
|
||||
in
|
||||
if !(serviceConfig.enabled or true) then
|
||||
acc2
|
||||
else if isInService then
|
||||
acc2
|
||||
++ [
|
||||
deprecationWarning
|
||||
{
|
||||
imports = roleModules ++ extraModules;
|
||||
clan.inventory.services.${serviceName}.${instanceName} = {
|
||||
roles = resolvedRoles;
|
||||
# TODO: Add inverseRoles to the service config if needed
|
||||
# inherit inverseRoles;
|
||||
};
|
||||
}
|
||||
(lib.optionalAttrs (globalConfig != { } || machineServiceConfig != { } || roleServiceConfigs != [ ])
|
||||
{
|
||||
clan.${serviceName} = lib.mkMerge (
|
||||
[
|
||||
globalConfig
|
||||
machineServiceConfig
|
||||
]
|
||||
++ roleServiceConfigs
|
||||
);
|
||||
}
|
||||
)
|
||||
]
|
||||
else
|
||||
acc2
|
||||
) [ ] (serviceConfigs));
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
./interface.nix
|
||||
];
|
||||
config = {
|
||||
machines = builtins.mapAttrs (
|
||||
machineName: machineConfig: m:
|
||||
let
|
||||
compiledServices = lib.mapAttrs (
|
||||
_: serviceConfigs:
|
||||
(
|
||||
{ config, ... }:
|
||||
let
|
||||
serviceName = config.serviceName;
|
||||
|
||||
getRoleFile = role: builtins.seq role inventory.modules.${serviceName} + "/roles/${role}.nix";
|
||||
in
|
||||
{
|
||||
_file = "inventory/builder.nix";
|
||||
_module.args = {
|
||||
inherit
|
||||
resolveTags
|
||||
inventory
|
||||
clanLib
|
||||
machineName
|
||||
serviceConfigs
|
||||
;
|
||||
};
|
||||
imports = [
|
||||
./roles.nix
|
||||
];
|
||||
|
||||
machineImports = resolveImports {
|
||||
supportedRoles = config.supportedRoles;
|
||||
resolvedRolesPerInstance = config.resolvedRolesPerInstance;
|
||||
inherit
|
||||
serviceConfigs
|
||||
serviceName
|
||||
machineName
|
||||
getRoleFile
|
||||
;
|
||||
};
|
||||
|
||||
# Assertions
|
||||
assertions = {
|
||||
"checkservice.${serviceName}" = {
|
||||
assertion = checkService inventory.modules.${serviceName} serviceName;
|
||||
message = ''
|
||||
Service ${serviceName} cannot be used in inventory. It does not declare the 'inventory' feature.
|
||||
|
||||
To allow it add the following to the beginning of the README.md of the module:
|
||||
|
||||
---
|
||||
...
|
||||
|
||||
features = [ "inventory" ]
|
||||
---
|
||||
|
||||
Also make sure to test the module with the 'inventory' feature enabled.
|
||||
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
) (config.inventory.services or { });
|
||||
|
||||
compiledMachine = compileMachine {
|
||||
inherit
|
||||
machineConfig
|
||||
;
|
||||
};
|
||||
|
||||
machineImports = (
|
||||
compiledMachine.machineImports
|
||||
++ builtins.foldl' (
|
||||
acc: service:
|
||||
let
|
||||
failedAssertions = (lib.filterAttrs (_: v: !v.assertion) service.assertions);
|
||||
failedAssertionsImports =
|
||||
if failedAssertions != { } then
|
||||
[
|
||||
{
|
||||
clan.inventory.assertions = failedAssertions;
|
||||
}
|
||||
]
|
||||
else
|
||||
[
|
||||
{
|
||||
clan.inventory.assertions = {
|
||||
"alive.assertion.inventory" = {
|
||||
assertion = true;
|
||||
message = ''
|
||||
No failed assertions found for machine ${machineName}. This will never be displayed.
|
||||
It is here for testing purposes.
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
];
|
||||
in
|
||||
acc
|
||||
++ service.machineImports
|
||||
# Import failed assertions
|
||||
++ failedAssertionsImports
|
||||
) [ ] (builtins.attrValues m.config.compiledServices)
|
||||
);
|
||||
in
|
||||
{
|
||||
inherit machineImports compiledServices compiledMachine;
|
||||
}
|
||||
) (inventory.machines or { });
|
||||
};
|
||||
}
|
||||
|
||||
@@ -16,76 +16,13 @@ in
|
||||
type = types.raw;
|
||||
};
|
||||
machines = mkOption {
|
||||
type = types.attrsOf (
|
||||
submodule (
|
||||
{ name, ... }:
|
||||
let
|
||||
machineName = name;
|
||||
in
|
||||
{
|
||||
options = {
|
||||
compiledMachine = mkOption {
|
||||
type = types.raw;
|
||||
};
|
||||
compiledServices = mkOption {
|
||||
# type = types.attrsOf;
|
||||
type = types.attrsOf (
|
||||
types.submoduleWith {
|
||||
modules = [
|
||||
(
|
||||
{ name, ... }:
|
||||
let
|
||||
serviceName = name;
|
||||
in
|
||||
{
|
||||
options = {
|
||||
machineName = mkOption {
|
||||
default = machineName;
|
||||
readOnly = true;
|
||||
};
|
||||
serviceName = mkOption {
|
||||
default = serviceName;
|
||||
readOnly = true;
|
||||
};
|
||||
# Outputs
|
||||
machineImports = mkOption {
|
||||
type = types.listOf types.raw;
|
||||
};
|
||||
supportedRoles = mkOption {
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
matchedRoles = mkOption {
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
machinesRoles = mkOption {
|
||||
type = types.attrsOf (types.listOf types.str);
|
||||
};
|
||||
resolvedRolesPerInstance = mkOption {
|
||||
type = types.attrsOf (
|
||||
types.attrsOf (submodule {
|
||||
options.machines = mkOption {
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
})
|
||||
);
|
||||
};
|
||||
assertions = mkOption {
|
||||
type = types.attrsOf types.raw;
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
];
|
||||
}
|
||||
);
|
||||
};
|
||||
machineImports = mkOption {
|
||||
type = types.listOf types.raw;
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
);
|
||||
type = types.attrsOf (submodule ({
|
||||
options = {
|
||||
machineImports = mkOption {
|
||||
type = types.listOf types.raw;
|
||||
};
|
||||
};
|
||||
}));
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,65 +0,0 @@
|
||||
{
|
||||
lib,
|
||||
config,
|
||||
resolveTags,
|
||||
inventory,
|
||||
clanLib,
|
||||
machineName,
|
||||
serviceConfigs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
serviceName = config.serviceName;
|
||||
in
|
||||
{
|
||||
# Roles resolution
|
||||
# : List String
|
||||
supportedRoles = clanLib.modules.getRoles "inventory.modules" inventory.modules serviceName;
|
||||
matchedRoles = builtins.attrNames (
|
||||
lib.filterAttrs (_: ms: builtins.elem machineName ms) config.machinesRoles
|
||||
);
|
||||
resolvedRolesPerInstance = lib.mapAttrs (
|
||||
instanceName: instanceConfig:
|
||||
let
|
||||
resolvedRoles = lib.genAttrs config.supportedRoles (
|
||||
roleName:
|
||||
resolveTags {
|
||||
members = instanceConfig.roles.${roleName} or { };
|
||||
inherit
|
||||
instanceName
|
||||
serviceName
|
||||
roleName
|
||||
inventory
|
||||
;
|
||||
}
|
||||
);
|
||||
usedRoles = builtins.attrNames instanceConfig.roles;
|
||||
unmatchedRoles = builtins.filter (role: !builtins.elem role config.supportedRoles) usedRoles;
|
||||
in
|
||||
if unmatchedRoles != [ ] then
|
||||
throw ''
|
||||
Roles ${builtins.toJSON unmatchedRoles} are not defined in the service ${serviceName}.
|
||||
Instance: '${instanceName}'
|
||||
Please use one of available roles: ${builtins.toJSON config.supportedRoles}
|
||||
''
|
||||
else
|
||||
resolvedRoles
|
||||
) serviceConfigs;
|
||||
|
||||
machinesRoles = builtins.zipAttrsWith (
|
||||
_n: vs:
|
||||
let
|
||||
flat = builtins.foldl' (acc: s: acc ++ s.machines) [ ] vs;
|
||||
in
|
||||
lib.unique flat
|
||||
) (builtins.attrValues config.resolvedRolesPerInstance);
|
||||
|
||||
assertions = lib.concatMapAttrs (
|
||||
instanceName: resolvedRoles:
|
||||
clanLib.modules.checkConstraints {
|
||||
moduleName = serviceName;
|
||||
allModules = inventory.modules;
|
||||
inherit resolvedRoles instanceName;
|
||||
}
|
||||
) config.resolvedRolesPerInstance;
|
||||
}
|
||||
@@ -31,70 +31,13 @@ let
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
moduleConfig = lib.mkOption {
|
||||
default = { };
|
||||
# TODO: use types.deferredModule
|
||||
# clan.borgbackup MUST be defined as submodule
|
||||
type = types.attrsOf types.anything;
|
||||
description = ''
|
||||
Configuration of the specific clanModule.
|
||||
|
||||
!!! Note
|
||||
Configuration is passed to the nixos configuration scoped to the module.
|
||||
|
||||
```nix
|
||||
clan.<serviceName> = { ... # Config }
|
||||
```
|
||||
'';
|
||||
};
|
||||
|
||||
extraModulesOption = lib.mkOption {
|
||||
description = ''
|
||||
List of additionally imported `.nix` expressions.
|
||||
|
||||
Supported types:
|
||||
|
||||
- **Strings**: Interpreted relative to the 'directory' passed to `lib.clan`.
|
||||
- **Paths**: should be relative to the current file.
|
||||
- **Any**: Nix expression must be serializable to JSON.
|
||||
|
||||
!!! Note
|
||||
**The import only happens if the machine is part of the service or role.**
|
||||
|
||||
Other types are passed through to the nixos configuration.
|
||||
|
||||
???+ Example
|
||||
To import the `special.nix` file
|
||||
|
||||
```
|
||||
. Clan Directory
|
||||
├── flake.nix
|
||||
...
|
||||
└── modules
|
||||
├── special.nix
|
||||
└── ...
|
||||
```
|
||||
|
||||
```nix
|
||||
{
|
||||
extraModules = [ "modules/special.nix" ];
|
||||
}
|
||||
```
|
||||
'';
|
||||
apply = value: if lib.isString value then value else builtins.seq (builtins.toJSON value) value;
|
||||
default = [ ];
|
||||
type = types.listOf (
|
||||
types.oneOf [
|
||||
types.str
|
||||
types.anything
|
||||
]
|
||||
);
|
||||
};
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
./assertions.nix
|
||||
(lib.mkRemovedOptionModule [ "services" ] ''
|
||||
The `inventory.services` option has been removed. Use `inventory.instances` instead.
|
||||
See: https://docs.clan.lol/concepts/inventory/#services
|
||||
'')
|
||||
];
|
||||
options = {
|
||||
# Internal things
|
||||
@@ -312,6 +255,16 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
installedAt = lib.mkOption {
|
||||
type = types.nullOr types.int;
|
||||
default = null;
|
||||
description = ''
|
||||
Indicates when the machine was first installed.
|
||||
|
||||
Timestamp is in unix time (seconds since epoch).
|
||||
'';
|
||||
};
|
||||
|
||||
tags = lib.mkOption {
|
||||
description = ''
|
||||
List of tags for the machine.
|
||||
@@ -415,160 +368,5 @@ in
|
||||
);
|
||||
default = { };
|
||||
};
|
||||
|
||||
services = lib.mkOption {
|
||||
# TODO: deprecate these options
|
||||
# services are deprecated in favor of `instances`
|
||||
# visible = false;
|
||||
description = ''
|
||||
Services of the inventory.
|
||||
|
||||
- The first `<name>` is the moduleName. It must be a valid clanModule name.
|
||||
- The second `<name>` is an arbitrary instance name.
|
||||
|
||||
???+ Example
|
||||
```nix
|
||||
# ClanModule name. See the module documentation for the available modules.
|
||||
# ↓ ↓ Instance name, can be anything, some services might use it as a unique identifier.
|
||||
services.borgbackup."instance_1" = {
|
||||
roles.client.machines = ["machineA"];
|
||||
};
|
||||
```
|
||||
|
||||
!!! Note
|
||||
Services MUST be added to machines via `roles` exclusively.
|
||||
See [`roles.<rolename>.machines`](#inventory.services.roles.machines) or [`roles.<rolename>.tags`](#inventory.services.roles.tags) for more information.
|
||||
'';
|
||||
default = { };
|
||||
type = types.attrsOf (
|
||||
types.attrsOf (
|
||||
types.submodule (
|
||||
# instance name
|
||||
{ name, ... }:
|
||||
{
|
||||
options.enabled = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Enable or disable the complete service.
|
||||
|
||||
If the service is disabled, it will not be added to any machine.
|
||||
|
||||
!!! Note
|
||||
This flag is primarily used to temporarily disable a service.
|
||||
I.e. A 'backup service' without any 'server' might be incomplete and would cause failure if enabled.
|
||||
'';
|
||||
};
|
||||
options.meta = metaOptionsWith name;
|
||||
options.extraModules = extraModulesOption;
|
||||
options.config = moduleConfig // {
|
||||
description = ''
|
||||
Configuration of the specific clanModule.
|
||||
|
||||
!!! Note
|
||||
Configuration is passed to the nixos configuration scoped to the module.
|
||||
|
||||
```nix
|
||||
clan.<serviceName> = { ... # Config }
|
||||
```
|
||||
|
||||
???+ Example
|
||||
|
||||
For `services.borgbackup` the config is the passed to the machine with the prefix of `clan.borgbackup`.
|
||||
This means all config values are mapped to the `borgbackup` clanModule exclusively (`config.clan.borgbackup`).
|
||||
|
||||
```nix
|
||||
{
|
||||
services.borgbackup."instance_1".config = {
|
||||
destinations = [ ... ];
|
||||
# See the 'borgbackup' module docs for all options
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
!!! Note
|
||||
The module author is responsible for supporting multiple instance configurations in different roles.
|
||||
See each clanModule's documentation for more information.
|
||||
'';
|
||||
};
|
||||
options.machines = lib.mkOption {
|
||||
description = ''
|
||||
Attribute set of machines specific config for the service.
|
||||
|
||||
Will be merged with other service configs, such as the role config and the global config.
|
||||
For machine specific overrides use `mkForce` or other higher priority methods.
|
||||
|
||||
???+ Example
|
||||
|
||||
```{.nix hl_lines="4-7"}
|
||||
services.borgbackup."instance_1" = {
|
||||
roles.client.machines = ["machineA"];
|
||||
|
||||
machines.machineA.config = {
|
||||
# Additional specific config for the machine
|
||||
# This is merged with all other config places
|
||||
};
|
||||
};
|
||||
```
|
||||
'';
|
||||
default = { };
|
||||
type = types.attrsOf (
|
||||
types.submodule {
|
||||
options.extraModules = extraModulesOption;
|
||||
options.config = moduleConfig // {
|
||||
description = ''
|
||||
Additional configuration of the specific machine.
|
||||
|
||||
See how [`service.<name>.<name>.config`](#inventory.services.config) works in general for further information.
|
||||
'';
|
||||
};
|
||||
}
|
||||
);
|
||||
};
|
||||
options.roles = lib.mkOption {
|
||||
default = { };
|
||||
type = types.attrsOf (
|
||||
types.submodule {
|
||||
options.machines = lib.mkOption {
|
||||
default = [ ];
|
||||
type = types.listOf types.str;
|
||||
example = [ "machineA" ];
|
||||
description = ''
|
||||
List of machines which are part of the role.
|
||||
|
||||
The machines are referenced by their `attributeName` in the `inventory.machines` attribute set.
|
||||
|
||||
Memberships are declared here to determine which machines are part of the service.
|
||||
|
||||
Alternatively, `tags` can be used to determine the membership, more dynamically.
|
||||
'';
|
||||
};
|
||||
options.tags = lib.mkOption {
|
||||
default = [ ];
|
||||
apply = lib.unique;
|
||||
type = types.listOf types.str;
|
||||
description = ''
|
||||
List of tags which are used to determine the membership of the role.
|
||||
|
||||
The tags are matched against the `inventory.machines.<machineName>.tags` attribute set.
|
||||
If a machine has at least one tag of the role, it is part of the role.
|
||||
'';
|
||||
};
|
||||
options.config = moduleConfig // {
|
||||
description = ''
|
||||
Additional configuration of the specific role.
|
||||
|
||||
See how [`service.<name>.<name>.config`](#inventory.services.config) works in general for further information.
|
||||
'';
|
||||
};
|
||||
options.extraModules = extraModulesOption;
|
||||
}
|
||||
);
|
||||
};
|
||||
}
|
||||
)
|
||||
)
|
||||
);
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
@@ -11,6 +11,10 @@
|
||||
default =
|
||||
builtins.removeAttrs (clanLib.introspection.getPrios { options = config.inventory.options; })
|
||||
# tags are freeformType which is not supported yet.
|
||||
[ "tags" ];
|
||||
# services is removed and throws an error if accessed.
|
||||
[
|
||||
"tags"
|
||||
"services"
|
||||
];
|
||||
};
|
||||
}
|
||||
|
||||
@@ -27,7 +27,9 @@ in
|
||||
default = { };
|
||||
};
|
||||
tags = lib.mkOption {
|
||||
type = types.attrsOf (types.submodule { });
|
||||
type = types.coercedTo (types.listOf types.str) (t: lib.genAttrs t (_: { })) (
|
||||
types.attrsOf (types.submodule { })
|
||||
);
|
||||
default = { };
|
||||
};
|
||||
settings =
|
||||
|
||||
@@ -23,6 +23,12 @@ let
|
||||
};
|
||||
in
|
||||
{
|
||||
options.staticModules = lib.mkOption {
|
||||
readOnly = true;
|
||||
type = lib.types.raw;
|
||||
|
||||
apply = moduleSet: lib.mapAttrs (inspectModule "<clan-core>") moduleSet;
|
||||
};
|
||||
options.modulesPerSource = lib.mkOption {
|
||||
# { sourceName :: { moduleName :: {} }}
|
||||
readOnly = true;
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
"""Test driver for container-based NixOS testing."""
|
||||
|
||||
import argparse
|
||||
import ctypes
|
||||
import os
|
||||
@@ -11,7 +13,7 @@ import uuid
|
||||
from collections.abc import Callable
|
||||
from contextlib import _GeneratorContextManager
|
||||
from dataclasses import dataclass
|
||||
from functools import cached_property
|
||||
from functools import cache, cached_property
|
||||
from pathlib import Path
|
||||
from tempfile import NamedTemporaryFile, TemporaryDirectory
|
||||
from typing import Any
|
||||
@@ -20,23 +22,21 @@ from colorama import Fore, Style
|
||||
|
||||
from .logger import AbstractLogger, CompositeLogger, TerminalLogger
|
||||
|
||||
# Global flag to track if test environment has been initialized
|
||||
_test_env_initialized = False
|
||||
|
||||
|
||||
@cache
|
||||
def init_test_environment() -> None:
|
||||
"""Set up the test environment (network bridge, /etc/passwd) once."""
|
||||
global _test_env_initialized
|
||||
if _test_env_initialized:
|
||||
return
|
||||
|
||||
# Set up network bridge
|
||||
subprocess.run(
|
||||
["ip", "link", "add", "br0", "type", "bridge"], check=True, text=True
|
||||
["ip", "link", "add", "br0", "type", "bridge"],
|
||||
check=True,
|
||||
text=True,
|
||||
)
|
||||
subprocess.run(["ip", "link", "set", "br0", "up"], check=True, text=True)
|
||||
subprocess.run(
|
||||
["ip", "addr", "add", "192.168.1.254/24", "dev", "br0"], check=True, text=True
|
||||
["ip", "addr", "add", "192.168.1.254/24", "dev", "br0"],
|
||||
check=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
# Set up minimal passwd file for unprivileged operations
|
||||
@@ -44,7 +44,7 @@ def init_test_environment() -> None:
|
||||
passwd_content = """root:x:0:0:Root:/root:/bin/sh
|
||||
nixbld:x:1000:100:Nix build user:/tmp:/bin/sh
|
||||
nobody:x:65534:65534:Nobody:/:/bin/sh
|
||||
"""
|
||||
""" # noqa: S105 - This is not a password, it's a Unix passwd file format for testing
|
||||
|
||||
with NamedTemporaryFile(mode="w", delete=False, prefix="test-passwd-") as f:
|
||||
f.write(passwd_content)
|
||||
@@ -84,8 +84,6 @@ nogroup:x:65534:
|
||||
errno = ctypes.get_errno()
|
||||
raise OSError(errno, os.strerror(errno), "Failed to mount group")
|
||||
|
||||
_test_env_initialized = True
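The diff above drops the hand-rolled `_test_env_initialized` flag in favour of `functools.cache`, which already guarantees that the body runs only once per process. A minimal standalone sketch of that pattern (the setup work is faked with a print):

```python
from functools import cache


@cache
def init_once() -> None:
    # Runs only on the first call; functools.cache memoises the result of
    # the argument-free call, replacing a manual "already initialized" flag.
    print("setting up bridge and passwd ...")


init_once()  # prints the message
init_once()  # cached, does nothing
```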
|
||||
|
||||
|
||||
# Load the C library
|
||||
libc = ctypes.CDLL("libc.so.6", use_errno=True)
|
||||
@@ -111,8 +109,7 @@ def mount(
|
||||
mountflags: int = 0,
|
||||
data: str | None = None,
|
||||
) -> None:
|
||||
"""
|
||||
A Python wrapper for the mount system call.
|
||||
"""A Python wrapper for the mount system call.
|
||||
|
||||
:param source: The source of the file system (e.g., device name, remote filesystem).
|
||||
:param target: The mount point (an existing directory).
|
||||
@@ -129,7 +126,11 @@ def mount(
|
||||
|
||||
# Call the mount system call
|
||||
result = libc.mount(
|
||||
source_c, target_c, fstype_c, ctypes.c_ulong(mountflags), data_c
|
||||
source_c,
|
||||
target_c,
|
||||
fstype_c,
|
||||
ctypes.c_ulong(mountflags),
|
||||
data_c,
|
||||
)
|
||||
|
||||
if result != 0:
|
||||
@@ -141,11 +142,11 @@ class Error(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def prepare_machine_root(machinename: str, root: Path) -> None:
|
||||
def prepare_machine_root(root: Path) -> None:
|
||||
root.mkdir(parents=True, exist_ok=True)
|
||||
root.joinpath("etc").mkdir(parents=True, exist_ok=True)
|
||||
root.joinpath(".env").write_text(
|
||||
"\n".join(f"{k}={v}" for k, v in os.environ.items())
|
||||
"\n".join(f"{k}={v}" for k, v in os.environ.items()),
|
||||
)
|
||||
|
||||
|
||||
@@ -157,7 +158,6 @@ def retry(fn: Callable, timeout: int = 900) -> None:
|
||||
"""Call the given function repeatedly, with 1 second intervals,
|
||||
until it returns True or a timeout is reached.
|
||||
"""
|
||||
|
||||
for _ in range(timeout):
|
||||
if fn(False):
|
||||
return
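For reference, the retry loop described in the docstring above (poll once per second, passing a `last_try` flag on the final attempt) can be sketched in isolation; the exception type and the success condition below are illustrative, not the driver's exact behaviour:

```python
import time
from collections.abc import Callable


def retry(fn: Callable[[bool], bool], timeout: int = 900) -> None:
    """Call fn(last_try) once per second until it returns True or we time out."""
    for _ in range(timeout):
        if fn(False):
            return
        time.sleep(1)
    if not fn(True):  # one final attempt, flagged as the last try
        msg = f"action timed out after {timeout} seconds"
        raise TimeoutError(msg)


attempts = {"count": 0}


def ready(_last_try: bool) -> bool:
    attempts["count"] += 1
    return attempts["count"] >= 3  # succeeds on the third poll


retry(ready, timeout=10)
print(f"succeeded after {attempts['count']} attempts")
```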
|
||||
@@ -189,7 +189,7 @@ class Machine:
|
||||
return self.get_systemd_process()
|
||||
|
||||
def start(self) -> None:
|
||||
prepare_machine_root(self.name, self.rootdir)
|
||||
prepare_machine_root(self.rootdir)
|
||||
init_test_environment()
|
||||
cmd = [
|
||||
"systemd-nspawn",
|
||||
@@ -212,8 +212,12 @@ class Machine:
|
||||
self.process = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True, env=env)
|
||||
|
||||
def get_systemd_process(self) -> int:
|
||||
assert self.process is not None, "Machine not started"
|
||||
assert self.process.stdout is not None, "Machine has no stdout"
|
||||
if self.process is None:
|
||||
msg = "Machine not started"
|
||||
raise RuntimeError(msg)
|
||||
if self.process.stdout is None:
|
||||
msg = "Machine has no stdout"
|
||||
raise RuntimeError(msg)
|
||||
|
||||
for line in self.process.stdout:
|
||||
print(line, end="")
|
||||
@@ -230,9 +234,9 @@ class Machine:
|
||||
.read_text()
|
||||
.split()
|
||||
)
|
||||
assert len(childs) == 1, (
|
||||
f"Expected exactly one child process for systemd-nspawn, got {childs}"
|
||||
)
|
||||
if len(childs) != 1:
|
||||
msg = f"Expected exactly one child process for systemd-nspawn, got {childs}"
|
||||
raise RuntimeError(msg)
|
||||
try:
|
||||
return int(childs[0])
|
||||
except ValueError as e:
|
||||
@@ -252,7 +256,9 @@ class Machine:
|
||||
|
||||
def tuple_from_line(line: str) -> tuple[str, str]:
|
||||
match = line_pattern.match(line)
|
||||
assert match is not None
|
||||
if match is None:
|
||||
msg = f"Failed to parse line: {line}"
|
||||
raise RuntimeError(msg)
|
||||
return match[1], match[2]
|
||||
|
||||
return dict(
|
||||
@@ -280,12 +286,11 @@ class Machine:
|
||||
def execute(
|
||||
self,
|
||||
command: str,
|
||||
check_return: bool = True,
|
||||
check_output: bool = True,
|
||||
check_return: bool = True, # noqa: ARG002
|
||||
check_output: bool = True, # noqa: ARG002
|
||||
timeout: int | None = 900,
|
||||
) -> subprocess.CompletedProcess:
|
||||
"""
|
||||
Execute a shell command, returning a list `(status, stdout)`.
|
||||
"""Execute a shell command, returning a list `(status, stdout)`.
|
||||
|
||||
Commands are run with `set -euo pipefail` set:
|
||||
|
||||
@@ -316,21 +321,21 @@ class Machine:
|
||||
`timeout` parameter, e.g., `execute(cmd, timeout=10)` or
|
||||
`execute(cmd, timeout=None)`. The default is 900 seconds.
|
||||
"""
|
||||
|
||||
# Always run command with shell opts
|
||||
command = f"set -eo pipefail; source /etc/profile; set -xu; {command}"
|
||||
|
||||
proc = subprocess.run(
|
||||
return subprocess.run(
|
||||
self.nsenter_command(command),
|
||||
timeout=timeout,
|
||||
check=False,
|
||||
stdout=subprocess.PIPE,
|
||||
text=True,
|
||||
)
|
||||
return proc
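The method above always prefixes the command with shell strict-mode options and runs it with `check=False`, leaving return-code handling to the caller. A self-contained sketch of that wrapping outside the container (it uses `bash` directly instead of `nsenter`, and omits `source /etc/profile`):

```python
import subprocess


def run_wrapped(command: str, timeout: int | None = 900) -> subprocess.CompletedProcess:
    """Prepend strict shell options, capture stdout, never raise on failure."""
    wrapped = f"set -eo pipefail; set -xu; {command}"
    return subprocess.run(
        ["bash", "-c", wrapped],
        timeout=timeout,
        check=False,  # the caller inspects returncode, as execute() does above
        stdout=subprocess.PIPE,
        text=True,
    )


result = run_wrapped("echo hello")
print(result.returncode, result.stdout.strip())  # 0 hello
```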
|
||||
|
||||
def nested(
|
||||
self, msg: str, attrs: dict[str, str] | None = None
|
||||
self,
|
||||
msg: str,
|
||||
attrs: dict[str, str] | None = None,
|
||||
) -> _GeneratorContextManager:
|
||||
if attrs is None:
|
||||
attrs = {}
|
||||
@@ -339,8 +344,7 @@ class Machine:
|
||||
return self.logger.nested(msg, my_attrs)
|
||||
|
||||
def systemctl(self, q: str) -> subprocess.CompletedProcess:
|
||||
"""
|
||||
Runs `systemctl` commands with optional support for
|
||||
"""Runs `systemctl` commands with optional support for
|
||||
`systemctl --user`
|
||||
|
||||
```py
|
||||
@@ -355,8 +359,7 @@ class Machine:
|
||||
return self.execute(f"systemctl {q}")
|
||||
|
||||
def wait_until_succeeds(self, command: str, timeout: int = 900) -> str:
|
||||
"""
|
||||
Repeat a shell command with 1-second intervals until it succeeds.
|
||||
"""Repeat a shell command with 1-second intervals until it succeeds.
|
||||
Has a default timeout of 900 seconds which can be modified, e.g.
|
||||
`wait_until_succeeds(cmd, timeout=10)`. See `execute` for details on
|
||||
command execution.
|
||||
@@ -374,18 +377,17 @@ class Machine:
|
||||
return output
|
||||
|
||||
def wait_for_open_port(
|
||||
self, port: int, addr: str = "localhost", timeout: int = 900
|
||||
self,
|
||||
port: int,
|
||||
addr: str = "localhost",
|
||||
timeout: int = 900,
|
||||
) -> None:
|
||||
"""
|
||||
Wait for a port to be open on the given address.
|
||||
"""
|
||||
"""Wait for a port to be open on the given address."""
|
||||
command = f"nc -z {shlex.quote(addr)} {port}"
|
||||
self.wait_until_succeeds(command, timeout=timeout)
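The helper above builds on `wait_until_succeeds` with an `nc -z` probe; the same wait-for-port pattern can also be expressed with the standard library alone, as in this sketch (host and port are placeholders):

```python
import socket
import time


def wait_for_open_port(addr: str, port: int, timeout: int = 30) -> None:
    """Poll once per second until a TCP connection to (addr, port) succeeds."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with socket.create_connection((addr, port), timeout=1):
                return
        except OSError:
            time.sleep(1)
    msg = f"port {port} on {addr} did not open within {timeout} seconds"
    raise TimeoutError(msg)


# Example: wait_for_open_port("localhost", 22)
```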
|
||||
|
||||
def wait_for_file(self, filename: str, timeout: int = 30) -> None:
|
||||
"""
|
||||
Waits until the file exists in the machine's file system.
|
||||
"""
|
||||
"""Waits until the file exists in the machine's file system."""
|
||||
|
||||
def check_file(_last_try: bool) -> bool:
|
||||
result = self.execute(f"test -e {filename}")
|
||||
@@ -395,8 +397,7 @@ class Machine:
|
||||
retry(check_file, timeout)
|
||||
|
||||
def wait_for_unit(self, unit: str, timeout: int = 900) -> None:
|
||||
"""
|
||||
Wait for a systemd unit to get into "active" state.
|
||||
"""Wait for a systemd unit to get into "active" state.
|
||||
Throws exceptions on "failed" and "inactive" states as well as after
|
||||
timing out.
|
||||
"""
|
||||
@@ -441,9 +442,7 @@ class Machine:
|
||||
return res.stdout
|
||||
|
||||
def shutdown(self) -> None:
|
||||
"""
|
||||
Shut down the machine, waiting for the VM to exit.
|
||||
"""
|
||||
"""Shut down the machine, waiting for the VM to exit."""
|
||||
if self.process:
|
||||
self.process.terminate()
|
||||
self.process.wait()
|
||||
@@ -557,7 +556,7 @@ class Driver:
|
||||
rootdir=tempdir_path / container.name,
|
||||
out_dir=self.out_dir,
|
||||
logger=self.logger,
|
||||
)
|
||||
),
|
||||
)
|
||||
|
||||
def start_all(self) -> None:
|
||||
@@ -575,13 +574,15 @@ class Driver:
|
||||
# We launch a sleep here, so we can pgrep the process cmdline for
|
||||
# the uuid
|
||||
sleep = shutil.which("sleep")
|
||||
assert sleep is not None, "sleep command not found"
|
||||
if sleep is None:
|
||||
msg = "sleep command not found"
|
||||
raise RuntimeError(msg)
|
||||
machine.execute(
|
||||
f"systemd-run /bin/sh -c '{sleep} 999999999 && echo {nspawn_uuid}'",
|
||||
)
|
||||
|
||||
print(
|
||||
f"To attach to container {machine.name} run on the same machine that runs the test:"
|
||||
f"To attach to container {machine.name} run on the same machine that runs the test:",
|
||||
)
|
||||
print(
|
||||
" ".join(
|
||||
@@ -603,8 +604,8 @@ class Driver:
|
||||
"-c",
|
||||
"bash",
|
||||
Style.RESET_ALL,
|
||||
]
|
||||
)
|
||||
],
|
||||
),
|
||||
)
|
||||
|
||||
def test_symbols(self) -> dict[str, Any]:
|
||||
@@ -623,13 +624,13 @@ class Driver:
|
||||
"additionally exposed symbols:\n "
|
||||
+ ", ".join(m.name for m in self.machines)
|
||||
+ ",\n "
|
||||
+ ", ".join(list(general_symbols.keys()))
|
||||
+ ", ".join(list(general_symbols.keys())),
|
||||
)
|
||||
return {**general_symbols, **machine_symbols}
|
||||
|
||||
def test_script(self) -> None:
|
||||
"""Run the test script"""
|
||||
exec(self.testscript, self.test_symbols(), None)
|
||||
exec(self.testscript, self.test_symbols(), None) # noqa: S102
|
||||
|
||||
def run_tests(self) -> None:
|
||||
"""Run the test script (for non-interactive test runs)"""
|
||||
|
||||
@@ -25,27 +25,31 @@ class AbstractLogger(ABC):
|
||||
@abstractmethod
|
||||
@contextmanager
|
||||
def subtest(
|
||||
self, name: str, attributes: dict[str, str] | None = None
|
||||
self,
|
||||
name: str,
|
||||
attributes: dict[str, str] | None = None,
|
||||
) -> Iterator[None]:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
@contextmanager
|
||||
def nested(
|
||||
self, message: str, attributes: dict[str, str] | None = None
|
||||
self,
|
||||
message: str,
|
||||
attributes: dict[str, str] | None = None,
|
||||
) -> Iterator[None]:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore
|
||||
def info(self, *args: Any, **kwargs: Any) -> None:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore
|
||||
def warning(self, *args: Any, **kwargs: Any) -> None:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore
|
||||
def error(self, *args: Any, **kwargs: Any) -> None:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
@@ -59,6 +63,8 @@ class AbstractLogger(ABC):
|
||||
|
||||
class JunitXMLLogger(AbstractLogger):
|
||||
class TestCaseState:
|
||||
"""State tracking for individual test cases in JUnit XML reports."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.stdout = ""
|
||||
self.stderr = ""
|
||||
@@ -66,7 +72,7 @@ class JunitXMLLogger(AbstractLogger):
|
||||
|
||||
def __init__(self, outfile: Path) -> None:
|
||||
self.tests: dict[str, JunitXMLLogger.TestCaseState] = {
|
||||
"main": self.TestCaseState()
|
||||
"main": self.TestCaseState(),
|
||||
}
|
||||
self.currentSubtest = "main"
|
||||
self.outfile: Path = outfile
|
||||
@@ -74,12 +80,16 @@ class JunitXMLLogger(AbstractLogger):
|
||||
atexit.register(self.close)
|
||||
|
||||
def log(self, message: str, attributes: dict[str, str] | None = None) -> None:
|
||||
del attributes # Unused but kept for API compatibility
|
||||
self.tests[self.currentSubtest].stdout += message + os.linesep
|
||||
|
||||
@contextmanager
|
||||
def subtest(
|
||||
self, name: str, attributes: dict[str, str] | None = None
|
||||
self,
|
||||
name: str,
|
||||
attributes: dict[str, str] | None = None,
|
||||
) -> Iterator[None]:
|
||||
del attributes # Unused but kept for API compatibility
|
||||
old_test = self.currentSubtest
|
||||
self.tests.setdefault(name, self.TestCaseState())
|
||||
self.currentSubtest = name
|
||||
@@ -90,18 +100,24 @@ class JunitXMLLogger(AbstractLogger):
|
||||
|
||||
@contextmanager
|
||||
def nested(
|
||||
self, message: str, attributes: dict[str, str] | None = None
|
||||
self,
|
||||
message: str,
|
||||
attributes: dict[str, str] | None = None,
|
||||
) -> Iterator[None]:
|
||||
del attributes # Unused but kept for API compatibility
|
||||
self.log(message)
|
||||
yield
|
||||
|
||||
def info(self, *args: Any, **kwargs: Any) -> None:
|
||||
del kwargs # Unused but kept for API compatibility
|
||||
self.tests[self.currentSubtest].stdout += args[0] + os.linesep
|
||||
|
||||
def warning(self, *args: Any, **kwargs: Any) -> None:
|
||||
del kwargs # Unused but kept for API compatibility
|
||||
self.tests[self.currentSubtest].stdout += args[0] + os.linesep
|
||||
|
||||
def error(self, *args: Any, **kwargs: Any) -> None:
|
||||
del kwargs # Unused but kept for API compatibility
|
||||
self.tests[self.currentSubtest].stderr += args[0] + os.linesep
|
||||
self.tests[self.currentSubtest].failure = True
|
||||
|
||||
@@ -144,7 +160,9 @@ class CompositeLogger(AbstractLogger):
|
||||
|
||||
@contextmanager
|
||||
def subtest(
|
||||
self, name: str, attributes: dict[str, str] | None = None
|
||||
self,
|
||||
name: str,
|
||||
attributes: dict[str, str] | None = None,
|
||||
) -> Iterator[None]:
|
||||
with ExitStack() as stack:
|
||||
for logger in self.logger_list:
|
||||
@@ -153,22 +171,24 @@ class CompositeLogger(AbstractLogger):
|
||||
|
||||
@contextmanager
|
||||
def nested(
|
||||
self, message: str, attributes: dict[str, str] | None = None
|
||||
self,
|
||||
message: str,
|
||||
attributes: dict[str, str] | None = None,
|
||||
) -> Iterator[None]:
|
||||
with ExitStack() as stack:
|
||||
for logger in self.logger_list:
|
||||
stack.enter_context(logger.nested(message, attributes))
|
||||
yield
|
||||
|
||||
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore
|
||||
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
|
||||
for logger in self.logger_list:
|
||||
logger.info(*args, **kwargs)
|
||||
|
||||
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore
|
||||
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
|
||||
for logger in self.logger_list:
|
||||
logger.warning(*args, **kwargs)
|
||||
|
||||
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore
|
||||
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
|
||||
for logger in self.logger_list:
|
||||
logger.error(*args, **kwargs)
|
||||
sys.exit(1)
|
||||
@@ -200,19 +220,24 @@ class TerminalLogger(AbstractLogger):
|
||||
|
||||
@contextmanager
|
||||
def subtest(
|
||||
self, name: str, attributes: dict[str, str] | None = None
|
||||
self,
|
||||
name: str,
|
||||
attributes: dict[str, str] | None = None,
|
||||
) -> Iterator[None]:
|
||||
with self.nested("subtest: " + name, attributes):
|
||||
yield
|
||||
|
||||
@contextmanager
|
||||
def nested(
|
||||
self, message: str, attributes: dict[str, str] | None = None
|
||||
self,
|
||||
message: str,
|
||||
attributes: dict[str, str] | None = None,
|
||||
) -> Iterator[None]:
|
||||
self._eprint(
|
||||
self.maybe_prefix(
|
||||
Style.BRIGHT + Fore.GREEN + message + Style.RESET_ALL, attributes
|
||||
)
|
||||
Style.BRIGHT + Fore.GREEN + message + Style.RESET_ALL,
|
||||
attributes,
|
||||
),
|
||||
)
|
||||
|
||||
tic = time.time()
|
||||
@@ -220,13 +245,13 @@ class TerminalLogger(AbstractLogger):
|
||||
toc = time.time()
|
||||
self.log(f"(finished: {message}, in {toc - tic:.2f} seconds)")
|
||||
|
||||
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore
|
||||
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
|
||||
self.log(*args, **kwargs)
|
||||
|
||||
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore
|
||||
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
|
||||
self.log(*args, **kwargs)
|
||||
|
||||
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore
|
||||
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
|
||||
self.log(*args, **kwargs)
|
||||
|
||||
def print_serial_logs(self, enable: bool) -> None:
|
||||
@@ -259,7 +284,9 @@ class XMLLogger(AbstractLogger):
|
||||
return "".join(ch for ch in message if unicodedata.category(ch)[0] != "C")
|
||||
|
||||
def maybe_prefix(
|
||||
self, message: str, attributes: dict[str, str] | None = None
|
||||
self,
|
||||
message: str,
|
||||
attributes: dict[str, str] | None = None,
|
||||
) -> str:
|
||||
if attributes and "machine" in attributes:
|
||||
return f"{attributes['machine']}: {message}"
|
||||
@@ -270,13 +297,13 @@ class XMLLogger(AbstractLogger):
|
||||
self.xml.characters(message)
|
||||
self.xml.endElement("line")
|
||||
|
||||
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore
|
||||
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
|
||||
self.log(*args, **kwargs)
|
||||
|
||||
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore
|
||||
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
|
||||
self.log(*args, **kwargs)
|
||||
|
||||
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore
|
||||
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
|
||||
self.log(*args, **kwargs)
|
||||
|
||||
def log(self, message: str, attributes: dict[str, str] | None = None) -> None:
|
||||
@@ -309,14 +336,18 @@ class XMLLogger(AbstractLogger):
|
||||
|
||||
@contextmanager
|
||||
def subtest(
|
||||
self, name: str, attributes: dict[str, str] | None = None
|
||||
self,
|
||||
name: str,
|
||||
attributes: dict[str, str] | None = None,
|
||||
) -> Iterator[None]:
|
||||
with self.nested("subtest: " + name, attributes):
|
||||
yield
|
||||
|
||||
@contextmanager
|
||||
def nested(
|
||||
self, message: str, attributes: dict[str, str] | None = None
|
||||
self,
|
||||
message: str,
|
||||
attributes: dict[str, str] | None = None,
|
||||
) -> Iterator[None]:
|
||||
if attributes is None:
|
||||
attributes = {}
|
||||
|
||||
@@ -8,6 +8,10 @@
|
||||
{
|
||||
imports = lib.optional (_class == "nixos") (
|
||||
lib.mkIf config.clan.core.enableRecommendedDefaults {
|
||||
|
||||
# Enable automatic state-version generation.
|
||||
clan.core.settings.state-version.enable = lib.mkDefault true;
|
||||
|
||||
# Use systemd during boot as well except:
|
||||
# - systems with raids as this currently require manual configuration: https://github.com/NixOS/nixpkgs/issues/210210
|
||||
# - for containers we currently rely on the `stage-2` init script that sets up our /etc
|
||||
@@ -37,6 +41,7 @@
|
||||
};
|
||||
|
||||
config = lib.mkIf config.clan.core.enableRecommendedDefaults {
|
||||
|
||||
# This disables the HTML manual and `nixos-help` command but leaves
|
||||
# `man configuration.nix`
|
||||
documentation.doc.enable = lib.mkDefault false;
|
||||
|
||||
@@ -1,40 +1,17 @@
|
||||
{ ... }:
|
||||
{
|
||||
perSystem =
|
||||
{ ... }:
|
||||
{
|
||||
clan.nixosTests.machine-id = {
|
||||
perSystem.clan.nixosTests.machine-id = {
|
||||
|
||||
name = "service-machine-id";
|
||||
name = "service-machine-id";
|
||||
|
||||
clan = {
|
||||
directory = ./.;
|
||||
|
||||
# Workaround until we can use nodes.server = { };
|
||||
modules."@clan/importer" = ../../../../clanServices/importer;
|
||||
|
||||
inventory = {
|
||||
machines.server = { };
|
||||
instances.importer = {
|
||||
module.name = "@clan/importer";
|
||||
module.input = "self";
|
||||
roles.default.tags.all = { };
|
||||
roles.default.extraModules = [
|
||||
{
|
||||
# Test machine ID generation
|
||||
clan.core.settings.machine-id.enable = true;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# TODO: Broken. Use instead of importer after fixing.
|
||||
# nodes.server = { };
|
||||
|
||||
# This is not an actual vm test, this is a workaround to
|
||||
# generate the needed vars for the eval test.
|
||||
testScript = "";
|
||||
clan = {
|
||||
directory = ./.;
|
||||
machines.server = {
|
||||
clan.core.settings.machine-id.enable = true;
|
||||
};
|
||||
};
|
||||
|
||||
# This is not an actual vm test, this is a workaround to
|
||||
# generate the needed vars for the eval test.
|
||||
testScript = "";
|
||||
};
|
||||
}
|
||||
|
||||
@@ -10,30 +10,14 @@
|
||||
clan = {
|
||||
directory = ./.;
|
||||
|
||||
# Workaround until we can use nodes.machine = { };
|
||||
modules."@clan/importer" = ../../../../clanServices/importer;
|
||||
|
||||
inventory = {
|
||||
machines.machine = { };
|
||||
instances.importer = {
|
||||
module.name = "@clan/importer";
|
||||
module.input = "self";
|
||||
roles.default.tags.all = { };
|
||||
roles.default.extraModules = [
|
||||
{
|
||||
clan.core.postgresql.enable = true;
|
||||
clan.core.postgresql.users.test = { };
|
||||
clan.core.postgresql.databases.test.create.options.OWNER = "test";
|
||||
clan.core.settings.directory = ./.;
|
||||
}
|
||||
];
|
||||
};
|
||||
machines.machine = {
|
||||
clan.core.postgresql.enable = true;
|
||||
clan.core.postgresql.users.test = { };
|
||||
clan.core.postgresql.databases.test.create.options.OWNER = "test";
|
||||
clan.core.settings.directory = ./.;
|
||||
};
|
||||
};
|
||||
|
||||
# TODO: Broken. Use instead of importer after fixing.
|
||||
# nodes.machine = { };
|
||||
|
||||
testScript =
|
||||
let
|
||||
runpg = "runuser -u postgres -- /run/current-system/sw/bin/psql";
|
||||
|
||||
@@ -9,28 +9,11 @@
|
||||
|
||||
clan = {
|
||||
directory = ./.;
|
||||
|
||||
# Workaround until we can use nodes.server = { };
|
||||
modules."@clan/importer" = ../../../../clanServices/importer;
|
||||
|
||||
inventory = {
|
||||
machines.server = { };
|
||||
instances.importer = {
|
||||
module.name = "@clan/importer";
|
||||
module.input = "self";
|
||||
roles.default.tags.all = { };
|
||||
roles.default.extraModules = [
|
||||
{
|
||||
clan.core.settings.state-version.enable = true;
|
||||
}
|
||||
];
|
||||
};
|
||||
machines.server = {
|
||||
clan.core.settings.state-version.enable = true;
|
||||
};
|
||||
};
|
||||
|
||||
# TODO: Broken. Use instead of importer after fixing.
|
||||
# nodes.server = { };
|
||||
|
||||
# This is not an actual vm test, this is a workaround to
|
||||
# generate the needed vars for the eval test.
|
||||
testScript = "";
|
||||
|
||||
@@ -290,9 +290,11 @@ in
|
||||
};
|
||||
owner = mkOption {
|
||||
description = "The user name or id that will own the file.";
|
||||
type = str;
|
||||
default = "root";
|
||||
};
|
||||
group = mkOption {
|
||||
type = str;
|
||||
description = "The group name or id that will own the file.";
|
||||
default = if _class == "darwin" then "wheel" else "root";
|
||||
defaultText = lib.literalExpression ''if _class == "darwin" then "wheel" else "root"'';
|
||||
@@ -302,6 +304,15 @@ in
|
||||
description = "The unix file mode of the file. Must be a 4-digit octal number.";
|
||||
default = "0400";
|
||||
};
|
||||
exists = mkOption {
description = ''
Returns true if the file exists. This is used to guard against reading an unset value during evaluation.
This currently only works for non-secret files.
'';
type = bool;
default = if file.config.secret then throw "Cannot determine existence of secret file" else false;
defaultText = "Throws an error because the existence of a secret file cannot be determined";
};
|
||||
value =
|
||||
mkOption {
|
||||
description = ''
|
||||
|
||||
@@ -25,7 +25,7 @@ in
|
||||
);
|
||||
value = mkIf (file.config.secret == false) (
|
||||
# dynamically adjust priority to allow overriding with mkDefault in case the file is not found
|
||||
if (pathExists file.config.flakePath) then
|
||||
if file.config.exists then
|
||||
# if the file is found it should have normal priority
|
||||
readFile file.config.flakePath
|
||||
else
|
||||
@@ -34,6 +34,7 @@ in
|
||||
throw "Please run `clan vars generate ${config.clan.core.settings.machine.name}` as file was not found: ${file.config.path}"
|
||||
)
|
||||
);
|
||||
exists = mkIf (file.config.secret == false) (pathExists file.config.flakePath);
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
116
nixosModules/clanCore/vm-base.nix
Normal file
@@ -0,0 +1,116 @@
|
||||
# Standalone VM base module that can be imported independently
|
||||
# This module contains the core VM configuration without the system extension
|
||||
{
|
||||
lib,
|
||||
config,
|
||||
pkgs,
|
||||
modulesPath,
|
||||
...
|
||||
}:
|
||||
let
|
||||
# Flatten the list of state folders into a single list
|
||||
stateFolders = lib.flatten (
|
||||
lib.mapAttrsToList (_item: attrs: attrs.folders) config.clan.core.state
|
||||
);
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
(modulesPath + "/virtualisation/qemu-vm.nix")
|
||||
./serial.nix
|
||||
./waypipe.nix
|
||||
];
|
||||
|
||||
clan.core.state.HOME.folders = [ "/home" ];
|
||||
|
||||
clan.services.waypipe = {
|
||||
inherit (config.clan.core.vm.inspect.waypipe) enable command;
|
||||
};
|
||||
|
||||
# required for issuing shell commands via qga
|
||||
services.qemuGuest.enable = true;
|
||||
|
||||
# required to react to system_powerdown qmp command
|
||||
# Some desktop managers like xfce override the poweroff signal and therefore
|
||||
# make it impossible to handle it via 'logind' directly.
|
||||
services.acpid.enable = true;
|
||||
services.acpid.handlers.power.event = "button/power.*";
|
||||
services.acpid.handlers.power.action = "poweroff";
|
||||
|
||||
# only works on x11
|
||||
services.spice-vdagentd.enable = config.services.xserver.enable;
|
||||
|
||||
boot.initrd.systemd.enable = true;
|
||||
|
||||
boot.initrd.systemd.storePaths = [
|
||||
pkgs.util-linux
|
||||
pkgs.e2fsprogs
|
||||
];
|
||||
boot.initrd.systemd.emergencyAccess = true;
|
||||
|
||||
# userborn would be faster because it doesn't need perl, but it cannot create normal users
|
||||
services.userborn.enable = true;
|
||||
users.mutableUsers = false;
|
||||
users.allowNoPasswordLogin = true;
|
||||
|
||||
boot.initrd.kernelModules = [ "virtiofs" ];
|
||||
virtualisation.writableStore = false;
|
||||
virtualisation.fileSystems = lib.mkForce (
|
||||
{
|
||||
"/nix/store" = {
|
||||
device = "nix-store";
|
||||
options = [
|
||||
"x-systemd.requires=systemd-modules-load.service"
|
||||
"ro"
|
||||
];
|
||||
fsType = "virtiofs";
|
||||
};
|
||||
|
||||
"/" = {
|
||||
device = "/dev/vda";
|
||||
fsType = "ext4";
|
||||
options = [
|
||||
"defaults"
|
||||
"x-systemd.makefs"
|
||||
"nobarrier"
|
||||
"noatime"
|
||||
"nodiratime"
|
||||
"data=writeback"
|
||||
"discard"
|
||||
];
|
||||
};
|
||||
|
||||
"/vmstate" = {
|
||||
device = "/dev/vdb";
|
||||
options = [
|
||||
"x-systemd.makefs"
|
||||
"noatime"
|
||||
"nodiratime"
|
||||
"discard"
|
||||
];
|
||||
noCheck = true;
|
||||
fsType = "ext4";
|
||||
};
|
||||
|
||||
${config.clan.core.facts.secretUploadDirectory} = {
|
||||
device = "secrets";
|
||||
fsType = "9p";
|
||||
neededForBoot = true;
|
||||
options = [
|
||||
"trans=virtio"
|
||||
"version=9p2000.L"
|
||||
"cache=loose"
|
||||
];
|
||||
};
|
||||
}
|
||||
// lib.listToAttrs (
|
||||
map (
|
||||
folder:
|
||||
lib.nameValuePair folder {
|
||||
device = "/vmstate${folder}";
|
||||
fsType = "none";
|
||||
options = [ "bind" ];
|
||||
}
|
||||
) stateFolders
|
||||
)
|
||||
);
|
||||
}
|
||||
@@ -4,116 +4,11 @@
|
||||
pkgs,
|
||||
options,
|
||||
extendModules,
|
||||
modulesPath,
|
||||
...
|
||||
}:
|
||||
let
|
||||
# Flatten the list of state folders into a single list
|
||||
stateFolders = lib.flatten (
|
||||
lib.mapAttrsToList (_item: attrs: attrs.folders) config.clan.core.state
|
||||
);
|
||||
|
||||
vmModule = {
|
||||
imports = [
|
||||
(modulesPath + "/virtualisation/qemu-vm.nix")
|
||||
./serial.nix
|
||||
./waypipe.nix
|
||||
];
|
||||
|
||||
clan.core.state.HOME.folders = [ "/home" ];
|
||||
|
||||
clan.services.waypipe = {
|
||||
inherit (config.clan.core.vm.inspect.waypipe) enable command;
|
||||
};
|
||||
|
||||
# required for issuing shell commands via qga
|
||||
services.qemuGuest.enable = true;
|
||||
|
||||
# required to react to system_powerdown qmp command
|
||||
# Some desktop managers like xfce override the poweroff signal and therefore
|
||||
# make it impossible to handle it via 'logind' directly.
|
||||
services.acpid.enable = true;
|
||||
services.acpid.handlers.power.event = "button/power.*";
|
||||
services.acpid.handlers.power.action = "poweroff";
|
||||
|
||||
# only works on x11
|
||||
services.spice-vdagentd.enable = config.services.xserver.enable;
|
||||
|
||||
boot.initrd.systemd.enable = true;
|
||||
|
||||
boot.initrd.systemd.storePaths = [
|
||||
pkgs.util-linux
|
||||
pkgs.e2fsprogs
|
||||
];
|
||||
boot.initrd.systemd.emergencyAccess = true;
|
||||
|
||||
# userborn would be faster because it doesn't need perl, but it cannot create normal users
|
||||
services.userborn.enable = true;
|
||||
users.mutableUsers = false;
|
||||
users.allowNoPasswordLogin = true;
|
||||
|
||||
boot.initrd.kernelModules = [ "virtiofs" ];
|
||||
virtualisation.writableStore = false;
|
||||
virtualisation.fileSystems = lib.mkForce (
|
||||
{
|
||||
"/nix/store" = {
|
||||
device = "nix-store";
|
||||
options = [
|
||||
"x-systemd.requires=systemd-modules-load.service"
|
||||
"ro"
|
||||
];
|
||||
fsType = "virtiofs";
|
||||
};
|
||||
|
||||
"/" = {
|
||||
device = "/dev/vda";
|
||||
fsType = "ext4";
|
||||
options = [
|
||||
"defaults"
|
||||
"x-systemd.makefs"
|
||||
"nobarrier"
|
||||
"noatime"
|
||||
"nodiratime"
|
||||
"data=writeback"
|
||||
"discard"
|
||||
];
|
||||
};
|
||||
|
||||
"/vmstate" = {
|
||||
device = "/dev/vdb";
|
||||
options = [
|
||||
"x-systemd.makefs"
|
||||
"noatime"
|
||||
"nodiratime"
|
||||
"discard"
|
||||
];
|
||||
noCheck = true;
|
||||
fsType = "ext4";
|
||||
};
|
||||
|
||||
${config.clan.core.facts.secretUploadDirectory} = {
|
||||
device = "secrets";
|
||||
fsType = "9p";
|
||||
neededForBoot = true;
|
||||
options = [
|
||||
"trans=virtio"
|
||||
"version=9p2000.L"
|
||||
"cache=loose"
|
||||
];
|
||||
};
|
||||
}
|
||||
// lib.listToAttrs (
|
||||
map (
|
||||
folder:
|
||||
lib.nameValuePair folder {
|
||||
device = "/vmstate${folder}";
|
||||
fsType = "none";
|
||||
options = [ "bind" ];
|
||||
}
|
||||
) stateFolders
|
||||
)
|
||||
);
|
||||
};
|
||||
# Import the standalone VM base module
|
||||
vmModule = import ./vm-base.nix;
|
||||
|
||||
# We cannot simply merge the VM config into the current system config, because
|
||||
# it is not necessarily a VM.
|
||||
|
||||
@@ -16,6 +16,10 @@ from pathlib import Path
|
||||
from tempfile import TemporaryDirectory
|
||||
from typing import Any
|
||||
|
||||
# Constants
|
||||
NODE_ID_LENGTH = 10
|
||||
NETWORK_ID_LENGTH = 16
|
||||
|
||||
|
||||
class ClanError(Exception):
|
||||
pass
|
||||
@@ -55,9 +59,9 @@ class Identity:
|
||||
|
||||
def node_id(self) -> str:
|
||||
nid = self.public.split(":")[0]
|
||||
assert len(nid) == 10, (
|
||||
f"node_id must be 10 characters long, got {len(nid)}: {nid}"
|
||||
)
|
||||
if len(nid) != NODE_ID_LENGTH:
|
||||
msg = f"node_id must be {NODE_ID_LENGTH} characters long, got {len(nid)}: {nid}"
|
||||
raise ClanError(msg)
|
||||
return nid
|
||||
|
||||
|
||||
@@ -84,9 +88,10 @@ class ZerotierController:
|
||||
headers["Content-Type"] = "application/json"
|
||||
headers["X-ZT1-AUTH"] = self.authtoken
|
||||
url = f"http://127.0.0.1:{self.port}{path}"
|
||||
req = urllib.request.Request(url, headers=headers, method=method, data=body)
|
||||
resp = urllib.request.urlopen(req)
|
||||
return json.load(resp)
|
||||
# Safe: only connecting to localhost zerotier API
|
||||
req = urllib.request.Request(url, headers=headers, method=method, data=body) # noqa: S310
|
||||
with urllib.request.urlopen(req, timeout=5) as resp: # noqa: S310
|
||||
return json.load(resp)
|
||||
|
||||
def status(self) -> dict[str, Any]:
|
||||
return self._http_request("/status")
|
||||
@@ -172,9 +177,9 @@ def create_identity() -> Identity:
|
||||
|
||||
|
||||
def compute_zerotier_ip(network_id: str, identity: Identity) -> ipaddress.IPv6Address:
|
||||
assert len(network_id) == 16, (
|
||||
f"network_id must be 16 characters long, got '{network_id}'"
|
||||
)
|
||||
if len(network_id) != NETWORK_ID_LENGTH:
|
||||
msg = f"network_id must be {NETWORK_ID_LENGTH} characters long, got '{network_id}'"
|
||||
raise ClanError(msg)
|
||||
nwid = int(network_id, 16)
|
||||
node_id = int(identity.node_id(), 16)
|
||||
addr_parts = bytearray(
|
||||
@@ -195,7 +200,7 @@ def compute_zerotier_ip(network_id: str, identity: Identity) -> ipaddress.IPv6Ad
|
||||
(node_id >> 16) & 0xFF,
|
||||
(node_id >> 8) & 0xFF,
|
||||
(node_id) & 0xFF,
|
||||
]
|
||||
],
|
||||
)
|
||||
return ipaddress.IPv6Address(bytes(addr_parts))
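The function above packs the 16-hex-digit network id and the 10-hex-digit node id into a 16-byte IPv6 address. A compact sketch of the commonly documented ZeroTier RFC4193 layout (`fd` + 8-byte network id + `0x99 0x93` + 5-byte node id), with illustrative, non-real identifiers:

```python
import ipaddress


def zerotier_rfc4193_ip(network_id: str, node_id: str) -> ipaddress.IPv6Address:
    """Sketch of the RFC4193 layout: fd | nwid (8 bytes) | 99 93 | nodeid (5 bytes)."""
    if len(network_id) != 16 or len(node_id) != 10:
        msg = "network_id must be 16 and node_id 10 hex characters"
        raise ValueError(msg)
    raw = (
        bytes([0xFD])
        + bytes.fromhex(network_id)
        + bytes([0x99, 0x93])
        + bytes.fromhex(node_id)
    )
    return ipaddress.IPv6Address(raw)


# Illustrative (not real) identifiers:
print(zerotier_rfc4193_ip("8056c2e21c000001", "1234567890"))
```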
|
||||
|
||||
@@ -203,7 +208,10 @@ def compute_zerotier_ip(network_id: str, identity: Identity) -> ipaddress.IPv6Ad
|
||||
def main() -> None:
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--mode", choices=["network", "identity"], required=True, type=str
|
||||
"--mode",
|
||||
choices=["network", "identity"],
|
||||
required=True,
|
||||
type=str,
|
||||
)
|
||||
parser.add_argument("--ip", type=Path, required=True)
|
||||
parser.add_argument("--identity-secret", type=Path, required=True)
|
||||
|
||||
7
nixosModules/clanCore/zerotier/genmoon.py
Normal file → Executable file
@@ -6,9 +6,12 @@ import sys
|
||||
from pathlib import Path
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
# Constants
|
||||
REQUIRED_ARGS = 4
|
||||
|
||||
|
||||
def main() -> None:
|
||||
if len(sys.argv) != 4:
|
||||
if len(sys.argv) != REQUIRED_ARGS:
|
||||
print("Usage: genmoon.py <moon.json> <endpoint.json> <moons.d>")
|
||||
sys.exit(1)
|
||||
moon_json_path = sys.argv[1]
|
||||
@@ -17,7 +20,7 @@ def main() -> None:
|
||||
|
||||
moon_json = json.loads(Path(moon_json_path).read_text())
|
||||
moon_json["roots"][0]["stableEndpoints"] = json.loads(
|
||||
Path(endpoint_config).read_text()
|
||||
Path(endpoint_config).read_text(),
|
||||
)
|
||||
|
||||
with NamedTemporaryFile("w") as f:
|
||||
|
||||
@@ -34,4 +34,7 @@ in
|
||||
|
||||
flake.nixosModules.clanCore = clanCore;
|
||||
flake.darwinModules.clanCore = clanCore;
|
||||
|
||||
# Standalone VM base module that can be imported for VM testing
|
||||
flake.nixosModules.clan-vm-base = ./clanCore/vm-base.nix;
|
||||
}
|
||||
|
||||
@@ -12,8 +12,14 @@ let
|
||||
(builtins.match "linux_[0-9]+_[0-9]+" name) != null
|
||||
&& (builtins.tryEval kernelPackages).success
|
||||
&& (
|
||||
(!isUnstable && !kernelPackages.zfs.meta.broken)
|
||||
|| (isUnstable && !kernelPackages.zfs_unstable.meta.broken)
|
||||
let
|
||||
zfsPackage =
|
||||
if isUnstable then
|
||||
kernelPackages.zfs_unstable
|
||||
else
|
||||
kernelPackages.${pkgs.zfs.kernelModuleAttribute};
|
||||
in
|
||||
!(zfsPackage.meta.broken or false)
|
||||
)
|
||||
) pkgs.linuxKernel.packages;
|
||||
latestKernelPackage = lib.last (
|
||||
@@ -24,5 +30,5 @@ let
|
||||
in
|
||||
{
|
||||
# Note this might jump back and forth as kernels get added or removed.
|
||||
boot.kernelPackages = latestKernelPackage;
|
||||
boot.kernelPackages = lib.mkIf (lib.meta.availableOn pkgs.hostPlatform pkgs.zfs) latestKernelPackage;
|
||||
}
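The selection above filters the available kernel package sets to those whose matching ZFS module evaluates and is not marked broken, then picks the latest one. The same shape of logic, reduced to plain Python with made-up version data:

```python
# Made-up data: kernel attribute name -> (version, zfs_module_broken)
kernels = {
    "linux_6_1": ("6.1", False),
    "linux_6_6": ("6.6", False),
    "linux_6_9": ("6.9", True),  # ZFS marked broken for this kernel
}

compatible = {name: ver for name, (ver, broken) in kernels.items() if not broken}
latest = max(compatible, key=lambda name: tuple(map(int, compatible[name].split("."))))
print(latest)  # linux_6_6
```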
|
||||
|
||||
@@ -1,59 +0,0 @@
|
||||
# agit
|
||||
|
||||
A helper script for the AGit workflow with a gitea instance.
|
||||
|
||||
<!-- `$ agit --help` -->
|
||||
|
||||
```
|
||||
usage: agit [-h] {create,c,list,l} ...
|
||||
|
||||
AGit utility for creating and pulling PRs
|
||||
|
||||
positional arguments:
|
||||
{create,c,list,l} Commands
|
||||
create (c) Create an AGit PR
|
||||
list (l) List open AGit pull requests
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
|
||||
The defaults that are assumed are:
|
||||
TARGET_REMOTE_REPOSITORY = origin
|
||||
DEFAULT_TARGET_BRANCH = main
|
||||
|
||||
Examples:
|
||||
$ agit create
|
||||
Opens editor to compose PR title and description (first line is title, rest is body)
|
||||
|
||||
$ agit create --auto
|
||||
Creates PR using latest commit message automatically
|
||||
|
||||
$ agit create --topic "my-feature"
|
||||
Set a custom topic.
|
||||
|
||||
$ agit create --force
|
||||
Force push to a certain topic
|
||||
|
||||
$ agit list
|
||||
Lists all open pull requests for the current repository
|
||||
|
||||
```
|
||||
|
||||
References:
|
||||
- https://docs.gitea.com/usage/agit
|
||||
- https://git-repo.info/en/2020/03/agit-flow-and-git-repo/
|
||||
|
||||
## How to fetch AGit PRs

For a hypothetical PR with the number #4077:

```
git fetch origin pull/4077/head:your-favorite-name
```

Replace `your-favorite-name` with your preferred branch name.

You can push back to the PR with:

```
agit create --topic="The topic of the open PR"
```
|
||||
@@ -1,570 +0,0 @@
|
||||
import argparse
|
||||
import contextlib
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import urllib.error
|
||||
import urllib.request
|
||||
from pathlib import Path
|
||||
|
||||
# push origin HEAD:refs/for/main
|
||||
# refs/for/main: The target branch on the remote
|
||||
# origin: The target repository (not a fork!)
|
||||
# HEAD: The local branch containing the changes you are proposing
|
||||
TARGET_REMOTE_REPOSITORY = "origin"
|
||||
DEFAULT_TARGET_BRANCH = "main"
|
||||
|
||||
|
||||
def get_gitea_api_url(remote: str = "origin") -> str:
|
||||
"""Parse the gitea api url, this parser is fairly naive, but should work for most setups"""
|
||||
exit_code, remote_url, error = run_git_command(["git", "remote", "get-url", remote])
|
||||
|
||||
if exit_code != 0:
|
||||
print(f"Error getting remote URL for '{remote}': {error}")
|
||||
sys.exit(1)
|
||||
|
||||
# Parse different remote URL formats
|
||||
# SSH formats: git@git.clan.lol:clan/clan-core.git or gitea@git.clan.lol:clan/clan-core.git
|
||||
# HTTPS format: https://git.clan.lol/clan/clan-core.git
|
||||
|
||||
if (
|
||||
"@" in remote_url
|
||||
and ":" in remote_url
|
||||
and not remote_url.startswith("https://")
|
||||
):
|
||||
# SSH format: [user]@git.clan.lol:clan/clan-core.git
|
||||
host_and_path = remote_url.split("@")[1] # git.clan.lol:clan/clan-core.git
|
||||
host = host_and_path.split(":")[0] # git.clan.lol
|
||||
repo_path = host_and_path.split(":")[1] # clan/clan-core.git
|
||||
if repo_path.endswith(".git"):
|
||||
repo_path = repo_path[:-4] # clan/clan-core
|
||||
elif remote_url.startswith("https://"):
|
||||
# HTTPS format: https://git.clan.lol/clan/clan-core.git
|
||||
url_parts = remote_url.replace("https://", "").split("/")
|
||||
host = url_parts[0] # git.clan.lol
|
||||
repo_path = "/".join(url_parts[1:]) # clan/clan-core.git
|
||||
if repo_path.endswith(".git"):
|
||||
repo_path = repo_path.removesuffix(".git") # clan/clan-core
|
||||
else:
|
||||
print(f"Unsupported remote URL format: {remote_url}")
|
||||
sys.exit(1)
|
||||
|
||||
api_url = f"https://{host}/api/v1/repos/{repo_path}/pulls"
|
||||
return api_url
|
||||
|
||||
|
||||
def fetch_open_prs(remote: str = "origin") -> list[dict]:
|
||||
"""Fetch open pull requests from the Gitea API."""
|
||||
api_url = get_gitea_api_url(remote)
|
||||
|
||||
try:
|
||||
with urllib.request.urlopen(f"{api_url}?state=open") as response:
|
||||
data = json.loads(response.read().decode())
|
||||
return data
|
||||
except urllib.error.URLError as e:
|
||||
print(f"Error fetching PRs from {api_url}: {e}")
|
||||
sys.exit(1)
|
||||
except json.JSONDecodeError as e:
|
||||
print(f"Error parsing JSON response: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def get_repo_info_from_api_url(api_url: str) -> tuple[str, str]:
|
||||
"""Extract repository owner and name from API URL."""
|
||||
# api_url format: https://git.clan.lol/api/v1/repos/clan/clan-core/pulls
|
||||
parts = api_url.split("/")
|
||||
if len(parts) >= 6 and "repos" in parts:
|
||||
repo_index = parts.index("repos")
|
||||
if repo_index + 2 < len(parts):
|
||||
owner = parts[repo_index + 1]
|
||||
repo_name = parts[repo_index + 2]
|
||||
return owner, repo_name
|
||||
msg = f"Invalid API URL format: {api_url}"
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
def fetch_pr_statuses(
|
||||
repo_owner: str, repo_name: str, commit_sha: str, host: str
|
||||
) -> list[dict]:
|
||||
"""Fetch CI statuses for a specific commit SHA."""
|
||||
status_url = (
|
||||
f"https://{host}/api/v1/repos/{repo_owner}/{repo_name}/statuses/{commit_sha}"
|
||||
)
|
||||
|
||||
try:
|
||||
request = urllib.request.Request(status_url)
|
||||
with urllib.request.urlopen(request, timeout=3) as response:
|
||||
data = json.loads(response.read().decode())
|
||||
return data
|
||||
except (urllib.error.URLError, json.JSONDecodeError, TimeoutError):
|
||||
# Fail silently for individual status requests to keep listing fast
|
||||
return []
|
||||
|
||||
|
||||
def get_latest_status_by_context(statuses: list[dict]) -> dict[str, str]:
|
||||
"""Group statuses by context and return the latest status for each context."""
|
||||
context_statuses = {}
|
||||
|
||||
for status in statuses:
|
||||
context = status.get("context", "unknown")
|
||||
created_at = status.get("created_at", "")
|
||||
status_state = status.get("status", "unknown")
|
||||
|
||||
if (
|
||||
context not in context_statuses
|
||||
or created_at > context_statuses[context]["created_at"]
|
||||
):
|
||||
context_statuses[context] = {
|
||||
"status": status_state,
|
||||
"created_at": created_at,
|
||||
}
|
||||
|
||||
return {context: info["status"] for context, info in context_statuses.items()}
|
||||
|
||||
|
||||
def status_to_emoji(status: str) -> str:
|
||||
"""Convert status string to emoji."""
|
||||
status_map = {"success": "✅", "failure": "❌", "pending": "🟡", "error": "❓"}
|
||||
return status_map.get(status.lower(), "❓")
|
||||
|
||||
|
||||
def create_osc8_link(url: str, text: str) -> str:
|
||||
return f"\033]8;;{url}\033\\{text}\033]8;;\033\\"
|
||||
|
||||
|
||||
def format_pr_with_status(pr: dict, remote: str = "origin") -> str:
|
||||
"""Format PR title with status emojis and OSC8 link."""
|
||||
title = pr["title"]
|
||||
pr_url = pr.get("html_url", "")
|
||||
|
||||
commit_sha = pr.get("head", {}).get("sha")
|
||||
if not commit_sha:
|
||||
if pr_url:
|
||||
return create_osc8_link(pr_url, title)
|
||||
return title
|
||||
|
||||
try:
|
||||
api_url = get_gitea_api_url(remote)
|
||||
repo_owner, repo_name = get_repo_info_from_api_url(api_url)
|
||||
|
||||
host = api_url.split("/")[2]
|
||||
|
||||
statuses = fetch_pr_statuses(repo_owner, repo_name, commit_sha, host)
|
||||
if not statuses:
|
||||
if pr_url:
|
||||
return create_osc8_link(pr_url, title)
|
||||
return title
|
||||
|
||||
latest_statuses = get_latest_status_by_context(statuses)
|
||||
|
||||
emojis = [status_to_emoji(status) for status in latest_statuses.values()]
|
||||
formatted_title = f"{title} {' '.join(emojis)}" if emojis else title
|
||||
|
||||
return create_osc8_link(pr_url, formatted_title) if pr_url else formatted_title
|
||||
|
||||
except (ValueError, IndexError):
|
||||
# If there's any error in processing, just return the title with link if available
|
||||
if pr_url:
|
||||
return create_osc8_link(pr_url, title)
|
||||
|
||||
return title
|
||||
|
||||
|
||||
def run_git_command(command: list) -> tuple[int, str, str]:
|
||||
"""Run a git command and return exit code, stdout, and stderr."""
|
||||
try:
|
||||
result = subprocess.run(command, capture_output=True, text=True, check=False)
|
||||
return result.returncode, result.stdout.strip(), result.stderr.strip()
|
||||
except Exception as e:
|
||||
return 1, "", str(e)
|
||||
|
||||
|
||||
def get_current_branch_name() -> str:
|
||||
exit_code, branch_name, error = run_git_command(
|
||||
["git", "rev-parse", "--abbrev-ref", "HEAD"]
|
||||
)
|
||||
|
||||
if exit_code != 0:
|
||||
print(f"Error getting branch name: {error}")
|
||||
sys.exit(1)
|
||||
|
||||
return branch_name.strip()
|
||||
|
||||
|
||||
def get_latest_commit_info() -> tuple[str, str]:
|
||||
"""Get the title and body of the latest commit."""
|
||||
exit_code, commit_msg, error = run_git_command(
|
||||
["git", "log", "-1", "--pretty=format:%B"]
|
||||
)
|
||||
|
||||
if exit_code != 0:
|
||||
print(f"Error getting commit info: {error}")
|
||||
sys.exit(1)
|
||||
|
||||
lines = commit_msg.strip().split("\n")
|
||||
title = lines[0].strip() if lines else ""
|
||||
|
||||
body_lines = []
|
||||
for line in lines[1:]:
|
||||
if body_lines or line.strip():
|
||||
body_lines.append(line)
|
||||
|
||||
body = "\n".join(body_lines).strip()
|
||||
|
||||
return title, body
|
||||
|
||||
|
||||
def get_commits_since_main() -> list[tuple[str, str]]:
|
||||
"""Get all commits since main as (title, body) tuples."""
|
||||
exit_code, commit_log, error = run_git_command(
|
||||
[
|
||||
"git",
|
||||
"log",
|
||||
"main..HEAD",
|
||||
"--no-merges",
|
||||
"--pretty=format:%s|%b|---END---",
|
||||
]
|
||||
)
|
||||
|
||||
if exit_code != 0:
|
||||
print(f"Error getting commits since main: {error}")
|
||||
return []
|
||||
|
||||
if not commit_log:
|
||||
return []
|
||||
|
||||
commits = []
|
||||
commit_messages = commit_log.split("---END---")
|
||||
|
||||
for commit_msg in commit_messages:
|
||||
commit_msg = commit_msg.strip()
|
||||
if not commit_msg:
|
||||
continue
|
||||
|
||||
parts = commit_msg.split("|")
|
||||
if len(parts) < 2:
|
||||
continue
|
||||
|
||||
title = parts[0].strip()
|
||||
body = parts[1].strip() if len(parts) > 1 else ""
|
||||
|
||||
if not title:
|
||||
continue
|
||||
|
||||
commits.append((title, body))
|
||||
|
||||
return commits
|
||||
|
||||
|
||||
def open_editor_for_pr() -> tuple[str, str]:
|
||||
"""Open editor to get PR title and description. First line is title, rest is description."""
|
||||
commits_since_main = get_commits_since_main()
|
||||
|
||||
with tempfile.NamedTemporaryFile(
|
||||
mode="w+", suffix="COMMIT_EDITMSG", delete=False
|
||||
) as temp_file:
|
||||
temp_file.flush()
|
||||
temp_file_path = temp_file.name
|
||||
|
||||
for title, body in commits_since_main:
|
||||
temp_file.write(f"{title}\n")
|
||||
if body:
|
||||
temp_file.write(f"{body}\n")
|
||||
temp_file.write("\n")
|
||||
|
||||
temp_file.write("\n")
|
||||
temp_file.write("# Please enter the PR title on the first line.\n")
|
||||
temp_file.write("# Lines starting with '#' will be ignored.\n")
|
||||
temp_file.write("# The first line will be used as the PR title.\n")
|
||||
temp_file.write("# Everything else will be used as the PR description.\n")
|
||||
temp_file.write(
|
||||
"# To abort creation of the PR, close editor with an error code.\n"
|
||||
)
|
||||
temp_file.write("# In vim for example you can use :cq!\n")
|
||||
temp_file.write("#\n")
|
||||
temp_file.write("# All commits since main:\n")
|
||||
temp_file.write("#\n")
|
||||
for i, (title, body) in enumerate(commits_since_main, 1):
|
||||
temp_file.write(f"# Commit {i}:\n")
|
||||
temp_file.write(f"# {title}\n")
|
||||
if body:
|
||||
for line in body.split("\n"):
|
||||
temp_file.write(f"# {line}\n")
|
||||
temp_file.write("#\n")
|
||||
|
||||
try:
|
||||
editor = os.environ.get("EDITOR", "vim")
|
||||
|
||||
exit_code = subprocess.call([editor, temp_file_path])
|
||||
|
||||
if exit_code != 0:
|
||||
print(f"Editor exited with code {exit_code}.")
|
||||
print("AGit PR creation has been aborted.")
|
||||
sys.exit(1)
|
||||
|
||||
with Path(temp_file_path).open() as f:
|
||||
content = f.read()
|
||||
|
||||
lines = []
|
||||
for line in content.split("\n"):
|
||||
if not line.lstrip().startswith("#"):
|
||||
lines.append(line)
|
||||
|
||||
cleaned_content = "\n".join(lines).strip()
|
||||
|
||||
if not cleaned_content:
|
||||
print("No content provided, aborting.")
|
||||
sys.exit(0)
|
||||
|
||||
content_lines = cleaned_content.split("\n")
|
||||
title = content_lines[0].strip()
|
||||
|
||||
if not title:
|
||||
print("No title provided, aborting.")
|
||||
sys.exit(0)
|
||||
|
||||
description_lines = []
|
||||
for line in content_lines[1:]:
|
||||
if description_lines or line.strip():
|
||||
description_lines.append(line)
|
||||
|
||||
description = "\n".join(description_lines).strip()
|
||||
|
||||
return title, description
|
||||
|
||||
finally:
|
||||
with contextlib.suppress(OSError):
|
||||
Path(temp_file_path).unlink()
|
||||
|
||||
|
||||
def create_agit_push(
|
||||
remote: str = "origin",
|
||||
branch: str = "main",
|
||||
topic: str | None = None,
|
||||
title: str | None = None,
|
||||
description: str | None = None,
|
||||
force_push: bool = False,
|
||||
local_branch: str = "HEAD",
|
||||
) -> None:
|
||||
if topic is None:
|
||||
if title is not None:
|
||||
topic = title
|
||||
else:
|
||||
topic = get_current_branch_name()
|
||||
|
||||
refspec = f"{local_branch}:refs/for/{branch}"
|
||||
push_cmd = ["git", "push", remote, refspec]
|
||||
|
||||
push_cmd.extend(["-o", f"topic={topic}"])
|
||||
|
||||
if title:
|
||||
push_cmd.extend(["-o", f"title={title}"])
|
||||
|
||||
if description:
|
||||
escaped_desc = description.rstrip("\n").replace('"', '\\"')
|
||||
push_cmd.extend(["-o", f"description={escaped_desc}"])
|
||||
|
||||
if force_push:
|
||||
push_cmd.extend(["-o", "force-push"])
|
||||
|
||||
if description:
|
||||
print(
|
||||
f" Description: {description[:50]}..."
|
||||
if len(description) > 50
|
||||
else f" Description: {description}"
|
||||
)
|
||||
print()
|
||||
|
||||
exit_code, stdout, stderr = run_git_command(push_cmd)
|
||||
|
||||
if stdout:
|
||||
print(stdout)
|
||||
if stderr:
|
||||
print(stderr, file=sys.stderr)
|
||||
|
||||
if exit_code != 0:
|
||||
print("\nPush failed!")
|
||||
sys.exit(exit_code)
|
||||
else:
|
||||
print("\nPush successful!")
|
||||
|
||||
|
||||
def cmd_create(args: argparse.Namespace) -> None:
|
||||
"""Handle the create subcommand."""
|
||||
title = args.title
|
||||
description = args.description
|
||||
|
||||
if not args.auto and (title is None or description is None):
|
||||
editor_title, editor_description = open_editor_for_pr()
|
||||
if title is None:
|
||||
title = editor_title
|
||||
if description is None:
|
||||
description = editor_description
|
||||
|
||||
create_agit_push(
|
||||
remote=args.remote,
|
||||
branch=args.branch,
|
||||
topic=args.topic,
|
||||
title=title,
|
||||
description=description,
|
||||
force_push=args.force,
|
||||
local_branch=args.local_branch,
|
||||
)
|
||||
|
||||
|
||||
def cmd_list(args: argparse.Namespace) -> None:
|
||||
"""Handle the list subcommand."""
|
||||
prs = fetch_open_prs(args.remote)
|
||||
|
||||
if not prs:
|
||||
print("No open AGit pull requests found.")
|
||||
return
|
||||
|
||||
# This is the only way I found to query the actual AGit PRs
|
||||
# Gitea doesn't seem to have an actual api endpoint for them
|
||||
filtered_prs = [pr for pr in prs if pr.get("head", {}).get("label", "") == ""]
|
||||
|
||||
if not filtered_prs:
|
||||
print("No open AGit pull requests found.")
|
||||
return
|
||||
|
||||
for pr in filtered_prs:
|
||||
formatted_pr = format_pr_with_status(pr, args.remote)
|
||||
print(formatted_pr)
|
||||
|
||||
|
||||
def create_parser() -> argparse.ArgumentParser:
|
||||
parser = argparse.ArgumentParser(
|
||||
prog="agit",
|
||||
description="AGit utility for creating and pulling PRs",
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
epilog=f"""
|
||||
The defaults that are assumed are:
|
||||
TARGET_REMOTE_REPOSITORY = {TARGET_REMOTE_REPOSITORY}
|
||||
DEFAULT_TARGET_BRANCH = {DEFAULT_TARGET_BRANCH}
|
||||
|
||||
Examples:
|
||||
$ agit create
|
||||
Opens editor to compose PR title and description (first line is title, rest is body)
|
||||
|
||||
$ agit create --auto
|
||||
Creates PR using latest commit message automatically
|
||||
|
||||
$ agit create --topic "my-feature"
|
||||
Set a custom topic.
|
||||
|
||||
$ agit create --force
|
||||
Force push to a certain topic
|
||||
|
||||
$ agit list
|
||||
Lists all open pull requests for the current repository
|
||||
""",
|
||||
)
|
||||
|
||||
subparsers = parser.add_subparsers(dest="subcommand", help="Commands")
|
||||
|
||||
create_parser = subparsers.add_parser(
|
||||
"create",
|
||||
aliases=["c"],
|
||||
help="Create an AGit PR",
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
epilog="""
|
||||
Examples:
|
||||
$ agit create
|
||||
Opens editor to compose PR title and description (first line is title, rest is body).
|
||||
|
||||
$ agit create --auto
|
||||
Creates PR using latest commit message automatically (old behavior).
|
||||
|
||||
$ agit create --topic "my-feature"
|
||||
Set a custom topic.
|
||||
|
||||
$ agit create --force
|
||||
Force push to a certain topic
|
||||
""",
|
||||
)
|
||||
|
||||
list_parser = subparsers.add_parser(
|
||||
"list",
|
||||
aliases=["l"],
|
||||
help="List open AGit pull requests",
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
epilog=f"""
|
||||
Examples:
|
||||
$ agit list
|
||||
Lists all open AGit PRs for the current repository.
|
||||
|
||||
$ agit list --remote upstream
|
||||
Lists PRs using the 'upstream' remote instead of '{TARGET_REMOTE_REPOSITORY}'.
|
||||
""",
|
||||
)
|
||||
|
||||
list_parser.add_argument(
|
||||
"-r",
|
||||
"--remote",
|
||||
default=TARGET_REMOTE_REPOSITORY,
|
||||
help=f"Git remote to use for fetching PRs (default: {TARGET_REMOTE_REPOSITORY})",
|
||||
)
|
||||
|
||||
create_parser.add_argument(
|
||||
"-r",
|
||||
"--remote",
|
||||
default=TARGET_REMOTE_REPOSITORY,
|
||||
help=f"Git remote to push to (default: {TARGET_REMOTE_REPOSITORY})",
|
||||
)
|
||||
|
||||
create_parser.add_argument(
|
||||
"-b",
|
||||
"--branch",
|
||||
default=DEFAULT_TARGET_BRANCH,
|
||||
help=f"Target branch for the PR (default: {DEFAULT_TARGET_BRANCH})",
|
||||
)
|
||||
|
||||
create_parser.add_argument(
|
||||
"-l",
|
||||
"--local-branch",
|
||||
default="HEAD",
|
||||
help="Local branch to push (default: HEAD)",
|
||||
)
|
||||
|
||||
create_parser.add_argument(
|
||||
"-t", "--topic", help="Set PR topic (default: current branch name)"
|
||||
)
|
||||
|
||||
create_parser.add_argument(
|
||||
"--title", help="Set the PR title (default: last commit title)"
|
||||
)
|
||||
|
||||
create_parser.add_argument(
|
||||
"--description", help="Override the PR description (default: commit body)"
|
||||
)
|
||||
|
||||
create_parser.add_argument(
|
||||
"-f", "--force", action="store_true", help="Force push the changes"
|
||||
)
|
||||
|
||||
create_parser.add_argument(
|
||||
"-a",
|
||||
"--auto",
|
||||
action="store_true",
|
||||
help="Skip editor and use commit message automatically",
|
||||
)
|
||||
|
||||
create_parser.set_defaults(func=cmd_create)
|
||||
list_parser.set_defaults(func=cmd_list)
|
||||
return parser
|
||||
|
||||
|
||||
def main() -> None:
|
||||
parser = create_parser()
|
||||
args = parser.parse_args()
|
||||
if args.subcommand is None:
|
||||
parser.print_help()
|
||||
sys.exit(0)
|
||||
args.func(args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,27 +0,0 @@
|
||||
{
|
||||
bash,
|
||||
callPackage,
|
||||
git,
|
||||
lib,
|
||||
openssh,
|
||||
...
|
||||
}:
|
||||
let
|
||||
writers = callPackage ../builders/script-writers.nix { };
|
||||
in
|
||||
writers.writePython3Bin "agit" {
|
||||
flakeIgnore = [
|
||||
"E501"
|
||||
"W503" # treefmt reapplies the conditions to trigger this check
|
||||
];
|
||||
makeWrapperArgs = [
|
||||
"--prefix"
|
||||
"PATH"
|
||||
":"
|
||||
(lib.makeBinPath [
|
||||
bash
|
||||
git
|
||||
openssh
|
||||
])
|
||||
];
|
||||
} ./agit.py
|
||||
@@ -13,7 +13,9 @@ log = logging.getLogger(__name__)
|
||||
def main(argv: list[str] = sys.argv) -> int:
|
||||
parser = argparse.ArgumentParser(description="Clan App")
|
||||
parser.add_argument(
|
||||
"--content-uri", type=str, help="The URI of the content to display"
|
||||
"--content-uri",
|
||||
type=str,
|
||||
help="The URI of the content to display",
|
||||
)
|
||||
parser.add_argument("--debug", action="store_true", help="Enable debug mode")
|
||||
parser.add_argument(
|
||||
|
||||
@@ -5,7 +5,7 @@ from contextlib import ExitStack
|
||||
from dataclasses import dataclass, field
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from clan_lib.api import ApiResponse
|
||||
from clan_lib.api import ApiError, ApiResponse, ErrorDataClass
|
||||
from clan_lib.api.tasks import WebThread
|
||||
from clan_lib.async_run import set_current_thread_opkey, set_should_cancel
|
||||
|
||||
@@ -43,7 +43,7 @@ class ApiBridge(ABC):

    def process_request(self, request: BackendRequest) -> None:
        """Process an API request through the middleware chain."""
        from .middleware import MiddlewareContext
        from .middleware import MiddlewareContext  # noqa: PLC0415

        with ExitStack() as stack:
            context = MiddlewareContext(

@@ -56,22 +56,25 @@ class ApiBridge(ABC):
            for middleware in self.middleware_chain:
                try:
                    log.debug(
                        f"{middleware.__class__.__name__} => {request.method_name}"
                        f"{middleware.__class__.__name__} => {request.method_name}",
                    )
                    middleware.process(context)
                except Exception as e:
                except Exception as e:  # noqa: BLE001
                    # If middleware fails, handle error
                    self.send_api_error_response(
                        request.op_key or "unknown", str(e), ["middleware_error"]
                        request.op_key or "unknown",
                        str(e),
                        ["middleware_error"],
                    )
                    return

    def send_api_error_response(
        self, op_key: str, error_message: str, location: list[str]
        self,
        op_key: str,
        error_message: str,
        location: list[str],
    ) -> None:
        """Send an error response."""
        from clan_lib.api import ApiError, ErrorDataClass

        error_data = ErrorDataClass(
            op_key=op_key,
            status="error",

@@ -80,7 +83,7 @@ class ApiBridge(ABC):
                    message="An internal error occured",
                    description=error_message,
                    location=location,
                )
                ),
            ],
        )

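
Note: the two hunks above only reformat `ApiBridge.process_request` and `send_api_error_response`, but the control flow they touch is worth spelling out: each middleware runs in order, and the first one that raises short-circuits the chain with an error response. A rough, self-contained sketch under that assumption (the `Request`/`Bridge` classes below are simplified stand-ins, not the real `MiddlewareContext`/`ErrorDataClass` machinery):

from collections.abc import Callable
from dataclasses import dataclass, field


@dataclass
class Request:
    method_name: str
    op_key: str | None = None


@dataclass
class Bridge:
    # Each middleware is a plain callable here; the real code uses Middleware
    # objects with a process(context) method and a shared ExitStack.
    middleware_chain: list[Callable[[Request], None]] = field(default_factory=list)

    def process_request(self, request: Request) -> None:
        for middleware in self.middleware_chain:
            try:
                middleware(request)
            except Exception as e:  # first failure stops the chain
                self.send_api_error_response(
                    request.op_key or "unknown", str(e), ["middleware_error"]
                )
                return

    def send_api_error_response(
        self, op_key: str, error_message: str, location: list[str]
    ) -> None:
        print(f"error {op_key}: {error_message} at {location}")


def failing(request: Request) -> None:
    raise ValueError("boom")


bridge = Bridge(middleware_chain=[lambda request: None, failing])
bridge.process_request(Request(method_name="list_machines", op_key="op-1"))
# -> error op-1: boom at ['middleware_error']
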
@@ -107,6 +110,7 @@ class ApiBridge(ABC):
            thread_name: Name for the thread (for debugging)
            wait_for_completion: Whether to wait for the thread to complete
            timeout: Timeout in seconds when waiting for completion

        """
        op_key = request.op_key or "unknown"

@@ -116,7 +120,7 @@ class ApiBridge(ABC):
            try:
                log.debug(
                    f"Processing {request.method_name} with args {request.args} "
                    f"and header {request.header} in thread {thread_name}"
                    f"and header {request.header} in thread {thread_name}",
                )
                self.process_request(request)
            finally:

@@ -124,7 +128,9 @@ class ApiBridge(ABC):

        stop_event = threading.Event()
        thread = threading.Thread(
            target=thread_task, args=(stop_event,), name=thread_name
            target=thread_task,
            args=(stop_event,),
            name=thread_name,
        )
        thread.start()

@@ -138,5 +144,7 @@ class ApiBridge(ABC):
        if thread.is_alive():
            stop_event.set()  # Cancel the thread
            self.send_api_error_response(
                op_key, "Request timeout", ["api_bridge", request.method_name]
                op_key,
                "Request timeout",
                ["api_bridge", request.method_name],
            )

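
Note: the threading hunks follow a common pattern: run the request in a worker thread, `join` with a timeout, and if the worker is still alive, set a stop `Event` and report a timeout. A standalone sketch of that pattern (the `handle`/`work` names and the timeout value are illustrative):

import threading
import time


def handle(timeout: float = 1.0) -> None:
    stop_event = threading.Event()

    def work(stop_event: threading.Event) -> None:
        # Long-running task that polls the event to support cooperative cancellation.
        while not stop_event.is_set():
            time.sleep(0.1)

    thread = threading.Thread(
        target=work,
        args=(stop_event,),
        name="request-worker",
    )
    thread.start()

    thread.join(timeout)
    if thread.is_alive():
        stop_event.set()  # Ask the worker to stop and report a timeout to the caller.
        print("Request timeout")


handle()
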
@@ -26,8 +26,7 @@ RESULT: dict[str, SuccessDataClass[list[str] | None] | ErrorDataClass] = {}


def get_clan_folder() -> SuccessDataClass[Flake] | ErrorDataClass:
    """
    Opens the clan folder using the GTK file dialog.
    """Opens the clan folder using the GTK file dialog.
    Returns the path to the clan folder or an error if it fails.
    """
    file_request = FileRequest(

@@ -52,7 +51,7 @@ def get_clan_folder() -> SuccessDataClass[Flake] | ErrorDataClass:
                    message="No folder selected",
                    description="You must select a folder to open.",
                    location=["get_clan_folder"],
                )
                ),
            ],
        )

@@ -66,7 +65,7 @@ def get_clan_folder() -> SuccessDataClass[Flake] | ErrorDataClass:
                    message="Invalid clan folder",
                    description=f"The selected folder '{clan_folder}' is not a valid clan folder.",
                    location=["get_clan_folder"],
                )
                ),
            ],
        )

@@ -92,7 +91,6 @@ def get_system_file(

def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
    def returns(data: SuccessDataClass | ErrorDataClass) -> None:
        global RESULT
        RESULT[op_key] = data

    def on_file_select(file_dialog: Gtk.FileDialog, task: Gio.Task) -> None:

@@ -102,8 +100,10 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
                selected_path = remove_none([gfile.get_path()])
                returns(
                    SuccessDataClass(
                        op_key=op_key, data=selected_path, status="success"
                    )
                        op_key=op_key,
                        data=selected_path,
                        status="success",
                    ),
                )
        except Exception as e:
            log.exception("Error opening file")

@@ -116,9 +116,9 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
                            message=e.__class__.__name__,
                            description=str(e),
                            location=["get_system_file"],
                        )
                        ),
                    ],
                )
                ),
            )

    def on_file_select_multiple(file_dialog: Gtk.FileDialog, task: Gio.Task) -> None:

@@ -128,8 +128,10 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
                selected_paths = remove_none([gfile.get_path() for gfile in gfiles])
                returns(
                    SuccessDataClass(
                        op_key=op_key, data=selected_paths, status="success"
                    )
                        op_key=op_key,
                        data=selected_paths,
                        status="success",
                    ),
                )
            else:
                returns(SuccessDataClass(op_key=op_key, data=None, status="success"))

@@ -144,9 +146,9 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
                            message=e.__class__.__name__,
                            description=str(e),
                            location=["get_system_file"],
                        )
                        ),
                    ],
                )
                ),
            )

    def on_folder_select(file_dialog: Gtk.FileDialog, task: Gio.Task) -> None:

@@ -156,8 +158,10 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
                selected_path = remove_none([gfile.get_path()])
                returns(
                    SuccessDataClass(
                        op_key=op_key, data=selected_path, status="success"
                    )
                        op_key=op_key,
                        data=selected_path,
                        status="success",
                    ),
                )
            else:
                returns(SuccessDataClass(op_key=op_key, data=None, status="success"))

@@ -172,9 +176,9 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
                            message=e.__class__.__name__,
                            description=str(e),
                            location=["get_system_file"],
                        )
                        ),
                    ],
                )
                ),
            )

    def on_save_finish(file_dialog: Gtk.FileDialog, task: Gio.Task) -> None:

@@ -184,8 +188,10 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
                selected_path = remove_none([gfile.get_path()])
                returns(
                    SuccessDataClass(
                        op_key=op_key, data=selected_path, status="success"
                    )
                        op_key=op_key,
                        data=selected_path,
                        status="success",
                    ),
                )
            else:
                returns(SuccessDataClass(op_key=op_key, data=None, status="success"))

@@ -200,9 +206,9 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
                            message=e.__class__.__name__,
                            description=str(e),
                            location=["get_system_file"],
                        )
                        ),
                    ],
                )
                ),
            )

    dialog = Gtk.FileDialog()

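
Note: one hunk above drops `global RESULT` from the nested `returns()` helper. That is safe because `global` is only needed to rebind a module-level name; mutating the dict the name points to works without it. A small illustration with a hypothetical `COUNTS` dict:

COUNTS: dict[str, int] = {}


def record(key: str) -> None:
    # Mutating the module-level dict needs no `global`; the name is only read, not rebound.
    COUNTS[key] = COUNTS.get(key, 0) + 1


def reset() -> None:
    # Rebinding the name to a new object *would* require `global`.
    global COUNTS
    COUNTS = {}


record("a")
record("a")
print(COUNTS)  # {'a': 2}
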
@@ -39,7 +39,7 @@ class ArgumentParsingMiddleware(Middleware):

        except Exception as e:
            log.exception(
                f"Error while parsing arguments for {context.request.method_name}"
                f"Error while parsing arguments for {context.request.method_name}",
            )
            context.bridge.send_api_error_response(
                context.request.op_key or "unknown",

@@ -23,7 +23,9 @@ class Middleware(ABC):
        """Process the request through this middleware."""

    def register_context_manager(
        self, context: MiddlewareContext, cm: AbstractContextManager[Any]
        self,
        context: MiddlewareContext,
        cm: AbstractContextManager[Any],
    ) -> Any:
        """Register a context manager with the exit stack."""
        return context.exit_stack.enter_context(cm)

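
Note: `register_context_manager` defers cleanup to the request's `ExitStack`: anything passed through `exit_stack.enter_context(cm)` has its `__exit__` run when the stack unwinds after the request, in reverse order of registration. A minimal sketch with a toy context manager (the `resource` name is illustrative):

from collections.abc import Iterator
from contextlib import ExitStack, contextmanager


@contextmanager
def resource(name: str) -> Iterator[str]:
    print(f"open {name}")
    try:
        yield name
    finally:
        print(f"close {name}")


with ExitStack() as stack:
    # Analogous to context.exit_stack.enter_context(cm) in the middleware base class:
    # each resource stays open for the rest of the block and is closed on exit,
    # in reverse order of registration.
    a = stack.enter_context(resource("a"))
    b = stack.enter_context(resource("b"))
    print("processing with", a, b)
# Output: open a, open b, processing with a b, close b, close a
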
@@ -25,23 +25,26 @@ class LoggingMiddleware(Middleware):
        try:
            # Handle log group configuration
            log_group: list[str] | None = context.request.header.get("logging", {}).get(
                "group_path", None
                "group_path",
                None,
            )
            if log_group is not None:
                if not isinstance(log_group, list):
                    msg = f"Expected log_group to be a list, got {type(log_group)}"
                    raise TypeError(msg)  # noqa: TRY301
                log.warning(
                    f"Using log group {log_group} for {context.request.method_name} with op_key {context.request.op_key}"
                    f"Using log group {log_group} for {context.request.method_name} with op_key {context.request.op_key}",
                )
            # Create log file
            log_file = self.log_manager.create_log_file(
                method, op_key=context.request.op_key or "unknown", group_path=log_group
                method,
                op_key=context.request.op_key or "unknown",
                group_path=log_group,
            ).get_file_path()

        except Exception as e:
            log.exception(
                f"Error while handling request header of {context.request.method_name}"
                f"Error while handling request header of {context.request.method_name}",
            )
            context.bridge.send_api_error_response(
                context.request.op_key or "unknown",

@@ -76,7 +79,8 @@ class LoggingMiddleware(Middleware):
                    line_buffering=True,
                )
                self.handler = setup_logging(
                    log.getEffectiveLevel(), log_file=handler_stream
                    log.getEffectiveLevel(),
                    log_file=handler_stream,
                )

                return self

@@ -90,10 +94,10 @@ class LoggingMiddleware(Middleware):
                if self.handler:
                    self.handler.root_logger.removeHandler(self.handler.new_handler)
                    self.handler.new_handler.close()
                if self.log_f:
                    self.log_f.close()
                if self.original_ctx:
                    set_async_ctx(self.original_ctx)
                if self.log_f:
                    self.log_f.close()

        # Register the logging context manager
        self.register_context_manager(context, LoggingContextManager(log_file))

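
Note: the `LoggingMiddleware` hunks reorder cleanup inside the nested context manager and reformat the `setup_logging` call. The underlying idea is to attach a log handler for the duration of one request and remove it afterwards; a stripped-down sketch using only the stdlib `logging` module (the `RequestLogCapture` class and file path are placeholders, not the project's `setup_logging`):

import logging
from pathlib import Path
from types import TracebackType


class RequestLogCapture:
    """Attach a FileHandler to the root logger for the lifetime of one request."""

    def __init__(self, log_file: Path) -> None:
        self.log_file = log_file
        self.handler: logging.FileHandler | None = None

    def __enter__(self) -> "RequestLogCapture":
        self.handler = logging.FileHandler(self.log_file)
        logging.getLogger().addHandler(self.handler)
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        tb: TracebackType | None,
    ) -> None:
        # Remove and close the handler so the file is released once the request ends.
        if self.handler:
            logging.getLogger().removeHandler(self.handler)
            self.handler.close()


with RequestLogCapture(Path("/tmp/request.log")):
    logging.getLogger(__name__).warning("handled one request")
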
@@ -32,7 +32,7 @@ class MethodExecutionMiddleware(Middleware):

        except Exception as e:
            log.exception(
                f"Error while handling result of {context.request.method_name}"
                f"Error while handling result of {context.request.method_name}",
            )
            context.bridge.send_api_error_response(
                context.request.op_key or "unknown",

@@ -1,5 +1,6 @@
import logging
import os
import time
from dataclasses import dataclass
from pathlib import Path

@@ -16,6 +17,7 @@ from clan_app.api.middleware import (
    LoggingMiddleware,
    MethodExecutionMiddleware,
)
from clan_app.deps.http.http_server import HttpApiServer
from clan_app.deps.webview.webview import Size, SizeHint, Webview

log = logging.getLogger(__name__)

@@ -48,7 +50,7 @@ def app_run(app_opts: ClanAppOptions) -> int:
    # Add a log group ["clans", <dynamic_name>, "machines", <dynamic_name>]
    log_manager = LogManager(base_dir=user_data_dir() / "clan-app" / "logs")
    clan_log_group = LogGroupConfig("clans", "Clans").add_child(
        LogGroupConfig("machines", "Machines")
        LogGroupConfig("machines", "Machines"),
    )
    log_manager = log_manager.add_root_group_config(clan_log_group)
    # Init LogManager global in log_manager_api module

@@ -64,8 +66,6 @@ def app_run(app_opts: ClanAppOptions) -> int:
    # Start HTTP API server if requested
    http_server = None
    if app_opts.http_api:
        from clan_app.deps.http.http_server import HttpApiServer

        openapi_file = os.getenv("OPENAPI_FILE", None)
        swagger_dist = os.getenv("SWAGGER_UI_DIST", None)

@@ -89,14 +89,12 @@ def app_run(app_opts: ClanAppOptions) -> int:
        # HTTP-only mode - keep the server running
        log.info("HTTP API server running...")
        log.info(
            f"Swagger: http://{app_opts.http_host}:{app_opts.http_port}/api/swagger"
            f"Swagger: http://{app_opts.http_host}:{app_opts.http_port}/api/swagger",
        )

        log.info("Press Ctrl+C to stop the server")
        try:
            # Keep the main thread alive
            import time

            while True:
                time.sleep(1)
        except KeyboardInterrupt:

@@ -121,7 +119,7 @@ def app_run(app_opts: ClanAppOptions) -> int:
    webview.add_middleware(LoggingMiddleware(log_manager=log_manager))
    webview.add_middleware(MethodExecutionMiddleware(api=API))

    webview.bind_jsonschema_api(API, log_manager=log_manager)
    webview.bind_jsonschema_api(API)
    webview.navigate(content_uri)
    webview.run()

Binary file not shown. After: Size 20 KiB
Binary file not shown. Before: Size 3.1 KiB
Binary file not shown. After: Size 726 B
Binary file not shown. Before: Size 375 B
Binary file not shown. After: Size 1.8 KiB
Some files were not shown because too many files have changed in this diff.