Compare commits: fix-typogr...demo-v1.1
1094 commits
Commit list: 1094 commits (45e8917679 … 81a13ac6c2); only the SHA1 column of the Author / SHA1 / Date table was captured.
.envrc (4 changed lines)
@@ -1,5 +1,5 @@
if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8="
if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4="
fi

use flake

@@ -9,4 +9,4 @@ jobs:
runs-on: nix
steps:
- uses: actions/checkout@v3
- run: nix run --refresh github:Mic92/nix-fast-build -- --no-nom
- run: nix run --refresh github:Mic92/nix-fast-build -- --no-nom --eval-workers 20
@@ -1,68 +0,0 @@
name: assets1
on:
push:
branches:
- main
jobs:
test:
if: ${{ github.actor != 'ui-asset-bot' }}
runs-on: nix
steps:
- name: Checkout code
uses: actions/checkout@v3

- name: Get changed files using defaults
id: changed-files
uses: tj-actions/changed-files@v32
with:
fetch-depth: 2

- name: Check if UI files are in the list of modified files
run: |
set -xeuo pipefail
echo "Modified files: $MODIFIED_FILES"
if echo "$MODIFIED_FILES" | grep -q "pkgs/ui/" \
|| echo "$MODIFIED_FILES" | grep -q ".gitea/workflows/ui_assets.yaml"; then

echo "UI files have changed"
./pkgs/ui/nix/update-ui-assets.sh

# git push if we have a diff
if [[ -n $(git diff) ]]; then

DEPS=$(nix shell --inputs-from '.#' "nixpkgs#coreutils-full" -c bash -c "echo \$PATH")
export PATH=$PATH:$DEPS

# Setup git config
git config --global user.email "ui-asset-bot@clan.lol"
git config --global user.name "ui-asset-bot"

################################################
# #
# WARNING: SECRETS ARE BEING PROCESSED HERE. #
# !DO NOT LOG THIS! #
# #
################################################
set +x
AUTH_TOKEN=$(echo -n "x-access-token:$GITEA_TOKEN" | base64)
git config http."$GITHUB_SERVER_URL/".extraheader "AUTHORIZATION: basic $AUTH_TOKEN"
set -x
################################################
# #
# END OF SECRETS AREA #
# #
################################################

# Commit and push
git commit -am "update ui-assets.nix"

echo "Current branch: $GITHUB_REF_NAME"
git push origin HEAD:$GITHUB_REF_NAME
fi
else
echo "No UI files changed. Skipping asset build and push"
fi
env:
MODIFIED_FILES: ${{ steps.changed-files.outputs.modified_files }}
GITEA_TOKEN: ${{ secrets.BOT_ACCESS_TOKEN }}
.gitignore (vendored, 9 changed lines)
@@ -1,8 +1,17 @@
.direnv
***/.hypothesis
out.log
.coverage.*
**/qubeclan
**/testdir
democlan
example_clan
result*
/pkgs/clan-cli/clan_cli/nixpkgs
/pkgs/clan-cli/clan_cli/webui/assets
/machines
nixos.qcow2
**/*.glade~

# python
__pycache__
README.md (26 changed lines)
@@ -1,9 +1,23 @@
# clan.lol core
# cLAN Core Repository

This is the monorepo of the clan.lol project
In here are all the packages we use, all the nixosModules we use/expose, the CLI and tests for everything.
Welcome to the cLAN Core Repository, the heart of the [clan.lol](https://clan.lol/) project! This monorepo houses all the essential packages, NixOS modules, CLI tools, and tests you need to contribute and work with the cLAN project.

## cLAN config tool
## Getting Started

- The quickstart guide can be found here: [here](/clan/clan-core/src/branch/main/docs/quickstart.md)
- Find the docs [here](/clan/clan-core/src/branch/main/docs/clan-config.md)
If you're new to cLAN and eager to dive in, start with our quickstart guide:

- **Quickstart Guide**: Check out [quickstart.md](docs/quickstart.md) to get up and running with cLAN in no time.

## Managing Secrets

Security is paramount, and cLAN provides guidelines for handling secrets effectively:

- **Secrets Management**: Learn how to manage secrets securely by reading [secrets-management.md](docs/secrets-management.md).

## Contributing to cLAN

We welcome contributions from the community, and we've prepared a comprehensive guide to help you get started:

- **Contribution Guidelines**: Find out how to contribute and make a meaningful impact on the cLAN project by reading [contributing.md](docs/contributing.md).

Whether you're a newcomer or a seasoned developer, we look forward to your contributions and collaboration on the cLAN project. Let's build amazing things together!
checks/backups/flake-module.nix (new file, 128 lines)
@@ -0,0 +1,128 @@
|
||||
{ self, ... }:
|
||||
let
|
||||
clan = self.lib.buildClan {
|
||||
clanName = "testclan";
|
||||
directory = ../..;
|
||||
machines = {
|
||||
test_backup_client = {
|
||||
imports = [ self.nixosModules.test_backup_client ];
|
||||
fileSystems."/".device = "/dev/null";
|
||||
boot.loader.grub.device = "/dev/null";
|
||||
};
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
flake.nixosConfigurations = { inherit (clan.nixosConfigurations) test_backup_client; };
|
||||
flake.clanInternals.machines = clan.clanInternals.machines;
|
||||
flake.nixosModules = {
|
||||
test_backup_server = { ... }: {
|
||||
imports = [
|
||||
self.clanModules.borgbackup
|
||||
];
|
||||
services.sshd.enable = true;
|
||||
services.borgbackup.repos.testrepo = {
|
||||
authorizedKeys = [
|
||||
(builtins.readFile ../lib/ssh/pubkey)
|
||||
];
|
||||
};
|
||||
};
|
||||
test_backup_client = { pkgs, lib, config, ... }:
|
||||
let
|
||||
dependencies = [
|
||||
self
|
||||
pkgs.stdenv.drvPath
|
||||
clan.clanInternals.machines.x86_64-linux.test_backup_client.config.system.clan.deployment.file
|
||||
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
|
||||
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
self.clanModules.borgbackup
|
||||
];
|
||||
networking.hostName = "client";
|
||||
services.sshd.enable = true;
|
||||
users.users.root.openssh.authorizedKeys.keyFiles = [
|
||||
../lib/ssh/pubkey
|
||||
];
|
||||
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
|
||||
environment.etc."install-closure".source = "${closureInfo}/store-paths";
|
||||
nix.settings = {
|
||||
substituters = lib.mkForce [ ];
|
||||
hashed-mirrors = null;
|
||||
connect-timeout = lib.mkForce 3;
|
||||
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
|
||||
};
|
||||
system.extraDependencies = dependencies;
|
||||
clanCore.state.test-backups.folders = [ "/var/test-backups" ];
|
||||
clan.borgbackup = {
|
||||
enable = true;
|
||||
destinations.test_backup_server = {
|
||||
repo = "borg@server:.";
|
||||
rsh = "ssh -i /root/.ssh/id_ed25519 -o StrictHostKeyChecking=no";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
perSystem = { nodes, pkgs, ... }: {
|
||||
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
|
||||
test-backups =
|
||||
(import ../lib/test-base.nix)
|
||||
{
|
||||
name = "test-backups";
|
||||
nodes.server = {
|
||||
imports = [
|
||||
self.nixosModules.test_backup_server
|
||||
self.nixosModules.clanCore
|
||||
{
|
||||
clanCore.machineName = "server";
|
||||
clanCore.clanDir = ../..;
|
||||
}
|
||||
];
|
||||
};
|
||||
nodes.client = {
|
||||
imports = [
|
||||
self.nixosModules.test_backup_client
|
||||
self.nixosModules.clanCore
|
||||
{
|
||||
clanCore.machineName = "client";
|
||||
clanCore.clanDir = ../..;
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
import json
|
||||
start_all()
|
||||
|
||||
# setup
|
||||
client.succeed("mkdir -m 700 /root/.ssh")
|
||||
client.succeed(
|
||||
"cat ${../lib/ssh/privkey} > /root/.ssh/id_ed25519"
|
||||
)
|
||||
client.succeed("chmod 600 /root/.ssh/id_ed25519")
|
||||
client.wait_for_unit("sshd", timeout=30)
|
||||
client.succeed("ssh -o StrictHostKeyChecking=accept-new root@client hostname")
|
||||
|
||||
# dummy data
|
||||
client.succeed("mkdir /var/test-backups")
|
||||
client.succeed("echo testing > /var/test-backups/somefile")
|
||||
|
||||
# create
|
||||
client.succeed("clan --flake ${../..} backups create test_backup_client")
|
||||
client.wait_until_succeeds("! systemctl is-active borgbackup-job-test_backup_server")
|
||||
|
||||
# list
|
||||
backup_id = json.loads(client.succeed("borg-job-test_backup_server list --json"))["archives"][0]["archive"]
|
||||
assert(backup_id in client.succeed("clan --flake ${../..} backups list test_backup_client"))
|
||||
|
||||
# restore
|
||||
client.succeed("rm -f /var/test-backups/somefile")
|
||||
client.succeed(f"clan --flake ${../..} backups restore test_backup_client borgbackup {backup_id}")
|
||||
assert(client.succeed("cat /var/test-backups/somefile").strip() == "testing")
|
||||
'';
|
||||
}
|
||||
{ inherit pkgs self; };
|
||||
};
|
||||
};
|
||||
}
|
||||
checks/borgbackup/default.nix (new file, 36 lines)
@@ -0,0 +1,36 @@
(import ../lib/container-test.nix) ({ ... }: {
  name = "borgbackup";

  nodes.machine = { self, ... }: {
    imports = [
      self.clanModules.borgbackup
      self.nixosModules.clanCore
      {
        services.openssh.enable = true;
        services.borgbackup.repos.testrepo = {
          authorizedKeys = [
            (builtins.readFile ../lib/ssh/pubkey)
          ];
        };
      }
      {
        clanCore.machineName = "machine";
        clanCore.clanDir = ./.;
        clanCore.state.testState.folders = [ "/etc/state" ];
        environment.etc.state.text = "hello world";
        clan.borgbackup = {
          enable = true;
          destinations.test = {
            repo = "borg@localhost:.";
            rsh = "ssh -i ${../lib/ssh/privkey} -o StrictHostKeyChecking=no";
          };
        };
      }
    ];
  };
  testScript = ''
    start_all()
    machine.systemctl("start --wait borgbackup-job-test.service")
    assert "machine-test" in machine.succeed("BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes /run/current-system/sw/bin/borg-job-test list")
  '';
})
checks/container/default.nix (new file, 14 lines)
@@ -0,0 +1,14 @@
(import ../lib/container-test.nix) ({ ... }: {
  name = "secrets";

  nodes.machine = { ... }: {
    networking.hostName = "machine";
    services.openssh.enable = true;
    services.openssh.startWhenNeeded = false;
  };
  testScript = ''
    start_all()
    machine.succeed("systemctl status sshd")
    machine.wait_for_unit("sshd")
  '';
})
checks/deltachat/default.nix (new file, 24 lines)
@@ -0,0 +1,24 @@
(import ../lib/container-test.nix) ({ pkgs, ... }: {
  name = "secrets";

  nodes.machine = { self, ... }: {
    imports = [
      self.clanModules.deltachat
      self.nixosModules.clanCore
      {
        clanCore.machineName = "machine";
        clanCore.clanDir = ./.;
      }
    ];
  };
  testScript = ''
    start_all()
    machine.wait_for_unit("maddy")
    # imap
    machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 143")
    # smtp submission
    machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 587")
    # smtp
    machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 25")
  '';
})
@@ -1,6 +1,8 @@
{ self, ... }: {
imports = [
./impure/flake-module.nix
./backups/flake-module.nix
./installation/flake-module.nix
];
perSystem = { pkgs, lib, self', ... }: {
checks =
@@ -11,9 +13,16 @@
# this gives us a reference to our flake but also all flake inputs
inherit self;
};
nixosTests = {
nixosTests = lib.optionalAttrs (pkgs.stdenv.isLinux) {
# import our test
secrets = import ./secrets nixosTestArgs;
container = import ./container nixosTestArgs;
deltachat = import ./deltachat nixosTestArgs;
meshnamed = import ./meshnamed nixosTestArgs;
zt-tcp-relay = import ./zt-tcp-relay nixosTestArgs;
borgbackup = import ./borgbackup nixosTestArgs;
syncthing = import ./syncthing nixosTestArgs;
wayland-proxy-virtwl = import ./wayland-proxy-virtwl nixosTestArgs;
};
schemaTests = pkgs.callPackages ./schemas.nix {
inherit self;
@@ -25,5 +34,21 @@
// lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (self'.legacyPackages.homeConfigurations or { });
in
nixosTests // schemaTests // flakeOutputs;
legacyPackages = {
nixosTests =
let
nixosTestArgs = {
# reference to nixpkgs for the current system
inherit pkgs;
# this gives us a reference to our flake but also all flake inputs
inherit self;
};
in
lib.optionalAttrs (pkgs.stdenv.isLinux) {
# import our test
secrets = import ./secrets nixosTestArgs;
container = import ./container nixosTestArgs;
};
};
};
}
@@ -1,75 +1,18 @@
|
||||
{ self, ... }: {
|
||||
perSystem = { pkgs, lib, self', ... }:
|
||||
let
|
||||
impureChecks = {
|
||||
clan-pytest-impure = pkgs.writeShellScriptBin "clan-pytest-impure" ''
|
||||
#!${pkgs.bash}/bin/bash
|
||||
set -euo pipefail
|
||||
{
|
||||
perSystem = { pkgs, lib, ... }: {
|
||||
# a script that executes all other checks
|
||||
packages.impure-checks = pkgs.writeShellScriptBin "impure-checks" ''
|
||||
#!${pkgs.bash}/bin/bash
|
||||
set -euo pipefail
|
||||
|
||||
export PATH="${lib.makeBinPath [
|
||||
pkgs.gitMinimal
|
||||
pkgs.nix
|
||||
]}"
|
||||
ROOT=$(git rev-parse --show-toplevel)
|
||||
cd "$ROOT/pkgs/clan-cli"
|
||||
nix develop "$ROOT#clan-cli" -c bash -c 'TMPDIR=/tmp python -m pytest -m impure -s ./tests'
|
||||
'';
|
||||
check-clan-template = pkgs.writeShellScriptBin "check-clan-template" ''
|
||||
#!${pkgs.bash}/bin/bash
|
||||
set -euox pipefail
|
||||
|
||||
export CLANTMP=$(${pkgs.coreutils}/bin/mktemp -d)
|
||||
trap "${pkgs.coreutils}/bin/chmod -R +w '$CLANTMP'; ${pkgs.coreutils}/bin/rm -rf '$CLANTMP'" EXIT
|
||||
|
||||
export PATH="${lib.makeBinPath [
|
||||
pkgs.coreutils
|
||||
pkgs.curl
|
||||
pkgs.gitMinimal
|
||||
pkgs.gnugrep
|
||||
pkgs.jq
|
||||
pkgs.openssh
|
||||
pkgs.nix
|
||||
self'.packages.clan-cli
|
||||
]}"
|
||||
|
||||
cd $CLANTMP
|
||||
|
||||
echo initialize new clan
|
||||
nix flake init -t ${self}#new-clan
|
||||
|
||||
echo override clan input to the current version
|
||||
nix flake lock --override-input clan-core ${self}
|
||||
nix flake lock --override-input nixpkgs ${self.inputs.nixpkgs}
|
||||
|
||||
echo ensure flake outputs can be listed
|
||||
nix flake show
|
||||
|
||||
echo create a machine
|
||||
clan machines create machine1
|
||||
|
||||
echo check machine1 exists
|
||||
clan machines list | grep -q machine1
|
||||
|
||||
echo check machine1 appears in nixosConfigurations
|
||||
nix flake show --json | jq '.nixosConfigurations' | grep -q machine1
|
||||
|
||||
echo check machine1 jsonschema can be evaluated
|
||||
nix eval .#nixosConfigurations.machine1.config.clanSchema
|
||||
'';
|
||||
};
|
||||
in
|
||||
{
|
||||
packages =
|
||||
impureChecks // {
|
||||
# a script that executes all other checks
|
||||
impure-checks = pkgs.writeShellScriptBin "impure-checks" ''
|
||||
#!${pkgs.bash}/bin/bash
|
||||
set -euo pipefail
|
||||
${lib.concatMapStringsSep "\n" (name: ''
|
||||
echo -e "\n\nrunning check ${name}\n"
|
||||
${impureChecks.${name}}/bin/* "$@"
|
||||
'') (lib.attrNames impureChecks)}
|
||||
'';
|
||||
};
|
||||
};
|
||||
export PATH="${lib.makeBinPath [
|
||||
pkgs.gitMinimal
|
||||
pkgs.nix
|
||||
pkgs.rsync # needed to have rsync installed on the dummy ssh server
|
||||
]}"
|
||||
ROOT=$(git rev-parse --show-toplevel)
|
||||
cd "$ROOT/pkgs/clan-cli"
|
||||
nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -s -m impure ./tests $@"
|
||||
'';
|
||||
};
|
||||
}
|
||||
|
||||
checks/installation/flake-module.nix (new file, 123 lines)
@@ -0,0 +1,123 @@
|
||||
{ self, ... }:
|
||||
let
|
||||
clan = self.lib.buildClan {
|
||||
clanName = "testclan";
|
||||
directory = ../..;
|
||||
machines = {
|
||||
test_install_machine = {
|
||||
imports = [ self.nixosModules.test_install_machine ];
|
||||
};
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
flake.nixosConfigurations = { inherit (clan.nixosConfigurations) test_install_machine; };
|
||||
flake.clanInternals.machines = clan.clanInternals.machines;
|
||||
flake.nixosModules = {
|
||||
test_install_machine = { lib, modulesPath, ... }: {
|
||||
imports = [
|
||||
self.clanModules.diskLayouts
|
||||
(modulesPath + "/testing/test-instrumentation.nix") # we need these 2 modules always to be able to run the tests
|
||||
(modulesPath + "/profiles/qemu-guest.nix")
|
||||
];
|
||||
fileSystems."/nix/store" = lib.mkForce {
|
||||
device = "nix-store";
|
||||
fsType = "9p";
|
||||
neededForBoot = true;
|
||||
options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ];
|
||||
};
|
||||
clan.diskLayouts.singleDiskExt4.device = "/dev/vdb";
|
||||
|
||||
environment.etc."install-successful".text = "ok";
|
||||
|
||||
boot.consoleLogLevel = lib.mkForce 100;
|
||||
boot.kernelParams = [
|
||||
"boot.shell_on_fail"
|
||||
];
|
||||
};
|
||||
};
|
||||
perSystem = { nodes, pkgs, lib, ... }:
|
||||
let
|
||||
dependencies = [
|
||||
self
|
||||
pkgs.stdenv.drvPath
|
||||
clan.clanInternals.machines.x86_64-linux.test_install_machine.config.system.build.toplevel
|
||||
clan.clanInternals.machines.x86_64-linux.test_install_machine.config.system.build.diskoScript
|
||||
clan.clanInternals.machines.x86_64-linux.test_install_machine.config.system.clan.deployment.file
|
||||
pkgs.nixos-anywhere
|
||||
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
|
||||
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
|
||||
in
|
||||
{
|
||||
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
|
||||
test-installation =
|
||||
(import ../lib/test-base.nix)
|
||||
{
|
||||
name = "test-installation";
|
||||
nodes.target = {
|
||||
services.openssh.enable = true;
|
||||
users.users.root.openssh.authorizedKeys.keyFiles = [
|
||||
../lib/ssh/pubkey
|
||||
];
|
||||
system.nixos.variant_id = "installer";
|
||||
virtualisation.emptyDiskImages = [ 4096 ];
|
||||
nix.settings = {
|
||||
substituters = lib.mkForce [ ];
|
||||
hashed-mirrors = null;
|
||||
connect-timeout = lib.mkForce 3;
|
||||
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
|
||||
experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
};
|
||||
};
|
||||
nodes.client = {
|
||||
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
|
||||
environment.etc."install-closure".source = "${closureInfo}/store-paths";
|
||||
virtualisation.memorySize = 2048;
|
||||
nix.settings = {
|
||||
substituters = lib.mkForce [ ];
|
||||
hashed-mirrors = null;
|
||||
connect-timeout = lib.mkForce 3;
|
||||
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
|
||||
experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
};
|
||||
system.extraDependencies = dependencies;
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
def create_test_machine(oldmachine=None, args={}): # taken from <nixpkgs/nixos/tests/installer.nix>
|
||||
machine = create_machine({
|
||||
"qemuFlags":
|
||||
'-cpu max -m 1024 -virtfs local,path=/nix/store,security_model=none,mount_tag=nix-store,'
|
||||
f' -drive file={oldmachine.state_dir}/empty0.qcow2,id=drive1,if=none,index=1,werror=report'
|
||||
f' -device virtio-blk-pci,drive=drive1',
|
||||
} | args)
|
||||
driver.machines.append(machine)
|
||||
return machine
|
||||
|
||||
|
||||
start_all()
|
||||
|
||||
client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
|
||||
client.wait_until_succeeds("ssh -o StrictHostKeyChecking=accept-new -v root@target hostname")
|
||||
|
||||
client.succeed("clan --flake ${../..} machines install test_install_machine root@target >&2")
|
||||
try:
|
||||
target.shutdown()
|
||||
except BrokenPipeError:
|
||||
# qemu has already exited
|
||||
pass
|
||||
|
||||
new_machine = create_test_machine(oldmachine=target, args={ "name": "new_machine" })
|
||||
assert(new_machine.succeed("cat /etc/install-successful").strip() == "ok")
|
||||
'';
|
||||
}
|
||||
{ inherit pkgs self; };
|
||||
};
|
||||
};
|
||||
}
|
||||
checks/lib/container-driver/module.nix (new file, 88 lines)
@@ -0,0 +1,88 @@
|
||||
{ hostPkgs, lib, config, ... }:
|
||||
let
|
||||
testDriver = hostPkgs.python3.pkgs.callPackage ./package.nix {
|
||||
inherit (config) extraPythonPackages;
|
||||
inherit (hostPkgs.pkgs) util-linux systemd;
|
||||
};
|
||||
containers = map (m: m.system.build.toplevel) (lib.attrValues config.nodes);
|
||||
pythonizeName = name:
|
||||
let
|
||||
head = lib.substring 0 1 name;
|
||||
tail = lib.substring 1 (-1) name;
|
||||
in
|
||||
(if builtins.match "[A-z_]" head == null then "_" else head) +
|
||||
lib.stringAsChars (c: if builtins.match "[A-z0-9_]" c == null then "_" else c) tail;
|
||||
nodeHostNames =
|
||||
let
|
||||
nodesList = map (c: c.system.name) (lib.attrValues config.nodes);
|
||||
in
|
||||
nodesList ++ lib.optional (lib.length nodesList == 1 && !lib.elem "machine" nodesList) "machine";
|
||||
machineNames = map (name: "${name}: Machine;") pythonizedNames;
|
||||
pythonizedNames = map pythonizeName nodeHostNames;
|
||||
in
|
||||
{
|
||||
driver = lib.mkForce (hostPkgs.runCommand "nixos-test-driver-${config.name}"
|
||||
{
|
||||
nativeBuildInputs = [
|
||||
hostPkgs.makeWrapper
|
||||
] ++ lib.optionals (!config.skipTypeCheck) [ hostPkgs.mypy ];
|
||||
buildInputs = [ testDriver ];
|
||||
testScript = config.testScriptString;
|
||||
preferLocalBuild = true;
|
||||
passthru = config.passthru;
|
||||
meta = config.meta // {
|
||||
mainProgram = "nixos-test-driver";
|
||||
};
|
||||
}
|
||||
''
|
||||
mkdir -p $out/bin
|
||||
|
||||
containers=(${toString containers})
|
||||
|
||||
${lib.optionalString (!config.skipTypeCheck) ''
|
||||
# prepend type hints so the test script can be type checked with mypy
|
||||
cat "${./test-script-prepend.py}" >> testScriptWithTypes
|
||||
echo "${builtins.toString machineNames}" >> testScriptWithTypes
|
||||
echo -n "$testScript" >> testScriptWithTypes
|
||||
|
||||
echo "Running type check (enable/disable: config.skipTypeCheck)"
|
||||
echo "See https://nixos.org/manual/nixos/stable/#test-opt-skipTypeCheck"
|
||||
|
||||
mypy --no-implicit-optional \
|
||||
--pretty \
|
||||
--no-color-output \
|
||||
testScriptWithTypes
|
||||
''}
|
||||
|
||||
echo -n "$testScript" >> $out/test-script
|
||||
|
||||
ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-test-driver
|
||||
|
||||
wrapProgram $out/bin/nixos-test-driver \
|
||||
${lib.concatStringsSep " " (map (name: "--add-flags '--container ${name}'") containers)} \
|
||||
--add-flags "--test-script '$out/test-script'"
|
||||
'');
|
||||
|
||||
test = lib.mkForce (lib.lazyDerivation {
|
||||
# lazyDerivation improves performance when only passthru items and/or meta are used.
|
||||
derivation = hostPkgs.stdenv.mkDerivation {
|
||||
name = "vm-test-run-${config.name}";
|
||||
|
||||
requiredSystemFeatures = [ "uid-range" ];
|
||||
|
||||
buildCommand = ''
|
||||
mkdir -p $out
|
||||
|
||||
# effectively mute the XMLLogger
|
||||
export LOGFILE=/dev/null
|
||||
|
||||
${config.driver}/bin/nixos-test-driver -o $out
|
||||
'';
|
||||
|
||||
passthru = config.passthru;
|
||||
|
||||
meta = config.meta;
|
||||
};
|
||||
inherit (config) passthru meta;
|
||||
});
|
||||
}
|
||||
checks/lib/container-driver/package.nix (new file, 9 lines)
@@ -0,0 +1,9 @@
{ extraPythonPackages, python3Packages, buildPythonApplication, setuptools, util-linux, systemd }:
buildPythonApplication {
  pname = "test-driver";
  version = "0.0.1";
  propagatedBuildInputs = [ util-linux systemd ] ++ extraPythonPackages python3Packages;
  nativeBuildInputs = [ setuptools ];
  format = "pyproject";
  src = ./.;
}
checks/lib/container-driver/pyproject.toml (new file, 30 lines)
@@ -0,0 +1,30 @@
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"

[project]
name = "nixos-test-driver"
version = "0.0.0"

[project.scripts]
nixos-test-driver = "test_driver:main"

[tool.setuptools.packages]
find = {}

[tool.setuptools.package-data]
test_driver = ["py.typed"]

[tool.ruff]
target-version = "py311"
line-length = 88

select = [ "E", "F", "I", "U", "N", "RUF", "ANN", "A" ]
ignore = ["E501", "ANN101", "ANN401", "A003"]

[tool.mypy]
python_version = "3.11"
warn_redundant_casts = true
disallow_untyped_calls = true
disallow_untyped_defs = true
no_implicit_optional = true
checks/lib/container-driver/test-script-prepend.py (new file, 9 lines)
@@ -0,0 +1,9 @@
# This file contains type hints that can be prepended to Nix test scripts so they can be type
# checked.

from collections.abc import Callable

from test_driver import Machine

start_all: Callable[[], None]
machines: list[Machine]
checks/lib/container-driver/test_driver/__init__.py (new file, 354 lines)
@@ -0,0 +1,354 @@
|
||||
import argparse
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import time
|
||||
from collections.abc import Callable
|
||||
from pathlib import Path
|
||||
from tempfile import TemporaryDirectory
|
||||
from typing import Any
|
||||
|
||||
|
||||
def prepare_machine_root(machinename: str, root: Path) -> None:
|
||||
root.mkdir(parents=True, exist_ok=True)
|
||||
root.joinpath("etc").mkdir(parents=True, exist_ok=True)
|
||||
root.joinpath(".env").write_text(
|
||||
"\n".join(f"{k}={v}" for k, v in os.environ.items())
|
||||
)
|
||||
|
||||
|
||||
def pythonize_name(name: str) -> str:
|
||||
return re.sub(r"^[^A-z_]|[^A-z0-9_]", "_", name)
|
||||
|
||||
|
||||
def retry(fn: Callable, timeout: int = 900) -> None:
|
||||
"""Call the given function repeatedly, with 1 second intervals,
|
||||
until it returns True or a timeout is reached.
|
||||
"""
|
||||
|
||||
for _ in range(timeout):
|
||||
if fn(False):
|
||||
return
|
||||
time.sleep(1)
|
||||
|
||||
if not fn(True):
|
||||
raise Exception(f"action timed out after {timeout} seconds")
|
||||
|
||||
|
||||
class Machine:
|
||||
def __init__(self, name: str, toplevel: Path, rootdir: Path, out_dir: str) -> None:
|
||||
self.name = name
|
||||
self.toplevel = toplevel
|
||||
self.out_dir = out_dir
|
||||
self.process: subprocess.Popen | None = None
|
||||
self.rootdir: Path = rootdir
|
||||
|
||||
def start(self) -> None:
|
||||
prepare_machine_root(self.name, self.rootdir)
|
||||
cmd = [
|
||||
"systemd-nspawn",
|
||||
"--keep-unit",
|
||||
"-M",
|
||||
self.name,
|
||||
"-D",
|
||||
self.rootdir,
|
||||
"--register=no",
|
||||
"--resolv-conf=off",
|
||||
"--bind-ro=/nix/store",
|
||||
"--bind",
|
||||
self.out_dir,
|
||||
"--bind=/proc:/run/host/proc",
|
||||
"--bind=/sys:/run/host/sys",
|
||||
"--private-network",
|
||||
self.toplevel.joinpath("init"),
|
||||
]
|
||||
env = os.environ.copy()
|
||||
env["SYSTEMD_NSPAWN_UNIFIED_HIERARCHY"] = "1"
|
||||
self.process = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True, env=env)
|
||||
self.container_pid = self.get_systemd_process()
|
||||
|
||||
def get_systemd_process(self) -> int:
|
||||
assert self.process is not None, "Machine not started"
|
||||
assert self.process.stdout is not None, "Machine has no stdout"
|
||||
for line in self.process.stdout:
|
||||
print(line, end="")
|
||||
if line.startswith("systemd[1]: Startup finished in"):
|
||||
break
|
||||
else:
|
||||
raise RuntimeError(f"Failed to start container {self.name}")
|
||||
childs = (
|
||||
Path(f"/proc/{self.process.pid}/task/{self.process.pid}/children")
|
||||
.read_text()
|
||||
.split()
|
||||
)
|
||||
assert (
|
||||
len(childs) == 1
|
||||
), f"Expected exactly one child process for systemd-nspawn, got {childs}"
|
||||
try:
|
||||
return int(childs[0])
|
||||
except ValueError:
|
||||
raise RuntimeError(f"Failed to parse child process id {childs[0]}")
|
||||
|
||||
def get_unit_info(self, unit: str) -> dict[str, str]:
|
||||
proc = self.systemctl(f'--no-pager show "{unit}"')
|
||||
if proc.returncode != 0:
|
||||
raise Exception(
|
||||
f'retrieving systemctl info for unit "{unit}"'
|
||||
+ f" failed with exit code {proc.returncode}"
|
||||
)
|
||||
|
||||
line_pattern = re.compile(r"^([^=]+)=(.*)$")
|
||||
|
||||
def tuple_from_line(line: str) -> tuple[str, str]:
|
||||
match = line_pattern.match(line)
|
||||
assert match is not None
|
||||
return match[1], match[2]
|
||||
|
||||
return dict(
|
||||
tuple_from_line(line)
|
||||
for line in proc.stdout.split("\n")
|
||||
if line_pattern.match(line)
|
||||
)
|
||||
|
||||
def execute(
|
||||
self,
|
||||
command: str,
|
||||
check_return: bool = True,
|
||||
check_output: bool = True,
|
||||
timeout: int | None = 900,
|
||||
) -> subprocess.CompletedProcess:
|
||||
"""
|
||||
Execute a shell command, returning a list `(status, stdout)`.
|
||||
|
||||
Commands are run with `set -euo pipefail` set:
|
||||
|
||||
- If several commands are separated by `;` and one fails, the
|
||||
command as a whole will fail.
|
||||
|
||||
- For pipelines, the last non-zero exit status will be returned
|
||||
(if there is one; otherwise zero will be returned).
|
||||
|
||||
- Dereferencing unset variables fails the command.
|
||||
|
||||
- It will wait for stdout to be closed.
|
||||
|
||||
If the command detaches, it must close stdout, as `execute` will wait
|
||||
for this to consume all output reliably. This can be achieved by
|
||||
redirecting stdout to stderr `>&2`, to `/dev/console`, `/dev/null` or
|
||||
a file. Examples of detaching commands are `sleep 365d &`, where the
|
||||
shell forks a new process that can write to stdout and `xclip -i`, where
|
||||
the `xclip` command itself forks without closing stdout.
|
||||
|
||||
Takes an optional parameter `check_return` that defaults to `True`.
|
||||
Setting this parameter to `False` will not check for the return code
|
||||
and return -1 instead. This can be used for commands that shut down
|
||||
the VM and would therefore break the pipe that would be used for
|
||||
retrieving the return code.
|
||||
|
||||
A timeout for the command can be specified (in seconds) using the optional
|
||||
`timeout` parameter, e.g., `execute(cmd, timeout=10)` or
|
||||
`execute(cmd, timeout=None)`. The default is 900 seconds.
|
||||
"""
|
||||
|
||||
# Always run command with shell opts
|
||||
command = f"set -euo pipefail; {command}"
|
||||
|
||||
proc = subprocess.run(
|
||||
[
|
||||
"nsenter",
|
||||
"--target",
|
||||
str(self.container_pid),
|
||||
"--mount",
|
||||
"--uts",
|
||||
"--ipc",
|
||||
"--net",
|
||||
"--pid",
|
||||
"--cgroup",
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
command,
|
||||
],
|
||||
timeout=timeout,
|
||||
check=False,
|
||||
stdout=subprocess.PIPE,
|
||||
text=True,
|
||||
)
|
||||
return proc
|
||||
|
||||
def systemctl(self, q: str) -> subprocess.CompletedProcess:
|
||||
"""
|
||||
Runs `systemctl` commands with optional support for
|
||||
`systemctl --user`
|
||||
|
||||
```py
|
||||
# run `systemctl list-jobs --no-pager`
|
||||
machine.systemctl("list-jobs --no-pager")
|
||||
|
||||
# spawn a shell for `any-user` and run
|
||||
# `systemctl --user list-jobs --no-pager`
|
||||
machine.systemctl("list-jobs --no-pager", "any-user")
|
||||
```
|
||||
"""
|
||||
return self.execute(f"systemctl {q}")
|
||||
|
||||
def wait_for_unit(self, unit: str, timeout: int = 900) -> None:
|
||||
"""
|
||||
Wait for a systemd unit to get into "active" state.
|
||||
Throws exceptions on "failed" and "inactive" states as well as after
|
||||
timing out.
|
||||
"""
|
||||
|
||||
def check_active(_: bool) -> bool:
|
||||
info = self.get_unit_info(unit)
|
||||
state = info["ActiveState"]
|
||||
if state == "failed":
|
||||
raise Exception(f'unit "{unit}" reached state "{state}"')
|
||||
|
||||
if state == "inactive":
|
||||
proc = self.systemctl("list-jobs --full 2>&1")
|
||||
if "No jobs" in proc.stdout:
|
||||
info = self.get_unit_info(unit)
|
||||
if info["ActiveState"] == state:
|
||||
raise Exception(
|
||||
f'unit "{unit}" is inactive and there are no pending jobs'
|
||||
)
|
||||
|
||||
return state == "active"
|
||||
|
||||
retry(check_active, timeout)
|
||||
|
||||
def succeed(self, command: str, timeout: int | None = None) -> str:
|
||||
res = self.execute(command, timeout=timeout)
|
||||
if res.returncode != 0:
|
||||
raise RuntimeError(f"Failed to run command {command}")
|
||||
return res.stdout
|
||||
|
||||
def shutdown(self) -> None:
|
||||
"""
|
||||
Shut down the machine, waiting for the VM to exit.
|
||||
"""
|
||||
if self.process:
|
||||
self.process.terminate()
|
||||
self.process.wait()
|
||||
self.process = None
|
||||
|
||||
def release(self) -> None:
|
||||
self.shutdown()
|
||||
|
||||
|
||||
def setup_filesystems() -> None:
|
||||
# We don't care about cleaning up the mount points, since we're running in a nix sandbox.
|
||||
Path("/run").mkdir(parents=True, exist_ok=True)
|
||||
subprocess.run(["mount", "-t", "tmpfs", "none", "/run"], check=True)
|
||||
subprocess.run(["mount", "-t", "cgroup2", "none", "/sys/fs/cgroup"], check=True)
|
||||
Path("/etc").chmod(0o755)
|
||||
Path("/etc/os-release").touch()
|
||||
Path("/etc/machine-id").write_text("a5ea3f98dedc0278b6f3cc8c37eeaeac")
|
||||
|
||||
|
||||
class Driver:
|
||||
def __init__(self, containers: list[Path], testscript: str, out_dir: str) -> None:
|
||||
self.containers = containers
|
||||
self.testscript = testscript
|
||||
self.out_dir = out_dir
|
||||
setup_filesystems()
|
||||
|
||||
self.tempdir = TemporaryDirectory()
|
||||
tempdir_path = Path(self.tempdir.name)
|
||||
|
||||
self.machines = []
|
||||
for container in containers:
|
||||
name_match = re.match(r".*-nixos-system-(.+)-\d.+", container.name)
|
||||
if not name_match:
|
||||
raise ValueError(f"Unable to extract hostname from {container.name}")
|
||||
name = name_match.group(1)
|
||||
self.machines.append(
|
||||
Machine(
|
||||
name=name,
|
||||
toplevel=container,
|
||||
rootdir=tempdir_path / name,
|
||||
out_dir=self.out_dir,
|
||||
)
|
||||
)
|
||||
|
||||
def start_all(self) -> None:
|
||||
for machine in self.machines:
|
||||
machine.start()
|
||||
|
||||
def test_symbols(self) -> dict[str, Any]:
|
||||
general_symbols = dict(
|
||||
start_all=self.start_all,
|
||||
machines=self.machines,
|
||||
driver=self,
|
||||
Machine=Machine, # for typing
|
||||
)
|
||||
machine_symbols = {pythonize_name(m.name): m for m in self.machines}
|
||||
# If there's exactly one machine, make it available under the name
|
||||
# "machine", even if it's not called that.
|
||||
if len(self.machines) == 1:
|
||||
(machine_symbols["machine"],) = self.machines
|
||||
print(
|
||||
"additionally exposed symbols:\n "
|
||||
+ ", ".join(map(lambda m: m.name, self.machines))
|
||||
+ ",\n "
|
||||
+ ", ".join(list(general_symbols.keys()))
|
||||
)
|
||||
return {**general_symbols, **machine_symbols}
|
||||
|
||||
def test_script(self) -> None:
|
||||
"""Run the test script"""
|
||||
exec(self.testscript, self.test_symbols(), None)
|
||||
|
||||
def run_tests(self) -> None:
|
||||
"""Run the test script (for non-interactive test runs)"""
|
||||
self.test_script()
|
||||
|
||||
def __enter__(self) -> "Driver":
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
|
||||
for machine in self.machines:
|
||||
machine.release()
|
||||
|
||||
|
||||
def writeable_dir(arg: str) -> Path:
|
||||
"""Raises an ArgumentTypeError if the given argument isn't a writeable directory
|
||||
Note: We want to fail as early as possible if a directory isn't writeable,
|
||||
since an executed nixos-test could fail (very late) because of the test-driver
|
||||
writing to a directory without proper permissions.
|
||||
"""
|
||||
path = Path(arg)
|
||||
if not path.is_dir():
|
||||
raise argparse.ArgumentTypeError(f"{path} is not a directory")
|
||||
if not os.access(path, os.W_OK):
|
||||
raise argparse.ArgumentTypeError(f"{path} is not a writeable directory")
|
||||
return path
|
||||
|
||||
|
||||
def main() -> None:
|
||||
arg_parser = argparse.ArgumentParser(prog="nixos-test-driver")
|
||||
arg_parser.add_argument(
|
||||
"--containers",
|
||||
nargs="+",
|
||||
type=Path,
|
||||
help="container system toplevel paths",
|
||||
)
|
||||
arg_parser.add_argument(
|
||||
"--test-script",
|
||||
help="the test script to run",
|
||||
type=Path,
|
||||
)
|
||||
arg_parser.add_argument(
|
||||
"-o",
|
||||
"--output-directory",
|
||||
default=Path.cwd(),
|
||||
help="the directory to bind to /run/test-results",
|
||||
type=writeable_dir,
|
||||
)
|
||||
args = arg_parser.parse_args()
|
||||
with Driver(
|
||||
args.containers,
|
||||
args.test_script.read_text(),
|
||||
args.output_directory.resolve(),
|
||||
) as driver:
|
||||
driver.run_tests()
|
||||
33 checks/lib/container-test.nix (Normal file)
@@ -0,0 +1,33 @@
|
||||
test:
|
||||
{ pkgs
|
||||
, self
|
||||
, ...
|
||||
}:
|
||||
let
|
||||
inherit (pkgs) lib;
|
||||
nixos-lib = import (pkgs.path + "/nixos/lib") { };
|
||||
in
|
||||
(nixos-lib.runTest ({ hostPkgs, ... }: {
|
||||
hostPkgs = pkgs;
|
||||
# speed-up evaluation
|
||||
defaults = {
|
||||
documentation.enable = lib.mkDefault false;
|
||||
boot.isContainer = true;
|
||||
|
||||
# undo qemu stuff
|
||||
system.build.initialRamdisk = "";
|
||||
virtualisation.sharedDirectories = lib.mkForce { };
|
||||
networking.useDHCP = false;
|
||||
|
||||
# we have no private networking so far
|
||||
networking.interfaces = lib.mkForce { };
|
||||
#networking.primaryIPAddress = lib.mkForce null;
|
||||
systemd.services.backdoor.enable = false;
|
||||
};
|
||||
# to accept external dependencies such as disko
|
||||
node.specialArgs.self = self;
|
||||
imports = [
|
||||
test
|
||||
./container-driver/module.nix
|
||||
];
|
||||
})).config.result
|
||||
7 checks/lib/ssh/privkey (Normal file)
@@ -0,0 +1,7 @@
|
||||
-----BEGIN OPENSSH PRIVATE KEY-----
|
||||
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
|
||||
QyNTUxOQAAACASG8CFZy8vrqA2erivzgnNUCuOkiBngt5lXPOXai2EMAAAAJAOOON0Djjj
|
||||
dAAAAAtzc2gtZWQyNTUxOQAAACASG8CFZy8vrqA2erivzgnNUCuOkiBngt5lXPOXai2EMA
|
||||
AAAEDTjUOWSYeU3Xu+Ol1731b9rXeEVXSdrhVOraA+7/35JBIbwIVnLy+uoDZ6uK/OCc1Q
|
||||
K46SIGeC3mVc85dqLYQwAAAADGxhc3NAaWduYXZpYQE=
|
||||
-----END OPENSSH PRIVATE KEY-----
|
||||
1 checks/lib/ssh/pubkey (Normal file)
@@ -0,0 +1 @@
|
||||
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBIbwIVnLy+uoDZ6uK/OCc1QK46SIGeC3mVc85dqLYQw lass@ignavia
|
||||
@@ -10,9 +10,12 @@ in
|
||||
(nixos-lib.runTest {
|
||||
hostPkgs = pkgs;
|
||||
# speed-up evaluation
|
||||
defaults.documentation.enable = lib.mkDefault false;
|
||||
defaults = {
|
||||
documentation.enable = lib.mkDefault false;
|
||||
nix.settings.min-free = 0;
|
||||
};
|
||||
|
||||
# to accept external dependencies such as disko
|
||||
node.specialArgs.self = self;
|
||||
imports = [ test ];
|
||||
}).config.result
|
||||
|
||||
|
||||
21 checks/meshnamed/default.nix (Normal file)
@@ -0,0 +1,21 @@
|
||||
(import ../lib/container-test.nix) ({ pkgs, ... }: {
|
||||
name = "meshnamed";
|
||||
|
||||
nodes.machine = { self, ... }: {
|
||||
imports = [
|
||||
self.nixosModules.clanCore
|
||||
{
|
||||
clanCore.machineName = "machine";
|
||||
clan.networking.meshnamed.networks.vpn.subnet = "fd43:7def:4b50:28d0:4e99:9347:3035:17ef/88";
|
||||
clanCore.clanDir = ./.;
|
||||
}
|
||||
];
|
||||
};
|
||||
testScript = ''
|
||||
start_all()
|
||||
machine.wait_for_unit("meshnamed")
|
||||
out = machine.succeed("${pkgs.dnsutils}/bin/dig AAAA foo.7vbx332lkaunatuzsndtanix54.vpn @meshnamed +short")
|
||||
print(out)
|
||||
assert out.strip() == "fd43:7def:4b50:28d0:4e99:9347:3035:17ef"
|
||||
'';
|
||||
})
|
||||
@@ -1,54 +0,0 @@
|
||||
{ self, lib, inputs, ... }:
|
||||
let
|
||||
inherit (builtins)
|
||||
mapAttrs
|
||||
toJSON
|
||||
toFile
|
||||
;
|
||||
inherit (lib)
|
||||
mapAttrs'
|
||||
;
|
||||
clanLib = self.lib;
|
||||
clanModules = self.clanModules;
|
||||
|
||||
|
||||
in
|
||||
{
|
||||
perSystem = { pkgs, ... }:
|
||||
let
|
||||
baseModule = {
|
||||
imports =
|
||||
(import (inputs.nixpkgs + "/nixos/modules/module-list.nix"))
|
||||
++ [{
|
||||
nixpkgs.hostPlatform = pkgs.system;
|
||||
}];
|
||||
};
|
||||
|
||||
optionsFromModule = module:
|
||||
let
|
||||
evaled = lib.evalModules {
|
||||
modules = [ module baseModule ];
|
||||
};
|
||||
in
|
||||
evaled.options.clan.networking;
|
||||
|
||||
clanModuleSchemas =
|
||||
mapAttrs
|
||||
(_: module: clanLib.jsonschema.parseOptions (optionsFromModule module))
|
||||
clanModules;
|
||||
|
||||
mkTest = name: schema: pkgs.runCommand "schema-${name}" { } ''
|
||||
${pkgs.check-jsonschema}/bin/check-jsonschema \
|
||||
--check-metaschema ${toFile "schema-${name}" (toJSON schema)}
|
||||
touch $out
|
||||
'';
|
||||
in
|
||||
{
|
||||
checks = mapAttrs'
|
||||
(name: schema: {
|
||||
name = "schema-${name}";
|
||||
value = mkTest name schema;
|
||||
})
|
||||
clanModuleSchemas;
|
||||
};
|
||||
}
|
||||
@@ -7,6 +7,7 @@ let
|
||||
(import (pkgs.path + "/nixos/modules/module-list.nix"))
|
||||
++ [{
|
||||
nixpkgs.hostPlatform = "x86_64-linux";
|
||||
clanCore.clanName = "dummy";
|
||||
}];
|
||||
};
|
||||
|
||||
|
||||
108 checks/syncthing/default.nix (Normal file)
@@ -0,0 +1,108 @@
|
||||
(import ../lib/test-base.nix) (
|
||||
# Using nixos-test, because our own test system doesn't support the necessary
|
||||
# features for systemd.
|
||||
{ lib, ... }:
|
||||
{
|
||||
name = "syncthing";
|
||||
|
||||
nodes.introducer =
|
||||
{ self, ... }:
|
||||
{
|
||||
imports = [
|
||||
self.clanModules.syncthing
|
||||
self.nixosModules.clanCore
|
||||
{
|
||||
clanCore.machineName = "introducer";
|
||||
clanCore.clanDir = ./.;
|
||||
environment.etc = {
|
||||
"syncthing.pam".source = ./introducer/introducer_test_cert;
|
||||
"syncthing.key".source = ./introducer/introducer_test_key;
|
||||
"syncthing.api".source = ./introducer/introducer_test_api;
|
||||
};
|
||||
clanCore.secrets.syncthing.secrets."syncthing.api".path = "/etc/syncthing.api";
|
||||
services.syncthing.cert = "/etc/syncthing.pam";
|
||||
services.syncthing.key = "/etc/syncthing.key";
|
||||
# Doesn't test zerotier!
|
||||
services.syncthing.openDefaultPorts = true;
|
||||
services.syncthing.settings.folders = {
|
||||
"Shared" = {
|
||||
enable = true;
|
||||
path = "~/Shared";
|
||||
versioning = {
|
||||
type = "trashcan";
|
||||
params = {
|
||||
cleanoutDays = "30";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
clan.syncthing.autoAcceptDevices = true;
|
||||
clan.syncthing.autoShares = [ "Shared" ];
|
||||
# For faster Tests
|
||||
systemd.timers.syncthing-auto-accept.timerConfig = {
|
||||
OnActiveSec = 1;
|
||||
OnUnitActiveSec = 1;
|
||||
};
|
||||
}
|
||||
];
|
||||
};
|
||||
nodes.peer1 =
|
||||
{ self, ... }:
|
||||
{
|
||||
imports = [
|
||||
self.clanModules.syncthing
|
||||
self.nixosModules.clanCore
|
||||
{
|
||||
clanCore.machineName = "peer1";
|
||||
clanCore.clanDir = ./.;
|
||||
clan.syncthing.introducer = lib.strings.removeSuffix "\n" (
|
||||
builtins.readFile ./introducer/introducer_device_id
|
||||
);
|
||||
environment.etc = {
|
||||
"syncthing.pam".source = ./peer_1/peer_1_test_cert;
|
||||
"syncthing.key".source = ./peer_1/peer_1_test_key;
|
||||
};
|
||||
services.syncthing.openDefaultPorts = true;
|
||||
services.syncthing.cert = "/etc/syncthing.pam";
|
||||
services.syncthing.key = "/etc/syncthing.key";
|
||||
}
|
||||
];
|
||||
};
|
||||
nodes.peer2 =
|
||||
{ self, ... }:
|
||||
{
|
||||
imports = [
|
||||
self.clanModules.syncthing
|
||||
self.nixosModules.clanCore
|
||||
{
|
||||
clanCore.machineName = "peer2";
|
||||
clanCore.clanDir = ./.;
|
||||
clan.syncthing.introducer = lib.strings.removeSuffix "\n" (
|
||||
builtins.readFile ./introducer/introducer_device_id
|
||||
);
|
||||
environment.etc = {
|
||||
"syncthing.pam".source = ./peer_2/peer_2_test_cert;
|
||||
"syncthing.key".source = ./peer_2/peer_2_test_key;
|
||||
};
|
||||
services.syncthing.openDefaultPorts = true;
|
||||
services.syncthing.cert = "/etc/syncthing.pam";
|
||||
services.syncthing.key = "/etc/syncthing.key";
|
||||
}
|
||||
];
|
||||
};
|
||||
testScript = ''
|
||||
start_all()
|
||||
introducer.wait_for_unit("syncthing")
|
||||
peer1.wait_for_unit("syncthing")
|
||||
peer2.wait_for_unit("syncthing")
|
||||
peer1.wait_for_file("/home/user/Shared")
|
||||
peer2.wait_for_file("/home/user/Shared")
|
||||
introducer.shutdown()
|
||||
peer1.execute("echo hello > /home/user/Shared/hello")
|
||||
peer2.wait_for_file("/home/user/Shared/hello")
|
||||
out = peer2.succeed("cat /home/user/Shared/hello")
|
||||
print(out)
|
||||
assert "hello" in out
|
||||
'';
|
||||
}
|
||||
)
|
||||
1 checks/syncthing/introducer/introducer_device_id (Normal file)
@@ -0,0 +1 @@
|
||||
RN4ZZIJ-5AOJVWT-JD5IAAZ-SWVDTHU-B4RWCXE-AEM3SRG-QBM2KC5-JTGUNQT
|
||||
1 checks/syncthing/introducer/introducer_test_api (Normal file)
@@ -0,0 +1 @@
|
||||
fKwzSQK43LWMnjVK2TDjpTkziY364dvP
|
||||
14 checks/syncthing/introducer/introducer_test_cert (Normal file)
@@ -0,0 +1,14 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICHDCCAaOgAwIBAgIJAJDWPRNYN7/7MAoGCCqGSM49BAMCMEoxEjAQBgNVBAoT
|
||||
CVN5bmN0aGluZzEgMB4GA1UECxMXQXV0b21hdGljYWxseSBHZW5lcmF0ZWQxEjAQ
|
||||
BgNVBAMTCXN5bmN0aGluZzAeFw0yMzEyMDUwMDAwMDBaFw00MzExMzAwMDAwMDBa
|
||||
MEoxEjAQBgNVBAoTCVN5bmN0aGluZzEgMB4GA1UECxMXQXV0b21hdGljYWxseSBH
|
||||
ZW5lcmF0ZWQxEjAQBgNVBAMTCXN5bmN0aGluZzB2MBAGByqGSM49AgEGBSuBBAAi
|
||||
A2IABEzIpSQGUVVlrSndNjiwkgZ045eH26agwT5RTN44bGRe8SJqBWC7HP3V7u1C
|
||||
6ZQZALSDoDUG5Oi89wGrFnxU48mYFSJFlZAVzyZoqfxVMof3vnk3uFDPo47HA4ex
|
||||
8fi6yaNVMFMwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr
|
||||
BgEFBQcDAjAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCXN5bmN0aGluZzAKBggq
|
||||
hkjOPQQDAgNnADBkAjB+d84wmaQuv3c94ctxV0sMh23xeTR1cPNcE8wbPQYxHmbO
|
||||
HbJ3IWo5HF3di63pVgECMBUfzpmFo8dshYR2/76Ovh573Svzk2+NKEMrqRyoNVFr
|
||||
JNQFhCtHbFT1rYfqYWgJBQ==
|
||||
-----END CERTIFICATE-----
|
||||
6 checks/syncthing/introducer/introducer_test_key (Normal file)
@@ -0,0 +1,6 @@
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
MIGkAgEBBDBvqJxL4s7JFy0y6Ulg7C9C0m3N9VZlW328uMJrwznGuCdRHa/VD4qY
|
||||
IcjtwJisdaqgBwYFK4EEACKhZANiAARMyKUkBlFVZa0p3TY4sJIGdOOXh9umoME+
|
||||
UUzeOGxkXvEiagVguxz91e7tQumUGQC0g6A1BuTovPcBqxZ8VOPJmBUiRZWQFc8m
|
||||
aKn8VTKH9755N7hQz6OOxwOHsfH4usk=
|
||||
-----END EC PRIVATE KEY-----
|
||||
14 checks/syncthing/peer_1/peer_1_test_cert (Normal file)
@@ -0,0 +1,14 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICHTCCAaKgAwIBAgIIT2gZuvqVFP0wCgYIKoZIzj0EAwIwSjESMBAGA1UEChMJ
|
||||
U3luY3RoaW5nMSAwHgYDVQQLExdBdXRvbWF0aWNhbGx5IEdlbmVyYXRlZDESMBAG
|
||||
A1UEAxMJc3luY3RoaW5nMB4XDTIzMTIwNjAwMDAwMFoXDTQzMTIwMTAwMDAwMFow
|
||||
SjESMBAGA1UEChMJU3luY3RoaW5nMSAwHgYDVQQLExdBdXRvbWF0aWNhbGx5IEdl
|
||||
bmVyYXRlZDESMBAGA1UEAxMJc3luY3RoaW5nMHYwEAYHKoZIzj0CAQYFK4EEACID
|
||||
YgAEBAr1CsciwCa0vi7eC6xxuSGijY3txbjtsyFanec/fge4oJBD3rVpaLKFETb3
|
||||
TvHHsuvblzElcP483MEVq6FMUoxwuL9CzTtpJrRhtwSmAs8AHLFu8irVn8sZjgkL
|
||||
sXMho1UwUzAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG
|
||||
AQUFBwMCMAwGA1UdEwEB/wQCMAAwFAYDVR0RBA0wC4IJc3luY3RoaW5nMAoGCCqG
|
||||
SM49BAMCA2kAMGYCMQDbrtLgfcyMMIkNQn+PJe9DHYAqj8C47LQcWuIY/nekhOu0
|
||||
aUfKctEAwyBtI60Y5zcCMQCEdgD/6CNBh7Qqq3z3CKPhlrpxHtCO5tNw17k0jfdH
|
||||
haCwJInHZvZgclHk4EtFpTw=
|
||||
-----END CERTIFICATE-----
|
||||
6 checks/syncthing/peer_1/peer_1_test_key (Normal file)
@@ -0,0 +1,6 @@
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
MIGkAgEBBDA14Nqo17Xs/xRLGH2KLuyzjKp4eW9iWFobVNM93RZZbECT++W3XcQc
|
||||
cEc5WVtiPmWgBwYFK4EEACKhZANiAAQECvUKxyLAJrS+Lt4LrHG5IaKNje3FuO2z
|
||||
IVqd5z9+B7igkEPetWlosoURNvdO8cey69uXMSVw/jzcwRWroUxSjHC4v0LNO2km
|
||||
tGG3BKYCzwAcsW7yKtWfyxmOCQuxcyE=
|
||||
-----END EC PRIVATE KEY-----
|
||||
14 checks/syncthing/peer_2/peer_2_test_cert (Normal file)
@@ -0,0 +1,14 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICHjCCAaOgAwIBAgIJAKbMWefkf1rVMAoGCCqGSM49BAMCMEoxEjAQBgNVBAoT
|
||||
CVN5bmN0aGluZzEgMB4GA1UECxMXQXV0b21hdGljYWxseSBHZW5lcmF0ZWQxEjAQ
|
||||
BgNVBAMTCXN5bmN0aGluZzAeFw0yMzEyMDYwMDAwMDBaFw00MzEyMDEwMDAwMDBa
|
||||
MEoxEjAQBgNVBAoTCVN5bmN0aGluZzEgMB4GA1UECxMXQXV0b21hdGljYWxseSBH
|
||||
ZW5lcmF0ZWQxEjAQBgNVBAMTCXN5bmN0aGluZzB2MBAGByqGSM49AgEGBSuBBAAi
|
||||
A2IABFZTMt4RfsfBue0va7QuNdjfXMI4HfZzJCEcG+b9MtV7FlDmwMKX5fgGykD9
|
||||
FBbC7yiza3+xCobdMb5bakz1qYJ7nUFCv1mwSDo2eNM+/XE+rJmlre8NwkwGmvzl
|
||||
h1uhyqNVMFMwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr
|
||||
BgEFBQcDAjAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCXN5bmN0aGluZzAKBggq
|
||||
hkjOPQQDAgNpADBmAjEAwzhsroN6R4/quWeXj6dO5gt5CfSTLkLee6vrcuIP5i1U
|
||||
rZvJ3OKQVmmGG6IWYe7iAjEAyuq3X2wznaqiw2YK3IDI4qVeYWpCUap0fwRNq7/x
|
||||
4dC4k+BOzHcuJOwNBIY/bEuK
|
||||
-----END CERTIFICATE-----
|
||||
6 checks/syncthing/peer_2/peer_2_test_key (Normal file)
@@ -0,0 +1,6 @@
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
MIGkAgEBBDCXHGpvumKjjDRxB6SsjZOb7duw3w+rdlGQCJTIvRThLjD6zwjnyImi
|
||||
7c3PD5nWtLqgBwYFK4EEACKhZANiAARWUzLeEX7HwbntL2u0LjXY31zCOB32cyQh
|
||||
HBvm/TLVexZQ5sDCl+X4BspA/RQWwu8os2t/sQqG3TG+W2pM9amCe51BQr9ZsEg6
|
||||
NnjTPv1xPqyZpa3vDcJMBpr85Ydboco=
|
||||
-----END EC PRIVATE KEY-----
|
||||
25 checks/wayland-proxy-virtwl/default.nix (Normal file)
@@ -0,0 +1,25 @@
|
||||
import ../lib/test-base.nix ({ config, pkgs, lib, ... }: {
|
||||
name = "wayland-proxy-virtwl";
|
||||
|
||||
nodes.machine = { self, ... }: {
|
||||
imports = [
|
||||
self.nixosModules.clanCore
|
||||
{
|
||||
clanCore.machineName = "machine";
|
||||
clanCore.clanDir = ./.;
|
||||
}
|
||||
];
|
||||
services.wayland-proxy-virtwl.enable = true;
|
||||
|
||||
virtualisation.qemu.options = [
|
||||
"-vga none -device virtio-gpu-rutabaga,cross-domain=on,hostmem=4G,wsi=headless"
|
||||
];
|
||||
|
||||
virtualisation.qemu.package = lib.mkForce pkgs.qemu_kvm;
|
||||
};
|
||||
testScript = ''
|
||||
start_all()
|
||||
# use machinectl
|
||||
machine.succeed("machinectl shell .host ${config.nodes.machine.systemd.package}/bin/systemctl --user start wayland-proxy-virtwl >&2")
|
||||
'';
|
||||
})
|
||||
20 checks/zt-tcp-relay/default.nix (Normal file)
@@ -0,0 +1,20 @@
|
||||
(import ../lib/container-test.nix) ({ pkgs, ... }: {
|
||||
name = "zt-tcp-relay";
|
||||
|
||||
nodes.machine = { self, ... }: {
|
||||
imports = [
|
||||
self.nixosModules.clanCore
|
||||
self.clanModules.zt-tcp-relay
|
||||
{
|
||||
clanCore.machineName = "machine";
|
||||
clanCore.clanDir = ./.;
|
||||
}
|
||||
];
|
||||
};
|
||||
testScript = ''
|
||||
start_all()
|
||||
machine.wait_for_unit("zt-tcp-relay.service")
|
||||
out = machine.succeed("${pkgs.netcat}/bin/nc -z -v localhost 4443")
|
||||
print(out)
|
||||
'';
|
||||
})
|
||||
90 clanModules/borgbackup.nix (Normal file)
@@ -0,0 +1,90 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
let
|
||||
cfg = config.clan.borgbackup;
|
||||
in
|
||||
{
|
||||
options.clan.borgbackup = {
|
||||
enable = lib.mkEnableOption "backups with borgbackup";
|
||||
destinations = lib.mkOption {
|
||||
type = lib.types.attrsOf (lib.types.submodule ({ name, ... }: {
|
||||
options = {
|
||||
name = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = name;
|
||||
description = "the name of the backup job";
|
||||
};
|
||||
repo = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "the borgbackup repository to backup to";
|
||||
};
|
||||
rsh = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "ssh -i ${config.clanCore.secrets.borgbackup.secrets."borgbackup.ssh".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
|
||||
description = "the rsh to use for the backup";
|
||||
};
|
||||
|
||||
};
|
||||
}));
|
||||
description = ''
|
||||
destinations where the machine should be backed up to
|
||||
'';
|
||||
};
|
||||
};
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.borgbackup.jobs = lib.mapAttrs
|
||||
(_: dest: {
|
||||
paths = lib.flatten (map (state: state.folders) (lib.attrValues config.clanCore.state));
|
||||
exclude = [
|
||||
"*.pyc"
|
||||
];
|
||||
repo = dest.repo;
|
||||
environment.BORG_RSH = dest.rsh;
|
||||
encryption.mode = "none";
|
||||
compression = "auto,zstd";
|
||||
startAt = "*-*-* 01:00:00";
|
||||
preHook = ''
|
||||
set -x
|
||||
'';
|
||||
|
||||
prune.keep = {
|
||||
within = "1d"; # Keep all archives from the last day
|
||||
daily = 7;
|
||||
weekly = 4;
|
||||
monthly = 0;
|
||||
};
|
||||
})
|
||||
cfg.destinations;
|
||||
|
||||
clanCore.secrets.borgbackup = {
|
||||
facts."borgbackup.ssh.pub" = { };
|
||||
secrets."borgbackup.ssh" = { };
|
||||
generator.path = [ pkgs.openssh pkgs.coreutils ];
|
||||
generator.script = ''
|
||||
ssh-keygen -t ed25519 -N "" -f "$secrets"/borgbackup.ssh
|
||||
mv "$secrets"/borgbackup.ssh.pub "$facts"/borgbackup.ssh.pub
|
||||
'';
|
||||
};
|
||||
|
||||
clanCore.backups.providers.borgbackup = {
|
||||
# TODO list needs to run locally or on the remote machine
|
||||
list = ''
|
||||
${lib.concatMapStringsSep "\n" (dest: ''
|
||||
# we need yes here to skip the changed url verification
|
||||
yes y | borg-job-${dest.name} list --json | jq -r '. + {"job-name": "${dest.name}"}'
|
||||
'') (lib.attrValues cfg.destinations)}
|
||||
'';
|
||||
create = ''
|
||||
${lib.concatMapStringsSep "\n" (dest: ''
|
||||
systemctl start borgbackup-job-${dest.name}
|
||||
'') (lib.attrValues cfg.destinations)}
|
||||
'';
|
||||
|
||||
restore = ''
|
||||
set -efu
|
||||
cd /
|
||||
IFS=';' read -ra FOLDER <<< "$FOLDERS"
|
||||
yes y | borg-job-"$JOB" extract --list "$LOCATION"::"$ARCHIVE_ID" "''${FOLDER[@]}"
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
146 clanModules/deltachat.nix (Normal file)
@@ -0,0 +1,146 @@
|
||||
{ config, pkgs, ... }: {
|
||||
networking.firewall.interfaces."zt+".allowedTCPPorts = [ 25 ]; # smtp with other hosts
|
||||
environment.systemPackages = [ pkgs.deltachat-desktop ];
|
||||
|
||||
services.maddy =
|
||||
let
|
||||
# FIXME move this to public setting
|
||||
meshname = config.clanCore.secrets.zerotier.facts.zerotier-meshname.value or null;
|
||||
domain = if meshname == null then "${config.clanCore.machineName}.local" else "${meshname}.vpn";
|
||||
in
|
||||
{
|
||||
enable = true;
|
||||
primaryDomain = domain;
|
||||
config = ''
|
||||
# Minimal configuration with TLS disabled, adapted from upstream example
|
||||
# configuration here https://github.com/foxcpp/maddy/blob/master/maddy.conf
|
||||
# Do not use this in unencrypted networks!
|
||||
|
||||
auth.pass_table local_authdb {
|
||||
table sql_table {
|
||||
driver sqlite3
|
||||
dsn credentials.db
|
||||
table_name passwords
|
||||
}
|
||||
}
|
||||
|
||||
storage.imapsql local_mailboxes {
|
||||
driver sqlite3
|
||||
dsn imapsql.db
|
||||
}
|
||||
|
||||
table.chain local_rewrites {
|
||||
optional_step regexp "(.+)\+(.+)@(.+)" "$1@$3"
|
||||
optional_step static {
|
||||
entry postmaster postmaster@$(primary_domain)
|
||||
}
|
||||
optional_step file /etc/maddy/aliases
|
||||
}
|
||||
|
||||
msgpipeline local_routing {
|
||||
destination postmaster $(local_domains) {
|
||||
modify {
|
||||
replace_rcpt &local_rewrites
|
||||
}
|
||||
deliver_to &local_mailboxes
|
||||
}
|
||||
default_destination {
|
||||
reject 550 5.1.1 "User doesn't exist"
|
||||
}
|
||||
}
|
||||
|
||||
smtp tcp://[::]:25 {
|
||||
limits {
|
||||
all rate 20 1s
|
||||
all concurrency 10
|
||||
}
|
||||
dmarc yes
|
||||
check {
|
||||
require_mx_record
|
||||
dkim
|
||||
spf
|
||||
}
|
||||
source $(local_domains) {
|
||||
reject 501 5.1.8 "Use Submission for outgoing SMTP"
|
||||
}
|
||||
default_source {
|
||||
destination postmaster $(local_domains) {
|
||||
deliver_to &local_routing
|
||||
}
|
||||
default_destination {
|
||||
reject 550 5.1.1 "User doesn't exist"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
submission tcp://[::1]:587 {
|
||||
limits {
|
||||
all rate 50 1s
|
||||
}
|
||||
auth &local_authdb
|
||||
source $(local_domains) {
|
||||
check {
|
||||
authorize_sender {
|
||||
prepare_email &local_rewrites
|
||||
user_to_email identity
|
||||
}
|
||||
}
|
||||
destination postmaster $(local_domains) {
|
||||
deliver_to &local_routing
|
||||
}
|
||||
default_destination {
|
||||
modify {
|
||||
dkim $(primary_domain) $(local_domains) default
|
||||
}
|
||||
deliver_to &remote_queue
|
||||
}
|
||||
}
|
||||
default_source {
|
||||
reject 501 5.1.8 "Non-local sender domain"
|
||||
}
|
||||
}
|
||||
|
||||
target.remote outbound_delivery {
|
||||
limits {
|
||||
destination rate 20 1s
|
||||
destination concurrency 10
|
||||
}
|
||||
mx_auth {
|
||||
dane
|
||||
mtasts {
|
||||
cache fs
|
||||
fs_dir mtasts_cache/
|
||||
}
|
||||
local_policy {
|
||||
min_tls_level encrypted
|
||||
min_mx_level none
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
target.queue remote_queue {
|
||||
target &outbound_delivery
|
||||
autogenerated_msg_domain $(primary_domain)
|
||||
bounce {
|
||||
destination postmaster $(local_domains) {
|
||||
deliver_to &local_routing
|
||||
}
|
||||
default_destination {
|
||||
reject 550 5.0.0 "Refusing to send DSNs to non-local addresses"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
imap tcp://[::1]:143 {
|
||||
auth &local_authdb
|
||||
storage &local_mailboxes
|
||||
}
|
||||
'';
|
||||
ensureAccounts = [
|
||||
"user@${domain}"
|
||||
];
|
||||
ensureCredentials = {
|
||||
"user@${domain}".passwordFile = pkgs.writeText "dummy" "foobar";
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,12 +1,18 @@
|
||||
{ self, lib, ... }: {
|
||||
{ inputs, ... }: {
|
||||
flake.clanModules = {
|
||||
diskLayouts = lib.mapAttrs'
|
||||
(name: _: lib.nameValuePair (lib.removeSuffix ".nix" name) {
|
||||
imports = [
|
||||
self.inputs.disko.nixosModules.disko
|
||||
./diskLayouts/${name}
|
||||
];
|
||||
})
|
||||
(builtins.readDir ./diskLayouts);
|
||||
diskLayouts = {
|
||||
imports = [
|
||||
./diskLayouts.nix
|
||||
inputs.disko.nixosModules.default
|
||||
];
|
||||
};
|
||||
borgbackup = ./borgbackup.nix;
|
||||
deltachat = ./deltachat.nix;
|
||||
moonlight = ./moonlight.nix;
|
||||
sunshine = ./sunshine.nix;
|
||||
syncthing = ./syncthing.nix;
|
||||
xfce = ./xfce.nix;
|
||||
zt-tcp-relay = ./zt-tcp-relay.nix;
|
||||
localsend = ./localsend.nix;
|
||||
};
|
||||
}
|
||||
|
||||
43 clanModules/localsend.nix (Normal file)
@@ -0,0 +1,43 @@
|
||||
{ config
|
||||
, pkgs
|
||||
, lib
|
||||
, ...
|
||||
}:
|
||||
{
|
||||
# Integration can be improved, if the following issues get implemented:
|
||||
# - cli frontend: https://github.com/localsend/localsend/issues/11
|
||||
# - ipv6 support: https://github.com/localsend/localsend/issues/549
|
||||
options.clan.localsend = {
|
||||
enable = lib.mkEnableOption (lib.mdDoc "the localsend module");
|
||||
defaultLocation = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "The default download location";
|
||||
};
|
||||
package = lib.mkPackageOption pkgs "localsend" { };
|
||||
};
|
||||
|
||||
imports =
|
||||
if config.clan.localsend.enable then
|
||||
[
|
||||
{
|
||||
clanCore.state.localsend.folders = [
|
||||
"/var/localsend"
|
||||
config.clan.localsend.defaultLocation
|
||||
];
|
||||
environment.systemPackages = [ config.clan.localsend.package ];
|
||||
|
||||
networking.firewall.interfaces."zt+".allowedTCPPorts = [ 53317 ];
|
||||
networking.firewall.interfaces."zt+".allowedUDPPorts = [ 53317 ];
|
||||
|
||||
#TODO: This is currently needed because there is no ipv6 multicasting support yet
|
||||
#
|
||||
systemd.network.networks."09-zerotier" = {
|
||||
networkConfig = {
|
||||
Address = "192.168.56.2/24";
|
||||
};
|
||||
};
|
||||
}
|
||||
]
|
||||
else
|
||||
[ ];
|
||||
}
|
||||
4 clanModules/moonlight.nix (Normal file)
@@ -0,0 +1,4 @@
|
||||
{ pkgs, ... }: {
|
||||
hardware.opengl.enable = true;
|
||||
environment.systemPackages = [ pkgs.moonlight-qt ];
|
||||
}
|
||||
109 clanModules/sunshine.nix (Normal file)
@@ -0,0 +1,109 @@
|
||||
{ pkgs, config, ... }:
|
||||
{
|
||||
networking.firewall = {
|
||||
allowedTCPPorts = [
|
||||
47984
|
||||
47989
|
||||
47990
|
||||
48010
|
||||
];
|
||||
|
||||
allowedUDPPorts = [
|
||||
47998
|
||||
47999
|
||||
48000
|
||||
48002
|
||||
48010
|
||||
];
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPortRanges = [
|
||||
{
|
||||
from = 47984;
|
||||
to = 48010;
|
||||
}
|
||||
];
|
||||
networking.firewall.allowedUDPPortRanges = [
|
||||
{
|
||||
from = 47998;
|
||||
to = 48010;
|
||||
}
|
||||
];
|
||||
|
||||
environment.systemPackages = [
|
||||
pkgs.sunshine
|
||||
pkgs.avahi
|
||||
# Convenience script, until we find a better UX
|
||||
(pkgs.writers.writeDashBin "sun" ''
|
||||
${pkgs.sunshine}/bin/sunshine -1 ${
|
||||
pkgs.writeText "sunshine.conf" ''
|
||||
address_family = both
|
||||
''
|
||||
} "$@"
|
||||
'')
|
||||
# Create a dummy account, for easier setup,
|
||||
# don't use this account in actual production yet.
|
||||
(pkgs.writers.writeDashBin "init-sun" ''
|
||||
${pkgs.sunshine}/bin/sunshine \
|
||||
--creds "sun" "sun"
|
||||
'')
|
||||
];
|
||||
|
||||
# Required to simulate input
|
||||
boot.kernelModules = [ "uinput" ];
|
||||
security.rtkit.enable = true;
|
||||
|
||||
# services.udev.extraRules = ''
|
||||
# KERNEL=="uinput", SUBSYSTEM=="misc", OPTIONS+="static_node=uinput", TAG+="uaccess"
|
||||
# '';
|
||||
|
||||
services.udev.extraRules = ''
|
||||
KERNEL=="uinput", GROUP="input", MODE="0660" OPTIONS+="static_node=uinput"
|
||||
'';
|
||||
|
||||
security.wrappers.sunshine = {
|
||||
owner = "root";
|
||||
group = "root";
|
||||
capabilities = "cap_sys_admin+p";
|
||||
source = "${pkgs.sunshine}/bin/sunshine";
|
||||
};
|
||||
|
||||
systemd.user.services.sunshine = {
|
||||
description = "sunshine";
|
||||
wantedBy = [ "graphical-session.target" ];
|
||||
environment = {
|
||||
DISPLAY = ":0";
|
||||
};
|
||||
serviceConfig = {
|
||||
ExecStart = "${config.security.wrapperDir}/sunshine";
|
||||
};
|
||||
};
|
||||
|
||||
# xdg.configFile."sunshine/apps.json".text = builtins.toJSON {
|
||||
# env = "/run/current-system/sw/bin";
|
||||
# apps = [
|
||||
# {
|
||||
# name = "Steam";
|
||||
# output = "steam.txt";
|
||||
# detached = [
|
||||
# "${pkgs.util-linux}/bin/setsid ${pkgs.steam}/bin/steam steam://open/bigpicture"
|
||||
# ];
|
||||
# image-path = "steam.png";
|
||||
# }
|
||||
# ];
|
||||
# };
|
||||
|
||||
services = {
|
||||
avahi = {
|
||||
enable = true;
|
||||
reflector = true;
|
||||
nssmdns = true;
|
||||
publish = {
|
||||
enable = true;
|
||||
addresses = true;
|
||||
userServices = true;
|
||||
workstation = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
215 clanModules/syncthing.nix (Normal file)
@@ -0,0 +1,215 @@
|
||||
{ config
|
||||
, pkgs
|
||||
, lib
|
||||
, ...
|
||||
}:
|
||||
{
|
||||
options.clan.syncthing = {
|
||||
id = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
example = "BABNJY4-G2ICDLF-QQEG7DD-N3OBNGF-BCCOFK6-MV3K7QJ-2WUZHXS-7DTW4AS";
|
||||
default = config.clanCore.secrets.syncthing.facts."syncthing.pub".value or null;
|
||||
};
|
||||
introducer = lib.mkOption {
|
||||
description = ''
|
||||
The introducer for the machine.
|
||||
'';
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
};
|
||||
autoAcceptDevices = lib.mkOption {
|
||||
description = ''
|
||||
Auto accept incoming device requests.
|
||||
Should only be used on the introducer.
|
||||
'';
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
};
|
||||
autoShares = lib.mkOption {
|
||||
description = ''
|
||||
Auto-share the following folders by their IDs with introduced devices.
|
||||
Should only be used on the introducer.
|
||||
'';
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
|
||||
imports = [
|
||||
{
|
||||
# Syncthing ports: 8384 for remote access to GUI
|
||||
# 22000 TCP and/or UDP for sync traffic
|
||||
# 21027/UDP for discovery
|
||||
# source: https://docs.syncthing.net/users/firewall.html
|
||||
networking.firewall.interfaces."zt+".allowedTCPPorts = [
|
||||
8384
|
||||
22000
|
||||
];
|
||||
networking.firewall.allowedTCPPorts = [ 8384 ];
|
||||
networking.firewall.interfaces."zt+".allowedUDPPorts = [
|
||||
22000
|
||||
21027
|
||||
];
|
||||
|
||||
assertions = [
|
||||
{
|
||||
assertion =
|
||||
lib.all (attr: builtins.hasAttr attr config.services.syncthing.settings.folders)
|
||||
config.clan.syncthing.autoShares;
|
||||
message = ''
|
||||
Syncthing: If you want to AutoShare a folder, you need to have it configured on the sharing device.
|
||||
'';
|
||||
}
|
||||
];
|
||||
|
||||
# Activates inotify compatibility for syncthing
|
||||
boot.kernel.sysctl."fs.inotify.max_user_watches" = 524288;
|
||||
|
||||
|
||||
services.syncthing = {
|
||||
enable = true;
|
||||
configDir = "/var/lib/syncthing";
|
||||
|
||||
overrideFolders = true;
|
||||
overrideDevices = true;
|
||||
|
||||
dataDir = lib.mkDefault "/home/user/";
|
||||
|
||||
group = "syncthing";
|
||||
|
||||
key =
|
||||
lib.mkDefault
|
||||
config.clanCore.secrets.syncthing.secrets."syncthing.key".path or null;
|
||||
cert =
|
||||
lib.mkDefault
|
||||
config.clanCore.secrets.syncthing.secrets."syncthing.cert".path or null;
|
||||
|
||||
settings = {
|
||||
options = {
|
||||
urAccepted = -1;
|
||||
allowedNetworks = [ config.clan.networking.zerotier.subnet ];
|
||||
};
|
||||
devices =
|
||||
{ }
|
||||
// (
|
||||
if (config.clan.syncthing.introducer == null) then
|
||||
{ }
|
||||
else
|
||||
{
|
||||
"${config.clan.syncthing.introducer}" = {
|
||||
name = "introducer";
|
||||
id = config.clan.syncthing.introducer;
|
||||
introducer = true;
|
||||
autoAcceptFolders = true;
|
||||
};
|
||||
}
|
||||
);
|
||||
};
|
||||
};
|
||||
systemd.services.syncthing-auto-accept =
|
||||
let
|
||||
baseAddress = "127.0.0.1:8384";
|
||||
getPendingDevices = "/rest/cluster/pending/devices";
|
||||
postNewDevice = "/rest/config/devices";
|
||||
SharedFolderById = "/rest/config/folders/";
|
||||
apiKey = config.clanCore.secrets.syncthing.secrets."syncthing.api".path or null;
|
||||
in
|
||||
lib.mkIf config.clan.syncthing.autoAcceptDevices {
|
||||
description = "Syncthing auto accept devices";
|
||||
requisite = [ "syncthing.service" ];
|
||||
after = [ "syncthing.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
script = ''
|
||||
set -x
|
||||
# query pending deviceID's
|
||||
APIKEY=$(cat ${apiKey})
|
||||
PENDING=$(${
|
||||
lib.getExe pkgs.curl
|
||||
} -X GET -H "X-API-Key: $APIKEY" ${baseAddress}${getPendingDevices})
|
||||
PENDING=$(echo $PENDING | ${lib.getExe pkgs.jq} keys[])
|
||||
|
||||
# accept pending deviceID's
|
||||
for ID in $PENDING;do
|
||||
${
|
||||
lib.getExe pkgs.curl
|
||||
} -X POST -d "{\"deviceId\": $ID}" -H "Content-Type: application/json" -H "X-API-Key: $APIKEY" ${baseAddress}${postNewDevice}
|
||||
|
||||
# get all shared folders by their ID
|
||||
for folder in ${builtins.toString config.clan.syncthing.autoShares}; do
|
||||
SHARED_IDS=$(${
|
||||
lib.getExe pkgs.curl
|
||||
} -X GET -H "X-API-Key: $APIKEY" ${baseAddress}${SharedFolderById}"$folder" | ${
|
||||
lib.getExe pkgs.jq
|
||||
} ."devices")
|
||||
PATCHED_IDS=$(echo $SHARED_IDS | ${
|
||||
lib.getExe pkgs.jq
|
||||
} ".+= [{\"deviceID\": $ID, \"introducedBy\": \"\", \"encryptionPassword\": \"\"}]")
|
||||
${
|
||||
lib.getExe pkgs.curl
|
||||
} -X PATCH -d "{\"devices\": $PATCHED_IDS}" -H "X-API-Key: $APIKEY" ${baseAddress}${SharedFolderById}"$folder"
|
||||
done
|
||||
done
|
||||
'';
|
||||
};
|
||||
|
||||
systemd.timers.syncthing-auto-accept =
|
||||
lib.mkIf config.clan.syncthing.autoAcceptDevices
|
||||
{
|
||||
description = "Syncthing Auto Accept";
|
||||
|
||||
wantedBy = [ "syncthing-auto-accept.service" ];
|
||||
|
||||
timerConfig = {
|
||||
OnActiveSec = lib.mkDefault 60;
|
||||
OnUnitActiveSec = lib.mkDefault 60;
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.syncthing-init-api-key =
|
||||
let
|
||||
apiKey = config.clanCore.secrets.syncthing.secrets."syncthing.api".path or null;
|
||||
in
|
||||
lib.mkIf config.clan.syncthing.autoAcceptDevices {
|
||||
description = "Set the api key";
|
||||
after = [ "syncthing-init.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
script = ''
|
||||
# set -x
|
||||
set -efu -o pipefail
|
||||
|
||||
APIKEY=$(cat ${apiKey})
|
||||
${
|
||||
lib.getExe pkgs.gnused
|
||||
} -i "s/<apikey>.*<\/apikey>/<apikey>$APIKEY<\/apikey>/" /var/lib/syncthing/config.xml
|
||||
# sudo systemctl restart syncthing.service
|
||||
systemctl restart syncthing.service
|
||||
'';
|
||||
serviceConfig = {
|
||||
WorkingDirectory = "/var/lib/syncthing";
|
||||
BindReadOnlyPaths = [ apiKey ];
|
||||
Type = "oneshot";
|
||||
};
|
||||
};
|
||||
|
||||
clanCore.secrets.syncthing = {
|
||||
secrets."syncthing.key" = { };
|
||||
secrets."syncthing.cert" = { };
|
||||
secrets."syncthing.api" = { };
|
||||
facts."syncthing.pub" = { };
|
||||
generator.path = [
|
||||
pkgs.coreutils
|
||||
pkgs.gnugrep
|
||||
pkgs.syncthing
|
||||
];
|
||||
generator.script = ''
|
||||
syncthing generate --config "$secrets"
|
||||
mv "$secrets"/key.pem "$secrets"/syncthing.key
|
||||
mv "$secrets"/cert.pem "$secrets"/syncthing.cert
|
||||
cat "$secrets"/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > "$facts"/syncthing.pub
|
||||
cat "$secrets"/config.xml | grep -oP '<apikey>\K[^<]+' | uniq > "$secrets"/syncthing.api
|
||||
'';
|
||||
};
|
||||
}
|
||||
];
|
||||
}
|
||||
7 clanModules/xfce.nix (Normal file)
@@ -0,0 +1,7 @@
|
||||
{
|
||||
services.xserver = {
|
||||
enable = true;
|
||||
desktopManager.xfce.enable = true;
|
||||
layout = "us";
|
||||
};
|
||||
}
|
||||
23 clanModules/zt-tcp-relay.nix (Normal file)
@@ -0,0 +1,23 @@
|
||||
{ pkgs, lib, config, ... }: {
|
||||
options.clan.zt-tcp-relay = {
|
||||
port = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
default = 4443;
|
||||
description = "Port to listen on";
|
||||
};
|
||||
};
|
||||
config = {
|
||||
networking.firewall.allowedTCPPorts = [ config.clan.zt-tcp-relay.port ];
|
||||
|
||||
systemd.services.zt-tcp-relay = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.callPackage ../pkgs/zt-tcp-relay {}}/bin/zt-tcp-relay --listen [::]:${builtins.toString config.clan.zt-tcp-relay.port}";
|
||||
Restart = "always";
|
||||
RestartSec = "5";
|
||||
DynamicUser = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -10,6 +10,7 @@
|
||||
pkgs.tea
|
||||
self'.packages.tea-create-pr
|
||||
self'.packages.merge-after-ci
|
||||
self'.packages.pending-reviews
|
||||
# treefmt with config defined in ./flake-parts/formatting.nix
|
||||
config.treefmt.build.wrapper
|
||||
];
|
||||
|
||||
10 docs/admins/_index.md (Normal file)
@@ -0,0 +1,10 @@
|
||||
+++
|
||||
title = "Admin Documentation"
|
||||
description = "Documentation administrators creating or managing cLANs"
|
||||
date = 2025-05-01T19:00:00+00:00
|
||||
updated = 2021-05-01T19:00:00+00:00
|
||||
template = "docs/section.html"
|
||||
weight = 15
|
||||
sort_by = "title"
|
||||
draft = false
|
||||
+++
|
||||
115 docs/admins/machines.md (Normal file)
@@ -0,0 +1,115 @@
|
||||
# Managing NixOS Machines
|
||||
|
||||
## Add Your First Machine
|
||||
|
||||
To start managing a new machine, use the following commands to create and then list your machines:
|
||||
|
||||
```shellSession
|
||||
$ clan machines create my-machine
|
||||
$ clan machines list
|
||||
my-machine
|
||||
```
|
||||
|
||||
## Configure Your Machine
|
||||
|
||||
In the example below, we demonstrate how to add a new user named `my-user` and set a password. This user will be configured to log in to the machine `my-machine`.
|
||||
|
||||
### Creating a New User
|
||||
|
||||
```shellSession
|
||||
# Add a new user
|
||||
$ clan config --machine my-machine users.users.my-user.isNormalUser true
|
||||
|
||||
# Set a password for the user
|
||||
$ clan config --machine my-machine users.users.my-user.hashedPassword $(mkpasswd)
|
||||
```
|
||||
|
||||
_Note: The `$(mkpasswd)` command generates a hashed password. Ensure you have the `mkpasswd` utility installed or use an alternative method to generate a secure hashed password._
|
||||
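For example, assuming the `mkpasswd` tool from the `whois` package is available, you can generate a SHA-512 hash interactively and paste it into the command above:

```shellSession
$ mkpasswd -m sha-512
```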
|
||||
## Test Your Machine Configuration Inside a VM
|
||||
|
||||
Before deploying your configuration to a live environment, you can run a virtual machine (VM) to test the settings:
|
||||
|
||||
```shellSession
|
||||
$ clan vms run my-machine
|
||||
```
|
||||
|
||||
This command runs a VM based on the configuration of `my-machine`, allowing you to verify changes in a controlled environment.
|
||||
|
||||
## Installing a New Machine
|
||||
|
||||
Clan CLI, in conjunction with [nixos-anywhere](https://github.com/nix-community/nixos-anywhere), provides a seamless method for installing NixOS on various machines.
|
||||
This process involves preparing a suitable hardware and disk partitioning configuration and ensuring the target machine is accessible via SSH.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- A running Linux system with SSH on the target machine is required. This is typically pre-configured for many server providers.
|
||||
- For installations on physical hardware, create a NixOS installer image and transfer it to a bootable USB drive as described below.
|
||||
|
||||
## Creating a Bootable USB Drive on Linux
|
||||
|
||||
To create a bootable USB flash drive with the NixOS installer:
|
||||
|
||||
1. **Build the Installer Image**:
|
||||
|
||||
```shellSession
|
||||
$ nix build git+https://git.clan.lol/clan/clan-core.git#install-iso
|
||||
```
|
||||
|
||||
2. **Prepare the USB Flash Drive**:
|
||||
|
||||
- Insert your USB flash drive into your computer.
|
||||
- Identify your flash drive with `lsblk`. Look for the device with a matching size.
|
||||
- Ensure all partitions on the drive are unmounted. Replace `sdX` in the command below with your device identifier (like `sdb`, etc.):
|
||||
|
||||
```shellSession
|
||||
sudo umount /dev/sdX*
|
||||
```
|
||||
|
||||
3. **Write the Image to the USB Drive**:
|
||||
|
||||
- Use the `dd` utility to write the NixOS installer image to your USB drive:
|
||||
|
||||
```shellSession
|
||||
sudo dd bs=4M conv=fsync oflag=direct status=progress if=./result/stick.raw of=/dev/sdX
|
||||
```
|
||||
|
||||
4. **Boot and Connect**:
|
||||
- After writing the installer to the USB drive, use it to boot the target machine.
|
||||
- The installer will display an IP address and a root password, which you can use to connect via SSH.
|
||||
|
||||
### Finishing the installation
|
||||
|
||||
With the target machine running Linux and accessible via SSH, execute the following command to install NixOS on the target machine, replacing `<target_host>` with the machine's hostname or IP address:
|
||||
|
||||
```shellSession
|
||||
$ clan machines install my-machine <target_host>
|
||||
```
|
||||
|
||||
## Update Your Machines
|
||||
|
||||
Clan CLI enables you to remotely update your machines over SSH. This requires setting up a deployment address for each target machine.
|
||||
|
||||
### Setting the Deployment Address
|
||||
|
||||
Replace `host_or_ip` with the actual hostname or IP address of your target machine:
|
||||
|
||||
```shellSession
|
||||
$ clan config --machine my-machine clan.networking.deploymentAddress root@host_or_ip
|
||||
```
|
||||
|
||||
_Note: The use of `root@` in the deployment address implies SSH access as the root user. Ensure that the root login is secured and only used when necessary._
|
||||
|
||||
### Updating Machine Configurations
|
||||
|
||||
Execute the following command to update the specified machine:
|
||||
|
||||
```shellSession
|
||||
$ clan machines update my-machine
|
||||
```
|
||||
|
||||
You can also update all configured machines simultaneously by omitting the machine name:
|
||||
|
||||
```shellSession
|
||||
$ clan machines update
|
||||
```
|
||||
@@ -1,22 +1,22 @@
|
||||
# Initializing a New Clan Project
|
||||
|
||||
## Clone the Clan Template
|
||||
## Create a new flake
|
||||
|
||||
1. To start a new project, execute the following command to add the clan cli to your shell:
|
||||
|
||||
```bash
|
||||
```shellSession
|
||||
$ nix shell git+https://git.clan.lol/clan/clan-core
|
||||
```
|
||||
|
||||
2. Than use the following command to clone the clan core template into the current directory:
|
||||
2. Then use the following commands to initialize a new clan-flake:
|
||||
|
||||
```
|
||||
$ clan create .
|
||||
```shellSession
|
||||
$ clan flake create my-clan
|
||||
```
|
||||
|
||||
This action will generate two primary files: `flake.nix` and `.clan-flake`.
|
||||
|
||||
```bash
|
||||
```shellSession
|
||||
$ ls -la
|
||||
drwx------ joerg users 5 B a minute ago ./
|
||||
drwxrwxrwt root root 139 B 12 seconds ago ../
|
||||
@@ -30,9 +30,9 @@ drwxrwxrwt root root 139 B 12 seconds ago ../
|
||||
The `.clan-flake` marker file serves an optional purpose: it helps the `clan-cli` utility locate the project's root directory.
|
||||
If `.clan-flake` is missing, `clan-cli` will instead search for other indicators like `.git`, `.hg`, `.svn`, or `flake.nix` to identify the project root.
|
||||
|
||||
## Modifying the configuration
|
||||
## What's next
|
||||
|
||||
After cloning the template the next step is to modify the `flake.nix` and follow the instructions in it to add more machines.
|
||||
After creating your flake, you can check out how to add [new machines](./machines.md)
|
||||
|
||||
---
|
||||
|
||||
@@ -44,11 +44,11 @@ Absolutely, let's break down the migration step by step, explaining each action
|
||||
|
||||
1. **Backup Your Current Configuration**: Always start by making a backup of your current NixOS configuration to ensure you can revert if needed.
|
||||
|
||||
```shell
|
||||
cp -r /etc/nixos ~/nixos-backup
|
||||
```shellSession
|
||||
$ cp -r /etc/nixos ~/nixos-backup
|
||||
```
|
||||
|
||||
2. **Update Flake Inputs**: The patch adds a new input named `clan-core` to your `flake.nix`. This input points to a Git repository for Clan Core. Here's the addition:
|
||||
2. **Update Flake Inputs**: Add a new input for the `clan-core` dependency:
|
||||
|
||||
```nix
|
||||
inputs.clan-core = {
|
||||
@@ -91,6 +91,7 @@ Absolutely, let's break down the migration step by step, explaining each action
|
||||
# this needs to point at the repository root
|
||||
directory = self;
|
||||
specialArgs = {};
|
||||
clanName = "NEEDS_TO_BE_UNIQUE"; # TODO: Changeme
|
||||
machines = {
|
||||
example-desktop = {
|
||||
nixpkgs.hostPlatform = "x86_64-linux";
|
||||
@@ -100,18 +101,19 @@ Absolutely, let's break down the migration step by step, explaining each action
|
||||
};
|
||||
};
|
||||
};
|
||||
in { inherit (clan) nixosConfigurations clanInternal; }
|
||||
in { inherit (clan) nixosConfigurations clanInternals; }
|
||||
```
|
||||
|
||||
- `nixosConfigurations`: Defines NixOS configurations, using Clan Core’s `buildClan` function to manage the machines.
|
||||
- Inside `machines`, a new machine configuration is defined (in this case, `example-desktop`).
|
||||
- Inside `example-desktop` which is the target machine hostname, `nixpkgs.hostPlatform` specifies the host platform as `x86_64-linux`.
|
||||
- `clanInternals`: Is required to enable evaluation of the secret generation/upload script on every architecture
|
||||
- `clanName`: Is required and needs to be globally unique, as else we have a cLAN name clash
|
||||
|
||||
4. **Rebuild and Switch**: Rebuild your NixOS configuration using the updated flake:
|
||||
|
||||
```shell
|
||||
sudo nixos-rebuild switch --flake .
|
||||
```shellSession
|
||||
$ sudo nixos-rebuild switch --flake .
|
||||
```
|
||||
|
||||
- This command rebuilds and switches to the new configuration. Make sure to include the `--flake .` argument to use the current directory as the flake source.
|
||||
@@ -120,10 +122,14 @@ Absolutely, let's break down the migration step by step, explaining each action
|
||||
|
||||
6. **Reboot**: If everything is fine, you can reboot your system to apply the changes:
|
||||
|
||||
```shell
|
||||
sudo reboot
|
||||
```shellSession
|
||||
$ sudo reboot
|
||||
```
|
||||
|
||||
7. **Verify**: After the reboot, confirm that your system is running with the new configuration, and all services and applications are functioning as expected.
|
||||
|
||||
By following these steps, you've successfully migrated your NixOS Flake configuration to include the `clan-core` input and adapted the `outputs` section to work with Clan Core's new machine provisioning method.
|
||||
|
||||
## What's next
|
||||
|
||||
After creating your flake, you can check out how to add [new machines](./machines.md)
|
||||
@@ -9,7 +9,7 @@ integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
|
||||
|
||||
To begin, generate a key pair:
|
||||
|
||||
```console
|
||||
```shellSession
|
||||
$ clan secrets key generate
|
||||
```
|
||||
|
||||
@@ -27,7 +27,7 @@ user with your user name)
|
||||
|
||||
Next, add your public key to the Clan flake repository:
|
||||
|
||||
```console
|
||||
```shellSession
|
||||
$ clan secrets users add <your_username> <your_public_key>
|
||||
```
|
||||
|
||||
@@ -42,7 +42,7 @@ sops/
|
||||
|
||||
Now, to set your first secret:
|
||||
|
||||
```console
|
||||
```shellSession
|
||||
$ clan secrets set mysecret
|
||||
Paste your secret:
|
||||
```
|
||||
@@ -51,13 +51,13 @@ Note: As you type your secret, keypresses won't be displayed. Press Enter to sav
|
||||
|
||||
Retrieve the stored secret:
|
||||
|
||||
```console
|
||||
```shellSession
|
||||
$ clan secrets get mysecret
|
||||
```
|
||||
|
||||
And list all secrets like this:
|
||||
|
||||
```console
|
||||
```shellSession
|
||||
$ clan secrets list
|
||||
```
|
||||
|
||||
@@ -79,19 +79,19 @@ By default, secrets are encrypted with your key to ensure readability.
|
||||
|
||||
New machines in Clan come with age keys stored in `./sops/machines/<machine_name>`. To list these machines:
|
||||
|
||||
```console
|
||||
```shellSession
|
||||
$ clan secrets machines list
|
||||
```
|
||||
|
||||
For existing machines, add their keys:
|
||||
|
||||
```console
|
||||
```shellSession
|
||||
$ clan secrets machines add <machine_name> <age_key>
|
||||
```
|
||||
|
||||
To fetch an age key from an SSH host key:
|
||||
|
||||
```console
|
||||
```shellSession
|
||||
$ ssh-keyscan <domain_name> | nix shell nixpkgs#ssh-to-age -c ssh-to-age
|
||||
```
|
||||
|
||||
@@ -99,13 +99,13 @@ $ ssh-keyscan <domain_name> | nix shell nixpkgs#ssh-to-age -c ssh-to-age
|
||||
|
||||
By default, secrets are encrypted for your key. To specify which users and machines can access a secret:
|
||||
|
||||
```console
|
||||
```shellSession
|
||||
$ clan secrets set --machine <machine1> --machine <machine2> --user <user1> --user <user2> <secret_name>
|
||||
```
|
||||
|
||||
You can add machines/users to existing secrets without modifying the secret:
|
||||
|
||||
```console
|
||||
```shellSession
|
||||
$ clan secrets machines add-secret <machine_name> <secret_name>
|
||||
```
|
||||
|
||||
@@ -117,19 +117,19 @@ For convenience, Clan CLI allows group creation to simplify access management. H
|
||||
|
||||
Assign users to a new group, e.g., `admins`:
|
||||
|
||||
```console
|
||||
```shellSession
|
||||
$ clan secrets groups add admins <username>
|
||||
```
|
||||
|
||||
2. **Listing Groups**:
|
||||
|
||||
```console
|
||||
```shellSession
|
||||
$ clan secrets groups list
|
||||
```
|
||||
|
||||
3. **Assigning Secrets to Groups**:
|
||||
|
||||
```console
|
||||
```shellSession
|
||||
$ clan secrets groups add-secret <group_name> <secret_name>
|
||||
```
|
||||
|
||||
@@ -162,7 +162,7 @@ commonly allows to put all secrets in a yaml or json documents.
|
||||
|
||||
If you already happend to use sops-nix, you can migrate by using the `clan secrets import-sops` command by importing these documents:
|
||||
|
||||
```console
|
||||
```shellSession
|
||||
% clan secrets import-sops --prefix matchbox- --group admins --machine matchbox nixos/matchbox/secrets/secrets.yaml
|
||||
```
|
||||
|
||||
69 docs/admins/zerotier.md (Normal file)
@@ -0,0 +1,69 @@
|
||||
# ZeroTier Configuration with NixOS in Clan
|
||||
|
||||
This guide provides detailed instructions for configuring
|
||||
[ZeroTier VPN](https://zerotier.com) within Clan. Follow the
|
||||
outlined steps to set up a machine as a VPN controller (`<CONTROLLER>`) and to
|
||||
include a new machine into the VPN.
|
||||
|
||||
## 1. Setting Up the VPN Controller
|
||||
|
||||
The VPN controller is initially essential for providing configuration to new
|
||||
peers. After the address allocation, the controller's continuous operation is not
|
||||
crucial.
|
||||
|
||||
### Instructions:
|
||||
|
||||
1. **Designate a Machine**: Label a machine as the VPN controller in the clan,
|
||||
referred to as `<CONTROLLER>` henceforth in this guide.
|
||||
2. **Add Configuration**: Input the below configuration to the NixOS
|
||||
configuration of the controller machine:
|
||||
```nix
|
||||
clan.networking.zerotier.controller = {
|
||||
enable = true;
|
||||
public = true;
|
||||
};
|
||||
```
|
||||
3. **Update the Controller Machine**: Execute the following:
|
||||
```console
|
||||
$ clan machines update <CONTROLLER>
|
||||
```
|
||||
Your machine is now operational as the VPN controller.
|
||||
|
||||
## 2. Integrating a New Machine to the VPN
|
||||
|
||||
To introduce a new machine to the VPN, adhere to the following steps:
|
||||
|
||||
### Instructions:
|
||||
|
||||
1. **Update Configuration**: On the new machine, incorporate the below to its
|
||||
configuration, substituting `<CONTROLLER>` with the controller machine name:
|
||||
```nix
|
||||
{ config, ... }: {
|
||||
clan.networking.zerotier.networkId = builtins.readFile (config.clanCore.clanDir + "/machines/<CONTROLLER>/facts/zerotier-network-id");
|
||||
}
|
||||
```
|
||||
2. **Update the New Machine**: Execute:
|
||||
```console
|
||||
$ clan machines update <NEW_MACHINE>
|
||||
```
|
||||
Replace `<NEW_MACHINE>` with the designated new machine name.
|
||||
3. **Retrieve the ZeroTier ID**: On the `new_machine`, execute:
|
||||
```console
|
||||
$ sudo zerotier-cli info
|
||||
```
|
||||
Example Output: `200 info d2c71971db 1.12.1 OFFLINE`, where `d2c71971db` is
|
||||
the ZeroTier ID.
|
||||
4. **Authorize the New Machine on Controller**: On the controller machine,
|
||||
execute:
|
||||
```console
|
||||
$ sudo zerotier-members allow <ID>
|
||||
```
|
||||
Substitute `<ID>` with the ZeroTier ID obtained previously.
|
||||
5. **Verify Connection**: On the `new_machine`, re-execute:
|
||||
```console
|
||||
$ sudo zerotier-cli info
|
||||
```
|
||||
The status should now be "ONLINE" e.g., `200 info 47303517ef 1.12.1 ONLINE`.
|
||||
|
||||
Congratulations! The new machine is now part of the VPN, and the ZeroTier
|
||||
configuration on NixOS within the Clan project is complete.
|
||||
138 docs/contributing/api-guidelines.md (Normal file)
@@ -0,0 +1,138 @@
|
||||
# API Guidelines
|
||||
|
||||
This document collects our common understanding of how to design our API so that it is extensible, usable, and understandable.
|
||||
|
||||
## Resource oriented
|
||||
|
||||
A resource-oriented API is generally modeled as a resource hierarchy, where each node is either a simple resource or a collection resource. For convenience, they are often called a resource and a collection, respectively.
|
||||
|
||||
Examples of Resource Nouns:
|
||||
|
||||
`machine`
|
||||
`user`
|
||||
`flake`
|
||||
|
||||
Often resources have sub-resources. Even if it is not foreseen, it is recommended to use plural (trailing `s`) on resources to allow them to be collections of sub-resources.
|
||||
|
||||
e.g.,
|
||||
|
||||
`users`
|
||||
->
|
||||
`users/*/profile`
|
||||
|
||||
## Verbs
|
||||
|
||||
Verbs should not be part of the URL
|
||||
|
||||
Bad:
|
||||
`/api/create-products`
|
||||
|
||||
Good:
|
||||
`/api/products`
|
||||
|
||||
Only resources are part of the URL, verbs are described via the HTTP Method.
|
||||
|
||||
Exception:
|
||||
|
||||
If a different HTTP Method must be used for technical reasons it is okay to terminate the path with a (short) verb / action.
|
||||
|
||||
Okay ish:
|
||||
`/api/products/create`
|
||||
|
||||
## Usually the following HTTP Methods exist to interact with a resource
|
||||
|
||||
- POST (create an order for a resource)
|
||||
- GET (retrieve the information)
|
||||
- PUT (update and replace information)
|
||||
- PATCH (update and modify information) **(Not used yet)**
|
||||
- DELETE (delete the item)
|
||||
|
||||
## Every resource should be CRUD compatible
|
||||
|
||||
All API resources MUST be designed in a way that allows the typical CRUD operations.
|
||||
|
||||
Where crud stands for:
|
||||
|
||||
C - Create
|
||||
R - Read
|
||||
U - Update
|
||||
D - Delete
|
||||
|
||||
Resources should implement at least a "Read" operation.
|
||||
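As an illustration only (not the actual clan API; the `machines` resource, the `Machine` model, and the FastAPI-style routes below are hypothetical), a CRUD-compatible resource could look like this:

```python
# Hypothetical sketch of a CRUD-compatible "machines" resource using a
# FastAPI-style backend; names and the in-memory storage are illustrative.
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

app = FastAPI()


class Machine(BaseModel):
    name: str
    description: str = ""


# in-memory stand-in for a real store
machines: dict[str, Machine] = {}


@app.post("/api/machines")  # Create
def create_machine(machine: Machine) -> dict:
    machines[machine.name] = machine
    return {"machine": machine}


@app.get("/api/machines")  # Read (collection, enveloped in an object)
def list_machines() -> dict:
    return {"machines": list(machines.values())}


@app.put("/api/machines/{name}")  # Update (replace)
def update_machine(name: str, machine: Machine) -> dict:
    if name not in machines:
        raise HTTPException(status_code=404, detail="machine not found")
    machines[name] = machine
    return {"machine": machine}


@app.delete("/api/machines/{name}")  # Delete
def delete_machine(name: str) -> dict:
    machines.pop(name, None)
    return {}
```

Enveloping the list response as `{"machines": [...]}` also keeps the collection extensible, as described in the envelope section below.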
|
||||
## Body
|
||||
|
||||
Use JSON as an exchange format.
|
||||
|
||||
All responses MUST be JSON parseable.
|
||||
|
||||
Bad:
|
||||
`bare string`
|
||||
|
||||
Better:
|
||||
`"quoted string"`
|
||||
|
||||
Best (enveloped, see next section):
|
||||
`{ "name": "quoted string" }`
|
||||
|
||||
Errors should have a consistent JSON format, so that it is clear which field to look at when displaying error messages.
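For example, an error envelope could look like the following (the field names are only an illustration, loosely following FastAPI's default error shape, not a fixed schema):

```
{
  "detail": [
    {
      "loc": ["path", "machine_name"],
      "msg": "machine not found",
      "type": "value_error"
    }
  ]
}
```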
|
||||
|
||||
## Envelop all Data collections
|
||||
|
||||
Response data should be wrapped in a JSON object `{}`.
|
||||
Lists `[]` should also contain Objects `{}`.
|
||||
This keeps everything extensible without breaking backwards compatibility. (Adding fields is trivial, since the schema doesn't change.)
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
{
|
||||
"users": [{
|
||||
first_name: "John",
|
||||
last_name: "Doe",
|
||||
…
|
||||
}, {
|
||||
first_name: "Jane",
|
||||
last_name: "Doe",
|
||||
…
|
||||
}
|
||||
....
|
||||
],
|
||||
"skip": 0,
|
||||
"limit": 20,
|
||||
....
|
||||
}
|
||||
```
|
||||
|
||||
Bad Example of a breaking change:
|
||||
`GET /api/flakes`
|
||||
`old`
|
||||
|
||||
```
|
||||
[
|
||||
"dream2nix"
|
||||
"disko"
|
||||
]
|
||||
```
|
||||
|
||||
`new`
|
||||
|
||||
```
|
||||
[
|
||||
{
|
||||
name: "dream2nix",
|
||||
url: "github/...."
|
||||
},
|
||||
{
|
||||
name: "disko",
|
||||
url: "github/...."
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
This kind of breaking change can be avoided by using an object from the beginning.
|
||||
Even if the object only contains one key, it remains extensible without breaking.
|
||||
|
||||
## More will follow.
|
||||
|
||||
...maybe
|
||||
217
docs/contributing/contributing.md
Normal file
217
docs/contributing/contributing.md
Normal file
@@ -0,0 +1,217 @@
|
||||
# Contributing
|
||||
|
||||
**Frontend**: Our frontend is powered by [Next.js](https://nextjs.org/), a popular and versatile React framework for building web applications.
|
||||
|
||||
**Backend**: For the backend, we use Python along with the [FastAPI framework](https://fastapi.tiangolo.com/). To ensure seamless communication between the frontend and backend, we generate an `openapi.json` file from the Python code, which defines the REST API. This file is then used with [Orval](https://orval.dev/) to generate TypeScript bindings for the REST API. We're committed to code correctness, so we use [mypy](https://mypy-lang.org/) to ensure that our Python code is statically typed correctly. For backend testing, we rely on [pytest](https://docs.pytest.org/en/7.4.x/).
|
||||
|
||||
**Continuous Integration (CI)**: We've set up a CI bot that rigorously checks your code using the quality assurance (QA) tools mentioned above. If any errors are detected, it will block pull requests until they're resolved.
|
||||
|
||||
**Dependency Management**: We use the [Nix package manager](https://nixos.org/) to manage dependencies and ensure reproducibility, making your development process more robust.
|
||||
|
||||
## Supported Operating Systems
|
||||
|
||||
- Linux
|
||||
- macOS
|
||||
|
||||
# Getting Started with the Development Environment
|
||||
|
||||
Let's get your development environment up and running:
|
||||
|
||||
1. **Install Nix Package Manager**:
|
||||
|
||||
- You can install the Nix package manager by either [downloading the Nix installer](https://github.com/DeterminateSystems/nix-installer/releases) or running this command:
|
||||
```bash
|
||||
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
|
||||
```
|
||||
|
||||
2. **Install direnv**:
|
||||
|
||||
- Download the direnv package from [here](https://direnv.net/docs/installation.html) or run the following command:
|
||||
```bash
|
||||
curl -sfL https://direnv.net/install.sh | bash
|
||||
```
|
||||
|
||||
3. **Add direnv to your shell**:
|
||||
|
||||
- Direnv needs to [hook into your shell](https://direnv.net/docs/hook.html) to work.
|
||||
You can do this by executing the following command:
|
||||
|
||||
```bash
|
||||
echo 'eval "$(direnv hook zsh)"' >> ~/.zshrc && echo 'eval "$(direnv hook bash)"' >> ~/.bashrc && eval "$SHELL"
|
||||
```
|
||||
|
||||
4. **Clone the Repository and Navigate**:
|
||||
|
||||
- Clone this repository and navigate to it.
|
||||
|
||||
5. **Allow .envrc**:
|
||||
|
||||
- When you enter the directory, you'll receive an error message like this:
|
||||
```bash
|
||||
direnv: error .envrc is blocked. Run `direnv allow` to approve its content
|
||||
```
|
||||
- Execute `direnv allow` to automatically execute the shell script `.envrc` when entering the directory.
|
||||
|
||||
6. **Build the Backend**:
|
||||
|
||||
- Go to the `pkgs/clan-cli` directory and execute:
|
||||
```bash
|
||||
direnv allow
|
||||
```
|
||||
- Wait for the backend to build.
|
||||
|
||||
7. **Start the Backend Server**:
|
||||
|
||||
- To start the backend server, execute:
|
||||
```bash
|
||||
clan webui --reload --no-open --log-level debug
|
||||
```
|
||||
- The server will automatically restart if any Python files change.
|
||||
|
||||
8. **Build the Frontend**:
|
||||
|
||||
- In a different shell, navigate to the `pkgs/ui` directory and execute:
|
||||
```bash
|
||||
direnv allow
|
||||
```
|
||||
- Wait for the frontend to build.
|
||||
|
||||
NOTE: If you see an error mentioning "@clan/colors.json", you probably ran `npm install`; please do not do that. `direnv reload` handles dependency management. Delete `node_modules` with `rm -rf node_modules`.
|
||||
|
||||
9. **Start the Frontend**:
|
||||
- To start the frontend, execute:
|
||||
```bash
|
||||
npm run dev
|
||||
```
|
||||
- Access the website by going to [http://localhost:3000](http://localhost:3000).
|
||||
|
||||
# Setting Up Your Git Workflow
|
||||
|
||||
Let's set up your Git workflow to collaborate effectively:
|
||||
|
||||
1. **Register Your Gitea Account Locally**:
|
||||
|
||||
- Execute the following command to add your Gitea account locally:
|
||||
```bash
|
||||
tea login add
|
||||
```
|
||||
- Fill out the prompt as follows:
|
||||
- URL of Gitea instance: `https://gitea.gchq.icu`
|
||||
- Name of new Login [gitea.gchq.icu]: `gitea.gchq.icu:7171`
|
||||
- Do you have an access token? No
|
||||
- Username: YourUsername
|
||||
- Password: YourPassword
|
||||
- Set Optional settings: No
|
||||
|
||||
2. **Git Workflow**:
|
||||
|
||||
1. Add your changes to Git using `git add <file1> <file2>`.
|
||||
2. Run `nix fmt` to lint your files.
|
||||
3. Commit your changes with a descriptive message: `git commit -a -m "My descriptive commit message"`.
|
||||
4. Make sure your branch has the latest changes from upstream by executing:
|
||||
```bash
|
||||
git fetch && git rebase origin/main --autostash
|
||||
```
|
||||
5. Use `git status` to check for merge conflicts.
|
||||
6. If conflicts exist, resolve them. Here's a tutorial for resolving conflicts in [VSCode](https://code.visualstudio.com/docs/sourcecontrol/overview#_merge-conflicts).
|
||||
7. After resolving conflicts, execute `git rebase --continue` and repeat step 5 until there are no conflicts.
|
||||
|
||||
3. **Create a Pull Request**:
|
||||
|
||||
- To automatically open a pull request that gets merged if all tests pass, execute:
|
||||
```bash
|
||||
merge-after-ci
|
||||
```
|
||||
|
||||
4. **Review Your Pull Request**:
|
||||
|
||||
- Visit https://gitea.gchq.icu and go to the project page. Check under "Pull Requests" for any issues with your pull request.
|
||||
|
||||
5. **Push Your Changes**:
|
||||
- If there are issues, fix them and redo step 2. Afterward, execute:
|
||||
```bash
|
||||
git push origin HEAD:YourUsername-main
|
||||
```
|
||||
- This will directly push to your open pull request.
|
||||
|
||||
# Debugging
|
||||
|
||||
When working on the backend of your project, debugging is an essential part of the development process. Here are some methods for debugging and testing the backend of your application:
|
||||
|
||||
## Test Backend Locally in Devshell with Breakpoints
|
||||
|
||||
To test the backend locally in a development environment and set breakpoints for debugging, follow these steps:
|
||||
|
||||
1. Run the following command to execute your tests and allow for debugging with breakpoints:
|
||||
```bash
|
||||
pytest -n0 -s --maxfail=1
|
||||
```
|
||||
You can place `breakpoint()` in your Python code where you want to trigger a breakpoint for debugging.
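For example, a minimal sketch (the function below is made up purely for illustration; only the `breakpoint()` call matters):

```python
def parse_machine_name(raw: str) -> str:
    breakpoint()  # with `pytest -n0 -s`, execution pauses here and drops into pdb
    return raw.strip().lower()
```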
|
||||
|
||||
## Test Backend Locally in a Nix Sandbox
|
||||
|
||||
To run your backend tests in a Nix sandbox, you have two options depending on whether your test functions have been marked as impure or not:
|
||||
|
||||
### Running Tests Marked as Impure
|
||||
|
||||
If your test functions need to execute `nix build` and have been marked as impure because you can't execute `nix build` inside a Nix sandbox, use the following command:
|
||||
|
||||
```bash
|
||||
nix run .#impure-checks
|
||||
```
|
||||
|
||||
This command will run the impure test functions.
|
||||
|
||||
### Running Pure Tests
|
||||
|
||||
For test functions that have not been marked as impure and don't require executing `nix build`, you can use the following command:
|
||||
|
||||
```bash
|
||||
nix build .#checks.x86_64-linux.clan-pytest --rebuild
|
||||
```
|
||||
|
||||
This command will run all pure test functions.
|
||||
|
||||
### Inspecting the Nix Sandbox
|
||||
|
||||
If you need to inspect the Nix sandbox while running tests, follow these steps:
|
||||
|
||||
1. Insert an endless sleep into your test code where you want to pause the execution. For example:
|
||||
|
||||
```python
|
||||
import time
|
||||
time.sleep(3600) # Sleep for one hour
|
||||
```
|
||||
|
||||
2. Use `cntr` and `psgrep` to attach to the Nix sandbox. This allows you to interactively debug your code while it's paused. For example:
|
||||
|
||||
```bash
|
||||
cntr exec -w your_sandbox_name
|
||||
psgrep -a -x your_python_process_name
|
||||
```
|
||||
|
||||
These debugging and testing methods will help you identify and fix issues in your backend code efficiently, ensuring the reliability and robustness of your application.
|
||||
|
||||
For more information on testing, read [property and contract based testing](testing.md).
|
||||
|
||||
# Using this Template
|
||||
|
||||
To make the most of this template:
|
||||
|
||||
1. Set up a new Gitea account named `ui-asset-bot`. Generate an access token with all access permissions and set it under `settings/actions/secrets` as a secret called `BOT_ACCESS_TOKEN`.
|
||||
|
||||
- Also, edit the file `.gitea/workflows/ui_assets.yaml` and change the `BOT_EMAIL` variable to match the email you set for that account. Gitea matches commits to accounts by their email address, so this step is essential.
|
||||
|
||||
2. Create a second Gitea account named `merge-bot`. Edit the file `pkgs/merge-after-ci/default.nix` if the name should be different. Under "Branches," set the main branch to be protected and add `merge-bot` to the whitelisted users for pushing. Set the unprotected file pattern to `**/ui-assets.nix`.
|
||||
|
||||
- Enable the status check for "build / test (pull_request)."
|
||||
|
||||
3. Add both `merge-bot` and `ui-asset-bot` as collaborators.
|
||||
- Set the option to "Delete pull request branch after merge by default."
|
||||
- Also, set the default merge style to "Rebase then create merge commit."
|
||||
|
||||
With this template, you're well-equipped to build and collaborate on high-quality websites efficiently. Happy coding!
|
||||
|
||||
# API guidelines
|
||||
|
||||
see [./api-guidelines.md](./api-guidelines.md)
|
||||
111
docs/contributing/testing.md
Normal file
111
docs/contributing/testing.md
Normal file
@@ -0,0 +1,111 @@
|
||||
# Property vs Contract based testing
|
||||
|
||||
In this section, we'll explore the importance of testing the backend of your FastAPI application, specifically focusing on the advantages of using contract-based testing with property-based testing frameworks.
|
||||
|
||||
## Why Use Property-Based Testing?
|
||||
|
||||
Property-based testing is a powerful approach to test your APIs, offering several key benefits:
|
||||
|
||||
### 1. Scope
|
||||
|
||||
Instead of having to write numerous test cases for various input arguments, property-based testing enables you to test a range of arguments for each parameter using a single test. This approach significantly enhances the robustness of your test suite while reducing redundancy in your testing code. In short, your test code becomes cleaner, more DRY (Don't Repeat Yourself), and more efficient. It also becomes more effective as you can easily test numerous edge cases.
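As a minimal sketch of what this looks like with Hypothesis (the function under test is hypothetical), a single property test covers a whole range of inputs:

```python
from hypothesis import given, strategies as st


def normalize_port(port: int) -> int:
    # hypothetical function under test
    return port % 65536


@given(st.integers(min_value=0, max_value=1_000_000))
def test_normalize_port_stays_in_range(port: int) -> None:
    assert 0 <= normalize_port(port) < 65536
```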
|
||||
|
||||
### 2. Reproducibility
|
||||
|
||||
Property-based testing tools retain test cases and their results, allowing you to reproduce and replay tests in case of failure. This feature is invaluable for debugging and ensuring the stability of your application over time.
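Hypothesis, for example, stores failing inputs in a local example database and replays them on the next run; a known regression input can additionally be pinned with the `@example` decorator (a minimal sketch):

```python
from hypothesis import example, given, strategies as st


@given(st.text())
@example("")  # pin a previously failing input so it is always re-tested
def test_encode_decode_roundtrip(value: str) -> None:
    assert value.encode("utf-8").decode("utf-8") == value
```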
|
||||
|
||||
## Frameworks for Property-Based Testing
|
||||
|
||||
To implement property-based testing in FastAPI, you can use the following frameworks:
|
||||
|
||||
- [Hypothesis: Property-Based Testing](https://hypothesis.readthedocs.io/en/latest/quickstart.html)
|
||||
- [Schemathesis](https://schemathesis.readthedocs.io/en/stable/#id2)
|
||||
|
||||
## Example
|
||||
|
||||
Running the Schemathesis fuzzer on GET requests:
|
||||
|
||||
```bash
|
||||
nix run .#runSchemaTests
|
||||
```
|
||||
|
||||
If you want to test more request types, edit the file [flake-module.nix](../checks/impure/flake-module.nix).
|
||||
|
||||
After a run, it will upload the results to `schemathesis.io` and give you a link to the report.
|
||||
The credentials to the account are `Username: schemathesis@qube.email` and `Password:6tv4eP96WXsarF`
|
||||
|
||||
## Why Schemas Are Not Contracts
|
||||
|
||||
A schema is a description of the data structure of your API, whereas a contract defines not only the structure but also the expected behavior and constraints. The following resource explains why schemas are not contracts in more detail:
|
||||
|
||||
- [Why Schemas Are Not Contracts](https://pactflow.io/blog/schemas-are-not-contracts/)
|
||||
|
||||
In a nutshell, schemas may define the data structure but often fail to capture complex constraints and the expected interactions between different API endpoints. Contracts fill this gap by specifying both the structure and behavior of your API.
|
||||
|
||||
## Why Use Contract-Driven Testing?
|
||||
|
||||
Contract-driven testing combines the benefits of type annotations and property-based testing, providing a robust approach to ensuring the correctness of your APIs.
|
||||
|
||||
- Contracts become an integral part of the function signature and can be checked statically, ensuring that the API adheres to the defined contract.
|
||||
- Contracts, like property-based tests, allow you to specify conditions and constraints, with the testing framework automatically generating test cases and verifying call results.
|
||||
|
||||
### Frameworks for Contract-Driven Testing
|
||||
|
||||
To implement contract-driven testing in FastAPI, consider the following framework and extension:
|
||||
|
||||
- [Deal: Contract Driven Development](https://deal.readthedocs.io/)
|
||||
By adopting contract-driven testing, you can ensure that your FastAPI application not only has a well-defined structure but also behaves correctly, making it more robust and reliable.
|
||||
- [Whitepaper: Python by contract](https://users.ece.utexas.edu/~gligoric/papers/ZhangETAL22PythonByContractDataset.pdf): this paper goes into more detail about how it works.
|
||||
|
||||
## Examples
|
||||
|
||||
You can annotate functions with `@deal.raises(ClanError)` to state that they can _only_ raise a `ClanError` exception.
|
||||
|
||||
```python
|
||||
import deal
|
||||
|
||||
@deal.raises(ClanError)
|
||||
def get_task(uuid: UUID) -> BaseTask:
|
||||
global POOL
|
||||
return POOL[uuid]
|
||||
```
|
||||
|
||||
To state that a function can raise multiple exceptions, list them one after another, separated by commas:
|
||||
|
||||
```python
|
||||
import deal
|
||||
|
||||
@deal.raises(ClanError, IndexError, ZeroDivisionError)
|
||||
def get_task(uuid: UUID) -> BaseTask:
|
||||
global POOL
|
||||
return POOL[uuid]
|
||||
```
|
||||
|
||||
### Adding deal annotated functions to pytest
|
||||
|
||||
```python
|
||||
from clan_cli.task_manager import get_task
|
||||
import deal
|
||||
|
||||
@deal.cases(get_task) # <--- Add function get_task to testing corpus
|
||||
def test_get_task(case: deal.TestCase) -> None:
|
||||
case() # <--- Call testing framework with function
|
||||
```
|
||||
|
||||
### Adding example input for deeper testing
|
||||
|
||||
You can combine hypothesis annotations with deal annotations to add example inputs to the function so that the verifier can reach deeper parts of the function.
|
||||
|
||||
```python
|
||||
import deal
|
||||
|
||||
@deal.example(lambda: get_task(UUID("5c2061e0-4512-4b30-aa8e-7be4a75b8b45"))) # type: ignore
|
||||
@deal.example(lambda: get_task(UUID("7c2061e6-4512-4b30-aa8e-7be4a75b8b45"))) # type: ignore
|
||||
@deal.raises(ClanError)
|
||||
def get_task(uuid: UUID) -> BaseTask:
|
||||
global POOL
|
||||
return POOL[uuid]
|
||||
```
|
||||
|
||||
You can also add `pre` and `post` conditions. A `pre` condition must be true before the function is executed. A `post` condition must be true after the function has executed. For more information, read the [Writing Contracts section](https://deal.readthedocs.io/basic/values.html).
|
||||
Or read the [API doc of Deal](https://deal.readthedocs.io/details/api.html).
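A minimal sketch of `pre` and `post` conditions (the function is made up for illustration):

```python
import deal


@deal.pre(lambda x: x >= 0)             # argument must be non-negative
@deal.post(lambda result: result >= 0)  # result must be non-negative
def integer_sqrt(x: int) -> int:
    return int(x ** 0.5)
```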
|
||||
62
flake.lock
generated
62
flake.lock
generated
@@ -7,16 +7,15 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1695379860,
|
||||
"narHash": "sha256-ADsWLw33T/6APAoEfwPVkhuUHbH/BW/Jz5cgTgijsIs=",
|
||||
"lastModified": 1706491084,
|
||||
"narHash": "sha256-eaEv+orTmr2arXpoE4aFZQMVPOYXCBEbLgK22kOtkhs=",
|
||||
"owner": "nix-community",
|
||||
"repo": "disko",
|
||||
"rev": "26cf7576b85fd0b4070d8bd84475021e01d63814",
|
||||
"rev": "f67ba6552845ea5d7f596a24d57c33a8a9dc8de9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"ref": "party",
|
||||
"repo": "disko",
|
||||
"type": "github"
|
||||
}
|
||||
@@ -28,11 +27,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1693611461,
|
||||
"narHash": "sha256-aPODl8vAgGQ0ZYFIRisxYG5MOGSkIczvu2Cd8Gb9+1Y=",
|
||||
"lastModified": 1704982712,
|
||||
"narHash": "sha256-2Ptt+9h8dczgle2Oo6z5ni5rt/uLMG47UFTR1ry/wgg=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "7f53fdb7bdc5bb237da7fefef12d099e4fd611ca",
|
||||
"rev": "07f6395285469419cf9d078f59b5b49993198c00",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -41,26 +40,6 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"floco": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1694873346,
|
||||
"narHash": "sha256-Uvh03bg0a6ZnNWiX1Gb8g+m343wSJ/wb8ryUASt0loc=",
|
||||
"owner": "aakropotkin",
|
||||
"repo": "floco",
|
||||
"rev": "d16bd444ab9d29a6640f52ee4e43a66528e07515",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "aakropotkin",
|
||||
"repo": "floco",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixlib": {
|
||||
"locked": {
|
||||
"lastModified": 1693701915,
|
||||
@@ -84,11 +63,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1693791338,
|
||||
"narHash": "sha256-wHmtB5H8AJTUaeGHw+0hsQ6nU4VyvVrP2P4NeCocRzY=",
|
||||
"lastModified": 1706085261,
|
||||
"narHash": "sha256-7PgpHRHyShINcqgevPP1fJ6N8kM5ZSOJnk3QZBrOCQ0=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixos-generators",
|
||||
"rev": "8ee78470029e641cddbd8721496da1316b47d3b4",
|
||||
"rev": "896f6589db5b25023b812bbb6c1f5d3a499b1132",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -99,16 +78,16 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1694767346,
|
||||
"narHash": "sha256-5uH27SiVFUwsTsqC5rs3kS7pBoNhtoy9QfTP9BmknGk=",
|
||||
"lastModified": 1706440623,
|
||||
"narHash": "sha256-MzqsevUkrIVpAbbN7Wn3mGlYklkm2geaozGTFxtnYgA=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "ace5093e36ab1e95cb9463863491bee90d5a4183",
|
||||
"rev": "50071d87c75300c037e28439c5176c3933b9fce5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-unstable",
|
||||
"ref": "nixos-unstable-small",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
@@ -117,7 +96,6 @@
|
||||
"inputs": {
|
||||
"disko": "disko",
|
||||
"flake-parts": "flake-parts",
|
||||
"floco": "floco",
|
||||
"nixos-generators": "nixos-generators",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"sops-nix": "sops-nix",
|
||||
@@ -127,16 +105,16 @@
|
||||
"sops-nix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"sops-nix"
|
||||
"nixpkgs"
|
||||
],
|
||||
"nixpkgs-stable": []
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1694495315,
|
||||
"narHash": "sha256-sZEYXs9T1NVHZSSbMqBEtEm2PGa7dEDcx0ttQkArORc=",
|
||||
"lastModified": 1706410821,
|
||||
"narHash": "sha256-iCfXspqUOPLwRobqQNAQeKzprEyVowLMn17QaRPQc+M=",
|
||||
"owner": "Mic92",
|
||||
"repo": "sops-nix",
|
||||
"rev": "ea208e55f8742fdcc0986b256bdfa8986f5e4415",
|
||||
"rev": "73bf36912e31a6b21af6e0f39218e067283c67ef",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -152,11 +130,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1694528738,
|
||||
"narHash": "sha256-aWMEjib5oTqEzF9f3WXffC1cwICo6v/4dYKjwNktV8k=",
|
||||
"lastModified": 1706462057,
|
||||
"narHash": "sha256-7dG1D4iqqt0bEbBqUWk6lZiSqqwwAO0Hd1L5opVyhNM=",
|
||||
"owner": "numtide",
|
||||
"repo": "treefmt-nix",
|
||||
"rev": "7a49c388d7a6b63bb551b1ddedfa4efab8f400d8",
|
||||
"rev": "c6153c2a3ff4c38d231e3ae99af29b87f1df5901",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
32
flake.nix
32
flake.nix
@@ -1,14 +1,16 @@
|
||||
{
|
||||
description = "clan.lol base operating system";
|
||||
|
||||
nixConfig.extra-substituters = [ "https://cache.clan.lol" ];
|
||||
nixConfig.extra-trusted-public-keys = [ "cache.clan.lol-1:3KztgSAB5R1M+Dz7vzkBGzXdodizbgLXGXKXlcQLA28=" ];
|
||||
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
|
||||
floco.url = "github:aakropotkin/floco";
|
||||
floco.inputs.nixpkgs.follows = "nixpkgs";
|
||||
disko.url = "github:nix-community/disko/party";
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable-small";
|
||||
|
||||
disko.url = "github:nix-community/disko";
|
||||
disko.inputs.nixpkgs.follows = "nixpkgs";
|
||||
sops-nix.url = "github:Mic92/sops-nix";
|
||||
sops-nix.inputs.nixpkgs.follows = "sops-nix";
|
||||
sops-nix.inputs.nixpkgs.follows = "nixpkgs";
|
||||
sops-nix.inputs.nixpkgs-stable.follows = "";
|
||||
nixos-generators.url = "github:nix-community/nixos-generators";
|
||||
nixos-generators.inputs.nixpkgs.follows = "nixpkgs";
|
||||
@@ -19,10 +21,11 @@
|
||||
};
|
||||
|
||||
outputs = inputs @ { flake-parts, ... }:
|
||||
flake-parts.lib.mkFlake { inherit inputs; } ({ ... }: {
|
||||
flake-parts.lib.mkFlake { inherit inputs; } ({ lib, ... }: {
|
||||
systems = [
|
||||
"x86_64-linux"
|
||||
"aarch64-linux"
|
||||
"aarch64-darwin"
|
||||
];
|
||||
imports = [
|
||||
./checks/flake-module.nix
|
||||
@@ -35,7 +38,22 @@
|
||||
|
||||
./lib/flake-module.nix
|
||||
./nixosModules/flake-module.nix
|
||||
./nixosModules/clanCore/flake-module.nix
|
||||
{
|
||||
options.flake = flake-parts.lib.mkSubmoduleOptions {
|
||||
clanInternals = lib.mkOption {
|
||||
type = lib.types.submodule {
|
||||
options = {
|
||||
all-machines-json = lib.mkOption {
|
||||
type = lib.types.attrsOf lib.types.str;
|
||||
};
|
||||
machines = lib.mkOption {
|
||||
type = lib.types.attrsOf (lib.types.attrsOf lib.types.unspecified);
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
];
|
||||
});
|
||||
}
|
||||
|
||||
@@ -10,18 +10,11 @@
|
||||
treefmt.flakeCheck = true;
|
||||
treefmt.flakeFormatter = true;
|
||||
treefmt.programs.shellcheck.enable = true;
|
||||
treefmt.programs.prettier.enable = true;
|
||||
treefmt.programs.prettier.settings.plugins = [
|
||||
"${self'.packages.prettier-plugin-tailwindcss}/lib/node_modules/prettier-plugin-tailwindcss/dist/index.mjs"
|
||||
];
|
||||
treefmt.settings.formatter.prettier.excludes = [
|
||||
"secrets.yaml"
|
||||
"key.json"
|
||||
];
|
||||
|
||||
treefmt.programs.mypy.enable = true;
|
||||
treefmt.programs.mypy.directories = {
|
||||
"pkgs/clan-cli".extraPythonPackages = self'.packages.clan-cli.pytestDependencies;
|
||||
"pkgs/clan-vm-manager".extraPythonPackages = self'.packages.clan-vm-manager.propagatedBuildInputs;
|
||||
};
|
||||
|
||||
treefmt.settings.formatter.nix = {
|
||||
@@ -37,7 +30,6 @@
|
||||
"--" # this argument is ignored by bash
|
||||
];
|
||||
includes = [ "*.nix" ];
|
||||
excludes = [ "pkgs/node-packages/*.nix" ];
|
||||
};
|
||||
treefmt.settings.formatter.python = {
|
||||
command = "sh";
|
||||
@@ -45,7 +37,7 @@
|
||||
"-eucx"
|
||||
''
|
||||
${lib.getExe pkgs.ruff} --fix "$@"
|
||||
${lib.getExe pkgs.black} "$@"
|
||||
${lib.getExe pkgs.ruff} format "$@"
|
||||
''
|
||||
"--" # this argument is ignored by bash
|
||||
];
|
||||
|
||||
@@ -1,37 +1,65 @@
|
||||
{ nixpkgs, self, lib }:
|
||||
{ clan-core, nixpkgs, lib }:
|
||||
{ directory # The directory containing the machines subdirectory
|
||||
, specialArgs ? { } # Extra arguments to pass to nixosSystem i.e. useful to make self available
|
||||
, machines ? { } # allows to include machine-specific modules i.e. machines.${name} = { ... }
|
||||
, clanName # Needs to be (globally) unique, as this determines the folder name where the flake gets downloaded to.
|
||||
, clanIcon ? null # A path to an icon to be used for the clan
|
||||
}:
|
||||
let
|
||||
machinesDirs = lib.optionalAttrs (builtins.pathExists "${directory}/machines") (builtins.readDir (directory + /machines));
|
||||
|
||||
machineSettings = machineName:
|
||||
lib.optionalAttrs (builtins.pathExists "${directory}/machines/${machineName}/settings.json")
|
||||
(builtins.fromJSON
|
||||
(builtins.readFile (directory + /machines/${machineName}/settings.json)));
|
||||
# CLAN_MACHINE_SETTINGS_FILE allows to override the settings file temporarily
|
||||
# This is useful for doing a dry-run before writing changes into the settings.json
|
||||
# Using CLAN_MACHINE_SETTINGS_FILE requires passing --impure to nix eval
|
||||
if builtins.getEnv "CLAN_MACHINE_SETTINGS_FILE" != ""
|
||||
then builtins.fromJSON (builtins.readFile (builtins.getEnv "CLAN_MACHINE_SETTINGS_FILE"))
|
||||
else
|
||||
lib.optionalAttrs (builtins.pathExists "${directory}/machines/${machineName}/settings.json")
|
||||
(builtins.fromJSON
|
||||
(builtins.readFile (directory + /machines/${machineName}/settings.json)));
|
||||
|
||||
nixosConfiguration = { system ? "x86_64-linux", name }: nixpkgs.lib.nixosSystem {
|
||||
modules = [
|
||||
self.nixosModules.clanCore
|
||||
(machineSettings name)
|
||||
(machines.${name} or { })
|
||||
{
|
||||
clanCore.machineName = name;
|
||||
clanCore.clanDir = directory;
|
||||
# TODO: remove this once we have a hardware-config mechanism
|
||||
nixpkgs.hostPlatform = lib.mkDefault system;
|
||||
}
|
||||
];
|
||||
# Read additional imports specified via a config option in settings.json
|
||||
# This is not an infinite recursion, because the imports are discovered here
|
||||
# before calling evalModules.
|
||||
# It is still useful to have the imports as an option, as this allows for type
|
||||
# checking and easy integration with the config frontend(s)
|
||||
machineImports = machineSettings:
|
||||
map
|
||||
(module: clan-core.clanModules.${module})
|
||||
(machineSettings.clanImports or [ ]);
|
||||
|
||||
# TODO: remove default system once we have a hardware-config mechanism
|
||||
nixosConfiguration = { system ? "x86_64-linux", name, forceSystem ? false }: nixpkgs.lib.nixosSystem {
|
||||
modules =
|
||||
let
|
||||
settings = machineSettings name;
|
||||
in
|
||||
(machineImports settings)
|
||||
++ [
|
||||
settings
|
||||
clan-core.nixosModules.clanCore
|
||||
(machines.${name} or { })
|
||||
{
|
||||
clanCore.machineName = name;
|
||||
clanCore.clanName = clanName;
|
||||
clanCore.clanIcon = clanIcon;
|
||||
clanCore.clanDir = directory;
|
||||
nixpkgs.hostPlatform = if forceSystem then lib.mkForce system else lib.mkDefault system;
|
||||
|
||||
# speeds up nix commands by using the nixpkgs from the host system (especially useful in VMs)
|
||||
nix.registry.nixpkgs.to = {
|
||||
type = "path";
|
||||
path = lib.mkDefault nixpkgs;
|
||||
};
|
||||
}
|
||||
];
|
||||
inherit specialArgs;
|
||||
};
|
||||
|
||||
nixosConfigurations = lib.mapAttrs
|
||||
(name: _:
|
||||
nixosConfiguration { inherit name; })
|
||||
(machinesDirs // machines);
|
||||
allMachines = machinesDirs // machines;
|
||||
|
||||
systems = [
|
||||
supportedSystems = [
|
||||
"x86_64-linux"
|
||||
"aarch64-linux"
|
||||
"riscv64-linux"
|
||||
@@ -39,16 +67,24 @@ let
|
||||
"aarch64-darwin"
|
||||
];
|
||||
|
||||
clanInternals = {
|
||||
machines = lib.mapAttrs
|
||||
(name: _:
|
||||
(builtins.listToAttrs (map
|
||||
(system:
|
||||
lib.nameValuePair system (nixosConfiguration { inherit name system; })
|
||||
)
|
||||
systems))
|
||||
)
|
||||
(machinesDirs // machines);
|
||||
};
|
||||
nixosConfigurations = lib.mapAttrs (name: _: nixosConfiguration { inherit name; }) allMachines;
|
||||
|
||||
# This instantiates nixos for each system that we support:
|
||||
# configPerSystem = <system>.<machine>.nixosConfiguration
|
||||
# We need this to build nixos secret generators for each system
|
||||
configsPerSystem = builtins.listToAttrs
|
||||
(builtins.map
|
||||
(system: lib.nameValuePair system
|
||||
(lib.mapAttrs (name: _: nixosConfiguration { inherit name system; forceSystem = true; }) allMachines))
|
||||
supportedSystems);
|
||||
in
|
||||
{ inherit nixosConfigurations clanInternals; }
|
||||
{
|
||||
inherit nixosConfigurations;
|
||||
|
||||
clanInternals = {
|
||||
machines = configsPerSystem;
|
||||
all-machines-json = lib.mapAttrs
|
||||
(system: configs: nixpkgs.legacyPackages.${system}.writers.writeJSON "machines.json" (lib.mapAttrs (_: m: m.config.system.clan.deployment.data) configs))
|
||||
configsPerSystem;
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,18 +1,6 @@
|
||||
{ lib, self, nixpkgs, ... }:
|
||||
{ lib, clan-core, nixpkgs, ... }:
|
||||
{
|
||||
findNixFiles = folder:
|
||||
lib.mapAttrs'
|
||||
(name: type:
|
||||
if
|
||||
type == "directory"
|
||||
then
|
||||
lib.nameValuePair name "${folder}/${name}"
|
||||
else
|
||||
lib.nameValuePair (lib.removeSuffix ".nix" name) "${folder}/${name}"
|
||||
)
|
||||
(builtins.readDir folder);
|
||||
|
||||
jsonschema = import ./jsonschema { inherit lib; };
|
||||
|
||||
buildClan = import ./build-clan { inherit lib self nixpkgs; };
|
||||
buildClan = import ./build-clan { inherit clan-core lib nixpkgs; };
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
];
|
||||
flake.lib = import ./default.nix {
|
||||
inherit lib;
|
||||
inherit self;
|
||||
inherit (inputs) nixpkgs;
|
||||
clan-core = self;
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,21 +1,27 @@
|
||||
{ lib ? import <nixpkgs/lib> }:
|
||||
{ lib ? import <nixpkgs/lib>
|
||||
, excludedTypes ? [
|
||||
"functionTo"
|
||||
"package"
|
||||
]
|
||||
}:
|
||||
let
|
||||
|
||||
# from nixos type to jsonschema type
|
||||
typeMap = {
|
||||
bool = "boolean";
|
||||
float = "number";
|
||||
int = "integer";
|
||||
str = "string";
|
||||
path = "string"; # TODO add prober path checks
|
||||
};
|
||||
|
||||
# remove _module attribute from options
|
||||
clean = opts: builtins.removeAttrs opts [ "_module" ];
|
||||
|
||||
# throw error if option type is not supported
|
||||
notSupported = option: throw
|
||||
"option type '${option.type.description}' not supported by jsonschema converter";
|
||||
notSupported = option: lib.trace option throw ''
|
||||
option type '${option.type.name}' ('${option.type.description}') not supported by jsonschema converter
|
||||
location: ${lib.concatStringsSep "." option.loc}
|
||||
'';
|
||||
|
||||
isExcludedOption = option: (lib.elem (option.type.name or null) excludedTypes);
|
||||
|
||||
filterExcluded = lib.filter (opt: ! isExcludedOption opt);
|
||||
|
||||
filterExcludedAttrs = lib.filterAttrs (_name: opt: ! isExcludedOption opt);
|
||||
|
||||
allBasicTypes =
|
||||
[ "boolean" "integer" "number" "string" "array" "object" "null" ];
|
||||
|
||||
in
|
||||
rec {
|
||||
@@ -32,10 +38,11 @@ rec {
|
||||
# parses a set of evaluated nixos options to a jsonschema
|
||||
parseOptions = options':
|
||||
let
|
||||
options = clean options';
|
||||
options = filterExcludedAttrs (clean options');
|
||||
# parse options to jsonschema properties
|
||||
properties = lib.mapAttrs (_name: option: parseOption option) options;
|
||||
isRequired = prop: ! (prop ? default || prop.type == "object");
|
||||
# TODO: figure out how to handle if prop.anyOf is used
|
||||
isRequired = prop: ! (prop ? default || prop.type or null == "object");
|
||||
requiredProps = lib.filterAttrs (_: prop: isRequired prop) properties;
|
||||
required = lib.optionalAttrs (requiredProps != { }) {
|
||||
required = lib.attrNames requiredProps;
|
||||
@@ -54,27 +61,50 @@ rec {
|
||||
inherit (option) default;
|
||||
};
|
||||
description = lib.optionalAttrs (option ? description) {
|
||||
inherit (option) description;
|
||||
description = option.description.text or option.description;
|
||||
};
|
||||
in
|
||||
|
||||
# either type
|
||||
# TODO: if all nested options are excluded, the parent should be excluded too
|
||||
if option.type.name or null == "either"
|
||||
# return jsonschema property definition for either
|
||||
then
|
||||
let
|
||||
optionsList' = [
|
||||
{ type = option.type.nestedTypes.left; _type = "option"; loc = option.loc; }
|
||||
{ type = option.type.nestedTypes.right; _type = "option"; loc = option.loc; }
|
||||
];
|
||||
optionsList = filterExcluded optionsList';
|
||||
in
|
||||
default // description // {
|
||||
anyOf = map parseOption optionsList;
|
||||
}
|
||||
|
||||
# handle nested options (not a submodule)
|
||||
if ! option ? _type
|
||||
else if ! option ? _type
|
||||
then parseOptions option
|
||||
|
||||
# throw if not an option
|
||||
else if option._type != "option"
|
||||
else if option._type != "option" && option._type != "option-type"
|
||||
then throw "parseOption: not an option"
|
||||
|
||||
# parse nullOr
|
||||
else if option.type.name == "nullOr"
|
||||
# return jsonschema property definition for nullOr
|
||||
then default // description // {
|
||||
type = [
|
||||
"null"
|
||||
(typeMap.${option.type.functor.wrapped.name} or (notSupported option))
|
||||
];
|
||||
}
|
||||
then
|
||||
let
|
||||
nestedOption =
|
||||
{ type = option.type.nestedTypes.elemType; _type = "option"; loc = option.loc; };
|
||||
in
|
||||
default // description // {
|
||||
anyOf =
|
||||
[{ type = "null"; }]
|
||||
++ (
|
||||
lib.optional (! isExcludedOption nestedOption)
|
||||
(parseOption nestedOption)
|
||||
);
|
||||
}
|
||||
|
||||
# parse bool
|
||||
else if option.type.name == "bool"
|
||||
@@ -91,7 +121,7 @@ rec {
|
||||
}
|
||||
|
||||
# parse int
|
||||
else if option.type.name == "int"
|
||||
else if (option.type.name == "int" || option.type.name == "positiveInt")
|
||||
# return jsonschema property definition for int
|
||||
then default // description // {
|
||||
type = "integer";
|
||||
@@ -111,6 +141,27 @@ rec {
|
||||
type = "string";
|
||||
}
|
||||
|
||||
# parse anything
|
||||
else if option.type.name == "anything"
|
||||
# return jsonschema property definition for anything
|
||||
then default // description // {
|
||||
type = allBasicTypes;
|
||||
}
|
||||
|
||||
# parse unspecified
|
||||
else if option.type.name == "unspecified"
|
||||
# return jsonschema property definition for unspecified
|
||||
then default // description // {
|
||||
type = allBasicTypes;
|
||||
}
|
||||
|
||||
# parse raw
|
||||
else if option.type.name == "raw"
|
||||
# return jsonschema property definition for raw
|
||||
then default // description // {
|
||||
type = allBasicTypes;
|
||||
}
|
||||
|
||||
# parse enum
|
||||
else if option.type.name == "enum"
|
||||
# return jsonschema property definition for enum
|
||||
@@ -127,15 +178,26 @@ rec {
|
||||
}
|
||||
|
||||
# parse list
|
||||
else if (option.type.name == "listOf")
|
||||
# return jsonschema property definition for list
|
||||
then
|
||||
let
|
||||
nestedOption = { type = option.type.functor.wrapped; _type = "option"; loc = option.loc; };
|
||||
in
|
||||
default // description // {
|
||||
type = "array";
|
||||
}
|
||||
// (lib.optionalAttrs (! isExcludedOption nestedOption) {
|
||||
items = parseOption nestedOption;
|
||||
})
|
||||
|
||||
# parse list of unspecified
|
||||
else if
|
||||
(option.type.name == "listOf")
|
||||
&& (typeMap ? "${option.type.functor.wrapped.name}")
|
||||
&& (option.type.functor.wrapped.name == "unspecified")
|
||||
# return jsonschema property definition for list
|
||||
then default // description // {
|
||||
type = "array";
|
||||
items = {
|
||||
type = typeMap.${option.type.functor.wrapped.name};
|
||||
};
|
||||
}
|
||||
|
||||
# parse attrsOf submodule
|
||||
@@ -147,15 +209,29 @@ rec {
|
||||
}
|
||||
|
||||
# parse attrs
|
||||
else if option.type.name == "attrsOf"
|
||||
else if option.type.name == "attrs"
|
||||
# return jsonschema property definition for attrs
|
||||
then default // description // {
|
||||
type = "object";
|
||||
additionalProperties = {
|
||||
type = typeMap.${option.type.nestedTypes.elemType.name} or (notSupported option);
|
||||
};
|
||||
additionalProperties = true;
|
||||
}
|
||||
|
||||
# parse attrsOf
|
||||
# TODO: if nested option is excluded, the parent should be excluded too
|
||||
else if option.type.name == "attrsOf" || option.type.name == "lazyAttrsOf"
|
||||
# return jsonschema property definition for attrs
|
||||
then
|
||||
let
|
||||
nestedOption = { type = option.type.nestedTypes.elemType; _type = "option"; loc = option.loc; };
|
||||
in
|
||||
default // description // {
|
||||
type = "object";
|
||||
additionalProperties =
|
||||
if ! isExcludedOption nestedOption
|
||||
then parseOption { type = option.type.nestedTypes.elemType; _type = "option"; loc = option.loc; }
|
||||
else false;
|
||||
}
|
||||
|
||||
# parse submodule
|
||||
else if option.type.name == "submodule"
|
||||
# return jsonschema property definition for submodule
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
perSystem = { pkgs, self', ... }: {
|
||||
perSystem = { pkgs, ... }: {
|
||||
checks = {
|
||||
|
||||
# check if the `clan config` example jsonschema and data is valid
|
||||
@@ -19,7 +19,7 @@
|
||||
# check if the `clan config` nix jsonschema converter unit tests succeed
|
||||
lib-jsonschema-nix-unit-tests = pkgs.runCommand "lib-jsonschema-nix-unit-tests" { } ''
|
||||
export NIX_PATH=nixpkgs=${pkgs.path}
|
||||
${self'.packages.nix-unit}/bin/nix-unit \
|
||||
${pkgs.nix-unit}/bin/nix-unit \
|
||||
${./.}/test.nix \
|
||||
--eval-store $(realpath .)
|
||||
touch $out
|
||||
|
||||
@@ -39,6 +39,28 @@ in
|
||||
};
|
||||
};
|
||||
|
||||
testDescriptionIsAttrs =
|
||||
let
|
||||
evaledConfig = lib.evalModules {
|
||||
modules = [{
|
||||
options.opt = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
description = {
|
||||
_type = "mdDoc";
|
||||
text = description;
|
||||
};
|
||||
};
|
||||
}];
|
||||
};
|
||||
in
|
||||
{
|
||||
expr = slib.parseOption evaledConfig.options.opt;
|
||||
expected = {
|
||||
type = "boolean";
|
||||
inherit description;
|
||||
};
|
||||
};
|
||||
|
||||
testBool =
|
||||
let
|
||||
default = false;
|
||||
@@ -115,6 +137,34 @@ in
|
||||
};
|
||||
};
|
||||
|
||||
testListOfUnspecified =
|
||||
let
|
||||
default = [ 1 2 3 ];
|
||||
in
|
||||
{
|
||||
expr = slib.parseOption (evalType (lib.types.listOf lib.types.unspecified) default);
|
||||
expected = {
|
||||
type = "array";
|
||||
items = {
|
||||
type = [ "boolean" "integer" "number" "string" "array" "object" "null" ];
|
||||
};
|
||||
inherit default description;
|
||||
};
|
||||
};
|
||||
|
||||
testAttrs =
|
||||
let
|
||||
default = { foo = 1; bar = 2; baz = 3; };
|
||||
in
|
||||
{
|
||||
expr = slib.parseOption (evalType (lib.types.attrs) default);
|
||||
expected = {
|
||||
type = "object";
|
||||
additionalProperties = true;
|
||||
inherit default description;
|
||||
};
|
||||
};
|
||||
|
||||
testAttrsOfInt =
|
||||
let
|
||||
default = { foo = 1; bar = 2; baz = 3; };
|
||||
@@ -130,6 +180,21 @@ in
|
||||
};
|
||||
};
|
||||
|
||||
testLazyAttrsOfInt =
|
||||
let
|
||||
default = { foo = 1; bar = 2; baz = 3; };
|
||||
in
|
||||
{
|
||||
expr = slib.parseOption (evalType (lib.types.lazyAttrsOf lib.types.int) default);
|
||||
expected = {
|
||||
type = "object";
|
||||
additionalProperties = {
|
||||
type = "integer";
|
||||
};
|
||||
inherit default description;
|
||||
};
|
||||
};
|
||||
|
||||
testNullOrBool =
|
||||
let
|
||||
default = null; # null is a valid value for this type
|
||||
@@ -137,7 +202,30 @@ in
|
||||
{
|
||||
expr = slib.parseOption (evalType (lib.types.nullOr lib.types.bool) default);
|
||||
expected = {
|
||||
type = [ "null" "boolean" ];
|
||||
anyOf = [
|
||||
{ type = "null"; }
|
||||
{ type = "boolean"; }
|
||||
];
|
||||
inherit default description;
|
||||
};
|
||||
};
|
||||
|
||||
testNullOrNullOr =
|
||||
let
|
||||
default = null; # null is a valid value for this type
|
||||
in
|
||||
{
|
||||
expr = slib.parseOption (evalType (lib.types.nullOr (lib.types.nullOr lib.types.bool)) default);
|
||||
expected = {
|
||||
anyOf = [
|
||||
{ type = "null"; }
|
||||
{
|
||||
anyOf = [
|
||||
{ type = "null"; }
|
||||
{ type = "boolean"; }
|
||||
];
|
||||
}
|
||||
];
|
||||
inherit default description;
|
||||
};
|
||||
};
|
||||
@@ -246,4 +334,55 @@ in
|
||||
inherit default description;
|
||||
};
|
||||
};
|
||||
|
||||
testEither =
|
||||
let
|
||||
default = "foo";
|
||||
in
|
||||
{
|
||||
expr = slib.parseOption (evalType (lib.types.either lib.types.bool lib.types.str) default);
|
||||
expected = {
|
||||
anyOf = [
|
||||
{ type = "boolean"; }
|
||||
{ type = "string"; }
|
||||
];
|
||||
inherit default description;
|
||||
};
|
||||
};
|
||||
|
||||
testAnything =
|
||||
let
|
||||
default = "foo";
|
||||
in
|
||||
{
|
||||
expr = slib.parseOption (evalType lib.types.anything default);
|
||||
expected = {
|
||||
inherit default description;
|
||||
type = [ "boolean" "integer" "number" "string" "array" "object" "null" ];
|
||||
};
|
||||
};
|
||||
|
||||
testUnspecified =
|
||||
let
|
||||
default = "foo";
|
||||
in
|
||||
{
|
||||
expr = slib.parseOption (evalType lib.types.unspecified default);
|
||||
expected = {
|
||||
inherit default description;
|
||||
type = [ "boolean" "integer" "number" "string" "array" "object" "null" ];
|
||||
};
|
||||
};
|
||||
|
||||
testRaw =
|
||||
let
|
||||
default = "foo";
|
||||
in
|
||||
{
|
||||
expr = slib.parseOption (evalType lib.types.raw default);
|
||||
expected = {
|
||||
inherit default description;
|
||||
type = [ "boolean" "integer" "number" "string" "array" "object" "null" ];
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
49
nixosModules/clanCore/backups.nix
Normal file
49
nixosModules/clanCore/backups.nix
Normal file
@@ -0,0 +1,49 @@
|
||||
{ lib, ... }:
|
||||
{
|
||||
imports = [
|
||||
./state.nix
|
||||
];
|
||||
options.clanCore.backups = {
|
||||
providers = lib.mkOption {
|
||||
type = lib.types.attrsOf (lib.types.submodule ({ name, ... }: {
|
||||
options = {
|
||||
name = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = name;
|
||||
description = ''
|
||||
Name of the backup provider
|
||||
'';
|
||||
};
|
||||
list = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
script to list backups
|
||||
'';
|
||||
};
|
||||
restore = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
script to restore a backup
|
||||
should take an optional service name as argument
|
||||
gets ARCHIVE_ID, LOCATION, JOB and FOLDERS as environment variables
|
||||
ARCHIVE_ID is the id of the backup
|
||||
LOCATION is the remote identifier of the backup
|
||||
JOB is the job name of the backup
|
||||
FOLDERS is a colon separated list of folders to restore
|
||||
'';
|
||||
};
|
||||
create = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
script to start a backup
|
||||
'';
|
||||
};
|
||||
};
|
||||
}));
|
||||
default = { };
|
||||
description = ''
|
||||
Configured backup providers which are used by this machine
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
{ lib, ... }: {
|
||||
options.clan.bloatware = lib.mkOption {
|
||||
type = lib.types.submodule {
|
||||
imports = [
|
||||
../../../lib/jsonschema/example-interface.nix
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
19
nixosModules/clanCore/default.nix
Normal file
19
nixosModules/clanCore/default.nix
Normal file
@@ -0,0 +1,19 @@
|
||||
{
|
||||
imports = [
|
||||
./backups.nix
|
||||
./manual.nix
|
||||
./imports.nix
|
||||
./meshnamed
|
||||
./metadata.nix
|
||||
./networking.nix
|
||||
./nix-settings.nix
|
||||
./options.nix
|
||||
./outputs.nix
|
||||
./packages.nix
|
||||
./schema.nix
|
||||
./secrets
|
||||
./vm.nix
|
||||
./wayland-proxy-virtwl.nix
|
||||
./zerotier
|
||||
];
|
||||
}
|
||||
@@ -1,44 +0,0 @@
|
||||
{ self, inputs, lib, ... }: {
|
||||
flake.nixosModules.clanCore = { pkgs, options, ... }: {
|
||||
imports = [
|
||||
./secrets
|
||||
./zerotier.nix
|
||||
./networking.nix
|
||||
inputs.sops-nix.nixosModules.sops
|
||||
# just some example options. Can be removed later
|
||||
./bloatware
|
||||
./vm.nix
|
||||
./options.nix
|
||||
];
|
||||
options.clanSchema = lib.mkOption {
|
||||
type = lib.types.attrs;
|
||||
description = "The json schema for the .clan options namespace";
|
||||
default = self.lib.jsonschema.parseOptions options.clan;
|
||||
};
|
||||
options.clanCore = {
|
||||
clanDir = lib.mkOption {
|
||||
type = lib.types.either lib.types.path lib.types.str;
|
||||
description = ''
|
||||
the location of the flake repo, used to calculate the location of facts and secrets
|
||||
'';
|
||||
};
|
||||
machineName = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
the name of the machine
|
||||
'';
|
||||
};
|
||||
clanPkgs = lib.mkOption {
|
||||
default = self.packages.${pkgs.system};
|
||||
defaultText = "self.packages.${pkgs.system}";
|
||||
internal = true;
|
||||
};
|
||||
};
|
||||
options.system.clan = lib.mkOption {
|
||||
type = lib.types.lazyAttrsOf lib.types.raw;
|
||||
description = ''
|
||||
utility outputs for clan management of this machine
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
16
nixosModules/clanCore/imports.nix
Normal file
16
nixosModules/clanCore/imports.nix
Normal file
@@ -0,0 +1,16 @@
|
||||
{ lib
|
||||
, ...
|
||||
}: {
|
||||
/*
|
||||
Declaring imports inside the module system does not trigger an infinite
|
||||
recursion in this case because buildClan generates the imports from the
|
||||
settings.json file before calling out to evalModules.
|
||||
*/
|
||||
options.clanImports = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
description = ''
|
||||
A list of imported module names imported from clan-core.clanModules.<name>
|
||||
The buildClan function will automatically import these modules for the current machine.
|
||||
'';
|
||||
};
|
||||
}
|
||||
1
nixosModules/clanCore/manual.nix
Normal file
1
nixosModules/clanCore/manual.nix
Normal file
@@ -0,0 +1 @@
|
||||
{ pkgs, ... }: { documentation.nixos.enable = pkgs.lib.mkDefault false; }
|
||||
84
nixosModules/clanCore/meshnamed/default.nix
Normal file
84
nixosModules/clanCore/meshnamed/default.nix
Normal file
@@ -0,0 +1,84 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
let
|
||||
cfg = config.clan.networking.meshnamed;
|
||||
in
|
||||
{
|
||||
options.clan.networking.meshnamed = {
|
||||
enable = (lib.mkEnableOption "meshnamed") // {
|
||||
default = config.clan.networking.meshnamed.networks != { };
|
||||
};
|
||||
listenAddress = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "fd66:29e9:f422:8dfe:beba:68ec:bd09:7876";
|
||||
description = lib.mdDoc ''
|
||||
The address to listen on.
|
||||
'';
|
||||
};
|
||||
networks = lib.mkOption {
|
||||
default = { };
|
||||
type = lib.types.attrsOf (lib.types.submodule ({ name, ... }: {
|
||||
options = {
|
||||
name = lib.mkOption {
|
||||
default = name;
|
||||
type = lib.types.str;
|
||||
example = "my-network";
|
||||
description = lib.mdDoc ''
|
||||
The name of the network.
|
||||
'';
|
||||
};
|
||||
subnet = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
example = "fd43:7def:4b50:28d0:4e99:9347:3035:17ef/88";
|
||||
description = lib.mdDoc ''
|
||||
The subnet to use for the mesh network.
|
||||
'';
|
||||
};
|
||||
};
|
||||
}));
|
||||
};
|
||||
};
|
||||
config = lib.mkIf config.clan.networking.meshnamed.enable {
|
||||
# we assign this random source address to bind meshnamed to.
|
||||
systemd.network.netdevs."08-meshnamed" = {
|
||||
netdevConfig = {
|
||||
Name = "meshnamed";
|
||||
Kind = "dummy";
|
||||
};
|
||||
};
|
||||
systemd.network.networks."08-meshnamed" = {
|
||||
matchConfig.Name = "meshnamed";
|
||||
networkConfig = {
|
||||
Address = [ "${cfg.listenAddress}/128" ];
|
||||
DNS = [ config.clan.networking.meshnamed.listenAddress ];
|
||||
Domains = [ "~${lib.concatMapStringsSep "," (network: network.name) (builtins.attrValues config.clan.networking.meshnamed.networks)}" ];
|
||||
};
|
||||
};
|
||||
|
||||
# for convenience, so we can debug with dig
|
||||
networking.extraHosts = ''
|
||||
${cfg.listenAddress} meshnamed
|
||||
'';
|
||||
|
||||
networking.networkmanager.unmanaged = [ "interface-name:meshnamed" ];
|
||||
|
||||
systemd.services.meshnamed =
|
||||
let
|
||||
networks = lib.concatMapStringsSep "," (network: "${network.name}=${network.subnet}")
|
||||
(builtins.attrValues config.clan.networking.meshnamed.networks);
|
||||
in
|
||||
{
|
||||
# fix container test
|
||||
after = [ "network.target" ] ++ lib.optional config.boot.isContainer "sys-devices-virtual-net-meshnamed.device";
|
||||
bindsTo = lib.optional (!config.boot.isContainer) "sys-devices-virtual-net-meshnamed.device";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
ExecStart = "${pkgs.callPackage ../../../pkgs/meshname/default.nix { }}/bin/meshnamed -networks ${networks} -listenaddr [${cfg.listenAddress}]:53";
|
||||
|
||||
# to bind port 53
|
||||
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
|
||||
DynamicUser = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
32
nixosModules/clanCore/metadata.nix
Normal file
32
nixosModules/clanCore/metadata.nix
Normal file
@@ -0,0 +1,32 @@
|
||||
{ lib, pkgs, ... }: {
|
||||
options.clanCore = {
|
||||
clanName = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
the name of the clan
|
||||
'';
|
||||
};
|
||||
clanDir = lib.mkOption {
|
||||
type = lib.types.either lib.types.path lib.types.str;
|
||||
description = ''
|
||||
the location of the flake repo, used to calculate the location of facts and secrets
|
||||
'';
|
||||
};
|
||||
clanIcon = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
description = ''
|
||||
the location of the clan icon
|
||||
'';
|
||||
};
|
||||
machineName = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
the name of the machine
|
||||
'';
|
||||
};
|
||||
clanPkgs = lib.mkOption {
|
||||
defaultText = "self.packages.${pkgs.system}";
|
||||
internal = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -18,4 +18,32 @@
|
||||
default = "root@${config.networking.hostName}";
|
||||
};
|
||||
};
|
||||
config = {
|
||||
# conflicts with systemd-resolved
|
||||
networking.useHostResolvConf = false;
|
||||
|
||||
# Allow PMTU / DHCP
|
||||
networking.firewall.allowPing = true;
|
||||
|
||||
# The notion of "online" is a broken concept
|
||||
# https://github.com/systemd/systemd/blob/e1b45a756f71deac8c1aa9a008bd0dab47f64777/NEWS#L13
|
||||
systemd.services.NetworkManager-wait-online.enable = false;
|
||||
systemd.network.wait-online.enable = false;
|
||||
|
||||
# Provide a default network configuration but don't compete with network-manager or dhcpcd
|
||||
systemd.network.networks."50-uplink" = lib.mkIf (!(config.networking.networkmanager.enable || config.networking.dhcpcd.enable)) {
|
||||
matchConfig.Type = "ether";
|
||||
networkConfig = {
|
||||
DHCP = "yes";
|
||||
LLDP = "yes";
|
||||
LLMNR = "yes";
|
||||
MulticastDNS = "yes";
|
||||
IPv6AcceptRA = "yes";
|
||||
};
|
||||
};
|
||||
|
||||
# Use networkd instead of the pile of shell scripts
|
||||
networking.useNetworkd = lib.mkDefault true;
|
||||
networking.useDHCP = lib.mkDefault false;
|
||||
};
|
||||
}
|
||||
|
||||
28
nixosModules/clanCore/nix-settings.nix
Normal file
28
nixosModules/clanCore/nix-settings.nix
Normal file
@@ -0,0 +1,28 @@
|
||||
{ lib, ... }:
|
||||
# Taken from:
|
||||
# https://github.com/nix-community/srvos/blob/main/nixos/common/nix.nix
|
||||
{
|
||||
# Fallback quickly if substituters are not available.
|
||||
nix.settings.connect-timeout = 5;
|
||||
|
||||
# Enable flakes
|
||||
nix.settings.experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
"repl-flake"
|
||||
];
|
||||
|
||||
# The default at 10 is rarely enough.
|
||||
nix.settings.log-lines = lib.mkDefault 25;
|
||||
|
||||
# Avoid disk full issues
|
||||
nix.settings.max-free = lib.mkDefault (3000 * 1024 * 1024);
|
||||
nix.settings.min-free = lib.mkDefault (512 * 1024 * 1024);
|
||||
|
||||
nix.daemonCPUSchedPolicy = lib.mkDefault "batch";
|
||||
nix.daemonIOSchedClass = lib.mkDefault "idle";
|
||||
nix.daemonIOSchedPriority = lib.mkDefault 7;
|
||||
|
||||
# Avoid copying unnecessary stuff over SSH
|
||||
nix.settings.builders-use-substitutes = true;
|
||||
}
|
||||
75
nixosModules/clanCore/outputs.nix
Normal file
75
nixosModules/clanCore/outputs.nix
Normal file
@@ -0,0 +1,75 @@
|
||||
{ config, lib, pkgs, ... }: {
|
||||
# TODO: factor these out into a separate interface.nix.
|
||||
# Also think about moving these options out of `system.clan`.
|
||||
# Maybe we should not re-use the already polluted config.system namespace
|
||||
# and instead have a separate top-level namespace like `clanOutputs`, with
|
||||
# well defined options marked as `internal = true;`.
|
||||
options.system.clan = lib.mkOption {
|
||||
type = lib.types.submodule {
|
||||
options = {
|
||||
deployment.data = lib.mkOption {
|
||||
type = lib.types.attrs;
|
||||
description = ''
|
||||
the data to be written to the deployment.json file
|
||||
'';
|
||||
};
|
||||
deployment.file = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
description = ''
|
||||
the location of the deployment.json file
|
||||
'';
|
||||
};
|
||||
deploymentAddress = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
the address of the deployment server
|
||||
'';
|
||||
};
|
||||
secretsUploadDirectory = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
description = ''
|
||||
the directory on the deployment server where secrets are uploaded
|
||||
'';
|
||||
};
|
||||
secretsModule = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
the python import path to the secrets module
|
||||
'';
|
||||
};
|
||||
secretsData = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
description = ''
|
||||
secret data as json for the generator
|
||||
'';
|
||||
default = pkgs.writers.writeJSON "secrets.json" (lib.mapAttrs
|
||||
(_name: secret: {
|
||||
secrets = builtins.attrNames secret.secrets;
|
||||
facts = lib.mapAttrs (_: secret: secret.path) secret.facts;
|
||||
generator = secret.generator.finalScript;
|
||||
})
|
||||
config.clanCore.secrets);
|
||||
};
|
||||
vm.create = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
description = ''
|
||||
json metadata about the vm
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
description = ''
|
||||
utility outputs for clan management of this machine
|
||||
'';
|
||||
};
|
||||
# optimization for faster secret generate/upload and machines update
|
||||
config = {
|
||||
system.clan.deployment.data = {
|
||||
inherit (config.system.clan) secretsModule secretsData;
|
||||
inherit (config.clan.networking) deploymentAddress;
|
||||
inherit (config.clanCore) secretsUploadDirectory;
|
||||
};
|
||||
system.clan.deployment.file = pkgs.writeText "deployment.json" (builtins.toJSON config.system.clan.deployment.data);
|
||||
};
|
||||
|
||||
}
|
||||
10 nixosModules/clanCore/packages.nix Normal file
@@ -0,0 +1,10 @@
{ pkgs, ... }: {
  # essential debugging tools for networked services
  environment.systemPackages = [
    pkgs.dnsutils
    pkgs.tcpdump
    pkgs.curl
    pkgs.jq
    pkgs.htop
  ];
}
11 nixosModules/clanCore/schema.nix Normal file
@@ -0,0 +1,11 @@
{ options, lib, ... }:
let
  jsonschema = import ../../lib/jsonschema { inherit lib; };
in
{
  options.clanSchema = lib.mkOption {
    type = lib.types.attrs;
    description = "The json schema for the .clan options namespace";
    default = jsonschema.parseOptions options.clan;
  };
}
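Editor's note (illustrative, not part of the diff): `clanSchema` exposes a JSON schema for the `.clan` options, so external tools can validate settings before a rebuild. A minimal sketch, assuming the schema is exported with `nix eval --json`; the machine name, attribute path and candidate settings below are made-up examples.

```python
import json
import subprocess

import jsonschema  # third-party package, assumed available

# Hypothetical attribute path; adjust to the actual flake and machine name.
schema = json.loads(
    subprocess.run(
        ["nix", "eval", "--json", ".#nixosConfigurations.mymachine.config.clanSchema"],
        check=True, capture_output=True, text=True,
    ).stdout
)

candidate = {"networking": {"zerotier": {"networkId": "0123456789abcdef"}}}  # example only
jsonschema.validate(instance=candidate, schema=schema)  # raises ValidationError if invalid
```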
@@ -1,4 +1,4 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
{ config, lib, ... }:
|
||||
{
|
||||
options.clanCore.secretStore = lib.mkOption {
|
||||
type = lib.types.enum [ "sops" "password-store" "custom" ];
|
||||
@@ -6,9 +6,32 @@
|
||||
description = ''
|
||||
method to store secrets
|
||||
custom can be used to define a custom secret store.
|
||||
one would have to define system.clan.generateSecrets and system.clan.uploadSecrets
|
||||
'';
|
||||
};
|
||||
|
||||
options.clanCore.secretsDirectory = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
description = ''
|
||||
The directory where secrets are installed to. This is backend specific.
|
||||
'';
|
||||
};
|
||||
|
||||
options.clanCore.secretsUploadDirectory = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
default = null;
|
||||
description = ''
|
||||
The directory where secrets are uploaded into, This is backend specific.
|
||||
'';
|
||||
};
|
||||
|
||||
options.clanCore.secretsPrefix = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
description = ''
|
||||
Prefix for secrets. This is backend specific.
|
||||
'';
|
||||
};
|
||||
|
||||
options.clanCore.secrets = lib.mkOption {
|
||||
default = { };
|
||||
type = lib.types.attrsOf
|
||||
@@ -22,32 +45,67 @@
|
||||
'';
|
||||
};
|
||||
generator = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
Script to generate the secret.
|
||||
The script will be called with the following variables:
|
||||
- facts: path to a directory where facts can be stored
|
||||
- secrets: path to a directory where secrets can be stored
|
||||
The script is expected to generate all secrets and facts defined in the module.
|
||||
'';
|
||||
};
|
||||
secrets = lib.mkOption {
|
||||
type = lib.types.attrsOf (lib.types.submodule (secret: {
|
||||
type = lib.types.submodule ({ config, ... }: {
|
||||
options = {
|
||||
name = lib.mkOption {
|
||||
path = lib.mkOption {
|
||||
type = lib.types.listOf (lib.types.either lib.types.path lib.types.package);
|
||||
default = [ ];
|
||||
description = ''
|
||||
Extra paths to add to the PATH environment variable when running the generator.
|
||||
'';
|
||||
};
|
||||
script = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
name of the secret
|
||||
Script to generate the secret.
|
||||
The script will be called with the following variables:
|
||||
- facts: path to a directory where facts can be stored
|
||||
- secrets: path to a directory where secrets can be stored
|
||||
The script is expected to generate all secrets and facts defined in the module.
|
||||
'';
|
||||
};
|
||||
finalScript = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
readOnly = true;
|
||||
internal = true;
|
||||
default = ''
|
||||
export PATH="${lib.makeBinPath config.path}"
|
||||
set -efu -o pipefail
|
||||
${config.script}
|
||||
'';
|
||||
default = secret.config._module.args.name;
|
||||
};
|
||||
};
|
||||
}));
|
||||
description = ''
|
||||
path where the secret is located in the filesystem
|
||||
'';
|
||||
});
|
||||
};
|
||||
secrets =
|
||||
let
|
||||
config' = config;
|
||||
in
|
||||
lib.mkOption {
|
||||
type = lib.types.attrsOf (lib.types.submodule ({ config, ... }: {
|
||||
options = {
|
||||
name = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
name of the secret
|
||||
'';
|
||||
default = config._module.args.name;
|
||||
};
|
||||
path = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
path to a secret which is generated by the generator
|
||||
'';
|
||||
default = "${config'.clanCore.secretsDirectory}/${config'.clanCore.secretsPrefix}${config.name}";
|
||||
};
|
||||
};
|
||||
}));
|
||||
description = ''
|
||||
path where the secret is located in the filesystem
|
||||
'';
|
||||
};
|
||||
facts = lib.mkOption {
|
||||
default = { };
|
||||
type = lib.types.attrsOf (lib.types.submodule (fact: {
|
||||
options = {
|
||||
name = lib.mkOption {
|
||||
@@ -66,11 +124,12 @@
|
||||
};
|
||||
value = lib.mkOption {
|
||||
defaultText = lib.literalExpression "\${config.clanCore.clanDir}/\${fact.config.path}";
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default =
|
||||
if builtins.pathExists "${config.clanCore.clanDir}/${fact.config.path}" then
|
||||
builtins.readFile "${config.clanCore.clanDir}/${fact.config.path}"
|
||||
lib.strings.removeSuffix "\n" (builtins.readFile "${config.clanCore.clanDir}/${fact.config.path}")
|
||||
else
|
||||
"";
|
||||
null;
|
||||
};
|
||||
};
|
||||
}));
|
||||
@@ -78,10 +137,6 @@
|
||||
};
|
||||
}));
|
||||
};
|
||||
config.system.build.generateUploadSecrets = pkgs.writeScript "generate_upload_secrets" ''
|
||||
${config.system.clan.generateSecrets}
|
||||
${config.system.clan.uploadSecrets}
|
||||
'';
|
||||
imports = [
|
||||
./sops.nix
|
||||
./password-store.nix
|
||||
|
||||
@@ -1,7 +1,4 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
let
|
||||
passwordstoreDir = "\${PASSWORD_STORE_DIR:-$HOME/.password-store}";
|
||||
in
|
||||
{ config, lib, ... }:
|
||||
{
|
||||
options.clan.password-store.targetDirectory = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
@@ -11,108 +8,9 @@ in
|
||||
'';
|
||||
};
|
||||
config = lib.mkIf (config.clanCore.secretStore == "password-store") {
|
||||
system.clan.generateSecrets = pkgs.writeScript "generate-secrets" ''
|
||||
#!/bin/sh
|
||||
set -efu
|
||||
|
||||
test -d "$CLAN_DIR"
|
||||
PATH=${lib.makeBinPath [
|
||||
pkgs.pass
|
||||
]}:$PATH
|
||||
|
||||
# TODO maybe initialize password store if it doesn't exist yet
|
||||
|
||||
${lib.foldlAttrs (acc: n: v: ''
|
||||
${acc}
|
||||
# ${n}
|
||||
# if any of the secrets are missing, we regenerate all connected facts/secrets
|
||||
(if ! ${lib.concatMapStringsSep " && " (x: "pass show machines/${config.clanCore.machineName}/${x.name} >/dev/null") (lib.attrValues v.secrets)}; then
|
||||
|
||||
facts=$(mktemp -d)
|
||||
trap "rm -rf $facts" EXIT
|
||||
secrets=$(mktemp -d)
|
||||
trap "rm -rf $secrets" EXIT
|
||||
${v.generator}
|
||||
|
||||
${lib.concatMapStrings (fact: ''
|
||||
mkdir -p "$(dirname ${fact.path})"
|
||||
cp "$facts"/${fact.name} "$CLAN_DIR"/${fact.path}
|
||||
'') (lib.attrValues v.facts)}
|
||||
|
||||
${lib.concatMapStrings (secret: ''
|
||||
cat "$secrets"/${secret.name} | pass insert -m machines/${config.clanCore.machineName}/${secret.name}
|
||||
'') (lib.attrValues v.secrets)}
|
||||
fi)
|
||||
'') "" config.clanCore.secrets}
|
||||
'';
|
||||
system.clan.uploadSecrets = pkgs.writeScript "upload-secrets" ''
|
||||
#!/bin/sh
|
||||
set -efu
|
||||
|
||||
target=$1
|
||||
|
||||
umask 0077
|
||||
|
||||
PATH=${lib.makeBinPath [
|
||||
pkgs.pass
|
||||
pkgs.git
|
||||
pkgs.findutils
|
||||
pkgs.rsync
|
||||
]}:$PATH:${lib.getBin pkgs.openssh}
|
||||
|
||||
if test -e ${passwordstoreDir}/.git; then
|
||||
local_pass_info=$(
|
||||
git -C ${passwordstoreDir} log -1 --format=%H machines/${config.clanCore.machineName}
|
||||
# we append a hash for every symlink, otherwise we would miss updates on
|
||||
# files where the symlink points to
|
||||
find ${passwordstoreDir}/machines/${config.clanCore.machineName} -type l \
|
||||
-exec realpath {} + |
|
||||
sort |
|
||||
xargs -r -n 1 git -C ${passwordstoreDir} log -1 --format=%H
|
||||
)
|
||||
remote_pass_info=$(ssh "$target" -- ${lib.escapeShellArg ''
|
||||
cat ${config.clan.password-store.targetDirectory}/.pass_info || :
|
||||
''})
|
||||
|
||||
if test "$local_pass_info" = "$remote_pass_info"; then
|
||||
echo secrets already match
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
tmp_dir=$(mktemp -dt populate-pass.XXXXXXXX)
|
||||
trap cleanup EXIT
|
||||
cleanup() {
|
||||
rm -fR "$tmp_dir"
|
||||
}
|
||||
|
||||
find ${passwordstoreDir}/machines/${config.clanCore.machineName} -type f -follow ! -name .gpg-id |
|
||||
while read -r gpg_path; do
|
||||
|
||||
rel_name=''${gpg_path#${passwordstoreDir}}
|
||||
rel_name=''${rel_name%.gpg}
|
||||
|
||||
pass_date=$(
|
||||
if test -e ${passwordstoreDir}/.git; then
|
||||
git -C ${passwordstoreDir} log -1 --format=%aI "$gpg_path"
|
||||
fi
|
||||
)
|
||||
pass_name=$rel_name
|
||||
tmp_path=$tmp_dir/$(basename $rel_name)
|
||||
|
||||
mkdir -p "$(dirname "$tmp_path")"
|
||||
pass show "$pass_name" > "$tmp_path"
|
||||
if [ -n "$pass_date" ]; then
|
||||
touch -d "$pass_date" "$tmp_path"
|
||||
fi
|
||||
done
|
||||
|
||||
if test -n "''${local_pass_info-}"; then
|
||||
echo "$local_pass_info" > "$tmp_dir"/.pass_info
|
||||
fi
|
||||
|
||||
rsync --mkpath --delete -a "$tmp_dir"/ "$target":${config.clan.password-store.targetDirectory}/
|
||||
'';
|
||||
clanCore.secretsDirectory = config.clan.password-store.targetDirectory;
|
||||
clanCore.secretsUploadDirectory = config.clan.password-store.targetDirectory;
|
||||
system.clan.secretsModule = "clan_cli.secrets.modules.password_store";
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ let
|
||||
secretsDir = config.clanCore.clanDir + "/sops/secrets";
|
||||
groupsDir = config.clanCore.clanDir + "/sops/groups";
|
||||
|
||||
|
||||
# My symlink is in the nixos module detected as a directory also it works in the repl. Is this because of pure evaluation?
|
||||
containsSymlink = path:
|
||||
builtins.pathExists path && (builtins.readFileType path == "directory" || builtins.readFileType path == "symlink");
|
||||
@@ -22,23 +23,9 @@ let
|
||||
in
|
||||
{
|
||||
config = lib.mkIf (config.clanCore.secretStore == "sops") {
|
||||
system.clan = {
|
||||
generateSecrets = pkgs.writeScript "generate-secrets" ''
|
||||
#!${pkgs.python3}/bin/python
|
||||
import json
|
||||
from clan_cli.secrets.sops_generate import generate_secrets_from_nix
|
||||
args = json.loads(${builtins.toJSON (builtins.toJSON { machine_name = config.clanCore.machineName; secret_submodules = config.clanCore.secrets; })})
|
||||
generate_secrets_from_nix(**args)
|
||||
'';
|
||||
uploadSecrets = pkgs.writeScript "upload-secrets" ''
|
||||
#!${pkgs.python3}/bin/python
|
||||
import json
|
||||
from clan_cli.secrets.sops_generate import upload_age_key_from_nix
|
||||
# the second toJSON is needed to escape the string for the python
|
||||
args = json.loads(${builtins.toJSON (builtins.toJSON { machine_name = config.clanCore.machineName; deployment_address = config.clan.networking.deploymentAddress; age_key_file = config.sops.age.keyFile; })})
|
||||
upload_age_key_from_nix(**args)
|
||||
'';
|
||||
};
|
||||
clanCore.secretsDirectory = "/run/secrets";
|
||||
clanCore.secretsPrefix = config.clanCore.machineName + "-";
|
||||
system.clan.secretsModule = "clan_cli.secrets.modules.sops";
|
||||
sops.secrets = builtins.mapAttrs
|
||||
(name: _: {
|
||||
sopsFile = config.clanCore.clanDir + "/sops/secrets/${name}/secret";
|
||||
@@ -50,5 +37,6 @@ in
|
||||
|
||||
sops.age.keyFile = lib.mkIf (builtins.pathExists (config.clanCore.clanDir + "/sops/secrets/${config.clanCore.machineName}-age.key/secret"))
|
||||
(lib.mkDefault "/var/lib/sops-nix/key.txt");
|
||||
clanCore.secretsUploadDirectory = lib.mkDefault "/var/lib/sops-nix";
|
||||
};
|
||||
}
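Editor's note (illustrative, not part of the diff): the comment above points out that `builtins.toJSON` is applied twice when splicing the arguments into the generated Python. The inner call serialises the attrset, the outer call turns that JSON text into a quoted, escaped literal that is safe to embed in Python source; `json.loads` then recovers the original structure. A small demonstration with made-up values:

```python
import json

args = {"machine_name": "machine1", "age_key_file": "/var/lib/sops-nix/key.txt"}  # example values

inner = json.dumps(args)    # roughly what the inner builtins.toJSON produces
outer = json.dumps(inner)   # what the outer builtins.toJSON produces: a quoted string literal

# The generated script effectively contains:  args = json.loads(<outer literal>)
assert json.loads(json.loads(outer)) == args
```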
|
||||
|
||||
32 nixosModules/clanCore/serial.nix Normal file
@@ -0,0 +1,32 @@
{ pkgs, ... }:
let
  # Based on https://unix.stackexchange.com/questions/16578/resizable-serial-console-window
  resize = pkgs.writeShellScriptBin "resize" ''
    export PATH=${pkgs.coreutils}/bin
    if [ ! -t 0 ]; then
      # not an interactive terminal...
      exit 0
    fi
    TTY="$(tty)"
    if [[ "$TTY" != /dev/ttyS* ]] && [[ "$TTY" != /dev/ttyAMA* ]] && [[ "$TTY" != /dev/ttySIF* ]]; then
      # probably not a known serial console, we could make this check more
      # precise by using `setserial` but this would require some additional
      # dependency
      exit 0
    fi
    old=$(stty -g)
    stty raw -echo min 0 time 5

    printf '\0337\033[r\033[999;999H\033[6n\0338' > /dev/tty
    IFS='[;R' read -r _ rows cols _ < /dev/tty

    stty "$old"
    stty cols "$cols" rows "$rows"
  '';
in
{
  environment.loginShellInit = "${resize}/bin/resize";

  # the default is something like vt220... however we want to get at least some colors...
  systemd.services."serial-getty@".environment.TERM = "xterm-256color";
}
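Editor's note (illustrative, not part of the diff): a rough Python equivalent of what the resize script above does on a serial console, in case the escape-sequence dance is unfamiliar. It saves the terminal state, moves the cursor to the far bottom-right, asks the terminal where the cursor ended up (CSI 6n), and feeds the answer back via stty:

```python
import re
import subprocess
import sys
import termios
import tty


def query_terminal_size() -> tuple[int, int]:
    fd = sys.stdin.fileno()
    old = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        # save cursor, jump to 999;999, ask for the cursor position, restore cursor
        sys.stdout.write("\x1b7\x1b[r\x1b[999;999H\x1b[6n\x1b8")
        sys.stdout.flush()
        reply = ""
        while not reply.endswith("R"):       # terminal answers ESC[<rows>;<cols>R
            reply += sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old)
    rows, cols = map(int, re.findall(r"\d+", reply)[:2])
    return rows, cols


rows, cols = query_terminal_size()
subprocess.run(["stty", "rows", str(rows), "cols", str(cols)], check=True)
```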
40 nixosModules/clanCore/state.nix Normal file
@@ -0,0 +1,40 @@
{ lib, ... }:
{
  # defaults
  # FIXME: currently broken, will be fixed soon
  #config.clanCore.state.HOME.folders = [ "/home" ];

  # interface
  options.clanCore.state = lib.mkOption {
    default = { };
    type = lib.types.attrsOf
      (lib.types.submodule ({ ... }: {
        options = {
          folders = lib.mkOption {
            type = lib.types.listOf lib.types.str;
            description = ''
              Folders where the state resides in
            '';
          };
          preRestoreScript = lib.mkOption {
            type = lib.types.str;
            default = ":";
            description = ''
              script to run before restoring the state dir from a backup

              Utilize this to stop services which currently access these folders
            '';
          };
          postRestoreScript = lib.mkOption {
            type = lib.types.str;
            default = ":";
            description = ''
              script to run to restore the service after the state dir was restored from a backup

              Utilize this to start services which were previously stopped
            '';
          };
        };
      }));
  };
}
@@ -1,8 +1,152 @@
|
||||
{ config, options, lib, ... }: {
|
||||
system.clan.vm.config = {
|
||||
enabled = options.virtualisation ? cores;
|
||||
} // (lib.optionalAttrs (options.virtualisation ? cores) {
|
||||
inherit (config.virtualisation) cores graphics;
|
||||
memory_size = config.virtualisation.memorySize;
|
||||
});
|
||||
{ lib, config, pkgs, options, extendModules, modulesPath, ... }:
|
||||
let
|
||||
# Generates a fileSystems entry for bind mounting a given state folder path
|
||||
# It binds directories from /var/clanstate/{some-path} to /{some-path}.
|
||||
# As a result, all state paths will be persisted across reboots, because
|
||||
# the state folder is mounted from the host system.
|
||||
mkBindMount = path: {
|
||||
name = path;
|
||||
value = {
|
||||
device = "/var/clanstate/${path}";
|
||||
options = [ "bind" ];
|
||||
};
|
||||
};
|
||||
|
||||
# Flatten the list of state folders into a single list
|
||||
stateFolders = lib.flatten (
|
||||
lib.mapAttrsToList
|
||||
(_item: attrs: attrs.folders)
|
||||
config.clanCore.state
|
||||
);
|
||||
|
||||
# A module setting up bind mounts for all state folders
|
||||
stateMounts = {
|
||||
virtualisation.fileSystems =
|
||||
lib.listToAttrs
|
||||
(map mkBindMount stateFolders);
|
||||
};
|
||||
|
||||
vmModule = {
|
||||
imports = [
|
||||
(modulesPath + "/virtualisation/qemu-vm.nix")
|
||||
./serial.nix
|
||||
stateMounts
|
||||
];
|
||||
virtualisation.fileSystems = {
|
||||
${config.clanCore.secretsUploadDirectory} = lib.mkForce {
|
||||
device = "secrets";
|
||||
fsType = "9p";
|
||||
neededForBoot = true;
|
||||
options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ];
|
||||
};
|
||||
"/var/clanstate" = {
|
||||
device = "state";
|
||||
fsType = "9p";
|
||||
options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ];
|
||||
};
|
||||
};
|
||||
boot.initrd.systemd.enable = true;
|
||||
};
|
||||
|
||||
# We cannot simply merge the VM config into the current system config, because
|
||||
# it is not necessarily a VM.
|
||||
# Instead we use extendModules to create a second instance of the current
|
||||
# system configuration, and then merge the VM config into that.
|
||||
vmConfig = extendModules {
|
||||
modules = [ vmModule stateMounts ];
|
||||
};
|
||||
in
|
||||
{
|
||||
options = {
|
||||
clan.virtualisation = {
|
||||
cores = lib.mkOption {
|
||||
type = lib.types.ints.positive;
|
||||
default = 1;
|
||||
description = lib.mdDoc ''
|
||||
Specify the number of cores the guest is permitted to use.
|
||||
The number can be higher than the available cores on the
|
||||
host system.
|
||||
'';
|
||||
};
|
||||
|
||||
memorySize = lib.mkOption {
|
||||
type = lib.types.ints.positive;
|
||||
default = 1024;
|
||||
description = lib.mdDoc ''
|
||||
The memory size in megabytes of the virtual machine.
|
||||
'';
|
||||
};
|
||||
|
||||
graphics = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = lib.mdDoc ''
|
||||
Whether to run QEMU with a graphics window, or in nographic mode.
|
||||
Serial console will be enabled on both settings, but this will
|
||||
change the preferred console.
|
||||
'';
|
||||
};
|
||||
};
|
||||
# All important VM config variables needed by the vm runner
|
||||
# this is really just a remapping of values defined elsewhere
|
||||
# and therefore not intended to be set by the user
|
||||
clanCore.vm.inspect = {
|
||||
clan_name = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
internal = true;
|
||||
readOnly = true;
|
||||
description = ''
|
||||
the name of the clan
|
||||
'';
|
||||
};
|
||||
memory_size = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
internal = true;
|
||||
readOnly = true;
|
||||
description = ''
|
||||
the amount of memory to allocate to the vm
|
||||
'';
|
||||
};
|
||||
cores = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
internal = true;
|
||||
readOnly = true;
|
||||
description = ''
|
||||
the number of cores to allocate to the vm
|
||||
'';
|
||||
};
|
||||
graphics = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
internal = true;
|
||||
readOnly = true;
|
||||
description = ''
|
||||
whether to enable graphics for the vm
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
# for clan vm inspect
|
||||
clanCore.vm.inspect = {
|
||||
clan_name = config.clanCore.clanName;
|
||||
memory_size = config.clan.virtualisation.memorySize;
|
||||
inherit (config.clan.virtualisation) cores graphics;
|
||||
};
|
||||
# for clan vm create
|
||||
system.clan.vm = {
|
||||
create = pkgs.writeText "vm.json" (builtins.toJSON {
|
||||
initrd = "${vmConfig.config.system.build.initialRamdisk}/${vmConfig.config.system.boot.loader.initrdFile}";
|
||||
toplevel = vmConfig.config.system.build.toplevel;
|
||||
regInfo = (pkgs.closureInfo { rootPaths = vmConfig.config.virtualisation.additionalPaths; });
|
||||
inherit (config.clan.virtualisation) memorySize cores graphics;
|
||||
});
|
||||
};
|
||||
|
||||
virtualisation = lib.optionalAttrs (options.virtualisation ? cores) {
|
||||
memorySize = lib.mkDefault config.clan.virtualisation.memorySize;
|
||||
graphics = lib.mkDefault config.clan.virtualisation.graphics;
|
||||
cores = lib.mkDefault config.clan.virtualisation.cores;
|
||||
};
|
||||
};
|
||||
}
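Editor's note (illustrative, not part of the diff): the vm.json written by `system.clan.vm.create` above carries initrd, toplevel, regInfo, memorySize, cores and graphics. Below is a deliberately simplified sketch of how a runner could turn that metadata into a QEMU invocation; it is not the real clan-cli code, and a working runner would also have to wire up the 9p shares, the kernel command line and the stage-2 init from toplevel.

```python
import json
from pathlib import Path

vm = json.loads(Path("vm.json").read_text())  # hypothetical path to the generated metadata

qemu_args = [
    "qemu-kvm",
    "-m", str(vm["memorySize"]),   # megabytes, from clan.virtualisation.memorySize
    "-smp", str(vm["cores"]),
    "-initrd", vm["initrd"],
]
if not vm["graphics"]:
    qemu_args.append("-nographic")

print(" ".join(qemu_args))
```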
|
||||
|
||||
43 nixosModules/clanCore/wayland-proxy-virtwl.nix Normal file
@@ -0,0 +1,43 @@
|
||||
{ pkgs, config, lib, ... }:
|
||||
{
|
||||
options = {
|
||||
# maybe upstream this?
|
||||
services.wayland-proxy-virtwl = {
|
||||
enable = lib.mkEnableOption "wayland-proxy-virtwl";
|
||||
package = lib.mkPackageOption pkgs "wayland-proxy-virtwl" { };
|
||||
};
|
||||
};
|
||||
config = lib.mkIf config.services.wayland-proxy-virtwl.enable {
|
||||
programs.xwayland.enable = lib.mkDefault true;
|
||||
environment.etc."X11/xkb".source = config.services.xserver.xkb.dir;
|
||||
|
||||
environment.sessionVariables = {
|
||||
WAYLAND_DISPLAY = "wayland-1";
|
||||
DISPLAY = ":1";
|
||||
QT_QPA_PLATFORM = "wayland"; # Qt Applications
|
||||
GDK_BACKEND = "wayland"; # GTK Applications
|
||||
XDG_SESSION_TYPE = "wayland"; # Electron Applications
|
||||
SDL_VIDEODRIVER = "wayland";
|
||||
CLUTTER_BACKEND = "wayland";
|
||||
};
|
||||
|
||||
# Is there a better way to do this?
|
||||
programs.bash.loginShellInit = ''
|
||||
if [ "$(tty)" = "/dev/ttyS0" ]; then
|
||||
systemctl --user start graphical-session.target
|
||||
fi
|
||||
'';
|
||||
|
||||
systemd.user.services.wayland-proxy-virtwl = {
|
||||
description = "Wayland proxy for virtwl";
|
||||
before = [ "graphical-session.target" ];
|
||||
wantedBy = [ "graphical-session.target" ];
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
ExecStart = "${config.services.wayland-proxy-virtwl.package}/bin/wayland-proxy-virtwl --virtio-gpu --x-display=1 --xwayland-binary=${pkgs.xwayland}/bin/Xwayland";
|
||||
Restart = "always";
|
||||
RestartSec = 5;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,100 +0,0 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
let
|
||||
cfg = config.clan.networking.zerotier;
|
||||
in
|
||||
{
|
||||
options.clan.networking.zerotier = {
|
||||
networkId = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
zerotier networking id
|
||||
'';
|
||||
};
|
||||
controller = {
|
||||
enable = lib.mkEnableOption "turn this machine into the networkcontroller";
|
||||
public = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
everyone can join a public network without having the administrator to accept
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
config = {
|
||||
systemd.network.networks.zerotier = {
|
||||
matchConfig.Name = "zt*";
|
||||
networkConfig = {
|
||||
LLMNR = true;
|
||||
LLDP = true;
|
||||
MulticastDNS = true;
|
||||
KeepConfiguration = "static";
|
||||
};
|
||||
};
|
||||
networking.firewall.allowedUDPPorts = [ 9993 ];
|
||||
networking.firewall.interfaces."zt+".allowedTCPPorts = [ 5353 ];
|
||||
networking.firewall.interfaces."zt+".allowedUDPPorts = [ 5353 ];
|
||||
services.zerotierone = {
|
||||
enable = true;
|
||||
joinNetworks = [ cfg.networkId ];
|
||||
};
|
||||
} // lib.mkIf cfg.controller.enable {
|
||||
# only the controller needs to have the key in the repo, the other clients can be dynamic
|
||||
# we generate the zerotier code manually for the controller, since it's part of the bootstrap command
|
||||
clanCore.secrets.zerotier = {
|
||||
facts."network.id" = { };
|
||||
secrets."identity.secret" = { };
|
||||
generator = ''
|
||||
TMPDIR=$(mktemp -d)
|
||||
trap 'rm -rf "$TMPDIR"' EXIT
|
||||
${config.clanCore.clanPkgs.clan-cli}/bin/clan zerotier --outpath "$TMPDIR"
|
||||
cp "$TMPDIR"/network.id "$facts"/network.id
|
||||
cp "$TMPDIR"/identity.secret "$secrets"/identity.secret
|
||||
'';
|
||||
};
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"L+ /var/lib/zerotierone/controller.d/network/${cfg.networkId}.json - - - - ${pkgs.writeText "net.json" (builtins.toJSON {
|
||||
authTokens = [
|
||||
null
|
||||
];
|
||||
authorizationEndpoint = "";
|
||||
capabilities = [];
|
||||
clientId = "";
|
||||
dns = [];
|
||||
enableBroadcast = true;
|
||||
id = cfg.networkId;
|
||||
ipAssignmentPools = [];
|
||||
mtu = 2800;
|
||||
multicastLimit = 32;
|
||||
name = "";
|
||||
uwid = cfg.networkId;
|
||||
objtype = "network";
|
||||
private = true;
|
||||
remoteTraceLevel = 0;
|
||||
remoteTraceTarget = null;
|
||||
revision = 1;
|
||||
routes = [];
|
||||
rules = [
|
||||
{
|
||||
not = false;
|
||||
or = false;
|
||||
type = "ACTION_ACCEPT";
|
||||
}
|
||||
];
|
||||
rulesSource = "";
|
||||
ssoEnabled = false;
|
||||
tags = [];
|
||||
v4AssignMode = {
|
||||
zt = false;
|
||||
};
|
||||
v6AssignMode = {
|
||||
"6plane" = false;
|
||||
rfc4193 = false;
|
||||
zt = false;
|
||||
};
|
||||
})}"
|
||||
];
|
||||
};
|
||||
}
|
||||
|
||||
252 nixosModules/clanCore/zerotier/default.nix Normal file
@@ -0,0 +1,252 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
let
|
||||
cfg = config.clan.networking.zerotier;
|
||||
facts = config.clanCore.secrets.zerotier.facts or { };
|
||||
genMoonScript = pkgs.runCommand "genmoon" { nativeBuildInputs = [ pkgs.python3 ]; } ''
|
||||
install -Dm755 ${./genmoon.py} $out/bin/genmoon
|
||||
patchShebangs $out/bin/genmoon
|
||||
'';
|
||||
networkConfig = {
|
||||
authTokens = [
|
||||
null
|
||||
];
|
||||
authorizationEndpoint = "";
|
||||
capabilities = [ ];
|
||||
clientId = "";
|
||||
dns = [ ];
|
||||
enableBroadcast = true;
|
||||
id = cfg.networkId;
|
||||
ipAssignmentPools = [ ];
|
||||
mtu = 2800;
|
||||
multicastLimit = 32;
|
||||
name = cfg.name;
|
||||
uwid = cfg.networkId;
|
||||
objtype = "network";
|
||||
private = !cfg.controller.public;
|
||||
remoteTraceLevel = 0;
|
||||
remoteTraceTarget = null;
|
||||
revision = 1;
|
||||
routes = [ ];
|
||||
rules = [
|
||||
{
|
||||
not = false;
|
||||
or = false;
|
||||
type = "ACTION_ACCEPT";
|
||||
}
|
||||
];
|
||||
rulesSource = "";
|
||||
ssoEnabled = false;
|
||||
tags = [ ];
|
||||
v4AssignMode = {
|
||||
zt = false;
|
||||
};
|
||||
v6AssignMode = {
|
||||
"6plane" = false;
|
||||
rfc4193 = true;
|
||||
zt = false;
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
options.clan.networking.zerotier = {
|
||||
networkId = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
description = ''
|
||||
zerotier networking id
|
||||
'';
|
||||
};
|
||||
name = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = config.clanCore.clanName;
|
||||
description = ''
|
||||
zerotier network name
|
||||
'';
|
||||
};
|
||||
moon = {
|
||||
stableEndpoints = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = [ ];
|
||||
description = ''
|
||||
Make this machine a moon.
|
||||
Other machines can join this moon by adding this moon in their config.
|
||||
It will be reachable under the given stable endpoints.
|
||||
'';
|
||||
};
|
||||
orbitMoons = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = [ ];
|
||||
description = ''
|
||||
Join these moons.
|
||||
This machine will be able to reach all machines in these moons.
|
||||
'';
|
||||
};
|
||||
};
|
||||
subnet = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
readOnly = true;
|
||||
default =
|
||||
if cfg.networkId == null then
|
||||
null
|
||||
else
|
||||
let
|
||||
part0 = builtins.substring 0 2 cfg.networkId;
|
||||
part1 = builtins.substring 2 2 cfg.networkId;
|
||||
part2 = builtins.substring 4 2 cfg.networkId;
|
||||
part3 = builtins.substring 6 2 cfg.networkId;
|
||||
part4 = builtins.substring 8 2 cfg.networkId;
|
||||
part5 = builtins.substring 10 2 cfg.networkId;
|
||||
part6 = builtins.substring 12 2 cfg.networkId;
|
||||
part7 = builtins.substring 14 2 cfg.networkId;
|
||||
in
|
||||
"fd${part0}:${part1}${part2}:${part3}${part4}:${part5}${part6}:${part7}99:9300::/88";
|
||||
description = ''
|
||||
zerotier subnet
|
||||
'';
|
||||
};
|
||||
controller = {
|
||||
enable = lib.mkEnableOption "turn this machine into the networkcontroller";
|
||||
public = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
everyone can join a public network without having the administrator to accept
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
config = lib.mkMerge [
|
||||
({
|
||||
# Override license so that we can build zerotierone without
|
||||
# having to re-import nixpkgs.
|
||||
services.zerotierone.package = lib.mkDefault (pkgs.zerotierone.overrideAttrs (_old: { meta = { }; }));
|
||||
})
|
||||
(lib.mkIf ((facts.zerotier-meshname.value or null) != null) {
|
||||
environment.etc."zerotier/hostname".text = "${facts.zerotier-meshname.value}.vpn";
|
||||
})
|
||||
(lib.mkIf ((facts.zerotier-ip.value or null) != null) {
|
||||
environment.etc."zerotier/ip".text = facts.zerotier-ip.value;
|
||||
})
|
||||
(lib.mkIf (cfg.networkId != null) {
|
||||
clan.networking.meshnamed.networks.vpn.subnet = cfg.subnet;
|
||||
|
||||
systemd.network.networks."09-zerotier" = {
|
||||
matchConfig.Name = "zt*";
|
||||
networkConfig = {
|
||||
LLMNR = true;
|
||||
LLDP = true;
|
||||
MulticastDNS = true;
|
||||
KeepConfiguration = "static";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.zerotierone.serviceConfig.ExecStartPre = [
|
||||
"+${pkgs.writeShellScript "init-zerotier" ''
|
||||
cp ${config.clanCore.secrets.zerotier.secrets.zerotier-identity-secret.path} /var/lib/zerotier-one/identity.secret
|
||||
zerotier-idtool getpublic /var/lib/zerotier-one/identity.secret > /var/lib/zerotier-one/identity.public
|
||||
|
||||
${lib.optionalString (cfg.controller.enable) ''
|
||||
mkdir -p /var/lib/zerotier-one/controller.d/network
|
||||
ln -sfT ${pkgs.writeText "net.json" (builtins.toJSON networkConfig)} /var/lib/zerotier-one/controller.d/network/${cfg.networkId}.json
|
||||
''}
|
||||
${lib.optionalString (cfg.moon.stableEndpoints != []) ''
|
||||
if [[ ! -f /var/lib/zerotier-one/moon.json ]]; then
|
||||
zerotier-idtool initmoon /var/lib/zerotier-one/identity.public > /var/lib/zerotier-one/moon.json
|
||||
fi
|
||||
${genMoonScript}/bin/genmoon /var/lib/zerotier-one/moon.json ${builtins.toFile "moon.json" (builtins.toJSON cfg.moon.stableEndpoints)} /var/lib/zerotier-one/moons.d
|
||||
''}
|
||||
|
||||
# cleanup old networks
|
||||
if [[ -d /var/lib/zerotier-one/networks.d ]]; then
|
||||
find /var/lib/zerotier-one/networks.d \
|
||||
-type f \
|
||||
-name "*.conf" \
|
||||
-not \( ${lib.concatMapStringsSep " -o " (netId: ''-name "${netId}.conf"'') config.services.zerotierone.joinNetworks} \) \
|
||||
-delete
|
||||
fi
|
||||
''}"
|
||||
];
|
||||
systemd.services.zerotierone.serviceConfig.ExecStartPost = [
|
||||
"+${pkgs.writeShellScript "configure-interface" ''
|
||||
while ! ${pkgs.netcat}/bin/nc -z localhost 9993; do
|
||||
sleep 0.1
|
||||
done
|
||||
zerotier-cli listnetworks -j | ${pkgs.jq}/bin/jq -r '.[] | [.portDeviceName, .name] | @tsv' \
|
||||
| while IFS=$'\t' read -r portDeviceName name; do
|
||||
if [[ -z "$name" ]] || [[ -z "$portDeviceName" ]]; then
|
||||
continue
|
||||
fi
|
||||
# Execute the command for each element
|
||||
${pkgs.iproute2}/bin/ip link property add dev "$portDeviceName" altname "$name"
|
||||
done
|
||||
|
||||
${lib.concatMapStringsSep "\n" (moon: ''
|
||||
zerotier-cli orbit ${moon} ${moon}
|
||||
'') cfg.moon.orbitMoons}
|
||||
''}"
|
||||
];
|
||||
|
||||
networking.firewall.interfaces."zt+".allowedTCPPorts = [ 5353 ]; # mdns
|
||||
networking.firewall.interfaces."zt+".allowedUDPPorts = [ 5353 ]; # mdns
|
||||
|
||||
networking.firewall.allowedTCPPorts = [ 9993 ]; # zerotier
|
||||
networking.firewall.allowedUDPPorts = [ 9993 ]; # zerotier
|
||||
|
||||
networking.networkmanager.unmanaged = [ "interface-name:zt*" ];
|
||||
|
||||
services.zerotierone = {
|
||||
enable = true;
|
||||
joinNetworks = [ cfg.networkId ];
|
||||
};
|
||||
|
||||
# The official zerotier tcp relay no longer works: https://github.com/zerotier/ZeroTierOne/issues/2202
|
||||
# So we host our own relay in https://git.clan.lol/clan/clan-infra
|
||||
services.zerotierone.localConf.settings.tcpFallbackRelay = "65.21.12.51/4443";
|
||||
})
|
||||
(lib.mkIf cfg.controller.enable {
|
||||
# only the controller needs to have the key in the repo, the other clients can be dynamic
|
||||
# we generate the zerotier code manually for the controller, since it's part of the bootstrap command
|
||||
clanCore.secrets.zerotier = {
|
||||
facts.zerotier-ip = { };
|
||||
facts.zerotier-meshname = { };
|
||||
facts.zerotier-network-id = { };
|
||||
secrets.zerotier-identity-secret = { };
|
||||
generator.path = [ config.services.zerotierone.package pkgs.fakeroot pkgs.python3 ];
|
||||
generator.script = ''
|
||||
python3 ${./generate.py} --mode network \
|
||||
--ip "$facts/zerotier-ip" \
|
||||
--meshname "$facts/zerotier-meshname" \
|
||||
--identity-secret "$secrets/zerotier-identity-secret" \
|
||||
--network-id "$facts/zerotier-network-id"
|
||||
'';
|
||||
};
|
||||
# clanCore.state.zerotier.folders = [ "/var/lib/zerotier-one" ];
|
||||
|
||||
environment.systemPackages = [ config.clanCore.clanPkgs.zerotier-members ];
|
||||
})
|
||||
(lib.mkIf (config.clanCore.secretsUploadDirectory != null && !cfg.controller.enable && cfg.networkId != null) {
|
||||
clanCore.secrets.zerotier = {
|
||||
facts.zerotier-ip = { };
|
||||
facts.zerotier-meshname = { };
|
||||
secrets.zerotier-identity-secret = { };
|
||||
generator.path = [ config.services.zerotierone.package pkgs.python3 ];
|
||||
generator.script = ''
|
||||
python3 ${./generate.py} --mode identity \
|
||||
--ip "$facts/zerotier-ip" \
|
||||
--meshname "$facts/zerotier-meshname" \
|
||||
--identity-secret "$secrets/zerotier-identity-secret" \
|
||||
--network-id ${cfg.networkId}
|
||||
'';
|
||||
};
|
||||
})
|
||||
(lib.mkIf (cfg.controller.enable && (facts.zerotier-network-id.value or null) != null) {
|
||||
clan.networking.zerotier.networkId = facts.zerotier-network-id.value;
|
||||
environment.etc."zerotier/network-id".text = facts.zerotier-network-id.value;
|
||||
systemd.services.zerotierone.serviceConfig.ExecStartPost = [
|
||||
"+${pkgs.writeShellScript "whitelist-controller" ''
|
||||
${config.clanCore.clanPkgs.zerotier-members}/bin/zerotier-members allow ${builtins.substring 0 10 cfg.networkId}
|
||||
''}"
|
||||
];
|
||||
})
|
||||
];
|
||||
}
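Editor's note (worked example, not part of the diff): the `subnet` option above splices the 16 hex digits of the network id into an RFC 4193-style `fd..` prefix, giving every network its own /88; generate.py further down fills the remaining bits with the 40-bit node id. With a made-up network id:

```python
network_id = "88f7d5bd2cdb39a9"  # example value, not a real network

parts = [network_id[i:i + 2] for i in range(0, 16, 2)]
subnet = (
    f"fd{parts[0]}:{parts[1]}{parts[2]}:{parts[3]}{parts[4]}:"
    f"{parts[5]}{parts[6]}:{parts[7]}99:9300::/88"
)
print(subnet)  # fd88:f7d5:bd2c:db39:a999:9300::/88
```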
|
||||
226 nixosModules/clanCore/zerotier/generate.py Normal file
@@ -0,0 +1,226 @@
|
||||
import argparse
|
||||
import base64
|
||||
import contextlib
|
||||
import ipaddress
|
||||
import json
|
||||
import socket
|
||||
import subprocess
|
||||
import time
|
||||
import urllib.request
|
||||
from collections.abc import Iterator
|
||||
from contextlib import contextmanager
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from tempfile import TemporaryDirectory
|
||||
from typing import Any
|
||||
|
||||
|
||||
class ClanError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def try_bind_port(port: int) -> bool:
|
||||
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
with tcp, udp:
|
||||
try:
|
||||
tcp.bind(("127.0.0.1", port))
|
||||
udp.bind(("127.0.0.1", port))
|
||||
return True
|
||||
except OSError:
|
||||
return False
|
||||
|
||||
|
||||
def try_connect_port(port: int) -> bool:
|
||||
sock = socket.socket(socket.AF_INET)
|
||||
result = sock.connect_ex(("127.0.0.1", port))
|
||||
sock.close()
|
||||
return result == 0
|
||||
|
||||
|
||||
def find_free_port() -> int | None:
|
||||
"""Find an unused localhost port from 1024-65535 and return it."""
|
||||
with contextlib.closing(socket.socket(type=socket.SOCK_STREAM)) as sock:
|
||||
sock.bind(("127.0.0.1", 0))
|
||||
return sock.getsockname()[1]
|
||||
|
||||
|
||||
class Identity:
|
||||
def __init__(self, path: Path) -> None:
|
||||
self.public = (path / "identity.public").read_text()
|
||||
self.private = (path / "identity.secret").read_text()
|
||||
|
||||
def node_id(self) -> str:
|
||||
nid = self.public.split(":")[0]
|
||||
assert (
|
||||
len(nid) == 10
|
||||
), f"node_id must be 10 characters long, got {len(nid)}: {nid}"
|
||||
return nid
|
||||
|
||||
|
||||
class ZerotierController:
|
||||
def __init__(self, port: int, home: Path) -> None:
|
||||
self.port = port
|
||||
self.home = home
|
||||
self.authtoken = (home / "authtoken.secret").read_text()
|
||||
self.identity = Identity(home)
|
||||
|
||||
def _http_request(
|
||||
self,
|
||||
path: str,
|
||||
method: str = "GET",
|
||||
headers: dict[str, str] = {},
|
||||
data: dict[str, Any] | None = None,
|
||||
) -> dict[str, Any]:
|
||||
body = None
|
||||
headers = headers.copy()
|
||||
if data is not None:
|
||||
body = json.dumps(data).encode("ascii")
|
||||
headers["Content-Type"] = "application/json"
|
||||
headers["X-ZT1-AUTH"] = self.authtoken
|
||||
url = f"http://127.0.0.1:{self.port}{path}"
|
||||
req = urllib.request.Request(url, headers=headers, method=method, data=body)
|
||||
resp = urllib.request.urlopen(req)
|
||||
return json.load(resp)
|
||||
|
||||
def status(self) -> dict[str, Any]:
|
||||
return self._http_request("/status")
|
||||
|
||||
def create_network(self, data: dict[str, Any] = {}) -> dict[str, Any]:
|
||||
return self._http_request(
|
||||
f"/controller/network/{self.identity.node_id()}______",
|
||||
method="POST",
|
||||
data=data,
|
||||
)
|
||||
|
||||
def get_network(self, network_id: str) -> dict[str, Any]:
|
||||
return self._http_request(f"/controller/network/{network_id}")
|
||||
|
||||
|
||||
@contextmanager
|
||||
def zerotier_controller() -> Iterator[ZerotierController]:
|
||||
# This check could be racy but it's unlikely in practice
|
||||
controller_port = find_free_port()
|
||||
if controller_port is None:
|
||||
raise ClanError("cannot find a free port for zerotier controller")
|
||||
|
||||
with TemporaryDirectory() as d:
|
||||
tempdir = Path(d)
|
||||
home = tempdir / "zerotier-one"
|
||||
home.mkdir()
|
||||
cmd = [
|
||||
"fakeroot",
|
||||
"--",
|
||||
"zerotier-one",
|
||||
f"-p{controller_port}",
|
||||
str(home),
|
||||
]
|
||||
with subprocess.Popen(cmd) as p:
|
||||
try:
|
||||
print(
|
||||
f"wait for controller to be started on 127.0.0.1:{controller_port}...",
|
||||
)
|
||||
while not try_connect_port(controller_port):
|
||||
status = p.poll()
|
||||
if status is not None:
|
||||
raise ClanError(
|
||||
f"zerotier-one has been terminated unexpected with {status}"
|
||||
)
|
||||
time.sleep(0.1)
|
||||
print()
|
||||
|
||||
yield ZerotierController(controller_port, home)
|
||||
finally:
|
||||
p.terminate()
|
||||
p.wait()
|
||||
|
||||
|
||||
@dataclass
|
||||
class NetworkController:
|
||||
networkid: str
|
||||
identity: Identity
|
||||
|
||||
|
||||
# TODO: allow merging more network configuration here
|
||||
def create_network_controller() -> NetworkController:
|
||||
with zerotier_controller() as controller:
|
||||
network = controller.create_network()
|
||||
return NetworkController(network["nwid"], controller.identity)
|
||||
|
||||
|
||||
def create_identity() -> Identity:
|
||||
with TemporaryDirectory() as d:
|
||||
tmpdir = Path(d)
|
||||
private = tmpdir / "identity.secret"
|
||||
public = tmpdir / "identity.public"
|
||||
subprocess.run(["zerotier-idtool", "generate", private, public])
|
||||
return Identity(tmpdir)
|
||||
|
||||
|
||||
def compute_zerotier_ip(network_id: str, identity: Identity) -> ipaddress.IPv6Address:
|
||||
assert (
|
||||
len(network_id) == 16
|
||||
), "network_id must be 16 characters long, got {network_id}"
|
||||
nwid = int(network_id, 16)
|
||||
node_id = int(identity.node_id(), 16)
|
||||
addr_parts = bytearray(
|
||||
[
|
||||
0xFD,
|
||||
(nwid >> 56) & 0xFF,
|
||||
(nwid >> 48) & 0xFF,
|
||||
(nwid >> 40) & 0xFF,
|
||||
(nwid >> 32) & 0xFF,
|
||||
(nwid >> 24) & 0xFF,
|
||||
(nwid >> 16) & 0xFF,
|
||||
(nwid >> 8) & 0xFF,
|
||||
(nwid) & 0xFF,
|
||||
0x99,
|
||||
0x93,
|
||||
(node_id >> 32) & 0xFF,
|
||||
(node_id >> 24) & 0xFF,
|
||||
(node_id >> 16) & 0xFF,
|
||||
(node_id >> 8) & 0xFF,
|
||||
(node_id) & 0xFF,
|
||||
]
|
||||
)
|
||||
return ipaddress.IPv6Address(bytes(addr_parts))
|
||||
|
||||
|
||||
def compute_zerotier_meshname(ip: ipaddress.IPv6Address) -> str:
|
||||
return base64.b32encode(ip.packed)[0:26].decode("ascii").lower()
|
||||
|
||||
|
||||
def main() -> None:
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--mode", choices=["network", "identity"], required=True, type=str
|
||||
)
|
||||
parser.add_argument("--ip", type=Path, required=True)
|
||||
parser.add_argument("--meshname", type=Path, required=True)
|
||||
parser.add_argument("--identity-secret", type=Path, required=True)
|
||||
parser.add_argument("--network-id", type=str, required=False)
|
||||
args = parser.parse_args()
|
||||
|
||||
match args.mode:
|
||||
case "network":
|
||||
if args.network_id is None:
|
||||
raise ValueError("network_id parameter is required")
|
||||
controller = create_network_controller()
|
||||
identity = controller.identity
|
||||
network_id = controller.networkid
|
||||
Path(args.network_id).write_text(network_id)
|
||||
case "identity":
|
||||
identity = create_identity()
|
||||
network_id = args.network_id
|
||||
case _:
|
||||
raise ValueError(f"unknown mode {args.mode}")
|
||||
ip = compute_zerotier_ip(network_id, identity)
|
||||
meshname = compute_zerotier_meshname(ip)
|
||||
|
||||
args.identity_secret.write_text(identity.private)
|
||||
args.ip.write_text(ip.compressed)
|
||||
args.meshname.write_text(meshname)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
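Editor's note (worked example, not part of the diff): how `compute_zerotier_ip` and `compute_zerotier_meshname` above fit together. The address packs 0xfd, the eight network-id bytes, the 0x99 0x93 marker and the five node-id bytes into 16 bytes; the meshname is simply the lower-case base32 form of those same bytes. With made-up identifiers:

```python
import base64
import ipaddress

network_id = "88f7d5bd2cdb39a9"  # example values, not real identities
node_id = "abcdef0123"

packed = bytes([0xFD]) + bytes.fromhex(network_id) + bytes([0x99, 0x93]) + bytes.fromhex(node_id)
ip = ipaddress.IPv6Address(packed)
meshname = base64.b32encode(ip.packed)[0:26].decode("ascii").lower()

print(ip.compressed)  # fd88:f7d5:bd2c:db39:a999:93ab:cdef:123
print(meshname)
```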
|
||||
31 nixosModules/clanCore/zerotier/genmoon.py Normal file
@@ -0,0 +1,31 @@
#!/usr/bin/env python3

import json
import subprocess
import sys
from pathlib import Path
from tempfile import NamedTemporaryFile


def main() -> None:
    if len(sys.argv) != 4:
        print("Usage: genmoon.py <moon.json> <endpoint.json> <moons.d>")
        sys.exit(1)
    moon_json = sys.argv[1]
    endpoint_config = sys.argv[2]
    moons_d = sys.argv[3]

    moon_json = json.loads(Path(moon_json).read_text())
    moon_json["roots"][0]["stableEndpoints"] = json.loads(
        Path(endpoint_config).read_text()
    )

    with NamedTemporaryFile("w") as f:
        f.write(json.dumps(moon_json))
        f.flush()
        Path(moons_d).mkdir(parents=True, exist_ok=True)
        subprocess.run(["zerotier-idtool", "genmoon", f.name], cwd=moons_d)


if __name__ == "__main__":
    main()
@@ -1,6 +1,13 @@
{ ... }: {
{ inputs, self, ... }: {
  flake.nixosModules = {
    hidden-ssh-announce.imports = [ ./hidden-ssh-announce.nix ];
    installer.imports = [ ./installer ];
    clanCore.imports = [
      inputs.sops-nix.nixosModules.sops
      ./clanCore
      ({ pkgs, lib, ... }: {
        clanCore.clanPkgs = lib.mkDefault self.packages.${pkgs.hostPlatform.system};
      })
    ];
  };
}
@@ -37,7 +37,7 @@
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
# ${pkgs.tor}/bin/torify
|
||||
ExecStart = pkgs.writers.writeDash "announce-hidden-service" ''
|
||||
ExecStart = pkgs.writeShellScript "announce-hidden-service" ''
|
||||
set -efu
|
||||
until test -e ${config.services.tor.settings.DataDirectory}/onion/hidden-ssh/hostname; do
|
||||
echo "still waiting for ${config.services.tor.settings.DataDirectory}/onion/hidden-ssh/hostname"
|
||||
|
||||
@@ -19,26 +19,32 @@
|
||||
'';
|
||||
hidden-ssh-announce = {
|
||||
enable = true;
|
||||
script = pkgs.writers.writeDash "write-hostname" ''
|
||||
script = pkgs.writeShellScript "write-hostname" ''
|
||||
set -efu
|
||||
export PATH=${lib.makeBinPath (with pkgs; [ iproute2 coreutils jq qrencode ])}
|
||||
|
||||
mkdir -p /var/shared
|
||||
echo "$1" > /var/shared/onion-hostname
|
||||
${pkgs.jq}/bin/jq -nc \
|
||||
local_addrs=$(ip -json addr | jq '[map(.addr_info) | flatten | .[] | select(.scope == "global") | .local]')
|
||||
jq -nc \
|
||||
--arg password "$(cat /var/shared/root-password)" \
|
||||
--arg address "$(cat /var/shared/onion-hostname)" '{
|
||||
password: $password, address: $address
|
||||
}' > /var/shared/login.info
|
||||
cat /var/shared/login.info |
|
||||
${pkgs.qrencode}/bin/qrencode -t utf8 -o /var/shared/qrcode.utf8
|
||||
cat /var/shared/login.info |
|
||||
${pkgs.qrencode}/bin/qrencode -t png -o /var/shared/qrcode.png
|
||||
--arg onion_address "$(cat /var/shared/onion-hostname)" \
|
||||
--argjson local_addrs "$local_addrs" \
|
||||
'{ password: $password, onion_address: $onion_address, local_addresses: $local_addrs }' \
|
||||
> /var/shared/login.json
|
||||
cat /var/shared/login.json | qrencode -t utf8 -o /var/shared/qrcode.utf8
|
||||
'';
|
||||
};
|
||||
services.getty.autologinUser = lib.mkForce "root";
|
||||
programs.bash.interactiveShellInit = ''
|
||||
if [ "$(tty)" = "/dev/tty1" ]; then
|
||||
echo 'waiting for tor to generate the hidden service'
|
||||
until test -e /var/shared/qrcode.utf8; do echo .; sleep 1; done
|
||||
echo -n 'waiting for tor to generate the hidden service'
|
||||
until test -e /var/shared/qrcode.utf8; do echo -n .; sleep 1; done
|
||||
echo
|
||||
echo "Root password: $(cat /var/shared/root-password)"
|
||||
echo "Onion address: $(cat /var/shared/onion-hostname)"
|
||||
echo "Local network addresses:"
|
||||
${pkgs.iproute}/bin/ip -brief -color addr | grep -v 127.0.0.1
|
||||
cat /var/shared/qrcode.utf8
|
||||
fi
|
||||
'';
|
||||
|
||||
@@ -1,12 +1,6 @@
# Because we depend on nixpkgs sources, uploading to builders takes a long time

source_up

if type nix_direnv_watch_file &>/dev/null; then
  nix_direnv_watch_file flake-module.nix
  nix_direnv_watch_file default.nix
else
  direnv watch flake-module.nix
  direnv watch default.nix
fi
watch_file flake-module.nix default.nix

# Because we depend on nixpkgs sources, uploading to builders takes a long time
use flake .#clan-cli --builders ''
26 pkgs/clan-cli/.vscode/launch.json vendored Normal file
@@ -0,0 +1,26 @@
{
  // Use IntelliSense to learn about possible attributes.
  // Hover to view descriptions of existing attributes.
  // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Clan Webui",
      "type": "python",
      "request": "launch",
      "module": "clan_cli.webui",
      "justMyCode": false,
      "args": [ "--reload", "--no-open", "--log-level", "debug" ],

    },
    {
      "name": "Clan Cli VMs",
      "type": "python",
      "request": "launch",
      "module": "clan_cli",
      "justMyCode": false,
      "args": [ "vms" ],

    }
  ]
}
22 pkgs/clan-cli/.vscode/settings.json vendored Normal file
@@ -0,0 +1,22 @@
{
  "python.testing.pytestArgs": [
    // Coverage is not supported by vscode:
    // https://github.com/Microsoft/vscode-python/issues/693
    // Note that this will make pytest fail if pytest-cov is not installed,
    // if that's the case, then this option needs to be removed (overrides
    // can be set at a workspace level, it's up to you to decide what's the
    // best approach). You might also prefer to only set this option
    // per-workspace (wherever coverage is used).
    "--no-cov",
    "tests"
  ],
  "python.testing.unittestEnabled": false,
  "python.testing.pytestEnabled": true,
  "search.exclude": {
    "**/.direnv": true
  },
  "python.linting.mypyPath": "mypy",
  "python.linting.mypyEnabled": true,
  "python.linting.enabled": true,
  "python.defaultInterpreterPath": "python"
}
Some files were not shown because too many files have changed in this diff.