Compare commits: Qubasa-deb… → Qubasa-mai… (559 commits; the compare view's commit table contained only SHA1 values and is omitted here)
@@ -8,5 +8,5 @@ jobs:
   checks-impure:
     runs-on: nix
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - run: nix run .#impure-checks
@@ -7,7 +7,7 @@ jobs:
   deploy-docs:
     runs-on: nix
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
      - run: nix run .#deploy-docs
        env:
          SSH_HOMEPAGE_KEY: ${{ secrets.SSH_HOMEPAGE_KEY }}
6 .github/dependabot.yml vendored Normal file

@@ -0,0 +1,6 @@
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
9 .github/workflows/repo-sync.yml vendored

@@ -3,10 +3,8 @@ on:
   schedule:
     - cron: "39 * * * *"
   workflow_dispatch:
 
-permissions:
-  contents: write
 
 jobs:
   repo-sync:
     if: github.repository_owner == 'clan-lol'
@@ -15,10 +13,15 @@ jobs:
       - uses: actions/checkout@v4
         with:
           persist-credentials: false
+      - uses: actions/create-github-app-token@v1
+        id: app-token
+        with:
+          app-id: ${{ vars.CI_APP_ID }}
+          private-key: ${{ secrets.CI_PRIVATE_KEY }}
      - name: repo-sync
        uses: repo-sync/github-sync@v2
        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          GITHUB_TOKEN: ${{ steps.app-token.outputs.token }}
        with:
          source_repo: "https://git.clan.lol/clan/clan-core.git"
          source_branch: "main"
2 .gitignore vendored

@@ -14,7 +14,7 @@ example_clan
 nixos.qcow2
 **/*.glade~
 /docs/out
-
+**/.local.env
 
 # dream2nix
 .dream2nix
@@ -1,3 +1,4 @@
 # Contributing to Clan
 
-Go to the Contributing guide at https://docs.clan.lol/manual/contribute/
+<!-- Local file: docs/CONTRIBUTING.md -->
+Go to the Contributing guide at https://docs.clan.lol/manual/contribute/
@@ -5,6 +5,12 @@
     fileSystems."/".device = "/dev/null";
     boot.loader.grub.device = "/dev/null";
   };
+  clan.inventory.services = {
+    borgbackup.test-backup = {
+      roles.client.machines = [ "test-backup" ];
+      roles.server.machines = [ "test-backup" ];
+    };
+  };
   flake.nixosModules = {
     test-backup =
       {
@@ -22,13 +28,20 @@
       in
       {
         imports = [
           self.clanModules.borgbackup
+          # Do not import inventory modules. They should be configured via 'clan.inventory'
+          #
+          # TODO: Configure localbackup via inventory
           self.clanModules.localbackup
         ];
+        # Borgbackup overrides
+        services.borgbackup.repos.test-backups = {
+          path = "/var/lib/borgbackup/test-backups";
+          authorizedKeys = [ (builtins.readFile ../lib/ssh/pubkey) ];
+        };
+        clan.borgbackup.destinations.test-backup.repo = lib.mkForce "borg@machine:.";
 
         clan.core.networking.targetHost = "machine";
         networking.hostName = "machine";
         services.openssh.settings.UseDns = false;
         nixpkgs.hostPlatform = "x86_64-linux";
 
         programs.ssh.knownHosts = {
           machine.hostNames = [ "machine" ];
@@ -37,6 +50,8 @@
 
         services.openssh = {
           enable = true;
+          settings.UsePAM = false;
+          settings.UseDns = false;
           hostKeys = [
             {
               path = "/root/.ssh/id_ed25519";
@@ -47,6 +62,10 @@
 
         users.users.root.openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
 
+        # This is needed to unlock the user for sshd
+        # Because we use sshd without setuid binaries
+        users.users.borg.initialPassword = "hello";
+
         systemd.tmpfiles.settings."vmsecrets" = {
           "/root/.ssh/id_ed25519" = {
             C.argument = "${../lib/ssh/privkey}";
@@ -62,14 +81,14 @@
               user = "root";
             };
           };
-          "/etc/secrets/borgbackup.ssh" = {
+          "/etc/secrets/borgbackup/borgbackup.ssh" = {
             C.argument = "${../lib/ssh/privkey}";
             z = {
               mode = "0400";
               user = "root";
             };
           };
-          "/etc/secrets/borgbackup.repokey" = {
+          "/etc/secrets/borgbackup/borgbackup.repokey" = {
             C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
             z = {
               mode = "0400";
@@ -78,8 +97,7 @@
           };
         };
         clan.core.facts.secretStore = "vm";
-        # TODO: set this backend as well, once we have implemented it.
-        #clan.core.vars.settings.secretStore = "vm";
+        clan.core.vars.settings.secretStore = "vm";
 
         environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
         environment.etc.install-closure.source = "${closureInfo}/store-paths";
@@ -104,7 +122,6 @@
           '';
           folders = [ "/var/test-service" ];
         };
-        clan.borgbackup.destinations.test-backup.repo = "borg@machine:.";
 
         fileSystems."/mnt/external-disk" = {
           device = "/dev/vdb"; # created in tests with virtualisation.emptyDisks
@@ -125,29 +142,55 @@
             touch /run/unmount-external-disk
           '';
         };
 
-        services.borgbackup.repos.test-backups = {
-          path = "/var/lib/borgbackup/test-backups";
-          authorizedKeys = [ (builtins.readFile ../lib/ssh/pubkey) ];
-        };
       };
   };
   perSystem =
     { pkgs, ... }:
+    let
+      clanCore = self.filter {
+        include = [
+          "checks/backups"
+          "checks/flake-module.nix"
+          "clanModules/borgbackup"
+          "clanModules/flake-module.nix"
+          "clanModules/localbackup"
+          "clanModules/packages"
+          "clanModules/single-disk"
+          "clanModules/zerotier"
+          "flake.lock"
+          "flakeModules"
+          "inventory.json"
+          "lib/build-clan"
+          "lib/default.nix"
+          "lib/select.nix"
+          "lib/flake-module.nix"
+          "lib/frontmatter"
+          "lib/inventory"
+          "lib/constraints"
+          "nixosModules"
+        ];
+      };
+    in
     {
       # Needs investigation on aarch64-linux
       # vm-test-run-test-backups> qemu-kvm: No machine specified, and there is no default
       # vm-test-run-test-backups> Use -machine help to list supported machines
-      checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux") {
-        test-backups = (import ../lib/test-base.nix) {
+      checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
+        test-backups = (import ../lib/container-test.nix) {
          name = "test-backups";
          nodes.machine = {
-            imports = [
-              self.nixosModules.clanCore
-              self.nixosModules.test-backup
-            ];
-            virtualisation.emptyDiskImages = [ 256 ];
+            imports =
+              [
+                self.nixosModules.clanCore
+                # Some custom overrides for the backup tests
+                self.nixosModules.test-backup
+              ]
+              ++
+                # import the inventory generated nixosModules
+                self.clanInternals.inventoryClass.machines.test-backup.machineImports;
+            clan.core.settings.directory = ./.;
            environment.systemPackages = [
              (pkgs.writeShellScriptBin "foo" ''
                echo ${clanCore}
              '')
            ];
          };
 
          testScript = ''
@@ -159,14 +202,14 @@
            machine.succeed("echo testing > /var/test-backups/somefile")
 
            # create
-           machine.succeed("clan backups create --debug --flake ${self} test-backup")
+           machine.succeed("clan backups create --debug --flake ${clanCore} test-backup")
            machine.wait_until_succeeds("! systemctl is-active borgbackup-job-test-backup >&2")
            machine.succeed("test -f /run/mount-external-disk")
            machine.succeed("test -f /run/unmount-external-disk")
 
            # list
            backup_id = json.loads(machine.succeed("borg-job-test-backup list --json"))["archives"][0]["archive"]
-           out = machine.succeed("clan backups list --debug --flake ${self} test-backup").strip()
+           out = machine.succeed("clan backups list --debug --flake ${clanCore} test-backup").strip()
            print(out)
            assert backup_id in out, f"backup {backup_id} not found in {out}"
            localbackup_id = "hdd::/mnt/external-disk/snapshot.0"
@@ -174,7 +217,7 @@
 
            ## borgbackup restore
            machine.succeed("rm -f /var/test-backups/somefile")
-           machine.succeed(f"clan backups restore --debug --flake ${self} test-backup borgbackup 'test-backup::borg@machine:.::{backup_id}' >&2")
+           machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup borgbackup 'test-backup::borg@machine:.::{backup_id}' >&2")
            assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
            machine.succeed("test -f /var/test-service/pre-restore-command")
            machine.succeed("test -f /var/test-service/post-restore-command")
@@ -182,7 +225,7 @@
 
            ## localbackup restore
            machine.succeed("rm -rf /var/test-backups/somefile /var/test-service/ && mkdir -p /var/test-service")
-           machine.succeed(f"clan backups restore --debug --flake ${self} test-backup localbackup '{localbackup_id}' >&2")
+           machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup localbackup '{localbackup_id}' >&2")
            assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
            machine.succeed("test -f /var/test-service/pre-restore-command")
            machine.succeed("test -f /var/test-service/post-restore-command")
@@ -21,14 +21,14 @@
   clan.core.state.testState.folders = [ "/etc/state" ];
   environment.etc.state.text = "hello world";
   systemd.tmpfiles.settings."vmsecrets" = {
-    "/etc/secrets/borgbackup.ssh" = {
+    "/etc/secrets/borgbackup/borgbackup.ssh" = {
       C.argument = "${../lib/ssh/privkey}";
       z = {
         mode = "0400";
         user = "root";
       };
     };
-    "/etc/secrets/borgbackup.repokey" = {
+    "/etc/secrets/borgbackup/borgbackup.repokey" = {
       C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
       z = {
         mode = "0400";
@@ -36,7 +36,8 @@
       };
     };
   };
-  clan.core.facts.secretStore = "vm";
+  # clan.core.facts.secretStore = "vm";
+  clan.core.vars.settings.secretStore = "vm";
 
   clan.borgbackup.destinations.test.repo = "borg@localhost:.";
 }
@@ -1,7 +1,7 @@
 (import ../lib/container-test.nix) (
   { ... }:
   {
-    name = "secrets";
+    name = "container";
 
     nodes.machine =
       { ... }:
@@ -1,11 +1,19 @@
-{ self, ... }:
+{ self, lib, ... }:
+let
+  inherit (lib)
+    filter
+    pathExists
+    ;
+in
 {
-  imports = [
+  imports = filter pathExists [
     ./backups/flake-module.nix
     ./devshell/flake-module.nix
     ./flash/flake-module.nix
     ./impure/flake-module.nix
     ./installation/flake-module.nix
+    ./installation-without-system/flake-module.nix
     ./morph/flake-module.nix
     ./nixos-documentation/flake-module.nix
   ];
   perSystem =
@@ -42,7 +50,7 @@
         flakeOutputs =
           lib.mapAttrs' (
             name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
-          ) self.nixosConfigurations
+          ) (lib.filterAttrs (n: _: !lib.hasPrefix "test-" n) self.nixosConfigurations)
           // lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages
           // lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
           // lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (
@@ -1,5 +1,39 @@
-{ self, ... }:
+{
+  config,
+  self,
+  lib,
+  ...
+}:
 {
+  clan.machines = lib.listToAttrs (
+    lib.map (
+      system:
+      lib.nameValuePair "test-flash-machine-${system}" {
+        clan.core.networking.targetHost = "test-flash-machine";
+        fileSystems."/".device = lib.mkDefault "/dev/vda";
+        boot.loader.grub.device = lib.mkDefault "/dev/vda";
+
+        # We need to use `mkForce` because we inherit from `test-install-machine`
+        # which currently hardcodes `nixpkgs.hostPlatform`
+        nixpkgs.hostPlatform = lib.mkForce system;
+
+        imports = [ self.nixosModules.test-flash-machine ];
+      }
+    ) (lib.filter (lib.hasSuffix "linux") config.systems)
+  );
+
+  flake.nixosModules = {
+    test-flash-machine =
+      { lib, ... }:
+      {
+        imports = [ self.nixosModules.test-install-machine ];
+
+        clan.core.vars.generators.test = lib.mkForce { };
+
+        disko.devices.disk.main.preCreateHook = lib.mkForce "";
+      };
+  };
+
   perSystem =
     {
       nodes,
@@ -10,17 +44,20 @@
     let
       dependencies = [
         pkgs.disko
-        self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.toplevel
-        self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.diskoScript
-        self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.diskoScript.drvPath
-        self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.clan.deployment.file
+        self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.ConfigIniFiles
+        self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.FileSlurp
+
+        self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
+        self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
+        self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript.drvPath
+        self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.clan.deployment.file
+
      ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
      closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
    in
    {
      # Currently disabled...
-      checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
-        flash = (import ../lib/test-base.nix) {
+      checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
+        test-flash = (import ../lib/test-base.nix) {
          name = "flash";
          nodes.target = {
            virtualisation.emptyDiskImages = [ 4096 ];
@@ -42,7 +79,7 @@
          testScript = ''
            start_all()
 
-            machine.succeed("clan flash write --debug --flake ${../..} --yes --disk main /dev/vdb test-install-machine")
+            machine.succeed("clan flash write --debug --flake ${../..} --yes --disk main /dev/vdb test-flash-machine-${pkgs.hostPlatform.system}")
          '';
        } { inherit pkgs self; };
      };
@@ -30,6 +30,7 @@
     # this disables dynamic dependency loading in clan-cli
     export CLAN_NO_DYNAMIC_DEPS=1
 
+    export IN_PYTEST=1
     nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -m impure ./tests $@"
   '';
 };
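For context on the hunk above: IN_PYTEST is an environment variable the test suite can read at runtime. A minimal, hedged sketch of how a test might consult it (the variable name comes from the diff; the test function and its skip logic are purely illustrative, not clan-cli's actual code):

    import os
    import pytest  # assumes pytest is installed

    @pytest.mark.impure
    def test_runs_under_pytest_wrapper() -> None:
        # The wrapper script exports IN_PYTEST=1 before invoking pytest.
        assert os.environ.get("IN_PYTEST") == "1"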
241 checks/installation-without-system/flake-module.nix Normal file

@@ -0,0 +1,241 @@
+{
+  self,
+  lib,
+  ...
+}:
+{
+  # The purpose of this test is to ensure `clan machines install` works
+  # for machines that don't have a hardware config yet.
+
+  # If this test starts failing it could be due to the `facter.json` being out of date
+  # you can get a new one by adding
+  # client.fail("cat test-flake/machines/test-install-machine/facter.json >&2")
+  # to the installation test.
+  clan.machines.test-install-machine-without-system = {
+    fileSystems."/".device = lib.mkDefault "/dev/vda";
+    boot.loader.grub.device = lib.mkDefault "/dev/vda";
+
+    imports = [ self.nixosModules.test-install-machine-without-system ];
+  };
+  clan.machines.test-install-machine-with-system =
+    { pkgs, ... }:
+    {
+      # https://git.clan.lol/clan/test-fixtures
+      facter.reportPath = builtins.fetchurl {
+        url = "https://git.clan.lol/clan/test-fixtures/raw/commit/4a2bc56d886578124b05060d3fb7eddc38c019f8/nixos-vm-facter-json/${pkgs.hostPlatform.system}.json";
+        sha256 =
+          {
+            aarch64-linux = "sha256:1rlfymk03rmfkm2qgrc8l5kj5i20srx79n1y1h4nzlpwaz0j7hh2";
+            x86_64-linux = "sha256:16myh0ll2gdwsiwkjw5ba4dl23ppwbsanxx214863j7nvzx42pws";
+          }
+          .${pkgs.hostPlatform.system};
+      };
+
+      fileSystems."/".device = lib.mkDefault "/dev/vda";
+      boot.loader.grub.device = lib.mkDefault "/dev/vda";
+
+      imports = [ self.nixosModules.test-install-machine-without-system ];
+    };
+  flake.nixosModules = {
+    test-install-machine-without-system =
+      { lib, modulesPath, ... }:
+      {
+        imports = [
+          (modulesPath + "/testing/test-instrumentation.nix") # we need these 2 modules always to be able to run the tests
+          (modulesPath + "/profiles/qemu-guest.nix")
+          ../lib/minify.nix
+        ];
+
+        networking.hostName = "test-install-machine";
+
+        environment.etc."install-successful".text = "ok";
+
+        boot.consoleLogLevel = lib.mkForce 100;
+        boot.kernelParams = [ "boot.shell_on_fail" ];
+
+        # disko config
+        boot.loader.grub.efiSupport = lib.mkDefault true;
+        boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
+        clan.core.vars.settings.secretStore = "vm";
+        clan.core.vars.generators.test = {
+          files.test.neededFor = "partitioning";
+          script = ''
+            echo "notok" > $out/test
+          '';
+        };
+        disko.devices = {
+          disk = {
+            main = {
+              type = "disk";
+              device = "/dev/vda";
+
+              preCreateHook = ''
+                test -e /run/partitioning-secrets/test/test
+              '';
+
+              content = {
+                type = "gpt";
+                partitions = {
+                  boot = {
+                    size = "1M";
+                    type = "EF02"; # for grub MBR
+                    priority = 1;
+                  };
+                  ESP = {
+                    size = "512M";
+                    type = "EF00";
+                    content = {
+                      type = "filesystem";
+                      format = "vfat";
+                      mountpoint = "/boot";
+                      mountOptions = [ "umask=0077" ];
+                    };
+                  };
+                  root = {
+                    size = "100%";
+                    content = {
+                      type = "filesystem";
+                      format = "ext4";
+                      mountpoint = "/";
+                    };
+                  };
+                };
+              };
+            };
+          };
+        };
+      };
+  };
+  perSystem =
+    {
+      pkgs,
+      lib,
+      ...
+    }:
+    let
+      dependencies = [
+        self
+        self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
+        self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
+        self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.clan.deployment.file
+        pkgs.stdenv.drvPath
+        pkgs.bash.drvPath
+        pkgs.nixos-anywhere
+        pkgs.bubblewrap
+      ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+      closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
+      # with Nix 2.24 we get:
+      # vm-test-run-test-installation> client # error: sized: unexpected end-of-file
+      # vm-test-run-test-installation> client # error: unexpected end-of-file
+      # This seems to be fixed with Nix 2.26
+      # Remove this line once `pkgs.nix` is 2.26+
+      nixPackage =
+        assert
+          lib.versionOlder pkgs.nix.version "2.26"
+          && lib.versionAtLeast pkgs.nixVersions.latest.version "2.26";
+        pkgs.nixVersions.latest;
+    in
+    {
+      # On aarch64-linux, hangs on reboot with after installation:
+      # vm-test-run-test-installation-without-system> installer # [ 288.002871] reboot: Restarting system
+      # vm-test-run-test-installation-without-system> client # [test-install-machine] ### Done! ###
+      # vm-test-run-test-installation-without-system> client # [test-install-machine] + step 'Done!'
+      # vm-test-run-test-installation-without-system> client # [test-install-machine] + echo '### Done! ###'
+      # vm-test-run-test-installation-without-system> client # [test-install-machine] + rm -rf /tmp/tmp.qb16EAq7hJ
+      # vm-test-run-test-installation-without-system> (finished: must succeed: clan machines install --debug --flake test-flake --yes test-install-machine-without-system --target-host root@installer --update-hardware-config nixos-facter >&2, in 154.62 seconds)
+      # vm-test-run-test-installation-without-system> target: starting vm
+      # vm-test-run-test-installation-without-system> target: QEMU running (pid 144)
+      # vm-test-run-test-installation-without-system> target: waiting for unit multi-user.target
+      # vm-test-run-test-installation-without-system> target: waiting for the VM to finish booting
+      # vm-test-run-test-installation-without-system> target: Guest root shell did not produce any data yet...
+      # vm-test-run-test-installation-without-system> target: To debug, enter the VM and run 'systemctl status backdoor.service'.
+      checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
+        test-installation-without-system = (import ../lib/test-base.nix) {
+          name = "test-installation-without-system";
+          nodes.target = {
+            services.openssh.enable = true;
+            virtualisation.diskImage = "./target.qcow2";
+            virtualisation.useBootLoader = true;
+            nix.package = nixPackage;
+          };
+          nodes.installer =
+            { modulesPath, ... }:
+            {
+              imports = [
+                (modulesPath + "/../tests/common/auto-format-root-device.nix")
+              ];
+              services.openssh.enable = true;
+              system.nixos.variant_id = "installer";
+              environment.systemPackages = [ pkgs.nixos-facter ];
+              virtualisation.emptyDiskImages = [ 512 ];
+              virtualisation.diskSize = 8 * 1024;
+              virtualisation.rootDevice = "/dev/vdb";
+              # both installer and target need to use the same diskImage
+              virtualisation.diskImage = "./target.qcow2";
+              nix.package = nixPackage;
+              nix.settings = {
+                substituters = lib.mkForce [ ];
+                hashed-mirrors = null;
+                connect-timeout = lib.mkForce 3;
+                flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
+                experimental-features = [
+                  "nix-command"
+                  "flakes"
+                ];
+              };
+              users.users.nonrootuser = {
+                isNormalUser = true;
+                openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
+                extraGroups = [ "wheel" ];
+              };
+              security.sudo.wheelNeedsPassword = false;
+              system.extraDependencies = dependencies;
+            };
+          nodes.client = {
+            environment.systemPackages = [
+              self.packages.${pkgs.system}.clan-cli
+            ] ++ self.packages.${pkgs.system}.clan-cli.runtimeDependencies;
+            environment.etc."install-closure".source = "${closureInfo}/store-paths";
+            virtualisation.memorySize = 3048;
+            nix.package = nixPackage;
+            nix.settings = {
+              substituters = lib.mkForce [ ];
+              hashed-mirrors = null;
+              connect-timeout = lib.mkForce 3;
+              flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
+              experimental-features = [
+                "nix-command"
+                "flakes"
+              ];
+            };
+            system.extraDependencies = dependencies;
+          };
+
+          testScript = ''
+            client.start()
+            installer.start()
+
+            client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
+            client.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v nonrootuser@installer hostname")
+            client.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
+            client.fail("test -f test-flake/machines/test-install-machine-without-system/hardware-configuration.nix")
+            client.fail("test -f test-flake/machines/test-install-machine-without-system/facter.json")
+            client.succeed("clan machines update-hardware-config --flake test-flake test-install-machine-without-system nonrootuser@installer >&2")
+            client.succeed("test -f test-flake/machines/test-install-machine-without-system/facter.json")
+            client.succeed("rm test-flake/machines/test-install-machine-without-system/facter.json")
+            client.succeed("clan machines install --debug --flake test-flake --yes test-install-machine-without-system --target-host nonrootuser@installer --update-hardware-config nixos-facter >&2")
+            try:
+                installer.shutdown()
+            except BrokenPipeError:
+                # qemu has already exited
+                pass
+
+            target.state_dir = installer.state_dir
+            target.start()
+            target.wait_for_unit("multi-user.target")
+            assert(target.succeed("cat /etc/install-successful").strip() == "ok")
+          '';
+        } { inherit pkgs self; };
+      };
+    };
+}
@@ -1,6 +1,5 @@
 {
   self,
-  inputs,
   lib,
   ...
 }:
@@ -17,18 +16,68 @@
       { lib, modulesPath, ... }:
       {
         imports = [
-          self.clanModules.single-disk
           (modulesPath + "/testing/test-instrumentation.nix") # we need these 2 modules always to be able to run the tests
           (modulesPath + "/profiles/qemu-guest.nix")
           ../lib/minify.nix
         ];
-        clan.single-disk.device = "/dev/vda";
 
         environment.etc."install-successful".text = "ok";
 
         nixpkgs.hostPlatform = "x86_64-linux";
         boot.consoleLogLevel = lib.mkForce 100;
         boot.kernelParams = [ "boot.shell_on_fail" ];
 
+        # disko config
+        boot.loader.grub.efiSupport = lib.mkDefault true;
+        boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
+        clan.core.vars.settings.secretStore = "vm";
+        clan.core.vars.generators.test = {
+          files.test.neededFor = "partitioning";
+          script = ''
+            echo "notok" > $out/test
+          '';
+        };
+        disko.devices = {
+          disk = {
+            main = {
+              type = "disk";
+              device = "/dev/vda";
+
+              preCreateHook = ''
+                test -e /run/partitioning-secrets/test/test
+              '';
+
+              content = {
+                type = "gpt";
+                partitions = {
+                  boot = {
+                    size = "1M";
+                    type = "EF02"; # for grub MBR
+                    priority = 1;
+                  };
+                  ESP = {
+                    size = "512M";
+                    type = "EF00";
+                    content = {
+                      type = "filesystem";
+                      format = "vfat";
+                      mountpoint = "/boot";
+                      mountOptions = [ "umask=0077" ];
+                    };
+                  };
+                  root = {
+                    size = "100%";
+                    content = {
+                      type = "filesystem";
+                      format = "ext4";
+                      mountpoint = "/";
+                    };
+                  };
+                };
+              };
+            };
+          };
+        };
       };
   };
   perSystem =
@@ -43,10 +92,23 @@
         self.nixosConfigurations.test-install-machine.config.system.build.toplevel
         self.nixosConfigurations.test-install-machine.config.system.build.diskoScript
         self.nixosConfigurations.test-install-machine.config.system.clan.deployment.file
+        pkgs.bash.drvPath
+        pkgs.stdenv.drvPath
        pkgs.nixos-anywhere
        pkgs.bubblewrap
      ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
      closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
+
+      # with Nix 2.24 we get:
+      # vm-test-run-test-installation> client # error: sized: unexpected end-of-file
+      # vm-test-run-test-installation> client # error: unexpected end-of-file
+      # This seems to be fixed with Nix 2.26
+      # Remove this line once `pkgs.nix` is 2.26+
+      nixPackage =
+        assert
+          lib.versionOlder pkgs.nix.version "2.26"
+          && lib.versionAtLeast pkgs.nixVersions.latest.version "2.26";
+        pkgs.nixVersions.latest;
    in
    {
      # On aarch64-linux, hangs on reboot with after installation:
@@ -58,13 +120,14 @@
      # vm-test-run-test-installation> new_machine: QEMU running (pid 80)
      # vm-test-run-test-installation> new_machine: Guest root shell did not produce any data yet...
      # vm-test-run-test-installation> new_machine: To debug, enter the VM and run 'systemctl status backdoor.service'.
-      checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux") {
+      checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
        test-installation = (import ../lib/test-base.nix) {
          name = "test-installation";
          nodes.target = {
            services.openssh.enable = true;
            virtualisation.diskImage = "./target.qcow2";
            virtualisation.useBootLoader = true;
+            nix.package = nixPackage;
 
            # virtualisation.fileSystems."/" = {
            #   device = "/dev/disk/by-label/this-is-not-real-and-will-never-be-used";
@@ -86,6 +149,7 @@
            virtualisation.rootDevice = "/dev/vdb";
            # both installer and target need to use the same diskImage
            virtualisation.diskImage = "./target.qcow2";
+            nix.package = nixPackage;
            nix.settings = {
              substituters = lib.mkForce [ ];
              hashed-mirrors = null;
@@ -103,7 +167,8 @@
              self.packages.${pkgs.system}.clan-cli
            ] ++ self.packages.${pkgs.system}.clan-cli.runtimeDependencies;
            environment.etc."install-closure".source = "${closureInfo}/store-paths";
-            virtualisation.memorySize = 2048;
+            virtualisation.memorySize = 3048;
+            nix.package = nixPackage;
            nix.settings = {
              substituters = lib.mkForce [ ];
              hashed-mirrors = null;
@@ -124,12 +189,19 @@
            client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
            client.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v root@installer hostname")
            client.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
+
+            # test that we can generate hardware configurations
+            client.fail("test -f test-flake/machines/test-install-machine/facter.json")
            client.fail("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
-            client.succeed("clan machines update-hardware-config --flake test-flake test-install-machine root@installer >&2")
-            client.succeed("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
+            client.succeed("clan machines update-hardware-config --backend nixos-facter --flake test-flake test-install-machine root@installer>&2")
+            client.succeed("test -f test-flake/machines/test-install-machine/facter.json")
-            client.succeed("clan machines install --debug --flake ${../..} --yes test-install-machine --target-host root@installer >&2")
+            client.succeed("clan machines update-hardware-config --backend nixos-generate-config --flake test-flake test-install-machine root@installer>&2")
+            client.succeed("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
+
+            # but we don't use them because they're not cached
+            client.succeed("rm test-flake/machines/test-install-machine/hardware-configuration.nix test-flake/machines/test-install-machine/facter.json")
+
+            client.succeed("clan machines install --debug --flake test-flake --yes test-install-machine --target-host root@installer >&2")
            try:
                installer.shutdown()
            except BrokenPipeError:
@@ -7,9 +7,19 @@
 let
   testDriver = hostPkgs.python3.pkgs.callPackage ./package.nix {
     inherit (config) extraPythonPackages;
-    inherit (hostPkgs.pkgs) util-linux systemd;
+    inherit (hostPkgs.pkgs) util-linux systemd nix;
   };
-  containers = map (m: m.system.build.toplevel) (lib.attrValues config.nodes);
+  containers =
+    testScript:
+    map (m: [
+      m.system.build.toplevel
+      (hostPkgs.closureInfo {
+        rootPaths = [
+          m.system.build.toplevel
+          (hostPkgs.writeText "testScript" testScript)
+        ];
+      })
+    ]) (lib.attrValues config.nodes);
   pythonizeName =
     name:
     let
@@ -44,8 +54,6 @@ in
   ''
     mkdir -p $out/bin
 
-    containers=(${toString containers})
-
    ${lib.optionalString (!config.skipTypeCheck) ''
      # prepend type hints so the test script can be type checked with mypy
      cat "${./test-script-prepend.py}" >> testScriptWithTypes
@@ -66,7 +74,13 @@ in
    ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-test-driver
 
    wrapProgram $out/bin/nixos-test-driver \
-      ${lib.concatStringsSep " " (map (name: "--add-flags '--container ${name}'") containers)} \
+      ${
+        lib.concatStringsSep " " (
+          map (container: "--add-flags '--container ${builtins.toString container}'") (
+            containers config.testScriptString
+          )
+        )
+      } \
      --add-flags "--test-script '$out/test-script'"
  ''
);
@@ -5,6 +5,7 @@
   setuptools,
   util-linux,
   systemd,
+  nix,
   colorama,
   junit-xml,
 }:
@@ -16,6 +17,7 @@ buildPythonApplication {
     systemd
     colorama
     junit-xml
+    nix
   ] ++ extraPythonPackages python3Packages;
   nativeBuildInputs = [ setuptools ];
   format = "pyproject";
@@ -1,4 +1,5 @@
 import argparse
+import ctypes
 import os
 import re
 import subprocess
@@ -12,6 +13,55 @@ from typing import Any
 
 from .logger import AbstractLogger, CompositeLogger, TerminalLogger
 
+# Load the C library
+libc = ctypes.CDLL("libc.so.6", use_errno=True)
+
+# Define the mount function
+libc.mount.argtypes = [
+    ctypes.c_char_p,  # source
+    ctypes.c_char_p,  # target
+    ctypes.c_char_p,  # filesystemtype
+    ctypes.c_ulong,  # mountflags
+    ctypes.c_void_p,  # data
+]
+libc.mount.restype = ctypes.c_int
+
+MS_BIND = 0x1000
+MS_REC = 0x4000
+
+
+def mount(
+    source: Path,
+    target: Path,
+    filesystemtype: str,
+    mountflags: int = 0,
+    data: str | None = None,
+) -> None:
+    """
+    A Python wrapper for the mount system call.
+
+    :param source: The source of the file system (e.g., device name, remote filesystem).
+    :param target: The mount point (an existing directory).
+    :param filesystemtype: The filesystem type (e.g., "ext4", "nfs").
+    :param mountflags: Mount options flags.
+    :param data: File system-specific data (e.g., options like "rw").
+    :raises OSError: If the mount system call fails.
+    """
+    # Convert Python strings to C-compatible strings
+    source_c = ctypes.c_char_p(str(source).encode("utf-8"))
+    target_c = ctypes.c_char_p(str(target).encode("utf-8"))
+    fstype_c = ctypes.c_char_p(filesystemtype.encode("utf-8"))
+    data_c = ctypes.c_char_p(data.encode("utf-8")) if data else None
+
+    # Call the mount system call
+    result = libc.mount(
+        source_c, target_c, fstype_c, ctypes.c_ulong(mountflags), data_c
+    )
+
+    if result != 0:
+        errno = ctypes.get_errno()
+        raise OSError(errno, os.strerror(errno))
+
 
 class Error(Exception):
     pass
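The ctypes wrapper added above is easiest to sanity-check with a tiny bind-mount round trip. A minimal sketch, not part of the diff: it assumes the mount() function and MS_BIND constant defined above, a Linux host, root privileges, and two illustrative /tmp directories.

    # Bind-mount one directory onto another and check that a file shows through.
    from pathlib import Path

    src = Path("/tmp/mount-src")
    dst = Path("/tmp/mount-dst")
    src.mkdir(exist_ok=True)
    dst.mkdir(exist_ok=True)
    (src / "marker").touch()
    mount(src, dst, "none", MS_BIND)  # raises OSError(errno, ...) on failure
    assert (dst / "marker").exists()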
@@ -71,7 +121,7 @@ class Machine:
             self.rootdir,
             "--register=no",
             "--resolv-conf=off",
-            "--bind-ro=/nix/store",
+            "--bind=/nix",
             "--bind",
             self.out_dir,
             "--bind=/proc:/run/host/proc",
@@ -102,9 +152,9 @@ class Machine:
             .read_text()
             .split()
         )
-        assert (
-            len(childs) == 1
-        ), f"Expected exactly one child process for systemd-nspawn, got {childs}"
+        assert len(childs) == 1, (
+            f"Expected exactly one child process for systemd-nspawn, got {childs}"
+        )
         try:
             return int(childs[0])
         except ValueError as e:
@@ -253,7 +303,9 @@ class Machine:
         info = self.get_unit_info(unit)
         state = info["ActiveState"]
         if state == "failed":
-            msg = f'unit "{unit}" reached state "{state}"'
+            proc = self.systemctl(f"--lines 0 status {unit}")
+            journal = self.execute(f"journalctl -u {unit} --no-pager")
+            msg = f'unit "{unit}" reached state "{state}":\n{proc.stdout}\n{journal.stdout}'
             raise Error(msg)
 
         if state == "inactive":
@@ -271,7 +323,9 @@ class Machine:
     def succeed(self, command: str, timeout: int | None = None) -> str:
         res = self.execute(command, timeout=timeout)
         if res.returncode != 0:
-            msg = f"Failed to run command {command}"
+            msg = f"Failed to run command {command}\n"
+            msg += f"Exit code: {res.returncode}\n"
+            msg += f"Stdout: {res.stdout}"
             raise RuntimeError(msg)
         return res.stdout
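The richer failure message added to succeed() above is easy to reproduce outside the driver. A self-contained sketch using subprocess in place of the container transport (the shell command is illustrative):

    import subprocess

    def succeed(command: str) -> str:
        # Run a shell command; on failure raise with exit code and stdout, as above.
        res = subprocess.run(command, shell=True, capture_output=True, text=True)
        if res.returncode != 0:
            msg = f"Failed to run command {command}\n"
            msg += f"Exit code: {res.returncode}\n"
            msg += f"Stdout: {res.stdout}"
            raise RuntimeError(msg)
        return res.stdout

    print(succeed("echo hello").strip())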
@@ -288,6 +342,12 @@ class Machine:
         self.shutdown()
 
 
+NIX_DIR = Path("/nix")
+NIX_STORE = Path("/nix/store/")
+NEW_NIX_DIR = Path("/.nix-rw")
+NEW_NIX_STORE_DIR = NEW_NIX_DIR / "store"
+
+
 def setup_filesystems() -> None:
     # We don't care about cleaning up the mount points, since we're running in a nix sandbox.
     Path("/run").mkdir(parents=True, exist_ok=True)
@@ -296,6 +356,32 @@ def setup_filesystems() -> None:
     Path("/etc").chmod(0o755)
     Path("/etc/os-release").touch()
     Path("/etc/machine-id").write_text("a5ea3f98dedc0278b6f3cc8c37eeaeac")
+    NEW_NIX_STORE_DIR.mkdir(parents=True)
+    # Read /proc/mounts and replicate every bind mount
+    with Path("/proc/self/mounts").open() as f:
+        for line in f:
+            columns = line.split(" ")
+            source = Path(columns[1])
+            if source.parent != NIX_STORE:
+                continue
+            target = NEW_NIX_STORE_DIR / source.name
+            if source.is_dir():
+                target.mkdir()
+            else:
+                target.touch()
+            try:
+                mount(source, target, "none", MS_BIND)
+            except OSError as e:
+                msg = f"mount({source}, {target}) failed"
+                raise Error(msg) from e
+    out = Path(os.environ["out"])
+    (NEW_NIX_STORE_DIR / out.name).mkdir()
+    mount(NEW_NIX_DIR, NIX_DIR, "none", MS_BIND | MS_REC)
+
+
+def load_nix_db(closure_info: Path) -> None:
+    with (closure_info / "registration").open() as f:
+        subprocess.run(["nix-store", "--load-db"], stdin=f, check=True, text=True)
+
 
 class Driver:
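The replication loop above relies on one detail of /proc/self/mounts: the second whitespace-separated field of each line is the mount point. A self-contained sketch of just that parsing step (Linux only; it prints whatever store paths are currently bind-mounted):

    from pathlib import Path

    NIX_STORE = Path("/nix/store")
    with Path("/proc/self/mounts").open() as f:
        for line in f:
            mount_point = Path(line.split(" ")[1])
            if mount_point.parent == NIX_STORE:
                print(mount_point)  # a store path visible as its own bind mount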
@@ -303,7 +389,7 @@ class Driver:
 
     def __init__(
         self,
-        containers: list[Path],
+        containers: list[tuple[Path, Path]],
         logger: AbstractLogger,
         testscript: str,
         out_dir: str,
@@ -313,21 +399,24 @@ class Driver:
         self.out_dir = out_dir
         self.logger = logger
         setup_filesystems()
+        # TODO: this won't work for multiple containers
+        assert len(containers) == 1, "Only one container is supported at the moment"
+        load_nix_db(containers[0][1])
 
         self.tempdir = TemporaryDirectory()
         tempdir_path = Path(self.tempdir.name)
 
         self.machines = []
         for container in containers:
-            name_match = re.match(r".*-nixos-system-(.+)-(.+)", container.name)
+            name_match = re.match(r".*-nixos-system-(.+)-(.+)", container[0].name)
             if not name_match:
-                msg = f"Unable to extract hostname from {container.name}"
+                msg = f"Unable to extract hostname from {container[0].name}"
                 raise Error(msg)
             name = name_match.group(1)
             self.machines.append(
                 Machine(
                     name=name,
-                    toplevel=container,
+                    toplevel=container[0],
                     rootdir=tempdir_path / name,
                     out_dir=self.out_dir,
                     logger=self.logger,
@@ -399,9 +488,11 @@ def main() -> None:
     arg_parser = argparse.ArgumentParser(prog="nixos-test-driver")
     arg_parser.add_argument(
         "--containers",
-        nargs="+",
+        nargs=2,
+        action="append",
         type=Path,
-        help="container system toplevel paths",
+        metavar=("TOPLEVEL_STORE_DIR", "CLOSURE_INFO"),
+        help="container system toplevel store dir and closure info",
     )
     arg_parser.add_argument(
         "--test-script",
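The --containers change above pairs each toplevel with its closure info. Combining nargs=2 with action="append" makes argparse collect one two-element list per flag occurrence, which matches the list-of-pairs Driver now expects; a self-contained sketch (paths are illustrative):

    import argparse
    from pathlib import Path

    p = argparse.ArgumentParser(prog="nixos-test-driver")
    p.add_argument(
        "--containers",
        nargs=2,
        action="append",
        type=Path,
        metavar=("TOPLEVEL_STORE_DIR", "CLOSURE_INFO"),
    )
    args = p.parse_args(
        ["--containers", "/nix/store/aaa-toplevel", "/nix/store/bbb-closure-info"]
    )
    # One [toplevel, closure-info] pair per --containers occurrence:
    assert args.containers == [
        [Path("/nix/store/aaa-toplevel"), Path("/nix/store/bbb-closure-info")]
    ]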
@@ -16,6 +16,9 @@ in
   documentation.enable = lib.mkDefault false;
   boot.isContainer = true;
 
+  # needed since nixpkgs 7fb2f407c01b017737eafc26b065d7f56434a992 removed the getty unit by default
+  console.enable = true;
+
   # undo qemu stuff
   system.build.initialRamdisk = "";
   virtualisation.sharedDirectories = lib.mkForce { };
@@ -25,9 +28,13 @@ in
   networking.interfaces = lib.mkForce { };
   #networking.primaryIPAddress = lib.mkForce null;
   systemd.services.backdoor.enable = false;
+
+  # we don't have permission to set cpu scheduler in our container
+  systemd.services.nix-daemon.serviceConfig.CPUSchedulingPolicy = lib.mkForce "";
 };
+# to accept external dependencies such as disko
+node.specialArgs.self = self;
 _module.args = { inherit self; };
 imports = [
   test
   ./container-driver/module.nix
@@ -1,7 +1,8 @@
 { lib, ... }:
 {
   nixpkgs.flake.setFlakeRegistry = false;
   nixpkgs.flake.setNixPath = false;
-  nix.registry.nixpkgs.to = { };
+  nix.registry = lib.mkForce { };
   documentation.doc.enable = false;
   documentation.man.enable = false;
 }
@@ -7,15 +7,19 @@ in
 (nixos-lib.runTest {
   hostPkgs = pkgs;
   # speed-up evaluation
-  defaults = {
-    imports = [
-      ./minify.nix
-    ];
-    documentation.enable = lib.mkDefault false;
-    nix.settings.min-free = 0;
-    system.stateVersion = lib.version;
-  };
+  defaults = (
+    { config, ... }:
+    {
+      imports = [
+        ./minify.nix
+      ];
+      documentation.enable = lib.mkDefault false;
+      nix.settings.min-free = 0;
+      system.stateVersion = config.system.nixos.release;
+    }
+  );
 
   _module.args = { inherit self; };
   # to accept external dependencies such as disko
   node.specialArgs.self = self;
   imports = [ test ];
@@ -31,6 +31,8 @@
   clan.matrix-synapse.users.someuser = { };
 
   clan.core.facts.secretStore = "vm";
+  clan.core.vars.settings.secretStore = "vm";
+  clan.core.vars.settings.publicStore = "in_repo";
 
   # because we use systemd-tmpfiles to copy the secrets, we need to a separate systemd-tmpfiles call to provision them.
   boot.postBootCommands = "${config.systemd.package}/bin/systemd-tmpfiles --create /etc/tmpfiles.d/00-vmsecrets.conf";
@@ -41,21 +43,21 @@
     d.mode = "0700";
     z.mode = "0700";
   };
-  "/etc/secrets/synapse-registration_shared_secret" = {
+  "/etc/secrets/matrix-synapse/synapse-registration_shared_secret" = {
     f.argument = "supersecret";
     z = {
       mode = "0400";
       user = "root";
     };
   };
-  "/etc/secrets/matrix-password-admin" = {
+  "/etc/secrets/matrix-password-admin/matrix-password-admin" = {
     f.argument = "matrix-password1";
     z = {
       mode = "0400";
       user = "root";
     };
   };
-  "/etc/secrets/matrix-password-someuser" = {
+  "/etc/secrets/matrix-password-someuser/matrix-password-someuser" = {
     f.argument = "matrix-password2";
     z = {
       mode = "0400";
62 checks/morph/flake-module.nix Normal file

@@ -0,0 +1,62 @@
+{
+  self,
+  ...
+}:
+{
+  clan.machines.test-morph-machine = {
+    imports = [
+      ./template/configuration.nix
+      self.nixosModules.clanCore
+    ];
+    nixpkgs.hostPlatform = "x86_64-linux";
+    environment.etc."testfile".text = "morphed";
+  };
+
+  clan.templates.machine.test-morph-template = {
+    description = "Morph a machine";
+    path = ./template;
+  };
+
+  perSystem =
+    {
+      pkgs,
+      ...
+    }:
+    {
+      checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
+        test-morph = (import ../lib/test-base.nix) {
+          name = "morph";
+
+          nodes = {
+            actual =
+              { pkgs, ... }:
+              let
+                dependencies = [
+                  self
+                  pkgs.nixos-anywhere
+                  pkgs.stdenv.drvPath
+                  pkgs.stdenvNoCC
+                  self.nixosConfigurations.test-morph-machine.config.system.build.toplevel
+                  self.nixosConfigurations.test-morph-machine.config.system.clan.deployment.file
+                ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+                closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
+              in
+
+              {
+                environment.etc."install-closure".source = "${closureInfo}/store-paths";
+                system.extraDependencies = dependencies;
+                virtualisation.memorySize = 2048;
+                environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
+              };
+          };
+          testScript = ''
+            start_all()
+            actual.fail("cat /etc/testfile")
+            actual.succeed("env CLAN_DIR=${self} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
+            assert actual.succeed("cat /etc/testfile") == "morphed"
+          '';
+        } { inherit pkgs self; };
+      };
+
+    };
+}

checks/morph/template/configuration.nix (new file, 12 lines)
@@ -0,0 +1,12 @@
{ modulesPath, ... }:
{
  imports = [
    # we always need these two modules to be able to run the tests
    (modulesPath + "/testing/test-instrumentation.nix")
    (modulesPath + "/virtualisation/qemu-vm.nix")

    (modulesPath + "/profiles/minimal.nix")
  ];

  clan.core.enableRecommendedDefaults = false;
}

clanModules/auto-upgrade/README.md (new file, 8 lines)
@@ -0,0 +1,8 @@
---
description = "Set up automatic upgrades"
categories = ["System"]
features = [ "inventory" ]
---

Whether to periodically upgrade NixOS to the latest version. If enabled, a
systemd timer will run `nixos-rebuild switch --upgrade` once a day.
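
A hypothetical usage sketch (machine names and the flake reference are placeholders, and the exact attribute path for per-role settings may differ from this sketch):

```nix
# sketch only: enable auto-upgrade on two machines via the inventory
inventory.services = {
  auto-upgrade.default = {
    roles.default.machines = [
      "server1"
      "server2"
    ];
    # the module's single option: the flake to upgrade from
    roles.default.config.flake = "git+https://git.example.com/my-clan";
  };
};
```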

clanModules/auto-upgrade/roles/default.nix (new file, 24 lines)
@@ -0,0 +1,24 @@
{
  config,
  lib,
  ...
}:
let
  cfg = config.clan.autoUpgrade;
in
{
  options.clan.autoUpgrade = {
    flake = lib.mkOption {
      type = lib.types.str;
      description = "Flake reference";
    };
  };
  config = {
    system.autoUpgrade = {
      # `cfg` already points at `config.clan.autoUpgrade`
      inherit (cfg) flake;
      enable = true;
      dates = "02:00";
      randomizedDelaySec = "45min";
    };
  };
}

@@ -63,9 +63,9 @@ in
       rsh = lib.mkOption {
         type = lib.types.str;
         default = "ssh -i ${
-          config.clan.core.facts.services.borgbackup.secret."borgbackup.ssh".path
+          config.clan.core.vars.generators.borgbackup.files."borgbackup.ssh".path
         } -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
-        defaultText = "ssh -i \${config.clan.core.facts.services.borgbackup.secret.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
+        defaultText = "ssh -i \${config.clan.core.vars.generators.borgbackup.files.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
         description = "the rsh to use for the backup";
       };
     };
@@ -126,7 +126,7 @@ in

       encryption = {
         mode = "repokey";
-        passCommand = "cat ${config.clan.core.facts.services.borgbackup.secret."borgbackup.repokey".path}";
+        passCommand = "cat ${config.clan.core.vars.generators.borgbackup.files."borgbackup.repokey".path}";
       };

       prune.keep = {
@@ -177,20 +177,21 @@ in
       })
     ];

-    # Facts generation. So the client can authenticate to the server
-    clan.core.facts.services.borgbackup = {
-      public."borgbackup.ssh.pub" = { };
-      secret."borgbackup.ssh" = { };
-      secret."borgbackup.repokey" = { };
-      generator.path = [
-        pkgs.openssh
+    clan.core.vars.generators.borgbackup = {
+
+      files."borgbackup.ssh.pub".secret = false;
+      files."borgbackup.ssh" = { };
+      files."borgbackup.repokey" = { };
+
+      migrateFact = "borgbackup";
+      runtimeInputs = [
+        pkgs.coreutils
+        pkgs.openssh
         pkgs.xkcdpass
       ];
-      generator.script = ''
-        ssh-keygen -t ed25519 -N "" -f "$secrets"/borgbackup.ssh
-        mv "$secrets"/borgbackup.ssh.pub "$facts"/borgbackup.ssh.pub
-        xkcdpass -n 4 -d - > "$secrets"/borgbackup.repokey
+      script = ''
+        ssh-keygen -t ed25519 -N "" -f $out/borgbackup.ssh
+        xkcdpass -n 4 -d - > $out/borgbackup.repokey
       '';
     };

@@ -1,7 +1,7 @@
 { config, lib, ... }:
 let
   dir = config.clan.core.settings.directory;
-  machineDir = dir + "/machines/";
+  machineDir = dir + "/vars/per-machine/";
   machineName = config.clan.core.settings.machine.name;

   # Instances might be empty, if the module is not used via the inventory
@@ -33,7 +33,8 @@ in
   };
   config.services.borgbackup.repos =
     let
-      borgbackupIpMachinePath = machines: machineDir + machines + "/facts/borgbackup.ssh.pub";
+      borgbackupIpMachinePath = machine: machineDir + machine + "/borgbackup/borgbackup.ssh.pub/value";
+
       machinesMaybeKey = builtins.map (
         machine:
         let
@@ -44,7 +45,7 @@ in
         else
           lib.warn ''
             Machine ${machine} does not have a borgbackup key at ${fullPath},
-            run `clan facts generate ${machine}` to generate it.
+            run `clan var generate ${machine}` to generate it.
           '' null
       ) allClients;

@@ -52,7 +52,7 @@ let
       migrateFact = "${secret_id opt}";
       prompts.${secret_id opt} = {
         type = "hidden";
-        createFile = true;
+        persist = true;
       };
     };
   };

@@ -1,7 +1,15 @@
-{ ... }:
+{ lib, ... }:
+let
+  inherit (lib)
+    filterAttrs
+    pathExists
+    ;
+in
 {
-  flake.clanModules = {
+  # only import available files, as this allows filtering the files for tests.
+  flake.clanModules = filterAttrs (_name: pathExists) {
     admin = ./admin;
+    auto-upgrade = ./auto-upgrade;
     borgbackup = ./borgbackup;
     borgbackup-static = ./borgbackup-static;
     deltachat = ./deltachat;
@@ -19,6 +27,7 @@
     matrix-synapse = ./matrix-synapse;
     moonlight = ./moonlight;
     mumble = ./mumble;
+    mycelium = ./mycelium;
     nginx = ./nginx;
     packages = ./packages;
     postgresql = ./postgresql;

@@ -3,8 +3,7 @@ description = "S3-compatible object store for small self-hosted geo-distributed
 ---

 This module generates garage specific keys automatically.
-When using garage in a distributed deployment the `rpc_key` between connected instances must be shared.
-This is currently still a manual process.
+Also shares the `rpc_secret` between instances.

 Options: [NixosModuleOptions](https://search.nixos.org/options?channel=unstable&size=50&sort=relevance&type=packages&query=garage)
 Documentation: https://garagehq.deuxfleurs.fr/

@@ -2,9 +2,9 @@
 {
   systemd.services.garage.serviceConfig = {
     LoadCredential = [
-      "rpc_secret_path:${config.clan.core.facts.services.garage.secret.garage_rpc_secret.path}"
-      "admin_token_path:${config.clan.core.facts.services.garage.secret.garage_admin_token.path}"
-      "metrics_token_path:${config.clan.core.facts.services.garage.secret.garage_metrics_token.path}"
+      "rpc_secret_path:${config.clan.core.vars.generators.garage-shared.files.rpc_secret.path}"
+      "admin_token_path:${config.clan.core.vars.generators.garage.files.admin_token.path}"
+      "metrics_token_path:${config.clan.core.vars.generators.garage.files.metrics_token.path}"
     ];
     Environment = [
       "GARAGE_ALLOW_WORLD_READABLE_SECRETS=true"
@@ -14,37 +14,30 @@
     ];
   };

-  clan.core.facts.services.garage = {
-    secret.garage_rpc_secret = { };
-    secret.garage_admin_token = { };
-    secret.garage_metrics_token = { };
-    generator.path = [
+  clan.core.vars.generators.garage = {
+    files.admin_token = { };
+    files.metrics_token = { };
+    runtimeInputs = [
       pkgs.coreutils
       pkgs.openssl
     ];
-    generator.script = ''
-      openssl rand -hex -out $secrets/garage_rpc_secret 32
-      openssl rand -base64 -out $secrets/garage_admin_token 32
-      openssl rand -base64 -out $secrets/garage_metrics_token 32
+    script = ''
+      openssl rand -base64 -out $out/admin_token 32
+      openssl rand -base64 -out $out/metrics_token 32
     '';
   };

-  # TODO: Vars is not in a useable state currently
-  # Move back, once it is implemented.
-  # clan.core.vars.generators.garage = {
-  #   files.rpc_secret = { };
-  #   files.admin_token = { };
-  #   files.metrics_token = { };
-  #   runtimeInputs = [
-  #     pkgs.coreutils
-  #     pkgs.openssl
-  #   ];
-  #   script = ''
-  #     openssl rand -hex -out $out/rpc_secret 32
-  #     openssl rand -base64 -out $out/admin_token 32
-  #     openssl rand -base64 -out $out/metrics_token 32
-  #   '';
-  # };
+  clan.core.vars.generators.garage-shared = {
+    share = true;
+    files.rpc_secret = { };
+    runtimeInputs = [
+      pkgs.coreutils
+      pkgs.openssl
+    ];
+    script = ''
+      openssl rand -hex -out $out/rpc_secret 32
+    '';
+  };

   clan.core.state.garage.folders = [ config.services.garage.settings.metadata_dir ];
 }

@@ -6,4 +6,4 @@ categories = [ "Network" ]

 !!! Warning
     If you've been using NetworkManager + wpa_supplicant and are now switching to IWD, read this migration guide:
-    https://iwd.wiki.kernel.org/networkmanager#converting_network_profiles
+    https://archive.kernel.org/oldwiki/iwd.wiki.kernel.org/networkmanager.html#converting_network_profiles

@@ -1,4 +1,9 @@
-{ lib, config, ... }:
+{
+  lib,
+  config,
+  pkgs,
+  ...
+}:

 let
   cfg = config.clan.iwd;
@@ -12,12 +17,13 @@ let
     {
       secret.${secret_name} = { };
       generator.prompt = "Wifi password for '${value.ssid}'";
+      # ref. man iwd.network
       generator.script = ''
        config="
        [Settings]
        AutoConnect=${if value.AutoConnect then "true" else "false"}
        [Security]
-       Passphrase=\"$prompt_value\"
+       Passphrase=$(echo -e "$prompt_value" | ${lib.getExe pkgs.gnused} "s=\\\=\\\\\\\=g;s=\t=\\\t=g;s=\r=\\\r=g;s=^ =\\\s=")
        "
        echo "$config" > "$secrets/${secret_name}"
      '';

@@ -10,18 +10,18 @@ let
 in
 {
   config = lib.mkMerge [
-    (lib.mkIf ((var.machineId.value or null) != null) {
+    (lib.mkIf ((var.value or null) != null) {
       assertions = [
         {
-          assertion = lib.stringLength var.machineId.value == 32;
+          assertion = lib.stringLength var.value == 32;
           message = "machineId must be exactly 32 characters long.";
         }
       ];
       boot.kernelParams = [
-        ''systemd.machine_id=${var.machineId.value}''
+        ''systemd.machine_id=${var.value}''
       ];
       environment.etc."machine-id" = {
-        text = var.machineId.value;
+        text = var.value;
       };
     })
     {

@@ -106,17 +106,6 @@ in
     };
   };

-  systemd.tmpfiles.settings."01-matrix" = {
-    "/run/synapse-registration-shared-secret" = {
-      C.argument =
-        config.clan.core.facts.services.matrix-synapse.secret.synapse-registration_shared_secret.path;
-      z = {
-        mode = "0400";
-        user = "matrix-synapse";
-      };
-    };
-  };

   clan.postgresql.users.matrix-synapse = { };
   clan.postgresql.databases.matrix-synapse.create.options = {
     TEMPLATE = "template0";

@@ -127,26 +116,28 @@ in
   };
   clan.postgresql.databases.matrix-synapse.restore.stopOnRestore = [ "matrix-synapse" ];

-  clan.core.facts.services =
+  clan.core.vars.generators =
     {
       "matrix-synapse" = {
-        secret."synapse-registration_shared_secret" = { };
-        generator.path = with pkgs; [
+        files."synapse-registration_shared_secret" = { };
+        runtimeInputs = with pkgs; [
           coreutils
           pwgen
         ];
-        generator.script = ''
-          echo -n "$(pwgen -s 32 1)" > "$secrets"/synapse-registration_shared_secret
+        migrateFact = "matrix-synapse";
+        script = ''
+          echo -n "$(pwgen -s 32 1)" > "$out"/synapse-registration_shared_secret
         '';
       };
     }
     // lib.mapAttrs' (
       name: user:
       lib.nameValuePair "matrix-password-${user.name}" {
-        secret."matrix-password-${user.name}" = { };
-        generator.path = with pkgs; [ xkcdpass ];
-        generator.script = ''
-          xkcdpass -n 4 -d - > "$secrets"/${lib.escapeShellArg "matrix-password-${user.name}"}
+        files."matrix-password-${user.name}" = { };
+        migrateFact = "matrix-password-${user.name}";
+        runtimeInputs = with pkgs; [ xkcdpass ];
+        script = ''
+          xkcdpass -n 4 -d - > "$out"/${lib.escapeShellArg "matrix-password-${user.name}"}
         '';
       }
     ) cfg.users;

@@ -163,14 +154,20 @@ in
       + lib.concatMapStringsSep "\n" (user: ''
         # only create user if it doesn't exist
         /run/current-system/sw/bin/matrix-synapse-register_new_matrix_user --exists-ok --password-file ${
-          config.clan.core.facts.services."matrix-password-${user.name}".secret."matrix-password-${user.name}".path
+          config.clan.core.vars.generators."matrix-password-${user.name}".files."matrix-password-${user.name}".path
         } --user "${user.name}" ${if user.admin then "--admin" else "--no-admin"}
       '') (lib.attrValues cfg.users);
   in
   {
     path = [ pkgs.curl ];
+    serviceConfig.ExecStartPre = lib.mkBefore [
+      "+${pkgs.coreutils}/bin/install -o matrix-synapse -g matrix-synapse ${
+        lib.escapeShellArg
+          config.clan.core.vars.generators.matrix-synapse.files."synapse-registration_shared_secret".path
+      } /run/synapse-registration-shared-secret"
+    ];
     serviceConfig.ExecStartPost = [
-      (''+${pkgs.writeShellScript "matrix-synapse-create-users" usersScript}'')
+      ''+${pkgs.writeShellScript "matrix-synapse-create-users" usersScript}''
     ];
   };

clanModules/mycelium/README.md (new file, 30 lines)
@@ -0,0 +1,30 @@
---
description = "End-2-end encrypted IPv6 overlay network"
categories = ["System", "Network"]
features = [ "inventory" ]
---
Mycelium is an IPv6 overlay network written in Rust. Each node that joins the overlay network will receive an overlay network IP in the 400::/7 range.

Features:
- Mycelium is locality-aware: it will look for the shortest path between nodes
- All traffic between the nodes is end-2-end encrypted
- Traffic can be routed over nodes of friends (location-aware)
- If a physical link goes down, Mycelium will automatically reroute your traffic
- The IP address is IPv6 and linked to the private key
- A simple, reliable message bus is implemented on top of Mycelium
- Mycelium has multiple ways to communicate (QUIC, TCP, ...), and we are working on hole punching for QUIC, which means P2P traffic without middlemen for NATed networks, e.g. most homes
- Scalability is very important for us; we tried many overlay networks before and got stuck on all of them, so we are trying to design a network which scales to a planetary level
- You can run Mycelium without TUN and only use it as a reliable message bus.


An example configuration might look like this in the inventory:
```nix
mycelium.default = {
  roles.peer.machines = [
    "berlin"
    "munich"
  ];
};
```

This will add the machines named `berlin` and `munich` to the `mycelium` VPN.

clanModules/mycelium/roles/peer.nix (new file, 45 lines)
@@ -0,0 +1,45 @@
{
  pkgs,
  config,
  lib,
  ...
}:
{
  options = {
    clan.mycelium.openFirewall = lib.mkOption {
      type = lib.types.bool;
      default = true;
      description = "Open the firewall for mycelium";
    };

    clan.mycelium.addHostedPublicNodes = lib.mkOption {
      type = lib.types.bool;
      default = true;
      description = "Add hosted public nodes";
    };
  };

  config.services.mycelium = {
    enable = true;
    addHostedPublicNodes = lib.mkDefault config.clan.mycelium.addHostedPublicNodes;
    openFirewall = lib.mkDefault config.clan.mycelium.openFirewall;
    keyFile = config.clan.core.vars.generators.mycelium.files.key.path;
  };

  config.clan.core.vars.generators.mycelium = {
    files."key" = { };
    files."ip".secret = false;
    files."pubkey".secret = false;
    runtimeInputs = [
      pkgs.mycelium
      pkgs.coreutils
      pkgs.jq
    ];
    script = ''
      timeout 5 mycelium --key-file "$out"/key || :
      mycelium inspect --key-file "$out"/key --json | jq -r .publicKey > "$out"/pubkey
      mycelium inspect --key-file "$out"/key --json | jq -r .address > "$out"/ip
    '';
  };

}

@@ -1,5 +1,7 @@
 ---
 description = "Automatically generates and configures a password for the root user."
+categories = ["System"]
+features = [ "inventory" ]
 ---

 After the system has been installed/deployed, the following command can be used to display the root password:

@@ -1,29 +1,6 @@
+# Don't import this file
+# It is only here for backwards compatibility.
+# Don't author new modules with this file.
 {
-  pkgs,
-  config,
-  lib,
-  ...
-}:
-{
-  users.mutableUsers = false;
-  users.users.root.hashedPasswordFile =
-    config.clan.core.facts.services.root-password.secret.password-hash.path;
-
-  sops.secrets = lib.mkIf (config.clan.core.facts.secretStore == "sops") {
-    "${config.clan.core.settings.machine.name}-password-hash".neededForUsers = true;
-  };
-
-  clan.core.facts.services.root-password = {
-    secret.password = { };
-    secret.password-hash = { };
-    generator.path = with pkgs; [
-      coreutils
-      xkcdpass
-      mkpasswd
-    ];
-    generator.script = ''
-      xkcdpass --numwords 3 --delimiter - --count 1 | tr -d "\n" > $secrets/password
-      cat $secrets/password | mkpasswd -s -m sha-512 | tr -d "\n" > $secrets/password-hash
-    '';
-  };
+  imports = [ ./roles/default.nix ];
 }

clanModules/root-password/roles/default.nix (new file, 38 lines)
@@ -0,0 +1,38 @@
{
  pkgs,
  config,
  ...
}:
{
  users.mutableUsers = false;
  users.users.root.hashedPasswordFile =
    config.clan.core.vars.generators.root-password.files.password-hash.path;

  clan.core.vars.generators.root-password = {
    files.password-hash = {
      neededFor = "users";
    };
    files.password = {
      deploy = false;
    };
    migrateFact = "root-password";
    runtimeInputs = [
      pkgs.coreutils
      pkgs.mkpasswd
      pkgs.xkcdpass
    ];
    prompts.password.type = "hidden";
    prompts.password.persist = true;
    prompts.password.description = "You can autogenerate a password if you leave this prompt blank.";

    script = ''
      prompt_value=$(cat $prompts/password)
      if [[ -n ''${prompt_value-} ]]; then
        echo $prompt_value | tr -d "\n" > $out/password
      else
        xkcdpass --numwords 3 --delimiter - --count 1 | tr -d "\n" > $out/password
      fi
      mkpasswd -s -m sha-512 < $out/password | tr -d "\n" > $out/password-hash
    '';
  };
}

@@ -37,6 +37,7 @@ in
       type = "rsa";
     };
   };

   clan.core.vars.generators.openssh = {
     files."ssh.id_ed25519" = { };
     files."ssh.id_ed25519.pub".secret = false;
@@ -50,6 +51,14 @@ in
     '';
   };

+  programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
+    hostNames = [
+      "localhost"
+      config.networking.hostName
+    ] ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
+    publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
+  };
+
   clan.core.vars.generators.openssh-rsa = lib.mkIf config.clan.sshd.hostKeys.rsa.enable {
     files."ssh.id_rsa" = { };
     files."ssh.id_rsa.pub".secret = false;

@@ -3,7 +3,7 @@ let
   var = config.clan.core.vars.generators.state-version.files.version or { };
 in
 {
-  system.stateVersion = lib.mkDefault var.value;
+  system.stateVersion = lib.mkDefault (lib.removeSuffix "\n" var.value);

   clan.core.vars.generators.state-version = {
     files.version = {

@@ -7,7 +7,8 @@
 let
   dir = config.clan.core.settings.directory;
   machineDir = dir + "/machines/";
-  syncthingPublicKeyPath = machines: machineDir + machines + "/facts/syncthing.pub";
+  machineVarDir = dir + "/vars/per-machine/";
+  syncthingPublicKeyPath = machines: machineVarDir + machines + "/syncthing/id/value";
   machinesFileSet = builtins.readDir machineDir;
   machines = lib.mapAttrsToList (name: _: name) machinesFileSet;
   syncthingPublicKeysUnchecked = builtins.map (

@@ -83,24 +84,26 @@ in
     configDir = "/var/lib/syncthing";
     group = "syncthing";

-    key = lib.mkDefault config.clan.core.facts.services.syncthing.secret."syncthing.key".path or null;
-    cert = lib.mkDefault config.clan.core.facts.services.syncthing.secret."syncthing.cert".path or null;
+    key = lib.mkDefault config.clan.core.vars.generators.syncthing.files.key.path or null;
+    cert = lib.mkDefault config.clan.core.vars.generators.syncthing.files.cert.path or null;
   };

-  clan.core.facts.services.syncthing = {
-    secret."syncthing.key" = { };
-    secret."syncthing.cert" = { };
-    public."syncthing.pub" = { };
-    generator.path = [
+  clan.core.vars.generators.syncthing = {
+    files.key = { };
+    files.cert = { };
+    files.api = { };
+    files.id.secret = false;
+    runtimeInputs = [
       pkgs.coreutils
       pkgs.gnugrep
       pkgs.syncthing
     ];
-    generator.script = ''
-      syncthing generate --config "$secrets"
-      mv "$secrets"/key.pem "$secrets"/syncthing.key
-      mv "$secrets"/cert.pem "$secrets"/syncthing.cert
-      cat "$secrets"/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > "$facts"/syncthing.pub
+    script = ''
+      syncthing generate --config $out
+      mv $out/key.pem $out/key
+      mv $out/cert.pem $out/cert
+      cat $out/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > $out/id
+      cat $out/config.xml | grep -oP '<apikey>\K[^<]+' | uniq > $out/api
     '';
   };
 }

@@ -1,5 +1,7 @@
 ---
 description = "Automatically generates and configures a password for the specified user account."
+categories = ["System"]
+features = ["inventory"]
 ---

 If the option `prompt` is set to true, the user will be prompted to type in their desired password.

@@ -1,58 +1,6 @@
+# Don't import this file
+# It is only here for backwards compatibility.
+# Don't author new modules with this file.
 {
-  pkgs,
-  config,
-  lib,
-  ...
-}:
-let
-  cfg = config.clan.user-password;
-in
-{
-  options.clan.user-password = {
-    user = lib.mkOption {
-      type = lib.types.str;
-      example = "alice";
-      description = "The user the password should be generated for.";
-    };
-    prompt = lib.mkOption {
-      type = lib.types.bool;
-      default = true;
-      example = false;
-      description = "Whether the user should be prompted.";
-    };
-  };
-
-  config = {
-    users.mutableUsers = false;
-    users.users.${cfg.user} = {
-      hashedPasswordFile = config.clan.core.facts.services.user-password.secret.user-password-hash.path;
-      isNormalUser = lib.mkDefault true;
-    };
-
-    sops.secrets = lib.mkIf (config.clan.core.facts.secretStore == "sops") {
-      "${config.clan.core.settings.machine.name}-user-password-hash".neededForUsers = true;
-    };
-
-    clan.core.facts.services.user-password = {
-      secret.user-password = { };
-      secret.user-password-hash = { };
-      generator.prompt = (
-        lib.mkIf config.clan.user-password.prompt "Set the password for your user '${config.clan.user-password.user}'.
-        You can autogenerate a password, if you leave this prompt blank."
-      );
-      generator.path = with pkgs; [
-        coreutils
-        xkcdpass
-        mkpasswd
-      ];
-      generator.script = ''
-        if [[ -n ''${prompt_value-} ]]; then
-          echo $prompt_value | tr -d "\n" > $secrets/user-password
-        else
-          xkcdpass --numwords 3 --delimiter - --count 1 | tr -d "\n" > $secrets/user-password
-        fi
-        cat $secrets/user-password | mkpasswd -s -m sha-512 | tr -d "\n" > $secrets/user-password-hash
-      '';
-    };
-  };
+  imports = [ ./roles/default.nix ];
 }

clanModules/user-password/roles/default.nix (new file, 58 lines)
@@ -0,0 +1,58 @@
{
  pkgs,
  config,
  lib,
  ...
}:
let
  cfg = config.clan.user-password;
in
{
  options.clan.user-password = {
    user = lib.mkOption {
      type = lib.types.str;
      example = "alice";
      description = "The user the password should be generated for.";
    };
    prompt = lib.mkOption {
      type = lib.types.bool;
      default = true;
      example = false;
      description = "Whether the user should be prompted.";
    };
  };

  config = {
    users.mutableUsers = false;
    users.users.${cfg.user} = {
      hashedPasswordFile = config.clan.core.facts.services.user-password.secret.user-password-hash.path;
      isNormalUser = lib.mkDefault true;
    };

    sops.secrets = lib.mkIf (config.clan.core.facts.secretStore == "sops") {
      "${config.clan.core.settings.machine.name}-user-password-hash".neededForUsers = true;
    };

    clan.core.facts.services.user-password = {
      secret.user-password = { };
      secret.user-password-hash = { };
      generator.prompt = (
        lib.mkIf config.clan.user-password.prompt "Set the password for your user '${config.clan.user-password.user}'.
        You can autogenerate a password if you leave this prompt blank."
      );
      generator.path = with pkgs; [
        coreutils
        xkcdpass
        mkpasswd
      ];
      generator.script = ''
        if [[ -n ''${prompt_value-} ]]; then
          echo $prompt_value | tr -d "\n" > $secrets/user-password
        else
          xkcdpass --numwords 3 --delimiter - --count 1 | tr -d "\n" > $secrets/user-password
        fi
        cat $secrets/user-password | mkpasswd -s -m sha-512 | tr -d "\n" > $secrets/user-password-hash
      '';
    };
  };
}

@@ -14,9 +14,9 @@ let
       name = "iwd.${name}";
       value = {
         prompts.ssid.type = "line";
-        prompts.ssid.createFile = true;
+        prompts.ssid.persist = true;
         prompts.password.type = "hidden";
-        prompts.password.createFile = true;
+        prompts.password.persist = true;
         share = true;
       };
     };

@@ -1,5 +1,5 @@
 ---
-description = "Configures [Zerotier VPN](https://zerotier.com) secure and efficient networking within a Clan.."
+description = "Configures [Zerotier VPN](https://zerotier.com) secure and efficient networking within a Clan."
 features = [ "inventory" ]
 categories = [ "Network", "System" ]

decisions/01-ClanModules.md (new file, 547 lines)
@@ -0,0 +1,547 @@
# Clan service modules

Status: Accepted

## Context

To define a service in Clan, you need to define two things:

- `clanModule` - defined by module authors
- `inventory` - defined by users

The `clanModule` is currently a plain NixOS module. It is conditionally imported into each machine depending on the `service` and `role`.

A `role` is a function of a machine within a service. For example in the `backup` service there are `client` and `server` roles.

The `inventory` contains the settings for the user/consumer of the module. It describes what `services` run on each machine and with which `roles`.

Additionally any `service` can be instantiated multiple times.

This ADR proposes that we change how to write a `clanModule`. The `inventory` should get a new attribute called `instances` that allows for configuration of these modules.

### Status Quo

In this example the user configures 2 instances of the `networking` service:

The *user* defines

```nix
{
  inventory.services = {
    # anything inside an instance is instance specific
    networking."instance1" = {
      roles.client.tags = [ "all" ];
      machines.foo.config = { ... /* machine specific settings */ };

      # this will not apply to `clients` outside of `instance1`
      roles.client.config = { ... /* client specific settings */ };
    };
    networking."instance2" = {
      roles.server.tags = [ "all" ];
      config = { ... /* applies to every machine that runs this instance */ };
    };
  };
}
```

The *module author* defines:

```nix
# networking/roles/client.nix
{ config, ... }:
let
  instances = config.clan.inventory.services.networking or { };

  serviceConfig = config.clan.networking;
in {
  ## Set some nixos options
}
```

### Problems

Problems with the current way of writing clanModules:

1. There is no way to retrieve the config of a single service instance together with its name.
2. Directly exporting a single, anonymous nixosModule without any intermediary attribute layers doesn't leave room for exporting other inventory resources such as potentially `vars` or `homeManagerConfig`.
3. Multiple config instances can't be accessed individually.
   Example:
   ```nix
   inventory = {
     services = {
       network.c-base = {
         instanceConfig.ips = {
           mors = "172.139.0.2";
         };
       };
       network.gg23 = {
         instanceConfig.ips = {
           mors = "10.23.0.2";
         };
       };
     };
   };
   ```
   This doesn't work because all instance configs are applied to the same namespace, so currently this results in a conflict.
   Resolving this problem means that new inventory modules cannot be plain nixos modules anymore. If they are configured via `instances` / `instanceConfig` they cannot be configured without using the inventory. (There might be ways to inject instanceConfig, but that requires knowledge of inventory internals.)

4. Writing modules for multiple instances is cumbersome. Currently the clanModule author has to write one or multiple `fold` operations for potentially every nixos option to define how multiple service instances merge into every single option. The new idea behind this ADR is to pull the common fold function into the outer context and provide it as a common helper. (See the example below; `perInstance` is analogous to the well-known `perSystem` of flake-parts.)

5. Each role has a different interface. We need to render that interface into json-schema, which currently requires creating an unnecessary test machine. Defining the interface at a higher level (outside of any machine context) allows faster evaluation and isolation by design from any machine.
   This allows rendering the UI (options tree) of a service by just knowing the service and the corresponding roles, without creating a dummy machine.

6. The interface for defining config is wrong. It is possible to define config that applies to multiple machines at once. It is possible to define config that applies to a machine as a whole. But this is wrong behavior, because the options exist at the role level, so config must also always exist at the role level.
   Currently we merge options and config together, but that may produce conflicts. Those module-system conflicts are very hard to foresee since they depend on what roles exist at runtime.

## Proposed Change

We will create a new module class which is defined by `_class = "clan.service"` ([documented here](https://nixos.org/manual/nixpkgs/stable/#module-system-lib-evalModules-param-class)).

Existing clan modules will still work by continuing to be plain NixOS modules. All new modules can set `_class = "clan.service";` to use the proposed features.

In short, the change introduces a new module class that makes the currently necessary folding of a `clan.service`'s `instances` and `roles` a common operation. The module author can define the inner function of the fold operations, which is called a `clan.service` module.

Such a module has the following attributes:

### `roles.<roleName>.interface`

Each role can have a different interface for how to be configured.
I.e.: a `client` role might have different options than a `server` role.

This attribute should be used to define `options` (not `config`!).

The end-user defines the corresponding `config`.

This submodule will be evaluated for each `instance`/`role` combination and passed as an argument into `perInstance`.

This submodule's `options` will be evaluated to build the UI for that module dynamically.
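
A minimal sketch of such an interface (the `ips` option is illustrative only, not part of the proposal):

```nix
# sketch: a role interface that only declares options
roles.client.interface =
  { lib, ... }:
  {
    # options only; the end-user supplies the matching `config` via the inventory
    options.ips = lib.mkOption {
      type = lib.types.attrsOf lib.types.str;
      default = { };
      description = "Overlay IP per machine";
    };
  };
```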

### **Result attributes**

Some common result attributes are produced by modules of this proposal; those will be referenced later in this document but are commonly defined as:

- `nixosModule` A single nixos module. (`{config, ...}:{ environment.systemPackages = []; }`)
- `services.<serviceName>` An attribute set of `_class = clan.service` modules, which contain the same thing as this whole ADR proposes.
- `vars` To be defined. Reserved for now.
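
A shape sketch of such a result attribute set (purely illustrative):

```nix
{
  # a single plain nixos module
  nixosModule = { config, ... }: { environment.systemPackages = [ ]; };
  # optional nested clan.service modules
  services = { };
  # vars: reserved for now
}
```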

### `roles.<roleName>.perInstance`

This acts like a function that maps over all `service instances` of a given `role`.
It produces the previously defined **result attributes**.

I.e. this allows producing multiple `nixosModules`, one for every instance of the service,
hence making multiple `service instances` convenient by leveraging the module-system merge behavior.

### `perMachine`

This acts like a function that maps over all `machines` of a given `service`.
It produces the previously defined **result attributes**.

I.e. this allows producing exactly one `nixosModule` per `service`,
making it easy to set nixos options only once if they have a one-to-one relation to a service being enabled.

Note: `lib.mkIf` can be used on e.g. `roleName` to make the scope more specific.
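
A minimal sketch of the `perMachine` shape (the firewall port is illustrative only):

```nix
perMachine =
  { machine, instances, ... }:
  {
    nixosModule = {
      # evaluated exactly once per machine, no matter how many instances exist
      networking.firewall.allowedTCPPorts = [ 9999 ];
    };
  };
```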

### `services.<serviceName>`

This allows defining nested services.
E.g. the *service* `backup` might define a nested *service* `ssh` which sets up an ssh connection.

This can be defined in `perMachine` and `perInstance`.

- For every `instance` a given `service` may add multiple nested `services`.
- A given `service` may add a static set of nested `services`, even if there are multiple instances of the same given service.

Q: Why is this not a top-level attribute?
A: Because nested service definitions may also depend on a `role` which must be resolved depending on `machine` and `instance`. The top-level module doesn't know anything about machines. Keeping the service layer machine-agnostic allows us to build the UI for a module without adding any machines. (One of the problems with the current system.)

```
zerotier/default.nix
```
```nix
# Some example module
{
  _class = "clan.service";

  # Analogous to flake-parts 'perSystem', only that it maps over instances
  # The exact arguments will be specified and documented along with the actual implementation.
  roles.client.perInstance =
    {
      # attrs : settings of that instance
      settings,
      # string : name of the instance
      instanceName,
      # { name :: string , roles :: listOf string; }
      machine,
      # { {roleName} :: { machines :: listOf string; } }
      roles,
      ...
    }:
    {
      # Return a nixos module for every instance.
      # The module author must be aware that this may return multiple modules (one for every instance) which are merged natively
      nixosModule = {
        config.debug."${instanceName}-client" = settings;
      };
    };
  # Function that is called once for every machine with the role "client"
  # Receives at least the following parameters:
  #
  # machine :: { name :: String, roles :: listOf string; }
  # Name of the machine
  #
  # instances :: { instanceName :: { roleName :: { machines :: [ string ]; }}}
  # Resolved roles
  # Same type as currently in `clan.inventory.services.<ServiceName>.<InstanceName>.roles`
  #
  # The exact arguments will be specified and documented along with the actual implementation.
  perMachine = { machine, instances, ... }: {
    nixosModule =
      { lib, ... }:
      {
        # Some shared code should be put into a shared file
        # Which is then imported into all/some roles
        imports = [
          ../shared.nix
        ] ++
        (lib.optional (builtins.elem "client" machine.roles)
          {
            options.debug = lib.mkOption {
              type = lib.types.attrsOf lib.types.raw;
            };
          });
      };
  };
}
```

## Inventory.instances

This document also proposes adding a new attribute to the inventory that allows for exclusive configuration of the new modules.
This allows better separation between the new and the old way of writing and configuring modules, keeping the new implementation more focused and keeping existing technical debt out from the beginning.

The following thoughts went into this:

- Getting rid of `<serviceName>`: Using only the attribute name (a plain string) is not sufficient for defining the source of the service module. Encoding meta information into it would also require some extensible format specification and parser.
- Removing instanceConfig and machineConfig: There is no such config. Service configuration must always be role specific, because the options are defined on the role.
- Renaming `config` to `settings` or similar, since `config` is a module-system internal name.
- Tags and machines should be an attribute set to allow setting `settings` on that level instead.

```nix
{
  inventory.instances = {
    "instance1" = {
      # Allows to define where the module should be imported from.
      module = {
        input = "clan-core";
        name = "borgbackup";
      };
      # settings that apply to all client machines
      roles.client.settings = {};
      # settings that apply to the client service of machine with name <machineName>
      # There might be a server service that takes different settings on the same machine!
      roles.client.machines.<machineName>.settings = {};
      # settings that apply to all client-instances with tag <tagName>
      roles.client.tags.<tagName>.settings = {};
    };
    "instance2" = {
      # ...
    };
  };
}
```

## Iteration note

We want to implement the system as described. Once we have sufficient data on real-world use cases and modules, we might revisit this document along with the updated implementation.


## Real world example

The following module demonstrates the idea in the example of *borgbackup*.

```nix
{
  _class = "clan.service";

  # Define the 'options' of 'settings', see argument of perInstance
  roles.server.interface =
    { lib, ... }:
    {
      options.directory = lib.mkOption {
        type = lib.types.str;
        default = "/var/lib/borgbackup";
        description = ''
          The directory where the borgbackup repositories are stored.
        '';
      };
    };

  roles.server.perInstance =
    {
      instanceName,
      settings,
      roles,
      ...
    }:
    {
      nixosModule =
        { config, lib, ... }:
        let
          dir = config.clan.core.settings.directory;
          machineDir = dir + "/vars/per-machine/";
          allClients = roles.client.machines;
        in
        {
          # services.borgbackup is a native nixos option
          config.services.borgbackup.repos =
            let
              borgbackupIpMachinePath = machine: machineDir + machine + "/borgbackup/borgbackup.ssh.pub/value";

              machinesMaybeKey = builtins.map (
                machine:
                let
                  fullPath = borgbackupIpMachinePath machine;
                in
                if builtins.pathExists fullPath then
                  machine
                else
                  lib.warn ''
                    Machine ${machine} does not have a borgbackup key at ${fullPath},
                    run `clan var generate ${machine}` to generate it.
                  '' null
              ) allClients;

              machinesWithKey = lib.filter (x: x != null) machinesMaybeKey;

              hosts = builtins.map (machine: {
                name = instanceName + machine;
                value = {
                  path = "${settings.directory}/${machine}";
                  authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machine)) ];
                };
              }) machinesWithKey;
            in
            if (builtins.listToAttrs hosts) != [ ] then builtins.listToAttrs hosts else { };
        };
    };
  roles.client.interface =
    { lib, ... }:
    {
      # There might be a better interface now. This is just how clan borgbackup was configured in the 'old' way
      options.destinations = lib.mkOption {
        type = lib.types.attrsOf (
          lib.types.submodule (
            { name, ... }:
            {
              options = {
                name = lib.mkOption {
                  type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
                  default = name;
                  description = "the name of the backup job";
                };
                repo = lib.mkOption {
                  type = lib.types.str;
                  description = "the borgbackup repository to backup to";
                };
                rsh = lib.mkOption {
                  type = lib.types.nullOr lib.types.str;
                  default = null;
                  defaultText = "ssh -i \${config.clan.core.vars.generators.borgbackup.files.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
                  description = "the rsh to use for the backup";
                };
              };
            }
          )
        );
        default = { };
        description = ''
          destinations where the machine should be backed up to
        '';
      };

      options.exclude = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        example = [ "*.pyc" ];
        default = [ ];
        description = ''
          Directories/Files to exclude from the backup.
          Use * as a wildcard.
        '';
      };
    };
  roles.client.perInstance =
    {
      instanceName,
      roles,
      machine,
      settings,
      ...
    }:
    {
      nixosModule =
        {
          config,
          lib,
          pkgs,
          ...
        }:
        let
          allServers = roles.server.machines;

          # machineName = config.clan.core.settings.machine.name;

          # cfg = config.clan.borgbackup;
          preBackupScript = ''
            declare -A preCommandErrors

            ${lib.concatMapStringsSep "\n" (
              state:
              lib.optionalString (state.preBackupCommand != null) ''
                echo "Running pre-backup command for ${state.name}"
                if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
                  preCommandErrors["${state.name}"]=1
                fi
              ''
            ) (lib.attrValues config.clan.core.state)}

            if [[ ''${#preCommandErrors[@]} -gt 0 ]]; then
              echo "pre-backup commands failed for the following services:"
              for state in "''${!preCommandErrors[@]}"; do
                echo "  $state"
              done
              exit 1
            fi
          '';

          destinations =
            let
              destList = builtins.map (serverName: {
                name = "${instanceName}-${serverName}";
                value = {
                  repo = "borg@${serverName}:/var/lib/borgbackup/${machine.name}";
                  rsh = "ssh -i ${
                    config.clan.core.vars.generators."borgbackup-${instanceName}".files."borgbackup.ssh".path
                  } -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
                } // settings.destinations.${serverName};
              }) allServers;
            in
            (builtins.listToAttrs destList);
        in
        {
          config = {
            # Derived from the destinations
            systemd.services = lib.mapAttrs' (
              _: dest:
              lib.nameValuePair "borgbackup-job-${instanceName}-${dest.name}" {
                # since borgbackup mounts the system read-only, we need to run in a ExecStartPre script, so we can generate additional files.
                serviceConfig.ExecStartPre = [
                  ''+${pkgs.writeShellScript "borgbackup-job-${dest.name}-pre-backup-commands" preBackupScript}''
                ];
              }
            ) destinations;

            services.borgbackup.jobs = lib.mapAttrs (_destinationName: dest: {
              paths = lib.unique (
                lib.flatten (map (state: state.folders) (lib.attrValues config.clan.core.state))
              );
              exclude = settings.exclude;
              repo = dest.repo;
              environment.BORG_RSH = dest.rsh;
              compression = "auto,zstd";
              startAt = "*-*-* 01:00:00";
              persistentTimer = true;

              encryption = {
                mode = "repokey";
                passCommand = "cat ${config.clan.core.vars.generators."borgbackup-${instanceName}".files."borgbackup.repokey".path}";
              };

              prune.keep = {
                within = "1d"; # Keep all archives from the last day
                daily = 7;
                weekly = 4;
                monthly = 0;
              };
            }) destinations;

            environment.systemPackages = [
              (pkgs.writeShellApplication {
                name = "borgbackup-create";
                runtimeInputs = [ config.systemd.package ];
                text = ''
                  ${lib.concatMapStringsSep "\n" (dest: ''
                    systemctl start borgbackup-job-${dest.name}
                  '') (lib.attrValues destinations)}
                '';
              })
              (pkgs.writeShellApplication {
                name = "borgbackup-list";
                runtimeInputs = [ pkgs.jq ];
                text = ''
                  (${
                    lib.concatMapStringsSep "\n" (
                      dest:
                      # we need yes here to skip the changed url verification
                      ''echo y | /run/current-system/sw/bin/borg-job-${dest.name} list --json | jq '[.archives[] | {"name": ("${dest.name}::${dest.repo}::" + .name)}]' ''
                    ) (lib.attrValues destinations)
                  }) | jq -s 'add // []'
                '';
              })
              (pkgs.writeShellApplication {
                name = "borgbackup-restore";
                runtimeInputs = [ pkgs.gawk ];
                text = ''
                  cd /
                  IFS=':' read -ra FOLDER <<< "''${FOLDERS-}"
                  job_name=$(echo "$NAME" | awk -F'::' '{print $1}')
                  backup_name=''${NAME#"$job_name"::}
                  if [[ ! -x /run/current-system/sw/bin/borg-job-"$job_name" ]]; then
                    echo "borg-job-$job_name not found: Backup name is invalid" >&2
                    exit 1
                  fi
                  echo y | /run/current-system/sw/bin/borg-job-"$job_name" extract "$backup_name" "''${FOLDER[@]}"
                '';
              })
            ];
            # every borgbackup instance adds its own vars
            clan.core.vars.generators."borgbackup-${instanceName}" = {
              files."borgbackup.ssh.pub".secret = false;
              files."borgbackup.ssh" = { };
              files."borgbackup.repokey" = { };

              migrateFact = "borgbackup";
              runtimeInputs = [
                pkgs.coreutils
                pkgs.openssh
                pkgs.xkcdpass
              ];
              script = ''
                ssh-keygen -t ed25519 -N "" -f $out/borgbackup.ssh
                xkcdpass -n 4 -d - > $out/borgbackup.repokey
              '';
            };
          };
        };
    };

  perMachine = {
    nixosModule =
      { ... }:
      {
        clan.core.backups.providers.borgbackup = {
          list = "borgbackup-list";
          create = "borgbackup-create";
          restore = "borgbackup-restore";
        };
      };
  };
}
```

## Prior-art

- https://github.com/NixOS/nixops
- https://github.com/infinisil/nixus

decisions/02-clan-api.md (new file, 116 lines)
@@ -0,0 +1,116 @@
# Clan as library

## Status

Accepted

## Context

In the long term we envision the clan application will consist of the following user-facing tools:

- `CLI`
- `TUI`
- `Desktop Application`
- `REST-API`
- `Mobile Application`

We are not yet sure whether all of these will exist, but the architecture should be generic enough that they are possible without major changes to the underlying system.

## Decision

This leads to the conclusion that we should do `library`-centric development,
with the current `clan` Python code being a library that can be imported to create various tools on top of it.
All **CLI** or **UI** related parts should be moved out of the main library.

*Note: The next person who wants to implement any new frontend should do this first. Currently it looks like the TUI is the next one.*

Imagine roughly the following architecture:

```mermaid
graph TD
    %% Define styles
    classDef frontend fill:#f9f,stroke:#333,stroke-width:2px;
    classDef backend fill:#bbf,stroke:#333,stroke-width:2px;
    classDef storage fill:#ff9,stroke:#333,stroke-width:2px;
    classDef testing fill:#cfc,stroke:#333,stroke-width:2px;

    %% Define nodes
    user(["User"]) -->|Interacts with| Frontends

    subgraph "Frontends"
        CLI["CLI"]:::frontend
        APP["Desktop App"]:::frontend
        TUI["TUI"]:::frontend
        REST["REST API"]:::frontend
    end

    subgraph "Python"
        API["Library <br>for interacting with clan"]:::backend
        BusinessLogic["Business Logic<br>Implements actions like 'machine create'"]:::backend
        STORAGE[("Persistence")]:::storage
        NIX["Nix Eval & Build"]:::backend
    end

    subgraph "CI/CD & Tests"
        TEST["Feature Testing"]:::testing
    end

    %% Define connections
    CLI --> API
    APP --> API
    TUI --> API
    REST --> API

    TEST --> API

    API --> BusinessLogic
    BusinessLogic --> STORAGE
    BusinessLogic --> NIX
```

With this very simple design it is ensured that all the basic features remain stable across all frontends.
In the end it is straightforward to create Python library function calls in a testing framework to ensure that kind of stability.

Integration tests and smaller unit tests should both be utilized to ensure the stability of the library.

Note: Library functions don't have to be JSON-serializable in general.

Persistence includes but is not limited to: creating git commits, writing to inventory.json, reading and writing vars, and to/from disk in general.

## Benefits / Drawbacks

- (+) Less tight coupling of frontend- / backend-teams
- (+) Consistency and inherent behavior
- (+) Performance & Scalability
- (+) Different frontends for different user groups
- (+) Documentation per library function makes it convenient to interact with the clan resources.
- (+) Testing the library ensures stability of the foundations for all layers above.
- (-) Complexity overhead
- (-) The library needs to be designed / documented
  - (+) The library can be well documented since it is a finite set of functions.
- (-) Error handling might be harder.
  - (+) Common error reporting
- (-) Different frontends need different features. The library must include them all.
  - (+) All those core features must be implemented anyways.
- (+) VPN benchmarking uses the existing libraries already and works relatively well.

## Implementation considerations

Not all details that will need to change over time can be pointed out ahead of time.
The goal of this document is to create a common understanding of how we would like our project to be structured.
Any future commits should contribute to this goal.

Some ideas of what might need to change:

- Having separate locations or packages for the library and the CLI.
- Rename the `clan_cli` package to `clan` and move the `cli` frontend into a subfolder or a separate package.
- argparse or other CLI-related code should not exist in the `clan` Python library.
- `__init__.py` should be very minimal. Only init the business logic models and resources. Note that all `__init__.py` files all the way up in the module tree are always executed as part of the Python module import logic and thus should be as small as possible.
  I.e. `from clan_cli.vars.generators import ...` executes both `clan_cli/__init__.py` and `clan_cli/vars/__init__.py` if any of those exist.
- An `api` folder doesn't make sense since the Python library `clan` is the API.
- Logic needed for the webui that performs JSON serialization and deserialization will live in some `json-adapter` folder or package.
- Code for serializing dataclasses and typed dictionaries is needed for the persistence layer. (I.e. for read-write of inventory.json.)
- The inventory-json is a backend resource that is internal. Its logic includes merging, unmerging and partial updates, taking Nix values and their priorities into account. Nobody should try to read or write to it directly.
  Instead there will be library methods, e.g. to add a `service` or to update/read/delete some information from it.
- Library functions should be carefully designed with suitable conventions for writing good APIs in mind. (E.g.: https://swagger.io/resources/articles/best-practices-in-api-design/)

decisions/README.md (new file, 1 line)
@@ -0,0 +1 @@
see [architecture-decision-record](https://github.com/joelparkerhenderson/architecture-decision-record)

decisions/_template.md (new file, 24 lines)
@@ -0,0 +1,24 @@
# Decision record template by Michael Nygard

This is the template in [Documenting architecture decisions - Michael Nygard](http://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions).
You can use [adr-tools](https://github.com/npryce/adr-tools) for managing the ADR files.

In each ADR file, write these sections:

# Title

## Status

What is the status, such as proposed, accepted, rejected, deprecated, superseded, etc.?

## Context

What is the issue that we're seeing that is motivating this decision or change?

## Decision

What is the change that we're proposing and/or doing?

## Consequences

What becomes easier or more difficult to do because of this change?

docs/.gitignore (vendored, 4 lines changed)
@@ -1,3 +1,3 @@
 /site/reference
-/site/static/Roboto-Regular.ttf
-/site/static/FiraCode-VF.ttf
+/site/static
+!/site/static/extra.css
@@ -1,4 +1,5 @@

# Contributing
# Contributing to Clan

**Continuous Integration (CI)**: Each pull request gets automatically tested by Gitea. If any errors are detected, the pull request is blocked until they're resolved.

@@ -20,14 +21,14 @@ Let's get your development environment up and running:

curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```

2. **Install direnv**:
1. **Install direnv**:

- To automatically set up a devshell on entering the directory
```bash
nix profile install nixpkgs#nix-direnv-flakes nixpkgs#direnv
```

3. **Add direnv to your shell**:
1. **Add direnv to your shell**:

- Direnv needs to [hook into your shell](https://direnv.net/docs/hook.html) to work.
You can do this by executing the following command. The example below will set up direnv for `zsh` and `bash`:

@@ -36,10 +37,10 @@ Let's get your development environment up and running:

echo 'eval "$(direnv hook zsh)"' >> ~/.zshrc && echo 'eval "$(direnv hook bash)"' >> ~/.bashrc && eval "$SHELL"
```

3. **Allow the devshell**
1. **Allow the devshell**
- Go to `clan-core/pkgs/clan-cli` and run `direnv allow` to set up the necessary development environment to execute the `clan` command

4. **Create a Gitea Account**:
1. **Create a Gitea Account**:
- Register an account on https://git.clan.lol
- Fork the [clan-core](https://git.clan.lol/clan/clan-core) repository
- Clone the repository and navigate to it

@@ -47,30 +48,7 @@ Let's get your development environment up and running:

```bash
git remote add upstream gitea@git.clan.lol:clan/clan-core.git
```
5. **Create an access token**:
- Log in to Gitea.
- Go to your account settings.
- Navigate to the Applications section.
- Click Generate New Token.
- Name your token and select all available scopes.
- Generate the token and copy it for later use.
- Your access token is now ready to use with all permissions.

5. **Register Your Gitea Account Locally**:

- Execute the following command to add your Gitea account locally:
```bash
tea login add
```
- Fill out the prompt as follows:
- URL of Gitea instance: `https://git.clan.lol`
- Name of new Login [git.clan.lol]:
- Do you have an access token? Yes
- Token: <yourtoken>
- Set Optional settings: No

6. **Allow .envrc**:
1. **Allow .envrc**:

- When you enter the directory, you'll receive an error message like this:
```bash

@@ -78,7 +56,7 @@ Let's get your development environment up and running:

```
- Execute `direnv allow` to automatically execute the shell script `.envrc` when entering the directory.

7. **(Optional) Install Git Hooks**:
1. **(Optional) Install Git Hooks**:
- To syntax check your code you can run:
```bash
nix fmt

@@ -88,18 +66,44 @@ Let's get your development environment up and running:

./scripts/pre-commit
```

8. **Open a Pull Request**:
- To automatically open up a pull request you can use our tool called:
```
merge-after-ci --reviewers Mic92 Lassulus Qubasa
```
## Related Projects

- **Data Mesher**: [data-mesher](https://git.clan.lol/clan/data-mesher)
- **Nixos Facter**: [nixos-facter](https://github.com/nix-community/nixos-facter)
- **Nixos Anywhere**: [nixos-anywhere](https://github.com/nix-community/nixos-anywhere)
- **Disko**: [disko](https://github.com/nix-community/disko)

## Fixing Bugs or Adding Features in Clan-CLI

### What's Next?
If you have a bug fix or feature that involves a related project, clone the relevant repository and replace its invocation in your local setup.

Please look into the [debugging](./debugging.md) guide next!
For instance, if you need to update `nixos-anywhere` in clan-cli, find its usage:

```python
run(
    nix_shell(
        ["nixpkgs#nixos-anywhere"],
        cmd,
    ),
    RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
)
```

You can replace `"nixpkgs#nixos-anywhere"` with your local path:

```python
run(
    nix_shell(
        ["<path_to_local_src>#nixos-anywhere"],
        cmd,
    ),
    RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
)
```

The `<path_to_local_src>` doesn't need to be a local path; it can be any valid [flakeref](https://nix.dev/manual/nix/2.26/command-ref/new-cli/nix3-flake.html#flake-references).
It can thus point to an already opened PR, for example, to test it.
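For example, a flakeref pointing at a branch of the upstream GitHub repository could look like this (the branch name here is hypothetical):

```python
run(
    nix_shell(
        # any valid flakeref works, e.g. the branch of an open upstream PR
        ["github:nix-community/nixos-anywhere/my-feature-branch#nixos-anywhere"],
        cmd,
    ),
    RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
)
```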
# Standards

@@ -43,27 +43,31 @@ exclude_docs: |

nav:
- Home: index.md
- Getting Started:
- Getting Started: getting-started/index.md
- Installer: getting-started/installer.md
- Configure: getting-started/configure.md
- Setup Clan: getting-started/index.md
- Create Installer: getting-started/installer.md
- Add Machines: getting-started/configure.md
- Secrets & Facts: getting-started/secrets.md
- Deploy Machine: getting-started/deploy.md
- Continuous Integration: getting-started/check.md
- Guides:
- Overview: manual/index.md
- Disk Encryption: getting-started/disk-encryption.md
- Mesh VPN: getting-started/mesh-vpn.md
- Backup & Restore: getting-started/backups.md
- Adding Machines: manual/adding-machines.md
- Vars Backend: manual/vars-backend.md
- Facts Backend: manual/secrets.md
- Autoincludes: manual/adding-machines.md
- Inventory: manual/inventory.md
- Secrets: manual/secrets.md
- Secure Boot: manual/secure-boot.md
- Flake-parts: manual/flake-parts.md
- Authoring:
- Modules: clanmodules/index.md
- Disk Templates: manual/disk-templates.md
- Contribute: manual/contribute.md
- Debugging: manual/debugging.md
- Contributing:
- Contribute: contributing/contribute.md
- Debugging: contributing/debugging.md
- Testing: contributing/testing.md
- Repo Layout: manual/repo-layout.md
- Migrate existing Flakes: manual/migration-guide.md
# - Concepts:
#   - Overview: concepts/index.md
- Reference:

@@ -91,6 +95,7 @@ nav:

- reference/clanModules/matrix-synapse.md
- reference/clanModules/moonlight.md
- reference/clanModules/mumble.md
- reference/clanModules/mycelium.md
- reference/clanModules/nginx.md
- reference/clanModules/packages.md
- reference/clanModules/postgresql.md

@@ -105,6 +110,7 @@ nav:

- reference/clanModules/thelounge.md
- reference/clanModules/trusted-nix-caches.md
- reference/clanModules/user-password.md
- reference/clanModules/auto-upgrade.md
- reference/clanModules/vaultwarden.md
- reference/clanModules/xfce.md
- reference/clanModules/zerotier-static-peers.md

@@ -119,6 +125,7 @@ nav:

- reference/cli/flash.md
- reference/cli/history.md
- reference/cli/machines.md
- reference/cli/select.md
- reference/cli/secrets.md
- reference/cli/show.md
- reference/cli/ssh.md
@@ -69,7 +69,13 @@

];
}
''
export CLAN_CORE_PATH=${self}
export CLAN_CORE_PATH=${
  self.filter {
    include = [
      "clanModules"
    ];
  }
}
export CLAN_CORE_DOCS=${jsonDocs.clanCore}/share/doc/nixos/options.json
# A file that contains the links to all clanModule docs
export CLAN_MODULES_VIA_ROLES=${clanModulesViaRoles}
@@ -103,7 +103,7 @@ def render_option(

read_only = option.get("readOnly")

res = f"""
{"#" * level} {sanitize(name) if short_head is None else sanitize(short_head)} {"{: #"+sanitize_anchor(name)+"}" if level > 1 else ""}
{"#" * level} {sanitize(name) if short_head is None else sanitize(short_head)} {"{: #" + sanitize_anchor(name) + "}" if level > 1 else ""}

"""

@@ -125,7 +125,7 @@ def render_option(

**Default**:

```nix
{option.get("default",{}).get("text") if option.get("default") else "No default set."}
{option.get("default", {}).get("text") if option.get("default") else "No default set."}
```
"""
example = option.get("example", {}).get("text")

@@ -585,7 +585,7 @@ Each attribute is documented below

```nix
buildClan {
  directory = self;
  self = self;
  machines = {
    jon = { };
    sara = { };
@@ -48,12 +48,12 @@ clanModules/borgbackup

=== "User module"

If the module should be ad-hoc loaded.
It can be made avilable in any project via the [`clan.inventory.modules`](../reference/nix-api/inventory.md#inventory.modules) attribute.
It can be made available in any project via the [`clan.inventory.modules`](../reference/nix-api/inventory.md#inventory.modules) attribute.

```nix title="flake.nix"
# ...
buildClan {
# 1. Add the module to the avilable inventory modules
# 1. Add the module to the available clanModules with inventory support
inventory.modules = {
  custom-module = ./modules/my_module;
};

@@ -111,7 +111,7 @@ Adds the roles: `client` and `server`

Sometimes a `ClanModule` should be usable via both clan's `inventory` concept but also natively as a NixOS module.

> In the long term, we want most modules to implement support for the inventory,
> but we are also aware that there are certain low-level modules that always serve as a backend for other higher-level inventory modules.
> but we are also aware that there are certain low-level modules that always serve as a backend for other higher-level `clanModules` with inventory support.
> These modules may not want to implement inventory interfaces as they are always used directly by other modules.

This can be achieved by placing an additional `default.nix` into the root of the ClanModules directory as shown:
@@ -1,4 +1,3 @@

# Debugging

Here are some methods for debugging and testing the clan-cli.

@@ -52,6 +51,20 @@ wintux

If you're using VSCode, it has a handy feature that makes paths to source code files clickable in the integrated terminal. Combined with the previously mentioned techniques, this allows you to open a Clan in VSCode, execute a command like `clan machines list --debug`, and receive a printed path to the code that initiates the subprocess. With the `Ctrl` key (or `Cmd` on macOS) and a mouse click, you can jump directly to the corresponding line in the code file and add a `breakpoint()` call to inspect the internal state.

## Finding Print Messages

To identify where a specific print message comes from, you can enable a helpful feature. Simply set the environment variable `export TRACE_PRINT=1`. When you run commands in `--debug` mode, each print message will include information about its source location.

If you need more details, you can expand the stack trace information that appears with each print by setting the environment variable `export TRACE_DEPTH=3`.
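A typical debugging session could then look like this (using `clan machines list` as an arbitrary example command):

```bash
export TRACE_PRINT=1
export TRACE_DEPTH=3   # optional: show more stack frames per print message
clan machines list --debug
```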
## Analyzing Performance

To understand what's causing slow performance, set the environment variable `export CLAN_CLI_PERF=1`. When a clan command completes, you'll see a summary of various performance metrics, helping you identify what's taking up time.
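For example:

```bash
export CLAN_CLI_PERF=1
clan machines list   # prints the performance summary when the command finishes
```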
## See all possible packages and tests

To quickly show all possible packages and tests execute:

@@ -152,6 +165,3 @@ If you need to inspect the Nix sandbox while running tests, follow these steps:

Or you can also use the [nix breakpoint hook](https://nixos.org/manual/nixpkgs/stable/#breakpointhook).

### What's next?

Please look into the [repo layout](./repo-layout.md) guide next!

316
docs/site/contributing/testing.md
Normal file
@@ -0,0 +1,316 @@

# Testing your contributions

Each feature added to clan should be tested extensively via automated tests.

This document covers different methods of automated testing, including creating, running and debugging such tests.

In order to test the behavior of clan, different testing frameworks are used depending on the concern:

- NixOS VM tests: for high-level integration
- NixOS container tests: for high-level integration
- Python tests via pytest: for unit tests and integration tests
- Nix eval tests: for nix functions, libraries, modules, etc.

## NixOS VM Tests

The [NixOS VM Testing Framework](https://nixos.org/manual/nixos/stable/index.html#sec-nixos-tests) is used to create high-level integration tests, by running one or more VMs generated from a specified config. Commands can be executed on the booted machine(s) to verify a deployment of a service works as expected. All machines within a test are connected by a virtual network. Internet access is not available.

### When to use VM tests

- testing that a service defined through a clan module works as expected after deployment
- testing clan-cli subcommands which require accessing a remote machine

### When not to use VM tests

NixOS VM tests are slow and expensive. They should only be used for testing high-level integration of components.
VM tests should be avoided wherever it is possible to implement a cheaper unit test instead.

- testing detailed behavior of a certain clan-cli command -> use unit testing via pytest instead
- regression testing -> add a unit test

### Finding examples for VM tests

Existing NixOS VM tests in clan-core can be found by using ripgrep:

```shellSession
rg "import.*/lib/test-base.nix"
```

### Locating definitions of failing VM tests

All NixOS VM tests in clan are exported as individual flake outputs under `checks.x86_64-linux.{test-attr-name}`.
If a test fails in CI:

- look for the job name of the test near the top of the CI job page, for example `gitea:clan/clan-core#checks.x86_64-linux.borgbackup/1242`
- in this case `checks.x86_64-linux.borgbackup` is the attribute path
- note the last element of that attribute path, in this case `borgbackup`
- search for the attribute name inside the `/checks` directory via ripgrep

Example: locating the VM test named `borgbackup`:

```shellSession
$ rg "borgbackup =" ./checks
./checks/flake-module.nix
41:      borgbackup = import ./borgbackup nixosTestArgs;
```

-> the location of that test is `/checks/flake-module.nix` line `41`.

### Adding VM tests

Create a NixOS test module under `/checks/{name}/default.nix` and import it in `/checks/flake-module.nix`.
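A minimal sketch of such a test module is shown below; the exact wiring of `/lib/test-base.nix` may differ, so check an existing test such as `/checks/borgbackup` for the authoritative pattern:

```nix
# checks/my-service/default.nix (illustrative; "my-service" is a made-up name)
{ ... }:
{
  name = "my-service";
  nodes.machine =
    { ... }:
    {
      services.openssh.enable = true;
    };
  testScript = ''
    machine.wait_for_unit("sshd.service")
    print(machine.succeed("systemctl --failed"))
  '';
}
```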
### Running VM tests

```shellSession
nix build .#checks.x86_64-linux.{test-attr-name}
```
(replace `{test-attr-name}` with the name of the test)

### Debugging VM tests

The following techniques can be used to debug a VM test:

#### Print Statements

Locate the definition (see above) and add print statements, for example `print(client.succeed("systemctl --failed"))`, then re-run the test via `nix build` (see above).

#### Interactive Shell

- Execute the VM test outside the nix sandbox via the following command:
`nix run .#checks.x86_64-linux.{test-attr-name}.driver -- --interactive`
- Then run the commands in the machines manually, for example:
```python3
start_all()
machine1.succeed("echo hello")
```

#### Breakpoints

To get an interactive shell at a specific line in the VM test script, add a `breakpoint()` call before the line to debug, then run the test outside of the sandbox via:
`nix run .#checks.x86_64-linux.{test-attr-name}.driver`

## NixOS Container Tests

These are very similar to NixOS VM tests, in that they run virtualized NixOS machines, but instead of VMs they use containers, which are much cheaper to launch.
As of now the container test driver is a downstream development in clan-core.
Basically everything stated under the NixOS VM tests section applies here, except for some limitations.

Limitations:

- does not yet support networking
- supports only one machine as of now

### Where to find examples for NixOS container tests

Existing NixOS container tests in clan-core can be found by using ripgrep:

```shellSession
rg "import.*/lib/container-test.nix"
```

## Python tests via pytest

Since the clan cli is written in python, the `pytest` framework is used to define unit tests and integration tests via python.

Due to their superior efficiency, python tests should be preferred over NixOS tests wherever possible.

### When to use python tests

- writing unit tests for python functions and modules, or bugfixes of such
- all integration tests that do not require building or running a nixos machine
- impure integration tests that require internet access (very rare, try to avoid)

### When not to use python tests

- integration tests that require building or running a nixos machine (use NixOS VM or container tests instead)
- testing behavior of a nix function or library (use nix eval tests instead)

### Finding examples of python tests

Existing python tests in clan-core can be found by using ripgrep:

```shellSession
rg "import pytest"
```

### Locating definitions of failing python tests

If any python test fails in the CI pipeline, an error message like this can be found at the end of the log:

```
...
FAILED tests/test_machines_cli.py::test_machine_delete - clan_cli.errors.ClanError: Template 'new-machine' not in 'inputs.clan-core
...
```

In this case the test is defined in the file `/tests/test_machines_cli.py` via the test function `test_machine_delete`.

### Adding python tests

If a specific python module is tested, the test should be located near the tested module in a subdirectory called `./tests`.
If the test is not clearly related to a specific module, put it in the top-level `./tests` directory of the tested python package. For `clan-cli` this would be `/pkgs/clan-cli/clan_cli/tests`.
All filenames must be prefixed with `test_` and test functions prefixed with `test_` for pytest to discover them.
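A minimal example of such a test (file and function names are hypothetical):

```python
# pkgs/clan-cli/clan_cli/tests/test_example.py
def add(a: int, b: int) -> int:
    return a + b


def test_add() -> None:
    assert add(2, 3) == 5
```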
### Running python tests

#### Running all python tests

To run all python tests which are executed in the CI pipeline locally, use this `nix build` command:

```shellSession
nix build .#checks.x86_64-linux.clan-pytest-{with,without}-core
```

#### Running a specific python test

To run a specific python test outside the nix sandbox:

1. Enter the development environment of the python package, by either:
- having direnv enabled and entering the directory of the package (eg. `/pkgs/clan-cli`)
- or using the command `select-shell {package}` in the top-level dev shell of clan-core (eg. `select-shell clan-cli`)
2. Execute the test via pytest by issuing:
`pytest ./path/to/test_file.py::test_function_name -s -n0`

The flags `-sn0` are useful to forward all stdout/stderr output to the terminal and to allow interactive debugging via `breakpoint()`.

### Debugging python tests

To debug a specific python test, find its definition (see above) and make sure to enter the correct dev environment for that python package.

Modify the test and add `breakpoint()` statements to it.

Execute the test using the flags `-sn0` in order to get an interactive shell at the breakpoint:

```shellSession
pytest ./path/to/test_file.py::test_function_name -sn0
```

## Nix Eval Tests

### When to use nix eval tests

Nix eval tests are good for testing any nix logic, including:

- nix functions
- nix libraries
- modules for the nixos module system

### When not to use nix eval tests

- tests that require building nix derivations (except some very cheap ones)
- tests that require running programs written in other languages
- tests that require building or running nixos machines

### Finding examples of nix eval tests

Existing nix eval tests can be found via this ripgrep command:

```shellSession
rg "nix-unit --eval-store"
```

### Locating definitions of failing nix eval tests

Failing nix eval tests look like this:

```shellSession
> ✅ test_attrsOf_attrsOf_submodule
> ✅ test_attrsOf_submodule
> ❌ test_default
> /build/nix-8-2/expected.nix --- Nix
> 1 { foo = { bar = { __prio = 1500; }; }   1 { foo = { bar = { __prio = 1501; }; }
> . ; }                                     . ; }
>
>
> ✅ test_no_default
> ✅ test_submodule
> ✅ test_submoduleWith
> ✅ test_submodule_with_merging
>
> 😢 6/7 successful
> error: Tests failed
```

To locate the definition, find the flake attribute name of the failing test near the top of the CI job page, for example `gitea:clan/clan-core#checks.x86_64-linux.lib-values-eval/1242`.

In this case `lib-values-eval` is the attribute we are looking for.

Find the attribute via ripgrep:

```shellSession
$ rg "lib-values-eval ="
lib/values/flake-module.nix
21:        lib-values-eval = pkgs.runCommand "tests" { nativeBuildInputs = [ pkgs.nix-unit ]; } ''
```

In this case the test is defined in the file `lib/values/flake-module.nix` line 21.

### Adding nix eval tests

In clan core, the following pattern is usually followed:

- tests are put in a `test.nix` file
- a CI job is exposed via a `flake-module.nix`
- that `flake-module.nix` is imported via the `flake.nix` at the root of the project

For example see `/lib/values/{test.nix,flake-module.nix}`.
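A minimal sketch of the `test.nix` shape (the test name and values are invented; `nix-unit` evaluates each attribute and compares `expr` against `expected`, as described below):

```nix
{
  test_addition = {
    expr = 1 + 1;
    expected = 2;
  };
}
```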
### Running nix eval tests

Since all nix eval tests are exposed via the flake outputs, they can be run via `nix build`:

```shellSession
nix build .#checks.x86_64-linux.{test-attr-name}
```

For quicker iteration times, instead of `nix build` use the `nix-unit` command available in the dev environment.
Example:

```shellSession
nix-unit --flake .#legacyPackages.x86_64-linux.{test-attr-name}
```

### Debugging nix eval tests

Follow the instructions above to find the definition of the test, then use one of the following techniques:

#### Print debugging

Add `lib.trace` or `lib.traceVal` statements in order to print some variables during evaluation.
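For example, a quick sketch of both helpers (the values are arbitrary):

```nix
let
  lib = (import <nixpkgs> { }).lib;
  # lib.traceVal prints the value itself ("trace: 2") and returns it unchanged
  value = lib.traceVal (1 + 1);
in
# lib.trace prints the given message before returning its second argument
lib.trace "finished evaluating" value
```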
#### Nix repl

Use `nix repl` to evaluate and inspect the test.

Each test consists of an `expr` (expression) and an `expected` field. `nix-unit` simply checks if `expr == expected` and prints the diff if that's not the case.

`nix repl` can be used to inspect `expr` manually, or any other variables that you choose to expose.

Example:

```shellSession
$ nix repl
Nix 2.25.5
Type :? for help.
nix-repl> tests = import ./lib/values/test.nix {}

nix-repl> tests
{
  test_attrsOf_attrsOf_submodule = { ... };
  test_attrsOf_submodule = { ... };
  test_default = { ... };
  test_no_default = { ... };
  test_submodule = { ... };
  test_submoduleWith = { ... };
  test_submodule_with_merging = { ... };
}

nix-repl> tests.test_default.expr
{
  foo = { ... };
}
```
@@ -1,6 +1,4 @@

# Backups

## Introduction to Backups
# Introduction to Backups

When you're managing your own services, creating regular backups is crucial to ensure your data's safety.
This guide introduces you to Clan's built-in backup functionalities.

@@ -9,8 +7,6 @@ We might add more options in the future, but for now, let's dive into how you ca

## Backing Up Locally with Localbackup

### What is Localbackup?

Localbackup lets you back up your data onto physical storage devices connected to your computer,
such as USB hard drives or network-attached storage. It uses a tool called rsnapshot for this purpose.

@@ -147,3 +143,25 @@ Ensure the path to the public key is correct.

```bash
clan backups create mymachine
```

- **Restoring Backups:** To restore a backup that has been listed by the list command (NAME):

```bash
clan backups restore [MACHINE] [PROVIDER] [NAME]
```

Example (restoring a machine called `client` with the backup provider `borgbackup`):

```bash
clan backups restore client borgbackup [NAME]
```

The `backups` command is service-aware and allows optional specification of the `--service` flag.

To restore only the service called `zerotier` on a machine called `client` through the backup provider `borgbackup`, use the following command:

```bash
clan backups restore client borgbackup [NAME] --service zerotier
```
28
docs/site/getting-started/check.md
Normal file

@@ -0,0 +1,28 @@

### Generate Facts and Vars

Typically, this step is handled automatically when a machine is deployed. However, to enable the use of `nix flake check` with your configuration, it must be completed manually beforehand.

Currently, generating all the necessary facts requires two separate commands. This is due to the coexistence of two parallel secret management solutions:
the newer, recommended version (`clan vars`) and the older version (`clan facts`) that we are slowly phasing out.

To generate both facts and vars, execute the following commands:

```sh
clan facts generate && clan vars generate
```

### Check Configuration

Validate your configuration by running:

```bash
nix flake check
```

This command helps ensure that your system configuration is correct and free from errors.

!!! Tip

You can integrate this step into your [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) workflow to ensure that only valid Nix configurations are merged into your codebase.
@@ -1,10 +1,8 @@

# Configuration - How to configure clan with your own machines

Managing machine configurations can be done in the following ways:

- writing `nix` expressions in a `flake.nix` file,
- placing `autoincluded` files into your machine directory,
- configuring everything in a simple UI (upcoming).

Clan currently offers the following methods to configure machines:

@@ -80,9 +78,14 @@ Adding or configuring a new machine requires two simple steps:

└─nvme0n1p3 nvme-eui.e8238fa6bf530001001b448b4aec2929-part3 swap          16.8G
```

1. Edit the following fields inside the `./machines/jon/configuration.nix` and/or `./machines/sara/configuration.nix`
!!! Warning
Make sure to copy the `ID-LINK` from the top-level disk device like `nvme0n1` or `sda` instead of `nvme0n1p1` or `sda1`

```nix title="./machines/<machine>/configuration.nix" hl_lines="13 18 23 27"

2. Edit the following fields inside the `./machines/jon/configuration.nix` and/or `./machines/sara/configuration.nix`

<!-- Note: Use "jon" instead of "<machine>" as "<" is not supported in title tag -->
```nix title="./machines/jon/configuration.nix" hl_lines="13 18 22 26"
{
imports = [
./hardware-configuration.nix

@@ -95,16 +98,15 @@ Adding or configuring a new machine requires two simple steps:

];

# Put your username here for login
users.users.user.username = "__YOUR_USERNAME__";
users.users.user.name = "__YOUR_USERNAME__";

# Set this for clan commands use ssh i.e. `clan machines update`
# Set this for clan commands that use ssh
# If you change the hostname, you need to update this line to root@<new-hostname>
# This only works however if you have avahi running on your admin machine else use IP
clan.core.networking.targetHost = "root@__IP__";

# You can get your disk id by running the following command on the installer:
# Replace <IP> with the IP of the installer printed on the screen or by running the `ip addr` command.
# ssh root@<IP> lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT

# Replace this __CHANGE_ME__ with the result of the lsblk command from step 1.
disko.devices.disk.main.device = "/dev/disk/by-id/__CHANGE_ME__";

# IMPORTANT! Add your SSH key here

@@ -115,79 +117,32 @@ Adding or configuring a new machine requires two simple steps:

}
```
You can also create additional machines using the `clan machines create` command:

```
$ clan machines create --help
usage: clan [-h] [SUBCOMMAND] machines create [-h] [--tags TAGS [TAGS ...]] [--template-name TEMPLATE_NAME]
            [--target-host TARGET_HOST] [--debug] [--option name value] [--flake PATH]
            machine_name

positional arguments:
  machine_name          The name of the machine to create

options:
  -h, --help            show this help message and exit
  --tags TAGS [TAGS ...]
                        Tags to associate with the machine. Can be used to assign multiple machines to services.
  --template-name TEMPLATE_NAME
                        The name of the template machine to import
  --target-host TARGET_HOST
                        Address of the machine to install and update, in the format of user@host:1234
  --debug               Enable debug logging
  --option name value   Nix option to set
  --flake PATH          path to the flake where the clan resides in, can be a remote flake or local, can be set through
                        the [CLAN_DIR] environment variable
```

!!! Info "Replace `__YOUR_USERNAME__` with the ip of your machine, if you use avahi you can also use your hostname"
!!! Info "Replace `__IP__` with the ip of your machine, if you use avahi you can also use your hostname"
!!! Info "Replace `__CHANGE_ME__` with the appropriate identifier, such as `nvme-eui.e8238fa6bf530001001b448b4aec2929`"
!!! Info "Replace `__CHANGE_ME__` with the appropriate `ID-LINK` identifier, such as `nvme-eui.e8238fa6bf530001001b448b4aec2929`"
!!! Info "Replace `__YOUR_SSH_KEY__` with your personal key, like `ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILoMI0NC5eT9pHlQExrvR5ASV3iW9+BXwhfchq0smXUJ jon@jon-desktop`"

These steps will allow you to update your machine later.

### Step 2: Detect Drivers
You can also create additional machines using the cli:

Generate the `hardware-configuration.nix` file for your machine by executing the following command:
```
$ clan machines create <machinename>
```

```bash
clan machines update-hardware-config [MACHINE_NAME] [HOSTNAME]
```

Replace `[MACHINE_NAME]` with the name of the machine, e.g. `jon`, and `[HOSTNAME]` with the IP address or hostname of the machine within the network.

!!! Example
```bash
clan machines update-hardware-config jon
```

This command connects to the ip configured in the previous step, runs `nixos-generate-config` to detect hardware configurations (excluding filesystems), and writes them to `machines/jon/hardware-configuration.nix`.

### Step 3: Custom Disk Formatting
### Step 2: Custom Disk Formatting

In `./modules/disko.nix`, a simple `ext4` disk partitioning scheme is defined for the Disko module. For more complex disk partitioning setups,
refer to the [Disko templates](https://github.com/nix-community/disko-templates) or [Disko examples](https://github.com/nix-community/disko/tree/master/example).

### Step 4: Custom Configuration
### (Optional): Renaming a Machine

Modify `./machines/jon/configuration.nix` to personalize the system settings according to your requirements.
If you wish to name your machine something else, do the following steps:
To rename jon to your own machine name, you can use the following command:

```
mv ./machines/jon/configuration.nix ./machines/newname/configuration.nix
git mv ./machines/jon ./machines/newname
```

Then rename `jon` to your preferred name in `machines` in `flake.nix` as well as in the import line:

```diff
- imports = [ ./machines/jon/configuration.nix ];
+ imports = [ ./machines/__NEW_NAME__/configuration.nix ];
```

!!! Info "Replace `__NEW_NAME__` with the name of the machine"

Note that our clan lives inside a git repository.
Only files that have been added with `git add` are recognized by `nix`.
So for every file that you add or rename you also need to run:
@@ -196,22 +151,11 @@ So for every file that you add or rename you also need to run:

git add ./path/to/my/file
```

For renaming jon to your own machine name, you can use the following command:

```
git mv ./machines/jon ./machines/newname
```
### (Optional): Removing a Machine

If you only want to set up a single machine at this point, you can delete `sara` from `flake.nix` as well as from the machines directory:

```
git rm ./machines/sara
git rm -rf ./machines/sara
```

---

## What's next?

- [Secrets & Facts](secrets.md): Setting up secrets with sops-nix

---
@@ -1,14 +1,7 @@

# Deploy Machine
# Deploy your Clan

Integrating a new machine into your Clan environment is an easy yet flexible process, allowing for straightforward management of multiple NixOS configurations.
Now that you have created a new machine, we will walk through how to install it.

We'll walk you through adding a new computer to your Clan.

## Installing a New Machine

Clan CLI, in conjunction with [nixos-anywhere](https://github.com/nix-community/nixos-anywhere), provides a seamless method for installing NixOS on various machines.

This process involves preparing a suitable hardware and disk partitioning configuration and ensuring the target machine is accessible via SSH.

### Step 0. Prerequisites

@@ -25,7 +18,7 @@ This process involves preparing a suitable hardware and disk partitioning config

2. Boot the target machine and connect it to a network that makes it reachable from your setup computer.

=== "**Remote Machines**"
=== "**Cloud VMs**"

- [x] **Two Computers**: You need one computer that you're getting ready (we'll call this the Target Computer) and another one to set it up from (we'll call this the Setup Computer). Make sure both can talk to each other over the network using SSH.
- [x] **Machine configuration**: See our basic [configuration guide](./configure.md)

@@ -108,32 +101,27 @@ This process involves preparing a suitable hardware and disk partitioning config

For easy sharing of deployment information via QR code, we highly recommend using [KDE Connect](https://apps.kde.org/de/kdeconnect/).

There are two ways to deploy your machine:
=== "**Password Auth**"
Run the following command to log in over SSH with password authentication:
```bash
clan machines install [MACHINE] --target-host <IP> --update-hardware-config nixos-facter
```
=== "**QR Code Auth**"
Using the JSON contents of the QR code:
```terminal
clan machines install [MACHINE] --json "[JSON]" --update-hardware-config nixos-facter
```
OR using a picture containing the QR code:
```terminal
clan machines install [MACHINE] --png [PATH] --update-hardware-config nixos-facter
```

1. **SSH with Password Authentication**
Run the following command to install using SSH:
```bash
clan machines install [MACHINE] --target-host <IP>
```

2. **Scanning a QR Code for Installation Details**
You can input the information by following one of these methods:
- **Using a JSON String or File Path:**
Provide the path to a JSON string or input the string directly:
```terminal
clan machines install [MACHINE] --json [JSON]
```
- **Using an Image Containing the QR Code:**
Provide the path to an image file containing the relevant QR code:
```terminal
clan machines install [MACHINE] --png [PATH]
```

=== "**SSH access**"
=== "**Cloud VM**"

Replace `<target_host>` with the **target computer's IP address**:

```bash
clan machines install [MACHINE] --target-host <target_host>
clan machines install [MACHINE] --target-host <target_host> --update-hardware-config nixos-facter
```

@@ -215,12 +203,4 @@ buildClan {

This is useful for machines that are not always online or are not part of the regular update cycle.

---

## What's next?

- [**Disk Encryption**](./disk-encryption.md): Configure disk encryption with remote decryption
- [**Mesh VPN**](./mesh-vpn.md): Configuring a secure mesh network.

---
@@ -49,7 +49,8 @@ Replace `kernelModules` with the ethernet module loaded one on your target machi

port = 7172;
authorizedKeys = [ "<yourkey>" ];
hostKeys = [
"/var/lib/initrd-ssh-key"
"/var/lib/initrd_host_ed25519_key"
"/var/lib/initrd_host_rsa_key"
];
};
};

@@ -73,7 +74,7 @@ Before starting the installation process, ensure that the SSH public key is copi

ssh-copy-id -o PreferredAuthentications=password -o PubkeyAuthentication=no root@nixos-installer.local
```

### Step 1.5: Prepare Secret Key and Clear Disk Data
### Step 1.5: Prepare Secret Key and Partition Disks

1. Access the installer using SSH:

@@ -90,13 +91,13 @@ nano /tmp/secret.key

3. Discard the old disk partition data:

```bash
blkdiscard /dev/disk/by-id/nvme-eui.002538b931b59865
blkdiscard /dev/disk/by-id/<installdisk>
```

4. Run the `clan` machine installation with the following command:
4. Run `clan machines install`, only running the kexec and disko phases, with the following command:

```bash
clan machines install gchq-local --target-host root@nixos-installer --yes --no-reboot
clan machines install gchq-local --target-host root@nixos-installer --phases kexec,disko
```

### Step 2: ZFS Pool Import and System Installation

@@ -107,14 +108,10 @@ clan machines install gchq-local --target-host root@nixos-installer --yes --no-r

ssh root@nixos-installer.local
```

2. Perform the following commands on the remote installation environment:
2. Run the following commands on the remote installation environment:

```bash
zpool import zroot
zfs set keylocation=prompt zroot/root
zfs load-key zroot/root
zfs set mountpoint=/mnt zroot/root/nixos
mount /dev/nvme0n1p2 /mnt/boot
```

3. Disconnect from the SSH session:

@@ -123,43 +120,36 @@ mount /dev/nvme0n1p2 /mnt/boot

CTRL+D
```

4. Securely copy your local `initrd_rsa_key` to the installer's `/mnt` directory:
4. Locally generate ssh host keys. You only need to generate ones for the algorithms you're using in `hostKeys`:

```bash
scp ~/.ssh/initrd_rsa_key root@nixos-installer.local:/mnt/var/lib/initrd-ssh-key
ssh-keygen -q -N "" -t ed25519 -f ./initrd_host_ed25519_key
ssh-keygen -q -N "" -t rsa -b 4096 -f ./initrd_host_rsa_key
```

5. SSH back into the installer:
5. Securely copy your local initrd ssh host keys to the installer's `/mnt` directory:

```bash
ssh root@nixos-installer.local
scp ./initrd_host* root@nixos-installer.local:/mnt/var/lib/
```

6. Navigate to the `/mnt` directory, enter the `nixos-enter` environment, and then exit:
6. Install nixos to the mounted partitions:
```bash
cd /mnt
nixos-enter
realpath /run/current-system
exit
clan machines install gchq-local --target-host root@nixos-installer --phases install
```

7. Run the `nixos-install` command with the appropriate system path `<SYS_PATH>`:

```bash
nixos-install --no-root-passwd --no-channel-copy --root /mnt --system <SYS_PATH>
```

8. After the installation process, unmount `/mnt/boot`, change the ZFS mountpoint, and reboot the system:
7. After the installation process, unmount `/mnt/boot`, change the ZFS mountpoints and unmount all the ZFS volumes by exporting the zpool:

```bash
umount /mnt/boot
cd /
zfs set mountpoint=/ zroot/root/nixos
reboot
zfs set -u mountpoint=/ zroot/root/nixos
zfs set -u mountpoint=/tmp zroot/root/tmp
zfs set -u mountpoint=/home zroot/root/home
zpool export zroot
```

9. Perform a hard reboot of the machine and remove the USB stick.
8. Perform a reboot of the machine and remove the USB installer.

### Step 3: Accessing the Initial Ramdisk (initrd) Environment
@@ -42,7 +42,7 @@ By the end of this guide, you'll have a fresh NixOS configuration ready to push

Add the Clan CLI into your development workflow:

```bash
nix shell git+https://git.clan.lol/clan/clan-core#clan-cli
nix shell git+https://git.clan.lol/clan/clan-core#clan-cli --refresh
```

You can find reference documentation for the `clan` cli program [here](../reference/cli/index.md).

@@ -54,6 +54,8 @@ clan --help

### Step 2: Initialize Your Project

If you want to migrate an existing project, follow this [guide](https://docs.clan.lol/manual/migration-guide/).

Set the foundation of your Clan project by initializing it as follows:

```bash

@@ -90,6 +92,21 @@ This should yield the following:

5 directories, 9 files
```

??? info "Recommended way of sourcing the `clan` cli tool"
The default template also adds the `clan` cli tool to the development shell.
Meaning you can get the exact version you need directly from the folder
you are in right now.

In the `my-clan` directory run the following command:
```
nix develop
```
That way you will have the tool available in the shell environment.
We also recommend setting up [direnv](https://direnv.net/) for your shell, for a more convenient
experience.

```bash
clan machines list
```

@@ -103,10 +120,3 @@ sara

You just successfully bootstrapped your first clan directory.

---

### What's Next?

- [**Installer**](./installer.md): Setting up new computers remotely is easy with a USB stick.

---
@@ -1,11 +1,16 @@

# Installer
# Clan Installer Image for Physical Machines

Our installer image simplifies the process of performing remote installations.
To install Clan on physical machines, you need to use our custom installer image. This is necessary for proper installation and operation.

Follow our step-by-step guide to create and transfer this image onto a bootable USB drive.
!!! note "Using a Cloud VM?"
If you're using a cloud provider's virtual machine (VM), you can skip this section and go directly to the [Configure Machines](configure.md) step. In this scenario, we automatically use [nixos-anywhere](https://github.com/nix-community/nixos-anywhere) to replace the kernel during runtime.

??? info "Why Doesn't nixos-anywhere Work on Physical Hardware?"
nixos-anywhere relies on [kexec](https://wiki.archlinux.org/title/Kexec) to replace the running kernel with our custom one. This method often has compatibility issues with real hardware, especially systems with dedicated graphics cards like laptops and servers, leading to crashes and black screens.

??? info "Reasons for a Custom Install Image"
Our custom install images are built to include essential tools like [nixos-facter](https://github.com/nix-community/nixos-facter) and support for [ZFS](https://wiki.archlinux.org/title/ZFS). They're also optimized to run on systems with as little as 1 GB of RAM, ensuring efficient performance even on lower-end hardware.

!!! info
If you already have a NixOS machine you can ssh into (in the cloud for example) you can skip this chapter and go directly to [Configure Machines](configure.md).

### Step 0. Prerequisites

@@ -40,9 +45,9 @@ Follow our step-by-step guide to create and transfer this image onto a bootable

sudo umount /dev/sdb1
```
=== "**Linux OS**"
### Step 2. Flash Custom Installer
### Step 2. Create a Custom Installer

Using clan flash enables the inclusion of ssh public keys and wifi access points.
Using clan flash enables the inclusion of ssh public keys into the image.
It also allows setting the language and keymap in the installer image.

```bash

@@ -61,7 +66,8 @@ sudo umount /dev/sdb1

The `clan flash` utility will erase the disk. Make sure to specify the correct device.

- **SSH-Pubkey Option**:
- **SSH-Pubkey Option**

To add an ssh public key into the installer image append the option:
```
--ssh-pubkey <pubkey_path>

@@ -69,19 +75,21 @@ sudo umount /dev/sdb1

If you do not have an ssh key yet, you can generate one with the `ssh-keygen -t ed25519` command.
This ssh key will be installed into the root user.

- **Connect to the installer
- **Connect to the installer**

On boot, the installer will display on-screen the IP address it received from the network.
If you need to configure Wi-Fi first, refer to the next section.
If Multicast-DNS (Avahi) is enabled on your own machine, you can also access the installer using the `flash-installer.local` address.

- **List Keymaps**:
- **List Keymaps**

You can get a list of all keymaps with the following command:
```
clan flash list keymaps
```

- **List Languages**:
- **List Languages**

You can get a list of all languages with the following command:
```
clan flash list languages

@@ -194,10 +202,3 @@ Press ++ctrl+d++ to exit `IWD`.

You're all set up!

---

## What's next?

- [Configure Machines](configure.md): Customize machine configuration

---
@@ -1,4 +1,3 @@

# Mesh VPN

This guide provides detailed instructions for configuring
[ZeroTier VPN](https://zerotier.com) within Clan. Follow the

@@ -19,89 +18,128 @@ Clan

If you select multiple network technologies at the same time, e.g. ZeroTier + Yggdrasil,
you must choose one of them as the primary network; the machines are always connected via the primary network.

## 1. Set-Up the VPN Controller

The VPN controller is initially essential for providing configuration to new
peers. Once addresses are allocated, the controller's continuous operation is not essential.

1. **Designate a Machine**: Label a machine as the VPN controller in the clan,
referred to as `<CONTROLLER>` henceforth in this guide.
2. **Add Configuration**: Input the following configuration to the NixOS
configuration of the controller machine:
```nix
clan.core.networking.zerotier.controller = {
  enable = true;
  public = true;
};
```
3. **Update the Controller Machine**: Execute the following:
```bash
clan machines update <CONTROLLER>
```
Your machine is now operational as the VPN controller.

## 2. Add Machines to the VPN

To introduce a new machine to the VPN, adhere to the following steps:

1. **Update Configuration**: On the new machine, incorporate the following to its
configuration, substituting `<CONTROLLER>` with the controller machine name:
```nix
{ config, ... }: {
  clan.core.networking.zerotier.networkId = builtins.readFile (config.clan.core.settings.directory + "/machines/<CONTROLLER>/facts/zerotier-network-id");
}
```
1. **Update the New Machine**: Execute:
```bash
$ clan machines update <NEW_MACHINE>
```
Replace `<NEW_MACHINE>` with the designated new machine name.

!!! Note "For Private Networks"
1. **Retrieve Zerotier Metadata**

=== "From the repo"
**Retrieve the ZeroTier IP**: In the clan repo, execute:
```console
$ clan facts list <NEW_MACHINE> | jq -r '.["zerotier-ip"]'
```

The returned address is the Zerotier IP address of the machine.

=== "On the new machine"
**Retrieve the ZeroTier ID**: On the `new_machine`, execute:
```bash
$ sudo zerotier-cli info
```
Example Output:
```{.console, .no-copy}
200 info d2c71971db 1.12.1 OFFLINE
```
, where `d2c71971db` is the ZeroTier ID.
This guide shows you how to configure `zerotier` either through `NixOS Options` directly, or through Clan's `Inventory` System.
2. **Authorize the New Machine on the Controller**: On the controller machine,
|
||||
execute:
|
||||
=== "**Inventory**"
|
||||
## 1. Choose the Controller
|
||||
|
||||
=== "with ZerotierIP"
|
||||
```bash
|
||||
$ sudo zerotier-members allow --member-ip <IP>
|
||||
```
|
||||
Substitute `<IP>` with the ZeroTier IP obtained previously.
|
||||
=== "with ZerotierID"
|
||||
```bash
|
||||
$ sudo zerotier-members allow <ID>
|
||||
```
|
||||
Substitute `<ID>` with the ZeroTier ID obtained previously.
|
||||
The controller is the initial entrypoint for new machines into the vpn.
|
||||
It will sign the id's of new machines.
|
||||
Once id's are signed, the controller's continuous operation is not essential.
|
||||
A good controller choice is nevertheless a machine that can always be reached for updates - so that new peers can be added to the network.
|
||||
|
||||
2. **Verify Connection**: On the `new_machine`, re-execute:
|
||||
```bash
|
||||
$ sudo zerotier-cli info
|
||||
```
|
||||
The status should now be "ONLINE":
|
||||
```{.console, .no-copy}
|
||||
200 info d2c71971db 1.12.1 ONLINE
|
||||
```
|
||||
For the purpose of this guide we have two machines:
|
||||
|
||||
- The `controller` machine, which will be the zerotier controller.
|
||||
- The `new_machine` machine, which is the machine we want to add to the vpn network.
|
||||
|
||||
## 2. Configure the Inventory
|
||||
```nix
|
||||
clan.inventory = {
|
||||
services.zerotier.default = {
|
||||
roles.controller.machines = [
|
||||
"controller"
|
||||
];
|
||||
roles.peer.machines = [
|
||||
"new_machine"
|
||||
];
|
||||
};
|
||||
};
|
||||
```

## 3. Apply the Configuration

Update the `controller` machine:

```bash
clan machines update controller
```

=== "**NixOS Options**"

    ## 1. Set-Up the VPN Controller

    The VPN controller is initially required to hand out configuration to new
    peers. Once addresses are allocated, the controller no longer needs to run
    continuously.

    1. **Designate a Machine**: Label a machine as the VPN controller in the clan,
       referred to as `<CONTROLLER>` henceforth in this guide.
    2. **Add Configuration**: Add the following to the NixOS
       configuration of the controller machine:

        ```nix
        clan.core.networking.zerotier.controller = {
          enable = true;
          public = true;
        };
        ```

    3. **Update the Controller Machine**: Execute the following:

        ```bash
        clan machines update <CONTROLLER>
        ```

        Your machine is now operational as the VPN controller.

    ## 2. Add Machines to the VPN

    To introduce a new machine to the VPN, follow these steps:

    1. **Update Configuration**: On the new machine, add the following to its
       configuration, substituting `<CONTROLLER>` with the controller machine name:

        ```nix
        { config, ... }: {
          clan.core.networking.zerotier.networkId = builtins.readFile (config.clan.core.settings.directory + "/machines/<CONTROLLER>/facts/zerotier-network-id");
        }
        ```

    2. **Update the New Machine**: Execute:

        ```bash
        $ clan machines update <NEW_MACHINE>
        ```

        Replace `<NEW_MACHINE>` with the designated new machine name.

    !!! Note "For Private Networks"

        1. **Retrieve ZeroTier Metadata**

            === "From the repo"

                **Retrieve the ZeroTier IP**: In the clan repo, execute:

                ```console
                $ clan facts list <NEW_MACHINE> | jq -r '.["zerotier-ip"]'
                ```

                The returned address is the ZeroTier IP address of the machine.

            === "On the new machine"

                **Retrieve the ZeroTier ID**: On the `new_machine`, execute:

                ```bash
                $ sudo zerotier-cli info
                ```

                Example output:

                ```{.console, .no-copy}
                200 info d2c71971db 1.12.1 OFFLINE
                ```

                where `d2c71971db` is the ZeroTier ID.

        2. **Authorize the New Machine on the Controller**: On the controller machine,
           execute:

            === "with ZerotierIP"

                ```bash
                $ sudo zerotier-members allow --member-ip <IP>
                ```

                Substitute `<IP>` with the ZeroTier IP obtained previously.

            === "with ZerotierID"

                ```bash
                $ sudo zerotier-members allow <ID>
                ```

                Substitute `<ID>` with the ZeroTier ID obtained previously.

        3. **Verify Connection**: On the `new_machine`, re-execute:

            ```bash
            $ sudo zerotier-cli info
            ```

            The status should now be "ONLINE":

            ```{.console, .no-copy}
            200 info d2c71971db 1.12.1 ONLINE
            ```

!!! success "Congratulations!"
    The new machine is now part of the VPN, and the ZeroTier network is fully operational.
@@ -1,8 +1,8 @@
# Secrets / Facts

Clan enables encryption of secrets (such as passwords & keys) ensuring security and ease-of-use among users.

Clan utilizes the [sops](https://github.com/getsops/sops) format and integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
By default Clan utilizes the [sops](https://github.com/getsops/sops) format and integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
Clan can also be configured to be used with other secret store [backends](https://docs.clan.lol/reference/clan-core/vars/#clan.core.vars.settings.secretStore).
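
As an illustration of switching backends, the option named in the link above can be set in a machine's configuration. A minimal sketch, using the `fs` backend that also appears in the demo template later in this diff (other accepted values are listed in the reference documentation):

```nix
{
  # store generated values as plain files instead of the default sops backend
  clan.core.vars.settings.secretStore = "fs";
}
```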

This guide will walk you through:

@@ -40,7 +40,7 @@ Also add your age public key to the repository with 'clan secrets users add YOUR

### Add Your Public Key

```bash
clan secrets users add $USER <your_public_key>
clan secrets users add $USER --age-key <your_public_key>
```

It's best to choose the same username as on the setup/admin machine you use to control the deployment.

@@ -54,38 +54,3 @@ sops/
└── key.json
```

If you followed the quickstart tutorial all necessary secrets are initialized at this point.

### Generate Facts and Vars

Typically, this step is handled automatically when a machine is deployed. However, to enable the use of `nix flake check` with your configuration, it must be completed manually beforehand.

Currently, generating all the necessary facts requires two separate commands. This is due to the coexistence of two parallel secret management solutions: the older, stable version (`clan secrets` and `clan facts`) and the newer, experimental version (`clan vars`).

To generate both facts and vars, execute the following commands:

```sh
clan facts generate && clan vars generate
```

### Check Configuration

Validate your configuration by running:

```bash
nix flake check
```

This command helps ensure that your system configuration is correct and free from errors.

!!! Tip

    You can integrate this step into your [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) workflow to ensure that only valid Nix configurations are merged into your codebase.

## What's next?

- [Deployment](deploy.md): How to remotely deploy your machine
- The full [Secrets](../manual/secrets.md) guide, if you want to know more about how to save and share passwords in your clan

@@ -4,13 +4,13 @@ hide:
  - toc
---

# :material-home: Welcome to **Clan**'s awesome documentation
# :material-home: Welcome to **Clan**'s documentation

[Getting Started](./getting-started/index.md){ .md-button }

## What's inside
## Tutorials

This documentation is structured into the following sections
**Learning-oriented adventures with a hands-on experience.**

<div class="grid cards" markdown>

@@ -19,25 +19,69 @@ This documentation is structured into the following sections
    ---

    Create your own clan and get everything
    running in a couple of minutes.
    running in minutes

    [:octicons-arrow-right-24: Getting started](./getting-started/index.md)

- :material-sign-direction:{ .lg .middle } __Guides__
- :fontawesome-solid-user-group:{ .lg .middle } __Authoring Modules__

    ---

    Instructions and explanations for practical Implementations ordered by Topic.
    Create clanModules that can be reused by the community.

    [:octicons-arrow-right-24: Authoring clanModules](./clanmodules/index.md)

</div>

## :material-book: Guides

**How-to Guides for achieving a certain goal or solving a specific issue.**

<div class="grid cards" markdown>

- [Autoincludes](./manual/adding-machines.md)

    ---

    Learn how Clan automatically includes machines and Nix files.

- [Vars Backend](./manual/vars-backend.md)

    ---

    Learn how to manage secrets with vars.

- [Inventory](./manual/inventory.md)

    ---

    Clan's declaration format for running **services** on one or multiple **machines**.

- [Flake-parts](./manual/flake-parts.md)

    ---

    Use clan with [flake.parts](https://flake.parts/)

- [Contribute](./contributing/contribute.md)

    ---

    Discover how to set up a development environment to contribute to Clan!

</div>

## API Reference

**Reference API Documentation**

<div class="grid cards" markdown>

- [Reference Overview](./reference/index.md)

    ---

    Learn how to interface with Clan programmatically

    [:octicons-arrow-right-24: Guides](./manual/index.md)

- :material-api:{ .lg .middle } __Reference__

    ---

    Detailed Specification of Functions and APIs.

    [:octicons-arrow-right-24: Reference](./reference/index.md)

</div>

@@ -1,4 +1,3 @@
# Adding Machines

Clan has two general methods of adding machines:

@@ -18,6 +17,8 @@ Every folder `machines/{machineName}` will be registered automatically as a Clan
- [x] `machines/{machineName}/facter.json` Automatically configured, for further information see [nixos-facter](https://clan.lol/blog/nixos-facter/)
- [x] `machines/{machineName}/disko.nix` Automatically loaded, for further information see the [disko docs](https://github.com/nix-community/disko/blob/master/docs/quickstart.md).

## Manual declaration

Machines can also be added manually under `buildClan`, `clan.*` in flake-parts or via [`inventory`](../manual/inventory.md).
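
A minimal sketch of such a manual declaration, assuming a host named `jon` (the machine name and module path are placeholders):

```nix
buildClan {
  # declare the machine explicitly instead of relying on the
  # automatic machines/{machineName} folder registration
  machines.jon = {
    nixpkgs.hostPlatform = "x86_64-linux";
    imports = [ ./machines/jon/configuration.nix ];
  };
}
```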

@@ -1,5 +1,3 @@
# Disk Templates

!!! Danger ":fontawesome-solid-road-barrier: Under Construction :fontawesome-solid-road-barrier:"
    Currently under construction, use with caution.

@@ -1,4 +1,3 @@
# Clan with `flake-parts`

Clan supports integration with [flake.parts](https://flake.parts/), a tool which allows composing NixOS modules in a modular way.

@@ -1,66 +0,0 @@
# :material-book: Guides

Instructions and explanations for practical implementations, ordered by topic.

## Tutorials

**Learning-oriented adventures with a hands-on experience.**

<div class="grid cards" markdown>

- :material-clock-fast:{ .lg .middle } __Set up in 15 minutes__

    ---

    Create your own clan and get everything
    running in minutes

    [:octicons-arrow-right-24: Getting started](../getting-started/index.md)

- :fontawesome-solid-user-group:{ .lg .middle } __Authoring Modules__

    ---

    Create clanModules that can be reused by the community.

    [:octicons-arrow-right-24: Authoring clanModules](../clanmodules/index.md)

</div>

## Guides

**How-to Guides for achieving a certain goal or solving a specific issue.**

<div class="grid cards" markdown>

- [Machines](./adding-machines.md)

    ---

    Learn how Clan automatically includes machines and Nix files.

- [Secrets](./secrets.md)

    ---

    Learn how to manage secrets.

- [Inventory](./inventory.md)

    ---

    Clan's declaration format for running **services** on one or multiple **machines**.

- [Flake-parts](./flake-parts.md)

    ---

    Use clan with [flake-parts](https://flake-parts.dev)

- [Contribute](./contribute.md)

    ---

    Discover how to set up a development environment to contribute to Clan!

</div>

@@ -1,10 +1,15 @@
# Inventory

`Inventory` is an abstract service layer for consistently configuring distributed services across machine boundaries.

See [Inventory API Documentation](../reference/nix-api/inventory.md)
## Concept

This guide will walk you through setting up a backup service, where the inventory becomes useful.
Its concept is slightly different from what NixOS veterans might be used to. The inventory is a service definition on a higher level, not a machine configuration. This allows you to define a consistent and coherent service.

The inventory logic will automatically derive the modules and configurations to enable on each machine in your `clan` based on its `role`. This makes it easy to set up distributed `services` such as backups, networking, traditional cloud services, or peer-to-peer based applications.

The following tutorial will walk through setting up a Backup service, where the terms `Service` and `Role` will become more clear.

See also: [Inventory API Documentation](../reference/nix-api/inventory.md)
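
As a taste of what such a definition looks like, here is a minimal sketch, assuming the `borgbackup` clanModule with `client` and `server` roles (the machine names are illustrative):

```nix
clan.inventory.services.borgbackup.default = {
  # one machine acts as the backup target ...
  roles.server.machines = [ "backup_server" ];
  # ... and these machines are backed up to it
  roles.client.machines = [ "laptop" "desktop" ];
};
```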

!!! example "Experimental status"
    The inventory implementation is not considered stable yet.

@@ -18,17 +23,13 @@ This guide will walk you through setting up a backup service, where the inventor

## Services

The inventory defines `services`. Membership of `machines` is defined via roles exclusively.
The inventory defines `services`. Membership of `machines` is defined via `roles` exclusively.

See the each [module documentation](../reference/clanModules/index.md) for available roles.

!!! Note
    It is possible to use any [clanModule](../reference/clanModules/index.md) in the inventory and add machines via
    `roles.default.*`

See each [module's documentation](../reference/clanModules/index.md) for its available roles.

### Adding services to machines

A module can be added to one or multiple machines via `Roles`. clan's `Role` interface provide sane defaults for a module this allows the module author to reduce the configuration overhead to a minimum.
A service can be added to one or multiple machines via `Roles`. Clan's `Role` interface provides sane defaults for a module; this allows the module author to reduce the configuration overhead to a minimum.

Each service can still be customized and configured according to the module's options.
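
A minimal sketch of such a customization, assuming the inventory exposes a per-role `config` attribute (the service, option path, and machine names are illustrative):

```nix
clan.inventory.services.borgbackup.default = {
  roles.client.machines = [ "laptop" ];
  # settings applied to every machine in the client role;
  # the options available here come from the underlying module
  roles.client.config = {
    destinations.remote.repo = "borg@backup.example.com:repo"; # placeholder
  };
};
```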

docs/site/manual/migration-guide.md (new file, 171 lines)

@@ -0,0 +1,171 @@
# Migrate existing NixOS configurations

This guide will help you migrate your existing Nix configurations into Clan.

!!! Warning
    Migrating instead of starting new can be trickier and might lead to bugs or
    unexpected issues. We recommend following the [Getting Started](../getting-started/index.md) guide first. Once you have a working setup, you can easily transfer your Nix configurations over.

## Back up your existing configuration!

Before you start, it is strongly recommended to back up your existing
configuration in any form you see fit. If you use version control to manage
your configuration changes, it is also a good idea to follow the migration
guide in a separate branch until everything works as expected.

## Starting Point

We assume you are already using NixOS flakes to manage your configuration. If
not, migrate to a flake-based setup following the official [NixOS
documentation](https://nix.dev/manual/nix/2.25/command-ref/new-cli/nix3-flake.html).
The snippet below shows a common Nix flake. For this example we will assume you
have two hosts: **berlin** and **cologne**.

```nix
{
  inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";

  outputs = { self, nixpkgs, ... }: {

    nixosConfigurations = {

      berlin = nixpkgs.lib.nixosSystem {
        system = "x86_64-linux";
        modules = [ ./machines/berlin/configuration.nix ];
      };

      cologne = nixpkgs.lib.nixosSystem {
        system = "x86_64-linux";
        modules = [ ./machines/cologne/configuration.nix ];
      };
    };
  };
}
```

## Add clan-core Input

Add `clan-core` as an input to your flake. It will provide everything we need to
manage your configurations with clan.

```nix
inputs.clan-core = {
  url = "git+https://git.clan.lol/clan/clan-core";
  # Don't do this if your machines are on nixpkgs stable.
  inputs.nixpkgs.follows = "nixpkgs";
};
```

## Update Outputs

To be able to access our newly added dependency, it has to be added to the
output parameters.

```diff
- outputs = { self, nixpkgs, ... }:
+ outputs = { self, nixpkgs, clan-core }:
```

The existing `nixosConfigurations` output of your flake will be created by
clan. In addition, a new `clanInternals` output will be added. Since both of
these are provided by the output of `lib.buildClan`, a common syntax is to use a
`let...in` statement to create your clan and access its parameters in the flake
outputs.

For the provided flake example, your flake should now look like this:

```nix
{
  inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";

  outputs = { self, nixpkgs, clan-core, ... }:
    let
      clan = clan-core.lib.buildClan {
        self = self; # this needs to point at the repository root
        specialArgs = {};
        inventory.meta.name = "NEEDS_TO_BE_UNIQUE"; # TODO: Changeme

        machines = {
          berlin = {
            nixpkgs.hostPlatform = "x86_64-linux";
            imports = [ ./machines/berlin/configuration.nix ];
          };
          cologne = {
            nixpkgs.hostPlatform = "x86_64-linux";
            imports = [ ./machines/cologne/configuration.nix ];
          };
        };
      };
    in
    {
      nixosConfigurations = clan.nixosConfigurations;
      inherit (clan) clanInternals;
    };
}
```

Et voilà! Your existing hosts are now part of a clan. Existing Nix tooling
should still work as normal. To check that you didn't make any errors, run `nix
flake show` and verify both hosts are still recognized as if nothing had
changed. You should also see the new `clanInternals` output.

```
❯ nix flake show
git+file:///my-nixos-config
├───clanInternals: unknown
└───nixosConfigurations
    ├───berlin: NixOS configuration
    └───cologne: NixOS configuration
```

Of course you can also rebuild your configuration using `nixos-rebuild` and
verify everything still works.

## Add Clan CLI devShell

At this point Clan is set up, but you can't use the CLI yet. To do so, it is
recommended to expose it via a `devShell` in your flake. It is also possible to
install it any other way you would install a package in Nix, but using a
development shell ensures the CLI's version will always be in sync with your
configuration.

A minimal example is provided below; add it to your flake outputs.

```nix
devShells."x86_64-linux".default = nixpkgs.legacyPackages."x86_64-linux".mkShell {
  packages = [ clan-core.packages."x86_64-linux".clan-cli ];
};
```

To use the CLI, execute `nix develop` in the directory of your flake. The
resulting shell provides you with the `clan` CLI tool. Since you will be using
it every time you interact with Clan, it is recommended to set up
[direnv](https://direnv.net/).

Verify everything works as expected by running `clan machines list`.

```
❯ nix develop
[user@host:~/my-nixos-config]$ clan machines list
berlin
cologne
```

## Specify Targets

Clan needs to know where it can reach your hosts. For each of your hosts, set
`clan.core.networking.targetHost` to its address or hostname.

```nix
# machines/berlin/configuration.nix
{
  clan.core.networking.targetHost = "123.4.56.78";
}
```
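
If you would rather keep deployment details out of the per-host files, the same option can be set where the machines are declared in `buildClan`. A minimal sketch for the two example hosts (the address and hostname are placeholders):

```nix
machines = {
  berlin = {
    imports = [ ./machines/berlin/configuration.nix ];
    clan.core.networking.targetHost = "123.4.56.78"; # placeholder address
  };
  cologne = {
    imports = [ ./machines/cologne/configuration.nix ];
    clan.core.networking.targetHost = "cologne.example.org"; # placeholder hostname
  };
};
```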

## Next Steps

You are now fully set up. Use the CLI to manage your hosts or proceed to
configure further services. At this point you should be able to run commands
like `clan machines update berlin` to deploy a host.

@@ -1,4 +1,3 @@
# Repo Layout Guide

This guide will help you navigate the codebase and locate key files:

@@ -19,42 +18,3 @@ $ tree -L 1
├── templates # Template files for creating a new Clan
└── vars
```

## Getting Started with Infrastructure

To dive into infrastructure, check out our clan infra repo: [clan-infra](https://git.clan.lol/clan/clan-infra). Please provide us with your public SOPS key so we can add you as an admin.

## Related Projects

- **Data Mesher**: [dm](https://git.clan.lol/clan/dm)
- **NixOS Facter**: [nixos-facter](https://github.com/nix-community/nixos-facter)
- **NixOS Anywhere**: [nixos-anywhere](https://github.com/nix-community/nixos-anywhere)
- **Disko**: [disko](https://github.com/nix-community/disko)

## Fixing Bugs or Adding Features in Clan-CLI

If you have a bug fix or feature that involves a related project, clone the relevant repository and replace its invocation in your local setup.

For instance, if you need to update `nixos-anywhere` in clan-cli, find its usage:

```python
run(
    nix_shell(
        ["nixpkgs#nixos-anywhere"],
        cmd,
    ),
    RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
)
```

You can replace `"nixpkgs#nixos-anywhere"` with your local path:

```python
run(
    nix_shell(
        ["<path_to_local_src>#nixos-anywhere"],
        cmd,
    ),
    RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
)
```

docs/site/manual/vars-backend.md (new file, 151 lines)

@@ -0,0 +1,151 @@

!!! Note
    Vars is the new secret backend that will soon replace the Facts backend.

Defining a Linux user's password via the NixOS configuration previously required running `mkpasswd ...` and then copying the hash back into the Nix configuration.
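
That manual flow looked roughly like this (a sketch; the hash is a truncated placeholder):

```nix
{
  # hash produced by running `mkpasswd -m sha-512` by hand,
  # then pasted back into the configuration
  users.users.root.hashedPassword = "$6$RMats/YMeypFtcYX$DUi..."; # placeholder
}
```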

In this example, we will guide you through automating that interaction using clan `vars`.

For a more general explanation of what clan vars are and how they work, see the intro of the [Reference Documentation for vars](https://docs.clan.lol/reference/clan-core/vars/).

This guide assumes:

- clan is set up already (see [Getting Started](../getting-started/index.md))
- a machine has been added to the clan (see [Adding Machines](./adding-machines.md))

This section will walk you through the following steps:

1. declare a `generator` in the machine's nixos configuration
2. inspect the status via the clan cli
3. generate the vars
4. observe the changes
5. update the machine
6. share the root password between machines
7. change the password

## Declare the generator

In this example, a `vars` `generator` is used to:

- prompt the user for the password
- run the required `mkpasswd` command to generate the hash
- store the hash in a file
- expose the file path to the nixos configuration

Create a new nix file `root-password.nix` with the following content and import it into your `configuration.nix`:

```nix
{ config, pkgs, ... }: {

  clan.core.vars.generators.root-password = {
    # prompt the user for a password
    # (`password-input` being an arbitrary name)
    prompts.password-input.description = "the root user's password";
    prompts.password-input.type = "hidden";
    # don't store the prompted password itself
    prompts.password-input.persist = false;
    # define an output file for storing the hash
    files.password-hash.secret = false;
    # define the logic for generating the hash
    script = ''
      cat $prompts/password-input | mkpasswd -m sha-512 > $out/password-hash
    '';
    # the tools required by the script
    runtimeInputs = [ pkgs.mkpasswd ];
  };

  # ensure users are immutable (otherwise the following config might be ignored)
  users.mutableUsers = false;
  # set the root password to the file containing the hash
  users.users.root.hashedPasswordFile =
    # clan will make sure this path exists
    config.clan.core.vars.generators.root-password.files.password-hash.path;
}
```

## Inspect the status

Executing `clan vars list`, you should see the following:

```shellSession
$ clan vars list my-machine
root-password/password-hash: <not set>
```

...indicating that the value `password-hash` for the generator `root-password` is not set yet.

## Generate the values

This step is not strictly necessary, as deploying the machine via `clan machines update` would trigger the generator as well.

To run the generator, execute `clan vars generate` for your machine:

```shellSession
$ clan vars generate my-machine
Enter the value for root-password/password-input (hidden):
```

After entering the value, the updated status is reported:

```shellSession
Updated var root-password/password-hash
  old: <not set>
  new: $6$RMats/YMeypFtcYX$DUi...
```

## Observe the changes

With the last step, a new file was created in your repository:
`vars/per-machine/my-machine/root-password/password-hash/value`

If the repository is a git repository, a commit was created automatically:

```shellSession
$ git log -n1
commit ... (HEAD -> master)
Author: ...
Date: ...

    Update vars via generator root-password for machine my-machine
```

## Update the machine

```shell
clan machines update my-machine
```

## Share root password between machines

If we just imported the `root-password.nix` from above into more machines, clan would ask for a new password for each additional machine.

If the root password should instead be entered only once and shared across all machines, the generator defined above needs to be declared as `shared`, by adding `share = true` to it:

```nix
{ config, pkgs, ... }: {
  clan.core.vars.generators.root-password = {
    share = true;
    # ...
  };
}
```

Importing that shared generator into each machine ensures that the password is prompted only once, when the first machine gets updated, and is then re-used for all subsequent machines.

## Change the root password

Changing the password can be done via the following command.
Replace `my-machine` with your machine.
If the password is shared, just pick any machine that has the generator declared.

```shellSession
$ clan vars generate my-machine --generator root-password --regenerate
...
Enter the value for root-password/password-input (hidden):
Input received. Processing...
...
Updated var root-password/password-hash
  old: $6$tb27m6EOdff.X9TM$19N...
  new: $6$OyoQtDVzeemgh8EQ$zRK...
```

## Further Reading

- [Reference Documentation for `clan.core.vars` nixos options](../reference/clan-core/vars.md)
- [Reference Documentation for the `clan vars` cli command](../reference/cli/vars.md)

@@ -1 +0,0 @@
/nix/store/8y5h98wk5p94mv1wyb2c4gkrr7bswd19-asciinema-player.css

@@ -1 +0,0 @@
/nix/store/w0i3f9qzn9n6jmfnfgiw5wnab2f9ssdw-asciinema-player.min.js

@@ -15,3 +15,8 @@
.md-header img {
  filter: invert(100%) brightness(100%);
}

.md-nav__title,
.md-nav__item.md-nav__item--section > label > span {
  color: var(--md-typeset-a-color);
}

flake.lock (generated, 47 lines changed)

@@ -7,11 +7,11 @@
      ]
    },
    "locked": {
      "lastModified": 1735468753,
      "narHash": "sha256-2dt1nOe9zf9pDkf5Kn7FUFyPRo581s0n90jxYXJ94l0=",
      "lastModified": 1741786315,
      "narHash": "sha256-VT65AE2syHVj6v/DGB496bqBnu1PXrrzwlw07/Zpllc=",
      "owner": "nix-community",
      "repo": "disko",
      "rev": "84a5b93637cc16cbfcc61b6e1684d626df61eb21",
      "rev": "0d8c6ad4a43906d14abd5c60e0ffe7b587b213de",
      "type": "github"
    },
    "original": {
@@ -27,11 +27,11 @@
      ]
    },
    "locked": {
      "lastModified": 1735774679,
      "narHash": "sha256-soePLBazJk0qQdDVhdbM98vYdssfs3WFedcq+raipRI=",
      "lastModified": 1741352980,
      "narHash": "sha256-+u2UunDA4Cl5Fci3m7S643HzKmIDAe+fiXrLqYsR2fs=",
      "owner": "hercules-ci",
      "repo": "flake-parts",
      "rev": "f2f7418ce0ab4a5309a4596161d154cfc877af66",
      "rev": "f4330d22f1c5d2ba72d3d22df5597d123fdb60a9",
      "type": "github"
    },
    "original": {
@@ -42,11 +42,11 @@
    },
    "nixos-facter-modules": {
      "locked": {
        "lastModified": 1734596637,
        "narHash": "sha256-MRqwVAe3gsb88u4ME1UidmZFVCx+FEnoob0zkpO9DMY=",
        "lastModified": 1738752252,
        "narHash": "sha256-/nA3tDdp/2g0FBy8966ppC2WDoyXtUWaHkZWL+N3ZKc=",
        "owner": "numtide",
        "repo": "nixos-facter-modules",
        "rev": "536472754982bf03079b4b4e0261838a760587c0",
        "rev": "60f8b8f3f99667de6a493a44375e5506bf0c48b1",
        "type": "github"
      },
      "original": {
@@ -57,18 +57,15 @@
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1734435836,
        "narHash": "sha256-kMBQ5PRiFLagltK0sH+08aiNt3zGERC2297iB6vrvlU=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "4989a246d7a390a859852baddb1013f825435cee",
        "type": "github"
        "lastModified": 315532800,
        "narHash": "sha256-+bxPXRQiQ0SsjR8syBcc8X+S8WGllNM+Qreu5Td7gnI=",
        "rev": "1750f3c1c89488e2ffdd47cab9d05454dddfb734",
        "type": "tarball",
        "url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre773343.1750f3c1c894/nixexprs.tar.xz"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixpkgs-unstable",
        "repo": "nixpkgs",
        "type": "github"
        "type": "tarball",
        "url": "https://nixos.org/channels/nixpkgs-unstable/nixexprs.tar.xz"
      }
    },
    "root": {
@@ -89,11 +86,11 @@
      ]
    },
    "locked": {
      "lastModified": 1736064798,
      "narHash": "sha256-xJRN0FmX9QJ6+w8eIIIxzBU1AyQcLKJ1M/Gp6lnSD20=",
      "lastModified": 1742700801,
      "narHash": "sha256-ZGlpUDsuBdeZeTNgoMv+aw0ByXT2J3wkYw9kJwkAS4M=",
      "owner": "Mic92",
      "repo": "sops-nix",
      "rev": "5dc08f9cc77f03b43aacffdfbc8316807773c930",
      "rev": "67566fe68a8bed2a7b1175fdfb0697ed22ae8852",
      "type": "github"
    },
    "original": {
@@ -124,11 +121,11 @@
      ]
    },
    "locked": {
      "lastModified": 1736115332,
      "narHash": "sha256-FBG9d7e0BTFfxVdw4b5EmNll2Mv7hfRc54hbB4LrKko=",
      "lastModified": 1742982148,
      "narHash": "sha256-aRA6LSxjlbMI6MmMzi/M5WH/ynd8pK+vACD9za3MKLQ=",
      "owner": "numtide",
      "repo": "treefmt-nix",
      "rev": "1788ca5acd4b542b923d4757d4cfe4183cc6a92d",
      "rev": "61c88349bf6dff49fa52d7dfc39b21026c2a8881",
      "type": "github"
    },
    "original": {

flake.nix (48 lines changed)

@@ -2,7 +2,7 @@
  description = "clan.lol base operating system";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
    nixpkgs.url = "https://nixos.org/channels/nixpkgs-unstable/nixexprs.tar.xz";

    flake-parts.url = "github:hercules-ci/flake-parts";
    flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs";
@@ -24,36 +24,50 @@
  outputs =
    inputs@{
      flake-parts,
      nixpkgs,
      self,
      systems,
      ...
    }:
    let
      inherit (nixpkgs.lib)
        filter
        optional
        pathExists
        ;
    in
    flake-parts.lib.mkFlake { inherit inputs; } (
      { ... }:
      {
        clan = {
          meta.name = "clan-core";
          directory = self;
        };

        systems = import systems;
        imports = [
          ./checks/flake-module.nix
          ./clanModules/flake-module.nix
          ./flakeModules/flake-module.nix
          (import ./flakeModules/clan.nix inputs.self)
          ./devShell.nix
          # TODO: migrate this @davHau
          # ./docs/flake-module
          ./docs/nix/flake-module.nix
          ./lib/flake-module.nix
          ./nixosModules/flake-module.nix
          ./nixosModules/clanCore/vars/flake-module.nix
          ./pkgs/flake-module.nix
          ./templates/flake-module.nix
        imports =
          # only importing existing paths allows to minimize the flake for test
          # by removing files
          filter pathExists [
            ./checks/flake-module.nix
            ./clanModules/flake-module.nix
            ./devShell.nix
            ./docs/nix/flake-module.nix
            ./flakeModules/flake-module.nix
            ./flakeModules/demo_iso.nix
            ./lib/filter-clan-core/flake-module.nix
            ./lib/flake-module.nix
            ./nixosModules/clanCore/vars/flake-module.nix
            ./nixosModules/flake-module.nix
            ./pkgs/flake-module.nix
            ./templates/flake-module.nix
          ]
          ++ [
            (if pathExists ./flakeModules/clan.nix then import ./flakeModules/clan.nix inputs.self else { })
          ]
          # Make treefmt-nix optional
          # This only works if you set inputs.clan-core.inputs.treefmt-nix.follows
          # to a non-empty input that doesn't export a flakeModule
        ] ++ inputs.nixpkgs.lib.optional (inputs.treefmt-nix ? flakeModule) ./formatter.nix;
          ++ optional (pathExists ./formatter.nix && inputs.treefmt-nix ? flakeModule) ./formatter.nix;
      }
    );
}

@@ -27,9 +27,13 @@ in
  };

  options.flake = flake-parts-lib.mkSubmoduleOptions {
    clan = lib.mkOption { type = types.raw; };
    clanInternals = lib.mkOption { type = types.raw; };
  };
  config = {
    flake.clan = {
      inherit (config.clan.clanInternals) templates;
    };
    flake.clanInternals = config.clan.clanInternals;
    flake.nixosConfigurations = config.clan.nixosConfigurations;
  };

flakeModules/demo_iso.nix (new file, 101 lines)

@@ -0,0 +1,101 @@
{ self, ... }:

let
  pkgs = self.inputs.nixpkgs.legacyPackages.x86_64-linux;

  demoModule = {
    imports = [
      "${self.clanModules.mycelium}/roles/peer.nix"
      # TODO do we need this? maybe not
      (
        { modulesPath, ... }:
        {
          imports = [ "${modulesPath}/installer/cd-dvd/iso-image.nix" ];
        }
      )
    ];
  };

  clan_welcome = pkgs.writeShellApplication {
    name = "clan_welcome";
    runtimeInputs = [
      pkgs.gum
      pkgs.gitMinimal
      pkgs.retry
      self.packages.${pkgs.system}.clan-cli
    ];
    text = ''
      set -efu

      gum confirm '
        Welcome to Clan, a NixOS-based operating system for the CLAN project.
        This installer can be used to try out clan on your machine, for that reason we setup a cooperative environment to play and hack together :)
      ' || exit 1
      until retry -t 5 ping -c 1 -W 1 git.clan.lol &> /dev/null; do
        # TODO make this nicer
        nmtui
      done
      if ! test -e ~/clan-core; then
        # git clone https://git.clan.lol/clan/clan-core.git ~/clan-core
        cp -rv ${self} clan-core
      fi
      cd clan-core
      clan machines morph demo-template --i-will-be-fired-for-using-this
      exit
    '';
  };

  morphModule = {
    imports = [
      (
        { modulesPath, ... }:
        {
          imports = [ "${modulesPath}/image/images.nix" ];
        }
      )
    ];
    image.modules.iso.isoImage.squashfsCompression = "zstd -Xcompression-level 1";
    networking.networkmanager.enable = true;
    services.getty.autologinUser = "root";
    programs.bash.interactiveShellInit = ''
      if [[ "$(tty)" =~ /dev/(tty1|hvc0|ttyS0)$ ]]; then
        # workaround for https://github.com/NixOS/nixpkgs/issues/219239
        systemctl restart systemd-vconsole-setup.service

        reset

        ${clan_welcome}/bin/clan_welcome
      fi
    '';
  };
in
{
  clan.templates.machine.demo-template = {
    description = "Demo machine for the CLAN project";
    # path = pkgs.runCommand "demo-template" {} ''
    #   mkdir -p $out
    #   echo '{ self, ... }: { imports = [ self.nixosModules.demoModule ]; }' > $out/configuration.nix
    # '';
    path = ./demo_template;
  };
  flake.nixosModules = { inherit morphModule demoModule; };
  perSystem =
    { system, lib, ... }:
    {
      packages =
        lib.mkIf
          (lib.any (x: x == system) [
            "x86_64-linux"
            "aarch64-linux"
          ])
          {
            demo-iso =
              (self.inputs.nixpkgs.lib.nixosSystem {
                modules = [
                  { nixpkgs.hostPlatform = system; }
                  morphModule
                ];
              }).config.system.build.images.iso;
          };
    };
}

flakeModules/demo_template/configuration.nix (new file, 38 lines)

@@ -0,0 +1,38 @@
{ pkgs, config, ... }:
{
  fileSystems."/".device = "nodev";
  boot.loader.grub.device = "nodev";
  clan.core.vars.settings.secretStore = "fs";
  clan.core.vars.generators.mycelium = {
    files."key" = { };
    files."ip".secret = false;
    files."pubkey".secret = false;
    runtimeInputs = [
      pkgs.mycelium
      pkgs.coreutils
      pkgs.jq
    ];
    script = ''
      timeout 5 mycelium --key-file "$out"/key || :
      mycelium inspect --key-file "$out"/key --json | jq -r .publicKey > "$out"/pubkey
      mycelium inspect --key-file "$out"/key --json | jq -r .address > "$out"/ip
    '';
  };
  services.mycelium = {
    enable = true;
    addHostedPublicNodes = true;
    openFirewall = true;
    keyFile = config.clan.core.vars.generators.mycelium.files.key.path;
  };
  services.getty.autologinUser = "root";
  programs.bash.interactiveShellInit = ''
    if [[ "$(tty)" =~ /dev/(tty1|hvc0|ttyS0)$ ]]; then
      # workaround for https://github.com/NixOS/nixpkgs/issues/219239
      systemctl restart systemd-vconsole-setup.service

      reset

      echo "your mycelium IP is: $(cat /var/lib/mycelium/ip)"
    fi
  '';
}

@@ -92,13 +92,12 @@
  treefmt.programs.mypy.directories =
    {
      "clan-cli" = {
        extraPythonPackages = self'.packages.clan-cli.testDependencies;
        directory = "pkgs/clan-cli";
        extraPythonPackages = (self'.packages.clan-cli.devshellPyDeps pkgs.python3Packages);
      };
      "clan-app" = {
        directory = "pkgs/clan-app";
        extraPythonPackages =
          (self'.packages.clan-app.externalTestDeps or [ ]) ++ self'.packages.clan-cli.testDependencies;
        extraPythonPackages = (self'.packages.clan-app.devshellPyDeps pkgs.python3Packages);
        extraPythonPaths = [ "../clan-cli" ];
      };
    }
@@ -107,8 +106,9 @@
    {
      "clan-vm-manager" = {
        directory = "pkgs/clan-vm-manager";
        extraPythonPackages =
          self'.packages.clan-vm-manager.externalTestDeps ++ self'.packages.clan-cli.testDependencies;
        extraPythonPackages = self'.packages.clan-vm-manager.externalTestDeps ++ [
          (pkgs.python3.withPackages (ps: self'.packages.clan-cli.devshellPyDeps ps))
        ];
        extraPythonPaths = [ "../clan-cli" ];
      };
    }

@@ -56,7 +56,7 @@
  "machines": {
    "test-inventory-machine": {
      "config": {
        "packages": ["zed-editor"]
        "packages": ["hello"]
      },
      "extraModules": []
    }

lib/build-clan/auto-imports.nix (new file, 23 lines)

@@ -0,0 +1,23 @@
{
  lib,
  self,
  ...
}:

let
  # Returns an attrset with inputs that have the attribute `clanModules`
  inputsWithClanModules = lib.filterAttrs (
    _name: value: builtins.hasAttr "clanModules" value
  ) self.inputs;

  flattenedClanModules = lib.foldl' (
    acc: input:
    lib.mkMerge [
      acc
      input.clanModules
    ]
  ) { } (lib.attrValues inputsWithClanModules);
in
{
  inventory.modules = flattenedClanModules;
}

@@ -8,7 +8,8 @@
}:
{
  ## Inputs
  directory, # The directory containing the machines subdirectory # allows to include machine-specific modules i.e. machines.${name} = { ... }
  self ? lib.warn "Argument: 'self' must be set when using 'buildClan'." null, # Reference to the current flake
  # allows to include machine-specific modules i.e. machines.${name} = { ... }
  # A map from arch to pkgs, if specified this nixpkgs will be only imported once for each system.
  # This improves performance, but all nixpkgs.* options will be ignored.
  # deadnix: skip
@@ -23,11 +24,12 @@ let
  inherit
    lib
    nixpkgs
    specialArgs
    clan-core
    self
    ;
  self = directory;
  inherit specialArgs;
};

rest = builtins.removeAttrs attrs [ "specialArgs" ];
in
eval {

@@ -2,8 +2,8 @@
  lib,
  nixpkgs,
  clan-core,
  specialArgs ? { },
  self,
  specialArgs ? { },
}:
# Returns a function that takes self, which should point to the directory of the flake
module:
@@ -14,6 +14,8 @@ module:
  modules = [
    ./interface.nix
    module
    { inherit specialArgs; }
    {
      inherit specialArgs;
    }
  ];
}).config

@@ -20,12 +20,11 @@ in
jsonDocs = import ./eval-docs.nix {
  inherit pkgs lib;
};

in
{
  legacyPackages.clan-internals-docs = jsonDocs.optionsJSON;

  # Run: nix-unit --extra-experimental-features flakes --flake .#legacyPackages.x86_64-linux.evalTests
  # Run: nix-unit --extra-experimental-features flakes --flake .#legacyPackages.x86_64-linux.evalTests-build-clan
  legacyPackages.evalTests-build-clan = import ./tests.nix {
    inherit lib;
    inherit (inputs) nixpkgs;
@@ -39,7 +38,20 @@ in
    nix-unit --eval-store "$HOME" \
      --extra-experimental-features flakes \
      ${inputOverrides} \
      --flake ${self}#legacyPackages.${system}.evalTests-build-clan
      --flake ${
        self.filter {
          include = [
            "flakeModules"
            "inventory.json"
            "lib/build-clan"
            "lib/default.nix"
            "lib/flake-module.nix"
            "lib/inventory"
            "machines"
            "nixosModules"
          ];
        }
      }#legacyPackages.${system}.evalTests-build-clan

    touch $out
  '';

@@ -8,10 +8,26 @@ let
in
{
  options = {
    # Required options
    directory = lib.mkOption {
      type = types.path;
    self = lib.mkOption {
      type = types.raw;
      default = self;
      defaultText = "Reference to the current flake";
      description = ''
        This is used to import external clan modules.
      '';
    };

    directory = lib.mkOption {
      type = types.coercedTo lib.types.raw (
        v:
        if lib.isAttrs v then
          lib.warn "It appears you set 'clan.directory = self'. Instead set 'clan.self = self'. 'clan.directory' expects a path" v
        else if v == null then
          throw "Please set either clan.self or clan.directory"
        else
          builtins.toString v
      ) lib.types.path;
      default = builtins.toString self;
      defaultText = "Root directory of the flake";
      description = ''
        The directory containing the clan.
@@ -53,6 +69,15 @@ in
      ```
      '';
    };

    templates = lib.mkOption {
      type = types.submodule { imports = [ ./templates/interface.nix ]; };
      default = { };
      description = ''
        Define Clan templates.
      '';
    };

    inventory = lib.mkOption {
      type = types.submodule { imports = [ ../inventory/build-inventory/interface.nix ]; };
      description = ''
@@ -96,11 +121,11 @@ in
      type = types.lazyAttrsOf types.raw;
      default = { };
    };

    # flake.clanInternals
    clanInternals = lib.mkOption {
      # Hide from documentation. Exposes internals to the cli.
      visible = false;
      # type = types.raw;
      # ClanInternals
      type = types.submodule {
        options = {
@@ -108,15 +133,20 @@ in
          # We don't specify the type here, for better performance.
          inventory = lib.mkOption { type = lib.types.raw; };
          inventoryValuesPrios = lib.mkOption { type = lib.types.raw; };
          # all exported clan templates from this clan
          templates = lib.mkOption { type = lib.types.raw; };
          # all exported clan modules from this clan
          modules = lib.mkOption { type = lib.types.raw; };
          # all inventory module schemas
          moduleSchemas = lib.mkOption { type = lib.types.raw; };
          inventoryFile = lib.mkOption { type = lib.types.raw; };
          # The machine 'imports' generated by the inventory per machine
          serviceConfigs = lib.mkOption { type = lib.types.raw; };
          inventoryClass = lib.mkOption { type = lib.types.raw; };
          # clan-core's modules
          clanModules = lib.mkOption { type = lib.types.raw; };
          source = lib.mkOption { type = lib.types.raw; };
          meta = lib.mkOption { type = lib.types.raw; };
          lib = lib.mkOption { type = lib.types.raw; };
          all-machines-json = lib.mkOption { type = lib.types.raw; };
          machines = lib.mkOption { type = lib.types.raw; };
          machinesFunc = lib.mkOption { type = lib.types.raw; };

@@ -1,3 +1,4 @@
# NixOS module
{
  config,
  clan-core,
@@ -41,10 +42,9 @@ let

  # map from machine name to service configuration
  # { ${machineName} :: Config }
  serviceConfigs = (
  inventoryClass = (
    buildInventory {
      inherit inventory;
      inherit directory;
      inherit inventory directory;
    }
  );

@@ -76,7 +76,7 @@ let
  (machines.${name} or { })
  # Inherit the inventory assertions ?
  # { inherit (mergedInventory) assertions; }
  { imports = serviceConfigs.${name} or [ ]; }
  { imports = inventoryClass.machines.${name}.machineImports or [ ]; }
  (
    {
      # Settings
@@ -96,12 +96,6 @@ let

      networking.hostName = lib.mkDefault name;

      # speeds up nix commands by using the nixpkgs from the host system (especially useful in VMs)
      nix.registry.nixpkgs.to = lib.mkDefault {
        type = "path";
        path = lib.mkDefault nixpkgs;
      };

      # For vars we need to override the system so we run vars
      # generators on the machine that runs `clan vars generate`. If a
      # user is using the `pkgsForSystem`, we don't set
@@ -167,9 +161,11 @@ let
    (builtins.fromJSON (builtins.readFile inventoryFile))
  else
    { };

in
{
  imports = [
    ./auto-imports.nix
    # Merge the inventory file
    {
      inventory = _: {
@@ -181,7 +177,7 @@ in
    {
      inventory.machines = lib.optionalAttrs (builtins.pathExists "${directory}/machines") (
        builtins.mapAttrs (_n: _v: { }) (
          (lib.filterAttrs (_: t: t == "directory") (builtins.readDir "${directory}/machines"))
          lib.filterAttrs (_: t: t == "directory") (builtins.readDir "${directory}/machines")
        )
      );
    }
@@ -189,7 +185,9 @@ in
      inventory.machines = lib.mapAttrs (_n: _: { }) config.machines;
    }
    # Merge the meta attributes from the buildClan function
    { inventory.modules = clan-core.clanModules; }
    {
      inventory.modules = clan-core.clanModules;
    }
    # config.inventory.meta <- config.meta
    { inventory.meta = config.meta; }
    # Set default for computed tags
@@ -200,7 +198,7 @@ in

  clanInternals = {
    moduleSchemas = clan-core.lib.modules.getModulesSchema config.inventory.modules;
    inherit serviceConfigs;
    inherit inventoryClass;
    inherit (clan-core) clanModules;
    inherit inventoryFile;
    inventoryValuesPrios =
@@ -208,8 +206,14 @@ in
      builtins.removeAttrs (clan-core.lib.values.getPrios { options = inventory.options; })
        # tags are freeformType which is not supported yet.
        [ "tags" ];

    modules = config.modules;
    templates = config.templates;
    inventory = config.inventory;
    meta = config.inventory.meta;
    lib = {
      inherit (clan-core.lib) select;
    };

    source = "${clan-core}";

lib/build-clan/templates/interface.nix (new file, 57 lines)

@@ -0,0 +1,57 @@
{
  lib,
  ...
}:
let
  inherit (lib) types;

  templateType = types.submodule (
    { name, ... }:
    {
      options.description = lib.mkOption {
        type = types.str;
        default = name;
        description = ''
          The name of the template.
        '';
      };

      options.path = lib.mkOption {
        type = types.path;
        description = ''
          Holds the path to the clan template.
        '';
      };
    }
  );
in
{
  options = {
    # clan.templates.clan
    clan = lib.mkOption {
      type = types.attrsOf templateType;
      default = { };
      description = ''
        Holds the different clan templates.
      '';
    };

    # clan.templates.disko
    disko = lib.mkOption {
      type = types.attrsOf templateType;
      default = { };
      description = ''
        Holds different disko templates.
      '';
    };

    # clan.templates.machine
    machine = lib.mkOption {
      type = types.attrsOf templateType;
      default = { };
      description = ''
        Holds the different machine templates.
      '';
    };
  };
}

@@ -10,24 +10,52 @@ let
  inherit lib nixpkgs clan-core;
  self = ./.;
};

# Shallowly force all attribute values to be evaluated.
shallowForceAllAttributes = lib.foldlAttrs (
  _acc: _name: value:
  lib.seq value true
) true;
in
#######
{
  test_only_required =
  test_missing_self =
    let
      config = evalClan {
      config = buildClan {
        meta.name = "test";
        imports = [ ./module.nix ];
      };
    in
    {
      expr = config.inventory ? meta;
      expr = shallowForceAllAttributes config;
      expectedError = {
        type = "ThrownError";
        msg = "A definition for option `directory' is not of type `absolute path*";
      };
    };

  test_only_required =
    let
      config = evalClan {
        self = {
          inputs = { };
          outPath = ./.;
        };
        meta.name = "test";
        imports = [ ./module.nix ];
      };
    in
    {
      expr = shallowForceAllAttributes config;
      expected = true;
    };

  test_all_simple =
    let
      config = evalClan {
        self = {
          inputs = { };
        };
        directory = ./.;
        machines = { };
        inventory = {
@@ -43,6 +71,10 @@ in
  test_outputs_clanInternals =
    let
      config = evalClan {
        self = {
          inputs = { };
        };
        directory = ./.;
        imports = [
          # What the user needs to specify
          {
@@ -68,6 +100,9 @@ in
  test_fn_simple =
    let
      result = buildClan {
        self = {
          inputs = { };
        };
        directory = ./.;
        meta.name = "test";
      };
@@ -84,6 +119,9 @@ in
  test_fn_extensiv_meta =
    let
      result = buildClan {
        self = {
          inputs = { };
        };
        directory = ./.;
        meta.name = "test";
        meta.description = "test";
@@ -104,6 +142,9 @@ in
  test_fn_clan_core =
    let
      result = buildClan {
        self = {
          inputs = { };
        };
        directory = ../../.;
        meta.name = "test-clan-core";
      };
@@ -119,6 +160,9 @@ in
  test_buildClan_all_machines =
    let
      result = buildClan {
        self = {
          inputs = { };
        };
        directory = ./.;
        meta.name = "test";
        inventory.machines.machine1.meta.name = "machine1";
@@ -138,6 +182,9 @@ in
  test_buildClan_specialArgs =
    let
      result = buildClan {
        self = {
          inputs = { };
        };
        directory = ./.;
        meta.name = "test";
        specialArgs.foo = "dream2nix";

@@ -21,4 +21,5 @@ in
  inherit lib;
  self = clan-core;
};
select = import ./select.nix;
}

@@ -6,11 +6,14 @@
let
  baseModule = {
    imports = (import (pkgs.path + "/nixos/modules/module-list.nix")) ++ [
      {
        nixpkgs.pkgs = pkgs;
        clan.core.name = "dummy";
        system.stateVersion = lib.version;
      }
      (
        { config, ... }:
        {
          nixpkgs.pkgs = pkgs;
          clan.core.name = "dummy";
          system.stateVersion = config.system.nixos.release;
        }
      )
    ];
  };

lib/filter-clan-core/flake-module.nix (new file, 18 lines)

@@ -0,0 +1,18 @@
{ self, ... }:
let
  nixFilter = import ./nix-filter.nix;
in
{
  flake.filter =
    {
      include ? [ ],
      exclude ? [ ],
    }:
    nixFilter.filter {
      inherit exclude;
      include = include ++ [
        "flake.nix"
      ];
      root = self;
    };
}

Some files were not shown because too many files have changed in this diff.