Compare commits

...

302 Commits

Author SHA1 Message Date
a-kenji
c7c400f51f wip 2024-04-24 10:42:34 +02:00
a-kenji
ea6bd8f41d checks 2024-04-24 10:42:34 +02:00
clan-bot
b702ca686e Merge pull request 'add: cd command' (#1265) from a-kenji-docs-patch-1 into main 2024-04-23 18:57:09 +00:00
a-kenji
acdb0a9b27 add: cd command 2024-04-23 20:52:41 +02:00
clan-bot
70ed0757a3 Merge pull request 'vm-manager: add empty list screen' (#1264) from hsjobeki-vm-manager/empty-splash into main 2024-04-23 14:22:47 +00:00
Johannes Kirschbauer
9778c432c2 vm-manager: add empty list screen 2024-04-23 16:18:10 +02:00
Johannes Kirschbauer
1da6a0c5a2 vm-manager: add empty list screen 2024-04-23 16:17:09 +02:00
Johannes Kirschbauer
5f5155023c vm-manager: add empty list screen 2024-04-23 16:16:48 +02:00
Johannes Kirschbauer
1366d0bcf6 WIP 2024-04-23 11:16:33 +02:00
clan-bot
351571a655 Merge pull request 'installer: fix qr code again' (#1260) from lassulus-HEAD into main 2024-04-22 19:10:59 +00:00
lassulus
3c02453705 installer: fix qr code again 2024-04-22 21:04:41 +02:00
clan-bot
7a74c86c70 Merge pull request 'clan-cli ssh: --json can be file or str' (#1259) from lassulus-HEAD into main 2024-04-22 18:56:19 +00:00
lassulus
4ae5b24d24 installer: make qrcode compatible with nixos-images 2024-04-22 20:49:35 +02:00
clan-bot
05b510230f Merge pull request 'clan-cli ssh: --json can be file or str' (#1258) from lassulus-HEAD into main 2024-04-22 18:27:35 +00:00
lassulus
9cb23b807c clan-cli ssh: fix qr code format 2024-04-22 20:21:44 +02:00
lassulus
0a1cc29abf clan-cli ssh: --json can be file or str 2024-04-22 20:13:17 +02:00
clan-bot
1a87df646d Merge pull request 'facts password-store: take path from variable' (#1256) from lassulus-HEAD into main 2024-04-21 17:16:32 +00:00
lassulus
4964415d34 facts password-store: take path from variable 2024-04-21 19:09:46 +02:00
clan-bot
9ac0839bd5 Merge pull request 'clan: clan machines install use verbose flag' (#1252) from a-kenji-clan/install into main 2024-04-19 14:45:29 +00:00
a-kenji
6becce81cb clan: clan machines install use verbose flag 2024-04-19 16:38:39 +02:00
clan-bot
8b1eae8c27 Merge pull request 'fix actual installer' (#1251) from image into main 2024-04-19 12:18:06 +00:00
Jörg Thalheim
5cfc9f7db4 fix actual installer 2024-04-19 14:13:02 +02:00
Jörg Thalheim
2c96e467fa deploy-docs: fix undefined variable 2024-04-19 13:59:39 +02:00
Mic92
3db2ecece6 Merge pull request 'deploy-docs: fix undefined variable' (#1250) from image into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1250
2024-04-19 11:59:35 +00:00
Jörg Thalheim
8d74983103 deploy-docs: fix undefined variable 2024-04-19 11:59:35 +00:00
clan-bot
81f7237a41 Merge pull request 'Change iso to nixos-images version' (#1249) from image into main 2024-04-19 11:49:51 +00:00
Jörg Thalheim
3ebc2e8be9 Change iso to nixos-images version 2024-04-19 13:43:40 +02:00
clan-bot
a810e96a20 Merge pull request 'docs: clan core ref' (#1246) from hsjobeki-main into main 2024-04-18 11:12:28 +00:00
Johannes Kirschbauer
1a99e033eb docs: clan core ref 2024-04-18 13:05:58 +02:00
clan-bot
6d2ec12cca Merge pull request 'clan: clan ssh fix password login' (#1245) from a-kenji-clan-cli/fix-ssh-passowrd into main 2024-04-17 19:52:00 +00:00
a-kenji
e81a7415d8 clan: clan ssh fix password login 2024-04-17 21:45:14 +02:00
clan-bot
d2dffe30a3 Merge pull request 'docs: add clan modules readme support' (#1244) from hsjobeki-main into main 2024-04-17 16:31:17 +00:00
Johannes Kirschbauer
a2074bb82b docs: init synthing & deltachat 2024-04-17 18:26:37 +02:00
clan-bot
f964304224 Merge pull request 'clan: clan ssh fix for new installer image' (#1243) from a-kenji-clan/fix-ssh into main 2024-04-17 16:25:38 +00:00
a-kenji
72811d0828 clan: clan ssh fix for new installer image 2024-04-17 18:20:40 +02:00
clan-bot
22b767466c Merge pull request 'pkgs: add metadata' (#1238) from a-kenji-pkgs/add-metadata into main 2024-04-17 11:35:19 +00:00
a-kenji
9f808b1bdb pkgs: add metadata
Closes #1151
2024-04-17 13:32:50 +02:00
clan-bot
ed9d65a91c Merge pull request 'docs: add clan modules readme support' (#1237) from hsjobeki-main into main 2024-04-17 10:58:17 +00:00
Johannes Kirschbauer
87559613ed docs: add clan modules readme support 2024-04-17 12:52:04 +02:00
kenji
0bae84b1ae Merge pull request 'clan-cli: fix clan ssh --json and --png' (#1234) from clan/ssh into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1234
2024-04-17 10:12:37 +00:00
a-kenji
498d29cca1 clan-cli: fix clan ssh --json and --png
This fixes `clan ssh` with the `--json` and `--png` flags.

It will now correctly use the actual fields that are present in the
generated json.

- probes if the ports are accessible
- if accessible will attempt a single ssh connection with the provided
password, in order to not spam ssh attempts

Fixes #1177
2024-04-17 10:12:37 +00:00
clan-bot
a33a76ecd2 Merge pull request 'rework multicast support' (#1236) from parallelism into main 2024-04-17 09:05:05 +00:00
Jörg Thalheim
8658e1694a rework multicast support 2024-04-17 10:57:17 +02:00
clan-bot
0dde758296 Merge pull request 'docs: split clan-core options into sub-pages' (#1235) from hsjobeki-main into main 2024-04-17 07:45:54 +00:00
Johannes Kirschbauer
5e33a0b3b8 docs: split clan-core options into sub-pages 2024-04-17 09:39:40 +02:00
clan-bot
c57cc5204c Merge pull request 'docs: generate api docs' (#1233) from hsjobeki-tutorials into main 2024-04-16 17:13:38 +00:00
Johannes Kirschbauer
9a3f27ea08 docs: generate api docs 2024-04-16 19:07:28 +02:00
clan-bot
b7f5e98db0 Merge pull request 'documentation: convert note to admonition' (#1227) from a-kenji-docs/set-target-host into main 2024-04-16 10:34:38 +00:00
a-kenji
1db0ace17b documentation: convert note to admonition 2024-04-16 12:28:32 +02:00
clan-bot
059e4efcdc Merge pull request 'docs: improve flake-parts configuration' (#1226) from hsjobeki-tutorials into main 2024-04-16 10:08:54 +00:00
Johannes Kirschbauer
581b48b518 docs: improve flake-parts configuration 2024-04-16 12:02:44 +02:00
clan-bot
f8b881c41e Merge pull request 'refactor facts command to regenerate facts for all machines' (#1223) from parallelism into main 2024-04-15 20:35:22 +00:00
Jörg Thalheim
dcad0d0d79 include machine name when generating secrets 2024-04-15 22:17:53 +02:00
Jörg Thalheim
a4b15d2ca2 flake-parts: fix merging of all-machines-json 2024-04-15 22:14:54 +02:00
Jörg Thalheim
f385e0e037 refactor facts command to regenerate facts for all machines 2024-04-15 22:14:54 +02:00
clan-bot
060e3baa08 Merge pull request 'documentation: improve presentation' (#1222) from a-kenji-docs/improve into main 2024-04-15 10:03:34 +00:00
a-kenji
2d42af3675 documentation: improve presentation 2024-04-15 11:57:23 +02:00
kenji
ca0c109b76 Merge pull request 'broken links replaced (and linking to doc pages (mkdocs rendered pages))' (#1220) from vater/clan-core:main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1220
2024-04-15 09:49:14 +00:00
vater
8ffe5a562f broken links removed (and linking to doc pages (mkdocs rendered pages)) 2024-04-14 22:38:05 +00:00
clan-bot
997b9d5426 Merge pull request 'documentation: add contribution standards' (#1219) from a-kenji-documentation/standards into main 2024-04-14 15:00:44 +00:00
a-kenji
8322d5dc27 documentation: add contribution standards 2024-04-14 16:54:23 +02:00
clan-bot
419936d1b4 Merge pull request 'documentation: small admonition nit' (#1216) from a-kenji-documentation/nits into main 2024-04-14 13:24:29 +00:00
a-kenji
a81da72ec4 documentation: small admonition nit 2024-04-14 15:18:16 +02:00
clan-bot
4ae5840078 Merge pull request 'documentation: improve configuration' (#1215) from a-kenji-docs/configuration into main 2024-04-14 13:05:18 +00:00
a-kenji
5b846c7c6f documentation: improve configuration 2024-04-14 15:00:41 +02:00
clan-bot
03c109c7f5 Merge pull request 'documenation: improve flake-parts' (#1214) from a-kenji-documentation/improve/flake-parts into main 2024-04-14 12:27:34 +00:00
a-kenji
a1f5024fde documenation: improve flake-parts 2024-04-14 14:23:11 +02:00
clan-bot
09a5fd31a6 Merge pull request 'docs: fix contributing casing' (#1213) from a-kenji-docs/fix-contrib into main 2024-04-14 11:29:15 +00:00
a-kenji
933401eb62 docs: fix contributing casing 2024-04-14 13:22:51 +02:00
clan-bot
b1c0b90fb0 Merge pull request 'documentation: improve secrets page' (#1212) from a-kenji-docs/secrets into main 2024-04-14 11:19:49 +00:00
a-kenji
4442ba777a documentation: improve secrets page 2024-04-14 13:15:26 +02:00
clan-bot
bc7c3ad782 Merge pull request 'documentation: improve networking section' (#1211) from a-kenji-docs/improve-networking into main 2024-04-14 10:49:36 +00:00
a-kenji
062de6866e documentation: improve networking section 2024-04-14 12:43:09 +02:00
clan-bot
1140a847ad Merge pull request 'documenation: improve styling' (#1210) from a-kenji-docs/installer-style into main 2024-04-14 10:35:15 +00:00
a-kenji
711d5d4319 documenation: improve styling 2024-04-14 12:30:56 +02:00
clan-bot
f8675949b9 Merge pull request 'documentation: improve installer wording' (#1209) from a-kenji-docs/installer-wording into main 2024-04-14 10:26:43 +00:00
a-kenji
18a961332e documentation: improve installer wording 2024-04-14 12:20:30 +02:00
clan-bot
d1457c424a Merge pull request 'documentation: improve templates section' (#1208) from a-kenji-docs/update-template into main 2024-04-14 09:58:50 +00:00
clan-bot
d717d9e90d Merge pull request 'documentation: fix typo' (#1207) from a-kenji-fix/typ into main 2024-04-14 09:56:20 +00:00
clan-bot
1bec39cfc6 Merge pull request 'documentation: disable copy for outputs' (#1206) from a-kenji-docs/outputs into main 2024-04-14 09:52:01 +00:00
a-kenji
3d0d124b8a documentation: improve templates section 2024-04-14 11:51:43 +02:00
a-kenji
6cfe735c69 documentation: fix typo 2024-04-14 11:46:53 +02:00
a-kenji
b28d7e45d3 documentation: disable copy for outputs 2024-04-14 11:45:20 +02:00
clan-bot
d7feff104e Merge pull request 'docs: fix migrated build directory' (#1205) from hsjobeki-tutorials into main 2024-04-13 17:45:24 +00:00
Johannes Kirschbauer
10ad6da359 docs: fix migrated build directory 2024-04-13 19:40:28 +02:00
clan-bot
57791ef52a Merge pull request 'docs: decompose configuration guide' (#1204) from hsjobeki-tutorials into main 2024-04-13 15:52:29 +00:00
Johannes Kirschbauer
3a9c84cb45 docs: decompose configuration guide 2024-04-13 15:46:38 +00:00
clan-bot
d2b7bd593b Merge pull request 'documentation: standardize on bash doccomments' (#1203) from a-kenji-docs/standardize-bash into main 2024-04-13 14:41:20 +00:00
a-kenji
7f89740d1b documentation: standardize on bash doccomments 2024-04-13 16:35:16 +02:00
clan-bot
b8d863240c Merge pull request 'docs: improve template' (#1201) from hsjobeki-tutorials into main 2024-04-13 14:33:28 +00:00
Johannes Kirschbauer
b2a1f8571c format 2024-04-13 16:29:24 +02:00
Johannes Kirschbauer
d021b2fb34 template: remove system 2024-04-13 16:00:01 +02:00
Johannes Kirschbauer
fa5058bce4 docs: reword 2024-04-13 15:55:15 +02:00
Johannes Kirschbauer
1978aae39f docs: improve template 2024-04-13 15:53:45 +02:00
clan-bot
6212492c89 Merge pull request 'documentation: make lsblk command copyable' (#1200) from a-kenji-docs/impr into main 2024-04-13 13:21:59 +00:00
a-kenji
4874500b8f documentation: make lsblk command copyable 2024-04-13 15:16:05 +02:00
clan-bot
579994aea6 Merge pull request 'hsjobeki-tutorials' (#1199) from hsjobeki-tutorials into main 2024-04-13 12:57:33 +00:00
Johannes Kirschbauer
2207fd8961 docs: add success block 2024-04-13 14:53:18 +02:00
Johannes Kirschbauer
ff99b10616 docs: rephrase tabs 2024-04-13 14:48:26 +02:00
Johannes Kirschbauer
babf7e3d12 docs: clean up directory structure 2024-04-13 14:48:26 +02:00
clan-bot
7d543da8c2 Merge pull request 'documentation: fix wording' (#1198) from a-kenji-docs/wording into main 2024-04-13 12:19:10 +00:00
a-kenji
f464eafe6c documentation: fix wording 2024-04-13 14:12:47 +02:00
clan-bot
a9347f4ed9 Merge pull request 'docs: move hardware / cloud computers behind tab selection' (#1197) from hsjobeki-tutorials into main 2024-04-13 12:03:57 +00:00
Johannes Kirschbauer
8de732239d docs: move hardware / cloud computers behind tab selection 2024-04-13 13:59:48 +02:00
clan-bot
e52a9f3a16 Merge pull request 'documentation: allow code content to be copied' (#1196) from a-kenji-documentation/impl/copy into main 2024-04-13 11:55:52 +00:00
clan-bot
579b800755 Merge pull request 'docs: remove custom css' (#1195) from hsjobeki-tutorials into main 2024-04-13 11:50:12 +00:00
a-kenji
92de72427e documentation: allow code content to be copied
Allow code content to be copied.

Fixes #1179
2024-04-13 13:48:25 +02:00
Johannes Kirschbauer
e74d0aa3d2 docs: remove custom css 2024-04-13 11:44:32 +00:00
clan-bot
1f11c67e23 Merge pull request 'documentation: improve wifi' (#1194) from a-kenji-docs/improve into main 2024-04-13 11:36:58 +00:00
a-kenji
077598b3ac documentation: improve wifi 2024-04-13 13:32:54 +02:00
clan-bot
35a5131b24 Merge pull request 'vm-manager: fix typos in error message' (#1193) from a-kenji-vm-manager/fix into main 2024-04-13 10:28:54 +00:00
clan-bot
1b77f746bc Merge pull request 'documentation: fix wording' (#1192) from a-kenji-docs/fix into main 2024-04-13 10:25:30 +00:00
clan-bot
275b61925a Merge pull request 'documentation: fix wording' (#1191) from a-kenji-docs/wording into main 2024-04-13 10:22:59 +00:00
clan-bot
e8e37bfb6c Merge pull request 'documentation: add missing character' (#1190) from a-kenji-docs/missing into main 2024-04-13 10:19:05 +00:00
a-kenji
b474de8137 vm-manager: fix typos in error message 2024-04-13 12:18:58 +02:00
clan-bot
57096ae0f4 Merge pull request 'documentation: add correct nix-shell invocation' (#1189) from a-kenji-docs/add-clan-cli into main 2024-04-13 10:14:45 +00:00
a-kenji
b5746906fb documentation: fix wording 2024-04-13 12:14:20 +02:00
a-kenji
ff035d34ed documentation: fix wording 2024-04-13 12:11:13 +02:00
clan-bot
9747d77461 Merge pull request 'docs: fix wording' (#1188) from a-kenji-docs/install into main 2024-04-13 10:10:51 +00:00
a-kenji
e58204a5a7 documentation: add missing character 2024-04-13 12:09:40 +02:00
a-kenji
985deb27a9 documentation: add correct nix-shell invocation 2024-04-13 12:08:35 +02:00
a-kenji
1c690c2a66 docs: fix wording 2024-04-13 12:06:29 +02:00
clan-bot
136b317def Merge pull request 'clan-vm-manager: fix wording in toast' (#1187) from a-kenji-clan-vm-manager/fix/toast into main 2024-04-13 10:02:03 +00:00
a-kenji
9f3fcaf68e clan-vm-manager: fix wording in toast 2024-04-13 11:55:58 +02:00
clan-bot
c4ef4b1950 Merge pull request 'docs: fix wording and typo' (#1185) from a-kenji-doc/fix/wording into main 2024-04-13 09:53:15 +00:00
a-kenji
42e653a647 docs: fix wording and typo 2024-04-13 11:41:00 +02:00
clan-bot
8d6659e60b Merge pull request 'clan-cli: fix typos' (#1183) from a-kenji-clan-cli/fix/typos into main 2024-04-12 15:47:32 +00:00
a-kenji
fff810ed43 clan-cli: fix typos 2024-04-12 17:40:54 +02:00
hsjobeki
2df2787989 Merge pull request 'docs: self host documentation with mkDocs' (#1176) from hsjobeki-tutorials into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1176
2024-04-12 15:14:43 +00:00
Johannes Kirschbauer
70cdf23875 docs: add theming 2024-04-12 17:13:11 +02:00
Jörg Thalheim
4d75feea65 deploy-docs: fail on error 2024-04-12 17:13:11 +02:00
Jörg Thalheim
c3f2c548a6 add deploy script for homepage 2024-04-12 17:13:11 +02:00
Jörg Thalheim
30663d563d fix build on macos 2024-04-12 17:13:11 +02:00
Johannes Kirschbauer
43102906aa disable module docs until migrated 2024-04-12 17:13:11 +02:00
Johannes Kirschbauer
445d547814 remove checks 2024-04-12 17:13:11 +02:00
Johannes Kirschbauer
28773725ec docs: fix links 2024-04-12 17:13:11 +02:00
Johannes Kirschbauer
ecd48df496 worklow: disable link checking, this is done by mkDocs 2024-04-12 17:13:11 +02:00
Valentin Gagarin
d4f10c34c4 fix up title 2024-04-12 17:13:11 +02:00
Valentin Gagarin
e04e4e4fdb make "Getting Started" the start page 2024-04-12 17:13:11 +02:00
Valentin Gagarin
60f2bf54c3 add instructions to live reload docs 2024-04-12 17:13:11 +02:00
Johannes Kirschbauer
1e08a454fb docs: self host documentation with mkDocs 2024-04-12 17:13:11 +02:00
clan-bot
f61a78a1cf Merge pull request 'docs: fix diskLayouts' (#1182) from a-kenji-documentation/fix/docs into main 2024-04-12 14:54:54 +00:00
a-kenji
f76e6cfd1e docs: fix diskLayouts 2024-04-12 16:44:00 +02:00
kenji
ae8e15dc5e Merge pull request 'fix installer' (#1175) from docs into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1175
2024-04-12 14:00:25 +00:00
a-kenji
26c71d9720 installer: add zstd compression 2024-04-12 14:00:25 +00:00
Jörg Thalheim
088e0d3eee fix installer 2024-04-12 14:00:25 +00:00
clan-bot
cb20f62486 Merge pull request 'docs: fix iso location' (#1174) from a-kenji-fix/docss into main 2024-04-12 13:15:29 +00:00
clan-bot
828d61fef5 Merge pull request 'docs: remove superfluous heading' (#1173) from a-kenji-fix/docs into main 2024-04-12 13:11:25 +00:00
a-kenji
75fc8fd35a docs: fix iso location 2024-04-12 15:06:01 +02:00
a-kenji
684cadebc3 docs: remove superfluous heading 2024-04-12 14:55:59 +02:00
Mic92
6ddd70e2be Merge pull request 'fix case when secrets are regenerated during update/install' (#1172) from docs into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1172
2024-04-12 12:51:27 +00:00
Jörg Thalheim
b3522b73aa fix case when secrets are regenerated during update/install 2024-04-12 14:46:51 +02:00
Mic92
573a462aee Merge pull request 'Rework and fix root-password module' (#1171) from docs into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1171
2024-04-12 12:00:43 +00:00
Jörg Thalheim
3f8ab35a19 rework root-password module 2024-04-12 12:00:43 +00:00
clan-bot
895f6fbc8a Merge pull request 'sops/compat: fix name reference' (#1170) from docs into main 2024-04-12 11:29:32 +00:00
Jörg Thalheim
6958da2d57 sops/compat: fix name reference 2024-04-12 13:23:52 +02:00
clan-bot
2e6e9b175e Merge pull request 'sops: fix secret path generation' (#1168) from docs into main 2024-04-12 11:19:56 +00:00
Jörg Thalheim
58446db110 sops: fix secret path generation 2024-04-12 13:12:31 +02:00
Mic92
396071a925 Merge pull request 'quickstart: fix link' (#1162) from docs into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1162
2024-04-12 10:09:28 +00:00
Jörg Thalheim
439714a242 quickstart: fix link 2024-04-12 10:09:28 +00:00
clan-bot
13e1aefb65 Merge pull request 'clan-vm-manager: Add install-desktop.sh' (#1161) from Qubasa-main into main 2024-04-12 09:54:09 +00:00
Qubasa
057d0defee clan-vm-manager: Add install-desktop.sh. Fix incorrect doku link 2024-04-12 11:47:18 +02:00
hsjobeki
7dcadd3025 Merge pull request 'docs: fix unsupported languages' (#1157) from hsjobeki-tutorials into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1157
2024-04-10 16:50:07 +00:00
Johannes Kirschbauer
d292f2de98 docs: fix unsupported languages 2024-04-10 18:40:51 +02:00
hsjobeki
6aec3ac73d Merge pull request 'docs: improve tutorials' (#1156) from hsjobeki-tutorials into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1156
2024-04-10 16:25:18 +00:00
Johannes Kirschbauer
e6acbadae6 docs: improve template & secrets 2024-04-10 16:25:18 +00:00
Mic92
00558923a5 Merge pull request 'factstore: secret backends now can return the path to a secret dynamically' (#1143) from networkd into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1143
2024-04-10 15:14:30 +00:00
Jörg Thalheim
82aafc287e also enable LLMNR 2024-04-10 15:14:30 +00:00
Jörg Thalheim
0d4e1f870b factstore: secret backends now can return the path to a secret dynamically
try to move path function out
2024-04-10 15:14:30 +00:00
Jörg Thalheim
faaf6649c5 fix multicast dns for ethernet 2024-04-10 15:14:30 +00:00
clan-bot
f33c3ece3d Merge pull request 'don't hard-code page weight' (#1118) from fricklerhandwerk/clan-core:doc-contributing into main 2024-04-10 13:35:50 +00:00
clan-bot
a5586d27f0 Merge pull request 'build iso with nixos-generators' (#1155) from lassulus-generators-iso into main 2024-04-10 13:20:00 +00:00
lassulus
70282b8d77 build iso with nixos-generators 2024-04-10 15:03:55 +02:00
clan-bot
6d050c0c10 Merge pull request 'docs: improve tutorials' (#1154) from hsjobeki-tutorials into main 2024-04-10 13:01:21 +00:00
Johannes Kirschbauer
87eb38a2c9 docs: move install stick into sperate docs 2024-04-10 14:54:41 +02:00
clan-bot
388c9c94e4 Merge pull request 'tutorials' (#1149) from tutorials into main 2024-04-10 11:43:07 +00:00
Johannes Kirschbauer
960e560d84 docs: fix links 2024-04-10 13:37:54 +02:00
Johannes Kirschbauer
d951c570f0 docs: improve getting started 2024-04-10 13:33:47 +02:00
Johannes Kirschbauer
adfdc96b64 docs: improve tutorials 2024-04-10 13:33:47 +02:00
clan-bot
f7a29ebaf8 Merge pull request 'templates: makes template working' (#1148) from hsjobeki-main into main 2024-04-10 10:38:56 +00:00
Johannes Kirschbauer
996fdd6c9c templates: makes template working 2024-04-10 12:28:41 +02:00
clan-bot
d3e42a3ad2 Merge pull request 'add root-password module' (#1147) from hsjobeki-main into main 2024-04-10 09:36:16 +00:00
Johannes Kirschbauer
292ac97067 add root-password module 2024-04-10 11:23:39 +02:00
Mic92
84f527fc39 Merge pull request 'docs: improve tutorials' (#1126) from tutorials into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1126
2024-04-10 09:14:03 +00:00
Johannes Kirschbauer
c4c843ba18 docs: improve tutorials 2024-04-10 09:14:03 +00:00
Mic92
915864f637 Merge pull request 'fix quickstart instructions' (#1146) from alejandrosame/clan-core:fix/quickstart into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1146
2024-04-10 09:13:13 +00:00
Alejandro Sanchez Medina
8ab9021c3d fix quickstart instructions
The guide instructs the reader to look for the PTUUID (partition ID) to fill
disk ID in the disklayout. This leads to an error as the partition gets deleted
and the UUID is no longer valid. The ID-LINK field is a unique ID provided by
the hardware manufacturer.
2024-04-09 20:57:36 +02:00
Mic92
36ce43bfcf Merge pull request 'enable multicast fore default dhcp network' (#1141) from networkd into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1141
2024-04-09 13:35:51 +00:00
Jörg Thalheim
a8718b92d4 enable multicast fore default dhcp network 2024-04-09 13:35:51 +00:00
clan-bot
5dac575be8 Merge pull request 'Set networking.hostName to clanCore.machineName' (#1140) from networkd into main 2024-04-09 13:14:44 +00:00
Jörg Thalheim
19a62817f2 sops: strip out clanCore.machineName instead of networking.hostName 2024-04-09 15:10:27 +02:00
Jörg Thalheim
1ac982fbdb set networking.hostName to clanCore.machineName 2024-04-09 15:10:08 +02:00
clan-bot
26146edbc5 Merge pull request 'drop custom systemd-networkd unit' (#1132) from networkd into main 2024-04-09 10:37:51 +00:00
Jörg Thalheim
e8ebfb2e2a drop custom systemd-networkd unit
We no longer use multicast dns. This one doesn't
conflict with nixos-generate-config.
2024-04-09 12:31:57 +02:00
clan-bot
3480b7d089 Merge pull request 'vms/run.py: refactor use kwargs instead of dataclass for function args' (#1125) from DavHau-dave into main 2024-04-09 07:36:02 +00:00
DavHau
fc73301ed9 vms/run.py: refactor use kwargs instead of dataclass for function args 2024-04-09 14:30:46 +07:00
clan-bot
30db1039d1 Merge pull request 'Automatic flake update - 2024-04-08T00:00+00:00' (#1124) from flake-update-2024-04-08 into main 2024-04-08 00:13:31 +00:00
Clan Merge Bot
8429ccccb3 update flake lock - 2024-04-08T00:00+00:00
Flake lock file updates:

• Updated input 'disko':
    'github:nix-community/disko/502241afa3de2a24865ddcbe4c122f4546e32092' (2024-03-28)
  → 'github:nix-community/disko/0a17298c0d96190ef3be729d594ba202b9c53beb' (2024-04-05)
• Updated input 'flake-parts':
    'github:hercules-ci/flake-parts/f7b3c975cf067e56e7cda6cb098ebe3fb4d74ca2' (2024-03-01)
  → 'github:hercules-ci/flake-parts/9126214d0a59633752a136528f5f3b9aa8565b7d' (2024-04-01)
• Updated input 'nixos-generators':
    'github:nix-community/nixos-generators/63194fceafbfe583a9eb7d16ab499adc0a6c0bc2' (2024-03-28)
  → 'github:nix-community/nixos-generators/0c15e76bed5432d7775a22e8d22059511f59d23a' (2024-04-04)
• Updated input 'nixos-generators/nixlib':
    'github:nix-community/nixpkgs.lib/b2a1eeef8c185f6bd27432b053ff09d773244cbc' (2024-03-24)
  → 'github:nix-community/nixpkgs.lib/90b1a963ff84dc532db92f678296ff2499a60a87' (2024-03-31)
• Updated input 'nixpkgs':
    'github:NixOS/nixpkgs/cd1c70d941d69d8d6425984ff8aefca9b28e861a' (2024-03-31)
  → 'github:NixOS/nixpkgs/298edc8f1e0dfffce67f50375c9f5952e04a6d02' (2024-04-07)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/99b1e37f9fc0960d064a7862eb7adfb92e64fa10' (2024-03-31)
  → 'github:Mic92/sops-nix/39191e8e6265b106c9a2ba0cfd3a4dafe98a31c6' (2024-04-07)
• Updated input 'treefmt-nix':
    'github:numtide/treefmt-nix/1810d51a015c1730f2fe05a255258649799df416' (2024-03-30)
  → 'github:numtide/treefmt-nix/49dc4a92b02b8e68798abd99184f228243b6e3ac' (2024-04-01)
2024-04-08 00:00:15 +00:00
clan-bot
d89edef9a1 Merge pull request 'improve starter template' (#1123) from Qubasa-better-template into main 2024-04-07 19:12:02 +00:00
Qubasa
1e0d73e8a9 Improve documentation 2024-04-07 21:07:02 +02:00
Qubasa
4faba7c8e1 template: Reverted systemd-boot in installer. EF02 partition on wrong second place 2024-04-07 17:09:44 +02:00
Qubasa
83346eeff5 template: Fixed incorrect nix code 2024-04-07 15:59:33 +02:00
Johannes Kirschbauer
55f3878e67 machine.md: clean up guide 2024-04-07 14:32:23 +02:00
Johannes Kirschbauer
49d83fd659 migrate.md: make docs more approachable 2024-04-07 12:53:15 +02:00
Qubasa
6a610c7a0b Improve documentation 2024-04-06 13:35:17 +02:00
Qubasa
033f7c67f4 Improve documentation 2024-04-06 13:34:40 +02:00
Qubasa
6d8d211968 Improve documentation 2024-04-06 12:34:49 +02:00
Qubasa
91dddc2281 Fix merge issue 2024-04-06 11:56:06 +02:00
Qubasa
a520116584 Improve documentation 2024-04-06 11:54:34 +02:00
Valentin Gagarin
0681f6bf7c docs: don't set page weight
since currently the web site simply dumps these pages, this makes it
impossible to control the item order
2024-04-05 21:52:56 +02:00
Jörg Thalheim
e68eba914e improve starter template 2024-04-05 18:55:14 +02:00
Mic92
fa74d1c0b3 Merge pull request 'flash-fixes' (#1116) from flash-fixes into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1116
2024-04-05 16:54:31 +00:00
Jörg Thalheim
1fd28f2f4c flake-parts: fixup type for specialArgs 2024-04-05 18:48:37 +02:00
Jörg Thalheim
818cc4d135 flash: expose mode option 2024-04-05 18:48:37 +02:00
Jörg Thalheim
c5e5a7edc7 grub: enable efi support by default 2024-04-05 18:47:11 +02:00
Qubasa
2e29c031ef Improved docs 2024-04-05 18:03:14 +02:00
Qubasa
f2ff815aa7 Add machines/my-machine/settings.json 2024-04-05 16:25:05 +02:00
Jörg Thalheim
1fc4739ee3 improve starter template 2024-04-05 13:08:32 +02:00
clan-bot
cb103c7772 Merge pull request 'add flake parts tutorial' (#1106) from flake-parts into main 2024-04-04 13:09:52 +00:00
Jörg Thalheim
7b230e2308 add flake parts tutorial 2024-04-04 15:05:08 +02:00
clan-bot
e78d0da30f Merge pull request 'flake-parts: fix clan-core reference' (#1105) from flake-parts into main 2024-04-04 11:42:32 +00:00
Jörg Thalheim
28e8af60cf flake-parts: fix clan-core reference 2024-04-04 13:38:15 +02:00
Mic92
2bc027cece Merge pull request 'Add flake-parts module' (#1104) from flake-parts into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1104
2024-04-04 11:08:08 +00:00
Jörg Thalheim
5ffae2070d drop unused test_backup_client machine 2024-04-04 11:24:30 +02:00
Jörg Thalheim
3212410704 add flake-parts module for clan 2024-04-03 16:45:06 +02:00
Jörg Thalheim
f7077e3540 flash: improve prompt if no disk is specified 2024-04-03 12:00:02 +02:00
Jörg Thalheim
1c1c143b8d docs: replace hashedPassword with initialHashedPassword 2024-04-03 12:00:02 +02:00
clan-bot
6e4786d08e Merge pull request 'Clan Manager: connect events with feedback system' (#1103) from hsjobeki-main into main 2024-04-03 09:41:03 +00:00
Johannes Kirschbauer
de91938760 Clan VM Manager: connect feedback with events 2024-04-03 11:36:33 +02:00
clan-bot
a6ba73c4a0 Merge pull request 'fix: readme typo' (#1102) from hsjobeki-main into main 2024-04-03 07:51:04 +00:00
Johannes Kirschbauer
a6f8f3fb58 fix: readme typo 2024-04-03 09:43:24 +02:00
clan-bot
69aa46a1d5 Merge pull request 'sops: fix decrypte path on machine' (#1091) from fact_refactor2 into main 2024-04-02 11:55:53 +00:00
Jörg Thalheim
8b4dbc60b5 clan/install: drop -t flag from nixos-anywhere 2024-04-02 13:50:24 +02:00
Jörg Thalheim
5b838c0d9c sops: fix decrypte path on machine 2024-04-02 13:50:24 +02:00
clan-bot
b342e3f991 Merge pull request 'syncthing: fix conflicting definition of fs.inotify.max_user_watches' (#1090) from DavHau-dave into main 2024-04-02 11:00:26 +00:00
DavHau
dd0dbbd29f syncthing: fix conflicting definition of fs.inotify.max_user_watches 2024-04-02 17:56:07 +07:00
clan-bot
7de7e25e78 Merge pull request 'flatpak: add gitignore' (#1089) from a-kenji-gi/flatpak into main 2024-04-02 10:54:34 +00:00
a-kenji
97be9f1c4d flatpak: add gitignore 2024-04-02 12:48:19 +02:00
clan-bot
439293a079 Merge pull request 'clan-cli: add a check for the flatpak sandbox' (#1088) from a-kenji-clan-cli/check/flatpak-sandbox into main 2024-04-02 10:24:49 +00:00
a-kenji
9bb4c8d094 clan-cli: add a check for the flatpak sandbox
Allows for differentiation between sandbox and non sandbox usage.
2024-04-02 12:19:48 +02:00
clan-bot
44d897e89f Merge pull request 'modules: add ergochat' (#1078) from a-kenji-add/ergo into main 2024-04-02 09:36:55 +00:00
a-kenji
1a40ce0a8f modules: add ergochat 2024-04-02 11:30:22 +02:00
clan-bot
ff0e66512f Merge pull request 'buildClan: add clan-core to specialArgs' (#1076) from DavHau-dave into main 2024-04-02 09:15:37 +00:00
clan-bot
78259ad61e Merge pull request 'modules: add thelounge' (#1075) from a-kenji-init/ergochat into main 2024-04-02 09:07:18 +00:00
DavHau
6f9216d3b6 buildClan: add clan-core to specialArgs 2024-04-02 16:03:45 +07:00
a-kenji
3bdface3db modules: add thelounge 2024-04-02 11:01:00 +02:00
clan-bot
388eff3baa Merge pull request 'cli/secrets: show hint in --help on how to retrieve a key' (#1074) from DavHau-dave into main 2024-04-02 07:40:54 +00:00
DavHau
16ae51105e cli/secrets: show hint in --help on how to retrieve a key 2024-04-02 14:36:01 +07:00
clan-bot
3428b76dcb Merge pull request 'moonlight: fix facts declaration' (#1072) from DavHau-dave into main 2024-04-01 10:48:25 +00:00
DavHau
1a3d5e1ad6 moonlight: fix facts declaration 2024-04-01 17:44:22 +07:00
clan-bot
d075b18653 Merge pull request 'Automatic flake update - 2024-04-01T00:00+00:00' (#1070) from flake-update-2024-04-01 into main 2024-04-01 00:11:28 +00:00
Clan Merge Bot
c9108d5460 update flake lock - 2024-04-01T00:00+00:00
Flake lock file updates:

• Updated input 'disko':
    'github:nix-community/disko/5d2d3e421ade554b19b4dbb0d11a04023378a330' (2024-03-24)
  → 'github:nix-community/disko/502241afa3de2a24865ddcbe4c122f4546e32092' (2024-03-28)
• Updated input 'nixos-generators':
    'github:nix-community/nixos-generators/2b3720c7af2271be8cee713cd2f69c5127b0a8e4' (2024-03-25)
  → 'github:nix-community/nixos-generators/63194fceafbfe583a9eb7d16ab499adc0a6c0bc2' (2024-03-28)
• Updated input 'nixpkgs':
    'github:NixOS/nixpkgs/57e6b3a9e4ebec5aa121188301f04a6b8c354c9b' (2024-03-25)
  → 'github:NixOS/nixpkgs/cd1c70d941d69d8d6425984ff8aefca9b28e861a' (2024-03-31)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/405987a66cce9a4a82f321f11b205982a7127c88' (2024-03-24)
  → 'github:Mic92/sops-nix/99b1e37f9fc0960d064a7862eb7adfb92e64fa10' (2024-03-31)
• Updated input 'treefmt-nix':
    'github:numtide/treefmt-nix/7ee5aaac63c30d3c97a8c56efe89f3b2aa9ae564' (2024-03-18)
  → 'github:numtide/treefmt-nix/1810d51a015c1730f2fe05a255258649799df416' (2024-03-30)
2024-04-01 00:00:15 +00:00
clan-bot
eeb703985e Merge pull request 'docs: render zola pages in clan-core flake' (#1069) from DavHau-dave into main 2024-03-31 05:37:36 +00:00
DavHau
492256ec54 docs: render zola pages in clan-core flake
This integrates the generated options docs part of our website into the clan-core project. This is better than having it in a separate repo because we want to learn about breakages as early as possible.

Changes which break the documentation should be blocked by this early on
2024-03-31 12:33:31 +07:00
clan-bot
62f201696d Merge pull request 'docs: render module options docs' (#1068) from DavHau-dave into main 2024-03-30 17:31:34 +00:00
DavHau
e0bdf1ce39 docs: render module options docs
fix errors while rendering some modules
2024-03-31 00:26:08 +07:00
clan-bot
ec105d8ef8 Merge pull request 'secrets: improve description of generator script' (#1067) from DavHau-dave into main 2024-03-30 10:22:40 +00:00
DavHau
72cc85cd2f secrets: improve description of generator script 2024-03-30 17:16:26 +07:00
clan-bot
0f73a6e1cf Merge pull request 'migrate secrets to new api' (#1064) from fact_refactor2 into main 2024-03-28 10:17:24 +00:00
Jörg Thalheim
65d116ec28 migrate secrets to new api 2024-03-28 11:02:14 +01:00
clan-bot
b10c4f5846 Merge pull request 'refactor clanCore.secrets -> clanCore.facts' (#1040) from facts_refactor2 into main 2024-03-27 15:20:21 +00:00
lassulus
a8d35d37e7 refactor clanCore.secrets -> clanCore.facts 2024-03-27 16:03:16 +01:00
clan-bot
8950c8d3bd Merge pull request 'clan-cli: Fix tmpdir leak and fix tests/temporary_dir inconsistencies' (#1063) from Qubasa-fix_tmpdir_leak2 into main 2024-03-27 14:58:20 +00:00
Qubasa
e6ad0cfbc1 clan-cli: Fix tmpdir leak and fix tests/temporary_dir inconsistencies 2024-03-27 15:51:52 +01:00
clan-bot
0676bf7283 Merge pull request 'checks: add check for rendering docs' (#1058) from DavHau-dave into main 2024-03-27 10:03:38 +00:00
DavHau
3771be2110 checks: add check for rendering docs 2024-03-27 16:59:42 +07:00
clan-bot
d59673e89a Merge pull request 'Improved README' (#1057) from Qubasa-improv_readme3 into main 2024-03-26 18:29:50 +00:00
Qubasa
946f026c23 Improved README 2024-03-26 19:20:00 +01:00
clan-bot
8715c3ef88 Merge pull request 'rewrite backups documentation' (#1055) from Mic92-main into main 2024-03-26 16:29:32 +00:00
Jörg Thalheim
0c21fcf2eb rewrite backups documentation 2024-03-26 17:21:06 +01:00
clan-bot
9a82f8cc8b Merge pull request 'localbackup: also create mountpoints' (#1053) from Mic92-main into main 2024-03-26 14:59:11 +00:00
Jörg Thalheim
e27e6e6102 localbackup: also create mountpoints 2024-03-26 15:53:13 +01:00
clan-bot
4ff262fd60 Merge pull request 'localbackup: rename mountHook/umountHook to preMountHook/postUnmountHook' (#1050) from Mic92-main into main 2024-03-26 14:02:40 +00:00
Jörg Thalheim
74b5f6c61a localbackup: rename mountHook/umountHook to preMountHook/postUnmountHook 2024-03-26 14:58:37 +01:00
clan-bot
553b8b8476 Merge pull request 'cli/ssh: allocate tty by default' (#1043) from Mic92-main into main 2024-03-26 12:18:27 +00:00
Jörg Thalheim
80abeef994 Revert "ssh: add interactive flag"
This reverts commit c5db14dea8.
2024-03-26 13:13:35 +01:00
Jörg Thalheim
7b8a49bf6c ssh: default tty to False
nix behaves weird when the terminal is interactive because
we are also do line buffering.
2024-03-26 13:05:11 +01:00
Jörg Thalheim
54f0526c5b update nixos-generators 2024-03-26 12:52:00 +01:00
Jörg Thalheim
10a12eb85c ruff: switch to check subcommand 2024-03-26 12:52:00 +01:00
Jörg Thalheim
c5db14dea8 ssh: add interactive flag 2024-03-26 12:51:46 +01:00
Jörg Thalheim
0e2cb172e6 cli/ssh: allocate tty by default
-t is only enabled when the local ssh command is also connected to a tty,
so it seems to be enabled by default.
2024-03-26 12:07:15 +01:00
clan-bot
a21f731536 Merge pull request 'localbackup: add missing config arg to submodule' (#1042) from Mic92-main into main 2024-03-26 10:47:35 +00:00
Jörg Thalheim
bd989085ac localbackup: add missing config arg to submodule 2024-03-26 11:41:03 +01:00
clan-bot
dca1eee3a3 Merge pull request 'documentation: fix grammer' (#1041) from a-kenji-fix/grammar into main 2024-03-25 15:10:10 +00:00
a-kenji
92b1f86b7e documentation: fix grammer 2024-03-25 16:03:18 +01:00
clan-bot
6055dbe123 Merge pull request 'documentation fixes for facts generate command' (#1038) from facts-generate into main 2024-03-25 14:38:30 +00:00
Jörg Thalheim
68ac6321ee docs/backup: update command needed to regenerate backups 2024-03-25 15:33:57 +01:00
Jörg Thalheim
270f906412 fix casing in facts generate logs 2024-03-25 15:33:57 +01:00
clan-bot
ffa1d9ca6c Merge pull request 'waypipe: fix the waypipe module' (#1037) from a-kenji-modules/waypipe/fix into main 2024-03-25 14:27:14 +00:00
a-kenji
187bebae47 waypipe: fix the waypipe module 2024-03-25 15:21:16 +01:00
clan-bot
a6f1fede97 Merge pull request 'localbackup: add regression test' (#1035) from localbackup into main 2024-03-25 13:00:14 +00:00
Mic92
e3c608c16d Merge pull request 'switch back to upstream waypipe' (#1025) from waypipe into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/1025
2024-03-25 12:54:16 +00:00
Jörg Thalheim
fee37dc1db bump nixpkgs 2024-03-25 13:36:30 +01:00
Jörg Thalheim
a886fd9b2d switch back to upstream waypipe 2024-03-25 13:34:02 +01:00
Jörg Thalheim
d291b1db63 localbackup: integrate into regression test 2024-03-25 13:26:42 +01:00
Jörg Thalheim
45212e2ba5 localbackup: add mounthooks 2024-03-25 13:26:42 +01:00
Jörg Thalheim
916e37eb26 localbackup: add pre/post exec hooks 2024-03-25 12:42:59 +01:00
Jörg Thalheim
58ae9d9cd0 localbackup: default to empty target list 2024-03-25 12:42:59 +01:00
clan-bot
62bef16092 Merge pull request 'Re-encrypt secrets after rotating users/machines keys' (#1034) from yubikey-support into main 2024-03-25 11:40:32 +00:00
Jörg Thalheim
0fa36252c2 re-encrypt secrets after rotating users/machines keys 2024-03-25 12:34:29 +01:00
Jörg Thalheim
b6d5f8a6ce docs/backup: extend documentation 2024-03-25 11:07:09 +01:00
Jörg Thalheim
cd9db02db0 add hint to use --force when a key already exists 2024-03-25 11:06:20 +01:00
130 changed files with 3871 additions and 1619 deletions

View File

@@ -10,11 +10,6 @@ jobs:
steps:
- uses: actions/checkout@v3
- run: nix run --refresh github:Mic92/nix-fast-build -- --no-nom --eval-workers 10
check-links:
runs-on: nix
steps:
- uses: actions/checkout@v3
- run: nix run --refresh --inputs-from .# nixpkgs#lychee .
checks-impure:
runs-on: nix
steps:

8
.gitignore vendored
View File

@@ -11,6 +11,7 @@ result*
/pkgs/clan-cli/clan_cli/webui/assets
nixos.qcow2
**/*.glade~
/docs/out
# python
__pycache__
@@ -20,3 +21,10 @@ __pycache__
.reports
.ruff_cache
htmlcov
# flatpak
.flatpak-builder
build
build-dir
repo
.env

21
CONTRIBUTING.md Normal file
View File

@@ -0,0 +1,21 @@
# Contributing to cLAN
## Live-reloading documentation
Enter the `docs` directory:
```shell-session
cd docs
```
Enter the development shell or enable `direnv`:
```shell-session
direnv allow
```
Run a local server:
```shell-session
mkdocs serve
```

View File

@@ -1,28 +1,45 @@
# cLAN Core Repository
Welcome to the cLAN Core Repository, the heart of the [clan.lol](https://clan.lol/) project! This monorepo houses all the essential packages, NixOS modules, CLI tools, and tests you need to contribute and work with the cLAN project.
Welcome to the cLAN Core Repository, the heart of the [clan.lol](https://clan.lol/) project! This monorepo is the foundation of Clan, a revolutionary open-source project aimed at restoring fun, freedom, and functionality to computing. Here, you'll find all the essential packages, NixOS modules, CLI tools, and tests needed to contribute to and work with the cLAN project. Clan leverages the Nix system to ensure reliability, security, and seamless management of digital environments, putting the power back into the hands of users.
## Getting Started
## Why Clan?
If you're new to cLAN and eager to dive in, start with our quickstart guide:
Our mission is simple: to democratize computing by providing tools that empower users, foster innovation, and challenge outdated paradigms. Clan represents our contribution to a future where technology serves humanity, not the other way around. By participating in Clan, you're joining a movement dedicated to creating a secure, user-empowered digital future.
- **Quickstart Guide**: Check out [quickstart.md](docs/admins/quickstart.md) to get up and running with cLAN in no time.
## Features of Clan
## Managing Secrets
- **Full-Stack System Deployment:** Utilize Clans toolkit alongside Nix's reliability to build and manage systems effortlessly.
- **Overlay Networks:** Secure, private communication channels between devices.
- **Virtual Machine Integration:** Seamless operation of VM applications within the main operating system.
- **Robust Backup Management:** Long-term, self-hosted data preservation.
- **Intuitive Secret Management:** Simplified encryption and password management processes.
Security is paramount, and cLAN provides guidelines for handling secrets effectively:
## Getting Started with cLAN
- **Secrets Management**: Learn how to manage secrets securely by reading [secrets-management.md](docs/admins/secrets-management.md).
If you're new to cLAN and eager to dive in, start with our quickstart guide and explore the core functionalities that Clan offers:
## Contributing to cLAN
- **Quickstart Guide**: Check out [getting started](https://docs.clan.lol/#starting-with-a-new-clan-project)<!-- [docs/site/index.md](docs/site/index.md) --> to get up and running with cLAN in no time.
We welcome contributions from the community, and we've prepared a comprehensive guide to help you get started:
### Managing Secrets
- **Contribution Guidelines**: Find out how to contribute and make a meaningful impact on the cLAN project by reading [contributing.md](docs/contributing/contributing.md).
In the Clan ecosystem, security is paramount. Learn how to handle secrets effectively:
Whether you're a newcomer or a seasoned developer, we look forward to your contributions and collaboration on the cLAN project. Let's build amazing things together!
- **Secrets Management**: Securely manage secrets by consulting [secrets](https://docs.clan.lol/getting-started/secrets/)<!-- [secrets.md](docs/site/getting-started/secrets.md) -->.
### Contributing to cLAN
The Clan project thrives on community contributions. We welcome everyone to contribute and collaborate:
- **Contribution Guidelines**: Make a meaningful impact by following the steps in [contributing](https://docs.clan.lol/contributing/contributing/)<!-- [contributing.md](docs/CONTRIBUTING.md) -->.
## Join the Revolution
Clan is more than a tool; it's a movement towards a better digital future. By contributing to the cLAN project, you're part of changing technology for the better, together.
### Community and Support
Connect with us and the Clan community for support and discussion:
- [Matrix channel](https://matrix.to/#/!djzOHBBBHnwQkgNgdV:matrix.org?via=blog.clan.lol) for live discussions.
- IRC bridges (coming soon) for real-time chat support.
### development environment
Set up `direnv` and `nix-direnv` and execute `direnv allow`.
To switch between different dev environments execute `select-shell`.

View File

@@ -1,17 +1,11 @@
{ self, ... }:
{
flake.clanInternals =
(self.lib.buildClan {
clanName = "testclan";
directory = ../..;
machines.test-backup = {
imports = [ self.nixosModules.test-backup ];
fileSystems."/".device = "/dev/null";
boot.loader.grub.device = "/dev/null";
};
}).clanInternals;
clan.machines.test-backup = {
imports = [ self.nixosModules.test-backup ];
fileSystems."/".device = "/dev/null";
boot.loader.grub.device = "/dev/null";
};
flake.nixosModules = {
test-backup =
{
pkgs,
@@ -30,6 +24,7 @@
{
imports = [
self.clanModules.borgbackup
self.clanModules.localbackup
self.clanModules.sshd
];
clan.networking.targetHost = "machine";
@@ -73,8 +68,7 @@
};
};
};
clanCore.secretStore = "vm";
clanCore.clanDir = ../..;
clanCore.facts.secretStore = "vm";
environment.systemPackages = [
self.packages.${pkgs.system}.clan-cli
@@ -102,6 +96,26 @@
};
clan.borgbackup.destinations.test-backup.repo = "borg@machine:.";
fileSystems."/mnt/external-disk" = {
device = "/dev/vdb"; # created in tests with virtualisation.emptyDisks
autoFormat = true;
fsType = "ext4";
options = [
"defaults"
"noauto"
];
};
clan.localbackup.targets.hdd = {
directory = "/mnt/external-disk";
preMountHook = ''
touch /run/mount-external-disk
'';
postUnmountHook = ''
touch /run/unmount-external-disk
'';
};
services.borgbackup.repos.test-backups = {
path = "/var/lib/borgbackup/test-backups";
authorizedKeys = [ (builtins.readFile ../lib/ssh/pubkey) ];
@@ -114,10 +128,13 @@
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
test-backups = (import ../lib/test-base.nix) {
name = "test-backups";
nodes.machine.imports = [
self.nixosModules.clanCore
self.nixosModules.test-backup
];
nodes.machine = {
imports = [
self.nixosModules.clanCore
self.nixosModules.test-backup
];
virtualisation.emptyDiskImages = [ 256 ];
};
testScript = ''
import json
@@ -130,16 +147,27 @@
# create
machine.succeed("clan --debug --flake ${self} backups create test-backup")
machine.wait_until_succeeds("! systemctl is-active borgbackup-job-test-backup >&2")
machine.succeed("test -f /run/mount-external-disk")
machine.succeed("test -f /run/unmount-external-disk")
# list
backup_id = json.loads(machine.succeed("borg-job-test-backup list --json"))["archives"][0]["archive"]
out = machine.succeed("clan --debug --flake ${self} backups list test-backup").strip()
print(out)
assert backup_id in out, f"backup {backup_id} not found in {out}"
localbackup_id = "hdd::/mnt/external-disk/snapshot.0"
assert localbackup_id in out, "localbackup not found in {out}"
# restore
## borgbackup restore
machine.succeed("rm -f /var/test-backups/somefile")
machine.succeed(f"clan --debug --flake ${self} backups restore test-backup borgbackup {out} >&2")
machine.succeed(f"clan --debug --flake ${self} backups restore test-backup borgbackup 'test-backup::borg@machine:.::{backup_id}' >&2")
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
machine.succeed("test -f /var/test-service/pre-restore-command")
machine.succeed("test -f /var/test-service/post-restore-command")
## localbackup restore
machine.succeed("rm -f /var/test-backups/somefile /var/test-service/{pre,post}-restore-command")
machine.succeed(f"clan --debug --flake ${self} backups restore test-backup localbackup '{localbackup_id}' >&2")
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
machine.succeed("test -f /var/test-service/pre-restore-command")
machine.succeed("test -f /var/test-service/post-restore-command")

View File

@@ -36,7 +36,7 @@
};
};
};
clanCore.secretStore = "vm";
clanCore.facts.secretStore = "vm";
clan.borgbackup.destinations.test.repo = "borg@localhost:.";
}

View File

@@ -16,6 +16,19 @@
{
checks =
let
# ensure all options can be rendered after importing clan into nixos
renderClanOptions =
let
docs = pkgs.nixosOptionsDoc {
options =
(pkgs.nixos {
imports = [ self.nixosModules.clanCore ];
clanCore.clanDir = ./.;
}).options;
warningsAreErrors = false;
};
in
docs.optionsJSON;
nixosTestArgs = {
# reference to nixpkgs for the current system
inherit pkgs;
@@ -45,7 +58,7 @@
self'.legacyPackages.homeConfigurations or { }
);
in
nixosTests // schemaTests // flakeOutputs;
{ inherit renderClanOptions; } // nixosTests // schemaTests // flakeOutputs;
legacyPackages = {
nixosTests =
let

View File

@@ -1,21 +1,12 @@
{ self, ... }:
let
clan = self.lib.buildClan {
clanName = "testclan";
directory = ../..;
machines = {
test_install_machine = {
clan.networking.targetHost = "test_install_machine";
imports = [ self.nixosModules.test_install_machine ];
};
};
};
in
{ self, lib, ... }:
{
flake.nixosConfigurations = {
inherit (clan.nixosConfigurations) test_install_machine;
clan.machines.test_install_machine = {
clan.networking.targetHost = "test_install_machine";
fileSystems."/".device = lib.mkDefault "/dev/null";
boot.loader.grub.device = lib.mkDefault "/dev/null";
imports = [ self.nixosModules.test_install_machine ];
};
flake.clanInternals = clan.clanInternals;
flake.nixosModules = {
test_install_machine =
{ lib, modulesPath, ... }:
@@ -43,10 +34,10 @@ in
let
dependencies = [
self
self.nixosConfigurations.test_install_machine.config.system.build.toplevel
self.nixosConfigurations.test_install_machine.config.system.build.diskoScript
self.nixosConfigurations.test_install_machine.config.system.clan.deployment.file
pkgs.stdenv.drvPath
clan.clanInternals.machines.x86_64-linux.test_install_machine.config.system.build.toplevel
clan.clanInternals.machines.x86_64-linux.test_install_machine.config.system.build.diskoScript
clan.clanInternals.machines.x86_64-linux.test_install_machine.config.system.clan.deployment.file
pkgs.nixos-anywhere
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };

View File

@@ -19,7 +19,7 @@
}
{
# secret override
clanCore.secrets.matrix-synapse.secrets.synapse-registration_shared_secret.path = "${./synapse-registration_shared_secret}";
clanCore.facts.services.matrix-synapse.secret.synapse-registration_shared_secret.path = "${./synapse-registration_shared_secret}";
services.nginx.virtualHosts."matrix.clan.test" = {
enableACME = lib.mkForce false;
forceSSL = lib.mkForce false;

View File

@@ -19,7 +19,7 @@
"syncthing.key".source = ./introducer/introducer_test_key;
"syncthing.api".source = ./introducer/introducer_test_api;
};
clanCore.secrets.syncthing.secrets."syncthing.api".path = "/etc/syncthing.api";
clanCore.facts.services.syncthing.secret."syncthing.api".path = "/etc/syncthing.api";
services.syncthing.cert = "/etc/syncthing.pam";
services.syncthing.key = "/etc/syncthing.key";
# Doesn't test zerotier!

View File

@@ -26,8 +26,9 @@ in
rsh = lib.mkOption {
type = lib.types.str;
default = "ssh -i ${
config.clanCore.secrets.borgbackup.secrets."borgbackup.ssh".path
config.clanCore.facts.services.borgbackup.secret."borgbackup.ssh".path
} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
defaultText = "ssh -i \${config.clanCore.facts.services.borgbackup.secret.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
description = "the rsh to use for the backup";
};
};
@@ -63,7 +64,7 @@ in
encryption = {
mode = "repokey";
passCommand = "cat ${config.clanCore.secrets.borgbackup.secrets."borgbackup.repokey".path}";
passCommand = "cat ${config.clanCore.facts.services.borgbackup.secret."borgbackup.repokey".path}";
};
prune.keep = {
@@ -74,10 +75,10 @@ in
};
}) cfg.destinations;
clanCore.secrets.borgbackup = {
facts."borgbackup.ssh.pub" = { };
secrets."borgbackup.ssh" = { };
secrets."borgbackup.repokey" = { };
clanCore.facts.services.borgbackup = {
public."borgbackup.ssh.pub" = { };
secret."borgbackup.ssh" = { };
secret."borgbackup.repokey" = { };
generator.path = [
pkgs.openssh
pkgs.coreutils

View File

@@ -0,0 +1,15 @@
Email-based instant messaging for Desktop.
!!! warning "Under construction"
!!! info
This module will automatically configure an email server on the machine for handling the e-mail messaging seamlessly.
## Features
- [x] **Email-based**: Uses any email account as its backend.
- [x] **End-to-End Encryption**: Supports Autocrypt to automatically encrypt messages.
- [x] **No Phone Number Required**: Uses your email address instead of a phone number.
- [x] **Cross-Platform**: Available on desktop and mobile platforms.
- [x] **Automatic Server Setup**: Includes your own DeltaChat server for enhanced control and privacy.
- [ ] **Bake a cake**: This module cannot bake a cake.

View File

@@ -6,33 +6,37 @@
example = "/dev/disk/by-id/ata-Samsung_SSD_850_EVO_250GB_S21PNXAGB12345";
};
};
config.disko.devices = {
disk = {
main = {
type = "disk";
device = config.clan.diskLayouts.singleDiskExt4.device;
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
};
ESP = {
size = "512M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
config = {
boot.loader.grub.efiSupport = lib.mkDefault true;
boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
disko.devices = {
disk = {
main = {
type = "disk";
device = config.clan.diskLayouts.singleDiskExt4.device;
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
ESP = {
size = "512M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};

14
clanModules/ergochat.nix Normal file
View File

@@ -0,0 +1,14 @@
_: {
services.ergochat = {
enable = true;
settings = {
datastore = {
autoupgrade = true;
path = "/var/lib/ergo/ircd.db";
};
};
};
clanCore.state.ergochat.folders = [ "/var/lib/ergo" ];
}

View File

@@ -8,19 +8,22 @@
];
};
borgbackup = ./borgbackup.nix;
ergochat = ./ergochat.nix;
deltachat = ./deltachat;
graphical = ./graphical.nix;
localbackup = ./localbackup.nix;
deltachat = ./deltachat.nix;
localsend = ./localsend.nix;
matrix-synapse = ./matrix-synapse.nix;
moonlight = ./moonlight.nix;
sunshine = ./sunshine.nix;
syncthing = ./syncthing.nix;
sshd = ./sshd.nix;
sunshine = ./sunshine.nix;
syncthing = ./syncthing;
root-password = ./root-password;
thelounge = ./thelounge.nix;
vm-user = ./vm-user.nix;
graphical = ./graphical.nix;
waypipe = ./waypipe.nix;
xfce = ./xfce.nix;
xfce-vm = ./xfce-vm.nix;
zt-tcp-relay = ./zt-tcp-relay.nix;
localsend = ./localsend.nix;
waypipe = ./waypipe.nix;
};
}

View File

@@ -8,7 +8,7 @@ let
cfg = config.clan.localbackup;
rsnapshotConfig = target: states: ''
config_version 1.2
snapshot_root ${target}
snapshot_root ${target.directory}
sync_first 1
cmd_cp ${pkgs.coreutils}/bin/cp
cmd_rm ${pkgs.coreutils}/bin/rm
@@ -17,6 +17,19 @@ let
cmd_logger ${pkgs.inetutils}/bin/logger
cmd_du ${pkgs.coreutils}/bin/du
cmd_rsnapshot_diff ${pkgs.rsnapshot}/bin/rsnapshot-diff
${lib.optionalString (target.preBackupHook != null) ''
cmd_preexec ${pkgs.writeShellScript "preexec.sh" ''
set -efu -o pipefail
${target.preBackupHook}
''}
''}
${lib.optionalString (target.postBackupHook != null) ''
cmd_postexec ${pkgs.writeShellScript "postexec.sh" ''
set -efu -o pipefail
${target.postBackupHook}
''}
''}
retain snapshot ${builtins.toString config.clan.localbackup.snapshots}
${lib.concatMapStringsSep "\n" (state: ''
${lib.concatMapStringsSep "\n" (folder: ''
@@ -34,7 +47,7 @@ in
{
options = {
name = lib.mkOption {
type = lib.types.str;
type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
default = name;
description = "the name of the backup job";
};
@@ -43,14 +56,45 @@ in
description = "the directory to backup";
};
mountpoint = lib.mkOption {
type = lib.types.nullOr (lib.types.strMatching "^[a-zA-Z0-9./_-]+$");
type = lib.types.nullOr lib.types.str;
default = null;
description = "mountpoint of the directory to backup. If set, the directory will be mounted before the backup and unmounted afterwards";
};
preMountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run before the directory is mounted";
};
postMountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run after the directory is mounted";
};
preUnmountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run before the directory is unmounted";
};
postUnmountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run after the directory is unmounted";
};
preBackupHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run before the backup";
};
postBackupHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run after the backup";
};
};
}
)
);
default = { };
description = "List of directories where backups are stored";
};
@@ -63,83 +107,111 @@ in
config =
let
setupMount =
mountpoint:
lib.optionalString (mountpoint != null) ''
mkdir -p ${lib.escapeShellArg mountpoint}
if mountpoint -q ${lib.escapeShellArg mountpoint}; then
umount ${lib.escapeShellArg mountpoint}
fi
mount ${lib.escapeShellArg mountpoint}
trap "umount ${lib.escapeShellArg mountpoint}" EXIT
'';
mountHook = target: ''
if [[ -x /run/current-system/sw/bin/localbackup-mount-${target.name} ]]; then
/run/current-system/sw/bin/localbackup-mount-${target.name}
fi
if [[ -x /run/current-system/sw/bin/localbackup-unmount-${target.name} ]]; then
trap "/run/current-system/sw/bin/localbackup-unmount-${target.name}" EXIT
fi
'';
in
lib.mkIf (cfg.targets != [ ]) {
environment.systemPackages = [
(pkgs.writeShellScriptBin "localbackup-create" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsnapshot
pkgs.coreutils
pkgs.util-linux
]
}
${lib.concatMapStringsSep "\n" (target: ''
(
echo "Creating backup '${target.name}'"
${setupMount target.mountpoint}
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target.directory (lib.attrValues config.clanCore.state))}" sync
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target.directory (lib.attrValues config.clanCore.state))}" snapshot
)
'') (builtins.attrValues cfg.targets)}
'')
(pkgs.writeShellScriptBin "localbackup-list" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.jq
pkgs.findutils
pkgs.coreutils
pkgs.util-linux
]
}
(${
lib.concatMapStringsSep "\n" (target: ''
lib.mkIf (cfg.targets != { }) {
environment.systemPackages =
[
(pkgs.writeShellScriptBin "localbackup-create" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsnapshot
pkgs.coreutils
pkgs.util-linux
]
}
${lib.concatMapStringsSep "\n" (target: ''
(
${setupMount target.mountpoint}
find ${lib.escapeShellArg target.directory} -mindepth 1 -maxdepth 1 -name "snapshot.*" -print0 -type d \
| jq -Rs 'split("\u0000") | .[] | select(. != "") | { "name": ("${target.mountpoint}::" + .)}'
${mountHook target}
echo "Creating backup '${target.name}'"
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target (lib.attrValues config.clanCore.state))}" sync
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target (lib.attrValues config.clanCore.state))}" snapshot
)
'') (builtins.attrValues cfg.targets)
}) | jq -s .
'')
(pkgs.writeShellScriptBin "localbackup-restore" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsync
pkgs.coreutils
pkgs.util-linux
pkgs.gawk
]
}
mountpoint=$(awk -F'::' '{print $1}' <<< $NAME)
backupname=''${NAME#$mountpoint::}
'') (builtins.attrValues cfg.targets)}
'')
(pkgs.writeShellScriptBin "localbackup-list" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.jq
pkgs.findutils
pkgs.coreutils
pkgs.util-linux
]
}
(${
lib.concatMapStringsSep "\n" (target: ''
(
${mountHook target}
find ${lib.escapeShellArg target.directory} -mindepth 1 -maxdepth 1 -name "snapshot.*" -print0 -type d \
| jq -Rs 'split("\u0000") | .[] | select(. != "") | { "name": ("${target.name}::" + .)}'
)
'') (builtins.attrValues cfg.targets)
}) | jq -s .
'')
(pkgs.writeShellScriptBin "localbackup-restore" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsync
pkgs.coreutils
pkgs.util-linux
pkgs.gawk
]
}
name=$(awk -F'::' '{print $1}' <<< $NAME)
backupname=''${NAME#$name::}
mkdir -p "$mountpoint"
if mountpoint -q "$mountpoint"; then
umount "$mountpoint"
fi
mount "$mountpoint"
trap "umount $mountpoint" EXIT
if command -v localbackup-mount-$name; then
localbackup-mount-$name
fi
if command -v localbackup-unmount-$name; then
trap "localbackup-unmount-$name" EXIT
fi
IFS=';' read -ra FOLDER <<< "$FOLDERS"
for folder in "''${FOLDER[@]}"; do
rsync -a "$backupname/${config.networking.hostName}$folder/" "$folder"
done
'')
];
if [[ ! -d $backupname ]]; then
echo "No backup found $backupname"
exit 1
fi
IFS=';' read -ra FOLDER <<< "$FOLDERS"
for folder in "''${FOLDER[@]}"; do
rsync -a "$backupname/${config.networking.hostName}$folder/" "$folder"
done
'')
]
++ (lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-mount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preMountHook != null) target.preMountHook}
${lib.optionalString (target.mountpoint != null) ''
if ! ${pkgs.util-linux}/bin/mountpoint -q ${lib.escapeShellArg target.mountpoint}; then
${pkgs.util-linux}/bin/mount -o X-mount.mkdir ${lib.escapeShellArg target.mountpoint}
fi
''}
${lib.optionalString (target.postMountHook != null) target.postMountHook}
''
) cfg.targets)
++ lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-unmount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preUnmountHook != null) target.preUnmountHook}
${lib.optionalString (
target.mountpoint != null
) "${pkgs.util-linux}/bin/umount ${lib.escapeShellArg target.mountpoint}"}
${lib.optionalString (target.postUnmountHook != null) target.postUnmountHook}
''
) cfg.targets;
clanCore.backups.providers.localbackup = {
# TODO list needs to run locally or on the remote machine

View File

@@ -17,28 +17,22 @@
package = lib.mkPackageOption pkgs "localsend" { };
};
imports =
if config.clan.localsend.enable then
[
{
clanCore.state.localsend.folders = [
"/var/localsend"
config.clan.localsend.defaultLocation
];
environment.systemPackages = [ config.clan.localsend.package ];
config = lib.mkIf config.clan.localsend.enable {
clanCore.state.localsend.folders = [
"/var/localsend"
config.clan.localsend.defaultLocation
];
environment.systemPackages = [ config.clan.localsend.package ];
networking.firewall.interfaces."zt+".allowedTCPPorts = [ 53317 ];
networking.firewall.interfaces."zt+".allowedUDPPorts = [ 53317 ];
networking.firewall.interfaces."zt+".allowedTCPPorts = [ 53317 ];
networking.firewall.interfaces."zt+".allowedUDPPorts = [ 53317 ];
#TODO: This is currently needed because there is no ipv6 multicasting support yet
#
systemd.network.networks."09-zerotier" = {
networkConfig = {
Address = "192.168.56.2/24";
};
};
}
]
else
[ ];
#TODO: This is currently needed because there is no ipv6 multicasting support yet
#
systemd.network.networks."09-zerotier" = {
networkConfig = {
Address = "192.168.56.2/24";
};
};
};
}

View File

@@ -54,14 +54,14 @@ in
systemd.services.matrix-synapse.serviceConfig.ExecStartPre = [
"+${pkgs.writeScript "copy_registration_shared_secret" ''
#!/bin/sh
cp ${config.clanCore.secrets.matrix-synapse.secrets.synapse-registration_shared_secret.path} /var/lib/matrix-synapse/registration_shared_secret.yaml
cp ${config.clanCore.facts.services.matrix-synapse.secret.synapse-registration_shared_secret.path} /var/lib/matrix-synapse/registration_shared_secret.yaml
chown matrix-synapse:matrix-synapse /var/lib/matrix-synapse/registration_shared_secret.yaml
chmod 600 /var/lib/matrix-synapse/registration_shared_secret.yaml
''}"
];
clanCore.secrets."matrix-synapse" = {
secrets."synapse-registration_shared_secret" = { };
clanCore.facts.services."matrix-synapse" = {
secret."synapse-registration_shared_secret" = { };
generator.path = with pkgs; [
coreutils
pwgen

View File

@@ -13,10 +13,10 @@ in
systemd.tmpfiles.rules = [
"d '/var/lib/moonlight' 0770 'user' 'users' - -"
"C '/var/lib/moonlight/moonlight.cert' 0644 'user' 'users' - ${
config.clanCore.secrets.moonlight.secrets."moonlight.cert".path or ""
config.clanCore.facts.services.moonlight.secret."moonlight.cert".path or ""
}"
"C '/var/lib/moonlight/moonlight.key' 0644 'user' 'users' - ${
config.clanCore.secrets.moonlight.secrets."moonlight.key".path or ""
config.clanCore.facts.services.moonlight.secret."moonlight.key".path or ""
}"
];
@@ -45,7 +45,7 @@ in
systemd.user.services.moonlight-join = {
description = "Join sunshine hosts";
script = ''${ms-accept}/bin/moonlight-sunshine-accept moonlight join --port ${builtins.toString defaultPort} --cert '${
config.clanCore.secrets.moonlight.facts."moonlight.cert".value or ""
config.clanCore.facts.services.moonlight.public."moonlight.cert".value or ""
}' --host fd2e:25da:6035:c98f:cd99:93e0:b9b8:9ca1'';
serviceConfig = {
Type = "oneshot";
@@ -68,10 +68,10 @@ in
};
};
clanCore.secrets.moonlight = {
secrets."moonlight.key" = { };
secrets."moonlight.cert" = { };
facts."moonlight.cert" = { };
clanCore.facts.services.moonlight = {
secret."moonlight.key" = { };
secret."moonlight.cert" = { };
public."moonlight.cert" = { };
generator.path = [
pkgs.coreutils
ms-accept

View File

@@ -0,0 +1,13 @@
Creates a root-password
!!! tip "This module sets the password for the root user (automatically)."
After the system was installed/deployed the following command can be used to display the root-password:
```bash
clan secrets get {machine_name}-password
```
---
See also: [Facts / Secrets](../../getting-started/secrets.md)

View File

@@ -0,0 +1,20 @@
{ pkgs, config, ... }:
{
users.mutableUsers = false;
users.users.root.hashedPasswordFile =
config.clanCore.facts.services.root-password.secret.password-hash.path;
sops.secrets."${config.clanCore.machineName}-password-hash".neededForUsers = true;
clanCore.facts.services.root-password = {
secret.password = { };
secret.password-hash = { };
generator.path = with pkgs; [
coreutils
xkcdpass
mkpasswd
];
generator.script = ''
xkcdpass --numwords 3 --delimiter - --count 1 > $secrets/password
cat $secrets/password | mkpasswd -s -m sha-512 > $secrets/password-hash
'';
};
}

View File

@@ -4,14 +4,14 @@
services.openssh.hostKeys = [
{
path = config.clanCore.secrets.openssh.secrets."ssh.id_ed25519".path;
path = config.clanCore.facts.services.openssh.secret."ssh.id_ed25519".path;
type = "ed25519";
}
];
clanCore.secrets.openssh = {
secrets."ssh.id_ed25519" = { };
facts."ssh.id_ed25519.pub" = { };
clanCore.facts.services.openssh = {
secret."ssh.id_ed25519" = { };
public."ssh.id_ed25519.pub" = { };
generator.path = [
pkgs.coreutils
pkgs.openssh

View File

@@ -97,10 +97,10 @@ in
systemd.tmpfiles.rules = [
"d '/var/lib/sunshine' 0770 'user' 'users' - -"
"C '/var/lib/sunshine/sunshine.cert' 0644 'user' 'users' - ${
config.clanCore.secrets.sunshine.secrets."sunshine.cert".path or ""
config.clanCore.facts.services.sunshine.secret."sunshine.cert".path or ""
}"
"C '/var/lib/sunshine/sunshine.key' 0644 'user' 'users' - ${
config.clanCore.secrets.sunshine.secrets."sunshine.key".path or ""
config.clanCore.facts.services.sunshine.secret."sunshine.key".path or ""
}"
];
@@ -117,8 +117,8 @@ in
RestartSec = "5s";
ReadWritePaths = [ "/var/lib/sunshine" ];
ReadOnlyPaths = [
(config.clanCore.secrets.sunshine.secrets."sunshine.key".path or "")
(config.clanCore.secrets.sunshine.secrets."sunshine.cert".path or "")
(config.clanCore.facts.services.sunshine.secret."sunshine.key".path or "")
(config.clanCore.facts.services.sunshine.secret."sunshine.cert".path or "")
];
};
wantedBy = [ "graphical-session.target" ];
@@ -137,7 +137,7 @@ in
startLimitIntervalSec = 500;
script = ''
${ms-accept}/bin/moonlight-sunshine-accept sunshine init-state --uuid ${
config.clanCore.secrets.sunshine.facts.sunshine-uuid.value or null
config.clanCore.facts.services.sunshine.public.sunshine-uuid.value or null
} --state-file /var/lib/sunshine/state.json
'';
serviceConfig = {
@@ -173,9 +173,9 @@ in
startLimitIntervalSec = 500;
script = ''
${ms-accept}/bin/moonlight-sunshine-accept sunshine listen --port ${builtins.toString listenPort} --uuid ${
config.clanCore.secrets.sunshine.facts.sunshine-uuid.value or null
config.clanCore.facts.services.sunshine.public.sunshine-uuid.value or null
} --state /var/lib/sunshine/state.json --cert '${
config.clanCore.secrets.sunshine.facts."sunshine.cert".value or null
config.clanCore.facts.services.sunshine.public."sunshine.cert".value or null
}'
'';
serviceConfig = {
@@ -187,11 +187,11 @@ in
wantedBy = [ "graphical-session.target" ];
};
clanCore.secrets.sunshine = {
secrets."sunshine.key" = { };
secrets."sunshine.cert" = { };
facts."sunshine-uuid" = { };
facts."sunshine.cert" = { };
clanCore.facts.services.sunshine = {
secret."sunshine.key" = { };
secret."sunshine.cert" = { };
public."sunshine-uuid" = { };
public."sunshine.cert" = { };
generator.path = [
pkgs.coreutils
ms-accept

View File

@@ -0,0 +1,34 @@
Syncthing is a free, open-source file synchronization application designed to allow users to synchronize files between multiple devices over the internet or local networks securely and privately.
It is an alternative to cloud-based file sharing services.
## Usage
We recommend configuring this module as a sync-service through the provided options, although it also provides a Web GUI that supports more usage scenarios.
## Features
- **Private and Secure**: Syncthing uses TLS encryption to secure data transfer between devices, ensuring that only the intended devices can read your data.
- **Decentralized**: No central server is involved in the data transfer. Each device communicates directly with others.
- **Open Source**: The source code is openly available for audit and contribution, fostering trust and continuous improvement.
- **Cross-Platform**: Syncthing supports multiple platforms including Windows, macOS, Linux, BSD, and Android.
- **Real-time Synchronization**: Changes made to files are synchronized in real-time across all connected devices.
- **Web GUI**: It includes a user-friendly web interface for managing devices and configurations. (`127.0.0.1:8384`)
## Configuration
- **Share Folders**: Select folders to share with connected devices and configure permissions and synchronization parameters.
!!! info
Clan automatically discovers other devices. Automatic discovery requires one machine to be an [introducer](#clan.syncthing.introducer)
If that is not the case you can add the other device by its Device ID manually.
You can find and share Device IDs under the "Add Device" button in the Web GUI. (`127.0.0.1:8384`)
## Troubleshooting
- **Sync Conflicts**: Resolve synchronization conflicts manually by reviewing file versions and modification times in the Web GUI (`127.0.0.1:8384`).
## Support
- **Documentation**: Extensive documentation is available on the [Syncthing website](https://docs.syncthing.net/).

View File

@@ -9,7 +9,8 @@
id = lib.mkOption {
type = lib.types.nullOr lib.types.str;
example = "BABNJY4-G2ICDLF-QQEG7DD-N3OBNGF-BCCOFK6-MV3K7QJ-2WUZHXS-7DTW4AS";
default = config.clanCore.secrets.syncthing.facts."syncthing.pub".value or null;
default = config.clanCore.facts.services.syncthing.public."syncthing.pub".value or null;
defaultText = "config.clanCore.facts.services.syncthing.public.\"syncthing.pub\".value";
};
introducer = lib.mkOption {
description = ''
@@ -63,8 +64,10 @@
}
];
# Activates inofify compatibility on syncthing
boot.kernel.sysctl."fs.inotify.max_user_watches" = lib.mkDefault 524288;
# Activates inotify compatibility on syncthing
# use mkOverride 900 here as it otherwise would collide with the default of the
# upstream nixos xserver.nix
boot.kernel.sysctl."fs.inotify.max_user_watches" = lib.mkOverride 900 524288;
services.syncthing = {
enable = true;
@@ -112,7 +115,7 @@
getPendingDevices = "/rest/cluster/pending/devices";
postNewDevice = "/rest/config/devices";
SharedFolderById = "/rest/config/folders/";
apiKey = config.clanCore.secrets.syncthing.secrets."syncthing.api".path or null;
apiKey = config.clanCore.facts.services.syncthing.secret."syncthing.api".path or null;
in
lib.mkIf config.clan.syncthing.autoAcceptDevices {
description = "Syncthing auto accept devices";
@@ -154,7 +157,7 @@
systemd.services.syncthing-init-api-key =
let
apiKey = config.clanCore.secrets.syncthing.secrets."syncthing.api".path or null;
apiKey = config.clanCore.facts.services.syncthing.secret."syncthing.api".path or null;
in
lib.mkIf config.clan.syncthing.autoAcceptDevices {
description = "Set the api key";
@@ -176,11 +179,11 @@
};
};
clanCore.secrets.syncthing = {
secrets."syncthing.key" = { };
secrets."syncthing.cert" = { };
secrets."syncthing.api" = { };
facts."syncthing.pub" = { };
clanCore.facts.services.syncthing = {
secret."syncthing.key" = { };
secret."syncthing.cert" = { };
secret."syncthing.api" = { };
public."syncthing.pub" = { };
generator.path = [
pkgs.coreutils
pkgs.gnugrep

15
clanModules/thelounge.nix Normal file
View File

@@ -0,0 +1,15 @@
_: {
  services.thelounge = {
    enable = true;
    public = true;
    extraConfig = {
      prefetch = true;
      defaults = {
        port = 6667;
        tls = false;
      };
    };
  };
  # Register the service's data directory with clan's state/backup handling.
  # Fix: the state key was misspelled "thelounde"; it must match the service
  # name so backups pick up /var/lib/thelounge.
  clanCore.state.thelounge.folders = [ "/var/lib/thelounge" ];
}

View File

@@ -68,7 +68,7 @@
SDL_VIDEODRIVER=wayland
'';
script = ''
${lib.getExe config.clanCore.clanPkgs.waypipe} \
${lib.getExe pkgs.waypipe} \
${lib.escapeShellArgs config.clan.services.waypipe.flags} \
${lib.escapeShellArgs config.clan.services.waypipe.command}
'';

6
docs/.envrc Normal file
View File

@@ -0,0 +1,6 @@
source_up
watch_file $(find ./nix -name "*.nix" -printf '"%p" ')
# Because we depend on nixpkgs sources, uploading to builders takes a long time
use flake .#docs --builders ''

1
docs/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
/site/reference

View File

@@ -153,3 +153,9 @@ If you need to inspect the Nix sandbox while running tests, follow these steps:
cntr exec -w your_sandbox_name
psgrep -a -x your_python_process_name
```
# Standards
Every new module name should be in kebab-case.
Every fact definition should, where possible, be in kebab-case.

View File

@@ -1,10 +0,0 @@
+++
title = "Admin Documentation"
description = "Documentation administrators creating or managing cLANs"
date = 2025-05-01T19:00:00+00:00
updated = 2021-05-01T19:00:00+00:00
template = "docs/section.html"
weight = 15
sort_by = "title"
draft = false
+++

View File

@@ -1,79 +0,0 @@
# Backups
When self-hosting services, it's important to have a robust backup and restore strategy.
Therefore clan comes with a backup integration based on [borgbackup](https://www.borgbackup.org/).
More backup backends may come in future as clan provides an interchangeable interface on top of the backup implementation.
# Getting started with borgbackup
Borgbackup consists of two components a backup repository that can be hosted on one machine and contains the backup
and a backup client that will push it's data to the backup repository.
## Borgbackup client
First you need to specify the remote server to backup to. Replace `hostname` with a reachable dns or ip address.
```nix
{
clan.borgbackup.destinations = {
myhostname = {
repo = "borg@hostname:/var/lib/borgbackup/myhostname";
};
};
}
```
Services in clan can specify custom folders that need a backup by setting `clanCore.state.<service>.folders` option.
As a user you can also append to the list by adding your own directories to be backed up i.e.:
```nix
{ clanCore.state.userdata.folders = [ "/home" "/root" ]; }
```
Than run `clan secrets generate <yourmachine>` replacing `<yourmachine>` with the actual machine name.
This will generate the backup borg credentials and ssh keys for accessing the borgbackup repository.
Your ssh public key will be stored in the root of the repository here at this location `./machines/<yourmachine>/facts/borgbackup.ssh.pub`.
We need this for the next step.
## Borgbackup repository
In the next step we are going to set up the backup server.
Choose here a machine with sufficient disk space.
The machine needs to have the ssh daemon enabled as it is used in borgbackup for accessing the backup repository.
Add the following configuration to your backup server:
```nix
{
openssh.services.enable = true;
services.borgbackup.repos = {
myhostname = {
path = "/var/lib/borgbackup/myhostname";
authorizedKeys = [
(builtins.readFile ./machines/myhostname/facts/borgbackup.ssh.pub)
];
};
};
}
```
Replace `myhostname` with the name of the machine you want to backup. The path to the public key needs to be relative to the
configuration file, so you may have to adapt it if the configuration is not in the root directory of your clan flake.
Afterwards run `clan machines update` to update both the borgbackup server and the borgbackup client.
By default the backup is scheduled every night at 01:00 midnight. If machines are not online around this time,
they will attempt to run the backup once they come back.
When the next backup is scheduled, can be inspected like this on the device:
```
$ systemctl list-timers | grep -E 'NEXT|borg'
NEXT LEFT LAST PASSED UNIT ACTIVATES
Thu 2024-03-14 01:00:00 CET 17h Wed 2024-03-13 01:00:00 CET 6h ago borgbackup-job-myhostname.timer borgbackup-job-myhostname.service
```
```
```

View File

@@ -1,69 +0,0 @@
# cLAN config
`clan config` allows you to manage your nixos configuration via the terminal.
Similar as how `git config` reads and sets git options, `clan config` does the same with your nixos options
It also supports auto completion making it easy to find the right options.
## Set up clan-config
Add the clan tool to your flake inputs:
```
clan.url = "git+https://git.clan.lol/clan/clan-core";
```
and inside the mkFlake:
```
imports = [
inputs.clan.flakeModules.clan-config
];
```
Add an empty config file and add it to git
```command
echo "{}" > ./clan-settings.json
git add ./clan-settings.json
```
Import the clan-config module into your nixos configuration:
```nix
{
imports = [
# clan-settings.json is located in the same directory as your flake.
# Adapt the path if necessary.
(builtins.fromJSON (builtins.readFile ./clan-settings.json))
];
}
```
Make sure your nixos configuration is set a default
```nix
{self, ...}: {
flake.nixosConfigurations.default = self.nixosConfigurations.my-machine;
}
```
Use all inputs provided by the clan-config devShell in your own devShell:
```nix
{ ... }: {
perSystem = { pkgs, self', ... }: {
devShells.default = pkgs.mkShell {
inputsFrom = [ self'.devShells.clan-config ];
# ...
};
};
}
```
re-load your dev-shell to make the clan tool available.
```command
clan config --help
```

View File

@@ -1,138 +0,0 @@
# Managing NixOS Machines
## Add Your First Machine
To start managing a new machine, use the following commands to create and then list your machines:
```shellSession
$ clan machines create my-machine
$ clan machines list
my-machine
```
## Configure Your Machine
In the example below, we demonstrate how to add a new user named `my-user` and set a password. This user will be configured to log in to the machine `my-machine`.
### Creating a New User
```shellSession
# Add a new user
$ clan config --machine my-machine users.users.my-user.isNormalUser true
# Set a password for the user
$ clan config --machine my-machine users.users.my-user.hashedPassword $(mkpasswd)
```
_Note: The `$(mkpasswd)` command generates a hashed password. Ensure you have the `mkpasswd` utility installed or use an alternative method to generate a secure hashed password._
## Test Your Machine Configuration Inside a VM
Before deploying your configuration to a live environment, you can run a virtual machine (VM) to test the settings:
```shellSession
$ clan vms run my-machine
```
This command run a VM based on the configuration of `my-machine`, allowing you to verify changes in a controlled environment.
## Installing a New Machine
Clan CLI, in conjunction with [nixos-anywhere](https://github.com/nix-community/nixos-anywhere), provides a seamless method for installing NixOS on various machines.
This process involves preparing a suitable hardware and disk partitioning configuration and ensuring the target machine is accessible via SSH.
### Prerequisites
- A running Linux system with SSH on the target machine is required. This is typically pre-configured for many server providers.
- For installations on physical hardware, create a NixOS installer image and transfer it to a bootable USB drive as described below.
## Creating a Bootable USB Drive on Linux
To create a bootable USB flash drive with the NixOS installer:
1. **Build the Installer Image**:
```shellSession
$ nix build git+https://git.clan.lol/clan/clan-core.git#install-iso
```
2. **Prepare the USB Flash Drive**:
- Insert your USB flash drive into your computer.
- Identify your flash drive with `lsblk`. Look for the device with a matching size.
- Ensure all partitions on the drive are unmounted. Replace `sdX` in the command below with your device identifier (like `sdb`, etc.):
```shellSession
sudo umount /dev/sdX*
```
3. **Write the Image to the USB Drive**:
- Use the `dd` utility to write the NixOS installer image to your USB drive:
```shellSession
sudo dd bs=4M conv=fsync oflag=direct status=progress if=./result/stick.raw of=/dev/sdX
```
4. **Boot and Connect**:
- After writing the installer to the USB drive, use it to boot the target machine.
- The installer will display an IP address and a root password, which you can use to connect via SSH.
### Finishing the installation
With the target machine running Linux and accessible via SSH, execute the following command to install NixOS on the target machine, replacing `<target_host>` with the machine's hostname or IP address:
```shellSession
$ clan machines install my-machine <target_host>
```
## Update Your Machines
Clan CLI enables you to remotely update your machines over SSH. This requires setting up a target address for each target machine.
### Setting the Target Host
Replace `host_or_ip` with the actual hostname or IP address of your target machine:
```shellSession
$ clan config --machine my-machine clan.networking.targetHost root@host_or_ip
```
_Note: The use of `root@` in the target address implies SSH access as the root user.
Ensure that the root login is secured and only used when necessary._
### Updating Machine Configurations
Execute the following command to update the specified machine:
```shellSession
$ clan machines update my-machine
```
You can also update all configured machines simultaneously by omitting the machine name:
```shellSession
$ clan machines update
```
### Setting a Build Host
If the machine does not have enough resources to run the NixOS evaluation or build itself,
it is also possible to specify a build host instead.
During an update, the cli will ssh into the build host and run `nixos-rebuild` from there.
```shellSession
$ clan config --machine my-machine clan.networking.buildHost root@host_or_ip
```
### Excluding a machine from `clan machine update`
To exclude machines from beeing updated when running `clan machines update` without any machines specified,
one can set the `clan.deployment.requireExplicitUpdate` option to true:
```shellSession
$ clan config --machine my-machine clan.deployment.requireExplicitUpdate true
```
This is useful for machines that are not always online or are not part of the regular update cycle.

View File

@@ -1,135 +0,0 @@
# Initializing a New Clan Project
## Create a new flake
1. To start a new project, execute the following command to add the clan cli to your shell:
```shellSession
$ nix shell git+https://git.clan.lol/clan/clan-core
```
2. Then use the following commands to initialize a new clan-flake:
```shellSession
$ clan flake create my-clan
```
This action will generate two primary files: `flake.nix` and `.clan-flake`.
```shellSession
$ ls -la
drwx------ joerg users 5 B a minute ago ./
drwxrwxrwt root root 139 B 12 seconds ago ../
.rw-r--r-- joerg users 77 B a minute ago .clan-flake
.rw-r--r-- joerg users 4.8 KB a minute ago flake.lock
.rw-r--r-- joerg users 242 B a minute ago flake.nix
```
### Understanding the .clan-flake Marker File
The `.clan-flake` marker file serves an optional purpose: it helps the `clan-cli` utility locate the project's root directory.
If `.clan-flake` is missing, `clan-cli` will instead search for other indicators like `.git`, `.hg`, `.svn`, or `flake.nix` to identify the project root.
## What's next
After creating your flake, you can check out how to add [new machines](./machines.md)
---
# Migrating Existing NixOS Configuration Flake
Absolutely, let's break down the migration step by step, explaining each action in detail:
#### Before You Begin
1. **Backup Your Current Configuration**: Always start by making a backup of your current NixOS configuration to ensure you can revert if needed.
```shellSession
$ cp -r /etc/nixos ~/nixos-backup
```
2. **Update Flake Inputs**: Add a new input for the `clan-core` dependency:
```nix
inputs.clan-core = {
url = "git+https://git.clan.lol/clan/clan-core";
# Don't do this if your machines are on nixpkgs stable.
inputs.nixpkgs.follows = "nixpkgs";
};
```
- `url`: Specifies the Git repository URL for Clan Core.
- `inputs.nixpkgs.follows`: Tells Nix to use the same `nixpkgs` input as your main input (in this case, it follows `nixpkgs`).
3. **Update Outputs**: Then modify the `outputs` section of your `flake.nix` to adapt to Clan Core's new provisioning method. The key changes are as follows:
Add `clan-core` to the output
```diff
- outputs = { self, nixpkgs, }:
+ outputs = { self, nixpkgs, clan-core }:
```
Previous configuration:
```nix
{
nixosConfigurations.example-desktop = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
./configuration.nix
];
[...]
};
}
```
After change:
```nix
let clan = clan-core.lib.buildClan {
# this needs to point at the repository root
directory = self;
specialArgs = {};
clanName = "NEEDS_TO_BE_UNIQUE"; # TODO: Changeme
machines = {
example-desktop = {
nixpkgs.hostPlatform = "x86_64-linux";
imports = [
./configuration.nix
];
};
};
};
in { inherit (clan) nixosConfigurations clanInternals; }
```
- `nixosConfigurations`: Defines NixOS configurations, using Clan Cores `buildClan` function to manage the machines.
- Inside `machines`, a new machine configuration is defined (in this case, `example-desktop`).
- Inside `example-desktop` which is the target machine hostname, `nixpkgs.hostPlatform` specifies the host platform as `x86_64-linux`.
- `clanInternals`: Is required to enable evaluation of the secret generation/upload script on every architecture
- `clanName`: Is required and needs to be globally unique, as else we have a cLAN name clash
4. **Rebuild and Switch**: Rebuild your NixOS configuration using the updated flake:
```shellSession
$ sudo nixos-rebuild switch --flake .
```
- This command rebuilds and switches to the new configuration. Make sure to include the `--flake .` argument to use the current directory as the flake source.
5. **Test Configuration**: Before rebooting, verify that your new configuration builds without errors or warnings.
6. **Reboot**: If everything is fine, you can reboot your system to apply the changes:
```shellSession
$ sudo reboot
```
7. **Verify**: After the reboot, confirm that your system is running with the new configuration, and all services and applications are functioning as expected.
By following these steps, you've successfully migrated your NixOS Flake configuration to include the `clan-core` input and adapted the `outputs` section to work with Clan Core's new machine provisioning method.
## What's next
After creating your flake, you can check out how to add [new machines](./machines.md)

View File

@@ -1,69 +0,0 @@
# ZeroTier Configuration with NixOS in Clan
This guide provides detailed instructions for configuring
[ZeroTier VPN](https://zerotier.com) within Clan. Follow the
outlined steps to set up a machine as a VPN controller (`<CONTROLLER>`) and to
include a new machine into the VPN.
## 1. Setting Up the VPN Controller
The VPN controller is initially essential for providing configuration to new
peers. Post the address allocation, the controller's continuous operation is not
crucial.
### Instructions:
1. **Designate a Machine**: Label a machine as the VPN controller in the clan,
referred to as `<CONTROLLER>` henceforth in this guide.
2. **Add Configuration**: Input the below configuration to the NixOS
configuration of the controller machine:
```nix
clan.networking.zerotier.controller = {
enable = true;
public = true;
};
```
3. **Update the Controller Machine**: Execute the following:
```console
$ clan machines update <CONTROLLER>
```
Your machine is now operational as the VPN controller.
## 2. Integrating a New Machine to the VPN
To introduce a new machine to the VPN, adhere to the following steps:
### Instructions:
1. **Update Configuration**: On the new machine, incorporate the below to its
configuration, substituting `<CONTROLLER>` with the controller machine name:
```nix
{ config, ... }: {
clan.networking.zerotier.networkId = builtins.readFile (config.clanCore.clanDir + "/machines/<CONTROLLER>/facts/zerotier-network-id");
}
```
2. **Update the New Machine**: Execute:
```console
$ clan machines update <NEW_MACHINE>
```
Replace `<NEW_MACHINE>` with the designated new machine name.
3. **Retrieve the ZeroTier ID**: On the `new_machine`, execute:
```console
$ sudo zerotier-cli info
```
Example Output: `200 info d2c71971db 1.12.1 OFFLINE`, where `d2c71971db` is
the ZeroTier ID.
4. **Authorize the New Machine on Controller**: On the controller machine,
execute:
```console
$ sudo zerotier-members allow <ID>
```
Substitute `<ID>` with the ZeroTier ID obtained previously.
5. **Verify Connection**: On the `new_machine`, re-execute:
```console
$ sudo zerotier-cli info
```
The status should now be "ONLINE" e.g., `200 info 47303517ef 1.12.1 ONLINE`.
Congratulations! The new machine is now part of the VPN, and the ZeroTier
configuration on NixOS within the Clan project is complete.

111
docs/mkdocs.yml Normal file
View File

@@ -0,0 +1,111 @@
site_name: cLAN documentation
site_url: https://docs.clan.lol
repo_url: https://git.clan.lol/clan/clan-core/
repo_name: clan-core
edit_uri: _edit/main/docs/docs/
validation:
omitted_files: warn
absolute_links: warn
unrecognized_links: warn
markdown_extensions:
- attr_list
- pymdownx.emoji:
emoji_index: !!python/name:material.extensions.emoji.twemoji
emoji_generator: !!python/name:material.extensions.emoji.to_svg
- pymdownx.tasklist:
custom_checkbox: true
- pymdownx.superfences
- pymdownx.tabbed:
alternate_style: true
- footnotes
- meta
- admonition
- pymdownx.details
- pymdownx.highlight:
use_pygments: true
- toc:
title: On this page
exclude_docs: |
.*
!templates/
/drafts/
nav:
- Getting started:
- index.md
- Configure: getting-started/configure.md
- Deploy Machine: getting-started/machines.md
- Installer: getting-started/installer.md
- Setup Networking: getting-started/networking.md
- Provision Secrets & Passwords: getting-started/secrets.md
- Backup & Restore: getting-started/backups.md
- Flake-parts: getting-started/flake-parts.md
- Templates: templates/index.md
- Reference:
- clan-core:
- reference/clan-core/index.md
- reference/clan-core/backups.md
- reference/clan-core/facts.md
- reference/clan-core/sops.md
- reference/clan-core/state.md
- clanModules:
- reference/clanModules/borgbackup.md
- reference/clanModules/deltachat.md
- reference/clanModules/diskLayouts.md
- reference/clanModules/ergochat.md
- reference/clanModules/graphical.md
- reference/clanModules/localbackup.md
- reference/clanModules/localsend.md
- reference/clanModules/matrix-synapse.md
- reference/clanModules/moonlight.md
- reference/clanModules/root-password.md
- reference/clanModules/sshd.md
- reference/clanModules/sunshine.md
- reference/clanModules/syncthing.md
- reference/clanModules/thelounge.md
- reference/clanModules/vm-user.md
- reference/clanModules/waypipe.md
- reference/clanModules/xfce-vm.md
- reference/clanModules/xfce.md
- reference/clanModules/zt-tcp-relay.md
- Contributing: contributing/contributing.md
docs_dir: site
site_dir: out
theme:
logo: static/logo.png
name: material
features:
- navigation.instant
- navigation.tabs
- content.code.copy
- content.tabs.link
icon:
repo: fontawesome/brands/git
palette:
# Palette toggle for light mode
- media: "(prefers-color-scheme: light)"
scheme: default
primary: teal
accent: deep purple
toggle:
icon: material/weather-night
name: Switch to dark mode
# Palette toggle for dark mode
- media: "(prefers-color-scheme: dark)"
primary: teal
accent: deep purple
scheme: slate
toggle:
icon: material/weather-sunny
name: Switch to light mode
plugins:
- search

27
docs/nix/default.nix Normal file
View File

@@ -0,0 +1,27 @@
{ pkgs, module-docs, ... }:
pkgs.stdenv.mkDerivation {
name = "clan-documentation";
src = ../.;
nativeBuildInputs =
[ pkgs.python3 ]
++ (with pkgs.python3Packages; [
mkdocs
mkdocs-material
]);
configurePhase = ''
mkdir -p ./site/reference
cp -af ${module-docs}/* ./site/reference/
'';
buildPhase = ''
mkdocs build --strict
ls -la .
'';
installPhase = ''
cp -a out/ $out/
'';
}

36
docs/nix/deploy-docs.nix Normal file
View File

@@ -0,0 +1,36 @@
{
writeShellScriptBin,
coreutils,
openssh,
rsync,
lib,
docs,
}:
writeShellScriptBin "deploy-docs" ''
set -eux -o pipefail
export PATH="${
lib.makeBinPath [
coreutils
openssh
rsync
]
}"
if [ -n "''${SSH_HOMEPAGE_KEY:-}" ]; then
echo "$SSH_HOMEPAGE_KEY" > ./ssh_key
chmod 600 ./ssh_key
sshExtraArgs="-i ./ssh_key"
else
sshExtraArgs=
fi
rsync \
-e "ssh -o StrictHostKeyChecking=no $sshExtraArgs" \
-a ${docs}/ \
www@clan.lol:/var/www/docs.clan.lol
if [ -e ./ssh_key ]; then
rm ./ssh_key
fi
''

72
docs/nix/flake-module.nix Normal file
View File

@@ -0,0 +1,72 @@
{ inputs, self, ... }:
{
perSystem =
{
config,
self',
pkgs,
...
}:
let
# Simply evaluated options (JSON)
# { clanCore = «derivation JSON»; clanModules = { ${name} = «derivation JSON» }; }
jsonDocs = import ./get-module-docs.nix {
inherit (inputs) nixpkgs;
inherit pkgs self;
inherit (self.nixosModules) clanCore;
inherit (self) clanModules;
};
clanModulesFileInfo = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModules);
clanModulesReadmes = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModulesReadmes);
# Simply evaluated options (JSON)
renderOptions =
pkgs.runCommand "renderOptions.py"
{
# TODO: ruff does not splice properly in nativeBuildInputs
depsBuildBuild = [ pkgs.ruff ];
nativeBuildInputs = [
pkgs.python3
pkgs.mypy
];
}
''
install ${./scripts/renderOptions.py} $out
patchShebangs --build $out
ruff format --check --diff $out
ruff --line-length 88 $out
mypy --strict $out
'';
module-docs = pkgs.runCommand "rendered" { nativeBuildInputs = [ pkgs.python3 ]; } ''
export CLAN_CORE=${jsonDocs.clanCore}/share/doc/nixos/options.json
# A file that contains the links to all clanModule docs
export CLAN_MODULES=${clanModulesFileInfo}
export CLAN_MODULES_READMES=${clanModulesReadmes}
mkdir $out
# The python script will place mkDocs files in the output directory
python3 ${renderOptions}
'';
in
{
devShells.docs = pkgs.callPackage ./shell.nix {
inherit (self'.packages) docs;
inherit module-docs;
};
packages = {
docs = pkgs.python3.pkgs.callPackage ./default.nix {
inherit (inputs) nixpkgs;
inherit module-docs;
};
deploy-docs = pkgs.callPackage ./deploy-docs.nix { inherit (config.packages) docs; };
inherit module-docs;
};
legacyPackages = {
foo = jsonDocs;
};
};
}

View File

@@ -0,0 +1,64 @@
{
nixpkgs,
pkgs,
clanCore,
clanModules,
self,
}:
let
allNixosModules = (import "${nixpkgs}/nixos/modules/module-list.nix") ++ [
"${nixpkgs}/nixos/modules/misc/assertions.nix"
{ nixpkgs.hostPlatform = "x86_64-linux"; }
];
clanCoreNixosModules = [
clanCore
{ clanCore.clanDir = ./.; }
] ++ allNixosModules;
# TODO: optimally we would not have to evaluate all nixos modules for every page
# but some of our module options secretly depend on nixos modules.
# We would have to get rid of these implicit dependencies and make them explicit
clanCoreNixos = pkgs.nixos { imports = clanCoreNixosModules; };
# using extendModules here instead of re-evaluating nixos every time
# improves eval performance slightly (10%)
getOptions = modules: (clanCoreNixos.extendModules { inherit modules; }).options;
evalDocs =
options:
pkgs.nixosOptionsDoc {
options = options;
warningsAreErrors = false;
};
# clanModules docs
clanModulesDocs = builtins.mapAttrs (
name: module: (evalDocs ((getOptions [ module ]).clan.${name} or { })).optionsJSON
) clanModules;
clanModulesReadmes = builtins.mapAttrs (
module_name: _module:
let
readme = "${self}/clanModules/${module_name}/README.md";
readmeContents =
if
builtins.trace "Trying to get Module README.md for ${module_name} from ${readme}"
# TODO: Edge cases
(builtins.pathExists readme)
then
(builtins.readFile readme)
else
null;
in
readmeContents
) clanModules;
# clanCore docs
clanCoreDocs = (evalDocs (getOptions [ ]).clanCore).optionsJSON;
in
{
inherit clanModulesReadmes;
clanCore = clanCoreDocs;
clanModules = clanModulesDocs;
}

View File

@@ -0,0 +1,235 @@
# Options are available in the following format:
# https://github.com/nixos/nixpkgs/blob/master/nixos/lib/make-options-doc/default.nix
#
# ```json
# {
# ...
# "fileSystems.<name>.options": {
# "declarations": ["nixos/modules/tasks/filesystems.nix"],
# "default": {
# "_type": "literalExpression",
# "text": "[\n \"defaults\"\n]"
# },
# "description": "Options used to mount the file system.",
# "example": {
# "_type": "literalExpression",
# "text": "[\n \"data=journal\"\n]"
# },
# "loc": ["fileSystems", "<name>", "options"],
# "readOnly": false,
# "type": "non-empty (list of string (with check: non-empty))"
# "relatedPackages": "- [`pkgs.tmux`](\n https://search.nixos.org/packages?show=tmux&sort=relevance&query=tmux\n )\n",
# }
# }
# ```
import json
import os
from pathlib import Path
from typing import Any
# Get environment variables (provided by the Nix build of the docs package).
# CLAN_CORE: path to the clan-core options JSON produced by nixosOptionsDoc.
CLAN_CORE = os.getenv("CLAN_CORE")
# CLAN_MODULES: path to a JSON map of module name -> options.json store path.
CLAN_MODULES = os.environ.get("CLAN_MODULES")
# CLAN_MODULES_READMES: path to a JSON map of module name -> README contents.
CLAN_MODULES_READMES = os.environ.get("CLAN_MODULES_READMES")
# out: the Nix output directory the rendered Markdown files are written to.
OUT = os.environ.get("out")
def sanitize(text: str) -> str:
    """Escape literal '>' characters so they render verbatim in Markdown."""
    escaped = text.replace(">", "\\>")
    return escaped
def replace_store_path(text: str) -> Path:
    # Map a /nix/store/<hash>-<name>/... declaration path to the matching
    # source URL in the clan-core repository; any other input passes through.
    res = text
    if text.startswith("/nix/store/"):
        # Drop the first four components ('/', 'nix', 'store', '<hash>-<name>')
        # and graft the remainder onto the repository browse URL.
        res = "https://git.clan.lol/clan/clan-core/src/branch/main/" + str(
            Path(*Path(text).parts[4:])
        )
    # NOTE(review): wrapping a URL in Path() collapses the "//" after the
    # scheme ("https://..." becomes "https:/..."), so str() on this return
    # value yields a broken link — confirm, and consider returning the URL
    # as a plain string instead.
    return Path(res)
def render_option_header(name: str) -> str:
    """Return a level-1 Markdown heading for *name*."""
    return "# {}\n".format(name)
def join_lines_with_indentation(lines: list[str], indent: int = 4) -> str:
    """Prefix each line with *indent* spaces and join them with newlines.

    Args:
        lines (list of str): The lines of text to join.
        indent (int): Number of whitespace characters prepended to each line.

    Returns:
        str: The indented, newline-joined string.
    """
    prefix = " " * indent
    indented = [prefix + line for line in lines]
    return "\n".join(indented)
def render_option(name: str, option: dict[str, Any], level: int = 3) -> str:
    """Render a single option as a Markdown section.

    Args:
        name: Dotted option path, e.g. "clan.borgbackup.destinations".
        option: This option's entry from the nixosOptionsDoc JSON (keys such
            as "type", "description", "default", "example", "declarations",
            "readOnly", "relatedPackages").
        level: Markdown heading level used for the option heading.

    Returns:
        A Markdown fragment documenting the option.
    """
    read_only = option.get("readOnly")
    res = f"""
{"#" * level} {sanitize(name)} {{#{sanitize(name)}}}
{"Readonly" if read_only else ""}
{option.get("description", "No description available.")}
**Type**: `{option["type"]}`
"""
    default = option.get("default")
    if default:
        # "default" is a literalExpression object; "text" holds the rendered
        # Nix source. (The previous inner re-check of option.get("default")
        # was dead code — the surrounding `if` already guarantees it.)
        res += f"""
**Default**:
```nix
{default["text"]}
```
"""
    example = option.get("example", {}).get("text")
    if example:
        # Indent the example by 4 spaces so it nests inside the collapsible
        # admonition below (mkdocs-material requires indented content).
        example_indented = join_lines_with_indentation(example.split("\n"))
        res += f"""
???+ example
    ```nix
{example_indented}
    ```
"""
    if option.get("relatedPackages"):
        res += f"""
### Related Packages
{option["relatedPackages"]}
"""
    decls = option.get("declarations", [])
    # Guard: options without declarations previously raised IndexError here.
    if decls:
        decl = decls[0]
        # Link store paths back to the clan-core repository. Build the URL as
        # a plain string: wrapping it in Path() would collapse the "//" after
        # the scheme ("https://" -> "https:/") and break the link.
        if decl.startswith("/nix/store/"):
            source_url = "https://git.clan.lol/clan/clan-core/src/branch/main/" + str(
                Path(*Path(decl).parts[4:])
            )
        else:
            source_url = decl
        res += f"""
:simple-git: [{Path(decl).name}]({source_url})
"""
    res += "\n"
    return res
def module_header(module_name: str) -> str:
    """Return the top-level Markdown heading for a module page."""
    return "# " + module_name + "\n"
def module_usage(module_name: str) -> str:
    """Return the Markdown "Usage" section with an import snippet for *module_name*."""
    return f"""## Usage
To use this module, import it like this:
```nix
{{config, lib, inputs, ...}}: {{
imports = [ inputs.clan-core.clanModules.{module_name} ];
# ...
}}
```
"""
# Intro blurb shown at the top of the generated clan-core reference page.
clan_core_descr = """ClanCore delivers all the essential features for every clan.
It's always included in your setup, and you can customize your clan's behavior with the configuration [options](#module-options) provided below.
"""
# Shared section heading that precedes every rendered option list.
options_head = "\n## Module Options\n"
def produce_clan_core_docs() -> None:
    """Render the clan-core options JSON into per-section Markdown files.

    Reads the options JSON referenced by $CLAN_CORE and writes
    `clan-core/index.md` plus one `clan-core/<sub>.md` per nested option
    group under $out.

    Raises:
        ValueError: If $CLAN_CORE or $out is unset or empty.
    """
    if not CLAN_CORE:
        raise ValueError(
            f"Environment variables are not set correctly: $CLAN_CORE={CLAN_CORE}"
        )
    if not OUT:
        raise ValueError(f"Environment variables are not set correctly: $out={OUT}")
    # A mapping of output file to content
    core_outputs: dict[str, str] = {}
    with open(CLAN_CORE) as f:
        options: dict[str, dict[str, Any]] = json.load(f)
    module_name = "clan-core"
    for option_name, info in options.items():
        # Create separate files for nested options
        if len(option_name.split(".")) <= 2:
            # Top-level options (i.e. clan-core.clanDir) go on the index page.
            outfile = f"{module_name}/index.md"
            output = core_outputs.get(
                outfile,
                module_header(module_name) + clan_core_descr + options_head,
            )
            output += render_option(option_name, info)
            core_outputs[outfile] = output
        else:
            # Clan sub-options: group everything under the second path
            # component, e.g. "clanCore.backups.*" -> clan-core/backups.md.
            sub = option_name.split(".")[1]
            outfile = f"{module_name}/{sub}.md"
            # Get the existing content or start with the section header.
            output = core_outputs.get(outfile, render_option_header(sub))
            output += render_option(option_name, info)
            core_outputs[outfile] = output
    for outfile, output in core_outputs.items():
        target = Path(OUT) / outfile
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(output)
def produce_clan_modules_docs() -> None:
    """Render one Markdown page per clan module under $out/clanModules/.

    Each page contains the module's README (when present), a usage snippet,
    and the option list rendered from its nixosOptionsDoc JSON.

    Raises:
        ValueError: If $CLAN_MODULES, $CLAN_MODULES_READMES or $out is unset
            or empty.
    """
    if not CLAN_MODULES:
        raise ValueError(
            f"Environment variables are not set correctly: $CLAN_MODULES={CLAN_MODULES}"
        )
    if not CLAN_MODULES_READMES:
        raise ValueError(
            f"Environment variables are not set correctly: $CLAN_MODULES_READMES={CLAN_MODULES_READMES}"
        )
    if not OUT:
        raise ValueError(f"Environment variables are not set correctly: $out={OUT}")
    with open(CLAN_MODULES) as f:
        links: dict[str, str] = json.load(f)
    with open(CLAN_MODULES_READMES) as readme:
        readme_map: dict[str, str] = json.load(readme)
    # {'borgbackup': '/nix/store/hi17dwgy7963ddd4ijh81fv0c9sbh8sw-options.json', ... }
    for module_name, options_file in links.items():
        with open(Path(options_file) / "share/doc/nixos/options.json") as f:
            options: dict[str, dict[str, Any]] = json.load(f)
        print(f"Rendering options for {module_name}...")
        output = module_header(module_name)
        # Prepend the README, when the module ships one.
        readme_contents = readme_map.get(module_name)
        if readme_contents:
            output += f"{readme_contents}\n"
        output += module_usage(module_name)
        # Only emit the options heading when there is at least one option.
        output += options_head if options else ""
        for option_name, info in options.items():
            output += render_option(option_name, info)
        outfile = Path(OUT) / f"clanModules/{module_name}.md"
        outfile.parent.mkdir(parents=True, exist_ok=True)
        outfile.write_text(output)
if __name__ == "__main__":
    # Entry point: render the clan-core docs first, then the per-module pages.
    produce_clan_core_docs()
    produce_clan_modules_docs()

16
docs/nix/shell.nix Normal file
View File

@@ -0,0 +1,16 @@
{
  docs,
  pkgs,
  module-docs,
  ...
}:
# Development shell for the docs package: reuses the docs derivation's build
# inputs and copies the pre-rendered module reference into the working tree
# on shell entry.
pkgs.mkShell {
  inputsFrom = [ docs ];
  shellHook = ''
    mkdir -p ./site/reference
    cp -af ${module-docs}/* ./site/reference/
    chmod +w ./site/reference/*
    echo "Generated API documentation in './site/reference/' "
  '';
}

View File

@@ -0,0 +1 @@
../../CONTRIBUTING.md

View File

@@ -0,0 +1,146 @@
# Hardware Installation
For installations on physical hardware, create a NixOS installer image and transfer it to a bootable USB drive as described below.
## Creating a Bootable USB Drive on Linux
To create a bootable USB flash drive with the NixOS installer:
### Download the install iso
Either with wget:
```shellSession
wget https://github.com/nix-community/nixos-images/releases/download/nixos-unstable/nixos-installer-x86_64-linux.iso
```
or with curl:
```shellSession
curl -L https://github.com/nix-community/nixos-images/releases/download/nixos-unstable/nixos-installer-x86_64-linux.iso -o nixos-installer-x86_64-linux.iso
```
### Prepare the USB Flash Drive
1. Insert your USB flash drive into your computer.
2. Identify your flash drive with `lsblk`.
```shellSession
lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sdb 8:0 1 117,2G 0 disk
└─sdb1 8:1 1 117,2G 0 part /run/media/qubasa/INTENSO
nvme0n1 259:0 0 1,8T 0 disk
├─nvme0n1p1 259:1 0 512M 0 part /boot
└─nvme0n1p2 259:2 0 1,8T 0 part
└─luks-f7600028-9d83-4967-84bc-dd2f498bc486 254:0 0 1,8T 0 crypt /nix/store
```
In this case it's `sdb`
3. Ensure all partitions on the drive are unmounted. Replace `sdX` in the command below with your device identifier (like `sdb`, etc.):
```shellSession
sudo umount /dev/sdb1
```
### Write the Image to the USB Drive
Use the `dd` utility to write the NixOS installer image to your USB drive:
```shellSession
sudo dd bs=4M conv=fsync oflag=direct status=progress if=./nixos-installer-x86_64-linux.iso of=/dev/sd<X>
```
In this case, the USB device is `sdb` use `of=/dev/sdb`
### Boot and Connect
After writing the installer to the USB drive, use it to boot the target machine.
1. For this, Secure Boot needs to be disabled. Enter your UEFI/BIOS settings by pressing one of the keys outlined below while booting:
- **Dell**: F2/Del (BIOS Setup)
- **HP**: Esc (Startup Menu)
- **Lenovo**: F2/Fn+F2/Novo Button (IdeaPad Boot Menu/BIOS Setup)
- **Acer**: F2/Del (BIOS Setup)
- **Asus**: F2/Del (BIOS Setup)
- **Toshiba**: Esc then F12 (Alternate Method)
- **Sony**: F11
- **Samsung**: F2 (BIOS Setup)
- **MSI**: Del (BIOS Setup)
- **Apple**: Option (Alt) Key (Boot Menu for Mac)
- If your hardware is not listed, read the manufacturer's instructions on how to enter the Boot Menu/BIOS Setup.
2. Inside the UEFI/Bios Menu go to `Security->Secure Boot` and disable secure boot
3. Save your settings. Put in the USB stick and reboot.
4. Press one of the keys outlined below to enter the Boot Menu
- **Dell**: F12 (Boot Menu)
- **HP**: F9 (Boot Menu)
- **Lenovo**: F12 (ThinkPad Boot Menu)
- **Acer**: F12 (Boot Menu)
- **Asus**: F8/Esc (Boot Menu)
- **Toshiba**: F12/F2 (Boot Menu)
- **Sony**: F11
- **Samsung**: F2/F12/Esc (Boot Menu)
- **MSI**: F11
- **Apple**: Option (Alt) Key (Boot Menu for Mac)
- If your hardware is not listed, read the manufacturer's instructions on how to enter the Boot Menu/BIOS Setup.
5. Select `NixOS` to boot into the clan installer
6. The installer will display an IP address and a root password, which you can use to connect via SSH.
Alternatively you can also use the displayed QR code.
7. Set your keyboard language (i.e. `de` for German keyboards, default is English). Important for writing passwords correctly.
```shellSession
loadkeys de
```
8. If you only have Wifi available, execute:
1. Bring up the `iwd` shell
```shellSession
iwctl
```
2. List available networks. Double press tab after station for autocompleting your wlan device. In this case `wlan0`
```shellSession
[iwd] station wlan0 get-networks
```
3. Connect to a Wifi network. Replace `SSID` with the wlan network name.
```shellSession
[iwd] station wlan0 connect SSID
```
9. Now that you have internet re-execute the init script by pressing `Ctrl+D` or by executing:
```shellSession
bash
```
10. Connect to the machine over ssh
```shellSession
ssh-copy-id -o PreferredAuthentications=password root@<ip>
```
Use the root password displayed on your screen as login.
---
# What's next?
- Deploy a clan machine-configuration on your prepared machine
---

View File

@@ -0,0 +1,149 @@
# Backups
## Introduction to Backups
When you're managing your own services, creating regular backups is crucial to ensure your data's safety.
This guide introduces you to Clan's built-in backup functionalities.
Clan supports backing up your data to both local storage devices (like USB drives) and remote servers, using well-known tools like borgbackup and rsnapshot.
We might add more options in the future, but for now, let's dive into how you can secure your data.
## Backing Up Locally with Localbackup
### What is Localbackup?
Localbackup lets you backup your data onto physical storage devices connected to your computer,
such as USB hard drives or network-attached storage. It uses a tool called rsnapshot for this purpose.
### Setting Up Localbackup
1. **Identify Your Backup Device:**
First, figure out which device you'll use for backups. You can see all connected devices by running this command in your terminal:
```bash
lsblk --output NAME,PTUUID,FSTYPE,SIZE,MOUNTPOINT
```
Look for the device you intend to use for backups and note its details.
2. **Configure Your Backup Device:**
Once you've identified your device, you'll need to add it to your configuration.
Here's an example NixOS configuration for a device located at `/dev/sda2` with an `ext4` filesystem:
```nix
{
fileSystems."/mnt/hdd" = {
device = "/dev/sda2";
fsType = "ext4";
options = [ "defaults" "noauto" ];
};
}
```
Replace `/dev/sda2` with your device and `/mnt/hdd` with your preferred mount point.
3. **Set Backup Targets:** Next, define where on your device you'd like the backups to be stored:
```nix
{
clan.localbackup.targets.hdd = {
directory = "/mnt/hdd/backup";
mountpoint = "/mnt/hdd";
};
}
```
Change `/mnt/hdd` to the actual mount point you're using.
4. **Create Backups:** To create a backup, run:
```bash
clan backups create mymachine
```
This command saves snapshots of your data onto the backup device.
5. **Listing Backups:** To see available backups, run:
```bash
clan backups list mymachine
```
## Remote Backups with Borgbackup
### Overview of Borgbackup
Borgbackup splits the backup process into two parts: a backup client that sends data to a backup server,
and the backup server, which stores the backups.
### Setting Up the Borgbackup Client
1. **Specify Backup Server:**
Start by indicating where your backup data should be sent. Replace `hostname` with your server's address:
```nix
{
clan.borgbackup.destinations = {
myhostname = {
repo = "borg@backuphost:/var/lib/borgbackup/myhostname";
};
};
}
```
2. **Select Folders to Backup:**
Decide which folders you want to back up. For example, to backup your home and root directories:
```nix
{ clanCore.state.userdata.folders = [ "/home" "/root" ]; }
```
3. **Generate Backup Credentials:**
Run `clan facts generate <yourmachine>` to prepare your machine for backup, creating necessary SSH keys and credentials.
### Setting Up the Borgbackup Server
1. **Configure Backup Repository:**
On the server where backups will be stored, enable the SSH daemon and set up a repository for each client:
```nix
{
services.borgbackup.repos.myhostname = {
path = "/var/lib/borgbackup/myhostname";
authorizedKeys = [
(builtins.readFile ./machines/myhostname/facts/borgbackup.ssh.pub)
];
};
}
```
Ensure the path to the public key is correct.
2. **Update Your Systems:** Apply your changes by running `clan machines update` to both the server and your client
### Managing Backups
- **Scheduled Backups:**
Backups are automatically performed nightly. To check the next scheduled backup, use:
```bash
systemctl list-timers | grep -E 'NEXT|borg'
```
- **Listing Backups:** To see available backups, run:
```bash
clan backups list mymachine
```
- **Manual Backups:** You can also initiate a backup manually:
```bash
clan backups create mymachine
```

View File

@@ -0,0 +1,150 @@
# Configuration - How to configure clan with your own machines
## Global configuration
In the `flake.nix` file:
- [x] set a unique `clanName`.
- [ ] set `clanIcon` (optional)
- [ ] Set `machineIcon` per machine (optional)
These icons will be used by our future GUI.
=== "**buildClan**"
```nix title="clan-core.lib.buildClan"
buildClan {
# Set a unique name
clanName = "Lobsters";
# Optional, a path to an image file
clanIcon = ./path/to/file;
# Should usually point to the directory of flake.nix
directory = ./.;
machines = {
jon = {
# ...
# Optional, a path to an image file
clanCore.machineIcon = ./path/to/file;
};
# ...
}
}
```
=== "**flakeParts**"
!!! info "See [Clan with flake-parts](./flake-parts.md) for help migrating to flake-parts."
```nix title="clan-core.flakeModules.default"
clan = {
# Set a unique name
clanName = "Lobsters";
# Optional, a path to an image file
clanIcon = ./path/to/file;
machines = {
jon = {
# ...
# Optional, a path to an image file
clanCore.machineIcon = ./path/to/file;
};
# ...
}
};
```
## Machine configuration
Adding or configuring a new machine requires two simple steps:
### Step 1. Identify Target Disk-ID
1. Find the remote disk id by executing:
```bash title="setup computer"
ssh root@<target-computer> lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
```
Which should show something like:
```bash
NAME ID-LINK FSTYPE SIZE MOUNTPOINT
sda usb-ST_16GB_AA6271026J1000000509-0:0 14.9G
├─sda1 usb-ST_16GB_AA6271026J1000000509-0:0-part1 1M
├─sda2 usb-ST_16GB_AA6271026J1000000509-0:0-part2 vfat 100M /boot
└─sda3 usb-ST_16GB_AA6271026J1000000509-0:0-part3 ext4 2.9G /
nvme0n1 nvme-eui.e8238fa6bf530001001b448b4aec2929 476.9G
├─nvme0n1p1 nvme-eui.e8238fa6bf530001001b448b4aec2929-part1 vfat 512M
├─nvme0n1p2 nvme-eui.e8238fa6bf530001001b448b4aec2929-part2 ext4 459.6G
└─nvme0n1p3 nvme-eui.e8238fa6bf530001001b448b4aec2929-part3 swap 16.8G
```
1. Edit the following fields inside the `flake.nix`
=== "**buildClan**"
```nix title="clan-core.lib.buildClan"
buildClan {
# ...
machines = {
"jon" = {
# ...
# Change this to the correct ip-address or hostname
# The hostname is the machine name by default
clan.networking.targetHost = pkgs.lib.mkDefault "root@<hostname>"
# Change this to the ID-LINK of the desired disk shown by 'lsblk'
clan.diskLayouts.singleDiskExt4 = {
device = "/dev/disk/by-id/__CHANGE_ME__";
}
# ...
};
};
}
```
=== "**flakeParts**"
```nix title="clan-core.flakeModules.default"
clan = {
# ...
machines = {
"jon" = {
# ...
# Change this to the correct ip-address or hostname
# The hostname is the machine name by default
clan.networking.targetHost = pkgs.lib.mkDefault "root@<hostname>"
# Change this to the ID-LINK of the desired disk shown by 'lsblk'
clan.diskLayouts.singleDiskExt4 = {
device = "/dev/disk/by-id/__CHANGE_ME__";
}
# ...
};
};
};
```
### Step 2. Detect hardware specific drivers
1. Generate a `hardware-configuration.nix` for your target computer
```bash
ssh root@<target-computer> nixos-generate-config --no-filesystems --show-hardware-config > hardware-configuration.nix
```
2. Move the generated file to `machines/jon/hardware-configuration.nix`.
### Initialize the facts
!!! Info
**All facts are automatically initialized.**
If you need additional help see our [facts chapter](./secrets.md)

View File

@@ -0,0 +1,102 @@
# Clan with `flake-parts`
Clan supports integration with [flake.parts](https://flake.parts/), a tool which allows composing NixOS modules in a modular way.
Here's how to set up Clan using `nix flakes` and `flake-parts`.
## 1. Update Your Flake Inputs
To begin, you'll need to add `flake-parts` as a new dependency in your flake's inputs. This is alongside the already existing dependencies, such as `clan-core` and `nixpkgs`. Here's how you can update your `flake.nix` file:
```nix
# flake.nix
inputs = {
nixpkgs.url = "github:nixos/nixpkgs?ref=nixos-unstable";
# New flake-parts input
flake-parts.url = "github:hercules-ci/flake-parts";
flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs";
clan-core = {
url = "git+https://git.clan.lol/clan/clan-core";
inputs.nixpkgs.follows = "nixpkgs"; # Needed if your configuration uses nixpkgs unstable.
# New
inputs.flake-parts.follows = "flake-parts";
};
}
```
## 2. Import Clan-Core Flake Module
After updating your flake inputs, the next step is to import the `clan-core` flake module. This will make the [clan options](https://git.clan.lol/clan/clan-core/src/branch/main/flakeModules/clan.nix) available within `mkFlake`.
```nix
outputs =
inputs@{ flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } (
{
#
imports = [
inputs.clan-core.flakeModules.default
];
}
);
```
### 3. Configure Clan Settings and Define Machines
Configure your clan settings and define machine configurations.
Below is a guide on how to structure this in your flake.nix:
```nix
outputs = inputs@{ flake-parts, clan-core, ... }:
flake-parts.lib.mkFlake { inherit inputs; } ({self, pkgs, ...}: {
# We define our own systems below. you can still use this to add system specific outputs to your flake.
# See: https://flake.parts/getting-started
systems = [];
# import clan-core modules
imports = [
clan-core.flakeModules.default
];
# Define your clan
clan = {
# Clan wide settings. (Required)
clanName = ""; # Ensure to choose a unique name.
machines = {
jon = {
imports = [
./machines/jon/configuration.nix
# ... more modules
];
nixpkgs.hostPlatform = "x86_64-linux";
clanCore.machineIcon = null; # Optional, a path to an image file
# Set this for clan commands use ssh i.e. `clan machines update`
clan.networking.targetHost = pkgs.lib.mkDefault "root@jon";
# remote> lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
clan.diskLayouts.singleDiskExt4 = {
device = "/dev/disk/by-id/nvme-eui.e8238fa6bf530001001b448b4aec2929";
};
# There needs to be exactly one controller per clan
clan.networking.zerotier.controller.enable = true;
};
};
};
});
```
For detailed information about configuring `flake-parts` and the available options within Clan,
refer to the Clan module documentation located [here](https://git.clan.lol/clan/clan-core/src/branch/main/flakeModules/clan.nix).
## What's next?
- [Configure Machines](configure.md): Customize machine configuration
- [Deploying](machines.md): Deploying a Machine configuration
---

View File

@@ -0,0 +1,149 @@
# Installer
We offer a dedicated installer to assist remote installations.
In this tutorial we will guide you through building and flashing it to a bootable USB drive.
## Creating and Using the **Clan Installer**
### Step 0. Prerequisites
- [x] A free USB Drive with at least 1.5GB (All data on it will be lost)
- [x] Linux/NixOS Machine with Internet
### Step 1. Identify the USB Flash Drive
1. Insert your USB flash drive into your computer.
2. Identify your flash drive with `lsblk`:
```shellSession
lsblk
```
```{.console, .no-copy}
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sdb 8:0 1 117,2G 0 disk
└─sdb1 8:1 1 117,2G 0 part /run/media/qubasa/INTENSO
nvme0n1 259:0 0 1,8T 0 disk
├─nvme0n1p1 259:1 0 512M 0 part /boot
└─nvme0n1p2 259:2 0 1,8T 0 part
└─luks-f7600028-9d83-4967-84bc-dd2f498bc486 254:0 0 1,8T 0 crypt /nix/store
```
!!! Info "In this case the USB device is `sdb`"
3. Ensure all partitions on the drive are unmounted. Replace `sdb1` in the command below with your device identifier (like `sdc1`, etc.):
```shellSession
sudo umount /dev/sdb1
```
### Step 2. Download the Installer
```shellSession
wget https://github.com/nix-community/nixos-images/releases/download/nixos-unstable/nixos-installer-x86_64-linux.iso
```
### Step 3. Flash the Installer to the USB Drive
!!! Danger "Specifying the wrong device can lead to unrecoverable data loss."
The `dd` utility will erase the disk. Make sure to specify the correct device (`of=...`)
For example if the USB device is `sdb` use `of=/dev/sdb`.
Use the `dd` utility to write the NixOS installer image to your USB drive:
```shellSession
sudo dd bs=4M conv=fsync oflag=direct status=progress if=./nixos-installer-x86_64-linux.iso of=/dev/sd<X>
```
### Step 4. Boot and Connect to your network
After writing the installer to the USB drive, use it to boot the target machine.
!!! info
Plug it into the target machine and select the USB drive as a temporary boot device.
??? tip "Here you can find the key combinations for selection used by most vendors."
- **Dell**: F12 (Boot Menu), F2/Del (BIOS Setup)
- **HP**: F9 (Boot Menu), Esc (Startup Menu)
- **Lenovo**: F12 (ThinkPad Boot Menu), F2/Fn+F2/Novo Button (IdeaPad Boot Menu/BIOS Setup)
- **Acer**: F12 (Boot Menu), F2/Del (BIOS Setup)
- **Asus**: F8/Esc (Boot Menu), F2/Del (BIOS Setup)
- **Toshiba**: F12/F2 (Boot Menu), Esc then F12 (Alternate Method)
- **Sony**: F11/Assist Button (Boot Menu/Recovery Options)
- **Samsung**: F2/F12/Esc (Boot Menu), F2 (BIOS Setup)
- **MSI**: F11 (Boot Menu), Del (BIOS Setup)
- **Apple**: Option (Alt) Key (Boot Menu for Mac)
- If your hardware is not listed, read the manufacturer's instructions on how to enter the Boot Menu/BIOS Setup.
**During Boot**
Select `NixOS` to boot into the clan installer.
**After Booting**
For deploying your configuration the machine needs to be connected via LAN (recommended).
For connecting via Wifi, please consult the [guide below](#optional-connect-to-wifi).
---
## What's next?
- [Configure Machines](configure.md): Customize machine configuration
- [Deploying](machines.md): Deploying a Machine configuration
- [WiFi](#optional-connect-to-wifi): Guide for connecting to Wifi.
---
## (Optional) Connect to Wifi
If you don't have access via LAN the Installer offers support for connecting via Wifi.
```shellSession
iwctl
```
This will enter `iwd`
```{.console, .no-copy}
[iwd]#
```
Now run the following command to connect to your Wifi:
```shellSession
# Identify your network device.
device list
# Replace 'wlan0' with your wireless device name
# Find your Wifi SSID.
station wlan0 scan
station wlan0 get-networks
# Replace your_ssid with the Wifi SSID
# Connect to your network.
station wlan0 connect your_ssid
# Verify you are connected
station wlan0 show
```
If the connection was successful you should see something like this:
```{.console, .no-copy}
State connected
Connected network FRITZ!Box (Your router device)
IPv4 address 192.168.188.50 (Your new local ip)
```
Press `ctrl-d` to exit `IWD`.
!!! Important
Press `ctrl-d` **again** to update the displayed QR code and connection information.
You're all set up

View File

@@ -0,0 +1,128 @@
# Deploy Machine
Integrating a new machine into your Clan environment is an easy yet flexible process, allowing for straightforward management of multiple NixOS configurations.
We'll walk you through adding a new computer to your Clan.
## Installing a New Machine
Clan CLI, in conjunction with [nixos-anywhere](https://github.com/nix-community/nixos-anywhere), provides a seamless method for installing NixOS on various machines.
This process involves preparing a suitable hardware and disk partitioning configuration and ensuring the target machine is accessible via SSH.
### Step 0. Prerequisites
=== "**Physical Hardware**"
- [x] **Two Computers**: You need one computer that you're getting ready (we'll call this the Target Computer) and another one to set it up from (we'll call this the Setup Computer). Make sure both can talk to each other over the network using SSH.
- [x] **Machine configuration**: See our basic [configuration guide](./configure.md)
- [x] **Initialized secrets**: See [secrets](secrets.md) for how to initialize your secrets.
- [x] **USB Flash Drive**: See [Clan Installer](installer.md)
!!! Steps
1. Create a NixOS installer image and transfer it to a bootable USB drive as described in the [installer](./installer.md).
2. Boot the target machine and connect it to a network that makes it reachable from your setup computer.
=== "**Baremetal Machines**"
- [x] **Two Computers**: You need one computer that you're getting ready (we'll call this the Target Computer) and another one to set it up from (we'll call this the Setup Computer). Make sure both can talk to each other over the network using SSH.
- [x] **Machine configuration**: See our basic [configuration guide](./configure.md)
- [x] **Initialized secrets**: See [secrets](secrets.md) for how to initialize your secrets.
!!! Steps
- Any cloud machine if it is reachable via SSH and supports `kexec`.
Confirm the machine is reachable via SSH from your setup computer.
```bash
ssh root@<your_target_machine_ip>
```
### Step 1. Deploy the machine
**Finally deployment time!** Use the following command to build and deploy the image via SSH onto your machine.
Replace `<target_host>` with the **target computers' ip address**:
```bash
clan machines install my-machine <target_host>
```
> Note: This may take a while for building and for the file transfer.
!!! success
Your machine is all set up. 🎉 🚀
---
## What's next ?
- [**Update a Machine**](#update-your-machines): Learn how to update an existing machine.
Coming Soon:
- **Join Your Machines in a Private Network:**: Stay tuned for steps on linking all your machines into a secure mesh network with Clan.
---
## Update Your Machines
Clan CLI enables you to remotely update your machines over SSH. This requires setting up a target address for each target machine.
### Setting the Target Host
Replace `host_or_ip` with the actual hostname or IP address of your target machine:
```bash
clan config --machine my-machine clan.networking.targetHost root@host_or_ip
```
!!! warning
The use of `root@` in the target address implies SSH access as the `root` user.
Ensure that the root login is secured and only used when necessary.
### Updating Machine Configurations
Execute the following command to update the specified machine:
```bash
clan machines update my-machine
```
You can also update all configured machines simultaneously by omitting the machine name:
```bash
clan machines update
```
### Setting a Build Host
If the machine does not have enough resources to run the NixOS evaluation or build itself,
it is also possible to specify a build host instead.
During an update, the cli will ssh into the build host and run `nixos-rebuild` from there.
```bash
clan config --machine my-machine clan.networking.buildHost root@host_or_ip
```
### Excluding a machine from `clan machine update`
To exclude machines from being updated when running `clan machines update` without any machines specified,
one can set the `clan.deployment.requireExplicitUpdate` option to true:
```bash
clan config --machine my-machine clan.deployment.requireExplicitUpdate true
```
This is useful for machines that are not always online or are not part of the regular update cycle.
---
# TODO:
* TODO: How to join others people zerotier
* `services.zerotier.joinNetworks = [ "network-id" ]`
* Controller needs to approve over webinterface or cli

View File

@@ -0,0 +1,95 @@
# Overlay Networks
This guide provides detailed instructions for configuring
[ZeroTier VPN](https://zerotier.com) within Clan. Follow the
outlined steps to set up a machine as a VPN controller (`<CONTROLLER>`) and to
include a new machine into the VPN.
## 1. Setting Up the VPN Controller
The VPN controller is initially essential for providing configuration to new
peers. Once addresses are allocated, the controller's continuous operation is not essential.
### Instructions
1. **Designate a Machine**: Label a machine as the VPN controller in the clan,
referred to as `<CONTROLLER>` henceforth in this guide.
1. **Add Configuration**: Input the following configuration to the NixOS
configuration of the controller machine:
```nix
clan.networking.zerotier.controller = {
enable = true;
public = true;
};
```
1. **Update the Controller Machine**: Execute the following:
```bash
$ clan machines update <CONTROLLER>
```
Your machine is now operational as the VPN controller.
## 2. Integrating a New Machine to the VPN
To introduce a new machine to the VPN, adhere to the following steps:
### Instructions:
1. **Update Configuration**: On the new machine, incorporate the following to its
configuration, substituting `<CONTROLLER>` with the controller machine name:
```nix
{ config, ... }: {
clan.networking.zerotier.networkId = builtins.readFile (config.clanCore.clanDir + "/machines/<CONTROLLER>/facts/zerotier-network-id");
}
```
1. **Update the New Machine**: Execute:
```bash
$ clan machines update <NEW_MACHINE>
```
Replace `<NEW_MACHINE>` with the designated new machine name.
1. **Retrieve the ZeroTier ID**: On the `new_machine`, execute:
```bash
$ sudo zerotier-cli info
```
Example Output:
```{.console, .no-copy}
200 info d2c71971db 1.12.1 OFFLINE
```
, where `d2c71971db` is the ZeroTier ID.
1. **Authorize the New Machine on the Controller**: On the controller machine,
execute:
```bash
$ sudo zerotier-members allow <ID>
```
Substitute `<ID>` with the ZeroTier ID obtained previously.
1. **Verify Connection**: On the `new_machine`, re-execute:
```bash
$ sudo zerotier-cli info
```
The status should now be "ONLINE":
```{.console, .no-copy}
200 info d2c71971db 1.12.1 ONLINE
```
!!! success "Congratulations!"
The new machine is now part of the VPN, and the ZeroTier
configuration on NixOS within the Clan project is complete.
## Decision
We chose zerotier because in our tests it was the easiest solution to bootstrap. You can selfhost a controller and the controller doesn't need to be globally reachable.
In the future we plan to add additional network technologies like Tinc, Headscale/Tailscale, Yggdrasil, and Mycelium.
## Specification
By default all machines within one clan are connected via the chosen network technology.
```
Clan
Node A
<-> (zerotier / mycelium / ...)
Node B
```
If you select multiple network technologies at the same time. e.g. (zerotier + yggdrassil)
One of them is the primary network and the above statement holds for the primary network.

View File

@@ -1,69 +1,151 @@
# Managing Secrets with Clan
# Secrets / Facts
Clan enables encryption of secrets within a Clan flake, ensuring secure sharing among users.
This documentation will guide you through managing secrets with the Clan CLI,
which utilizes the [sops](https://github.com/getsops/sops) format and
integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
Clan enables encryption of secrets (such as passwords & keys) ensuring security and ease-of-use among users.
## 1. Generating Keys and Creating Secrets
Clan utilizes the [sops](https://github.com/getsops/sops) format and integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
To begin, generate a key pair:
This documentation will guide you through managing secrets with the Clan CLI
```shellSession
$ clan secrets key generate
## 1. Initializing Secrets
### Create Your Master Keypair
To get started, you'll need to create **Your master keypair**.
!!! info
Don't worry — if you've already made one before, this step won't change or overwrite it.
```bash
clan secrets key generate
```
**Output**:
```
```{.console, .no-copy}
Public key: age1wkth7uhpkl555g40t8hjsysr20drq286netu8zptw50lmqz7j95sw2t3l7
Generated age private key at '/home/joerg/.config/sops/age/keys.txt' for your user.
Generated age private key at '/home/joerg/.config/sops/age/keys.txt' for your user. Please back it up on a secure location or you will lose access to your secrets.
Also add your age public key to the repository with 'clan secrets users add youruser age1wkth7uhpkl555g40t8hjsysr20drq286netu8zptw50lmqz7j95sw2t3l7' (replace you
user with your user name)
Also add your age public key to the repository with 'clan secrets users add YOUR_USER age1wkth7uhpkl555g40t8hjsysr20drq286netu8zptw50lmqz7j95sw2t3l7' (replace YOUR_USER with your actual username)
```
⚠️ **Important**: Backup the generated private key securely, or risk losing access to your secrets.
!!! warning
Make sure to keep a safe backup of the private key you've just created.
If it's lost, you won't be able to get to your secrets anymore because they all need the master key to be unlocked.
Next, add your public key to the Clan flake repository:
!!! note
It's safe to add any secrets created by the clan CLI and placed in your repository to version control systems like `git`.
```shellSession
$ clan secrets users add <your_username> <your_public_key>
### Add Your Public Key
```bash
clan secrets users add <your_username> <your_public_key>
```
Doing so creates this structure in your Clan flake:
!!! note
Choose the same username as on your Setup/Source Machine that you use to control the deployment with.
```
Once run this will create the following files:
```{.console, .no-copy}
sops/
└── users/
└── <your_username>/
└── key.json
```
Now, to set your first secret:
## 2. Adding Machine Keys
```shellSession
$ clan secrets set mysecret
Paste your secret:
New machines in Clan come with age keys stored in `./sops/machines/<machine_name>`. To list these machines:
```bash
$ clan secrets machines list
```
Note: As you type your secret, keypresses won't be displayed. Press Enter to save the secret.
For existing machines, add their keys:
Retrieve the stored secret:
```bash
$ clan secrets machines add <machine_name> <age_key>
```
```shellSession
### Advanced
To fetch an age key from an SSH host key:
```bash
$ ssh-keyscan <domain_name> | nix shell nixpkgs#ssh-to-age -c ssh-to-age
```
## 3. Assigning Access
By default, secrets are encrypted for your key. To specify which users and machines can access a secret:
```bash
$ clan secrets set --machine <machine1> --machine <machine2> --user <user1> --user <user2> <secret_name>
```
You can add machines/users to existing secrets without modifying the secret:
```bash
$ clan secrets machines add-secret <machine_name> <secret_name>
```
## 4. Adding Secrets
```bash
$ clan secrets set mysecret
Paste your secret:
```
!!! note
As you type, your secret won't be displayed. Press Enter to save the secret.
## 5. Retrieving Stored Secrets
```bash
$ clan secrets get mysecret
```
And list all secrets like this:
### List all Secrets
```shellSession
```bash
$ clan secrets list
```
## 6. Groups
Clan CLI makes it easy to manage access by allowing you to create groups.
All users within a group inherit access to all secrets of the group.
This feature eases the process of handling permissions for multiple users.
Here's how to get started:
1. **Creating Groups**:
Assign users to a new group, e.g., `admins`:
```bash
$ clan secrets groups add admins <username>
```
2. **Listing Groups**:
```bash
$ clan secrets groups list
```
3. **Assigning Secrets to Groups**:
```bash
$ clan secrets groups add-secret <group_name> <secret_name>
```
## Further
Secrets in the repository follow this structure:
```
```{.console, .no-copy}
sops/
├── secrets/
│ └── <secret_name>/
@@ -73,73 +155,15 @@ sops/
```
The content of the secret is stored encrypted inside the `secret` file under `mysecret`.
By default, secrets are encrypted with your key to ensure readability.
## 2. Adding Machine Keys
New machines in Clan come with age keys stored in `./sops/machines/<machine_name>`. To list these machines:
```shellSession
$ clan secrets machines list
```
For existing machines, add their keys:
```shellSession
$ clan secrets machines add <machine_name> <age_key>
```
To fetch an age key from an SSH host key:
```shellSession
$ ssh-keyscan <domain_name> | nix shell nixpkgs#ssh-to-age -c ssh-to-age
```
## 3. Assigning Access
By default, secrets are encrypted for your key. To specify which users and machines can access a secret:
```shellSession
$ clan secrets set --machine <machine1> --machine <machine2> --user <user1> --user <user2> <secret_name>
```
You can add machines/users to existing secrets without modifying the secret:
```shellSession
$ clan secrets machines add-secret <machine_name> <secret_name>
```
## 4. Utilizing Groups
For convenience, Clan CLI allows group creation to simplify access management. Here's how:
1. **Creating Groups**:
Assign users to a new group, e.g., `admins`:
```shellSession
$ clan secrets groups add admins <username>
```
2. **Listing Groups**:
```shellSession
$ clan secrets groups list
```
3. **Assigning Secrets to Groups**:
```shellSession
$ clan secrets groups add-secret <group_name> <secret_name>
```
# NixOS integration
### NixOS integration
A NixOS machine will automatically import all secrets that are encrypted for the
current machine. At runtime it will use the host key to decrypt all secrets into
a in-memory, non-persistent filesystem using
[sops-nix](https://github.com/Mic92/sops-nix). In your nixos configuration you
can get a path to secrets like this `config.sops.secrets.<name>.path`. Example:
an in-memory, non-persistent filesystem using [sops-nix](https://github.com/Mic92/sops-nix).
In your nixos configuration you can get a path to secrets like this `config.sops.secrets.<name>.path`. For example:
```nix
{ config, ...}: {
@@ -155,19 +179,18 @@ can get a path to secrets like this `config.sops.secrets.<name>.path`. Example:
See the [readme](https://github.com/Mic92/sops-nix) of sops-nix for more
examples.
# Importing existing sops-based keys / sops-nix
### Migration: Importing existing sops-based keys / sops-nix
`clan secrets` stores each secrets in a single file, whereas [sops](https://github.com/Mic92/sops-nix)
commonly allows to put all secrets in a yaml or json documents.
`clan secrets` stores each secret in a single file, whereas [sops](https://github.com/Mic92/sops-nix) commonly allows to put all secrets in a yaml or json document.
If you already happened to use sops-nix, you can migrate by using the `clan secrets import-sops` command by importing these documents:
If you already happened to use sops-nix, you can migrate by using the `clan secrets import-sops` command by importing these files:
```shellSession
```bash
% clan secrets import-sops --prefix matchbox- --group admins --machine matchbox nixos/matchbox/secrets/secrets.yaml
```
This will create secrets for each secret found in `nixos/matchbox/secrets/secrets.yaml` in a ./sops folder of your repository.
Each member of the group `admins` will be able
This will create secrets for each secret found in `nixos/matchbox/secrets/secrets.yaml` in a `./sops` folder of your repository.
Each member of the group `admins` in this case will be able to decrypt the secrets with their respective key.
Since our clan secret module will auto-import secrets that are encrypted for a particular nixos machine,
you can now remove `sops.secrets.<secrets> = { };` unless you need to specify more options for the secret like owner/group of the secret file.

113
docs/site/index.md Normal file
View File

@@ -0,0 +1,113 @@
# Getting Started
Welcome to your simple guide on starting a new Clan project.
## What's Inside
We've put together a straightforward guide to help you out:
- [**Starting with a New Clan Project**](#starting-with-a-new-clan-project): Create a new Clan from scratch.
- [**Integrating Clan using Flake-Parts**](getting-started/flake-parts.md)
---
## **Starting with a New Clan Project**
Create your own clan with these initial steps.
### Prerequisites
#### Linux
Clan depends on nix installed on your system. Run the following command to install nix.
```bash
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
#### NixOS
If you run NixOS the `nix` binary is already installed.
You will also need to enable the `flakes` and `nix-command` experimental features.
```bash
# /etc/nix/nix.conf or ~/.config/nix/nix.conf
experimental-features = nix-command flakes
```
#### Other
Clan doesn't offer dedicated support for other operating systems yet.
### Step 1: Add Clan CLI to Your Shell
Add the Clan CLI into your development workflow:
```bash
nix shell git+https://git.clan.lol/clan/clan-core#clan-cli
```
### Step 2: Initialize Your Project
Set the foundation of your Clan project by initializing it as follows:
```bash
clan flakes create my-clan
```
This command creates the `flake.nix` and `.clan-flake` files for your project.
### Step 3: Verify the Project Structure
Ensure that all project files exist by running:
```bash
cd my-clan
tree
```
This should yield the following:
``` { .console .no-copy }
.
├── flake.nix
├── machines
│   ├── jon
│   │   ├── configuration.nix
│   │   └── hardware-configuration.nix
│   └── sara
│   ├── configuration.nix
│   └── hardware-configuration.nix
└── modules
└── shared.nix
5 directories, 6 files
```
```bash
clan machines list
```
``` { .console .no-copy }
jon
sara
```
!!! success
You just successfully bootstrapped your first clan directory.
---
### What's Next?
- [**Machine Configuration**](getting-started/configure.md): Declare behavior and configuration of machines.
- [**Deploy Machines**](getting-started/machines.md): Learn how to deploy to any remote machine.
- [**Installer**](getting-started/installer.md): Setting up new computers remotely is easy with an USB stick.
- [**Check out our Templates**](templates/index.md)
---

BIN
docs/site/static/logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.5 KiB

View File

@@ -0,0 +1,24 @@
# Templates
We provide some starting templates; you can easily use one of them via `nix flakes`.
They showcase best practices and guide you through setting up and using Clan's modules.
For example, to use the `new-clan` template run the following command:
```bash
nix flake init -t git+https://git.clan.lol/clan/clan-core#new-clan
```
## Available Templates
We offer the following templates:
To initialize a clan with one of those run:
```bash
nix flake init -t git+https://git.clan.lol/clan/clan-core#[TEMPLATE_NAME]
```
Substitute `[TEMPLATE_NAME]` with the name of the template.
- **new-clan**: Perfect for beginners, this template shows you how to link two machines in a basic setup.

42
flake.lock generated
View File

@@ -7,11 +7,11 @@
]
},
"locked": {
"lastModified": 1711261295,
"narHash": "sha256-5DUNQl9BSmLxgGLbF05G7hi/UTk9DyZq8AuEszhQA7Q=",
"lastModified": 1712356478,
"narHash": "sha256-kTcEtrQIRnexu5lAbLsmUcfR2CrmsACF1s3ZFw1NEVA=",
"owner": "nix-community",
"repo": "disko",
"rev": "5d2d3e421ade554b19b4dbb0d11a04023378a330",
"rev": "0a17298c0d96190ef3be729d594ba202b9c53beb",
"type": "github"
},
"original": {
@@ -27,11 +27,11 @@
]
},
"locked": {
"lastModified": 1709336216,
"narHash": "sha256-Dt/wOWeW6Sqm11Yh+2+t0dfEWxoMxGBvv3JpIocFl9E=",
"lastModified": 1712014858,
"narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "f7b3c975cf067e56e7cda6cb098ebe3fb4d74ca2",
"rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
"type": "github"
},
"original": {
@@ -42,11 +42,11 @@
},
"nixlib": {
"locked": {
"lastModified": 1710636348,
"narHash": "sha256-/kB+ZWSdkZjbZ0FTqm0u84sf2jFS+30ysaEajmBjtoY=",
"lastModified": 1711846064,
"narHash": "sha256-cqfX0QJNEnge3a77VnytM0Q6QZZ0DziFXt6tSCV8ZSc=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "fa827dda806c5aa98f454da4c567991ab8ce422c",
"rev": "90b1a963ff84dc532db92f678296ff2499a60a87",
"type": "github"
},
"original": {
@@ -63,11 +63,11 @@
]
},
"locked": {
"lastModified": 1711108213,
"narHash": "sha256-Q8cwpA2LQOInqeXVckrfFlbzHB8HOWrYntuOxqn3A3g=",
"lastModified": 1712191720,
"narHash": "sha256-xXtSSnVHURHsxLQO30dzCKW5NJVGV/umdQPmFjPFMVA=",
"owner": "nix-community",
"repo": "nixos-generators",
"rev": "417a857dfb824e60930881a254dd67d6796f5884",
"rev": "0c15e76bed5432d7775a22e8d22059511f59d23a",
"type": "github"
},
"original": {
@@ -78,11 +78,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1711297276,
"narHash": "sha256-KtHBr73Z729krfueBV6pUsEyq/4vILGP77DPmrKOTrI=",
"lastModified": 1712468661,
"narHash": "sha256-n2gVVBs+rV+HzPv/N3QQv5cdAXqSkjmaObvfeMqnw2c=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "3d41d1087707826b3a90685ab69147f8dc8145d5",
"rev": "298edc8f1e0dfffce67f50375c9f5952e04a6d02",
"type": "github"
},
"original": {
@@ -110,11 +110,11 @@
"nixpkgs-stable": []
},
"locked": {
"lastModified": 1711249319,
"narHash": "sha256-N+Pp3/8H+rd7cO71VNV/ovV/Kwt+XNeUHNhsmyTabdM=",
"lastModified": 1712458908,
"narHash": "sha256-DMgBS+jNHDg8z3g9GkwqL8xTKXCRQ/0FGsAyrniVonc=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "405987a66cce9a4a82f321f11b205982a7127c88",
"rev": "39191e8e6265b106c9a2ba0cfd3a4dafe98a31c6",
"type": "github"
},
"original": {
@@ -130,11 +130,11 @@
]
},
"locked": {
"lastModified": 1710781103,
"narHash": "sha256-nehQK/XTFxfa6rYKtbi8M1w+IU1v5twYhiyA4dg1vpg=",
"lastModified": 1711963903,
"narHash": "sha256-N3QDhoaX+paWXHbEXZapqd1r95mdshxToGowtjtYkGI=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "7ee5aaac63c30d3c97a8c56efe89f3b2aa9ae564",
"rev": "49dc4a92b02b8e68798abd99184f228243b6e3ac",
"type": "github"
},
"original": {

View File

@@ -25,7 +25,7 @@
outputs =
inputs@{ flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } (
{ lib, ... }:
{ ... }:
{
systems = [
"x86_64-linux"
@@ -34,28 +34,18 @@
];
imports = [
./checks/flake-module.nix
./devShell.nix
./formatter.nix
./templates/flake-module.nix
./clanModules/flake-module.nix
./pkgs/flake-module.nix
./flakeModules/flake-module.nix
(import ./flakeModules/clan.nix inputs.self)
./devShell.nix
# TODO: migrate this @davHau
# ./docs/flake-module
./docs/nix/flake-module.nix
./formatter.nix
./lib/flake-module.nix
./nixosModules/flake-module.nix
{
options.flake = flake-parts.lib.mkSubmoduleOptions {
clanInternals = lib.mkOption {
type = lib.types.submodule {
options = {
all-machines-json = lib.mkOption { type = lib.types.attrsOf lib.types.str; };
machines = lib.mkOption { type = lib.types.attrsOf (lib.types.attrsOf lib.types.unspecified); };
machinesFunc = lib.mkOption { type = lib.types.attrsOf (lib.types.attrsOf lib.types.unspecified); };
};
};
};
};
}
./pkgs/flake-module.nix
./templates/flake-module.nix
];
}
);

75
flakeModules/clan.nix Normal file
View File

@@ -0,0 +1,75 @@
# flake-parts module factory: called with the clan-core flake itself and
# returns a flake-parts module that exposes the `clan` option namespace and
# feeds the collected settings into `buildClan` to produce the flake outputs.
clan-core:
{
  config,
  lib,
  flake-parts-lib,
  inputs,
  self,
  ...
}:
let
  inherit (lib) mkOption types;
  # buildClan turns the option values below into the actual flake outputs
  # (nixosConfigurations, clanInternals, ...).
  buildClan = import ../lib/build-clan {
    inherit lib clan-core;
    inherit (inputs) nixpkgs;
  };
  cfg = config.clan;
in
{
  options.clan = {
    directory = mkOption {
      type = types.path;
      description = "The directory containing the clan subdirectory";
      default = self; # default to the directory of the flake
    };
    specialArgs = mkOption {
      type = types.attrsOf types.raw;
      default = { };
      description = "Extra arguments to pass to nixosSystem i.e. useful to make self available";
    };
    machines = mkOption {
      type = types.attrsOf types.raw;
      default = { };
      description = "Allows to include machine-specific modules i.e. machines.\${name} = { ... }";
    };
    clanName = mkOption {
      type = types.str;
      description = "Needs to be (globally) unique, as this determines the folder name where the flake gets downloaded to.";
    };
    clanIcon = mkOption {
      type = types.nullOr types.path;
      default = null;
      description = "A path to an icon to be used for the clan, should be the same for all machines";
    };
    pkgsForSystem = mkOption {
      type = types.functionTo types.raw;
      default = _system: null;
      description = "A map from arch to pkgs, if specified this nixpkgs will be only imported once for each system.";
    };
  };
  # Internal machine metadata consumed by the clan CLI; not user-facing.
  options.flake = flake-parts-lib.mkSubmoduleOptions {
    clanInternals = lib.mkOption {
      type = lib.types.submodule {
        options = {
          all-machines-json = lib.mkOption { type = lib.types.attrsOf lib.types.unspecified; };
          machines = lib.mkOption { type = lib.types.attrsOf (lib.types.attrsOf lib.types.unspecified); };
          machinesFunc = lib.mkOption { type = lib.types.attrsOf (lib.types.attrsOf lib.types.unspecified); };
        };
      };
    };
  };
  config = {
    flake = buildClan {
      inherit (cfg)
        directory
        specialArgs
        machines
        clanName
        clanIcon
        pkgsForSystem
        ;
    };
  };
  # Report this file (not the importing caller) in module-system error messages.
  _file = __curPos.file;
}

View File

@@ -0,0 +1,7 @@
{ self, config, ... }:
{
  # Expose the clan flake-parts module under `flakeModules.clan`;
  # `default` is an alias so consumers can import it without naming it.
  flake.flakeModules = {
    clan = import ./clan.nix self;
    default = config.flake.flakeModules.clan;
  };
}

View File

@@ -37,7 +37,7 @@
options = [
"-eucx"
''
${lib.getExe pkgs.ruff} --fix "$@"
${lib.getExe pkgs.ruff} check --fix "$@"
${lib.getExe pkgs.ruff} format "$@"
''
"--" # this argument is ignored by bash

View File

@@ -58,6 +58,7 @@ let
(machines.${name} or { })
(
{
networking.hostName = lib.mkDefault name;
clanCore.clanName = clanName;
clanCore.clanIcon = clanIcon;
clanCore.clanDir = directory;
@@ -73,7 +74,9 @@ let
// lib.optionalAttrs (pkgs != null) { nixpkgs.pkgs = lib.mkForce pkgs; }
)
];
inherit specialArgs;
specialArgs = {
inherit clan-core;
} // specialArgs;
};
allMachines = machinesDirs // machines;

View File

@@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBIbwIVnLy+uoDZ6uK/OCc1QK46SIGeC3mVc85dqLYQw lass@ignavia

View File

@@ -1,6 +1,7 @@
{
imports = [
./backups.nix
./facts
./manual.nix
./imports.nix
./metadata.nix
@@ -10,7 +11,6 @@
./outputs.nix
./packages.nix
./schema.nix
./secrets
./vm.nix
./wayland-proxy-virtwl.nix
./zerotier

View File

@@ -0,0 +1,167 @@
# Backwards-compatibility shim: maps the deprecated `clanCore.secrets` and
# `clanCore.secret*` options onto the new `clanCore.facts` interface,
# renaming/removing old options and warning when the old tree is still used.
{ config, lib, ... }:
{
  imports = [
    (lib.mkRemovedOptionModule [
      "clanCore"
      "secretsPrefix"
    ] "secretsPrefix was only used by the sops module and the code is now integrated in there")
    (lib.mkRenamedOptionModule
      [
        "clanCore"
        "secretStore"
      ]
      [
        "clanCore"
        "facts"
        "secretStore"
      ]
    )
    (lib.mkRemovedOptionModule [
      "clanCore"
      "secretsDirectory"
    ] "clancore.secretsDirectory was removed. Use clanCore.facts.secretPathFunction instead")
    (lib.mkRenamedOptionModule
      [
        "clanCore"
        "secretsUploadDirectory"
      ]
      [
        "clanCore"
        "facts"
        "secretUploadDirectory"
      ]
    )
  ];
  # Deprecated option tree; kept hidden from docs and translated to
  # `clanCore.facts.services` in the config section below.
  options.clanCore.secrets = lib.mkOption {
    visible = false;
    default = { };
    type = lib.types.attrsOf (
      lib.types.submodule (service: {
        options = {
          name = lib.mkOption {
            type = lib.types.str;
            default = service.config._module.args.name;
            description = ''
              Namespace of the service
            '';
          };
          generator = lib.mkOption {
            type = lib.types.submodule (
              { ... }:
              {
                options = {
                  path = lib.mkOption {
                    type = lib.types.listOf (lib.types.either lib.types.path lib.types.package);
                    default = [ ];
                    description = ''
                      Extra paths to add to the PATH environment variable when running the generator.
                    '';
                  };
                  prompt = lib.mkOption {
                    type = lib.types.nullOr lib.types.str;
                    default = null;
                    description = ''
                      prompt text to ask for a value.
                      This value will be passed to the script as the environment variable $prompt_value.
                    '';
                  };
                  script = lib.mkOption {
                    type = lib.types.str;
                    description = ''
                      Script to generate the secret.
                      The script will be called with the following variables:
                        - facts: path to a directory where facts can be stored
                        - secrets: path to a directory where secrets can be stored
                      The script is expected to generate all secrets and facts defined in the module.
                    '';
                  };
                };
              }
            );
          };
          secrets = lib.mkOption {
            default = { };
            type = lib.types.attrsOf (
              lib.types.submodule (secret: {
                options =
                  {
                    name = lib.mkOption {
                      type = lib.types.str;
                      description = ''
                        name of the secret
                      '';
                      default = secret.config._module.args.name;
                    };
                    path = lib.mkOption {
                      type = lib.types.path;
                      description = ''
                        path to a secret which is generated by the generator
                      '';
                      default = config.clanCore.facts.secretPathFunction secret;
                      defaultText = lib.literalExpression "config.clanCore.facts.secretPathFunction secret";
                    };
                  }
                  # only the sops backend supports per-secret group access control
                  // lib.optionalAttrs (config.clanCore.facts.secretStore == "sops") {
                    groups = lib.mkOption {
                      type = lib.types.listOf lib.types.str;
                      default = config.clanCore.sops.defaultGroups;
                      description = ''
                        Groups to decrypt the secret for. By default we always use the user's key.
                      '';
                    };
                  };
              })
            );
            description = ''
              path where the secret is located in the filesystem
            '';
          };
          facts = lib.mkOption {
            default = { };
            type = lib.types.attrsOf (
              lib.types.submodule (fact: {
                options = {
                  name = lib.mkOption {
                    type = lib.types.str;
                    description = ''
                      name of the fact
                    '';
                    default = fact.config._module.args.name;
                  };
                  path = lib.mkOption {
                    type = lib.types.path;
                    description = ''
                      path to a fact which is generated by the generator
                    '';
                    default =
                      config.clanCore.clanDir
                      + "/machines/${config.clanCore.machineName}/facts/${fact.config._module.args.name}";
                    defaultText = lib.literalExpression "\${config.clanCore.clanDir}/machines/\${config.clanCore.machineName}/facts/\${fact.config._module.args.name}";
                  };
                  # value reads the fact file from the repo if it already exists.
                  value = lib.mkOption {
                    defaultText = lib.literalExpression "\${config.clanCore.clanDir}/\${fact.config.path}";
                    type = lib.types.nullOr lib.types.str;
                    default =
                      if builtins.pathExists fact.config.path then lib.strings.fileContents fact.config.path else null;
                  };
                };
              })
            );
          };
        };
      })
    );
  };
  # Translate every deprecated `clanCore.secrets.<name>` entry into the
  # equivalent `clanCore.facts.services.<name>`, emitting a deprecation
  # warning per service at evaluation time.
  config = lib.mkIf (config.clanCore.secrets != { }) {
    clanCore.facts.services = lib.mapAttrs' (
      name: service:
      lib.warn "clanCore.secrets.${name} is deprecated, use clanCore.facts.services.${name} instead" (
        lib.nameValuePair name ({
          secret = service.secrets;
          public = service.facts;
          generator = service.generator;
        })
      )
    ) config.clanCore.secrets;
  };
}

View File

@@ -0,0 +1,222 @@
{
  config,
  lib,
  pkgs,
  ...
}:
{
  # clanCore.facts: unified interface for generated public facts and secrets.
  # Concrete storage backends are selected via secretStore/publicStore and
  # implemented by the backend modules listed under `imports` below; the
  # `*Module` options carry the Python import path the clan CLI loads.
  options.clanCore.facts = {
    secretStore = lib.mkOption {
      type = lib.types.enum [
        "sops"
        "password-store"
        "vm"
        "custom"
      ];
      default = "sops";
      description = ''
        method to store secret facts
        custom can be used to define a custom secret fact store.
      '';
    };
    secretModule = lib.mkOption {
      type = lib.types.str;
      internal = true;
      description = ''
        the python import path to the secret module
      '';
    };
    secretUploadDirectory = lib.mkOption {
      type = lib.types.nullOr lib.types.path;
      default = null;
      description = ''
        The directory where secrets are uploaded into, This is backend specific.
      '';
    };
    secretPathFunction = lib.mkOption {
      type = lib.types.raw;
      description = ''
        The function to use to generate the path for a secret.
        The default function will use the path attribute of the secret.
        The function will be called with the secret submodule as an argument.
      '';
    };
    publicStore = lib.mkOption {
      type = lib.types.enum [
        "in_repo"
        "vm"
        "custom"
      ];
      default = "in_repo";
      description = ''
        method to store public facts.
        custom can be used to define a custom public fact store.
      '';
    };
    publicModule = lib.mkOption {
      type = lib.types.str;
      internal = true;
      description = ''
        the python import path to the public module
      '';
    };
    publicDirectory = lib.mkOption {
      type = lib.types.nullOr lib.types.path;
      default = null;
    };
    services = lib.mkOption {
      default = { };
      type = lib.types.attrsOf (
        lib.types.submodule (service: {
          options = {
            name = lib.mkOption {
              type = lib.types.str;
              default = service.config._module.args.name;
              description = ''
                Namespace of the service
              '';
            };
            generator = lib.mkOption {
              type = lib.types.submodule (
                # NOTE: `config` here is the generator submodule's own config,
                # not the NixOS top-level config.
                { config, ... }:
                {
                  options = {
                    path = lib.mkOption {
                      type = lib.types.listOf (lib.types.either lib.types.path lib.types.package);
                      default = [ ];
                      description = ''
                        Extra paths to add to the PATH environment variable when running the generator.
                      '';
                    };
                    prompt = lib.mkOption {
                      type = lib.types.nullOr lib.types.str;
                      default = null;
                      description = ''
                        prompt text to ask for a value.
                        This value will be passed to the script as the environment variable $prompt_value.
                      '';
                    };
                    script = lib.mkOption {
                      type = lib.types.str;
                      description = ''
                        Shell script snippet to generate the secrets and facts.
                        The script has access to the following environment variables:
                          - facts: path to a directory where facts can be stored
                          - secrets: path to a directory where secrets can be stored
                        The script is expected to generate all secrets and facts defined for this service.
                      '';
                    };
                    # finalScript wraps the user script with a fixed PATH and a
                    # snapshot of /etc copied into the sandbox before it runs.
                    finalScript = lib.mkOption {
                      type = lib.types.str;
                      readOnly = true;
                      internal = true;
                      default = ''
                        set -eu -o pipefail
                        export PATH="${lib.makeBinPath config.path}:${pkgs.coreutils}/bin"
                        # prepare sandbox user
                        mkdir -p /etc
                        cp ${
                          pkgs.runCommand "fake-etc" { } ''
                            export PATH="${pkgs.coreutils}/bin"
                            mkdir -p $out
                            cp /etc/* $out/
                          ''
                        }/* /etc/
                        ${config.script}
                      '';
                    };
                  };
                }
              );
            };
            secret = lib.mkOption {
              default = { };
              type = lib.types.attrsOf (
                lib.types.submodule (secret: {
                  options =
                    {
                      name = lib.mkOption {
                        type = lib.types.str;
                        description = ''
                          name of the secret
                        '';
                        default = secret.config._module.args.name;
                      };
                      path = lib.mkOption {
                        type = lib.types.str;
                        description = ''
                          path to a secret which is generated by the generator
                        '';
                        default = config.clanCore.facts.secretPathFunction secret;
                      };
                    }
                    # only the sops backend supports per-secret group access control
                    // lib.optionalAttrs (config.clanCore.facts.secretModule == "clan_cli.facts.secret_modules.sops") {
                      groups = lib.mkOption {
                        type = lib.types.listOf lib.types.str;
                        default = config.clanCore.sops.defaultGroups;
                        description = ''
                          Groups to decrypt the secret for. By default we always use the user's key.
                        '';
                      };
                    };
                })
              );
              description = ''
                path where the secret is located in the filesystem
              '';
            };
            public = lib.mkOption {
              default = { };
              type = lib.types.attrsOf (
                lib.types.submodule (fact: {
                  options = {
                    name = lib.mkOption {
                      type = lib.types.str;
                      description = ''
                        name of the public fact
                      '';
                      default = fact.config._module.args.name;
                    };
                    path = lib.mkOption {
                      type = lib.types.path;
                      description = ''
                        path to a fact which is generated by the generator
                      '';
                      defaultText = lib.literalExpression "\${config.clanCore.clanDir}/machines/\${config.clanCore.machineName}/facts/\${fact.config.name}";
                      default =
                        config.clanCore.clanDir + "/machines/${config.clanCore.machineName}/facts/${fact.config.name}";
                    };
                    # value reads the fact file from the repo if it already exists.
                    value = lib.mkOption {
                      defaultText = lib.literalExpression "\${config.clanCore.clanDir}/\${fact.config.path}";
                      type = lib.types.nullOr lib.types.str;
                      default =
                        if builtins.pathExists fact.config.path then lib.strings.fileContents fact.config.path else null;
                    };
                  };
                })
              );
            };
          };
        })
      );
    };
  };
  # Storage backend implementations plus the legacy `clanCore.secrets` shim.
  imports = [
    ./compat.nix
    ./secret/sops.nix
    ./secret/password-store.nix
    ./secret/vm.nix
    ./public/in_repo.nix
    ./public/vm.nix
  ];
}

View File

@@ -0,0 +1,6 @@
{ config, lib, ... }:
{
  # Select the in-repo backend for public facts: points the clan CLI at the
  # matching Python implementation when publicStore is "in_repo".
  config = lib.mkIf (config.clanCore.facts.publicStore == "in_repo") {
    clanCore.facts.publicModule = "clan_cli.facts.public_modules.in_repo";
  };
}

View File

@@ -0,0 +1,6 @@
{ config, lib, ... }:
{
  # Select the VM backend for public facts: points the clan CLI at the
  # matching Python implementation when publicStore is "vm".
  config = lib.mkIf (config.clanCore.facts.publicStore == "vm") {
    clanCore.facts.publicModule = "clan_cli.facts.public_modules.vm";
  };
}

View File

@@ -0,0 +1,17 @@
{ config, lib, ... }:
{
  options.clan.password-store.targetDirectory = lib.mkOption {
    type = lib.types.path;
    default = "/etc/secrets";
    description = ''
      The directory where the password store is uploaded to.
    '';
  };
  # Wire up the password-store backend: secret paths resolve to files below
  # the configured upload directory, and the matching Python module in the
  # clan CLI handles the actual transfer.
  config = lib.mkIf (config.clanCore.facts.secretStore == "password-store") {
    clanCore.facts.secretPathFunction =
      secret: "${config.clan.password-store.targetDirectory}/${secret.config.name}";
    clanCore.facts.secretUploadDirectory = config.clan.password-store.targetDirectory;
    clanCore.facts.secretModule = "clan_cli.facts.secret_modules.password_store";
  };
}

View File

@@ -41,10 +41,15 @@ in
description = "The default groups to for encryption use when no groups are specified.";
};
};
config = lib.mkIf (config.clanCore.secretStore == "sops") {
clanCore.secretsDirectory = "/run/secrets";
clanCore.secretsPrefix = config.clanCore.machineName + "-";
system.clan.secretFactsModule = "clan_cli.facts.secret_modules.sops";
config = lib.mkIf (config.clanCore.facts.secretStore == "sops") {
# Before we generate a secret we cannot know the path yet, so we need to set it to an empty string
clanCore.facts.secretPathFunction =
secret:
config.sops.secrets.${"${config.clanCore.machineName}-${secret.config.name}"}.path
or "/no-such-path";
clanCore.facts.secretModule = "clan_cli.facts.secret_modules.sops";
clanCore.facts.secretUploadDirectory = lib.mkDefault "/var/lib/sops-nix";
sops.secrets = builtins.mapAttrs (name: _: {
sopsFile = config.clanCore.clanDir + "/sops/secrets/${name}/secret";
format = "binary";
@@ -57,6 +62,5 @@ in
sops.age.keyFile = lib.mkIf (builtins.pathExists (
config.clanCore.clanDir + "/sops/secrets/${config.clanCore.machineName}-age.key/secret"
)) (lib.mkDefault "/var/lib/sops-nix/key.txt");
clanCore.secretsUploadDirectory = lib.mkDefault "/var/lib/sops-nix";
};
}

View File

@@ -0,0 +1,8 @@
{ config, lib, ... }:
{
  # VM backend: secrets live under a fixed /etc/secrets path inside the VM;
  # the matching Python module in the clan CLI handles placement.
  config = lib.mkIf (config.clanCore.facts.secretStore == "vm") {
    clanCore.facts.secretPathFunction = secret: "/etc/secrets/${secret.config.name}";
    clanCore.facts.secretUploadDirectory = "/etc/secrets";
    clanCore.facts.secretModule = "clan_cli.facts.secret_modules.vm";
  };
}

View File

@@ -23,6 +23,7 @@
};
clanDir = lib.mkOption {
type = lib.types.either lib.types.path lib.types.str;
default = ".";
description = ''
the location of the flake repo, used to calculate the location of facts and secrets
'';
@@ -35,6 +36,7 @@
};
machineName = lib.mkOption {
type = lib.types.str;
default = "nixos";
description = ''
the name of the machine
'';

View File

@@ -74,22 +74,11 @@
systemd.services.NetworkManager-wait-online.enable = false;
systemd.network.wait-online.enable = false;
# Provide a default network configuration but don't compete with network-manager or dhcpcd
systemd.network.networks."50-uplink" =
lib.mkIf (!(config.networking.networkmanager.enable || config.networking.dhcpcd.enable))
{
matchConfig.Type = "ether";
networkConfig = {
DHCP = "yes";
LLDP = "yes";
LLMNR = "yes";
MulticastDNS = "yes";
IPv6AcceptRA = "yes";
};
};
systemd.network.networks."99-ethernet-default-dhcp".networkConfig.MulticastDNS = lib.mkDefault "yes";
systemd.network.networks."99-wireless-client-dhcp".networkConfig.MulticastDNS = lib.mkDefault "yes";
networking.firewall.allowedUDPPorts = [ 5353 ]; # Multicast DNS
# Use networkd instead of the pile of shell scripts
networking.useNetworkd = lib.mkDefault true;
networking.useDHCP = lib.mkDefault false;
};
}

View File

@@ -44,33 +44,6 @@
'';
default = false;
};
secretsUploadDirectory = lib.mkOption {
type = lib.types.path;
description = ''
the directory on the deployment server where secrets are uploaded
'';
};
publicFactsModule = lib.mkOption {
type = lib.types.str;
description = ''
the python import path to the facts module
'';
default = "clan_cli.facts.public_modules.in_repo";
};
secretFactsModule = lib.mkOption {
type = lib.types.str;
description = ''
the python import path to the secrets module
'';
default = "clan_cli.facts.secret_modules.sops";
};
secretsData = lib.mkOption {
type = lib.types.path;
description = ''
secret data as json for the generator
'';
default = pkgs.writers.writeJSON "secrets.json" config.clanCore.secrets;
};
vm.create = lib.mkOption {
type = lib.types.path;
description = ''
@@ -92,10 +65,16 @@
# optimization for faster secret generate/upload and machines update
config = {
system.clan.deployment.data = {
inherit (config.system.clan) publicFactsModule secretFactsModule secretsData;
facts = {
inherit (config.clanCore.facts)
secretUploadDirectory
secretModule
publicModule
services
;
};
inherit (config.clan.networking) targetHost buildHost;
inherit (config.clan.deployment) requireExplicitUpdate;
inherit (config.clanCore) secretsUploadDirectory;
};
system.clan.deployment.file = pkgs.writeText "deployment.json" (
builtins.toJSON config.system.clan.deployment.data

View File

@@ -7,5 +7,6 @@ in
type = lib.types.attrs;
description = "The json schema for the .clan options namespace";
default = jsonschema.parseOptions options.clan;
defaultText = lib.literalExpression "jsonschema.schemaToJSON options.clan";
};
}

View File

@@ -1,196 +0,0 @@
# NixOS module declaring the (legacy) clanCore secrets/facts option tree.
# A "service" groups a generator script with the secrets and facts it
# produces; backends (sops / password-store / vm) are wired in via imports.
{
config,
lib,
pkgs,
...
}:
{
# Which secret backend stores the generated secrets.
options.clanCore.secretStore = lib.mkOption {
type = lib.types.enum [
"sops"
"password-store"
"vm"
"custom"
];
default = "sops";
description = ''
method to store secrets
custom can be used to define a custom secret store.
'';
};
# Backend-specific install location on the target machine.
options.clanCore.secretsDirectory = lib.mkOption {
type = lib.types.path;
description = ''
The directory where secrets are installed to. This is backend specific.
'';
};
# Backend-specific upload staging directory (null if the backend has none).
options.clanCore.secretsUploadDirectory = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
description = ''
The directory where secrets are uploaded into, This is backend specific.
'';
};
# Prepended to every secret name when building its filesystem path.
options.clanCore.secretsPrefix = lib.mkOption {
type = lib.types.str;
default = "";
description = ''
Prefix for secrets. This is backend specific.
'';
};
# Per-service declaration of generator + secrets + facts.
options.clanCore.secrets = lib.mkOption {
default = { };
type = lib.types.attrsOf (
lib.types.submodule (service: {
options = {
name = lib.mkOption {
type = lib.types.str;
default = service.config._module.args.name;
description = ''
Namespace of the service
'';
};
# The script (plus environment) that produces this service's
# secrets and facts; runs sandboxed via bubblewrap on the CLI side.
generator = lib.mkOption {
type = lib.types.submodule (
{ config, ... }:
{
options = {
path = lib.mkOption {
type = lib.types.listOf (lib.types.either lib.types.path lib.types.package);
default = [ ];
description = ''
Extra paths to add to the PATH environment variable when running the generator.
'';
};
prompt = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = ''
prompt text to ask for a value.
This value will be passed to the script as the environment variable $prompt_value.
'';
};
script = lib.mkOption {
type = lib.types.str;
description = ''
Script to generate the secret.
The script will be called with the following variables:
- facts: path to a directory where facts can be stored
- secrets: path to a directory where secrets can be stored
The script is expected to generate all secrets and facts defined in the module.
'';
};
# Read-only wrapper: sets up PATH and a fake /etc inside the
# sandbox, then runs the user-provided script.
finalScript = lib.mkOption {
type = lib.types.str;
readOnly = true;
internal = true;
default = ''
set -eu -o pipefail
export PATH="${lib.makeBinPath config.path}:${pkgs.coreutils}/bin"
# prepare sandbox user
mkdir -p /etc
cp ${
pkgs.runCommand "fake-etc" { } ''
export PATH="${pkgs.coreutils}/bin"
mkdir -p $out
cp /etc/* $out/
''
}/* /etc/
${config.script}
'';
};
};
}
);
};
# Secrets produced by the generator; `config'` captures the outer
# NixOS config so the submodule can reference clanCore options.
secrets =
let
config' = config;
in
lib.mkOption {
default = { };
type = lib.types.attrsOf (
lib.types.submodule (
{ config, name, ... }:
{
options =
{
name = lib.mkOption {
type = lib.types.str;
description = ''
name of the secret
'';
default = name;
};
path = lib.mkOption {
type = lib.types.str;
description = ''
path to a secret which is generated by the generator
'';
default = "${config'.clanCore.secretsDirectory}/${config'.clanCore.secretsPrefix}${config.name}";
};
}
# Only the sops backend supports group-based decryption.
// lib.optionalAttrs (config'.clanCore.secretStore == "sops") {
groups = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = config'.clanCore.sops.defaultGroups;
description = ''
Groups to decrypt the secret for. By default we always use the user's key.
'';
};
};
}
)
);
description = ''
path where the secret is located in the filesystem
'';
};
# Public (non-secret) facts; stored in the repo under
# machines/<machine>/facts/<name> and readable at eval time.
facts = lib.mkOption {
default = { };
type = lib.types.attrsOf (
lib.types.submodule (fact: {
options = {
name = lib.mkOption {
type = lib.types.str;
description = ''
name of the fact
'';
default = fact.config._module.args.name;
};
path = lib.mkOption {
type = lib.types.path;
description = ''
path to a fact which is generated by the generator
'';
default =
config.clanCore.clanDir
+ "/machines/${config.clanCore.machineName}/facts/${fact.config._module.args.name}";
};
# Contents of the fact file, or null when it has not been
# generated yet (pathExists guards against eval failure).
value = lib.mkOption {
defaultText = lib.literalExpression "\${config.clanCore.clanDir}/\${fact.config.path}";
type = lib.types.nullOr lib.types.str;
default =
if builtins.pathExists fact.config.path then lib.strings.fileContents fact.config.path else null;
};
};
})
);
};
};
})
);
};
# Backend implementations that consume the options declared above.
imports = [
./sops.nix
./password-store.nix
./vm.nix
];
}

View File

@@ -1,15 +0,0 @@
# NixOS module wiring the password-store backend into clanCore secrets.
{ config, lib, ... }:
{
# Filesystem location on the deployment target where the password
# store contents are uploaded.
options.clan.password-store.targetDirectory = lib.mkOption {
type = lib.types.path;
default = "/etc/secrets";
description = ''
The directory where the password store is uploaded to.
'';
};
# Only active when password-store is the selected secret backend:
# point both the install and upload directories at the target dir
# and select the matching clan-cli python module.
config = lib.mkIf (config.clanCore.secretStore == "password-store") {
clanCore.secretsDirectory = config.clan.password-store.targetDirectory;
clanCore.secretsUploadDirectory = config.clan.password-store.targetDirectory;
system.clan.secretFactsModule = "clan_cli.facts.secret_modules.password_store";
};
}

View File

@@ -1,9 +0,0 @@
# NixOS module wiring the VM secret backend into clanCore secrets.
{ config, lib, ... }:
{
# Only active when "vm" is the selected secret backend: secrets live
# directly under /etc/secrets inside the VM, and clan-cli uses the
# vm-specific python modules for both secret and public facts.
config = lib.mkIf (config.clanCore.secretStore == "vm") {
clanCore.secretsDirectory = "/etc/secrets";
clanCore.secretsUploadDirectory = "/etc/secrets";
system.clan.secretFactsModule = "clan_cli.facts.secret_modules.vm";
system.clan.publicFactsModule = "clan_cli.facts.public_modules.vm";
};
}

View File

@@ -6,7 +6,7 @@
}:
let
cfg = config.clan.networking.zerotier;
facts = config.clanCore.secrets.zerotier.facts or { };
facts = config.clanCore.facts.services.zerotier.public or { };
genMoonScript = pkgs.runCommand "genmoon" { nativeBuildInputs = [ pkgs.python3 ]; } ''
install -Dm755 ${./genmoon.py} $out/bin/genmoon
patchShebangs $out/bin/genmoon
@@ -24,6 +24,7 @@ in
name = lib.mkOption {
type = lib.types.str;
default = config.clanCore.clanName;
defaultText = "config.clanCore.clanName";
description = ''
zerotier network name
'';
@@ -102,7 +103,6 @@ in
systemd.network.networks."09-zerotier" = {
matchConfig.Name = "zt*";
networkConfig = {
LLMNR = true;
LLDP = true;
MulticastDNS = true;
KeepConfiguration = "static";
@@ -111,7 +111,7 @@ in
systemd.services.zerotierone.serviceConfig.ExecStartPre = [
"+${pkgs.writeShellScript "init-zerotier" ''
cp ${config.clanCore.secrets.zerotier.secrets.zerotier-identity-secret.path} /var/lib/zerotier-one/identity.secret
cp ${config.clanCore.facts.services.zerotier.secret.zerotier-identity-secret.path} /var/lib/zerotier-one/identity.secret
zerotier-idtool getpublic /var/lib/zerotier-one/identity.secret > /var/lib/zerotier-one/identity.public
${lib.optionalString (cfg.controller.enable) ''
@@ -159,9 +159,6 @@ in
''}"
];
networking.firewall.interfaces."zt+".allowedTCPPorts = [ 5353 ]; # mdns
networking.firewall.interfaces."zt+".allowedUDPPorts = [ 5353 ]; # mdns
networking.firewall.allowedTCPPorts = [ 9993 ]; # zerotier
networking.firewall.allowedUDPPorts = [ 9993 ]; # zerotier
@@ -179,10 +176,10 @@ in
(lib.mkIf cfg.controller.enable {
# only the controller needs to have the key in the repo, the other clients can be dynamic
# we generate the zerotier code manually for the controller, since it's part of the bootstrap command
clanCore.secrets.zerotier = {
facts.zerotier-ip = { };
facts.zerotier-network-id = { };
secrets.zerotier-identity-secret = { };
clanCore.facts.services.zerotier = {
public.zerotier-ip = { };
public.zerotier-network-id = { };
secret.zerotier-identity-secret = { };
generator.path = [
config.services.zerotierone.package
pkgs.fakeroot
@@ -200,9 +197,9 @@ in
environment.systemPackages = [ config.clanCore.clanPkgs.zerotier-members ];
})
(lib.mkIf (!cfg.controller.enable && cfg.networkId != null) {
clanCore.secrets.zerotier = {
facts.zerotier-ip = { };
secrets.zerotier-identity-secret = { };
clanCore.facts.services.zerotier = {
public.zerotier-ip = { };
secret.zerotier-identity-secret = { };
generator.path = [
config.services.zerotierone.package
pkgs.python3

View File

@@ -5,11 +5,18 @@
...
}:
{
############################################
# #
# For install image debugging execute: #
# $ qemu-kvm result/stick.raw -snapshot #
# #
############################################
systemd.tmpfiles.rules = [ "d /var/shared 0777 root root - -" ];
imports = [
(modulesPath + "/profiles/installation-device.nix")
(modulesPath + "/profiles/all-hardware.nix")
(modulesPath + "/profiles/base.nix")
(modulesPath + "/installer/cd-dvd/iso-image.nix")
];
services.openssh.settings.PermitRootLogin = "yes";
system.activationScripts.root-password = ''
@@ -40,7 +47,7 @@
--arg password "$(cat /var/shared/root-password)" \
--arg onion_address "$(cat /var/shared/onion-hostname)" \
--argjson local_addrs "$local_addrs" \
'{ password: $password, onion_address: $onion_address, local_addresses: $local_addrs }' \
'{ pass: $password, onion_address: $onion_address, addrs: $local_addrs }' \
> /var/shared/login.json
cat /var/shared/login.json | qrencode -t utf8 -o /var/shared/qrcode.utf8
'';
@@ -58,46 +65,5 @@
cat /var/shared/qrcode.utf8
fi
'';
boot.loader.systemd-boot.enable = true;
# Grub doesn't find devices for both BIOS and UEFI?
#boot.loader.grub.efiInstallAsRemovable = true;
#boot.loader.grub.efiSupport = true;
disko.devices = {
disk = {
stick = {
type = "disk";
device = "/vda";
imageSize = "3G";
content = {
type = "gpt";
partitions = {
#boot = {
# size = "1M";
# type = "EF02"; # for grub MBR
#};
ESP = {
size = "100M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
};
isoImage.squashfsCompression = "zstd";
}

View File

@@ -22,13 +22,13 @@ After you can use the local bin wrapper to test things in the cli:
By default tests run in parallel using pytest-parallel.
pytest-parallel however breaks `breakpoint()`. To disable it, use this:
```console
```bash
pytest -n0 -s
```
You can also run a single test like this:
```console
```bash
pytest -n0 -s tests/test_secrets_cli.py::test_users
```
@@ -36,12 +36,12 @@ pytest -n0 -s tests/test_secrets_cli.py::test_users
Run all impure checks
```console
```bash
nix run .#impure-checks
```
Run all checks
```console
```bash
nix flake check
```

View File

@@ -6,7 +6,18 @@ from pathlib import Path
from types import ModuleType
from typing import Any
from . import backups, config, facts, flakes, flash, history, machines, secrets, vms
from . import (
backups,
config,
facts,
flakes,
flash,
flatpak,
history,
machines,
secrets,
vms,
)
from .custom_logger import setup_logging
from .dirs import get_clan_flake_toplevel
from .errors import ClanCmdError, ClanError
@@ -129,6 +140,8 @@ def main() -> None:
if args.debug:
setup_logging(logging.DEBUG, root_log_name=__name__.split(".")[0])
log.debug("Debug log activated")
if flatpak.is_flatpak():
log.debug("Running inside a flatpak sandbox")
else:
setup_logging(logging.INFO, root_log_name=__name__.split(".")[0])

View File

@@ -18,20 +18,24 @@ def check_secrets(machine: Machine, service: None | str = None) -> bool:
if service:
services = [service]
else:
services = list(machine.secrets_data.keys())
services = list(machine.facts_data.keys())
for service in services:
for secret_fact in machine.secrets_data[service]["secrets"]:
for secret_fact in machine.facts_data[service]["secret"]:
if isinstance(secret_fact, str):
secret_name = secret_fact
else:
secret_name = secret_fact["name"]
if not secret_facts_store.exists(service, secret_name):
log.info(f"Secret fact {secret_fact} for service {service} is missing")
log.info(
f"Secret fact '{secret_fact}' for service '{service}' in machine {machine.name} is missing."
)
missing_secret_facts.append((service, secret_name))
for public_fact in machine.secrets_data[service]["facts"]:
for public_fact in machine.facts_data[service]["public"]:
if not public_facts_store.exists(service, public_fact):
log.info(f"public Fact {public_fact} for service {service} is missing")
log.info(
f"Public fact '{public_fact}' for service '{service}' in machine {machine.name} is missing."
)
missing_public_facts.append((service, public_fact))
log.debug(f"missing_secret_facts: {missing_secret_facts}")

View File

@@ -11,6 +11,7 @@ from clan_cli.cmd import run
from ..errors import ClanError
from ..git import commit_files
from ..machines.inventory import get_all_machines, get_selected_machines
from ..machines.machines import Machine
from ..nix import nix_shell
from .check import check_secrets
@@ -36,138 +37,167 @@ def generate_service_facts(
public_facts_store: FactStoreBase,
tmpdir: Path,
prompt: Callable[[str], str],
) -> None:
) -> bool:
service_dir = tmpdir / service
# check if all secrets exist and generate them if at least one is missing
needs_regeneration = not check_secrets(machine, service=service)
log.debug(f"{service} needs_regeneration: {needs_regeneration}")
if needs_regeneration:
if not isinstance(machine.flake, Path):
msg = f"flake is not a Path: {machine.flake}"
msg += "fact/secret generation is only supported for local flakes"
if not needs_regeneration:
return False
if not isinstance(machine.flake, Path):
msg = f"flake is not a Path: {machine.flake}"
msg += "fact/secret generation is only supported for local flakes"
env = os.environ.copy()
facts_dir = service_dir / "facts"
facts_dir.mkdir(parents=True)
env["facts"] = str(facts_dir)
secrets_dir = service_dir / "secrets"
secrets_dir.mkdir(parents=True)
env["secrets"] = str(secrets_dir)
# compatibility for old outputs.nix users
if isinstance(machine.secrets_data[service]["generator"], str):
generator = machine.secrets_data[service]["generator"]
env = os.environ.copy()
facts_dir = service_dir / "facts"
facts_dir.mkdir(parents=True)
env["facts"] = str(facts_dir)
secrets_dir = service_dir / "secrets"
secrets_dir.mkdir(parents=True)
env["secrets"] = str(secrets_dir)
# compatibility for old outputs.nix users
if isinstance(machine.facts_data[service]["generator"], str):
generator = machine.facts_data[service]["generator"]
else:
generator = machine.facts_data[service]["generator"]["finalScript"]
if machine.facts_data[service]["generator"]["prompt"]:
prompt_value = prompt(machine.facts_data[service]["generator"]["prompt"])
env["prompt_value"] = prompt_value
# fmt: off
cmd = nix_shell(
[
"nixpkgs#bash",
"nixpkgs#bubblewrap",
],
[
"bwrap",
"--ro-bind", "/nix/store", "/nix/store",
"--tmpfs", "/usr/lib/systemd",
"--dev", "/dev",
"--bind", str(facts_dir), str(facts_dir),
"--bind", str(secrets_dir), str(secrets_dir),
"--unshare-all",
"--unshare-user",
"--uid", "1000",
"--",
"bash", "-c", generator
],
)
# fmt: on
run(
cmd,
env=env,
)
files_to_commit = []
# store secrets
for secret in machine.facts_data[service]["secret"]:
if isinstance(secret, str):
# TODO: This is the old NixOS module, can be dropped everyone has updated.
secret_name = secret
groups = []
else:
generator = machine.secrets_data[service]["generator"]["finalScript"]
if machine.secrets_data[service]["generator"]["prompt"]:
prompt_value = prompt(
machine.secrets_data[service]["generator"]["prompt"]
)
env["prompt_value"] = prompt_value
# fmt: off
cmd = nix_shell(
[
"nixpkgs#bash",
"nixpkgs#bubblewrap",
],
[
"bwrap",
"--ro-bind", "/nix/store", "/nix/store",
"--tmpfs", "/usr/lib/systemd",
"--dev", "/dev",
"--bind", str(facts_dir), str(facts_dir),
"--bind", str(secrets_dir), str(secrets_dir),
"--unshare-all",
"--unshare-user",
"--uid", "1000",
"--",
"bash", "-c", generator
],
secret_name = secret["name"]
groups = secret.get("groups", [])
secret_file = secrets_dir / secret_name
if not secret_file.is_file():
msg = f"did not generate a file for '{secret_name}' when running the following command:\n"
msg += generator
raise ClanError(msg)
secret_path = secret_facts_store.set(
service, secret_name, secret_file.read_bytes(), groups
)
# fmt: on
run(
cmd,
env=env,
)
files_to_commit = []
# store secrets
for secret in machine.secrets_data[service]["secrets"]:
if isinstance(secret, str):
# TODO: This is the old NixOS module, can be dropped everyone has updated.
secret_name = secret
groups = []
else:
secret_name = secret["name"]
groups = secret.get("groups", [])
if secret_path:
files_to_commit.append(secret_path)
secret_file = secrets_dir / secret_name
if not secret_file.is_file():
msg = f"did not generate a file for '{secret_name}' when running the following command:\n"
msg += generator
raise ClanError(msg)
secret_path = secret_facts_store.set(
service, secret_name, secret_file.read_bytes(), groups
)
if secret_path:
files_to_commit.append(secret_path)
# store facts
for name in machine.secrets_data[service]["facts"]:
fact_file = facts_dir / name
if not fact_file.is_file():
msg = f"did not generate a file for '{name}' when running the following command:\n"
msg += machine.secrets_data[service]["generator"]
raise ClanError(msg)
fact_file = public_facts_store.set(service, name, fact_file.read_bytes())
if fact_file:
files_to_commit.append(fact_file)
commit_files(
files_to_commit,
machine.flake_dir,
f"Update facts/secrets for service {service} in machine {machine.name}",
)
# store facts
for name in machine.facts_data[service]["public"]:
fact_file = facts_dir / name
if not fact_file.is_file():
msg = f"did not generate a file for '{name}' when running the following command:\n"
msg += machine.facts_data[service]["generator"]
raise ClanError(msg)
fact_file = public_facts_store.set(service, name, fact_file.read_bytes())
if fact_file:
files_to_commit.append(fact_file)
commit_files(
files_to_commit,
machine.flake_dir,
f"Update facts/secrets for service {service} in machine {machine.name}",
)
return True
def generate_facts(
machine: Machine,
prompt: None | Callable[[str], str] = None,
) -> None:
def prompt_func(text: str) -> str:
print(f"{text}: ")
return read_multiline_input()
def _generate_facts_for_machine(
machine: Machine, tmpdir: Path, prompt: Callable[[str], str] = prompt_func
) -> bool:
local_temp = tmpdir / machine.name
local_temp.mkdir()
secret_facts_module = importlib.import_module(machine.secret_facts_module)
secret_facts_store = secret_facts_module.SecretStore(machine=machine)
public_facts_module = importlib.import_module(machine.public_facts_module)
public_facts_store = public_facts_module.FactStore(machine=machine)
if prompt is None:
machine_updated = False
for service in machine.facts_data:
machine_updated |= generate_service_facts(
machine=machine,
service=service,
secret_facts_store=secret_facts_store,
public_facts_store=public_facts_store,
tmpdir=local_temp,
prompt=prompt,
)
if machine_updated:
# flush caches to make sure the new secrets are available in evaluation
machine.flush_caches()
return machine_updated
def prompt_func(text: str) -> str:
print(f"{text}: ")
return read_multiline_input()
prompt = prompt_func
def generate_facts(
machines: list[Machine], prompt: Callable[[str], str] = prompt_func
) -> bool:
was_regenerated = False
with TemporaryDirectory() as tmp:
tmpdir = Path(tmp)
for service in machine.secrets_data:
generate_service_facts(
machine=machine,
service=service,
secret_facts_store=secret_facts_store,
public_facts_store=public_facts_store,
tmpdir=tmpdir,
prompt=prompt,
)
print("successfully generated secrets")
for machine in machines:
errors = 0
try:
was_regenerated |= _generate_facts_for_machine(machine, tmpdir, prompt)
except Exception as exc:
log.error(f"Failed to generate facts for {machine.name}: {exc}")
errors += 1
if errors > 0:
raise ClanError(
f"Failed to generate facts for {errors} hosts. Check the logs above"
)
if not was_regenerated:
print("All secrets and facts are already up to date")
return was_regenerated
def generate_command(args: argparse.Namespace) -> None:
machine = Machine(name=args.machine, flake=args.flake)
generate_facts(machine)
if len(args.machines) == 0:
machines = get_all_machines(args.flake)
else:
machines = get_selected_machines(args.flake, args.machines)
generate_facts(machines)
def register_generate_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"machine",
help="The machine to generate facts for",
"machines",
type=str,
help="machine to generate facts for. if empty, generate facts for all machines",
nargs="*",
default=[],
)
parser.set_defaults(func=generate_command)

View File

@@ -106,8 +106,8 @@ class SecretStore(SecretStoreBase):
return local_hash.decode() == remote_hash
def upload(self, output_dir: Path) -> None:
for service in self.machine.secrets_data:
for secret in self.machine.secrets_data[service]["secrets"]:
for service in self.machine.facts_data:
for secret in self.machine.facts_data[service]["secret"]:
if isinstance(secret, dict):
secret_name = secret["name"]
else:

View File

@@ -14,9 +14,9 @@ class SecretStore(SecretStoreBase):
self.machine = machine
# no need to generate keys if we don't manage secrets
if not hasattr(self.machine, "secrets_data"):
if not hasattr(self.machine, "facts_data"):
return
if not self.machine.secrets_data:
if not self.machine.facts_data:
return
if has_machine(self.machine.flake_dir, self.machine.name):

View File

@@ -6,7 +6,7 @@ from ..cmd import CmdOut, run
from ..errors import ClanError
from ..nix import nix_command, nix_shell
DEFAULT_URL: str = "git+https://git.clan.lol/clan/clan-core?new-clan"
DEFAULT_URL: str = "git+https://git.clan.lol/clan/clan-core"
def create_flake(directory: Path, url: str) -> dict[str, CmdOut]:

View File

@@ -4,6 +4,7 @@ import logging
import os
import shlex
import shutil
import textwrap
from collections.abc import Sequence
from dataclasses import dataclass
from pathlib import Path
@@ -20,7 +21,7 @@ log = logging.getLogger(__name__)
def flash_machine(
machine: Machine, disks: dict[str, str], dry_run: bool, debug: bool
machine: Machine, mode: str, disks: dict[str, str], dry_run: bool, debug: bool
) -> None:
secret_facts_module = importlib.import_module(machine.secret_facts_module)
secret_facts_store: SecretStoreBase = secret_facts_module.SecretStore(
@@ -56,6 +57,7 @@ def flash_machine(
disko_install.extend(["--extra-files", str(local_dir), upload_dir])
disko_install.extend(["--flake", str(machine.flake) + "#" + machine.name])
disko_install.extend(["--mode", str(mode)])
cmd = nix_shell(
["nixpkgs#disko"],
@@ -73,6 +75,7 @@ class FlashOptions:
dry_run: bool
confirm: bool
debug: bool
mode: str
class AppendDiskAction(argparse.Action):
@@ -99,14 +102,21 @@ def flash_command(args: argparse.Namespace) -> None:
dry_run=args.dry_run,
confirm=not args.yes,
debug=args.debug,
mode=args.mode,
)
machine = Machine(opts.machine, flake=opts.flake)
if opts.confirm and not opts.dry_run:
disk_str = ", ".join(f"{name}={device}" for name, device in opts.disks.items())
ask = input(f"Install {machine.name} to {disk_str}? [y/N] ")
msg = f"Install {machine.name}"
if disk_str != "":
msg += f" to {disk_str}"
msg += "? [y/N] "
ask = input(msg)
if ask != "y":
return
flash_machine(machine, disks=opts.disks, dry_run=opts.dry_run, debug=opts.debug)
flash_machine(
machine, opts.mode, disks=opts.disks, dry_run=opts.dry_run, debug=opts.debug
)
def register_parser(parser: argparse.ArgumentParser) -> None:
@@ -124,6 +134,20 @@ def register_parser(parser: argparse.ArgumentParser) -> None:
help="device to flash to",
default={},
)
mode_help = textwrap.dedent("""\
Specify the mode of operation. Valid modes are: format, mount."
Format will format the disk before installing.
Mount will mount the disk before installing.
Mount is useful for updating an existing system without losing data.
""")
parser.add_argument(
"--mode",
type=str,
help=mode_help,
choices=["format", "mount"],
default="format",
)
parser.add_argument(
"--yes",
action="store_true",

View File

@@ -0,0 +1,16 @@
import os
def is_flatpak() -> bool:
    """Check if the current process is running inside a flatpak sandbox.

    Returns:
        True only when BOTH markers are present: the ``FLATPAK_ID``
        environment variable and the ``/.flatpak-info`` file.
    """
    # FLATPAK_ID is exported by the flatpak runtime for the sandboxed app.
    flatpak_env = "FLATPAK_ID" in os.environ
    # Flatpak places /.flatpak-info inside every sandbox. os.path.exists
    # never raises (it returns False on any OSError such as
    # PermissionError), unlike the previous open()-based probe which
    # caught only FileNotFoundError.
    flatpak_file = os.path.exists("/.flatpak-info")
    # NOTE(review): requiring BOTH markers is strict — a sandbox with a
    # scrubbed environment would be reported as not-flatpak; confirm this
    # is intended rather than `or`.
    return flatpak_env and flatpak_file

View File

@@ -5,6 +5,7 @@ from clan_cli.errors import ClanError
from clan_cli.nix import nix_shell
from .cmd import Log, run
from .locked_open import locked_open
def commit_file(
@@ -55,38 +56,45 @@ def _commit_file_to_git(
:param commit_message: The commit message.
:raises ClanError: If the file is not in the git repository.
"""
for file_path in file_paths:
with locked_open(repo_dir / ".git" / "clan.lock", "w+"):
for file_path in file_paths:
cmd = nix_shell(
["nixpkgs#git"],
["git", "-C", str(repo_dir), "add", str(file_path)],
)
# add the file to the git index
run(
cmd,
log=Log.BOTH,
error_msg=f"Failed to add {file_path} file to git index",
)
# check if there is a diff
cmd = nix_shell(
["nixpkgs#git"],
["git", "-C", str(repo_dir), "add", str(file_path)],
["git", "-C", str(repo_dir), "diff", "--cached", "--exit-code"]
+ [str(file_path) for file_path in file_paths],
)
# add the file to the git index
result = run(cmd, check=False, cwd=repo_dir)
# if there is no diff, return
if result.returncode == 0:
return
run(cmd, log=Log.BOTH, error_msg=f"Failed to add {file_path} file to git index")
# commit only that file
cmd = nix_shell(
["nixpkgs#git"],
[
"git",
"-C",
str(repo_dir),
"commit",
"-m",
commit_message,
]
+ [str(file_path) for file_path in file_paths],
)
# check if there is a diff
cmd = nix_shell(
["nixpkgs#git"],
["git", "-C", str(repo_dir), "diff", "--cached", "--exit-code"]
+ [str(file_path) for file_path in file_paths],
)
result = run(cmd, check=False, cwd=repo_dir)
# if there is no diff, return
if result.returncode == 0:
return
# commit only that file
cmd = nix_shell(
["nixpkgs#git"],
[
"git",
"-C",
str(repo_dir),
"commit",
"-m",
commit_message,
]
+ [str(file_path) for file_path in file_paths],
)
run(cmd, error_msg=f"Failed to commit {file_paths} to git repository {repo_dir}")
run(
cmd, error_msg=f"Failed to commit {file_paths} to git repository {repo_dir}"
)

View File

@@ -11,7 +11,7 @@ from .dirs import user_history_file
@contextmanager
def _locked_open(filename: str | Path, mode: str = "r") -> Generator:
def locked_open(filename: str | Path, mode: str = "r") -> Generator:
"""
This is a context manager that provides an advisory write lock on the file specified by `filename` when entering the context, and releases the lock when leaving the context. The lock is acquired using the `fcntl` module's `LOCK_EX` flag, which applies an exclusive write lock to the file.
"""
@@ -22,12 +22,12 @@ def _locked_open(filename: str | Path, mode: str = "r") -> Generator:
def write_history_file(data: Any) -> None:
with _locked_open(user_history_file(), "w+") as f:
with locked_open(user_history_file(), "w+") as f:
f.write(json.dumps(data, cls=ClanJSONEncoder, indent=4))
def read_history_file() -> list[dict]:
with _locked_open(user_history_file(), "r") as f:
with locked_open(user_history_file(), "r") as f:
content: str = f.read()
parsed: list[dict] = json.loads(content)
return parsed

View File

@@ -24,7 +24,7 @@ def install_nixos(
target_host = f"{h.user or 'root'}@{h.host}"
log.info(f"target host: {target_host}")
generate_facts(machine)
generate_facts([machine])
with TemporaryDirectory() as tmpdir_:
tmpdir = Path(tmpdir_)
@@ -38,9 +38,10 @@ def install_nixos(
cmd = [
"nixos-anywhere",
"-f",
"--debug",
"--copy-password",
"--flake",
f"{machine.flake}#{machine.name}",
"-t",
"--no-reboot",
"--extra-files",
str(tmpdir),
@@ -55,7 +56,8 @@ def install_nixos(
run(
nix_shell(
["nixpkgs#nixos-anywhere"],
# ["nixpkgs#sshpass", "/home/kenji/git/nix-projects/nixos-anywhere"],
["nixpkgs#sshpass", "nixpkgs#nixos-anywhere"],
cmd,
),
log=Log.BOTH,
@@ -118,6 +120,18 @@ def register_install_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"target_host",
type=str,
nargs="?",
help="ssh address to install to in the form of user@host:2222",
)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"-j",
"--json",
help="specify the json file for ssh data (generated by starting the clan installer)",
)
group.add_argument(
"-P",
"--png",
help="specify the json file for ssh data as the qrcode image (generated by starting the clan installer)",
)
parser.set_defaults(func=install_command)

View File

@@ -0,0 +1,31 @@
import json
from pathlib import Path
from ..cmd import run
from ..nix import nix_build, nix_config
from .machines import Machine
# Speed up evaluation when we want to evaluate all machines: a single
# nix build yields one JSON document describing every machine.
def get_all_machines(flake_dir: Path) -> list[Machine]:
    """Return a Machine for every machine defined in the flake.

    Builds the flake's ``all-machines-json`` attribute for the current
    system and instantiates each Machine with its deployment info
    pre-seeded, avoiding a separate evaluation per machine later.
    """
    config = nix_config()
    system = config["system"]
    # nix_build prints the store path of the JSON result on stdout
    # (with a trailing newline, hence rstrip).
    json_path = run(
        nix_build([f'{flake_dir}#clanInternals.all-machines-json."{system}"'])
    ).stdout
    machines_json = json.loads(Path(json_path.rstrip()).read_text())
    return [
        Machine(name=name, flake=flake_dir, deployment_info=machine_data)
        for name, machine_data in machines_json.items()
    ]
def get_selected_machines(flake_dir: Path, machine_names: list[str]) -> list[Machine]:
    """Return a Machine object for each requested machine name.

    Unlike get_all_machines, no deployment info is pre-seeded; each
    Machine evaluates its own data lazily.
    """
    return [Machine(name=name, flake=flake_dir) for name in machine_names]

View File

@@ -0,0 +1,26 @@
from collections.abc import Callable
from typing import TypeVar
from ..ssh import Host, HostGroup, HostResult
from .machines import Machine
T = TypeVar("T")
class MachineGroup:
    """Run a callable against several machines' hosts in parallel.

    Thin adapter translating between Machine objects and the
    Host-oriented HostGroup API.
    """

    def __init__(self, machines: list[Machine]) -> None:
        # One Host per machine, taken from its deployment target.
        self.group = HostGroup([m.target_host for m in machines])

    def run_function(
        self, func: Callable[[Machine], T], check: bool = True
    ) -> list[HostResult[T]]:
        """
        Function to run for each host in the group in parallel
        @func the function to call
        """

        def call_with_machine(host: Host) -> T:
            # NOTE(review): assumes each Host carries its Machine in
            # host.meta["machine"] (wired up wherever target_host is
            # constructed) — confirm against Machine.target_host.
            return func(host.meta["machine"])

        return self.group.run_function(call_with_machine, check=check)

View File

@@ -47,7 +47,7 @@ class Machine:
eval_cache: dict[str, str]
build_cache: dict[str, Path]
_flake_path: Path | None
_deployment_info: None | dict[str, str]
_deployment_info: None | dict
vm: QMPWrapper
def __init__(
@@ -75,12 +75,18 @@ class Machine:
self.eval_cache: dict[str, str] = {}
self.build_cache: dict[str, Path] = {}
self._flake_path: Path | None = None
self._deployment_info: None | dict[str, str] = deployment_info
self._deployment_info: None | dict = deployment_info
state_dir = vm_state_dir(flake_url=str(self.flake), vm_name=self.data.name)
self.vm: QMPWrapper = QMPWrapper(state_dir)
def flush_caches(self) -> None:
self._deployment_info = None
self._flake_path = None
self.build_cache.clear()
self.eval_cache.clear()
def __str__(self) -> str:
return f"Machine(name={self.data.name}, flake={self.data.flake_id})"
@@ -88,7 +94,7 @@ class Machine:
return str(self)
@property
def deployment_info(self) -> dict[str, str]:
def deployment_info(self) -> dict:
if self._deployment_info is not None:
return self._deployment_info
self._deployment_info = json.loads(
@@ -113,26 +119,21 @@ class Machine:
@property
def secret_facts_module(self) -> str:
return self.deployment_info["secretFactsModule"]
return self.deployment_info["facts"]["secretModule"]
@property
def public_facts_module(self) -> str:
return self.deployment_info["publicFactsModule"]
return self.deployment_info["facts"]["publicModule"]
@property
def secrets_data(self) -> dict[str, dict[str, Any]]:
if self.deployment_info["secretsData"]:
try:
return json.loads(Path(self.deployment_info["secretsData"]).read_text())
except json.JSONDecodeError as e:
raise ClanError(
f"Failed to parse secretsData for machine {self.data.name} as json"
) from e
def facts_data(self) -> dict[str, dict[str, Any]]:
if self.deployment_info["facts"]["services"]:
return self.deployment_info["facts"]["services"]
return {}
@property
def secrets_upload_directory(self) -> str:
return self.deployment_info["secretsUploadDirectory"]
return self.deployment_info["facts"]["secretUploadDirectory"]
@property
def flake_dir(self) -> Path:

View File

@@ -5,15 +5,15 @@ import os
import shlex
import subprocess
import sys
from pathlib import Path
from ..cmd import run
from ..errors import ClanError
from ..facts.generate import generate_facts
from ..facts.upload import upload_secrets
from ..machines.machines import Machine
from ..nix import nix_build, nix_command, nix_config, nix_metadata
from ..ssh import Host, HostGroup, HostKeyCheck, parse_deployment_address
from ..nix import nix_command, nix_metadata
from ..ssh import HostKeyCheck
from .inventory import get_all_machines, get_selected_machines
from .machine_group import MachineGroup
log = logging.getLogger(__name__)
@@ -86,31 +86,31 @@ def upload_sources(
)
def deploy_nixos(hosts: HostGroup) -> None:
def deploy_nixos(machines: MachineGroup) -> None:
"""
Deploy to all hosts in parallel
"""
def deploy(h: Host) -> None:
target = f"{h.user or 'root'}@{h.host}"
ssh_arg = f"-p {h.port}" if h.port else ""
def deploy(machine: Machine) -> None:
host = machine.build_host
target = f"{host.user or 'root'}@{host.host}"
ssh_arg = f"-p {host.port}" if host.port else ""
env = os.environ.copy()
env["NIX_SSHOPTS"] = ssh_arg
path = upload_sources(".", target)
if h.host_key_check != HostKeyCheck.STRICT:
ssh_arg += " -o StrictHostKeyChecking=no"
if h.host_key_check == HostKeyCheck.NONE:
ssh_arg += " -o UserKnownHostsFile=/dev/null"
ssh_arg += " -i " + h.key if h.key else ""
machine: Machine = h.meta["machine"]
generate_facts(machine)
generate_facts([machine])
upload_secrets(machine)
extra_args = h.meta.get("extra_args", [])
path = upload_sources(".", target)
if host.host_key_check != HostKeyCheck.STRICT:
ssh_arg += " -o StrictHostKeyChecking=no"
if host.host_key_check == HostKeyCheck.NONE:
ssh_arg += " -o UserKnownHostsFile=/dev/null"
ssh_arg += " -i " + host.key if host.key else ""
extra_args = host.meta.get("extra_args", [])
cmd = [
"nixos-rebuild",
"switch",
@@ -127,82 +127,55 @@ def deploy_nixos(hosts: HostGroup) -> None:
"--flake",
f"{path}#{machine.name}",
]
if target_host := h.meta.get("target_host"):
if target_host := host.meta.get("target_host"):
target_host = f"{target_host.user or 'root'}@{target_host.host}"
cmd.extend(["--target-host", target_host])
ret = h.run(cmd, check=False)
ret = host.run(cmd, check=False)
# re-retry switch if the first time fails
if ret.returncode != 0:
ret = h.run(cmd)
ret = host.run(cmd)
hosts.run_function(deploy)
machines.run_function(deploy)
# function to speedup eval if we want to evauluate all machines
def get_all_machines(clan_dir: Path) -> HostGroup:
config = nix_config()
system = config["system"]
machines_json = run(
nix_build([f'{clan_dir}#clanInternals.all-machines-json."{system}"'])
).stdout
machines = json.loads(Path(machines_json.rstrip()).read_text())
hosts = []
ignored_machines = []
for name, machine_data in machines.items():
if machine_data.get("requireExplicitUpdate", False):
continue
machine = Machine(name=name, flake=clan_dir, deployment_info=machine_data)
try:
hosts.append(machine.build_host)
except ClanError:
ignored_machines.append(name)
continue
if not hosts and ignored_machines != []:
print(
"WARNING: No machines to update. The following defined machines were ignored because they do not have `clan.networking.targetHost` nixos option set:",
file=sys.stderr,
)
for machine in ignored_machines:
print(machine, file=sys.stderr)
# very hacky. would be better to do a MachinesGroup instead
return HostGroup(hosts)
def get_selected_machines(machine_names: list[str], flake_dir: Path) -> HostGroup:
hosts = []
for name in machine_names:
machine = Machine(name=name, flake=flake_dir)
hosts.append(machine.build_host)
return HostGroup(hosts)
# FIXME: we want some kind of inventory here.
def update(args: argparse.Namespace) -> None:
if args.flake is None:
raise ClanError("Could not find clan flake toplevel directory")
machines = []
if len(args.machines) == 1 and args.target_host is not None:
machine = Machine(name=args.machines[0], flake=args.flake)
machine.target_host_address = args.target_host
host = parse_deployment_address(
args.machines[0],
args.target_host,
meta={"machine": machine},
)
machines = HostGroup([host])
machines.append(machine)
elif args.target_host is not None:
print("target host can only be specified for a single machine")
exit(1)
else:
if len(args.machines) == 0:
machines = get_all_machines(args.flake)
else:
machines = get_selected_machines(args.machines, args.flake)
ignored_machines = []
for machine in get_all_machines(args.flake):
if machine.deployment_info.get("requireExplicitUpdate", False):
continue
try:
machine.build_host
except ClanError: # check if we have a build host set
ignored_machines.append(machine)
continue
deploy_nixos(machines)
machines.append(machine)
if not machines and ignored_machines != []:
print(
"WARNING: No machines to update. The following defined machines were ignored because they do not have `clan.networking.targetHost` nixos option set:",
file=sys.stderr,
)
for machine in ignored_machines:
print(machine, file=sys.stderr)
else:
machines = get_selected_machines(args.flake, args.machines)
deploy_nixos(MachineGroup(machines))
def register_update_parser(parser: argparse.ArgumentParser) -> None:

View File

@@ -3,14 +3,8 @@ from pathlib import Path
from .. import tty
from ..errors import ClanError
from .folders import sops_secrets_folder
from .secrets import collect_keys_for_path, list_secrets
from .sops import (
default_sops_key_path,
generate_private_key,
get_public_key,
update_keys,
)
from .secrets import update_secrets
from .sops import default_sops_key_path, generate_private_key, get_public_key
def generate_key() -> str:
@@ -44,12 +38,7 @@ def show_command(args: argparse.Namespace) -> None:
def update_command(args: argparse.Namespace) -> None:
flake_dir = Path(args.flake)
for name in list_secrets(flake_dir):
secret_path = sops_secrets_folder(flake_dir) / name
update_keys(
secret_path,
list(sorted(collect_keys_for_path(secret_path))),
)
update_secrets(flake_dir)
def register_key_parser(parser: argparse.ArgumentParser) -> None:

View File

@@ -6,6 +6,7 @@ from ..git import commit_files
from ..machines.types import machine_name_type, validate_hostname
from . import secrets
from .folders import list_objects, remove_object, sops_machines_folder
from .secrets import update_secrets
from .sops import read_key, write_key
from .types import public_or_private_age_key_type, secret_name_type
@@ -13,6 +14,12 @@ from .types import public_or_private_age_key_type, secret_name_type
def add_machine(flake_dir: Path, name: str, key: str, force: bool) -> None:
path = sops_machines_folder(flake_dir) / name
write_key(path, key, force)
paths = [path]
def filter_machine_secrets(secret: Path) -> bool:
return secret.joinpath("machines", name).exists()
paths.extend(update_secrets(flake_dir, filter_secrets=filter_machine_secrets))
commit_files(
[path],
flake_dir,

View File

@@ -3,6 +3,7 @@ import getpass
import os
import shutil
import sys
from collections.abc import Callable
from dataclasses import dataclass
from pathlib import Path
from typing import IO
@@ -21,6 +22,23 @@ from .sops import decrypt_file, encrypt_file, ensure_sops_key, read_key, update_
from .types import VALID_SECRET_NAME, secret_name_type
def update_secrets(
flake_dir: Path, filter_secrets: Callable[[Path], bool] = lambda _: True
) -> list[Path]:
changed_files = []
for name in list_secrets(flake_dir):
secret_path = sops_secrets_folder(flake_dir) / name
if not filter_secrets(secret_path):
continue
changed_files.extend(
update_keys(
secret_path,
list(sorted(collect_keys_for_path(secret_path))),
)
)
return changed_files
def collect_keys_for_type(folder: Path) -> set[str]:
if not folder.exists():
return set()

View File

@@ -117,8 +117,10 @@ def sops_manifest(keys: list[str]) -> Iterator[Path]:
yield Path(manifest.name)
def update_keys(secret_path: Path, keys: list[str]) -> None:
def update_keys(secret_path: Path, keys: list[str]) -> list[Path]:
with sops_manifest(keys) as manifest:
secret_path = secret_path / "secret"
time_before = secret_path.stat().st_mtime
cmd = nix_shell(
["nixpkgs#sops"],
[
@@ -127,10 +129,13 @@ def update_keys(secret_path: Path, keys: list[str]) -> None:
str(manifest),
"updatekeys",
"--yes",
str(secret_path / "secret"),
str(secret_path),
],
)
run(cmd, log=Log.BOTH, error_msg=f"Could not update keys for {secret_path}")
if time_before == secret_path.stat().st_mtime:
return []
return [secret_path]
def encrypt_file(
@@ -202,7 +207,9 @@ def write_key(path: Path, publickey: str, overwrite: bool) -> None:
flags |= os.O_EXCL
fd = os.open(path / "key.json", flags)
except FileExistsError:
raise ClanError(f"{path.name} already exists in {path}")
raise ClanError(
f"{path.name} already exists in {path}. Use --force to overwrite."
)
with os.fdopen(fd, "w") as f:
json.dump({"publickey": publickey, "type": "age"}, f, indent=2)

Some files were not shown because too many files have changed in this diff Show More