Compare commits

...

411 Commits

Author SHA1 Message Date
Michael Hoang
01c9432cc5 checks/installation: don't hardcode system 2025-03-31 16:20:54 +09:00
Michael Hoang
f62e9db126 wip! fix all checks on aarch64-linux 2025-03-31 16:20:54 +09:00
renovate[bot]
dcb2231332 chore(deps): update data-mesher digest to bf8c544 2025-03-31 00:20:25 +00:00
renovate[bot]
725eeb87ae chore(deps): lock file maintenance 2025-03-31 00:00:41 +00:00
hsjobeki
66df677fd2 Merge pull request 'chore(lib): prepare for refactoring into clanLib' (#3141) from hsjobeki/clan-core:clan-services into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3141
2025-03-30 15:51:17 +00:00
Johannes Kirschbauer
f7d15215ea feat(clanLib): expose clanInternals.clanLib
This is useful for:
- as an API via Python
- for testing clanLib downstream
2025-03-30 17:07:11 +02:00
Johannes Kirschbauer
c25574bebd fix(lib/evalClan): evalClan is an explicit lib attribute 2025-03-30 17:07:11 +02:00
Johannes Kirschbauer
fe5796ba17 feat(inventory/instances): only warn if instances is populated 2025-03-30 17:07:11 +02:00
Johannes Kirschbauer
f2e89d27fe feat(lib/inventory): use clanLib instead of clan-core as internal specialArg. This has the benefit of being more narrowly scoped. 2025-03-30 17:07:09 +02:00
Johannes Kirschbauer
06dd2ebf8c feat(lib/modules): remove dependency on self 2025-03-30 16:12:01 +02:00
Johannes Kirschbauer
40740860c0 feat(lib): init callLib; helper to bootstrap clanLib 2025-03-30 16:11:04 +02:00
Johannes Kirschbauer
89bc39869c chore(lib): prepare for refactoring into clanLib 2025-03-30 15:56:54 +02:00
hsjobeki
84d0a2f2f0 Merge pull request 'enable clan services for machines' (#3134) from hsjobeki/clan-core:clan-services into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3134
2025-03-30 13:54:11 +00:00
Johannes Kirschbauer
1d07737989 docs(lib): init readme with folder and testing conventions 2025-03-30 15:29:05 +02:00
Johannes Kirschbauer
9d386485dd chore(auto-imports): Remove the auto-import feature. There will be a replacement to make imports explicit. 2025-03-30 15:29:05 +02:00
Johannes Kirschbauer
ee9ae9c76d fix(eval/tests): distributed-services: don't abort on warn in tests 2025-03-30 15:29:05 +02:00
Johannes Kirschbauer
d4d4d77d2d fix(tests): include 'lib' always as a whole 2025-03-30 15:29:05 +02:00
Johannes Kirschbauer
c0ebad1cd9 feat(inventory/instances): add wip warning 2025-03-30 15:29:05 +02:00
Johannes Kirschbauer
86d0c95da7 feat(inventory/instances): improve error location 2025-03-30 15:29:05 +02:00
Johannes Kirschbauer
0fb1b5c5ce feat(inventory/instances): add service result to nixos machines 2025-03-30 15:29:05 +02:00
renovate[bot]
dc0349e835 fix(deps): update dependency @tanstack/solid-query to v5.71.0 2025-03-30 13:00:14 +00:00
renovate[bot]
cc8a74b195 chore(deps): update data-mesher digest to 4d139ac 2025-03-30 09:50:10 +00:00
renovate[bot]
046fe0df36 chore(deps): update nixpkgs digest to eb0e0f2 2025-03-30 09:10:24 +00:00
lassulus
3f948fdbd4 Merge pull request 'Make Generator validation more dynamic' (#3052) from tangential/clan-core:dynamic-vars-generator-validation into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3052
2025-03-30 07:00:43 +00:00
Jonathan Thiessen
eb35e6ea21 Make Generator's validation dynamic
* Switch `Generator`'s `validation` from a regular property to
  an `@property` annotated method backed by `Machine`'s `eval_nix()`.
* Ensure that `Machine`'s flake cache is flushed after each
  effectful generator execution (rather than only after all
  generators have been executed).
2025-03-30 04:33:30 +00:00
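A minimal sketch of the change described above, not the actual clan-cli code; the `eval_nix` signature and the Nix attribute path are illustrative stand-ins:

```python
from dataclasses import dataclass


@dataclass
class Machine:
    name: str

    def eval_nix(self, attr: str) -> str:
        """Stub: in clan-cli this evaluates a Nix attribute of the machine."""
        raise NotImplementedError


@dataclass
class Generator:
    name: str
    machine: Machine

    @property
    def validation(self) -> str:
        # Evaluated lazily on every access instead of being captured once, so
        # it sees the machine's state after earlier generators have run (and
        # after the flake cache has been flushed).
        return self.machine.eval_nix(
            f"config.clan.core.vars.generators.{self.name}.validation"
        )
```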
Jonathan Thiessen
4a0e1b3b6b Add dependent vars generator dynamic validation test 2025-03-30 04:33:30 +00:00
Jonathan Thiessen
1b8974d167 Fix cached None support in FlakeCacheEntry
Previously, you could cache None values; however, insertion wasn't
idempotent: identical reinsertion would lead to errors due to missing
None checks.
2025-03-30 04:33:30 +00:00
Jonathan Thiessen
5e2b5fe213 Add overlapping (consistent) flake cache insert test
* Additionally, update `insert`'s input type hint to support None values
  (as they are already selectable and one-shot insertable).
  This is necessary to appease the linter with regard to the added test.
2025-03-30 04:33:30 +00:00
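A sketch of the invariant these two cache commits establish (a hypothetical shape, the real `FlakeCacheEntry` is more involved): `None` must be distinguishable from "nothing cached yet", and reinserting an identical value must be a no-op.

```python
_MISSING = object()  # sentinel: no value cached yet, distinct from a cached None


class FlakeCacheEntry:
    def __init__(self) -> None:
        self.value: object = _MISSING

    def insert(self, value: object) -> None:
        if self.value is _MISSING:
            self.value = value  # first insertion; None is a legal value
        elif self.value != value:  # identical reinsertion is a silent no-op
            raise ValueError(f"conflicting insert: {self.value!r} != {value!r}")
```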
renovate[bot]
74fb3abbc7 chore(deps): update sops-nix digest to 8e87388 2025-03-30 04:00:13 +00:00
Luis Hebendanz
f2b04e74f1 Merge pull request 'clan-cli: Fix deployment as non root with a buildHost set' (#3132) from Qubasa/clan-core:fix_target_host_as_non_root into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3132
2025-03-29 16:56:49 +00:00
hsjobeki
d3ae684575 Merge pull request 'init inventory.instances and clan.service modules' (#3102) from hsjobeki/clan-core:clan-services into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3102
2025-03-29 16:22:30 +00:00
Johannes Kirschbauer
5b294e7651 chore(ui): ignore type error in unfinished features 2025-03-29 16:45:05 +01:00
Johannes Kirschbauer
40ae510075 test(inventory/legacy): don't need to support clanModules 2025-03-29 16:35:43 +01:00
Johannes Kirschbauer
48d910f11f fix(auto-imports): disable since this is not needed anymore and causing collision with the new module type 2025-03-29 16:24:48 +01:00
renovate[bot]
f242b9a35c chore(deps): update data-mesher digest to 734883c 2025-03-29 15:20:14 +00:00
Johannes Kirschbauer
978822d40a test(inventory/instances): add tests for per machine resolution 2025-03-29 15:40:31 +01:00
Johannes Kirschbauer
fa6c3be21e feat(inventory/instances): preserve settings modifiers close at source 2025-03-29 15:39:02 +01:00
brianmcgee
be61bac9af Merge pull request 'data-mesher-module' (#3086) from data-mesher-module into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3086
2025-03-29 14:07:42 +00:00
Pablo Ovelleiro Corral
42b58910a9 data-mesher: init module
Co-authored-by: Brian McGee <brian@bmcgee.ie>
2025-03-29 13:49:41 +00:00
Johannes Kirschbauer
a746b10578 chore: add description to {manifest, name} option 2025-03-29 14:33:44 +01:00
Johannes Kirschbauer
19341e4cb1 chore: format test, remove unused attributes 2025-03-29 14:27:52 +01:00
Johannes Kirschbauer
f4e06271ba chore: rename unused attributes in inventory adapter 2025-03-29 14:27:27 +01:00
Johannes Kirschbauer
d93fe229b3 chore: remove unused attributes from flake-module 2025-03-29 14:27:04 +01:00
Johannes Kirschbauer
5fc62806b1 feat(classgen): convert only certain attributes 2025-03-29 14:22:34 +01:00
Johannes Kirschbauer
e0be2f3435 fix(cli/inventory): update classes 2025-03-29 13:25:19 +01:00
Johannes Kirschbauer
a69b81488b fix(inventory/instances): fix jsonschema compatibility 2025-03-29 13:25:19 +01:00
Johannes Kirschbauer
b133a2407a feat(clan/services): init recursive service module 2025-03-29 13:25:19 +01:00
Johannes Kirschbauer
68ae27899a feat(clan/services): init test-suite for eval test 2025-03-29 13:25:19 +01:00
Johannes Kirschbauer
b83d3ecba2 feat(clan/services): init adapter function to convert inventory instances into clan.service module configurations 2025-03-29 13:25:19 +01:00
Johannes Kirschbauer
bec4317709 feat(inventory/instances): init instances as new attribute for adding distributed services 2025-03-29 13:25:19 +01:00
Johannes Kirschbauer
f37f15c482 feat(clan/services): init new clanInternals attribute 'distributedServices' 2025-03-29 13:25:19 +01:00
Johannes Kirschbauer
fae8ec318d feat(inventory/modules): allow inline modules 2025-03-29 13:25:19 +01:00
Qubasa
8e2005f38c clan-cli: Fix deployment as non root with a buildHost set 2025-03-28 18:47:36 +01:00
Mic92
94781bb358 Merge pull request 'clan_cli: fix support for non-root deployment user (from rtunreal)' (#3124) from Qubasa/clan-core:main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3124
2025-03-28 17:26:33 +00:00
Jörg Thalheim
de740cf686 tests: add fake_sudo to sshd fixture
This allows using the same code for both testing and real-world use.
2025-03-28 17:14:22 +00:00
Qubasa
064edf61ef test_secrets_upload: Don't prepend sudo inside test; Improve secret upload test 2025-03-28 17:14:22 +00:00
renovate[bot]
aaf58d7be8 chore(deps): update treefmt-nix digest to 29a3d7b 2025-03-28 15:50:19 +00:00
renovate[bot]
03f8e41291 chore(deps): update nixpkgs digest to 6c59633 2025-03-28 15:30:25 +00:00
renovate[bot]
43bd4403c6 fix(deps): update dependency @tanstack/solid-query to v5.70.0 2025-03-28 15:20:17 +00:00
renovate[bot]
ebee55ffdc chore(deps): update nixpkgs digest to 25d1b84 2025-03-27 03:30:23 +00:00
renovate[bot]
47e9e5a8f0 chore(deps): update dependency @types/node to v22.13.14 2025-03-27 03:20:15 +00:00
Qubasa
d1a79653fe checks/installation-without-system: modify to install through normal user instead of root 2025-03-26 18:37:31 +01:00
RTUnreal
351ce1414a clan_cli: fix support for non-root deployment user 2025-03-26 18:37:31 +01:00
DavHau
e2ccd979ed vars/prompts: print var name even if custom description is set 2025-03-26 10:48:05 +00:00
renovate[bot]
f5f3f96809 chore(deps): update treefmt-nix digest to 61c8834 2025-03-26 10:10:09 +00:00
Mic92
59253a9c71 Merge pull request 'ADR: init clan api as library decision record' (#2975) from hsjobeki/clan-core:adr/architecture into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2975
2025-03-26 10:01:55 +00:00
Johannes Kirschbauer
aa03adc581 ADR: init clan api as library decision record 2025-03-26 09:52:05 +00:00
Mic92
ffd84d50f7 Merge pull request 'Fix(classgen): support number conversion from jsonschema' (#3119) from hsjobeki/clan-core:class-fix into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3119
2025-03-26 09:45:09 +00:00
Johannes Kirschbauer
679387e4ba Fix(classgen): support number conversion from jsonschema 2025-03-25 19:27:01 +01:00
hsjobeki
1d60f94cc5 Merge pull request 'docs/configure: Remove reference to unfinished feature' (#3097) from kenji/clan-core:docs-remove-upcoming into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3097
2025-03-25 18:12:09 +00:00
Mic92
1235177541 Merge pull request 'Enable all pytest without core' (#3118) from enable-more-macos into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3118
2025-03-25 17:41:04 +00:00
Jörg Thalheim
5c08e9a38d add missing lock around "flake" "lock" 2025-03-25 18:30:18 +01:00
Jörg Thalheim
28dd54d866 make gnupg a dependency of sops
If anything uses a GnuPG key, we need the gnupg binary.
Sucks a bit, but at least it makes it work everywhere.
2025-03-25 18:30:18 +01:00
Jörg Thalheim
5baf37f7e9 fix gpg key fixture on macOS
macOS limits the length of unix socket paths, and the default
temporary directory paths exceed that limit.
2025-03-25 18:30:18 +01:00
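For context: AF_UNIX socket paths are capped at roughly 104 bytes on macOS, and GnuPG places its agent sockets inside the home directory, so a long temporary directory breaks the fixture. A sketch of the usual workaround (prefix and layout are illustrative):

```python
import sys
import tempfile

# Keep the GNUPGHOME path short enough that the agent's socket path
# stays under the macOS sun_path limit.
base = "/tmp" if sys.platform == "darwin" else None
gnupg_home = tempfile.mkdtemp(prefix="gpg-", dir=base)
```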
Jörg Thalheim
ff669e2957 move git_repo fixture to its own file for consistency 2025-03-25 18:30:11 +01:00
Jörg Thalheim
8d4c1839e7 use pre-generated gpg key for tests
This is a bit faster.
2025-03-25 18:30:11 +01:00
Jörg Thalheim
0765d981c6 enable python tests without core on macOS 2025-03-25 18:29:49 +01:00
Jörg Thalheim
10c27a0152 skip sshd-based tests on macOS for now 2025-03-25 18:29:49 +01:00
Mic92
ccb5af9565 Merge pull request 'docs/index: Clear up API Reference description' (#3098) from kenji/clan-core:docs-overview into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3098
2025-03-25 09:34:18 +00:00
renovate[bot]
828eff528a chore(deps): lock file maintenance 2025-03-25 02:10:52 +00:00
renovate[bot]
cbf47580cf chore(deps): update nixpkgs digest to 1750f3c 2025-03-25 01:50:24 +00:00
renovate[bot]
355ac57ccb chore(deps): update nixpkgs digest to dd61313 2025-03-24 19:30:24 +00:00
renovate[bot]
227e293421 chore(deps): update typescript-eslint monorepo to v8.28.0 2025-03-24 17:20:18 +00:00
renovate[bot]
9b3621b516 chore(deps): update dependency @types/node to v22.13.13 2025-03-24 11:20:15 +00:00
renovate[bot]
62f09a450f chore(deps): update dependency vite to v6.2.3 2025-03-24 10:30:17 +00:00
Michael Hoang
95282bd880 Merge pull request 'checks/flash: fix on aarch64-linux' (#3109) from push-wyyyplplwnpy into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3109
2025-03-24 10:23:24 +00:00
Michael Hoang
7a49ec252e checks/flash: support aarch64-linux 2025-03-24 19:13:20 +09:00
Michael Hoang
5f9ee97cab Merge pull request 'checks/installation-without-system: support aarch64-linux' (#3108) from push-wnsmqwtkplqw into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3108
2025-03-24 09:47:53 +00:00
Michael Hoang
c6be9bbf07 checks/installation-without-system: add aarch64-linux facter.json
This doesn't fix the test on `aarch64-linux` but brings it in line with
where `test-installation` fails.
2025-03-24 18:37:46 +09:00
Michael Hoang
d77ae5eed0 Merge pull request 'checks/backups: don't hardcode system' (#3107) from push-unpltryrzlsx into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3107
2025-03-24 08:38:28 +00:00
Michael Hoang
3c2888edc7 checks: don't build test machines as they may be system-less now 2025-03-24 17:27:36 +09:00
Michael Hoang
b0f23353ef checks/backups: don't hardcode system 2025-03-24 17:26:48 +09:00
renovate[bot]
3fccccc092 chore(deps): update dependency @types/node to v22.13.12 2025-03-24 06:40:15 +00:00
Michael Hoang
0a5d1bf322 Merge pull request 'checks: disable all failing aarch64-linux checks' (#3104) from push-zqxwrttvxuqy into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3104
2025-03-24 04:01:16 +00:00
Michael Hoang
9ca5cb7bcc checks: disable all failing aarch64-linux checks 2025-03-24 12:50:57 +09:00
renovate[bot]
cc1b356a94 chore(deps): update sops-nix digest to 67566fe 2025-03-23 04:00:13 +00:00
kenji
9aa8c1b8eb Merge pull request 'docs/configure: Fix erroneous option attribute' (#3099) from kenji/clan-core:docs-fix-3094 into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3099
2025-03-22 23:27:16 +00:00
a-kenji
709d773768 docs/configure: Fix erroneous option attribute
Fixes: #3094
2025-03-22 13:05:55 -07:00
a-kenji
845abd1356 docs/index: Clear up API Reference description
The term "auto generated" gives leeway to the incorrect assumption
that this is not a curated reference that contains written documentation
especially for the rendered reference.

This is not the case.
2025-03-22 12:56:51 -07:00
a-kenji
2b4a4f2422 docs/configure: Remove reference to unfinished feature 2025-03-22 12:53:59 -07:00
Mic92
82da5b6734 Merge pull request 'don't add nixpkgs to nix registry to not conflict with nixpkgs' (#3096) from nixpkgs-conflict into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3096
2025-03-22 16:16:27 +00:00
Jörg Thalheim
33a9fd8d3d tests/installer/client: increase RAM 2025-03-22 17:07:52 +01:00
Jörg Thalheim
4beb097a95 don't add nixpkgs to nix registry to not conflict with nixpkgs
NixOS is already doing this for us.
2025-03-22 14:45:53 +01:00
renovate[bot]
b4cd62b9f8 chore(deps): update nixpkgs digest to 94c4dbe 2025-03-22 04:00:29 +00:00
renovate[bot]
ee7b98c34d chore(deps): update sops-nix digest to b775692 2025-03-21 22:50:10 +00:00
renovate[bot]
8552d4b3bd chore(deps): update dependency eslint to v9.23.0 2025-03-21 20:40:14 +00:00
renovate[bot]
375edcff81 chore(deps): update dependency @eslint/js to v9.23.0 2025-03-21 20:20:13 +00:00
renovate[bot]
3183b26777 chore(deps): update nixpkgs digest to bfa9810 2025-03-21 19:00:27 +00:00
Luis Hebendanz
0feacaf300 Merge pull request 'sshd: Fix missing cfg.fqdn regression' (#3087) from Qubasa/clan-core:main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3087
2025-03-21 16:47:25 +00:00
Qubasa
6917021996 sshd: Fix missing cfg.fqdn regression 2025-03-21 17:45:25 +01:00
lassulus
3965f7b59f Merge pull request 'clan-cli: cleanup broken deployment cache' (#3066) from fix_caching into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3066
2025-03-21 15:49:22 +00:00
renovate[bot]
610a70e4f8 chore(deps): update nixpkgs digest to 7344a3b 2025-03-21 12:30:23 +00:00
Jörg Thalheim
6134eb0293 tests/sshd: add a 5 second timeout for sshd to start 2025-03-21 12:41:36 +01:00
renovate[bot]
62e9fe8f9f chore(deps): update dependency @types/node to v22.13.11 2025-03-21 09:20:13 +00:00
renovate[bot]
5bc2d00014 chore(deps): update nixpkgs digest to 2a725d4 2025-03-20 21:20:28 +00:00
renovate[bot]
616b294b8c chore(deps): update nixpkgs digest to 44e422b 2025-03-20 20:20:27 +00:00
Michael Hoang
2d7b92b3f9 Merge pull request 'networking: add a default value for targetHost' (#3080) from push-rlvulrtxqkyq into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3080
2025-03-20 14:10:19 +00:00
Michael Hoang
0487670d30 networking: add a default value for targetHost 2025-03-20 22:45:19 +09:00
Michael Hoang
4cd174b268 Merge pull request 'sshd: trust own ed25519 host key as a known host' (#3077) from push-opymuwrqqqvv into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3077
2025-03-20 13:44:25 +00:00
Michael Hoang
a8b257f32c sshd: trust own ed25519 host key as a known host 2025-03-20 18:40:32 +09:00
Qubasa
047b767054 clan-cli: cleanup broken deployment cache 2025-03-20 00:17:36 -07:00
Michael Hoang
c74d23b799 Merge pull request 'checks: use pkgs.nixVersions.latest until pkgs.nix is 2.26+' (#3076) from push-vswxxyynxtmz into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3076
2025-03-20 07:01:57 +00:00
Michael Hoang
850627c5c6 checks: use pkgs.nixVersions.latest until pkgs.nix is 2.26+ 2025-03-20 15:52:13 +09:00
renovate[bot]
60d56c4e3b chore(deps): update typescript-eslint monorepo to v8.27.0 2025-03-20 01:20:15 +00:00
renovate[bot]
4911901f7c chore(deps): update sops-nix digest to 1770be8 2025-03-19 18:10:10 +00:00
Mic92
a96860a24b Merge pull request 'pytests: use /tmp on macos to avoid unix socket issues' (#3073) from nixpkgs-update into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3073
2025-03-19 17:45:15 +00:00
Jörg Thalheim
c429b41d2e pytests: use /tmp on macos to avoid unix socket issues 2025-03-19 18:35:38 +01:00
Mic92
fe305f7f47 Merge pull request 'Decisions/clanModules: Add example borgbackup as real world example' (#3070) from hsjobeki/clan-core:decisions-01 into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3070
2025-03-19 16:39:59 +00:00
Johannes Kirschbauer
591d397df9 Decisions/clanModules: Add example borgbackup as real world example 2025-03-19 16:39:52 +00:00
Mic92
8231979bae Merge pull request 'Silence mypy error after nixpkgs update' (#3072) from nixpkgs-update into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3072
2025-03-19 16:39:32 +00:00
Jörg Thalheim
6899461d0d disable pytests on macOS for now 2025-03-19 17:33:27 +01:00
Jörg Thalheim
16b067d291 tests/fixtures_flakes: remove unused remote flag 2025-03-19 16:55:30 +01:00
Jörg Thalheim
93cbe62765 always resolve symlinks for TemporaryDirectory
On macOS, mktemp returns a temporary directory behind a symlink.
Nix has a bug where it won't accept a path:// flake located in a symlink.
This avoids the issue by always resolving the symlinks in paths returned
by TemporaryDirectory.
2025-03-19 16:47:18 +01:00
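A sketch of that workaround (variable names are illustrative):

```python
import tempfile
from pathlib import Path

tmp = tempfile.TemporaryDirectory()
# On macOS the directory lives under /var/..., a symlink to /private/var/...;
# resolving it yields a real path that Nix accepts as a path:// flake.
flake_dir = Path(tmp.name).resolve()
```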
Jörg Thalheim
7fef29d7aa make sshd test work on macOS 2025-03-19 15:55:20 +01:00
Jörg Thalheim
952d1facce vm-manager: ignore interface between GObject and ListModel 2025-03-19 15:29:03 +01:00
Jörg Thalheim
a565a85a5e clan-vm-manager: support basic devshell on macOS 2025-03-19 15:29:03 +01:00
renovate[bot]
3d5ef5e909 chore(deps): update nixpkgs digest to 3549532 2025-03-19 13:40:12 +00:00
Luis Hebendanz
a5c5033273 Merge pull request 'clan-cli: machines delete: delete the machine's vars and secrets' (#2994) from lopter/clan-core:lo-machines-delete into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2994
2025-03-19 12:25:48 +00:00
Louis Opter
0ee0351e3e clan-cli: add DavHau's explanation about the with_core pytest marker
See: https://git.clan.lol/clan/clan-core/pulls/2994#issuecomment-22542
2025-03-19 10:53:38 +00:00
Louis Opter
c02f19205f clan-cli: tests: call SopsSetup.init while setting up fixtures
We do this by introducing a `flake_with_sops` fixture that calls the
init method ahead of the test. We did not want to do this in the
`flake` fixture, since not all tests using the `flake` fixture need
sops set up.
2025-03-19 10:53:38 +00:00
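A sketch of that fixture layering; the `flake` and `sops_setup` fixtures and the `init()` signature are assumptions standing in for clan-cli's real test helpers:

```python
import pytest


@pytest.fixture
def flake_with_sops(flake, sops_setup):
    # Run the sops initialization ahead of the test, but only for tests
    # that opt in by requesting this fixture instead of plain `flake`.
    sops_setup.init()
    return flake
```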
Louis Opter
dbcb8d6a4c clan-cli: don't try to delete a dir that doesn't exist in the pass vars backend
Do not crash in `delete_store` if the machine has no vars or the
store has already been deleted.
2025-03-19 10:53:38 +00:00
Louis Opter
039b309255 clan-cli: do not crash if a machine being deleted is missing from the inventory
We implement that by actually raising `KeyError` in `inventory.delete_by_path`
(as advertised in the docstring), since it makes more sense to catch a
`KeyError` than a generic `ClanError`.
2025-03-19 10:53:38 +00:00
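A sketch of the `delete_by_path` contract (the signature is assumed for illustration):

```python
def delete_by_path(inventory: dict, path: str) -> None:
    """Delete the entry at a dotted path; raise KeyError if it is missing."""
    *parents, leaf = path.split(".")
    node = inventory
    for key in parents:
        node = node[key]  # KeyError propagates for a missing intermediate key
    del node[leaf]        # KeyError if the leaf itself is absent
```

Callers like `clan machines delete` can then catch `KeyError` specifically rather than a generic `ClanError`.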
Louis Opter
538374558d clan-cli: machines delete: delete the machine's vars and secrets
When a machine is deleted with `clan machines delete`, remove its
vars and legacy secrets, and update any secrets that reference the
machine's key.

This command is a superset of `clan secrets machine delete`, and I am
wondering if we could remove the `clan secrets machine` subcommand,
unless there is a use case for having a machine defined without its
key, and any secrets/vars?

Note:

- This deletes the `ListSecretsOptions` dataclass, as it did not seem to
  bring any value, especially since `list_secrets` was receiving its
  individual members instead of the whole dataclass. We can always bring
  it back if complexity grows to demand it.
2025-03-19 10:53:38 +00:00
Louis Opter
ef5ad09b2d clan-cli: add delete and delete_store to StoreBase
- `delete` lets you delete a specific var under a specific generator;
- `delete_store` deletes an entire store.

The `delete` method could be useful to "garbage-collect" unused vars as
a machine's configuration changes.

The `delete_store` method can be used to delete all the vars for a
machine when the machine is deleted. The current behavior is to leave
everything behind.

Important point:

- `delete_store` needs to be idempotent because public and
  "private"/"secret" vars for a machine can share the same physical
  store (directory), and deleting either type of store (public or
  private) will delete both.
2025-03-19 10:53:38 +00:00
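A sketch of the two methods under the idempotency constraint stated above; the on-disk layout is an assumption, not clan-cli's actual store format:

```python
import shutil
from pathlib import Path


class StoreBase:
    def __init__(self, root: Path) -> None:
        self.root = root

    def delete(self, machine: str, generator: str, name: str) -> None:
        # Remove a single var under a specific generator.
        (self.root / machine / generator / name).unlink(missing_ok=True)

    def delete_store(self, machine: str) -> None:
        # Idempotent: public and secret vars may share this directory,
        # so whichever store deletes it second must not fail.
        shutil.rmtree(self.root / machine, ignore_errors=True)
```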
Louis Opter
9780463e6a clan-cli: add an integration test for clan machines delete
This tests the changes made to that command to clean-up vars and secrets
when a machine is deleted.
2025-03-19 10:53:38 +00:00
Louis Opter
cac4b1200c clan-cli: tests/age_keys.py add notes, move function to check sops recipients
This supports the new integration test for `clan machines delete`.
2025-03-19 10:53:38 +00:00
Mic92
c8db27340e Merge pull request 'Fix clan machines create' (#3040) from Qubasa/clan-core:main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3040
2025-03-19 10:53:17 +00:00
Jörg Thalheim
31a9c74e88 deduplicate CLAN_CORE/CLAN_CORE_PATH environment variables 2025-03-19 10:30:52 +00:00
Qubasa
dc8bfab65d clan-cli: Fix templates not downloading template, Make templates use Flake cache, Fix flake cache exception on conditional attribute, add more tests 2025-03-19 10:30:52 +00:00
DavHau
33abb7ecd7 docs: add guide for testing 2025-03-19 09:43:05 +00:00
renovate[bot]
fcbdae9d09 chore(deps): update treefmt-nix digest to adc195e 2025-03-19 08:10:09 +00:00
Michael Hoang
27b5680441 Merge pull request 'checks: ensure updating hardware config doesn't require system' (#3067) from fix/update-hardware-config-without-system into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3067
2025-03-19 06:53:50 +00:00
Michael Hoang
f13971167f checks: ensure updating hardware config doesn't require system 2025-03-19 15:28:52 +09:00
renovate[bot]
e75b5f3a2e chore(deps): lock file maintenance 2025-03-18 23:10:46 +00:00
renovate[bot]
d5c0a2eb9c chore(deps): update nixpkgs digest to 9bc8a90 2025-03-18 15:50:09 +00:00
renovate[bot]
8cc8d09a11 chore(deps): update treefmt-nix digest to b3b938a 2025-03-18 15:00:13 +00:00
Mic92
dfa3305450 Merge pull request 'nixpkgs-update' (#3061) from nixpkgs-update into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3061
2025-03-18 14:42:16 +00:00
Jörg Thalheim
94415dfd0e use pathlib.iterdir() everywhere 2025-03-18 15:34:06 +01:00
renovate[bot]
6fb5bca801 chore(deps): update nixpkgs digest to 0964789 2025-03-18 14:20:23 +00:00
renovate[bot]
4162810ee1 chore(deps): update disko digest to 0d8c6ad 2025-03-18 14:08:30 +00:00
Mic92
0b3badb0ef Merge pull request 'chore(deps): update dependency typescript to v5.8.2' (#3058) from typescript into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3058
2025-03-18 14:00:32 +00:00
Jörg Thalheim
6a5954ad77 remove unused typescript directive 2025-03-18 14:52:22 +01:00
Mic92
02231b979b Merge pull request 'Update eslint' (#3057) from eslint into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3057
2025-03-18 13:51:36 +00:00
Jörg Thalheim
028f6a4d3d prune npm deps 2025-03-18 14:43:30 +01:00
Jörg Thalheim
170908db7b address eslint warnings 2025-03-18 14:40:16 +01:00
renovate[bot]
39e6534dbb chore(deps): update typescript-eslint monorepo to v8 2025-03-18 14:26:21 +01:00
renovate[bot]
71809c1bdc chore(deps): update dependency eslint to v9 2025-03-18 14:26:19 +01:00
Mic92
eecedf95e4 Merge pull request 'cli: increase timeout for pytest' (#3056) from ci into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3056
2025-03-18 13:18:09 +00:00
Mic92
a208a9973c Merge pull request 'flake: switch back to using main branch of sops-nix' (#3050) from bump/sops-nix into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3050
2025-03-18 13:11:29 +00:00
renovate[bot]
d276d2faea chore(deps): update dependency typescript to v5.8.2 2025-03-18 13:10:11 +00:00
Mic92
d470283dca Merge pull request 'fix(deps): update dependency @solid-primitives/storage to v4' (#3046) from renovate/solid-primitives-storage-4.x into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3046
2025-03-18 13:08:51 +00:00
Mic92
88dab7d8bd Merge pull request 'chore(deps): update dependency vitest to v3' (#3044) from renovate/major-vitest-monorepo into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3044
2025-03-18 13:07:47 +00:00
Mic92
8474a0aaef Merge pull request 'chore(deps): update dependency @types/node to v22' (#3037) from renovate/node-22.x into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3037
2025-03-18 13:07:29 +00:00
Jörg Thalheim
5ab2f206ea cli: increase timeout for pytest 2025-03-18 14:04:43 +01:00
Michael Hoang
ea8037006f flake: switch back to using main branch of sops-nix 2025-03-18 12:55:13 +00:00
renovate[bot]
3a682a6b3e fix(deps): update dependency @solid-primitives/storage to v4 2025-03-18 12:50:42 +00:00
renovate[bot]
0556ea624f chore(deps): update dependency vitest to v3 2025-03-18 12:50:30 +00:00
renovate[bot]
8671fd7407 chore(deps): update dependency @types/node to v22 2025-03-18 12:50:14 +00:00
Mic92
3a9f0eb608 Merge pull request 'Fix broken installation tests' (#3055) from ci into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3055
2025-03-18 12:49:14 +00:00
Jörg Thalheim
1736b0f539 work around in installation test by using newer nix version 2025-03-18 13:29:28 +01:00
Jörg Thalheim
eb375f3d81 tests: fix warning about invalid nix registry 2025-03-18 13:29:28 +01:00
renovate[bot]
6162b82adb fix(deps): update tanstack-query monorepo 2025-03-18 12:10:39 +00:00
renovate[bot]
085189d1c4 fix(deps): update dependency nanoid to v5.1.4 2025-03-18 12:00:33 +00:00
renovate[bot]
3cb22ad2a1 chore(deps): update dependency vite to v6 2025-03-18 11:40:34 +00:00
Luis Hebendanz
27269d4ed9 Merge pull request 'ADR: init clanModules architecture decision' (#2838) from hsjobeki/clan-core:adr/clanModules into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2838
Reviewed-by: kenji <aks.kenji@protonmail.com>
Reviewed-by: pinpox <clan@pablo.tools>
Reviewed-by: Luis Hebendanz <consulting@qube.email>
Reviewed-by: DavHau <d.hauer.it@gmail.com>
2025-03-18 11:15:16 +00:00
Johannes Kirschbauer
7cbedc74a5 ADR: init clanModules architecture decision
ADR: improve after review

improve wording

improve based on feedback

decisions/clanModules
2025-03-18 11:14:16 +00:00
renovate[bot]
5ac30a767b chore(deps): update dependency jsdom to v26 2025-03-12 15:01:24 +00:00
renovate[bot]
89c6bcda4d chore(deps): update actions/checkout action to v4 2025-03-12 13:01:16 +00:00
renovate[bot]
51da020de2 fix(deps): update tanstack-query monorepo 2025-03-12 12:12:37 +00:00
renovate[bot]
e943d8531f fix(deps): update dependency nanoid to v5.1.3 2025-03-12 12:01:17 +00:00
renovate[bot]
13b9c23db9 fix(deps): update dependency @solidjs/router to ^0.15.0 2025-03-12 11:12:36 +00:00
renovate[bot]
ad43f323b8 fix(deps): update dependency @solid-primitives/storage to v3.8.0 2025-03-12 11:01:19 +00:00
renovate[bot]
aeb3cc4428 chore(deps): update typescript-eslint monorepo to v7.18.0 2025-03-12 10:12:37 +00:00
renovate[bot]
d81ca7206b fix(deps): update dependency @modular-forms/solid to ^0.25.0 2025-03-12 09:22:33 +00:00
renovate[bot]
0011cf594a chore(deps): update eslint monorepo 2025-03-12 09:12:38 +00:00
renovate[bot]
41cd4533ba chore(deps): update dependency vite-plugin-solid to v2.11.6 2025-03-12 09:01:19 +00:00
renovate[bot]
c15544e928 chore(deps): update dependency vite to v5.4.14 2025-03-12 08:02:36 +00:00
renovate[bot]
fa0fe23985 chore(deps): update dependency solid-devtools to ^0.33.0 2025-03-12 07:52:35 +00:00
renovate[bot]
1497e76bc2 chore(deps): update dependency vitest to v1.6.1 2025-03-12 07:42:29 +00:00
renovate[bot]
b3d9c23e39 chore(deps): update dependency eslint-plugin-tailwindcss to v3.18.0 2025-03-12 07:32:34 +00:00
renovate[bot]
5520641feb chore(deps): update dependency daisyui to v4.12.24 2025-03-12 07:12:43 +00:00
renovate[bot]
97f5a6bd4c chore(deps): update dependency prettier to v3.5.3 2025-03-12 07:01:20 +00:00
renovate[bot]
3b2b5db84a chore(deps): update dependency tailwindcss to v3.4.17 2025-03-12 05:52:38 +00:00
renovate[bot]
84da7d437d fix(deps): update dependency material-icons to v1.13.14 2025-03-12 05:42:37 +00:00
renovate[bot]
b2db2c7abc fix(deps): update dependency corvu to v0.7.2 2025-03-12 05:32:47 +00:00
renovate[bot]
cb104b700d fix(deps): update dependency solid-markdown to v2.0.14 2025-03-12 05:12:55 +00:00
renovate[bot]
41054885db chore(deps): update dependency @types/node to v20.17.24 2025-03-12 05:01:29 +00:00
renovate[bot]
70c63221ec chore(deps): update dependency jsdom to v24.1.3 2025-03-11 17:17:25 +00:00
renovate[bot]
9c130c73e4 chore(deps): update dependency autoprefixer to v10.4.21 2025-03-11 16:57:31 +00:00
Mic92
178fff0618 Merge pull request 'nix fmt: renovate.json' (#3015) from ci into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3015
2025-03-11 16:57:08 +00:00
Jörg Thalheim
6324b495ee nix fmt: renovate.json 2025-03-11 17:56:52 +01:00
Mic92
ce7a70f9e1 Merge pull request 'renovate: enable recommend config/lock files/nix' (#3013) from ci into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3013
2025-03-11 16:43:51 +00:00
Jörg Thalheim
7102af9bd9 renovate: enable recommended config/lock files/nix 2025-03-11 17:43:31 +01:00
Mic92
b38fddaf29 Merge pull request 'drop renovate json' (#3012) from ci into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3012
2025-03-11 16:38:06 +00:00
Jörg Thalheim
e7ffcedd14 drop renovate json
We just use the defaults in clan-infra.
2025-03-11 16:38:00 +00:00
Mic92
b5a66e767b Merge pull request 'renovate: also update flake.lock' (#3011) from ci into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3011
2025-03-11 16:33:00 +00:00
Jörg Thalheim
854d0fa83e renovate: also update flake.lock 2025-03-11 17:31:38 +01:00
Mic92
4ccf5ca373 Merge pull request 'renovate: enable dependency dashboard' (#3009) from ci into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3009
2025-03-11 16:26:55 +00:00
Jörg Thalheim
781d439567 renovate: enable dependency dashboard 2025-03-11 17:23:42 +01:00
Mic92
68e00ff613 Merge pull request 'chore(deps): update dependency @tailwindcss/typography to v0.5.16' (#3006) from renovate/tailwindcss-typography-0.x-lockfile into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3006
2025-03-11 15:39:10 +00:00
Mic92
828028e4b3 Merge pull request 'chore(deps): update dependency @floating-ui/dom to v1.6.13' (#3005) from renovate/floating-ui-dom-1.x-lockfile into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3005
2025-03-11 15:38:24 +00:00
renovate[bot]
b48d07f5c5 chore(deps): update dependency @tailwindcss/typography to v0.5.16 2025-03-11 15:31:37 +00:00
renovate[bot]
ea8c9ed649 chore(deps): update dependency @floating-ui/dom to v1.6.13 2025-03-11 15:31:33 +00:00
Mic92
68cb04c958 Merge pull request 'chore: Configure Renovate' (#3000) from renovate/configure into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3000
2025-03-11 15:06:28 +00:00
renovate[bot]
b8cb85fc72 Add renovate.json 2025-03-11 15:02:50 +00:00
Mic92
bdb97308d0 Merge pull request 'remove clan-bot' (#2999) from ci into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2999
2025-03-11 15:00:53 +00:00
Jörg Thalheim
9708bdc6e7 remove clan-bot
Gitea has auto-merge built in, so we don't need the clan-bot.
2025-03-11 15:02:41 +01:00
Jörg Thalheim
9ac8a45f1d actually hide test-fixture from user
If we want to avoid the dependency on this facter JSON, we cannot
expose it as a flake input, because `nix flake archive` would have to
download it on each deployment step.
2025-03-11 12:23:59 +00:00
Jörg Thalheim
a14fe1aef8 try to reproduce CI error with newer nix version 2025-03-11 12:23:59 +00:00
Jörg Thalheim
b1401d6e6b fix only the first generator of each machine being re-encrypted 2025-03-11 12:09:00 +01:00
Jörg Thalheim
f882c86fb0 don't log cache miss by default
This is expected and happens regularly, so there is no value in
logging it.
2025-03-11 12:03:55 +01:00
Jörg Thalheim
98d566c46e add test for parsing ssh options 2025-03-11 11:27:04 +01:00
Jörg Thalheim
c4ec4ccb3f checks/morph: after flake update, increase memory size to 2048 2025-03-11 09:46:54 +00:00
Clan Merge Bot
5a6677379a update flake lock - 2025-03-10T00:00+00:00
Flake lock file updates:

• Updated input 'disko':
    'github:nix-community/disko/fa5746ecea1772cf59b3f34c5816ab3531478142?narHash=sha256-xFnU%2BuUl48Icas2wPQ%2BZzlL2O3n8f6J2LrzNK9f2nng%3D' (2025-02-15)
  → 'github:nix-community/disko/19c1140419c4f1cdf88ad4c1cfb6605597628940?narHash=sha256-WK%2BPZHbfDjLyveXAxpnrfagiFgZWaTJglewBWniTn2Y%3D' (2025-02-25)
• Updated input 'flake-parts':
    'github:hercules-ci/flake-parts/32ea77a06711b758da0ad9bd6a844c5740a87abd?narHash=sha256-7H9XgNiGLKN1G1CgRh0vUL4AheZSYzPm%2BzmZ7vxbJdo%3D' (2025-02-01)
  → 'github:hercules-ci/flake-parts/f4330d22f1c5d2ba72d3d22df5597d123fdb60a9?narHash=sha256-%2Bu2UunDA4Cl5Fci3m7S643HzKmIDAe%2BfiXrLqYsR2fs%3D' (2025-03-07)
• Updated input 'nixpkgs':
    'https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre762233.02032da4af07/nixexprs.tar.xz?narHash=sha256-%2BvOiMQwHEYBbWgvK//cuUqHZQ/y3DddCLyxZAbDdpnM%3D' (1980-01-01)
  → 'https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre764393.ed0b1881565c/nixexprs.tar.xz?narHash=sha256-Xre00/fcpD/SxZZfxRuMSu7YOBCY6vOfgFBYKNntox8%3D' (1980-01-01)
2025-03-11 09:46:54 +00:00
DavHau
30d19d088f docs: move contributing+debugging to section contributing 2025-03-11 15:19:46 +07:00
Luis Hebendanz
f3c45eb23e Merge pull request 'pkgs/webview-lib: Fix version' (#2991) from kenji/clan-core:webview-version into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2991
2025-03-10 13:20:38 +00:00
a-kenji
eaac6c76e2 pkgs/webview-lib: Fix version 2025-03-10 13:20:38 +00:00
Luis Hebendanz
0939b29a8e Merge pull request 'clan-cli/tests: limit jobs to 16' (#2986) from DavHau/clan-core:dave into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2986
2025-03-10 13:20:18 +00:00
DavHau
a2a395cdb0 clan-cli/tests: limit jobs to 16
This reduces load on the CI, as it already runs multiple test instances in parallel (with-core, without-core, etc.) and would otherwise spawn 96 workers for each of those.
2025-03-10 13:20:18 +00:00
Luis Hebendanz
df7429dbe7 Merge pull request 'fix: clan machines install on machines without hardware configuration' (#2983) from fix/systemless-installs into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2983
Reviewed-by: kenji <aks.kenji@protonmail.com>
2025-03-10 13:19:17 +00:00
Michael Hoang
362faaf063 checks: use facter.json from external test-fixtures repo 2025-03-10 12:30:03 +09:00
Michael Hoang
e215a9db6e install: let nixos-anywhere determine where to build automatically
This fixes installing machines that don't have `system` defined, i.e.
when running `clan machines install` with `--update-hardware-config`.
2025-03-10 12:30:03 +09:00
Michael Hoang
a5dd76b66d checks: don't expose systems that can't be evaluated 2025-03-10 11:58:32 +09:00
Michael Hoang
4472c51c25 checks: test installation on system-less systems 2025-03-10 11:58:32 +09:00
Michael Hoang
c6cf9d1336 checks/installation: use test-flake instead of self 2025-03-10 11:58:32 +09:00
Clan Merge Bot
9b6e42790e update flake lock - nixpkgs - 2025-03-10T00:00+00:00
Flake lock file updates:

• Updated input 'nixpkgs':
    'https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre762233.02032da4af07/nixexprs.tar.xz?narHash=sha256-%2BvOiMQwHEYBbWgvK//cuUqHZQ/y3DddCLyxZAbDdpnM%3D' (1980-01-01)
  → 'https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre764393.ed0b1881565c/nixexprs.tar.xz?narHash=sha256-Xre00/fcpD/SxZZfxRuMSu7YOBCY6vOfgFBYKNntox8%3D' (1980-01-01)
2025-03-10 00:00:46 +00:00
a-kenji
547b012e0b clanModules/mycelium: Allow by default when adding the module 2025-03-09 23:49:03 +00:00
DavHau
9797ef792a vars+facts: use bwrap only if supported 2025-03-09 13:52:15 +07:00
Luis Hebendanz
fe0de90a28 Merge pull request 'Fix iwd space handling' (#2980) from Qubasa/clan-core:main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2980
2025-03-07 13:34:48 +00:00
Qubasa
539fd30206 clan-cli: treefmt fix to iwd module 2025-03-07 14:26:37 +01:00
Guilhem Saurel
a11d5471ec Fix iwd space handling
ref man iwd.network:

> Key-value lines contain a setting key, an equal sign and the value of
> the setting. Whitespace preceding the key, the equal sign or the value,
> is ignored. The key must be a continuous string of alphanumeric and
> underscore characters and minus signs only. The value starts at the
> first non-whitespace character after the first equal sign on the line
> and ends at the end of the line and must be correctly UTF-8-encoded.
> […]
> String values, including file
> paths and hexstrings, are written as is except for five characters that
> may be backslash-escaped: space, \t, \r, \n and backslash itself.
> The latter three must be escaped. A space character must be escaped if
> it is the first character in the value string and is written as \s.

I guess this is what is expected then:
```
$ echo -e "  \t \r \\ "
 \

$ echo -e "  \t \r \\ " | sed "s=\\\=\\\\\\\=g;s=\t=\\\t=g;s=\r=\\\r=g;s=^ =\\\s="
\s \t \r \\
```
2025-03-07 14:26:37 +01:00
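In Python terms, one reading of those escaping rules could look like this (a sketch, not the iwd module's actual code):

```python
def escape_iwd_value(value: str) -> str:
    # Backslash first, so later escapes aren't double-escaped; per the man
    # page, \r, \n and backslash itself must be escaped, \t may be, and a
    # leading space is written as \s.
    value = value.replace("\\", "\\\\")
    value = value.replace("\t", "\\t")
    value = value.replace("\r", "\\r").replace("\n", "\\n")
    if value.startswith(" "):
        value = "\\s" + value[1:]
    return value
```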
Luis Hebendanz
19f2facbce Merge pull request 'clan-cli flake: make lix compatible' (#2970) from lix-compat into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2970
2025-03-07 13:21:45 +00:00
Qubasa
468a25034e clan-cli: Make Lix compatible again 2025-03-07 14:06:35 +01:00
Johannes Kirschbauer
a2b76eb5a2 Inventory: rename internal inventory toplevel attribute to 'inventoryClass' 2025-03-07 12:30:29 +00:00
DavHau
ba0ed30997 update nixpkgs 2025-03-07 14:30:01 +07:00
DavHau
2a4d2c9cb5 switch to nixpkgs hosted by cache.nixos.org
take 2 on https://git.clan.lol/clan/clan-core/pulls/2921
2025-03-07 07:20:07 +00:00
Michael Hoang
4c1e74fae6 nixos/clan: rename setDefaults to enableRecommendedDefaults 2025-03-05 03:37:41 +00:00
pinpox
cee62bf168 Merge pull request 'Automatic updates (phase 1)' (#2914) from pinpox/clan-core:auto-update-module into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2914
2025-03-04 10:26:20 +00:00
Pablo Ovelleiro Corral
a865213894 Add auto-upgrade module 2025-03-04 10:09:37 +01:00
Johannes Kirschbauer
d8f9375580 Docs: add comment where actual contributing.md is located 2025-03-04 15:16:54 +09:00
Luis Hebendanz
526072806f Merge pull request 'docs/repo-layout: Remove infra section' (#2964) from kenji/clan-core:docs/remove-infra into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2964
Reviewed-by: Enzime <enzime@noreply.git.clan.lol>
2025-03-03 11:34:45 +00:00
a-kenji
91a19d9ea9 docs/repo-layout: Remove infra section
Remove the public infra section, as it uses a bit of unfortunate
wording. It suggests we would add anyone to our infrastructure as an
admin.
2025-03-03 11:34:45 +00:00
Luis Hebendanz
38c7644692 Merge pull request 'docs/CONTRIBUTING: Remove internal tools documentation to streamline' (#2962) from kenji/clan-core:docs/remove-internal into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2962
Reviewed-by: Enzime <enzime@noreply.git.clan.lol>
2025-03-03 11:34:34 +00:00
a-kenji
726f2ab5f8 docs/CONTRIBUTING: Remove internal tools documentation to streamline
Remove our internal developer tools documentation to streamline the
CONTRIBUTING.md guide and to not confuse external contributors.
2025-03-03 11:34:34 +00:00
a-kenji
5918620535 docs/CONTRIBUTING: Add missing article 2025-03-02 20:44:42 -08:00
a-kenji
58e85eda9c clanModules/iwd: Fix conversion link 2025-03-02 20:27:40 -08:00
a-kenji
e98e817941 docs: Fix numbered markdown list 2025-03-02 20:10:43 -08:00
a-kenji
fe92c7d1e6 docs/CONTRIBUTING: Clarify wrong suggestion about debugging dependent tools 2025-03-03 03:57:33 +00:00
a-kenji
4222f9788c docs/site: Fix flake-parts link 2025-03-03 03:53:49 +00:00
a-kenji
3d80423259 docs/CONTRIBUTING: Fix data-mesher reference name 2025-03-02 19:42:57 -08:00
Johannes Kirschbauer
186e81d8b9 Tests: migrate backup tests to use inventory 2025-03-03 02:28:03 +00:00
Qubasa
212c899767 clan-cli: Revert generating facts in clan facts list, for consistency 2025-03-03 01:40:40 +01:00
Luis Hebendanz
312c12c98f Merge pull request 'clan-cli: Fix wrong clan vars generate regression added by a903a9028b555223ddcb897cf8a8fb198fb991b4' (#2951) from Qubasa/clan-core:fix_regression into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2951
2025-03-02 19:05:23 +00:00
Qubasa
2ec4e49650 clan-cli: Fix wrong clan vars generate regression added by a903a9028b 2025-03-02 19:53:17 +01:00
kenji
4e5b4a1b80 Merge pull request 'clanModules/root-password: don't deploy plain text password' (#2950) from vdbe/clan-core:clanModules/root-password/dont-deploy-plain-text into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2950
2025-03-02 18:43:38 +00:00
vdbe
ccb3bdb740 clanModules/root-password: don't deploy plain text password 2025-03-02 17:52:03 +01:00
Luis Hebendanz
a903a9028b Merge pull request 'clan-cli: Fix get_all_facts forgetting to generate facts before getting them' (#2949) from Qubasa/clan-core:other_fixes into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2949
2025-03-02 14:57:59 +00:00
Qubasa
ba28691747 clan-cli: Fix get_all_facts forgetting to generate facts before getting them 2025-03-02 15:07:57 +01:00
Luis Hebendanz
e7aa5cfb4e Merge pull request 'templates: Remove description attribute from templates' (#2933) from kenji/clan-core:templates/remove-description into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2933
Reviewed-by: Enzime <enzime@noreply.git.clan.lol>
2025-03-02 13:45:59 +00:00
a-kenji
8b74147721 templates: Remove description attribute from templates
Remove the `description` flake attribute from templates.
It has limited usefulness, is unset by default, and is yet another
thing the user has to set and is confronted with.

It seems better to omit this attribute to keep the focus here on what
really matters.
2025-03-02 13:45:59 +00:00
Luis Hebendanz
299180703e Merge pull request 'clanModules/machine-id: fix value' (#2948) from vdbe/clan-core:fix/clanmodules/machine-id into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2948
2025-03-02 13:44:48 +00:00
vdbe
6c941deb96 clanModules/machine-id: fix value 2025-03-02 10:41:35 +01:00
Michael Hoang
39761946a0 vars/sops: fix `clan vars fix` missing machine name in error message 2025-03-02 08:22:23 +07:00
Luis Hebendanz
b71e16dd5d Merge pull request 'clan-cli: Remove can_build_locally and replace with nixos-anywhere --build-on auto' (#2944) from Qubasa/clan-core:main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2944
2025-03-01 17:34:17 +00:00
Qubasa
0da1a05b55 clan-cli: Remove can_build_locally and replace with nixos-anywhere --build-on auto 2025-03-01 17:52:41 +01:00
Luis Hebendanz
3551d061ce Merge pull request 'clan-cli: Make host upload function support uploading single files too' (#2943) from Qubasa/clan-core:main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2943
2025-03-01 16:35:19 +00:00
Qubasa
6099aeb0c6 clan-cli: Make host upload function support uploading single files too 2025-03-01 17:10:42 +01:00
lassulus
bcd6c7108a clan-cli: try to fix CI bug again 2025-02-28 04:58:42 +00:00
lassulus
d20f13abe7 clan-cli: set ssh port for nix copy 2025-02-28 04:58:42 +00:00
a-kenji
cfeda1f06d templates: Remove unneeded article 2025-02-27 11:28:48 +00:00
Luis Hebendanz
73dd981f71 Merge pull request 'docs/guide: Mention alternative secret store backends' (#2926) from kenji/clan-core:docs/add-reference-to-other-backend into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2926
2025-02-27 09:58:00 +00:00
a-kenji
bc239e104c docs/guide: Mention alternative secret store backends 2025-02-27 09:58:00 +00:00
Luis Hebendanz
bd2702df6d Merge pull request 'templates: Remove superfluous comment' (#2932) from kenji/clan-core:templates/remove-superfluuous-comment into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2932
2025-02-27 09:57:13 +00:00
a-kenji
7b0e652a7a templates: Remove superfluous comment
Remove this seemingly superfluous comment.
It is unclear what it really refers to.
Let's just remove it for brevity's sake.
2025-02-27 09:57:13 +00:00
Luis Hebendanz
0c0eafe0f5 Merge pull request 'docs/guide: Deduplicate machine instructions' (#2924) from kenji/clan-core:docs/dedup-machines into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2924
2025-02-27 09:56:52 +00:00
a-kenji
3e0cd4bdfb docs/guide: Deduplicate machine instructions 2025-02-27 09:56:52 +00:00
Luis Hebendanz
2cf40fea51 Merge pull request 'tests/inventory: Drop zed-editor from the test inventory' (#2922) from kenji/clan-core:drop/zed into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2922
Reviewed-by: lassulus <clanlol@lassul.us>
2025-02-27 09:56:23 +00:00
a-kenji
40d1a76d8a tests/inventory: Drop zed-editor from the test inventory
Drop `zed-editor` from the test inventory and replace it with `hello`.
The inventory packages are built in the tests; we don't want to build
large derivations there.
2025-02-27 09:56:23 +00:00
lassulus
60b22fdf0e clan-cli: another try to fix the CI bug 2025-02-27 08:24:28 +01:00
lassulus
cb13e7fab8 clan-cli: check if paths exist in cache check 2025-02-27 02:53:41 +01:00
Qubasa
b82a3b6085 clan-cli: Fix flake.py missing cache eviction if there is a garbage collected path 2025-02-27 01:01:50 +01:00
lassulus
44345ed28b Merge pull request 'fix(clan-cli): fix cross system' (#2935) from r17x/clan-core:fix/machine-cross-target-host into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2935
2025-02-26 09:18:08 +00:00
r17x
456b25c921 fix(clan-cli): fix cross system 2025-02-26 14:27:00 +07:00
a-kenji
dfb5e5123f docs/guide: Add devshell recommendation to getting started
Fixes: #2218
2025-02-25 18:16:50 +00:00
a-kenji
636ee65428 clanModules/zerotier: Fix documentation 2025-02-26 00:41:20 +07:00
a-kenji
cbf8685f6e templates/flake-parts: Add .envrc
Since the other template also has an `.envrc` let's keep everything
consistent.
2025-02-25 18:43:53 +07:00
Pablo Ovelleiro Corral
500af543bb Revert "switch to nixpkgs hosted by cache.nixos.org"
This reverts commit 8f6dd4acc4.
2025-02-25 09:04:27 +01:00
Pablo Ovelleiro Corral
46971aa51f Apply suggestion 2025-02-25 08:51:51 +01:00
Pablo Ovelleiro Corral
3d83266916 Fix exists() check for age files 2025-02-25 08:51:51 +01:00
lassulus
b87768d44a Merge pull request 'clan-cli: add unit tests for test_parse_deployment_address' (#2910) from lopter/clan-core:lo-test-parse-deployment-address into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2910
2025-02-25 05:31:49 +00:00
Louis Opter
5b821c610d clan-cli: add unit tests for test_parse_deployment_address
Follow-up to #2899, more thorough than #2909.
2025-02-25 05:31:49 +00:00
Mic92
347a5a5f76 Merge pull request 'switch to nixpkgs hosted by cache.nixos.org' (#2921) from fix-build into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2921
2025-02-24 03:48:43 +00:00
Jörg Thalheim
8f6dd4acc4 switch to nixpkgs hosted by cache.nixos.org
This now works thanks to https://github.com/NixOS/infra/pull/562
2025-02-24 10:44:15 +07:00
Clan Merge Bot
f3cbd0b289 update flake lock - treefmt-nix - 2025-02-24T00:00+00:00
Flake lock file updates:

• Updated input 'treefmt-nix':
    'github:numtide/treefmt-nix/4f09b473c936d41582dd744e19f34ec27592c5fd?narHash=sha256-yrK3Hjcr8F7qS/j2F%2Br7C7o010eVWWlm4T1PrbKBOxQ%3D' (2025-02-07)
  → 'github:numtide/treefmt-nix/3d0579f5cc93436052d94b73925b48973a104204?narHash=sha256-mL1szCeIsjh6Khn3nH2cYtwO5YXG6gBiTw1A30iGeDU%3D' (2025-02-17)
2025-02-24 00:00:55 +00:00
Qubasa
7b8a980336 clan-cli: Remove allow_uknown_placeholders flag. With the upcoming nixos-anywhere patch this is not needed anymore. 2025-02-23 21:50:21 +01:00
Qubasa
d53e062024 clan-cli: Add an optional reference to an AsyncFuture to track the origin of a task. 2025-02-23 21:44:00 +01:00
lassulus
5ac629f549 clan-cli: use new flake caching for machines 2025-02-23 15:58:03 +01:00
lassulus
6c7fc15c0e try to fix profiles CI bug 2025-02-22 03:39:42 +00:00
lassulus
3121c5ecdb machines install: fix installation via tor 2025-02-22 03:39:42 +00:00
lassulus
ada544ef56 vars fs: fix 2025-02-22 03:39:42 +00:00
lassulus
3e0f9f52bb clan-cli deploy_info: fix find_reachable_host returning unreachable hosts 2025-02-22 03:39:42 +00:00
lassulus
3992d0ed0d add demo_iso code for iso-morphing 2025-02-22 03:39:42 +00:00
lassulus
6037dde559 Merge pull request 'Add support for XDG_* style directories on macos' (#2865) from Undone8/clan-core:main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2865
2025-02-22 03:34:56 +00:00
Denis Rosca
baa0a615ea Add support for XDG_* style directories on macos
Closes #2864
2025-02-22 03:34:56 +00:00
Jörg Thalheim
b0760bc2b9 recommend vars over facts 2025-02-22 03:34:40 +00:00
Mic92
6a33fe8e7a Merge pull request 'fix regex for detecting git+file inputs' (#2907) from fix-git-input into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2907
2025-02-21 02:57:22 +00:00
Jörg Thalheim
1f3bd09245 fix regex for detecting git+file inputs 2025-02-20 10:00:04 +07:00
a-kenji
122dbf4240 clanModules/mycelium: Remove certain options for compatibility reasons
We want to reintroduce them once we pass in `vars` through the
inventory.
2025-02-19 09:54:00 +00:00
Qubasa
8ac286bcaf docs: Fix install documentation 2025-02-19 14:23:45 +07:00
hsjobeki
8fcc004b68 Merge pull request 'clan-cli: "fix" ssh option parsing' (#2899) from lopter/clan-core:lo-fix-ssh-option-parsing into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2899
2025-02-19 01:41:07 +00:00
Louis Opter
37bbbefa8e clan-cli: "fix" ssh option parsing
Calling it a "fix" in double quotes since it's still quite hand-crafted,
but at least you can now specify options with `@` inside them (e.g.
`ProxyJump`) and have them work properly.

Moreover this fixes the syntax for GET-like variables in the networking
clanCore module. Only the fixed syntax is supported since that's what
was tested, and actually parsed in the code.
2025-02-18 21:37:38 +00:00
Qubasa
d44def5381 clan-cli: Fix clan create throwing a warning if --flake is not defined 2025-02-18 17:40:27 +07:00
Qubasa
03ce74fc74 clan-cli: Improve error message of clan update-hardware-config when nixos-facter is not found 2025-02-18 17:40:27 +07:00
Qubasa
6c8137d30b docs: Fix multiple issues with the clan installation guide 2025-02-18 17:40:27 +07:00
Pablo Ovelleiro Corral
27a3126d68 Make store-backend configurable 2025-02-18 06:34:50 +01:00
Qubasa
faee6c2a79 clan-cli: Re-add test_copy_from_nixstore_symlink test but mark it impure 2025-02-17 14:21:50 +07:00
Qubasa
6070219b1a clan-cli: Remove set -x from pytest script 2025-02-17 14:10:22 +07:00
Qubasa
a5e32f9b6d clan-cli: Fix clan flakes create inside an already existing git repo 2025-02-17 13:23:31 +07:00
Qubasa
89e3793831 clan-cli: Add CLAN_TEST_STORE env var to clan-pytest-without-core 2025-02-17 13:19:45 +07:00
Michael Hoang
fd908e18c3 templates: move machine templates 2025-02-17 02:15:42 +00:00
Clan Merge Bot
a4d4b991a1 update flake lock - 2025-02-17T00:00+00:00
Flake lock file updates:

• Updated input 'disko':
    'github:nix-community/disko/ff3568858c54bd306e9e1f2886f0f781df307dff?narHash=sha256-3Z40qHaFScWUCVQrGc4Y%2BRdoPsh1R/wIh%2BAN4cTXP0I%3D' (2025-02-05)
  → 'github:nix-community/disko/fa5746ecea1772cf59b3f34c5816ab3531478142?narHash=sha256-xFnU%2BuUl48Icas2wPQ%2BZzlL2O3n8f6J2LrzNK9f2nng%3D' (2025-02-15)
• Updated input 'nixos-facter-modules':
    'github:numtide/nixos-facter-modules/fa11d87b61b2163efbb9aed7b7a5ae0299e5ab9c?narHash=sha256-aY55yiifyo1XPPpbpH0kWlV1g2dNGBlx6622b7OK8ks%3D' (2025-01-15)
  → 'github:numtide/nixos-facter-modules/60f8b8f3f99667de6a493a44375e5506bf0c48b1?narHash=sha256-/nA3tDdp/2g0FBy8966ppC2WDoyXtUWaHkZWL%2BN3ZKc%3D' (2025-02-05)
• Updated input 'nixpkgs':
    'github:NixOS/nixpkgs/1128e89fd5e11bb25aedbfc287733c6502202ea9?narHash=sha256-3ebRdThRic9bHMuNi2IAA/ek9b32bsy8F5R4SvGTIog%3D' (2025-02-13)
  → 'github:NixOS/nixpkgs/fada727ee7c0bd487e311dede0a2b0725a0f7765?narHash=sha256-Zc%2BK4AxAwFaWKK18nSl/3TKidGf46En7bfK8SL%2BRevg%3D' (2025-02-14)
2025-02-17 00:52:05 +00:00
Clan Merge Bot
4670525106 update flake lock - disko - 2025-02-17T00:00+00:00
Flake lock file updates:

• Updated input 'disko':
    'github:nix-community/disko/ff3568858c54bd306e9e1f2886f0f781df307dff?narHash=sha256-3Z40qHaFScWUCVQrGc4Y%2BRdoPsh1R/wIh%2BAN4cTXP0I%3D' (2025-02-05)
  → 'github:nix-community/disko/fa5746ecea1772cf59b3f34c5816ab3531478142?narHash=sha256-xFnU%2BuUl48Icas2wPQ%2BZzlL2O3n8f6J2LrzNK9f2nng%3D' (2025-02-15)
2025-02-17 00:34:01 +00:00
Clan Merge Bot
5a0ed03c56 update flake lock - nixpkgs - 2025-02-17T00:00+00:00
Flake lock file updates:

• Updated input 'nixpkgs':
    'github:NixOS/nixpkgs/1128e89fd5e11bb25aedbfc287733c6502202ea9?narHash=sha256-3ebRdThRic9bHMuNi2IAA/ek9b32bsy8F5R4SvGTIog%3D' (2025-02-13)
  → 'github:NixOS/nixpkgs/fada727ee7c0bd487e311dede0a2b0725a0f7765?narHash=sha256-Zc%2BK4AxAwFaWKK18nSl/3TKidGf46En7bfK8SL%2BRevg%3D' (2025-02-14)
2025-02-17 00:00:54 +00:00
Michael Hoang
af228db398 machines: hide morph 2025-02-17 00:22:14 +07:00
Pablo Ovelleiro Corral
b0e7de3c8b Create directory 2025-02-16 17:08:54 +00:00
Pablo Ovelleiro Corral
cb89fb0847 Fix locking 2025-02-16 17:08:54 +00:00
Pablo Ovelleiro Corral
014aec9531 Fix output when rebuilding 2025-02-16 17:08:54 +00:00
Michael Hoang
160bbfcb37 cli: add morph command 2025-02-16 21:31:28 +07:00
Michael Hoang
5c68e129b7 nixos/clan: add option for opting out of Clan defaults
Also replace `documentation.nixos.enable = false` with
`documentation.doc.enable` to opt out of only `nixos-help` and the HTML
manual but leave `man configuration.nix`.
2025-02-16 21:31:28 +07:00
Michael Hoang
bc53c7a886 cli: make some functions only create commits optionally 2025-02-16 21:14:09 +07:00
Michael Hoang
61c1943ccc templates: allow specifying from flake-parts module 2025-02-16 21:10:22 +07:00
a-kenji
c3013c1a02 docs/mesh-vpn: Document inventory usage 2025-02-16 13:32:42 +07:00
a-kenji
3cff6577da docs: Extend backups guide
Closes #2792
2025-02-16 13:04:53 +07:00
a-kenji
c795a1d895 clanModules/syncthing-static-peers: Migrate to vars
Part of the #2511 migration.
2025-02-15 23:34:25 +07:00
a-kenji
66e166068e clanModules/garage: Migrate to vars
Part of the #2511 migration.
2025-02-15 11:21:09 +00:00
lassulus
0c7173afd0 cli: nix_add_to_gcroots: don't run in sandboxed tests 2025-02-15 09:59:41 +00:00
lassulus
d5e391ecc8 clan-cli flake caching: fix selectors not merging 2025-02-15 09:59:41 +00:00
lassulus
2a3bc7b31b clan-cli: hash the flake_hash to remove unwanted character from path 2025-02-15 09:59:41 +00:00
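The idea, as a sketch (names are illustrative): hashing the raw flake hash yields a fixed-length hex string that is always safe to embed in a filesystem path.

```python
import hashlib

flake_hash = "sha256-Xre00/fcpD/SxZZfxRuMSu7YOBCY6vOfgFBYKNntox8="  # example
cache_name = hashlib.sha256(flake_hash.encode()).hexdigest()  # hex only, path-safe
```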
a-kenji
b54346ce03 clanModules/state-version: Remove trailing newlines
The state version is now matched against certain regex rules.
We strip possible trailing newlines to improve compatibility.
2025-02-15 16:29:07 +07:00
hsjobeki
39bc7c1f17 Merge pull request 'Fix: clan machines delete persistance logic' (#2871) from hsjobeki/clan-core:hsjobeki-main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2871
2025-02-15 08:34:47 +00:00
Johannes Kirschbauer
153b5560c3 CLI: delete machines bugfix. Don't modify the inventory in other places 2025-02-15 09:03:54 +07:00
Johannes Kirschbauer
2412513ad4 Inventory: init delete api 2025-02-15 09:03:46 +07:00
Jörg Thalheim
873f650678 remove directory = self from our documentation 2025-02-15 01:48:17 +00:00
Qubasa
35aedddf65 docs: Add --refresh flag to nix shell command to mitigate caching issues 2025-02-14 13:28:21 +07:00
Qubasa
663ab70465 clan-cli: Make copy_from_nixstore work with single files 2025-02-14 13:28:01 +07:00
Jörg Thalheim
4f1e2ba582 zt-tcp-relay: useFetchCargoVendor 2025-02-14 12:25:32 +07:00
Jörg Thalheim
d3bd120a04 fix system.stateVersion 2025-02-14 12:22:26 +07:00
Jörg Thalheim
f8bf39e43a bump nixpkgs 2025-02-14 12:09:04 +07:00
Jörg Thalheim
93a7e272b1 Revert "zerotierone: fix on macOS"
This reverts commit 2e212e3e31.

no longer needed after nixpkgs bump
2025-02-14 12:08:18 +07:00
Qubasa
de3153259d clan-cli: Fix garbled `clan vms run` output. docs: Improve debugging guide 2025-02-13 16:17:55 +07:00
Luis Hebendanz
bf492d4deb Merge pull request 'clan-cli: Remove flake-registry set to none in get_clan_nix_attrset' (#2862) from Qubasa/clan-core:main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2862
2025-02-13 08:55:40 +00:00
Qubasa
41cb679eab clan-cli: Remove flake-registry set to none in get_clan_nix_attrset 2025-02-13 15:47:28 +07:00
Qubasa
b138cfcd69 clan-cli: Fix symlink issue with copy_from_nixstore, add test for it. Also add more comprehensive clan template tests 2025-02-13 15:34:21 +07:00
Luis Hebendanz
a22d426b25 Merge pull request 'cli: machines install: Add phases option to pass to nixos-anywhere and update the "Disk Encryption" documentation to use it' (#2858) from sachk/clan-core:main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2858
2025-02-13 07:04:27 +00:00
Sacha Korban
c0f07afb98 docs: disc-encryption: change guide to use phases option and misc improvements 2025-02-13 17:10:07 +11:00
Sacha Korban
0eaaabcf63 clan-cli: machines install: add phases option for nixos-anywhere 2025-02-13 17:10:02 +11:00
kenji
7df51d0474 Merge pull request 'clan-cli: secrets machines remove: update secrets after removing the key' (#2832) from lopter/clan-core:lo-fix-secrets-machine-remove into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2832
Reviewed-by: kenji <aks.kenji@protonmail.com>
2025-02-13 03:53:23 +00:00
Louis Opter
5a6038f742 clan-cli: secrets machines remove: update secrets after removing the key
Quick follow-up to PR #2781: this commit applies the same kind of logic,
but for machines instead of users and groups.

Note that this only affects the `clan secrets machines remove`
sub-command, and that `clan machines delete` still leaves unusable
secrets & vars behind. This can be addressed in a different change.
2025-02-13 03:53:23 +00:00
Luis Hebendanz
15e8df894e Merge pull request 'docs: fix git rm step in Add Machines' (#2853) from OliverNChalk/clan-core:main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2853
Reviewed-by: kenji <aks.kenji@protonmail.com>
2025-02-11 07:39:03 +00:00
OliverNChalk
50924ad7ff docs: fix git rm step in Add Machines 2025-02-11 09:41:25 +11:00
Michael Hoang
2e212e3e31 zerotierone: fix on macOS 2025-02-10 20:56:31 +07:00
Qubasa
23b57b0a3a clan-cli: Mark new test as impure 2025-02-10 20:36:55 +07:00
Qubasa
69d092c46b clan-cli: Add update_clan boolean option to create_clan 2025-02-10 20:36:55 +07:00
Qubasa
2663a181d0 clan-cli: Fix disko template to not fail because of missing bootloader. 2025-02-10 20:36:55 +07:00
Qubasa
9ab81a9c5d clan-cli: Add one more test for checking Flake with git+file: 2025-02-10 20:36:55 +07:00
lassulus
0872b781d7 clan-cli: add persistent flake caching 2025-02-10 13:29:01 +00:00
Jörg Thalheim
86e91c8604 cli: fix build on macOS 2025-02-10 17:41:50 +07:00
hsjobeki
14377f25c9 Merge pull request 'CLI: use partial update for machine create' (#2848) from hsjobeki/clan-core:hsjobeki-main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2848
2025-02-10 09:02:23 +00:00
Johannes Kirschbauer
9b706c148b Inventory: automatically create empty file on write 2025-02-10 09:02:23 +00:00
Johannes Kirschbauer
dee284d669 CLI: machine create use patch inventory for partial updates 2025-02-10 09:02:23 +00:00
lassulus
718e553211 clan_cli flake caching: support outPath 2025-02-10 04:33:37 +00:00
Clan Merge Bot
cbe3cb94b7 update flake lock - nixpkgs - 2025-02-10T00:00+00:00
Flake lock file updates:

• Updated input 'nixpkgs':
    'github:NixOS/nixpkgs/102a39bfee444533e6b4e8611d7e92aa39b7bec1?narHash=sha256-Q4vhtbLYWBUnjWD4iQb003Lt%2BN5PuURDad1BngGKdUs%3D' (2025-02-01)
  → 'github:NixOS/nixpkgs/fa35a3c8e17a3de613240fea68f876e5b4896aec?narHash=sha256-7Fu7oazPoYCbDzb9k8D/DdbKrC3aU1zlnc39Y8jy/s8%3D' (2025-02-08)
2025-02-10 04:12:52 +00:00
hsjobeki
91661da320 revert 283aad7ea0
revert ADR: init clanModules architecture decision
2025-02-10 03:42:44 +00:00
Clan Merge Bot
7ebc11f96f update flake lock - disko - 2025-02-10T00:00+00:00
Flake lock file updates:

• Updated input 'disko':
    'github:nix-community/disko/18d0a984cc2bc82cf61df19523a34ad463aa7f54?narHash=sha256-KYOATYEwaKysL3HdHdS5kbQMXvzS4iPJzJrML%2B3TKAo%3D' (2025-01-29)
  → 'github:nix-community/disko/ff3568858c54bd306e9e1f2886f0f781df307dff?narHash=sha256-3Z40qHaFScWUCVQrGc4Y%2BRdoPsh1R/wIh%2BAN4cTXP0I%3D' (2025-02-05)
2025-02-10 00:40:28 +00:00
Clan Merge Bot
27ef7040c2 update flake lock - treefmt-nix - 2025-02-10T00:00+00:00
Flake lock file updates:

• Updated input 'treefmt-nix':
    'github:numtide/treefmt-nix/bebf27d00f7d10ba75332a0541ac43676985dea3?narHash=sha256-j6jC12vCFsTGDmY2u1H12lMr62fnclNjuCtAdF1a4Nk%3D' (2025-01-28)
  → 'github:numtide/treefmt-nix/4f09b473c936d41582dd744e19f34ec27592c5fd?narHash=sha256-yrK3Hjcr8F7qS/j2F%2Br7C7o010eVWWlm4T1PrbKBOxQ%3D' (2025-02-07)
2025-02-10 00:00:45 +00:00
Johannes Kirschbauer
283aad7ea0 ADR: init clanModules architecture decision 2025-02-09 05:04:36 +00:00
Mic92
775088ccd9 Merge pull request 'fix-repo-sync' (#2834) from fix-repo-sync into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2834
2025-02-08 14:59:52 +00:00
Jörg Thalheim
d71a8329f2 fix missing permissions for repo-sync 2025-02-08 14:37:46 +01:00
Jörg Thalheim
022d0babc5 fix dependabot settings 2025-02-08 14:37:36 +01:00
hsjobeki
934d8fc2a4 Merge pull request 'inventory: refactor role resolution into submodule' (#2826) from hsjobeki/clan-core:hsjobeki-main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2826
2025-02-08 04:03:51 +00:00
Johannes Kirschbauer
e75b50e335 Add missing test dependency 2025-02-08 10:48:57 +07:00
Johannes Kirschbauer
f9fc6904f0 inventory: refactor role resolution into submodule 2025-02-08 10:43:54 +07:00
Johannes Kirschbauer
6deaab506a Inventory: test include missing dependency folder 2025-02-08 10:43:53 +07:00
lassulus
32748c14f4 clan_cli machines: use Flake instead of FlakeId 2025-02-07 06:26:09 +01:00
a-kenji
6d2845c645 pkgs/cli: Rename create_file -> persist 2025-02-06 14:59:59 +00:00
kenji
4899c38e52 Merge pull request 'pkgs/cli: Add toplevel aliases' (#2820) from kenji/clan-core:feat/alias/toplevel into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/2820
2025-02-06 14:56:57 +00:00
a-kenji
0d69d72899 pkgs/cli: Add toplevel aliases 2025-02-06 14:56:57 +00:00
Jörg Thalheim
34904b8758 add dependabot 2025-02-06 18:02:01 +07:00
lassulus
51d65873a7 clan-cli: test_flake_caching: add actual flake caching test 2025-02-05 10:17:37 +00:00
Johannes Kirschbauer
02929e9d42 Inventory: migrate import and config resolution into a module 2025-02-05 16:23:30 +07:00
Johannes Kirschbauer
2018de8d9e Inventory: move build inventory into a module 2025-02-05 11:06:43 +07:00
260 changed files with 10019 additions and 5588 deletions

View File

@@ -8,5 +8,5 @@ jobs:
checks-impure:
runs-on: nix
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- run: nix run .#impure-checks

View File

@@ -7,7 +7,7 @@ jobs:
deploy-docs:
runs-on: nix
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- run: nix run .#deploy-docs
env:
SSH_HOMEPAGE_KEY: ${{ secrets.SSH_HOMEPAGE_KEY }}

.github/dependabot.yml (vendored, new file, +6 lines)
View File

@@ -0,0 +1,6 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"

View File

@@ -3,10 +3,8 @@ on:
schedule:
- cron: "39 * * * *"
workflow_dispatch:
permissions:
contents: write
jobs:
repo-sync:
if: github.repository_owner == 'clan-lol'
@@ -15,10 +13,15 @@ jobs:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: actions/create-github-app-token@v1
id: app-token
with:
app-id: ${{ vars.CI_APP_ID }}
private-key: ${{ secrets.CI_PRIVATE_KEY }}
- name: repo-sync
uses: repo-sync/github-sync@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ steps.app-token.outputs.token }}
with:
source_repo: "https://git.clan.lol/clan/clan-core.git"
source_branch: "main"

View File

@@ -1,3 +1,4 @@
# Contributing to Clan
<!-- Local file: docs/CONTRIBUTING.md -->
Go to the Contributing guide at https://docs.clan.lol/manual/contribute/

View File

@@ -5,6 +5,12 @@
fileSystems."/".device = "/dev/null";
boot.loader.grub.device = "/dev/null";
};
clan.inventory.services = {
borgbackup.test-backup = {
roles.client.machines = [ "test-backup" ];
roles.server.machines = [ "test-backup" ];
};
};
flake.nixosModules = {
test-backup =
{
@@ -22,12 +28,20 @@
in
{
imports = [
self.clanModules.borgbackup
# Do not import inventory modules. They should be configured via 'clan.inventory'
#
# TODO: Configure localbackup via inventory
self.clanModules.localbackup
];
# Borgbackup overrides
services.borgbackup.repos.test-backups = {
path = "/var/lib/borgbackup/test-backups";
authorizedKeys = [ (builtins.readFile ../lib/ssh/pubkey) ];
};
clan.borgbackup.destinations.test-backup.repo = lib.mkForce "borg@machine:.";
clan.core.networking.targetHost = "machine";
networking.hostName = "machine";
nixpkgs.hostPlatform = "x86_64-linux";
programs.ssh.knownHosts = {
machine.hostNames = [ "machine" ];
@@ -108,7 +122,6 @@
'';
folders = [ "/var/test-service" ];
};
clan.borgbackup.destinations.test-backup.repo = "borg@machine:.";
fileSystems."/mnt/external-disk" = {
device = "/dev/vdb"; # created in tests with virtualisation.emptyDisks
@@ -129,11 +142,6 @@
touch /run/unmount-external-disk
'';
};
services.borgbackup.repos.test-backups = {
path = "/var/lib/borgbackup/test-backups";
authorizedKeys = [ (builtins.readFile ../lib/ssh/pubkey) ];
};
};
};
perSystem =
@@ -152,27 +160,27 @@
"flake.lock"
"flakeModules"
"inventory.json"
"lib/build-clan"
"lib/default.nix"
"lib/flake-module.nix"
"lib/frontmatter"
"lib/inventory"
"nixosModules"
# Just include everything in 'lib',
# since anything that changes in /lib may affect everything
"lib"
];
};
in
{
# Needs investigation on aarch64-linux
# vm-test-run-test-backups> qemu-kvm: No machine specified, and there is no default
# vm-test-run-test-backups> Use -machine help to list supported machines
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux") {
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
test-backups = (import ../lib/container-test.nix) {
name = "test-backups";
nodes.machine = {
imports = [
imports =
[
self.nixosModules.clanCore
# Some custom overrides for the backup tests
self.nixosModules.test-backup
];
]
++
# import the inventory generated nixosModules
self.clanInternals.inventoryClass.machines.test-backup.machineImports;
clan.core.settings.directory = ./.;
environment.systemPackages = [
(pkgs.writeShellScriptBin "foo" ''

View File

@@ -1,7 +1,7 @@
(import ../lib/container-test.nix) (
{ ... }:
{
name = "secrets";
name = "container";
nodes.machine =
{ ... }:

View File

@@ -0,0 +1,138 @@
(import ../lib/test-base.nix) (
{ self, lib, ... }:
let
inherit (self.lib.inventory) buildInventory;
machines = [
"signer"
"admin"
"peer"
];
serviceConfigs = buildInventory {
inventory = {
machines = lib.genAttrs machines (_: { });
services = {
data-mesher.default = {
roles.peer.machines = [ "peer" ];
roles.admin.machines = [ "admin" ];
roles.signer.machines = [ "signer" ];
};
};
modules = {
data-mesher = self.clanModules.data-mesher;
};
};
directory = ./.;
};
commonConfig =
{ config, ... }:
{
imports = [ self.nixosModules.clanCore ];
clan.core.settings.directory = builtins.toString ./.;
environment.systemPackages = [
config.services.data-mesher.package
];
clan.core.vars.settings.publicStore = "in_repo";
clan.core.vars.settings.secretStore = "vm";
clan.data-mesher.network.interface = "eth1";
clan.data-mesher.bootstrapNodes = [
"[2001:db8:1::1]:7946" # peer1
"[2001:db8:1::2]:7946" # peer2
];
# speed up for testing
services.data-mesher.settings = {
cluster.join_interval = lib.mkForce "2s";
cluster.push_pull_interval = lib.mkForce "5s";
};
systemd.tmpfiles.settings."vmsecrets" = {
"/etc/secrets" = {
C.argument = "${./vars/secret/${config.clan.core.settings.machine.name}}";
z = {
mode = "0700";
user = "data-mesher";
};
};
};
};
adminConfig = {
imports = serviceConfigs.machines.admin.machineImports;
config.clan.data-mesher.network.tld = "foo";
config.clan.core.settings.machine.name = "admin";
};
peerConfig = {
imports = serviceConfigs.machines.peer.machineImports;
config.clan.core.settings.machine.name = "peer";
};
signerConfig = {
imports = serviceConfigs.machines.signer.machineImports;
clan.core.settings.machine.name = "signer";
};
in
{
name = "data-mesher";
nodes = {
peer = {
imports = [
peerConfig
commonConfig
];
};
admin = {
imports = [
adminConfig
commonConfig
];
};
signer = {
imports = [
signerConfig
commonConfig
];
};
};
# TODO Add better test script.
testScript = ''
def resolve(node, success = {}, fail = [], timeout = 60):
for hostname, ips in success.items():
for ip in ips:
node.wait_until_succeeds(f"getent ahosts {hostname} | grep {ip}", timeout)
for hostname in fail:
node.wait_until_fails(f"getent ahosts {hostname}")
start_all()
admin.wait_for_unit("data-mesher")
signer.wait_for_unit("data-mesher")
peer.wait_for_unit("data-mesher")
# check dns resolution
for node in [admin, signer, peer]:
resolve(node, {
"admin.foo": ["2001:db8:1::1", "192.168.1.1"],
"peer.foo": ["2001:db8:1::2", "192.168.1.2"],
"signer.foo": ["2001:db8:1::3", "192.168.1.3"]
})
'';
}
)

View File

@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAV/XZHv1UQEEzfD2YbJP1Q2jd1ZDG+CP5wvGf/1hcR+Q=
-----END PUBLIC KEY-----

View File

@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAKSSUXJCftt5Vif6ek57CNKBcDRNfrWrxZUHjAIFW9HY=
-----END PUBLIC KEY-----

View File

@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAvLD0mHQA+hf9ItlUHD0ml3i5XEArmmjwCC5rYEOmzWs=
-----END PUBLIC KEY-----

View File

@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIFX+AzHy821hHqWLPeK3nzRuHod3FNrnPfaDoFvpz6LX
-----END PRIVATE KEY-----

View File

@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIMwuDntiLoC7cFFyttGDf7cQWlOXOR0q90Jz3lEiuLg+
-----END PRIVATE KEY-----

View File

@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIPmH2+vjYG6UOp+/g0Iqu7yZZKId5jffrfsySE36yO+D
-----END PRIVATE KEY-----

View File

@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEINS0tSnjHPG8IfpzQAS3wzoJA+4mYM70DIpltN8O4YD7
-----END PRIVATE KEY-----

View File

@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEA3P18+R5Gt+Jn7wYXpWNTXM5pyWn2WiOWekYCzXqWPwg=
-----END PUBLIC KEY-----

View File

@@ -12,6 +12,8 @@ in
./flash/flake-module.nix
./impure/flake-module.nix
./installation/flake-module.nix
./installation-without-system/flake-module.nix
./morph/flake-module.nix
./nixos-documentation/flake-module.nix
];
perSystem =
@@ -39,6 +41,7 @@ in
borgbackup = import ./borgbackup nixosTestArgs;
matrix-synapse = import ./matrix-synapse nixosTestArgs;
mumble = import ./mumble nixosTestArgs;
data-mesher = import ./data-mesher nixosTestArgs;
syncthing = import ./syncthing nixosTestArgs;
zt-tcp-relay = import ./zt-tcp-relay nixosTestArgs;
postgresql = import ./postgresql nixosTestArgs;
@@ -48,7 +51,7 @@ in
flakeOutputs =
lib.mapAttrs' (
name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
) self.nixosConfigurations
) (lib.filterAttrs (n: _: !lib.hasPrefix "test-" n) self.nixosConfigurations)
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages
// lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
// lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (

View File

@@ -1,12 +1,26 @@
{ self, lib, ... }:
{
clan.machines.test-flash-machine = {
config,
self,
lib,
...
}:
{
clan.machines = lib.listToAttrs (
lib.map (
system:
lib.nameValuePair "test-flash-machine-${system}" {
clan.core.networking.targetHost = "test-flash-machine";
fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";
# We need to use `mkForce` because we inherit from `test-install-machine`
# which currently hardcodes `nixpkgs.hostPlatform`
nixpkgs.hostPlatform = lib.mkForce system;
imports = [ self.nixosModules.test-flash-machine ];
};
}
) (lib.filter (lib.hasSuffix "linux") config.systems)
);
flake.nixosModules = {
test-flash-machine =
@@ -30,20 +44,20 @@
let
dependencies = [
pkgs.disko
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.pkgs.perlPackages.ConfigIniFiles
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.pkgs.perlPackages.FileSlurp
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.ConfigIniFiles
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.FileSlurp
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.build.toplevel
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.build.diskoScript
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.build.diskoScript.drvPath
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.clan.deployment.file
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript.drvPath
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.clan.deployment.file
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
flash = (import ../lib/test-base.nix) {
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
test-flash = (import ../lib/test-base.nix) {
name = "flash";
nodes.target = {
virtualisation.emptyDiskImages = [ 4096 ];
@@ -65,7 +79,7 @@
testScript = ''
start_all()
machine.succeed("clan flash write --debug --flake ${../..} --yes --disk main /dev/vdb test-flash-machine")
machine.succeed("clan flash write --debug --flake ${../..} --yes --disk main /dev/vdb test-flash-machine-${pkgs.hostPlatform.system}")
'';
} { inherit pkgs self; };
};

View File

@@ -0,0 +1,241 @@
{
self,
lib,
...
}:
{
# The purpose of this test is to ensure `clan machines install` works
# for machines that don't have a hardware config yet.
# If this test starts failing, it could be due to the `facter.json` being out of date;
# you can get a new one by adding
# client.fail("cat test-flake/machines/test-install-machine/facter.json >&2")
# to the installation test.
clan.machines.test-install-machine-without-system = {
fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";
imports = [ self.nixosModules.test-install-machine-without-system ];
};
clan.machines.test-install-machine-with-system =
{ pkgs, ... }:
{
# https://git.clan.lol/clan/test-fixtures
facter.reportPath = builtins.fetchurl {
url = "https://git.clan.lol/clan/test-fixtures/raw/commit/4a2bc56d886578124b05060d3fb7eddc38c019f8/nixos-vm-facter-json/${pkgs.hostPlatform.system}.json";
sha256 =
{
aarch64-linux = "sha256:1rlfymk03rmfkm2qgrc8l5kj5i20srx79n1y1h4nzlpwaz0j7hh2";
x86_64-linux = "sha256:16myh0ll2gdwsiwkjw5ba4dl23ppwbsanxx214863j7nvzx42pws";
}
.${pkgs.hostPlatform.system};
};
fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";
imports = [ self.nixosModules.test-install-machine-without-system ];
};
flake.nixosModules = {
test-install-machine-without-system =
{ lib, modulesPath, ... }:
{
imports = [
(modulesPath + "/testing/test-instrumentation.nix") # we need these 2 modules always to be able to run the tests
(modulesPath + "/profiles/qemu-guest.nix")
../lib/minify.nix
];
networking.hostName = "test-install-machine";
environment.etc."install-successful".text = "ok";
boot.consoleLogLevel = lib.mkForce 100;
boot.kernelParams = [ "boot.shell_on_fail" ];
# disko config
boot.loader.grub.efiSupport = lib.mkDefault true;
boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
clan.core.vars.settings.secretStore = "vm";
clan.core.vars.generators.test = {
files.test.neededFor = "partitioning";
script = ''
echo "notok" > $out/test
'';
};
disko.devices = {
disk = {
main = {
type = "disk";
device = "/dev/vda";
preCreateHook = ''
test -e /run/partitioning-secrets/test/test
'';
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
priority = 1;
};
ESP = {
size = "512M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
mountOptions = [ "umask=0077" ];
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
};
};
};
perSystem =
{
pkgs,
lib,
...
}:
let
dependencies = [
self
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.clan.deployment.file
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.nixos-anywhere
pkgs.bubblewrap
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
# with Nix 2.24 we get:
# vm-test-run-test-installation> client # error: sized: unexpected end-of-file
# vm-test-run-test-installation> client # error: unexpected end-of-file
# This seems to be fixed with Nix 2.26
# Remove this line once `pkgs.nix` is 2.26+
nixPackage =
assert
lib.versionOlder pkgs.nix.version "2.26"
&& lib.versionAtLeast pkgs.nixVersions.latest.version "2.26";
pkgs.nixVersions.latest;
in
{
# On aarch64-linux, hangs on reboot after installation:
# vm-test-run-test-installation-without-system> installer # [ 288.002871] reboot: Restarting system
# vm-test-run-test-installation-without-system> client # [test-install-machine] ### Done! ###
# vm-test-run-test-installation-without-system> client # [test-install-machine] + step 'Done!'
# vm-test-run-test-installation-without-system> client # [test-install-machine] + echo '### Done! ###'
# vm-test-run-test-installation-without-system> client # [test-install-machine] + rm -rf /tmp/tmp.qb16EAq7hJ
# vm-test-run-test-installation-without-system> (finished: must succeed: clan machines install --debug --flake test-flake --yes test-install-machine-without-system --target-host root@installer --update-hardware-config nixos-facter >&2, in 154.62 seconds)
# vm-test-run-test-installation-without-system> target: starting vm
# vm-test-run-test-installation-without-system> target: QEMU running (pid 144)
# vm-test-run-test-installation-without-system> target: waiting for unit multi-user.target
# vm-test-run-test-installation-without-system> target: waiting for the VM to finish booting
# vm-test-run-test-installation-without-system> target: Guest root shell did not produce any data yet...
# vm-test-run-test-installation-without-system> target: To debug, enter the VM and run 'systemctl status backdoor.service'.
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
test-installation-without-system = (import ../lib/test-base.nix) {
name = "test-installation-without-system";
nodes.target = {
services.openssh.enable = true;
virtualisation.diskImage = "./target.qcow2";
virtualisation.useBootLoader = true;
nix.package = nixPackage;
};
nodes.installer =
{ modulesPath, ... }:
{
imports = [
(modulesPath + "/../tests/common/auto-format-root-device.nix")
];
services.openssh.enable = true;
system.nixos.variant_id = "installer";
environment.systemPackages = [ pkgs.nixos-facter ];
virtualisation.emptyDiskImages = [ 512 ];
virtualisation.diskSize = 8 * 1024;
virtualisation.rootDevice = "/dev/vdb";
# both installer and target need to use the same diskImage
virtualisation.diskImage = "./target.qcow2";
nix.package = nixPackage;
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
experimental-features = [
"nix-command"
"flakes"
];
};
users.users.nonrootuser = {
isNormalUser = true;
openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
extraGroups = [ "wheel" ];
};
security.sudo.wheelNeedsPassword = false;
system.extraDependencies = dependencies;
};
nodes.client = {
environment.systemPackages = [
self.packages.${pkgs.system}.clan-cli
] ++ self.packages.${pkgs.system}.clan-cli.runtimeDependencies;
environment.etc."install-closure".source = "${closureInfo}/store-paths";
virtualisation.memorySize = 3048;
nix.package = nixPackage;
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
experimental-features = [
"nix-command"
"flakes"
];
};
system.extraDependencies = dependencies;
};
testScript = ''
client.start()
installer.start()
client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
client.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v nonrootuser@installer hostname")
client.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
client.fail("test -f test-flake/machines/test-install-machine-without-system/hardware-configuration.nix")
client.fail("test -f test-flake/machines/test-install-machine-without-system/facter.json")
client.succeed("clan machines update-hardware-config --flake test-flake test-install-machine-without-system nonrootuser@installer >&2")
client.succeed("test -f test-flake/machines/test-install-machine-without-system/facter.json")
client.succeed("rm test-flake/machines/test-install-machine-without-system/facter.json")
client.succeed("clan machines install --debug --flake test-flake --yes test-install-machine-without-system --target-host nonrootuser@installer --update-hardware-config nixos-facter >&2")
try:
installer.shutdown()
except BrokenPipeError:
# qemu has already exited
pass
target.state_dir = installer.state_dir
target.start()
target.wait_for_unit("multi-user.target")
assert(target.succeed("cat /etc/install-successful").strip() == "ok")
'';
} { inherit pkgs self; };
};
};
}

View File

@@ -23,7 +23,6 @@
environment.etc."install-successful".text = "ok";
nixpkgs.hostPlatform = "x86_64-linux";
boot.consoleLogLevel = lib.mkForce 100;
boot.kernelParams = [ "boot.shell_on_fail" ];
@@ -89,14 +88,26 @@
let
dependencies = [
self
self.nixosConfigurations.test-install-machine.config.system.build.toplevel
self.nixosConfigurations.test-install-machine.config.system.build.diskoScript
self.nixosConfigurations.test-install-machine.config.system.clan.deployment.file
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.toplevel
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.diskoScript
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.clan.deployment.file
pkgs.bash.drvPath
pkgs.stdenv.drvPath
pkgs.nixos-anywhere
pkgs.bubblewrap
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
# with Nix 2.24 we get:
# vm-test-run-test-installation> client # error: sized: unexpected end-of-file
# vm-test-run-test-installation> client # error: unexpected end-of-file
# This seems to be fixed with Nix 2.26
# Remove this line once `pkgs.nix` is 2.26+
nixPackage =
assert
lib.versionOlder pkgs.nix.version "2.26"
&& lib.versionAtLeast pkgs.nixVersions.latest.version "2.26";
pkgs.nixVersions.latest;
in
{
# On aarch64-linux, hangs on reboot after installation:
@@ -108,13 +119,14 @@
# vm-test-run-test-installation> new_machine: QEMU running (pid 80)
# vm-test-run-test-installation> new_machine: Guest root shell did not produce any data yet...
# vm-test-run-test-installation> new_machine: To debug, enter the VM and run 'systemctl status backdoor.service'.
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux") {
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
test-installation = (import ../lib/test-base.nix) {
name = "test-installation";
nodes.target = {
services.openssh.enable = true;
virtualisation.diskImage = "./target.qcow2";
virtualisation.useBootLoader = true;
nix.package = nixPackage;
# virtualisation.fileSystems."/" = {
# device = "/dev/disk/by-label/this-is-not-real-and-will-never-be-used";
@@ -136,6 +148,7 @@
virtualisation.rootDevice = "/dev/vdb";
# both installer and target need to use the same diskImage
virtualisation.diskImage = "./target.qcow2";
nix.package = nixPackage;
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
@@ -153,7 +166,8 @@
self.packages.${pkgs.system}.clan-cli
] ++ self.packages.${pkgs.system}.clan-cli.runtimeDependencies;
environment.etc."install-closure".source = "${closureInfo}/store-paths";
virtualisation.memorySize = 2048;
virtualisation.memorySize = 3048;
nix.package = nixPackage;
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
@@ -174,12 +188,19 @@
client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
client.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v root@installer hostname")
client.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
# test that we can generate hardware configurations
client.fail("test -f test-flake/machines/test-install-machine/facter.json")
client.fail("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
client.succeed("clan machines update-hardware-config --flake test-flake test-install-machine root@installer >&2")
client.succeed("test -f test-flake/machines/test-install-machine/facter.json")
client.succeed("clan machines update-hardware-config --backend nixos-generate-config --flake test-flake test-install-machine root@installer>&2")
client.succeed("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
client.succeed("clan machines install --debug --flake ${../..} --yes test-install-machine --target-host root@installer >&2")
# but we don't use them because they're not cached
client.succeed("rm test-flake/machines/test-install-machine/hardware-configuration.nix test-flake/machines/test-install-machine/facter.json")
client.succeed("clan machines install --debug --flake test-flake --yes test-install-machine --target-host root@installer >&2")
try:
installer.shutdown()
except BrokenPipeError:

View File

@@ -16,6 +16,9 @@ in
documentation.enable = lib.mkDefault false;
boot.isContainer = true;
# needed since nixpkgs 7fb2f407c01b017737eafc26b065d7f56434a992 removed the getty unit by default
console.enable = true;
# undo qemu stuff
system.build.initialRamdisk = "";
virtualisation.sharedDirectories = lib.mkForce { };
@@ -31,6 +34,7 @@ in
};
# to accept external dependencies such as disko
node.specialArgs.self = self;
_module.args = { inherit self; };
imports = [
test
./container-driver/module.nix

View File

@@ -1,7 +1,8 @@
{ lib, ... }:
{
nixpkgs.flake.setFlakeRegistry = false;
nixpkgs.flake.setNixPath = false;
nix.registry.nixpkgs.to = { };
nix.registry = lib.mkForce { };
documentation.doc.enable = false;
documentation.man.enable = false;
}

View File

@@ -7,15 +7,19 @@ in
(nixos-lib.runTest {
hostPkgs = pkgs;
# speed-up evaluation
defaults = {
defaults = (
{ config, ... }:
{
imports = [
./minify.nix
];
documentation.enable = lib.mkDefault false;
nix.settings.min-free = 0;
system.stateVersion = lib.version;
};
system.stateVersion = config.system.nixos.release;
}
);
_module.args = { inherit self; };
# to accept external dependencies such as disko
node.specialArgs.self = self;
imports = [ test ];

View File

@@ -0,0 +1,62 @@
{
self,
...
}:
{
clan.machines.test-morph-machine = {
imports = [
./template/configuration.nix
self.nixosModules.clanCore
];
nixpkgs.hostPlatform = "x86_64-linux";
environment.etc."testfile".text = "morphed";
};
clan.templates.machine.test-morph-template = {
description = "Morph a machine";
path = ./template;
};
perSystem =
{
pkgs,
...
}:
{
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
test-morph = (import ../lib/test-base.nix) {
name = "morph";
nodes = {
actual =
{ pkgs, ... }:
let
dependencies = [
self
pkgs.nixos-anywhere
pkgs.stdenv.drvPath
pkgs.stdenvNoCC
self.nixosConfigurations.test-morph-machine.config.system.build.toplevel
self.nixosConfigurations.test-morph-machine.config.system.clan.deployment.file
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
environment.etc."install-closure".source = "${closureInfo}/store-paths";
system.extraDependencies = dependencies;
virtualisation.memorySize = 2048;
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
};
};
testScript = ''
start_all()
actual.fail("cat /etc/testfile")
actual.succeed("env CLAN_DIR=${self} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
assert actual.succeed("cat /etc/testfile") == "morphed"
'';
} { inherit pkgs self; };
};
};
}

View File

@@ -0,0 +1,12 @@
{ modulesPath, ... }:
{
imports = [
# we need these 2 modules always to be able to run the tests
(modulesPath + "/testing/test-instrumentation.nix")
(modulesPath + "/virtualisation/qemu-vm.nix")
(modulesPath + "/profiles/minimal.nix")
];
clan.core.enableRecommendedDefaults = false;
}

View File

@@ -0,0 +1,8 @@
---
description = "Set up automatic upgrades"
categories = ["System"]
features = [ "inventory" ]
---
Whether to periodically upgrade NixOS to the latest version. If enabled, a
systemd timer will run `nixos-rebuild switch --upgrade` once a day.

View File

@@ -0,0 +1,24 @@
{
config,
lib,
...
}:
let
cfg = config.clan.autoUpgrade;
in
{
options.clan.autoUpgrade = {
flake = lib.mkOption {
type = lib.types.str;
description = "Flake reference";
};
};
config = {
system.autoUpgrade = {
inherit (cfg) flake; # `cfg` is already `config.clan.autoUpgrade`
enable = true;
dates = "02:00";
randomizedDelaySec = "45min";
};
};
}

View File

@@ -0,0 +1,10 @@
---
description = "Set up data-mesher"
categories = ["System"]
features = [ "inventory" ]
[constraints]
roles.admin.min = 1
roles.admin.max = 1
---

View File

@@ -0,0 +1,19 @@
lib: {
machines =
config:
let
instanceNames = builtins.attrNames config.clan.inventory.services.data-mesher;
instanceName = builtins.head instanceNames;
dataMesherInstances = config.clan.inventory.services.data-mesher.${instanceName};
uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list);
in
rec {
admins = dataMesherInstances.roles.admin.machines or [ ];
signers = dataMesherInstances.roles.signer.machines or [ ];
peers = dataMesherInstances.roles.peer.machines or [ ];
bootstrap = uniqueStrings (admins ++ signers);
};
}

View File

@@ -0,0 +1,51 @@
{ lib, config, ... }:
let
cfg = config.clan.data-mesher;
dmLib = import ../lib.nix lib;
in
{
imports = [
../shared.nix
];
options.clan.data-mesher = {
network = {
tld = lib.mkOption {
type = lib.types.str;
default = (config.networking.domain or "clan");
description = "Top level domain to use for the network";
};
hostTTL = lib.mkOption {
type = lib.types.str;
default = "672h"; # 28 days
example = "24h";
description = "The TTL for hosts in the network, in the form of a Go time.Duration";
};
};
};
config = {
services.data-mesher.initNetwork =
let
# for a given machine, read its public key and remove any newlines
readHostKey =
machine:
let
path = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/data-mesher-host-key/public_key/value";
in
builtins.elemAt (lib.splitString "\n" (builtins.readFile path)) 1;
in
{
enable = true;
keyPath = config.clan.core.vars.generators.data-mesher-network-key.files.private_key.path;
tld = cfg.network.tld;
hostTTL = cfg.network.hostTTL;
# admin and signer host public keys
signingKeys = builtins.map readHostKey (dmLib.machines config).bootstrap;
};
};
}

View File

@@ -0,0 +1,5 @@
{
imports = [
../shared.nix
];
}

View File

@@ -0,0 +1,5 @@
{
imports = [
../shared.nix
];
}

View File

@@ -0,0 +1,154 @@
{
config,
lib,
...
}:
let
cfg = config.clan.data-mesher;
dmLib = import ./lib.nix lib;
# the default bootstrap nodes are any machines with the admin or signer role
# we iterate through those machines, determining an IP address for them based on their VPN
# currently only supports zerotier
defaultBootstrapNodes = builtins.foldl' (
urls: name:
if
builtins.pathExists "${config.clan.core.settings.directory}/machines/${name}/facts/zerotier-ip"
then
let
ip = builtins.readFile "${config.clan.core.settings.directory}/machines/${name}/facts/zerotier-ip";
in
urls ++ [ "${ip}:${builtins.toString cfg.network.port}" ]
else
urls
) [ ] (dmLib.machines config).bootstrap;
in
{
options.clan.data-mesher = {
bootstrapNodes = lib.mkOption {
type = lib.types.nullOr (lib.types.listOf lib.types.str);
default = null;
description = ''
A list of bootstrap nodes that act as an initial gateway when joining
the cluster.
'';
example = [
"192.168.1.1:7946"
"192.168.1.2:7946"
];
};
network = {
interface = lib.mkOption {
type = lib.types.str;
description = ''
The interface over which cluster communication should be performed.
All the IP addresses associated with this interface will be part of
our host claim, including both IPv4 and IPv6.
This should be set to an internal/VPN interface.
'';
example = "tailscale0";
};
port = lib.mkOption {
type = lib.types.port;
default = 7946;
description = ''
Port to listen on for cluster communication.
'';
};
};
};
config = {
services.data-mesher = {
enable = true;
openFirewall = true;
settings = {
log_level = "warn";
state_dir = "/var/lib/data-mesher";
# read network id from vars
network.id = config.clan.core.vars.generators.data-mesher-network-key.files.public_key.value;
host = {
names = [ config.networking.hostName ];
key_path = config.clan.core.vars.generators.data-mesher-host-key.files.private_key.path;
};
cluster = {
port = cfg.network.port;
join_interval = "30s";
push_pull_interval = "30s";
interface = cfg.network.interface;
bootstrap_nodes = cfg.bootstrapNodes or defaultBootstrapNodes;
};
http.port = 7331;
http.interface = "lo";
};
};
# Generate host key.
clan.core.vars.generators.data-mesher-host-key = {
files =
let
owner = config.users.users.data-mesher.name;
in
{
private_key = {
inherit owner;
};
public_key = {
inherit owner;
secret = false;
};
};
runtimeInputs = [
config.services.data-mesher.package
];
script = ''
data-mesher generate keypair \
--public-key-path $out/public_key \
--private-key-path $out/private_key
'';
};
clan.core.vars.generators.data-mesher-network-key = {
# generated once per clan
share = true;
files =
let
owner = config.users.users.data-mesher.name;
in
{
private_key = {
inherit owner;
};
public_key = {
inherit owner;
secret = false;
};
};
runtimeInputs = [
config.services.data-mesher.package
];
script = ''
data-mesher generate keypair \
--public-key-path $out/public_key \
--private-key-path $out/private_key
'';
};
};
}

View File

@@ -9,9 +9,11 @@ in
# only import available files, as this allows to filter the files for tests.
flake.clanModules = filterAttrs (_name: pathExists) {
admin = ./admin;
auto-upgrade = ./auto-upgrade;
borgbackup = ./borgbackup;
borgbackup-static = ./borgbackup-static;
deltachat = ./deltachat;
data-mesher = ./data-mesher;
disk-id = ./disk-id;
dyndns = ./dyndns;
ergochat = ./ergochat;

View File

@@ -3,8 +3,7 @@ description = "S3-compatible object store for small self-hosted geo-distributed
---
This module generates garage specific keys automatically.
When using garage in a distributed deployment the `rpc_key` between connected instances must be shared.
This is currently still a manual process.
Also shares the `rpc_secret` between instances.
Options: [NixosModuleOptions](https://search.nixos.org/options?channel=unstable&size=50&sort=relevance&type=packages&query=garage)
Documentation: https://garagehq.deuxfleurs.fr/

View File

@@ -2,9 +2,9 @@
{
systemd.services.garage.serviceConfig = {
LoadCredential = [
"rpc_secret_path:${config.clan.core.facts.services.garage.secret.garage_rpc_secret.path}"
"admin_token_path:${config.clan.core.facts.services.garage.secret.garage_admin_token.path}"
"metrics_token_path:${config.clan.core.facts.services.garage.secret.garage_metrics_token.path}"
"rpc_secret_path:${config.clan.core.vars.generators.garage-shared.files.rpc_secret.path}"
"admin_token_path:${config.clan.core.vars.generators.garage.files.admin_token.path}"
"metrics_token_path:${config.clan.core.vars.generators.garage.files.metrics_token.path}"
];
Environment = [
"GARAGE_ALLOW_WORLD_READABLE_SECRETS=true"
@@ -14,37 +14,30 @@
];
};
clan.core.facts.services.garage = {
secret.garage_rpc_secret = { };
secret.garage_admin_token = { };
secret.garage_metrics_token = { };
generator.path = [
clan.core.vars.generators.garage = {
files.admin_token = { };
files.metrics_token = { };
runtimeInputs = [
pkgs.coreutils
pkgs.openssl
];
generator.script = ''
openssl rand -hex -out $secrets/garage_rpc_secret 32
openssl rand -base64 -out $secrets/garage_admin_token 32
openssl rand -base64 -out $secrets/garage_metrics_token 32
script = ''
openssl rand -base64 -out $out/admin_token 32
openssl rand -base64 -out $out/metrics_token 32
'';
};
# TODO: Vars is not in a useable state currently
# Move back, once it is implemented.
# clan.core.vars.generators.garage = {
# files.rpc_secret = { };
# files.admin_token = { };
# files.metrics_token = { };
# runtimeInputs = [
# pkgs.coreutils
# pkgs.openssl
# ];
# script = ''
# openssl rand -hex -out $out/rpc_secret 32
# openssl rand -base64 -out $out/admin_token 32
# openssl rand -base64 -out $out/metrics_token 32
# '';
# };
clan.core.vars.generators.garage-shared = {
share = true;
files.rpc_secret = { };
runtimeInputs = [
pkgs.coreutils
pkgs.openssl
];
script = ''
openssl rand -hex -out $out/rpc_secret 32
'';
};
clan.core.state.garage.folders = [ config.services.garage.settings.metadata_dir ];
}

View File

@@ -6,4 +6,4 @@ categories = [ "Network" ]
!!! Warning
If you've been using network manager + wpa_supplicant and now are switching to IWD read this migration guide:
https://iwd.wiki.kernel.org/networkmanager#converting_network_profiles
https://archive.kernel.org/oldwiki/iwd.wiki.kernel.org/networkmanager.html#converting_network_profiles

View File

@@ -1,4 +1,9 @@
{ lib, config, ... }:
{
lib,
config,
pkgs,
...
}:
let
cfg = config.clan.iwd;
@@ -12,12 +17,13 @@ let
{
secret.${secret_name} = { };
generator.prompt = "Wifi password for '${value.ssid}'";
# ref. man iwd.network
generator.script = ''
config="
[Settings]
AutoConnect=${if value.AutoConnect then "true" else "false"}
[Security]
Passphrase=\"$prompt_value\"
Passphrase=$(echo -e "$prompt_value" | ${lib.getExe pkgs.gnused} "s=\\\=\\\\\\\=g;s=\t=\\\t=g;s=\r=\\\r=g;s=^ =\\\s=")
"
echo "$config" > "$secrets/${secret_name}"
'';

View File

@@ -10,18 +10,18 @@ let
in
{
config = lib.mkMerge [
(lib.mkIf ((var.machineId.value or null) != null) {
(lib.mkIf ((var.value or null) != null) {
assertions = [
{
assertion = lib.stringLength var.machineId.value == 32;
assertion = lib.stringLength var.value == 32;
message = "machineId must be exactly 32 characters long.";
}
];
boot.kernelParams = [
''systemd.machine_id=${var.machineId.value}''
''systemd.machine_id=${var.value}''
];
environment.etc."machine-id" = {
text = var.machineId.value;
text = var.value;
};
})
{

View File

@@ -24,14 +24,7 @@ mycelium.default = {
"berlin"
"munich"
];
config = {
topLevelDomain = "m";
openFirewall = true;
addHostedPublicNodes = true;
};
};
```
This will add the machines named `berlin` and `munich` to the `mycelium` vpn.
And will also set the toplevel domain of the mycelium vpn to `m`, meaning the
machines are now reachable via `berlin.m` and `munich.m`.

View File

@@ -4,54 +4,18 @@
lib,
...
}:
let
flake = config.clan.core.settings.directory;
machineName = config.clan.core.settings.machine.name;
# Instances might be empty, if the module is not used via the inventory
#
# Type: { ${instanceName} :: { roles :: Roles } }
# Roles :: { ${role_name} :: { machines :: [string] } }
instances = config.clan.inventory.services.mycelium or { };
allPeers = lib.foldlAttrs (
acc: _instanceName: instanceConfig:
acc
++ (
if (builtins.elem machineName instanceConfig.roles.peer.machines) then
instanceConfig.roles.peer.machines
else
[ ]
)
) [ ] instances;
allPeerConfigurations = lib.filterAttrs (n: _: builtins.elem n allPeers) flake.nixosConfigurations;
allPeersWithIp =
builtins.mapAttrs
(_: x: lib.removeSuffix "\n" x.config.clan.core.vars.generators.mycelium.files.ip.value)
(
lib.filterAttrs (
_: x: (builtins.tryEval x.config.clan.core.vars.generators.mycelium.files.ip.value).success
) allPeerConfigurations
);
ips = lib.attrValues allPeersWithIp;
peers = lib.concatMap (ip: [
"tcp://[${ip}]:9651"
"quic://[${ip}]:9651"
]) ips;
in
{
options = {
clan.mycelium.topLevelDomain = lib.mkOption {
type = lib.types.str;
default = "";
description = "Top level domain to reach hosts";
};
clan.mycelium.openFirewall = lib.mkEnableOption "Open the firewall for mycelium";
clan.mycelium.addHostedPublicNodes = lib.mkEnableOption "Add hosted Public nodes";
clan.mycelium.addHosts = lib.mkOption {
clan.mycelium.openFirewall = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Add mycelium ip's to the host file";
description = "Open the firewall for mycelium";
};
clan.mycelium.addHostedPublicNodes = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Add hosted Public nodes";
};
};
@@ -60,18 +24,8 @@ in
addHostedPublicNodes = lib.mkDefault config.clan.mycelium.addHostedPublicNodes;
openFirewall = lib.mkDefault config.clan.mycelium.openFirewall;
keyFile = config.clan.core.vars.generators.mycelium.files.key.path;
inherit peers;
};
config.networking.hosts = lib.mkIf (config.clan.mycelium.addHosts) (
lib.mapAttrs' (
host: ip:
lib.nameValuePair ip (
if (config.clan.mycelium.topLevelDomain == "") then [ host ] else [ "${host}.m" ]
)
) allPeersWithIp
);
config.clan.core.vars.generators.mycelium = {
files."key" = { };
files."ip".secret = false;

View File

@@ -12,6 +12,9 @@
files.password-hash = {
neededFor = "users";
};
files.password = {
deploy = false;
};
migrateFact = "root-password";
runtimeInputs = [
pkgs.coreutils

View File

@@ -37,6 +37,7 @@ in
type = "rsa";
};
};
clan.core.vars.generators.openssh = {
files."ssh.id_ed25519" = { };
files."ssh.id_ed25519.pub".secret = false;
@@ -50,6 +51,14 @@ in
'';
};
programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
hostNames = [
"localhost"
config.networking.hostName
] ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
};
clan.core.vars.generators.openssh-rsa = lib.mkIf config.clan.sshd.hostKeys.rsa.enable {
files."ssh.id_rsa" = { };
files."ssh.id_rsa.pub".secret = false;

View File

@@ -3,7 +3,7 @@ let
var = config.clan.core.vars.generators.state-version.files.version or { };
in
{
system.stateVersion = lib.mkDefault var.value;
system.stateVersion = lib.mkDefault (lib.removeSuffix "\n" var.value);
clan.core.vars.generators.state-version = {
files.version = {

View File

@@ -7,7 +7,8 @@
let
dir = config.clan.core.settings.directory;
machineDir = dir + "/machines/";
syncthingPublicKeyPath = machines: machineDir + machines + "/facts/syncthing.pub";
machineVarDir = dir + "/vars/per-machine/";
syncthingPublicKeyPath = machines: machineVarDir + machines + "/syncthing/id/value";
machinesFileSet = builtins.readDir machineDir;
machines = lib.mapAttrsToList (name: _: name) machinesFileSet;
syncthingPublicKeysUnchecked = builtins.map (
@@ -83,24 +84,26 @@ in
configDir = "/var/lib/syncthing";
group = "syncthing";
key = lib.mkDefault config.clan.core.facts.services.syncthing.secret."syncthing.key".path or null;
cert = lib.mkDefault config.clan.core.facts.services.syncthing.secret."syncthing.cert".path or null;
key = lib.mkDefault config.clan.core.vars.generators.syncthing.files.key.path or null;
cert = lib.mkDefault config.clan.core.vars.generators.syncthing.files.cert.path or null;
};
clan.core.facts.services.syncthing = {
secret."syncthing.key" = { };
secret."syncthing.cert" = { };
public."syncthing.pub" = { };
generator.path = [
clan.core.vars.generators.syncthing = {
files.key = { };
files.cert = { };
files.api = { };
files.id.secret = false;
runtimeInputs = [
pkgs.coreutils
pkgs.gnugrep
pkgs.syncthing
];
generator.script = ''
syncthing generate --config "$secrets"
mv "$secrets"/key.pem "$secrets"/syncthing.key
mv "$secrets"/cert.pem "$secrets"/syncthing.cert
cat "$secrets"/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > "$facts"/syncthing.pub
script = ''
syncthing generate --config $out
mv $out/key.pem $out/key
mv $out/cert.pem $out/cert
cat $out/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > $out/id
cat $out/config.xml | grep -oP '<apikey>\K[^<]+' | uniq > $out/api
'';
};
}

View File

@@ -1,5 +1,5 @@
---
description = "Configures [Zerotier VPN](https://zerotier.com) secure and efficient networking within a Clan.."
description = "Configures [Zerotier VPN](https://zerotier.com) secure and efficient networking within a Clan."
features = [ "inventory" ]
categories = [ "Network", "System" ]

decisions/01-ClanModules.md (new file, +547 lines)
View File

@@ -0,0 +1,547 @@
# Clan service modules
Status: Accepted
## Context
To define a service in Clan, you need to define two things:
- `clanModule` - defined by module authors
- `inventory` - defined by users
The `clanModule` is currently a plain NixOS module. It is conditionally imported into each machine depending on the `service` and `role`.
A `role` is a function of a machine within a service. For example in the `backup` service there are `client` and `server` roles.
The `inventory` contains the settings for the user/consumer of the module. It describes what `services` run on each machine and with which `roles`.
Additionally any `service` can be instantiated multiple times.
This ADR proposes that we change how to write a `clanModule`. The `inventory` should get a new attribute called `instances` that allows for configuration of these modules.
### Status Quo
In this example the user configures 2 instances of the `networking` service:
The *user* defines
```nix
{
inventory.services = {
# anything inside an instance is instance specific
networking."instance1" = {
roles.client.tags = [ "all" ];
machines.foo.config = { ... /* machine specific settings */ };
# this will not apply to `clients` outside of `instance1`
roles.client.config = { ... /* client specific settings */ };
};
networking."instance2" = {
roles.server.tags = [ "all" ];
config = { ... /* applies to every machine that runs this instance */ };
};
};
}
```
The *module author* defines:
```nix
# networking/roles/client.nix
{ config, ... }:
let
instances = config.clan.inventory.services.networking or { };
serviceConfig = config.clan.networking;
in {
## Set some nixos options
}
```
### Problems
Problems with the current way of writing clanModules:
1. No way to retrieve the config of a single service instance, together with its name.
2. Directly exporting a single, anonymous nixosModule without any intermediary attribute layers doesn't leave room for exporting other inventory resources such as potentially `vars` or `homeManagerConfig`.
3. Can't access multiple config instances individually.
Example:
```nix
inventory = {
services = {
network.c-base = {
instanceConfig.ips = {
mors = "172.139.0.2";
};
};
network.gg23 = {
instanceConfig.ips = {
mors = "10.23.0.2";
};
};
};
};
```
This doesn't work because all instance configs are applied to the same namespace, so currently this results in a conflict.
Resolving this problem means that new inventory modules cannot be plain NixOS modules anymore. If they are configured via `instances` / `instanceConfig`, they cannot be configured without using the inventory. (There might be ways to inject `instanceConfig`, but that requires knowledge of inventory internals.)
4. Writing modules for multiple instances is cumbersome. Currently the clanModule author has to write one or more `fold` operations for potentially every NixOS option, to define how multiple service instances merge into every single option. The new idea behind this ADR is to pull the common fold function into the outer context and provide it as a common helper. (See the example below; `perInstance` is analogous to the well-known `perSystem` of flake-parts.)
5. Each role has a different interface. We need to render that interface into JSON Schema, which currently requires creating an unnecessary test machine. Defining the interface at a higher level (outside of any machine context) allows faster evaluation and isolation by design from any machine.
This allows rendering the UI (options tree) of a service by just knowing the service and the corresponding roles, without creating a dummy machine.
6. The interface for defining config is wrong. It is possible to define config that applies to multiple machines at once, and to define config that applies to
a machine as a whole. But this is wrong behavior, because the options exist at the role level, so config must also always exist at the role level.
Currently we merge options and config together, but that may produce conflicts. Those module-system conflicts are very hard to foresee since they depend on what roles exist at runtime.
## Proposed Change
We will create a new module class which is defined by `_class = "clan.service"` ([documented here](https://nixos.org/manual/nixpkgs/stable/#module-system-lib-evalModules-param-class)).
Existing clan modules will still work by continuing to be plain NixOS modules. All new modules can set `_class = "clan.service";` to use the proposed features.
In short, the change introduces a new module class that makes the currently necessary folding of `clan.service` `instances` and `roles` a common operation. The module author defines the inner function of the fold operations, which is called a `clan.service` module.
There are the following attributes of such a module:
### `roles.<roleName>.interface`
Each role can have a different interface for how to be configured.
I.e. a `client` role might have different options than a `server` role.
This attribute should be used to define `options` (not `config`!).
The end-user defines the corresponding `config`.
This submodule will be evaluated for each `instance`/`role` combination and passed as an argument into `perInstance`.
This submodule's `options` will be evaluated to build the UI for that module dynamically.
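A minimal sketch of what such an interface might look like, assuming the standard module-system option shape (the `ipRanges` option here is purely illustrative):

```nix
{
  # Only `options` belong here; the end-user supplies the matching
  # config via the inventory, per instance and role.
  roles.client.interface =
    { lib, ... }:
    {
      options.ipRanges = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        default = [ ];
        description = "Illustrative role-specific option.";
      };
    };
}
```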
### **Result attributes**
Some common result attributes are produced by modules of this proposal; they will be referenced later in this document and are commonly defined as:
- `nixosModule` A single nixos module. (`{config, ...}:{ environment.systemPackages = []; }`)
- `services.<serviceName>` An attribute set with `_class = "clan.service"`, which contains the same thing as this whole ADR proposes.
- `vars` To be defined. Reserved for now.
### `roles.<roleName>.perInstance`
This acts like a function that maps over all `service instances` of a given `role`.
It produces the previously defined **result attributes**.
I.e. this allows producing multiple `nixosModule`s, one for every instance of the service,
hence making multiple `service instances` convenient by leveraging the module-system merge behavior.
### `perMachine`
This acts like a function that maps over all `machines` of a given `service`.
It produces the previously defined **result attributes**.
I.e. this produces exactly one `nixosModule` per `service`,
making it easy to set nixos options only once if they have a one-to-one relation to a service being enabled.
Note: `lib.mkIf` can be used on e.g. a machine's roles to make the scope more specific, as sketched below.
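A rough sketch of such scoping, assuming the `machine` argument described below:
```nix
{
  perMachine =
    { machine, ... }:
    {
      nixosModule =
        { lib, ... }:
        {
          # Only set these options on machines that actually have the "client" role
          config = lib.mkIf (builtins.elem "client" machine.roles) {
            # client-specific nixos options here
          };
        };
    };
}
```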
### `services.<serviceName>`
This allows defining nested services.
E.g. the *service* `backup` might define a nested *service* `ssh` which sets up an ssh connection.
Nested services can be defined in both `perMachine` and `perInstance`:
- For every `instance`, a given `service` may add multiple nested `services`.
- A given `service` may add a static set of nested `services`, even if there are multiple instances of that service.
Q: Why is this not a top-level attribute?
A: Because nested service definitions may also depend on a `role` which must be resolved depending on `machine` and `instance`. The top-level module doesn't know anything about machines. Keeping the service layer machine agnostic allows us to build the UI for a module without adding any machines. (One of the problems with the current system)
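As a rough sketch (the service names are illustrative), a nested service could be emitted from `perInstance` like this:
```nix
{
  roles.default.perInstance =
    { instanceName, ... }:
    {
      # The outer service requests an ssh connection by emitting a
      # nested 'clan.service' module as one of its result attributes.
      services.ssh = {
        _class = "clan.service";
        # roles / perInstance / perMachine of the nested service ...
      };
    };
}
```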
```
zerotier/default.nix
```
```nix
# Some example module
{
_class = "clan.service";
# Analogous to flake-parts' 'perSystem', except that it maps over instances
# The exact arguments will be specified and documented along with the actual implementation.
roles.client.perInstance = {
# attrs : settings of that instance
settings,
# string : name of the instance
instanceName,
# { name :: string , roles :: listOf string; }
machine,
# { {roleName} :: { machines :: listOf string; } }
roles,
...
}:
{
# Return a nixos module for every instance.
# The module author must be aware that this may return multiple modules (one for every instance) which are merged natively
nixosModule = {
config.debug."${instanceName}-client" = settings;
};
};
# Function that is called once for every machine with the role "client"
# Receives at least the following parameters:
#
# machine :: { name :: String, roles :: listOf string; }
# Name of the machine
#
# instances :: { instanceName :: { roleName :: { machines :: [ string ]; }}}
# Resolved roles
# Same type as currently in `clan.inventory.services.<ServiceName>.<InstanceName>.roles`
#
# The exact arguments will be specified and documented along with the actual implementation.
perMachine = {machine, instances, ... }: {
nixosModule =
{ lib, ... }:
{
# Some shared code should be put into a shared file
# Which is then imported into all/some roles
imports = [
../shared.nix
] ++
(lib.optional (builtins.elem "client" machine.roles)
{
options.debug = lib.mkOption {
type = lib.types.attrsOf lib.types.raw;
};
});
};
};
}
```
## Inventory.instances
This document also proposes adding a new attribute to the inventory that allows for exclusive configuration of the new modules.
This better separates the new and the old way of writing and configuring modules, keeping the new implementation more focused and keeping existing technical debt out from the beginning.
The following thoughts went into this:
- Getting rid of `<serviceName>`: Using only the attribute name (a plain string) is not sufficient for defining the source of the service module. Encoding meta information into it would also require an extensible format specification and parser.
- Removing `instanceConfig` and `machineConfig`: There is no such config. Service configuration must always be role-specific, because the options are defined on the role.
- Renaming `config` to `settings` or similar, since `config` is a module-system internal name.
- Tags and machines should be attribute sets, to allow setting `settings` at that level instead.
```nix
{
inventory.instances = {
"instance1" = {
# Allows to define where the module should be imported from.
module = {
input = "clan-core";
name = "borgbackup";
};
# settings that apply to all client machines
roles.client.settings = {};
# settings that apply to the client service of machine with name <machineName>
# There might be a server service that takes different settings on the same machine!
roles.client.machines.<machineName>.settings = {};
# settings that apply to all client-instances with tag <tagName>
roles.client.tags.<tagName>.settings = {};
};
"instance2" = {
# ...
};
};
}
```
## Iteration note
We want to implement the system as described. Once we have sufficient data on real world use-cases and modules we might revisit this document along with the updated implementation.
## Real world example
The following module demonstrates the idea in the example of *borgbackup*.
```nix
{
_class = "clan.service";
# Define the 'options' of 'settings'; see the 'settings' argument of perInstance
roles.server.interface =
{ lib, ... }:
{
options.directory = lib.mkOption {
type = lib.types.str;
default = "/var/lib/borgbackup";
description = ''
The directory where the borgbackup repositories are stored.
'';
};
};
roles.server.perInstance =
{
instanceName,
settings,
roles,
...
}:
{
nixosModule =
{ config, lib, ... }:
let
dir = config.clan.core.settings.directory;
machineDir = dir + "/vars/per-machine/";
allClients = roles.client.machines;
in
{
# services.borgbackup is a native nixos option
config.services.borgbackup.repos =
let
borgbackupIpMachinePath = machine: machineDir + machine + "/borgbackup/borgbackup.ssh.pub/value";
machinesMaybeKey = builtins.map (
machine:
let
fullPath = borgbackupIpMachinePath machine;
in
if builtins.pathExists fullPath then
machine
else
lib.warn ''
Machine ${machine} does not have a borgbackup key at ${fullPath},
run `clan var generate ${machine}` to generate it.
'' null
) allClients;
machinesWithKey = lib.filter (x: x != null) machinesMaybeKey;
hosts = builtins.map (machine: {
name = instanceName + machine;
value = {
path = "${settings.directory}/${machine}";
authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machine)) ];
};
}) machinesWithKey;
in
builtins.listToAttrs hosts;
};
};
roles.client.interface =
{ lib, ... }:
{
# There might be a better interface now. This is just how clan borgbackup was configured in the 'old' way
options.destinations = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
default = name;
description = "the name of the backup job";
};
repo = lib.mkOption {
type = lib.types.str;
description = "the borgbackup repository to backup to";
};
rsh = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
defaultText = "ssh -i \${config.clan.core.vars.generators.borgbackup.files.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
description = "the rsh to use for the backup";
};
};
}
)
);
default = { };
description = ''
destinations where the machine should be backed up to
'';
};
options.exclude = lib.mkOption {
type = lib.types.listOf lib.types.str;
example = [ "*.pyc" ];
default = [ ];
description = ''
Directories/Files to exclude from the backup.
Use * as a wildcard.
'';
};
};
roles.client.perInstance =
{
instanceName,
roles,
machine,
settings,
...
}:
{
nixosModule =
{
config,
lib,
pkgs,
...
}:
let
allServers = roles.server.machines;
# machineName = config.clan.core.settings.machine.name;
# cfg = config.clan.borgbackup;
preBackupScript = ''
declare -A preCommandErrors
${lib.concatMapStringsSep "\n" (
state:
lib.optionalString (state.preBackupCommand != null) ''
echo "Running pre-backup command for ${state.name}"
if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
preCommandErrors["${state.name}"]=1
fi
''
) (lib.attrValues config.clan.core.state)}
if [[ ''${#preCommandErrors[@]} -gt 0 ]]; then
echo "pre-backup commands failed for the following services:"
for state in "''${!preCommandErrors[@]}"; do
echo " $state"
done
exit 1
fi
'';
destinations =
let
destList = builtins.map (serverName: {
name = "${instanceName}-${serverName}";
value = {
repo = "borg@${serverName}:/var/lib/borgbackup/${machine.name}";
rsh = "ssh -i ${
config.clan.core.vars.generators."borgbackup-${instanceName}".files."borgbackup.ssh".path
} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
} // settings.destinations.${serverName};
}) allServers;
in
(builtins.listToAttrs destList);
in
{
config = {
# Derived from the destinations
systemd.services = lib.mapAttrs' (
_: dest:
# dest.name already includes the instance name; the nixos module names its units "borgbackup-job-<jobName>"
lib.nameValuePair "borgbackup-job-${dest.name}" {
# since borgbackup mounts the system read-only, we need to run an ExecStartPre script, so we can generate additional files.
serviceConfig.ExecStartPre = [
''+${pkgs.writeShellScript "borgbackup-job-${dest.name}-pre-backup-commands" preBackupScript}''
];
}
) destinations;
services.borgbackup.jobs = lib.mapAttrs (_destinationName: dest: {
paths = lib.unique (
lib.flatten (map (state: state.folders) (lib.attrValues config.clan.core.state))
);
exclude = settings.exclude;
repo = dest.repo;
environment.BORG_RSH = dest.rsh;
compression = "auto,zstd";
startAt = "*-*-* 01:00:00";
persistentTimer = true;
encryption = {
mode = "repokey";
passCommand = "cat ${config.clan.core.vars.generators."borgbackup-${instanceName}".files."borgbackup.repokey".path}";
};
prune.keep = {
within = "1d"; # Keep all archives from the last day
daily = 7;
weekly = 4;
monthly = 0;
};
}) destinations;
environment.systemPackages = [
(pkgs.writeShellApplication {
name = "borgbackup-create";
runtimeInputs = [ config.systemd.package ];
text = ''
${lib.concatMapStringsSep "\n" (dest: ''
systemctl start borgbackup-job-${dest.name}
'') (lib.attrValues destinations)}
'';
})
(pkgs.writeShellApplication {
name = "borgbackup-list";
runtimeInputs = [ pkgs.jq ];
text = ''
(${
lib.concatMapStringsSep "\n" (
dest:
# we need yes here to skip the changed url verification
''echo y | /run/current-system/sw/bin/borg-job-${dest.name} list --json | jq '[.archives[] | {"name": ("${dest.name}::${dest.repo}::" + .name)}]' ''
) (lib.attrValues destinations)
}) | jq -s 'add // []'
'';
})
(pkgs.writeShellApplication {
name = "borgbackup-restore";
runtimeInputs = [ pkgs.gawk ];
text = ''
cd /
IFS=':' read -ra FOLDER <<< "''${FOLDERS-}"
job_name=$(echo "$NAME" | awk -F'::' '{print $1}')
backup_name=''${NAME#"$job_name"::}
if [[ ! -x /run/current-system/sw/bin/borg-job-"$job_name" ]]; then
echo "borg-job-$job_name not found: Backup name is invalid" >&2
exit 1
fi
echo y | /run/current-system/sw/bin/borg-job-"$job_name" extract "$backup_name" "''${FOLDER[@]}"
'';
})
];
# every borgbackup instance adds its own vars
clan.core.vars.generators."borgbackup-${instanceName}" = {
files."borgbackup.ssh.pub".secret = false;
files."borgbackup.ssh" = { };
files."borgbackup.repokey" = { };
migrateFact = "borgbackup";
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
pkgs.xkcdpass
];
script = ''
ssh-keygen -t ed25519 -N "" -f $out/borgbackup.ssh
xkcdpass -n 4 -d - > $out/borgbackup.repokey
'';
};
};
};
};
perMachine = {
nixosModule =
{ ... }:
{
clan.core.backups.providers.borgbackup = {
list = "borgbackup-list";
create = "borgbackup-create";
restore = "borgbackup-restore";
};
};
};
}
```
## Prior-art
- https://github.com/NixOS/nixops
- https://github.com/infinisil/nixus

decisions/02-clan-api.md Normal file
View File

@@ -0,0 +1,116 @@
# Clan as library
## Status
Accepted
## Context
In the long term we envision the clan application will consist of the following user-facing tools:
- `CLI`
- `TUI`
- `Desktop Application`
- `REST-API`
- `Mobile Application`
We are not sure whether all of these will exist, but the architecture should be generic enough that they are possible without major changes to the underlying system.
## Decision
This leads to the conclusion that we should do `library`-centric development,
with the current `clan` python code being a library that can be imported to create various tools on top of it.
All **CLI** or **UI** related parts should be moved out of the main library.
*Note: The next person who wants to implement any new frontend should do this first. Currently it looks like the TUI is the next one.*
Imagine roughly the following architecture:
```mermaid
graph TD
%% Define styles
classDef frontend fill:#f9f,stroke:#333,stroke-width:2px;
classDef backend fill:#bbf,stroke:#333,stroke-width:2px;
classDef storage fill:#ff9,stroke:#333,stroke-width:2px;
classDef testing fill:#cfc,stroke:#333,stroke-width:2px;
%% Define nodes
user(["User"]) -->|Interacts with| Frontends
subgraph "Frontends"
CLI["CLI"]:::frontend
APP["Desktop App"]:::frontend
TUI["TUI"]:::frontend
REST["REST API"]:::frontend
end
subgraph "Python"
API["Library <br>for interacting with clan"]:::backend
BusinessLogic["Business Logic<br>Implements actions like 'machine create'"]:::backend
STORAGE[("Persistence")]:::storage
NIX["Nix Eval & Build"]:::backend
end
subgraph "CI/CD & Tests"
TEST["Feature Testing"]:::testing
end
%% Define connections
CLI --> API
APP --> API
TUI --> API
REST --> API
TEST --> API
API --> BusinessLogic
BusinessLogic --> STORAGE
BusinessLogic --> NIX
```
With this very simple design it is ensured that all the basic features remain stable across all frontends.
In the end it is straightforward to call the python library functions from a testing framework to ensure that kind of stability.
Integration tests and smaller unit-tests should both be utilized to ensure the stability of the library.
Note: Library functions don't have to be json-serializable in general.
Persistence includes but is not limited to: creating git commits, writing to inventory.json, reading and writing vars and to/from disk in general.
## Benefits / Drawbacks
- (+) Less tight coupling of frontend- / backend-teams
- (+) Consistency and inherent behavior
- (+) Performance & Scalability
- (+) Different frontends for different user groups
- (+) Documentation per library function makes it convenient to interact with the clan resources.
- (+) Testing the library ensures stability of the foundations for all layers above.
- (-) Complexity overhead
- (-) library needs to be designed / documented
- (+) library can be well documented since it is a finite set of functions.
- (-) Error handling might be harder.
- (+) Common error reporting
- (-) different frontends need different features. The library must include them all.
- (+) All those core features must be implemented anyways.
- (+) VPN benchmarking already uses the existing library functions and works relatively well.
## Implementation considerations
Not all details that will need to change over time can be pointed out ahead of time.
The goal of this document is to create a common understanding for how we like our project to be structured.
Any future commits should contribute to this goal.
Some ideas what might be needed to change:
- Having separate locations or packages for the library and the CLI.
- Rename the `clan_cli` package to `clan` and move the `cli` frontend into a subfolder or a separate package.
- Python Argparse or other cli related code should not exist in the `clan` python library.
- `__init__.py` should be very minimal. Only init the business logic models and resources. Note that all `__init__.py` files all the way up in the module tree are always executed as part of the python module import logic and thus should be as small as possible.
i.e. `from clan_cli.vars.generators import ...` executes both `clan_cli/__init__.py` and `clan_cli/vars/__init__.py` if any of those exist.
- `api` folder doesn't make sense since the python library `clan` is the api.
- Logic needed for the webui that performs json serialization and deserialization will live in some `json-adapter` folder or package.
- Code for serializing dataclasses and typed dictionaries is needed for the persistence layer. (i.e. for read-write of inventory.json)
- The inventory-json is a backend resource that is internal. Its logic includes merging, unmerging and partial updates while considering nix values and their priorities. Nobody should read or write to it directly.
Instead there will be library methods i.e. to add a `service` or to update/read/delete some information from it.
- Library functions should be carefully designed with suitable conventions for writing good APIs in mind. (e.g. https://swagger.io/resources/articles/best-practices-in-api-design/)

View File

@@ -1,10 +1,12 @@
{ ... }:
{ inputs, ... }:
{
perSystem =
{
lib,
pkgs,
self',
config,
system,
...
}:
let
@@ -24,7 +26,8 @@
in
{
devShells.default = pkgs.mkShell {
packages = [
packages =
[
select-shell
pkgs.nix-unit
pkgs.tea
@@ -35,7 +38,14 @@
self'.packages.pending-reviews
# treefmt with config defined in ./flake-parts/formatting.nix
config.treefmt.build.wrapper
];
]
# bring in data-mesher for the cli which can help with things like key generation
++ (
let
data-mesher = inputs.data-mesher.packages.${system}.data-mesher or null;
in
lib.optional (data-mesher != null) data-mesher
);
shellHook = ''
echo -e "${ansiEscapes.green}switch to another dev-shell using: select-shell${ansiEscapes.reset}"
export PRJ_ROOT=$(git rev-parse --show-toplevel)

View File

@@ -21,14 +21,14 @@ Let's get your development environment up and running:
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
2. **Install direnv**:
1. **Install direnv**:
- To automatically setup a devshell on entering the directory
```bash
nix profile install nixpkgs#nix-direnv-flakes nixpkgs#direnv
```
3. **Add direnv to your shell**:
1. **Add direnv to your shell**:
- Direnv needs to [hook into your shell](https://direnv.net/docs/hook.html) to work.
You can do this by executing the following command. The example below will set up direnv for `zsh` and `bash`
@@ -37,10 +37,10 @@ Let's get your development environment up and running:
echo 'eval "$(direnv hook zsh)"' >> ~/.zshrc && echo 'eval "$(direnv hook bash)"' >> ~/.bashrc && eval "$SHELL"
```
3. **Allow the devshell**
1. **Allow the devshell**
- Go to `clan-core/pkgs/clan-cli` and do a `direnv allow` to setup the necessary development environment to execute the `clan` command
4. **Create a Gitea Account**:
1. **Create a Gitea Account**:
- Register an account on https://git.clan.lol
- Fork the [clan-core](https://git.clan.lol/clan/clan-core) repository
- Clone the repository and navigate to it
@@ -48,30 +48,7 @@ Let's get your development environment up and running:
```bash
git remote add upstream gitea@git.clan.lol:clan/clan-core.git
```
5. **Create an access token**:
- Log in to Gitea.
- Go to your account settings.
- Navigate to the Applications section.
- Click Generate New Token.
- Name your token and select all available scopes.
- Generate the token and copy it for later use.
- Your access token is now ready to use with all permissions.
5. **Register Your Gitea Account Locally**:
- Execute the following command to add your Gitea account locally:
```bash
tea login add
```
- Fill out the prompt as follows:
- URL of Gitea instance: `https://git.clan.lol`
- Name of new Login [git.clan.lol]:
- Do you have an access token? Yes
- Token: <yourtoken>
- Set Optional settings: No
6. **Allow .envrc**:
1. **Allow .envrc**:
- When you enter the directory, you'll receive an error message like this:
```bash
@@ -79,7 +56,7 @@ Let's get your development environment up and running:
```
- Execute `direnv allow` to automatically execute the shell script `.envrc` when entering the directory.
7. **(Optional) Install Git Hooks**:
1. **(Optional) Install Git Hooks**:
- To syntax check your code you can run:
```bash
nix fmt
@@ -89,15 +66,9 @@ Let's get your development environment up and running:
./scripts/pre-commit
```
8. **Open a Pull Request**:
- To automatically open up a pull request you can use our tool called:
```
merge-after-ci --reviewers Mic92 Lassulus Qubasa
```
## Related Projects
- **Data Mesher**: [dm](https://git.clan.lol/clan/dm)
- **Data Mesher**: [data-mesher](https://git.clan.lol/clan/data-mesher)
- **Nixos Facter**: [nixos-facter](https://github.com/nix-community/nixos-facter)
- **Nixos Anywhere**: [nixos-anywhere](https://github.com/nix-community/nixos-anywhere)
- **Disko**: [disko](https://github.com/nix-community/disko)
@@ -128,8 +99,12 @@ run(
),
RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
)
```
The <path_to_local_src> doesn't need to be a local path, it can be any valid [flakeref](https://nix.dev/manual/nix/2.26/command-ref/new-cli/nix3-flake.html#flake-references).
And thus can point to test already opened PRs for example.
# Standards
- Every new module name should be in kebab-case.

View File

@@ -48,6 +48,7 @@ nav:
- Add Machines: getting-started/configure.md
- Secrets & Facts: getting-started/secrets.md
- Deploy Machine: getting-started/deploy.md
- Continuous Integration: getting-started/check.md
- Guides:
- Disk Encryption: getting-started/disk-encryption.md
- Mesh VPN: getting-started/mesh-vpn.md
@@ -61,8 +62,10 @@ nav:
- Authoring:
- Modules: clanmodules/index.md
- Disk Templates: manual/disk-templates.md
- Contribute: manual/contribute.md
- Debugging: manual/debugging.md
- Contributing:
- Contribute: contributing/contribute.md
- Debugging: contributing/debugging.md
- Testing: contributing/testing.md
- Repo Layout: manual/repo-layout.md
- Migrate existing Flakes: manual/migration-guide.md
# - Concepts:
@@ -76,6 +79,7 @@ nav:
# This is the module overview and should stay at the top
- reference/clanModules/admin.md
- reference/clanModules/borgbackup-static.md
- reference/clanModules/data-mesher.md
- reference/clanModules/borgbackup.md
- reference/clanModules/deltachat.md
- reference/clanModules/disk-id.md
@@ -107,6 +111,7 @@ nav:
- reference/clanModules/thelounge.md
- reference/clanModules/trusted-nix-caches.md
- reference/clanModules/user-password.md
- reference/clanModules/auto-upgrade.md
- reference/clanModules/vaultwarden.md
- reference/clanModules/xfce.md
- reference/clanModules/zerotier-static-peers.md

View File

@@ -13,8 +13,8 @@
# { clanCore = «derivation JSON»; clanModules = { ${name} = «derivation JSON» }; }
jsonDocs = pkgs.callPackage ./get-module-docs.nix {
inherit (self) clanModules;
evalClanModules = self.lib.evalClanModules;
modulesRolesOptions = self.lib.evalClanModulesWithRoles self.clanModules;
evalClanModules = self.lib.evalClan.evalClanModules;
modulesRolesOptions = self.lib.evalClan.evalClanModulesWithRoles self.clanModules;
};
# Frontmatter for clanModules

View File

@@ -585,7 +585,7 @@ Each attribute is documented below
```nix
buildClan {
directory = self;
self = self;
machines = {
jon = { };
sara = { };

View File

@@ -51,6 +51,20 @@ wintux
If you're using VSCode, it has a handy feature that makes paths to source code files clickable in the integrated terminal. Combined with the previously mentioned techniques, this allows you to open a Clan in VSCode, execute a command like `clan machines list --debug`, and receive a printed path to the code that initiates the subprocess. With the `Ctrl` key (or `Cmd` on macOS) and a mouse click, you can jump directly to the corresponding line in the code file and add a `breakpoint()` function to it, to inspect the internal state.
## Finding Print Messages
To identify where a specific print message comes from, you can enable a helpful feature. Simply set the environment variable `export TRACE_PRINT=1`. When you run commands with `--debug` mode, each print message will include information about its source location.
If you need more details, you can expand the stack trace information that appears with each print by setting the environment variable `export TRACE_DEPTH=3`.
## Analyzing Performance
To understand what's causing slow performance, set the environment variable `export CLAN_CLI_PERF=1`. When you complete a clan command, you'll see a summary of various performance metrics, helping you identify what's taking up time.
## See all possible packages and tests
To quickly show all possible packages and tests execute:

View File

@@ -0,0 +1,316 @@
# Testing your contributions
Each feature added to clan should be tested extensively via automated tests.
This document covers different methods of automated testing, including creating, running and debugging such tests.
In order to test the behavior of clan, different testing frameworks are used depending on the concern:
- NixOS VM tests: for high level integration
- NixOS container tests: for high level integration
- Python tests via pytest: for unit tests and integration tests
- Nix eval tests: for nix functions, libraries, modules, etc.
## NixOS VM Tests
The [NixOS VM Testing Framework](https://nixos.org/manual/nixos/stable/index.html#sec-nixos-tests) is used to create high level integration tests, by running one or more VMs generated from a specified config. Commands can be executed on the booted machine(s) to verify a deployment of a service works as expected. All machines within a test are connected by a virtual network. Internet access is not available.
### When to use VM tests
- testing that a service defined through a clan module works as expected after deployment
- testing clan-cli subcommands which require accessing a remote machine
### When not to use VM tests
NixOS VM Tests are slow and expensive. They should only be used for testing high level integration of components.
VM tests should be avoided wherever it is possible to implement a cheaper unit test instead.
- testing detailed behavior of a certain clan-cli command -> use unit testing via pytest instead
- regression testing -> add a unit test
### Finding examples for VM tests
Existing nixos vm tests in clan-core can be found by using ripgrep:
```shellSession
rg "import.*/lib/test-base.nix"
```
### Locating definitions of failing VM tests
All nixos vm tests in clan are exported as individual flake outputs under `checks.x86_64-linux.{test-attr-name}`.
If a test fails in CI:
- look for the job name of the test near the top of the CI job page, for example `gitea:clan/clan-core#checks.x86_64-linux.borgbackup/1242`
- in this case `checks.x86_64-linux.borgbackup` is the attribute path
- note the last element of that attribute path, in this case `borgbackup`
- search for the attribute name inside the `/checks` directory via ripgrep
example: locating the vm test named `borgbackup`:
```shellSession
$ rg "borgbackup =" ./checks
./checks/flake-module.nix
41: borgbackup = import ./borgbackup nixosTestArgs;
```
-> the location of that test is `/checks/flake-module.nix` line `41`.
### Adding vm tests
Create a nixos test module under `/checks/{name}/default.nix` and import it in `/checks/flake-module.nix`.
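A minimal sketch of such a test module (illustrative; the exact arguments accepted by `lib/test-base.nix` may differ):
```nix
# /checks/example/default.nix
{
  name = "example";
  nodes.machine =
    { pkgs, ... }:
    {
      # nixos configuration of the machine under test
      services.openssh.enable = true;
    };
  testScript = ''
    start_all()
    machine.wait_for_unit("sshd.service")
    print(machine.succeed("systemctl --failed"))
  '';
}
```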
### Running VM tests
```shellSession
nix build .#checks.x86_64-linux.{test-attr-name}
```
(replace `{test-attr-name}` with the name of the test)
### Debugging VM tests
The following techniques can be used to debug a VM test:
#### Print Statements
Locate the definition (see above) and add print statements, for example `print(client.succeed("systemctl --failed"))`, then re-run the test via `nix build` (see above).
#### Interactive Shell
- Execute the vm test outside the nix Sandbox via the following command:
`nix run .#checks.x86_64-linux.{test-attr-name}.driver -- --interactive`
- Then run the commands in the machines manually, like for example:
```python3
start_all()
machine1.succeed("echo hello")
```
#### Breakpoints
To get an interactive shell at a specific line in the VM test script, add a `breakpoint()` call before the line to debug, then run the test outside of the sandbox via:
`nix run .#checks.x86_64-linux.{test-attr-name}.driver`
## NixOS Container Tests
These are very similar to NixOS VM tests in that they run virtualized NixOS machines, but instead of VMs they use containers, which are much cheaper to launch.
As of now, the container test driver is a downstream development in clan-core.
Basically everything stated in the NixOS VM tests section above applies here, except for some limitations.
Limitations:
- does not yet support networking
- supports only one machine as of now
### Where to find examples for NixOS container tests
Existing nixos container tests in clan-core can be found by using ripgrep:
```shellSession
rg "import.*/lib/container-test.nix"
```
## Python tests via pytest
Since the clan cli is written in python, the `pytest` framework is used to define unit and integration tests in python.
Due to their superior efficiency, python tests should be preferred over VM or container tests wherever possible.
### When to use python tests
- writing unit tests for python functions and modules, or bugfixes of such
- all integration tests that do not require building or running a nixos machine
- impure integration tests that require internet access (very rare, try to avoid)
### When not to use python tests
- integration tests that require building or running a nixos machine (use NixOS VM or container tests instead)
- testing behavior of a nix function or library (use nix eval tests instead)
### Finding examples of python tests
Existing python tests in clan-core can be found by using ripgrep:
```shellSession
rg "import pytest"
```
### Locating definitions of failing python tests
If any python test fails in the CI pipeline, an error message like this can be found at the end of the log:
```
...
FAILED tests/test_machines_cli.py::test_machine_delete - clan_cli.errors.ClanError: Template 'new-machine' not in 'inputs.clan-core
...
```
In this case the test is defined in the file `/tests/test_machines_cli.py` via the test function `test_machine_delete`.
### Adding python tests
If a specific python module is tested, the test should be located near the tested module in a subdirectory called `./tests`
If the test is not clearly related to a specific module, put it in the top-level `./tests` directory of the tested python package. For `clan-cli` this would be `/pkgs/clan-cli/clan_cli/tests`.
All filenames must be prefixed with `test_` and test functions prefixed with `test_` for pytest to discover them.
### Running python tests
#### Running all python tests
To run all python tests which are executed in the CI pipeline locally, use this `nix build` command
```shellSession
nix build .#checks.x86_64-linux.clan-pytest-{with,without}-core
```
#### Running a specific python test
To run a specific python test outside the nix sandbox
1. Enter the development environment of the python package, by either:
- Having direnv enabled and entering the directory of the package (eg. `/pkgs/clan-cli`)
- Or using the command `select-shell {package}` in the top-level dev shell of clan-core (eg. `select-shell clan-cli`)
2. Execute the test via pytest by issuing
`pytest ./path/to/test_file.py::test_function_name -s -n0`
The flags `-s -n0` are useful to forward all stdout/stderr output to the terminal and to allow interactive debugging via `breakpoint()`.
### Debugging python tests
To debug a specific python test, find its definition (see above) and make sure to enter the correct dev environment for that python package.
Modify the test and add `breakpoint()` statements to it.
Execute the test using the flags `-sn0` in order to get an interactive shell at the breakpoint:
```shellSession
pytest ./path/to/test_file.py::test_function_name -sn0
```
## Nix Eval Tests
### When to use nix eval tests
Nix eval tests are good for testing any nix logic, including
- nix functions
- nix libraries
- modules for the nixos module system
### When not to use nix eval tests
- tests that require building nix derivations (except some very cheap ones)
- tests that require running programs written in other languages
- tests that require building or running nixos machines
### Finding examples of nix eval tests
Existing nix eval tests can be found via this ripgrep command:
```shellSession
rg "nix-unit --eval-store"
```
### Locating definitions of failing nix eval tests
Failing nix eval tests look like this:
```shellSession
> ✅ test_attrsOf_attrsOf_submodule
> ✅ test_attrsOf_submodule
> ❌ test_default
> /build/nix-8-2/expected.nix --- Nix
> 1 { foo = { bar = { __prio = 1500; }; } 1 { foo = { bar = { __prio = 1501; }; }
> . ; } . ; }
>
>
> ✅ test_no_default
> ✅ test_submodule
> ✅ test_submoduleWith
> ✅ test_submodule_with_merging
>
> 😢 6/7 successful
> error: Tests failed
```
To locate the definition, find the flake attribute name of the failing test near the top of the CI Job page, like for example `gitea:clan/clan-core#checks.x86_64-linux.lib-values-eval/1242`.
In this case `lib-values-eval` is the attribute we are looking for.
Find the attribute via ripgrep:
```shellSession
$ rg "lib-values-eval ="
lib/values/flake-module.nix
21: lib-values-eval = pkgs.runCommand "tests" { nativeBuildInputs = [ pkgs.nix-unit ]; } ''
grmpf@grmpf-nix ~/p/c/clan-core (test-docs)>
```
In this case the test is defined in the file `lib/values/flake-module.nix` line 21
### Adding nix eval tests
In clan core, the following pattern is usually followed:
- tests are put in a `test.nix` file
- a CI Job is exposed via a `flake-module.nix`
- that `flake-module.nix` is imported via the `flake.nix` at the root of the project
For example see `/lib/values/{test.nix,flake-module.nix}`.
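A minimal sketch of such a `test.nix`, following the `expr`/`expected` convention checked by `nix-unit` (the test body is illustrative):
```nix
# test.nix -- callable with an attrset, as in `import ./test.nix { }`
{ ... }:
{
  test_example = {
    expr = 1 + 1;
    expected = 2;
  };
}
```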
### Running nix eval tests
Since all nix eval tests are exposed via the flake outputs, they can be run via `nix build`:
```shellSession
nix build .#checks.x86_64-linux.{test-attr-name}
```
For quicker iteration times, instead of `nix build` use the `nix-unit` command available in the dev environment.
Example:
```shellSession
nix-unit --flake .#legacyPackages.x86_64-linux.{test-attr-name}
```
### Debugging nix eval tests
Follow the instructions above to find the definition of the test, then use one of the following techniques:
#### Print debugging
Add `lib.trace` or `lib.traceVal` statements to print variables during evaluation.
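For example (a self-contained sketch):
```nix
let
  lib = (import <nixpkgs> { }).lib;
  # traceVal prints the value to stderr during evaluation and returns it unchanged
  result = lib.traceVal (lib.recursiveUpdate { foo.bar = 1; } { foo.baz = 2; });
in
result
```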
#### Nix repl
Use `nix repl` to inspect the test.
Each test consists of an `expr` (expression) and an `expected` field. `nix-unit` simply checks if `expr == expected` and prints the diff if that's not the case.
`nix repl` can be used to inspect `expr` manually, or any other variables that you choose to expose.
Example:
```shellSession
$ nix repl
Nix 2.25.5
Type :? for help.
nix-repl> tests = import ./lib/values/test.nix {}
nix-repl> tests
{
test_attrsOf_attrsOf_submodule = { ... };
test_attrsOf_submodule = { ... };
test_default = { ... };
test_no_default = { ... };
test_submodule = { ... };
test_submoduleWith = { ... };
test_submodule_with_merging = { ... };
}
nix-repl> tests.test_default.expr
{
foo = { ... };
}
```

View File

@@ -143,3 +143,25 @@ Ensure the path to the public key is correct.
```bash
clan backups create mymachine
```
- **Restoring Backups:** To restore a backup that has been listed by the list command (NAME):
```bash
clan backups restore [MACHINE] [PROVIDER] [NAME]
```
Example (Restoring a machine called `client` with the backup provider `borgbackup`):
```bash
clan backups restore client borgbackup [NAME]
```
The `backups` command is service aware and allows optional specification of the `--service` flag.
To only restore the service called `zerotier` on a machine called `controller` through the backup provider `borgbackup`, use the following command:
```bash
clan backups restore controller borgbackup [NAME] --service zerotier
```

View File

@@ -0,0 +1,28 @@
### Generate Facts and Vars
Typically, this step is handled automatically when a machine is deployed. However, to enable the use of `nix flake check` with your configuration, it must be completed manually beforehand.
Currently, generating all the necessary facts requires two separate commands. This is due to the coexistence of two parallel secret management solutions:
the newer, recommended version (`clan vars`) and the older version (`clan facts`) that we are slowly phasing out.
To generate both facts and vars, execute the following commands:
```sh
clan facts generate && clan vars generate
```
### Check Configuration
Validate your configuration by running:
```bash
nix flake check
```
This command helps ensure that your system configuration is correct and free from errors.
!!! Tip
You can integrate this step into your [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) workflow to ensure that only valid Nix configurations are merged into your codebase.

View File

@@ -3,7 +3,6 @@ Managing machine configurations can be done in the following ways:
- writing `nix` expressions in a `flake.nix` file,
- placing `autoincluded` files into your machine directory,
- configuring everything in a simple UI (upcoming).
Clan currently offers the following methods to configure machines:
@@ -79,9 +78,14 @@ Adding or configuring a new machine requires two simple steps:
└─nvme0n1p3 nvme-eui.e8238fa6bf530001001b448b4aec2929-part3 swap 16.8G
```
1. Edit the following fields inside the `./machines/jon/configuration.nix` and/or `./machines/sara/configuration.nix`
!!! Warning
Make sure to copy the `ID-LINK` from toplevel disk device like `nvme0n1` or `sda` instead of `nvme0n1p1` or `sda1`
```nix title="./machines/<machine>/configuration.nix" hl_lines="13 18 23 27"
2. Edit the following fields inside the `./machines/jon/configuration.nix` and/or `./machines/sara/configuration.nix`
<!-- Note: Use "jon" instead of "<machine>" as "<" is not supported in title tag -->
```nix title="./machines/jon/configuration.nix" hl_lines="13 18 22 26"
{
imports = [
./hardware-configuration.nix
@@ -94,16 +98,15 @@ Adding or configuring a new machine requires two simple steps:
];
# Put your username here for login
users.users.user.username = "__YOUR_USERNAME__";
users.users.user.name = "__YOUR_USERNAME__";
# Set this for clan commands use ssh i.e. `clan machines update`
# Set this for clan commands that use ssh
# If you change the hostname, you need to update this line to root@<new-hostname>
# This only works however if you have avahi running on your admin machine else use IP
clan.core.networking.targetHost = "root@__IP__";
# You can get your disk id by running the following command on the installer:
# Replace <IP> with the IP of the installer printed on the screen or by running the `ip addr` command.
# ssh root@<IP> lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
# Replace this __CHANGE_ME__ with the result of the lsblk command from step 1.
disko.devices.disk.main.device = "/dev/disk/by-id/__CHANGE_ME__";
# IMPORTANT! Add your SSH key here
@@ -114,80 +117,32 @@ Adding or configuring a new machine requires two simple steps:
}
```
You can also create additional machines using the `clan machines create` command:
```
$ clan machines create --help
usage: clan [-h] [SUBCOMMAND] machines create [-h] [--tags TAGS [TAGS ...]] [--template-name TEMPLATE_NAME]
[--target-host TARGET_HOST] [--debug] [--option name value] [--flake PATH]
machine_name
positional arguments:
machine_name The name of the machine to create
options:
-h, --help show this help message and exit
--tags TAGS [TAGS ...]
Tags to associate with the machine. Can be used to assign multiple machines to services.
--template-name TEMPLATE_NAME
The name of the template machine to import
--target-host TARGET_HOST
Address of the machine to install and update, in the format of user@host:1234
--debug Enable debug logging
--option name value Nix option to set
--flake PATH path to the flake where the clan resides in, can be a remote flake or local, can be set through
the [CLAN_DIR] environment variable
```
!!! Info "Replace `__YOUR_USERNAME__` with the ip of your machine, if you use avahi you can also use your hostname"
!!! Info "Replace `__IP__` with the ip of your machine, if you use avahi you can also use your hostname"
!!! Info "Replace `__CHANGE_ME__` with the appropriate identifier, such as `nvme-eui.e8238fa6bf530001001b448b4aec2929`"
!!! Info "Replace `__CHANGE_ME__` with the appropriate `ID-LINK` identifier, such as `nvme-eui.e8238fa6bf530001001b448b4aec2929`"
!!! Info "Replace `__YOUR_SSH_KEY__` with your personal key, like `ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILoMI0NC5eT9pHlQExrvR5ASV3iW9+BXwhfchq0smXUJ jon@jon-desktop`"
These steps will allow you to update your machine later.
### Step 2: Detect Drivers
You can also create additional machines using the cli:
Generate the `hardware-configuration.nix` file for your machine by executing the following command:
```bash
clan machines update-hardware-config [MACHINE_NAME] [HOSTNAME]
```
replace `[MACHINE_NAME]` with the name of the machine i.e. `jon` and `[HOSTNAME]` with the `ip_address` or `hostname` of the machine within the network. i.e. `<IP>`
!!! Example
```bash
clan machines update-hardware-config jon
```
$ clan machines create <machinename>
```
This command connects to the ip configured in the previous step, runs [nixos-facter](https://github.com/nix-community/nixos-facter)
to detect hardware configurations (excluding filesystems), and writes them to `machines/jon/facter.json`.
### Step 3: Custom Disk Formatting
### Step 2: Custom Disk Formatting
In `./modules/disko.nix`, a simple `ext4` disk partitioning scheme is defined for the Disko module. For more complex disk partitioning setups,
refer to the [Disko templates](https://github.com/nix-community/disko-templates) or [Disko examples](https://github.com/nix-community/disko/tree/master/example).
### Step 4: Custom Configuration
### (Optional): Renaming Machine
Modify `./machines/jon/configuration.nix` to personalize the system settings according to your requirements.
If you wish to name your machine to something else, do the following steps:
For renaming jon to your own machine name, you can use the following command:
```
mv ./machines/jon/configuration.nix ./machines/newname/configuration.nix
git mv ./machines/jon ./machines/newname
```
Then rename `jon` to your preferred name in `machines` in `flake.nix` as well as the import line:
```diff
- imports = [ ./machines/jon/configuration.nix ];
+ imports = [ ./machines/__NEW_NAME__/configuration.nix ];
```
!!! Info "Replace `__NEW_NAME__` with the name of the machine"
Note that our clan lives inside a git repository.
Only files that have been added with `git add` are recognized by `nix`.
So for every file that you add or rename you also need to run:
@@ -196,14 +151,11 @@ So for every file that you add or rename you also need to run:
git add ./path/to/my/file
```
For renaming jon to your own machine name, you can use the following command:
```
git mv ./machines/jon ./machines/newname
```
### (Optional): Removing a Machine
If you only want to setup a single machine at this point, you can delete `sara` from `flake.nix` as well as from the machines directory:
```
git rm ./machines/sara
git rm -rf ./machines/sara
```

View File

@@ -1,13 +1,7 @@
# Deploy your Clan
Integrating a new machine into your Clan environment is an easy yet flexible process, allowing for straightforward management of multiple NixOS configurations.
Now that you have created a new machine, we will walk through how to install it.
We'll walk you through adding a new computer to your Clan.
## Installing a New Machine
Clan CLI, in conjunction with [nixos-anywhere](https://github.com/nix-community/nixos-anywhere), provides a seamless method for installing NixOS on various machines.
This process involves preparing a suitable hardware and disk partitioning configuration and ensuring the target machine is accessible via SSH.
### Step 0. Prerequisites
@@ -24,7 +18,7 @@ This process involves preparing a suitable hardware and disk partitioning config
2. Boot the target machine and connect it to a network that makes it reachable from your setup computer.
=== "**Remote Machines**"
=== "**Cloud VMs**"
- [x] **Two Computers**: You need one computer that you're getting ready (we'll call this the Target Computer) and another one to set it up from (we'll call this the Setup Computer). Make sure both can talk to each other over the network using SSH.
- [x] **Machine configuration**: See our basic [configuration guide](./configure.md)
@@ -107,32 +101,27 @@ This process involves preparing a suitable hardware and disk partitioning config
For easy sharing of deployment information via QR code, we highly recommend using [KDE Connect](https://apps.kde.org/de/kdeconnect/).
There are two ways to deploy your machine:
1. **SSH with Password Authentication**
Run the following command to install using SSH:
=== "**Password Auth**"
Run the following command to login over SSH with password authentication
```bash
clan machines install [MACHINE] --target-host <IP>
clan machines install [MACHINE] --target-host <IP> --update-hardware-config nixos-facter
```
=== "**QR Code Auth**"
Using the JSON contents of the QR Code:
```terminal
clan machines install [MACHINE] --json "[JSON]" --update-hardware-config nixos-facter
```
OR using a picture containing the QR code
```terminal
clan machines install [MACHINE] --png [PATH] --update-hardware-config nixos-facter
```
2. **Scanning a QR Code for Installation Details**
You can input the information by following one of these methods:
- **Using a JSON String or File Path:**
Provide the path to a JSON string or input the string directly:
```terminal
clan machines install [MACHINE] --json [JSON]
```
- **Using an Image Containing the QR Code:**
Provide the path to an image file containing the relevant QR code:
```terminal
clan machines install [MACHINE] --png [PATH]
```
=== "**SSH access**"
=== "**Cloud VM**"
Replace `<target_host>` with the **target computers' ip address**:
```bash
clan machines install [MACHINE] --target-host <target_host>
clan machines install [MACHINE] --target-host <target_host> --update-hardware-config nixos-facter
```

View File

@@ -49,7 +49,8 @@ Replace `kernelModules` with the ethernet module loaded one on your target machi
port = 7172;
authorizedKeys = [ "<yourkey>" ];
hostKeys = [
"/var/lib/initrd-ssh-key"
"/var/lib/initrd_host_ed25519_key"
"/var/lib/initrd_host_rsa_key"
];
};
};
@@ -73,7 +74,7 @@ Before starting the installation process, ensure that the SSH public key is copi
ssh-copy-id -o PreferredAuthentications=password -o PubkeyAuthentication=no root@nixos-installer.local
```
### Step 1.5: Prepare Secret Key and Clear Disk Data
### Step 1.5: Prepare Secret Key and Partition Disks
1. Access the installer using SSH:
@@ -90,13 +91,13 @@ nano /tmp/secret.key
3. Discard the old disk partition data:
```bash
blkdiscard /dev/disk/by-id/nvme-eui.002538b931b59865
blkdiscard /dev/disk/by-id/<installdisk>
```
4. Run the `clan` machine installation with the following command:
4. Run `clan` machines install, only running kexec and disko, with the following command:
```bash
clan machines install gchq-local --target-host root@nixos-installer --yes --no-reboot
clan machines install gchq-local --target-host root@nixos-installer --phases kexec,disko
```
### Step 2: ZFS Pool Import and System Installation
@@ -107,14 +108,10 @@ clan machines install gchq-local --target-host root@nixos-installer --yes --no-r
ssh root@nixos-installer.local
```
2. Perform the following commands on the remote installation environment:
2. Run the following command on the remote installation environment:
```bash
zpool import zroot
zfs set keylocation=prompt zroot/root
zfs load-key zroot/root
zfs set mountpoint=/mnt zroot/root/nixos
mount /dev/nvme0n1p2 /mnt/boot
```
3. Disconnect from the SSH session:
@@ -123,43 +120,36 @@ mount /dev/nvme0n1p2 /mnt/boot
CTRL+D
```
4. Securely copy your local `initrd_rsa_key` to the installer's `/mnt` directory:
4. Locally generate ssh host keys. You only need to generate ones for the algorithms you're using in `authorizedKeys`.
```bash
scp ~/.ssh/initrd_rsa_key root@nixos-installer.local:/mnt/var/lib/initrd-ssh-key
ssh-keygen -q -N "" -t ed25519 -f ./initrd_host_ed25519_key
ssh-keygen -q -N "" -t rsa -b 4096 -f ./initrd_host_rsa_key
```
5. SSH back into the installer:
5. Securely copy your local initrd ssh host keys to the installer's `/mnt` directory:
```bash
ssh root@nixos-installer.local
scp ./initrd_host* root@nixos-installer.local:/mnt/var/lib/
```
6. Navigate to the `/mnt` directory, enter the `nixos-enter` environment, and then exit:
6. Install nixos to the mounted partitions
```bash
cd /mnt
nixos-enter
realpath /run/current-system
exit
clan machines install gchq-local --target-host root@nixos-installer --phases install
```
7. Run the `nixos-install` command with the appropriate system path `<SYS_PATH>`:
```bash
nixos-install --no-root-passwd --no-channel-copy --root /mnt --system <SYS_PATH>
```
8. After the installation process, unmount `/mnt/boot`, change the ZFS mountpoint, and reboot the system:
7. After the installation process, unmount `/mnt/boot`, change the ZFS mountpoints and unmount all the ZFS volumes by exporting the zpool:
```bash
umount /mnt/boot
cd /
zfs set mountpoint=/ zroot/root/nixos
reboot
zfs set -u mountpoint=/ zroot/root/nixos
zfs set -u mountpoint=/tmp zroot/root/tmp
zfs set -u mountpoint=/home zroot/root/home
zpool export zroot
```
9. Perform a hard reboot of the machine and remove the USB stick.
8. Perform a reboot of the machine and remove the USB installer.
### Step 3: Accessing the Initial Ramdisk (initrd) Environment

View File

@@ -42,7 +42,7 @@ By the end of this guide, you'll have a fresh NixOS configuration ready to push
Add the Clan CLI into your development workflow:
```bash
nix shell git+https://git.clan.lol/clan/clan-core#clan-cli
nix shell git+https://git.clan.lol/clan/clan-core#clan-cli --refresh
```
You can find reference documentation for the `clan` cli program [here](../reference/cli/index.md).
@@ -92,6 +92,21 @@ This should yield the following:
5 directories, 9 files
```
??? info "Recommended way of sourcing the `clan` cli tool"
The default template also adds the `clan` cli tool to the development shell.
Meaning you can get the exact version you need directly from the folder
you are in right now.
In the `my-clan` directory run the following command:
```
nix develop
```
That way you will have the tool available in the shell environment.
We also recommend setting up [direnv](https://direnv.net/) for your shell, for a more convenient
experience.
```bash
clan machines list
```

View File

@@ -1,11 +1,16 @@
# Create an Installer Image
# Clan Installer Image for Physical Machines
Our installer image simplifies the process of performing remote installations.
To install Clan on physical machines, you need to use our custom installer image. This is necessary for proper installation and operation.
Follow our step-by-step guide to create and transfer this image onto a bootable USB drive.
!!! note "Using a Cloud VM?"
If you're using a cloud provider's virtual machine (VM), you can skip this section and go directly to the [Configure Machines](configure.md) step. In this scenario, we automatically use [nixos-anywhere](https://github.com/nix-community/nixos-anywhere) to replace the kernel during runtime.
??? info "Why nixos-anywhere Doesn't Work on Physical Hardware?"
nixos-anywhere relies on [kexec](https://wiki.archlinux.org/title/Kexec) to replace the running kernel with our custom one. This method often has compatibility issues with real hardware, especially systems with dedicated graphics cards like laptops and servers, leading to crashes and black screens.
??? info "Reasons for a Custom Install Image"
Our custom install images are built to include essential tools like [nixos-facter](https://github.com/nix-community/nixos-facter) and support for [ZFS](https://wiki.archlinux.org/title/ZFS). They're also optimized to run on systems with as little as 1 GB of RAM, ensuring efficient performance even on lower-end hardware.
!!! info
If you already have a NixOS machine you can ssh into (in the cloud for example) you can skip this chapter and go directly to [Configure Machines](configure.md).
### Step 0. Prerequisites
@@ -40,9 +45,9 @@ Follow our step-by-step guide to create and transfer this image onto a bootable
sudo umount /dev/sdb1
```
=== "**Linux OS**"
### Step 2. Flash Custom Installer
### Step 2. Create a Custom Installer
Using clan flash enables the inclusion of ssh public keys and wifi access points.
Using clan flash enables the inclusion of ssh public keys into the image.
It also allows to set language and keymap in the installer image.
```bash

View File

@@ -18,14 +18,53 @@ Clan
If you select multiple network technologies at the same time (e.g. zerotier + yggdrasil),
you must choose one of them as the primary network; machines are always connected via the primary network.
## 1. Set-Up the VPN Controller
This guide shows you how to configure `zerotier` either through `NixOS Options` directly, or Clan's `Inventory` System.
The VPN controller is initially essential for providing configuration to new
peers. Once addresses are allocated, the controller's continuous operation is not essential.
1. **Designate a Machine**: Label a machine as the VPN controller in the clan,
=== "**Inventory**"
## 1. Choose the Controller
The controller is the initial entrypoint for new machines into the vpn.
It will sign the IDs of new machines.
Once IDs are signed, the controller's continuous operation is not essential.
A good choice for the controller is nevertheless a machine that can always be reached for updates, so that new peers can be added to the network.
For the purpose of this guide we have two machines:
- The `controller` machine, which will be the zerotier controller.
- The `new_machine` machine, which is the machine we want to add to the vpn network.
## 2. Configure the Inventory
```nix
clan.inventory = {
services.zerotier.default = {
roles.controller.machines = [
"controller"
];
roles.peer.machines = [
"new_machine"
];
};
};
```
## 3. Apply the Configuration
Update the `controller` machine:
```bash
clan machines update controller
```
=== "**NixOS Options**"
## 1. Set-Up the VPN Controller
The VPN controller is initially essential for providing configuration to new
peers. Once addresses are allocated, the controller's continuous operation is not essential.
1. **Designate a Machine**: Label a machine as the VPN controller in the clan,
referred to as `<CONTROLLER>` henceforth in this guide.
2. **Add Configuration**: Input the following configuration to the NixOS
2. **Add Configuration**: Input the following configuration to the NixOS
configuration of the controller machine:
```nix
clan.core.networking.zerotier.controller = {
@@ -33,24 +72,24 @@ peers. Once addresses are allocated, the controller's continuous operation is no
public = true;
};
```
3. **Update the Controller Machine**: Execute the following:
3. **Update the Controller Machine**: Execute the following:
```bash
clan machines update <CONTROLLER>
```
Your machine is now operational as the VPN controller.
## 2. Add Machines to the VPN
## 2. Add Machines to the VPN
To introduce a new machine to the VPN, adhere to the following steps:
To introduce a new machine to the VPN, adhere to the following steps:
1. **Update Configuration**: On the new machine, incorporate the following to its
1. **Update Configuration**: On the new machine, incorporate the following to its
configuration, substituting `<CONTROLLER>` with the controller machine name:
```nix
{ config, ... }: {
clan.core.networking.zerotier.networkId = builtins.readFile (config.clan.core.settings.directory + "/machines/<CONTROLLER>/facts/zerotier-network-id");
}
```
1. **Update the New Machine**: Execute:
1. **Update the New Machine**: Execute:
```bash
$ clan machines update <NEW_MACHINE>
```
@@ -93,7 +132,7 @@ To introduce a new machine to the VPN, adhere to the following steps:
```
Substitute `<ID>` with the ZeroTier ID obtained previously.
2. **Verify Connection**: On the `new_machine`, re-execute:
2. **Verify Connection**: On the `new_machine`, re-execute:
```bash
$ sudo zerotier-cli info
```

View File

@@ -1,7 +1,8 @@
Clan enables encryption of secrets (such as passwords & keys) ensuring security and ease-of-use among users.
Clan utilizes the [sops](https://github.com/getsops/sops) format and integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
By default Clan utilizes the [sops](https://github.com/getsops/sops) format and integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
Clan can also be configured to be used with other secret store [backends](https://docs.clan.lol/reference/clan-core/vars/#clan.core.vars.settings.secretStore).
This guide will walk you through:
@@ -39,7 +40,7 @@ Also add your age public key to the repository with 'clan secrets users add YOUR
### Add Your Public Key
```bash
clan secrets users add $USER <your_public_key>
clan secrets users add $USER --age-key <your_public_key>
```
It's best to choose the same username as on your Setup/Admin Machine that you use to control the deployment with.
@@ -53,33 +54,3 @@ sops/
└── key.json
```
If you followed the quickstart tutorial all necessary secrets are initialized at this point.
### Generate Facts and Vars
Typically, this step is handled automatically when a machine is deployed. However, to enable the use of `nix flake check` with your configuration, it must be completed manually beforehand.
Currently, generating all the necessary facts requires two separate commands. This is due to the coexistence of two parallel secret management solutions: the older, stable version (`clan secrets` and `clan facts`) and the newer, experimental version (`clan vars`).
To generate both facts and vars, execute the following commands:
```sh
clan facts generate && clan vars generate
```
### Check Configuration
Validate your configuration by running:
```bash
nix flake check
```
This command helps ensure that your system configuration is correct and free from errors.
!!! Tip
You can integrate this step into your [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) workflow to ensure that only valid Nix configurations are merged into your codebase.

View File

@@ -61,9 +61,9 @@ hide:
---
Use clan with [https://flake-parts.dev]()
Use clan with [https://flake.parts/]()
- [Contribute](./manual/contribute.md)
- [Contribute](./contributing/contribute.md)
---
@@ -73,7 +73,7 @@ hide:
## API Reference
**Auto generated API Documentation**
**Reference API Documentation**
<div class="grid cards" markdown>

View File

@@ -81,7 +81,7 @@ For the provided flake example, your flake should now look like this:
outputs = { self, nixpkgs, ... }:
let
clan = clan-core.lib.buildClan {
directory = self; # this needs to point at the repository root
self = self; # this needs to point at the repository root
specialArgs = {};
inventory.meta.name = "NEEDS_TO_BE_UNIQUE"; # TODO: Changeme

View File

@@ -18,8 +18,3 @@ $ tree -L 1
├── templates # Template files for creating a new Clan
└── vars
```
## Getting Started with Infrastructure
To dive into infrastructure, check out our clan infra repo: [clan-infra](https://git.clan.lol/clan/clan-infra). Please provide us with your public SOPS key so we can add you as an admin.

flake.lock generated
View File

@@ -1,5 +1,34 @@
{
"nodes": {
"data-mesher": {
"inputs": {
"flake-parts": [
"flake-parts"
],
"nixpkgs": [
"nixpkgs"
],
"systems": [
"systems"
],
"treefmt-nix": [
"treefmt-nix"
]
},
"locked": {
"lastModified": 1743379277,
"narHash": "sha256-4BNv+I6hksqZeRCrEHcQygK0MV1acjA8+L2TtA11H3c=",
"ref": "refs/heads/main",
"rev": "bf8c5448d826e047b842d6f2ac0fc698e976dda5",
"revCount": 375,
"type": "git",
"url": "https://git.clan.lol/clan/data-mesher"
},
"original": {
"type": "git",
"url": "https://git.clan.lol/clan/data-mesher"
}
},
"disko": {
"inputs": {
"nixpkgs": [
@@ -7,11 +36,11 @@
]
},
"locked": {
"lastModified": 1738148035,
"narHash": "sha256-KYOATYEwaKysL3HdHdS5kbQMXvzS4iPJzJrML+3TKAo=",
"lastModified": 1741786315,
"narHash": "sha256-VT65AE2syHVj6v/DGB496bqBnu1PXrrzwlw07/Zpllc=",
"owner": "nix-community",
"repo": "disko",
"rev": "18d0a984cc2bc82cf61df19523a34ad463aa7f54",
"rev": "0d8c6ad4a43906d14abd5c60e0ffe7b587b213de",
"type": "github"
},
"original": {
@@ -27,11 +56,11 @@
]
},
"locked": {
"lastModified": 1738453229,
"narHash": "sha256-7H9XgNiGLKN1G1CgRh0vUL4AheZSYzPm+zmZ7vxbJdo=",
"lastModified": 1741352980,
"narHash": "sha256-+u2UunDA4Cl5Fci3m7S643HzKmIDAe+fiXrLqYsR2fs=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "32ea77a06711b758da0ad9bd6a844c5740a87abd",
"rev": "f4330d22f1c5d2ba72d3d22df5597d123fdb60a9",
"type": "github"
},
"original": {
@@ -42,11 +71,11 @@
},
"nixos-facter-modules": {
"locked": {
"lastModified": 1736931726,
"narHash": "sha256-aY55yiifyo1XPPpbpH0kWlV1g2dNGBlx6622b7OK8ks=",
"lastModified": 1738752252,
"narHash": "sha256-/nA3tDdp/2g0FBy8966ppC2WDoyXtUWaHkZWL+N3ZKc=",
"owner": "numtide",
"repo": "nixos-facter-modules",
"rev": "fa11d87b61b2163efbb9aed7b7a5ae0299e5ab9c",
"rev": "60f8b8f3f99667de6a493a44375e5506bf0c48b1",
"type": "github"
},
"original": {
@@ -57,22 +86,20 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1738422722,
"narHash": "sha256-Q4vhtbLYWBUnjWD4iQb003Lt+N5PuURDad1BngGKdUs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "102a39bfee444533e6b4e8611d7e92aa39b7bec1",
"type": "github"
"lastModified": 315532800,
"narHash": "sha256-Ls4VPCGSQrm6k3FCokyonfX/sgIdZc8f5ZzqEdukBFA=",
"rev": "eb0e0f21f15c559d2ac7633dc81d079d1caf5f5f",
"type": "tarball",
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre776128.eb0e0f21f15c/nixexprs.tar.xz"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
"type": "tarball",
"url": "https://nixos.org/channels/nixpkgs-unstable/nixexprs.tar.xz"
}
},
"root": {
"inputs": {
"data-mesher": "data-mesher",
"disko": "disko",
"flake-parts": "flake-parts",
"nixos-facter-modules": "nixos-facter-modules",
@@ -89,16 +116,15 @@
]
},
"locked": {
"lastModified": 1736953253,
"narHash": "sha256-shJxzy7qypjq9hpETQ3gJsBZXO5E3KR0INca/xwiVp4=",
"owner": "pinpox",
"lastModified": 1743305778,
"narHash": "sha256-Ux/UohNtnM5mn9SFjaHp6IZe2aAnUCzklMluNtV6zFo=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "a7c6e64401b6dde13c0de90230cb64087c9d9693",
"rev": "8e873886bbfc32163fe027b8676c75637b7da114",
"type": "github"
},
"original": {
"owner": "pinpox",
"ref": "lazy-assertions",
"owner": "Mic92",
"repo": "sops-nix",
"type": "github"
}
@@ -125,11 +151,11 @@
]
},
"locked": {
"lastModified": 1738070913,
"narHash": "sha256-j6jC12vCFsTGDmY2u1H12lMr62fnclNjuCtAdF1a4Nk=",
"lastModified": 1743081648,
"narHash": "sha256-WRAylyYptt6OX5eCEBWyTwOEqEtD6zt33rlUkr6u3cE=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "bebf27d00f7d10ba75332a0541ac43676985dea3",
"rev": "29a3d7b768c70addce17af0869f6e2bd8f5be4b7",
"type": "github"
},
"original": {

View File

@@ -2,7 +2,7 @@
description = "clan.lol base operating system";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
nixpkgs.url = "https://nixos.org/channels/nixpkgs-unstable/nixexprs.tar.xz";
flake-parts.url = "github:hercules-ci/flake-parts";
flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs";
@@ -12,13 +12,23 @@
nixos-facter-modules.url = "github:numtide/nixos-facter-modules";
sops-nix.url = "github:pinpox/sops-nix/lazy-assertions";
sops-nix.url = "github:Mic92/sops-nix";
sops-nix.inputs.nixpkgs.follows = "nixpkgs";
systems.url = "github:nix-systems/default";
treefmt-nix.url = "github:numtide/treefmt-nix";
treefmt-nix.inputs.nixpkgs.follows = "nixpkgs";
data-mesher = {
url = "git+https://git.clan.lol/clan/data-mesher";
inputs = {
flake-parts.follows = "flake-parts";
nixpkgs.follows = "nixpkgs";
systems.follows = "systems";
treefmt-nix.follows = "treefmt-nix";
};
};
};
outputs =
@@ -43,10 +53,6 @@
meta.name = "clan-core";
};
flake = {
clan.templates = import ./templates { };
};
systems = import systems;
imports =
# only importing existing paths allows to minimize the flake for test
@@ -57,6 +63,7 @@
./devShell.nix
./docs/nix/flake-module.nix
./flakeModules/flake-module.nix
./flakeModules/demo_iso.nix
./lib/filter-clan-core/flake-module.nix
./lib/flake-module.nix
./nixosModules/clanCore/vars/flake-module.nix

View File

@@ -27,9 +27,13 @@ in
};
options.flake = flake-parts-lib.mkSubmoduleOptions {
clan = lib.mkOption { type = types.raw; };
clanInternals = lib.mkOption { type = types.raw; };
};
config = {
flake.clan = {
inherit (config.clan.clanInternals) templates;
};
flake.clanInternals = config.clan.clanInternals;
flake.nixosConfigurations = config.clan.nixosConfigurations;
};

flakeModules/demo_iso.nix Normal file
View File

@@ -0,0 +1,101 @@
{ self, ... }:
let
pkgs = self.inputs.nixpkgs.legacyPackages.x86_64-linux;
demoModule = {
imports = [
"${self.clanModules.mycelium}/roles/peer.nix"
# TODO do we need this? maybe not
(
{ modulesPath, ... }:
{
imports = [ "${modulesPath}/installer/cd-dvd/iso-image.nix" ];
}
)
];
};
clan_welcome = pkgs.writeShellApplication {
name = "clan_welcome";
runtimeInputs = [
pkgs.gum
pkgs.gitMinimal
pkgs.retry
self.packages.${pkgs.system}.clan-cli
];
text = ''
set -efu
gum confirm '
Welcome to Clan, a NixOS-based operating system for the CLAN project.
This installer can be used to try out clan on your machine; for that reason we set up a cooperative environment to play and hack together :)
' || exit 1
until retry -t 5 ping -c 1 -W 1 git.clan.lol &> /dev/null; do
# TODO make this nicer
nmtui
done
if ! test -e ~/clan-core; then
# git clone https://git.clan.lol/clan/clan-core.git ~/clan-core
cp -rv ${self} clan-core
fi
cd clan-core
clan machines morph demo-template --i-will-be-fired-for-using-this
exit
'';
};
morphModule = {
imports = [
(
{ modulesPath, ... }:
{
imports = [ "${modulesPath}/image/images.nix" ];
}
)
];
image.modules.iso.isoImage.squashfsCompression = "zstd -Xcompression-level 1";
networking.networkmanager.enable = true;
services.getty.autologinUser = "root";
programs.bash.interactiveShellInit = ''
if [[ "$(tty)" =~ /dev/(tty1|hvc0|ttyS0)$ ]]; then
# workaround for https://github.com/NixOS/nixpkgs/issues/219239
systemctl restart systemd-vconsole-setup.service
reset
${clan_welcome}/bin/clan_welcome
fi
'';
};
in
{
clan.templates.machine.demo-template = {
description = "Demo machine for the CLAN project";
# path = pkgs.runCommand "demo-template" {} ''
# mkdir -p $out
# echo '{ self, ... }: { imports = [ self.nixosModules.demoModule ]; }' > $out/configuration.nix
# '';
path = ./demo_template;
};
flake.nixosModules = { inherit morphModule demoModule; };
perSystem =
{ system, lib, ... }:
{
packages =
lib.mkIf
(lib.any (x: x == system) [
"x86_64-linux"
"aarch64-linux"
])
{
demo-iso =
(self.inputs.nixpkgs.lib.nixosSystem {
modules = [
{ nixpkgs.hostPlatform = system; }
morphModule
];
}).config.system.build.images.iso;
};
};
}

View File

@@ -0,0 +1,38 @@
{ pkgs, config, ... }:
{
fileSystems."/".device = "nodev";
boot.loader.grub.device = "nodev";
clan.core.vars.settings.secretStore = "fs";
clan.core.vars.generators.mycelium = {
files."key" = { };
files."ip".secret = false;
files."pubkey".secret = false;
runtimeInputs = [
pkgs.mycelium
pkgs.coreutils
pkgs.jq
];
script = ''
timeout 5 mycelium --key-file "$out"/key || :
mycelium inspect --key-file "$out"/key --json | jq -r .publicKey > "$out"/pubkey
mycelium inspect --key-file "$out"/key --json | jq -r .address > "$out"/ip
'';
};
services.mycelium = {
enable = true;
addHostedPublicNodes = true;
openFirewall = true;
keyFile = config.clan.core.vars.generators.mycelium.files.key.path;
};
services.getty.autologinUser = "root";
programs.bash.interactiveShellInit = ''
if [[ "$(tty)" =~ /dev/(tty1|hvc0|ttyS0)$ ]]; then
# workaround for https://github.com/NixOS/nixpkgs/issues/219239
systemctl restart systemd-vconsole-setup.service
reset
echo "your mycelium IP is: $(cat /var/lib/mycelium/ip)"
fi
'';
}

View File

@@ -23,6 +23,7 @@
"*.clan-flake"
"*.code-workspace"
"*.pub"
"*.priv"
"*.typed"
"*.age"
"*.list"
@@ -37,6 +38,7 @@
# prettier messes up our mkdocs flavoured markdown
"*.md"
"checks/data-mesher/vars/*"
"checks/lib/ssh/privkey"
"checks/lib/ssh/pubkey"
"checks/matrix-synapse/synapse-registration_shared_secret"

View File

@@ -56,7 +56,7 @@
"machines": {
"test-inventory-machine": {
"config": {
"packages": ["zed-editor"]
"packages": ["hello"]
},
"extraModules": []
}

lib/README.md Normal file
View File

@@ -0,0 +1,72 @@
# ClanLib
This folder is supposed to contain clan specific nix functions.
Such as:
- build-clan function
- select
- build-inventory function
- json-schema-converter
## Structure
Similar to `nixpkgs/lib`, this produces a recursive attribute set via a fixed-point.
Functions within lib can depend on each other to create new abstractions.
### Conventions
Note: This is not consistently enforced yet.
If you start a new feature, or refactor or touch existing ones, please help us move towards the structure illustrated below.
A single feature-set/module may be organized like this:
```nix
# ↓ The final clanLib
{lib, clanLib, ...}:
# ↓ portion to add to clanLib
{
inventory.resolveTags = tags: inventory.machines; # implementation
inventory.buildMachines = x: clanLib.inventory.resolveTags x; # implementation
}
```
Every bigger feature should live in a subfolder with the feature name.
It should contain two files:
- `impl.nix`
- `test.nix`
- Everything else may be adapted as needed.
Example filetree:
```sh
.
├── default.nix
├── feature_foo
│ ├── impl.nix
│ └── test.nix
└── feature_bar
├── impl.nix
├── complex-subfeature
│ ├── impl.nix
│ └── test.nix
├── testless-subfeature # <- We immediately see that this feature is not tested on its own.
│ └── impl.nix
└── test.nix
```
```nix
# default.nix
{lib, clanLib, ...}:
{
inventory.resolveTags = import ./resolveTags { inherit lib clanLib; };
}
```
## Testing
For testing we use [nix-unit](https://github.com/nix-community/nix-unit)
TODO: define a helper that automatically hooks up `tests` in `flake.legacyPackages` and a corresponding buildable `checks` attribute
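As a sketch of the convention above (file and test names are hypothetical), a `test.nix` for nix-unit is just an attribute set of tests, each comparing `expr` against `expected`:
```nix
# feature_foo/test.nix (hypothetical)
{ lib, ... }:
{
  # nix-unit evaluates 'expr' and compares the result with 'expected'
  test_unique = {
    expr = lib.unique [ "a" "a" "b" ];
    expected = [ "a" "b" ];
  };
}
```
Such a file can then be exposed via `flake.legacyPackages` and run with `nix-unit --flake .#legacyPackages.<system>.<attrName>`.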

View File

@@ -1,23 +0,0 @@
{
lib,
self,
...
}:
let
# Returns an attrset with inputs that have the attribute `clanModules`
inputsWithClanModules = lib.filterAttrs (
_name: value: builtins.hasAttr "clanModules" value
) self.inputs;
flattenedClanModules = lib.foldl' (
acc: input:
lib.mkMerge [
acc
input.clanModules
]
) { } (lib.attrValues inputsWithClanModules);
in
{
inventory.modules = flattenedClanModules;
}

View File

@@ -43,10 +43,7 @@ in
include = [
"flakeModules"
"inventory.json"
"lib/build-clan"
"lib/default.nix"
"lib/flake-module.nix"
"lib/inventory"
"lib"
"machines"
"nixosModules"
];

View File

@@ -69,6 +69,15 @@ in
```
'';
};
templates = lib.mkOption {
type = types.submodule { imports = [ ./templates/interface.nix ]; };
default = { };
description = ''
Define Clan templates.
'';
};
inventory = lib.mkOption {
type = types.submodule { imports = [ ../inventory/build-inventory/interface.nix ]; };
description = ''
@@ -112,11 +121,11 @@ in
type = types.lazyAttrsOf types.raw;
default = { };
};
# flake.clanInternals
clanInternals = lib.mkOption {
# Hide from documentation. Exposes internals to the cli.
visible = false;
# type = types.raw;
# ClanInternals
type = types.submodule {
options = {
@@ -132,12 +141,14 @@ in
moduleSchemas = lib.mkOption { type = lib.types.raw; };
inventoryFile = lib.mkOption { type = lib.types.raw; };
# The machine 'imports' generated by the inventory per machine
serviceConfigs = lib.mkOption { type = lib.types.raw; };
inventoryClass = lib.mkOption { type = lib.types.raw; };
# new attribute
distributedServices = lib.mkOption { type = lib.types.raw; };
# clan-core's modules
clanModules = lib.mkOption { type = lib.types.raw; };
source = lib.mkOption { type = lib.types.raw; };
meta = lib.mkOption { type = lib.types.raw; };
lib = lib.mkOption { type = lib.types.raw; };
clanLib = lib.mkOption { type = lib.types.raw; };
all-machines-json = lib.mkOption { type = lib.types.raw; };
machines = lib.mkOption { type = lib.types.raw; };
machinesFunc = lib.mkOption { type = lib.types.raw; };

View File

@@ -42,7 +42,7 @@ let
# map from machine name to service configuration
# { ${machineName} :: Config }
serviceConfigs = (
inventoryClass = (
buildInventory {
inherit inventory directory;
}
@@ -76,7 +76,10 @@ let
(machines.${name} or { })
# Inherit the inventory assertions ?
# { inherit (mergedInventory) assertions; }
{ imports = serviceConfigs.machines.${name}.machineImports or [ ]; }
{ imports = inventoryClass.machines.${name}.machineImports or [ ]; }
# Import the distribute services
{ imports = config.clanInternals.distributedServices.allMachines.${name} or [ ]; }
(
{
# Settings
@@ -96,12 +99,6 @@ let
networking.hostName = lib.mkDefault name;
# speeds up nix commands by using the nixpkgs from the host system (especially useful in VMs)
nix.registry.nixpkgs.to = lib.mkDefault {
type = "path";
path = lib.mkDefault nixpkgs;
};
# For vars we need to override the system so we run vars
# generators on the machine that runs `clan vars generate`. If a
# users is using the `pkgsForSystem`, we don't set
@@ -171,7 +168,6 @@ let
in
{
imports = [
./auto-imports.nix
# Merge the inventory file
{
inventory = _: {
@@ -204,8 +200,12 @@ in
clanInternals = {
moduleSchemas = clan-core.lib.modules.getModulesSchema config.inventory.modules;
inherit serviceConfigs;
inherit (clan-core) clanModules;
inherit inventoryClass;
distributedServices = import ../distributed-service/inventory-adapter.nix {
inherit lib inventory;
flake = config.self;
};
inherit (clan-core) clanModules clanLib;
inherit inventoryFile;
inventoryValuesPrios =
# Temporary workaround
@@ -217,9 +217,6 @@ in
templates = config.templates;
inventory = config.inventory;
meta = config.inventory.meta;
lib = {
inherit (clan-core.lib) select;
};
source = "${clan-core}";

View File

@@ -0,0 +1,57 @@
{
lib,
...
}:
let
inherit (lib) types;
templateType = types.submodule (
{ name, ... }:
{
options.description = lib.mkOption {
type = types.str;
default = name;
description = ''
The name of the template.
'';
};
options.path = lib.mkOption {
type = types.path;
description = ''
Holds the path to the clan template.
'';
};
}
);
in
{
options = {
# clan.templates.clan
clan = lib.mkOption {
type = types.attrsOf templateType;
default = { };
description = ''
Holds the different clan templates.
'';
};
# clan.templates.disko
disko = lib.mkOption {
type = types.attrsOf templateType;
default = { };
description = ''
Holds different disko templates.
'';
};
# clan.templates.machine
machine = lib.mkOption {
type = types.attrsOf templateType;
default = { };
description = ''
Holds the different machine templates.
'';
};
};
}

View File

@@ -30,7 +30,7 @@ in
expr = shallowForceAllAttributes config;
expectedError = {
type = "ThrownError";
msg = "A definition for option `directory' is not of type `path*";
msg = "A definition for option `directory' is not of type `absolute path*";
};
};

View File

@@ -1,25 +1,35 @@
{
lib,
clan-core,
self,
nixpkgs,
...
}:
let
# Produces the
# 'clanLib' attribute set
# Wrapped with fix, so we can depend on other clanLib functions without passing the whole flake
lib.fix (clanLib: {
# TODO:
# Some bad lib functions that depend on something in 'self'.
# We should reduce the dependency on 'self' aka the 'flake' object
# This makes it easier to test
# most of the time passing the whole flake is unnecessary
callLib = file: args: import file { inherit lib clanLib; } // args;
evalClan = import ./eval-clan-modules {
inherit clan-core lib;
inherit lib;
clan-core = self;
pkgs = nixpkgs.legacyPackages.x86_64-linux;
};
in
{
inherit (evalClan) evalClanModules evalClanModulesWithRoles;
buildClan = import ./build-clan { inherit lib nixpkgs clan-core; };
buildClan = import ./build-clan {
inherit lib nixpkgs;
clan-core = self;
};
# ------------------------------------
# Lib functions that don't depend on 'self'
inventory = clanLib.callLib ./inventory { };
modules = clanLib.callLib ./frontmatter { };
facts = import ./facts.nix { inherit lib; };
inventory = import ./inventory { inherit lib clan-core; };
values = import ./values { inherit lib; };
jsonschema = import ./jsonschema { inherit lib; };
modules = import ./frontmatter {
inherit lib;
self = clan-core;
};
select = import ./select.nix;
}
})

View File

@@ -0,0 +1,33 @@
{ self, inputs, ... }:
let
inputOverrides = builtins.concatStringsSep " " (
builtins.map (input: " --override-input ${input} ${inputs.${input}}") (builtins.attrNames inputs)
);
in
{
perSystem =
{
pkgs,
lib,
system,
...
}:
{
# Run: nix-unit --extra-experimental-features flakes --flake .#legacyPackages.x86_64-linux.<attrName>
legacyPackages.evalTest-distributedServices = import ./tests {
inherit lib self;
};
checks = {
lib-distributedServices-eval = pkgs.runCommand "tests" { nativeBuildInputs = [ pkgs.nix-unit ]; } ''
export HOME="$(realpath .)"
nix-unit --eval-store "$HOME" \
--extra-experimental-features flakes \
${inputOverrides} \
--flake ${self}#legacyPackages.${system}.evalTest-distributedServices
touch $out
'';
};
};
}

View File

@@ -0,0 +1,199 @@
# Adapter function between the inventory.instances and the clan.service module
#
# Data flow:
# - inventory.instances -> Adapter -> clan.service module -> Service Resources (i.e. NixosModules per Machine, Vars per Service, etc.)
#
# What this file does:
#
# - Resolves the [Module] to an actual module-path and imports it.
# - Groups together all the same modules into a single import and creates all instances for it.
# - Resolves the inventory tags into machines. Tags don't exist at the service level.
# Also combines the settings for 'machines' and 'tags'.
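#
# A hypothetical example of the input this adapter consumes (for illustration only):
#
#   inventory.instances."instance_foo" = {
#     module = { name = "A"; input = null; }; # input == null: module comes from this flake
#     roles.peer.machines.jon = { settings = { }; };
#     roles.peer.tags.all = { settings = { }; };
#   };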
{
lib,
# This is used to resolve the module imports from 'flake.inputs'
flake,
# The clan inventory
inventory,
}:
let
# Returns the list of machine names
# { ... } -> [ string ]
resolveTags =
{
# Available InventoryMachines :: { {name} :: { tags = [ string ]; }; }
machines,
# Requested members :: { machines, tags }
# Those will be resolved against the available machines
members,
# Not needed for resolution - only for error reporting
roleName,
instanceName,
}:
{
machines =
members.machines or [ ]
++ (builtins.foldl' (
acc: tag:
let
# For error printing
availableTags = lib.foldlAttrs (
acc: _: v:
v.tags or [ ] ++ acc
) [ ] (machines);
tagMembers = builtins.attrNames (lib.filterAttrs (_n: v: builtins.elem tag v.tags or [ ]) machines);
in
if tagMembers == [ ] then
lib.warn ''
Service instance '${instanceName}': - ${roleName} tags: no machine with tag '${tag}' found.
Available tags: ${builtins.toJSON (lib.unique availableTags)}
'' acc
else
acc ++ tagMembers
) [ ] members.tags or [ ]);
};
machineHasTag = machineName: tagName: lib.elem tagName inventory.machines.${machineName}.tags;
# map the instances into the module
importedModuleWithInstances = lib.mapAttrs (
instanceName: instance:
let
# TODO:
resolvedModuleSet =
# If module.input is null, take the modules defined in this flake
# Otherwise it's an external input which provides the modules via the 'clan.modules' attribute
if instance.module.input == null then
inventory.modules
else
let
input =
flake.inputs.${instance.module.input} or (throw ''
Flake doesn't provide input with name '${instance.module.input}'
Choose one of the following inputs:
- ${
builtins.concatStringsSep "\n- " (
lib.attrNames (lib.filterAttrs (_name: input: input ? clan) flake.inputs)
)
}
To import a local module from 'inventory.modules' remove the 'input' attribute from the module definition
Remove the following line from the module definition:
...
- module.input = "${instance.module.input}"
'');
clanAttrs =
input.clan
or (throw "It seems the flake input ${instance.module.input} doesn't export any clan resources");
in
clanAttrs.modules;
resolvedModule =
resolvedModuleSet.${instance.module.name}
or (throw "flake doesn't provide clan-module with name ${instance.module.name}");
# Every instance includes machines via roles
# :: { client :: ... }
instanceRoles = lib.mapAttrs (
roleName: role:
let
resolvedMachines = resolveTags {
members = {
# Explicit members
machines = lib.attrNames role.machines;
# Resolved Members
tags = lib.attrNames role.tags;
};
inherit (inventory) machines;
inherit instanceName roleName;
};
in
# instances.<instanceName>.roles.<roleName> =
{
machines = lib.genAttrs resolvedMachines.machines (
machineName:
let
machineSettings = instance.roles.${roleName}.machines.${machineName}.settings or { };
settingsViaTags = lib.filterAttrs (
tagName: _: machineHasTag machineName tagName
) instance.roles.${roleName}.tags;
in
{
# TODO: Do we want to wrap settings with
# setDefaultModuleLocation "inventory.instances.${instanceName}.roles.${roleName}.tags.${tagName}";
settings = {
imports = [
machineSettings
] ++ lib.attrValues (lib.mapAttrs (_tagName: v: v.settings) settingsViaTags);
};
}
);
# Maps to settings for the role.
# In other words this sets the following path of a clan.service module:
# instances.<instanceName>.roles.<roleName>.settings
settings = role.settings;
}
) instance.roles;
in
{
inherit (instance) module;
inherit resolvedModule instanceRoles;
}
) inventory.instances;
# TODO: Eagerly check the _class of the resolved module
evals = lib.mapAttrs (
_module_ident: instances:
(lib.evalModules {
class = "clan.service";
modules =
[
./service-module.nix
# Import the resolved module
(builtins.head instances).instance.resolvedModule
]
# Include all the instances that correlate to the resolved module
++ (builtins.map (v: {
instances.${v.instanceName}.roles = v.instance.instanceRoles;
}) instances);
})
) grouped;
# Group the instances by the module they resolve to
# This is necessary to evaluate the module in a single pass
# :: { <module.input>_<module.name> :: [ { name, value } ] }
# Since 'perMachine' needs access to all the instances we should include them as a whole
grouped = lib.foldlAttrs (
acc: instanceName: instance:
let
inputName = if instance.module.input == null then "self" else instance.module.input;
id = inputName + "-" + instance.module.name;
in
acc
// {
${id} = acc.${id} or [ ] ++ [
{
inherit instanceName instance;
}
];
}
) { } importedModuleWithInstances;
# TODO: Return an attribute set of resources instead of a plain list of nixosModules
allMachines = lib.foldlAttrs (
acc: _name: eval:
acc
// lib.mapAttrs (
machineName: result: acc.${machineName} or [ ] ++ [ result.nixosModule ]
) eval.config.result.final
) { } evals;
in
{
inherit importedModuleWithInstances grouped;
inherit evals allMachines;
}

View File

@@ -0,0 +1,514 @@
{ lib, config, ... }:
let
inherit (lib) mkOption types;
inherit (types) attrsWith submoduleWith;
# TODO:
# Remove once this gets merged upstream; performs in O(n*log(n)) instead of O(n^2)
# https://github.com/NixOS/nixpkgs/pull/355616/files
uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list);
checkInstanceRoles =
instanceName: instanceRoles:
let
unmatchedRoles = lib.filter (roleName: !lib.elem roleName (lib.attrNames config.roles)) (
lib.attrNames instanceRoles
);
in
if unmatchedRoles == [ ] then
true
else
throw ''
inventory instance: 'instances.${instanceName}' defines the following roles:
${builtins.toJSON unmatchedRoles}
But the clan-service module '${config.manifest.name}' defines roles:
${builtins.toJSON (lib.attrNames config.roles)}
'';
# checkInstanceSettings =
# instanceName: instanceSettings:
# let
# unmatchedRoles = 1;
# in
# unmatchedRoles;
/**
Merges the role- and machine-settings using the role interface
Arguments:
- roleName: The name of the role
- instanceName: The name of the instance
- settings: The settings of the machine. Leave empty to get the role settings
Returns: evalModules result
The caller is responsible to use .config or .extendModules
*/
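# A hypothetical call, for illustration (not part of the module):
#   (evalMachineSettings {
#     roleName = "peer";
#     instanceName = "instance_foo";
#     machineName = "jon";
#     settings = { };
#   }).config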
# TODO: evaluate against the role.settings statically and use extendModules to get the machineSettings
# Doing this might improve performance
evalMachineSettings =
{
roleName,
instanceName,
machineName ? null,
settings,
}:
lib.evalModules {
# Prefix for better error reporting
# This prints the path where the option should be defined rather than the plain path within settings
# "The option `instances.foo.roles.server.machines.test.settings.<>' was accessed but has no value defined. Try setting the option."
prefix =
[
"instances"
instanceName
"roles"
roleName
]
++ (lib.optionals (machineName != null) [
"machines"
machineName
])
++ [ "settings" ];
# This may lead to better error reporting
# And catch errors if anyone tried to import i.e. a nixosConfiguration
# Set some class: i.e "network.server.settings"
class = lib.concatStringsSep "." [
config.manifest.name
roleName
"settings"
];
modules = [
(lib.setDefaultModuleLocation "Via clan.service module: roles.${roleName}.interface"
config.roles.${roleName}.interface
)
(lib.setDefaultModuleLocation "inventory.instances.${instanceName}.roles.${roleName}.settings"
config.instances.${instanceName}.roles.${roleName}.settings
)
settings
# Don't set the module location here
# This should already be set by the tags resolver
# config.instances.${instanceName}.roles.${roleName}.machines.${machineName}.settings
];
};
/**
Makes a module extensible
returning its config
and making it extensible via '__functor' polymorphism
Example:
```nix-repl
res = makeExtensibleConfig evalModules { modules = [ { options.foo = mkOption { default = 42; }; } ]; };
res
=>
{
  foo = 42;
  __functor = <function>;
}
# This allows overriding using mkDefault, mkForce, etc.
res { foo = 100; }
=>
{
  foo = 100;
  __functor = <function>;
}
```
*/
makeExtensibleConfig =
f: args:
let
makeModuleExtensible =
eval:
eval.config
// {
__functor = _self: m: makeModuleExtensible (eval.extendModules { modules = lib.toList m; });
};
in
makeModuleExtensible (f args);
/**
Apply the settings to the instance
Takes a [ServiceInstance] :: { roles :: { roleName :: { machines :: { machineName :: { settings :: { ... } } } } } }
Returns the same object but evaluates the settings against the interface.
We need this because 'perMachine' shouldn't gain access to the raw deferred module.
*/
applySettings =
instanceName: instance:
lib.mapAttrs (roleName: role: {
machines = lib.mapAttrs (machineName: v: {
# TODO: evaluate the settings against the interface
# settings = (evalMachineSettings { inherit roleName instanceName; inherit (v) settings; }).config;
settings = (
makeExtensibleConfig evalMachineSettings {
inherit roleName instanceName machineName;
inherit (v) settings;
}
);
}) role.machines;
# TODO: evaluate the settings against the interface
settings = (
makeExtensibleConfig evalMachineSettings {
inherit roleName instanceName;
inherit (role) settings;
}
);
}) instance.roles;
in
{
options = {
instances = mkOption {
default = throw ''
The clan service module ${config.manifest.name} doesn't define any instances.
Did you forget to create instances via 'inventory.instances'?
'';
type = attrsWith {
placeholder = "instanceName";
elemType = submoduleWith {
modules = [
(
{ name, ... }:
{
# options.settings = mkOption {
# description = "settings of 'instance': ${name}";
# default = {};
# apply = v: lib.seq (checkInstanceSettings name v) v;
# };
options.roles = mkOption {
default = throw ''
Instance '${name}' of service '${config.manifest.name}' must define members via 'roles'.
To include a machine:
'instances.${name}.roles.<role-name>.machines.<your-machine-name>' must be set.
'';
type = attrsWith {
placeholder = "roleName";
elemType = submoduleWith {
modules = [
(
{ ... }:
{
# instances.{instanceName}.roles.{roleName}.machines
options.machines = mkOption {
type = attrsWith {
placeholder = "machineName";
elemType = submoduleWith {
modules = [
(m: {
options.settings = mkOption {
type = types.raw;
description = "Settings of '${name}-machine': ${m.name}.";
default = { };
};
})
];
};
};
};
# instances.{instanceName}.roles.{roleName}.settings
# options._settings = mkOption { };
# options._settingsViaTags = mkOption { };
# A deferred module that combines _settingsViaTags with _settings
options.settings = mkOption {
type = types.raw;
description = "Settings of 'role': ${name}";
default = { };
};
}
)
];
};
};
apply = v: lib.seq (checkInstanceRoles name v) v;
};
}
)
];
};
};
};
manifest = mkOption {
description = "Meta information about this module itself";
type = submoduleWith {
modules = [
{
options = {
name = mkOption {
description = ''
The name of the module
Mainly used to create an error context while evaluating.
This helps with backtracking which module was included, and where an error originally came from.
'';
type = types.str;
};
};
}
];
};
};
roles = mkOption {
default = throw ''
Role behavior of service '${config.manifest.name}' must be defined.
A 'clan.service' module should always define its behavior via 'roles'
---
To add the role:
`roles.client = {}`
To define multiple instance behavior:
`roles.client.perInstance = { ... }: {}`
'';
type = attrsWith {
placeholder = "roleName";
elemType = submoduleWith {
modules = [
(
{ name, ... }:
let
roleName = name;
in
{
options.interface = mkOption {
type = types.deferredModule;
# TODO: Default to an empty module
# need to test that an empty module can be evaluated to empty settings
default = { };
};
options.perInstance = mkOption {
type = types.deferredModuleWith {
staticModules = [
# Common output format
# As described by adr
# { nixosModule, services, ... }
(
{ ... }:
{
options.nixosModule = mkOption { default = { }; };
options.services = mkOption {
type = attrsWith {
placeholder = "serviceName";
elemType = submoduleWith {
modules = [ ./service-module.nix ];
};
};
default = { };
};
}
)
];
};
default = { };
apply =
/**
This apply transforms the module into a function that takes arguments and returns an evaluated module
The arguments of the function are determined by its scope:
-> 'perInstance' maps over all instances and all machines, hence it takes 'instanceName' and 'machineName' as iterator arguments
*/
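# Hypothetical call, for illustration (this is how result.allRoles below uses it):
#   roleCfg.perInstance "instance_foo" "jon"
#   => { nixosModule = { ... }; services = { ... }; }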
v: instanceName: machineName:
(lib.evalModules {
specialArgs = {
inherit instanceName;
machine = {
name = machineName;
roles = applySettings instanceName config.instances.${instanceName};
};
settings = (
makeExtensibleConfig evalMachineSettings {
inherit roleName instanceName machineName;
settings =
config.instances.${instanceName}.roles.${roleName}.machines.${machineName}.settings or { };
}
);
};
modules = [ v ];
}).config;
};
}
)
];
};
};
};
perMachine = mkOption {
type = types.deferredModuleWith {
staticModules = [
# Common output format
# As described by adr
# { nixosModule, services, ... }
(
{ ... }:
{
options.nixosModule = mkOption { default = { }; };
options.services = mkOption {
type = attrsWith {
placeholder = "serviceName";
elemType = submoduleWith {
modules = [ ./service-module.nix ];
};
};
default = { };
};
}
)
];
};
default = { };
apply =
v: machineName: machineScope:
(lib.evalModules {
specialArgs = {
/**
This apply transforms the module into a function that takes arguments and returns an evaluated module
The arguments of the function are determined by its scope:
-> 'perMachine' maps over all machines of a service, taking 'machineName' and a helper 'machineScope' (some aggregated attributes) as iterator arguments
The scope is used to collect the 'roles' of all 'instances' that the machine is part of; both are injected into the specialArgs
*/
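# Hypothetical call, for illustration (this is how result.allMachines below consumes it):
#   config.perMachine "jon" { instances = { ... }; }
#   => { nixosModule = { ... }; services = { ... }; }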
machine = {
name = machineName;
roles =
let
collectRoles =
instances:
lib.foldlAttrs (
r: _instanceName: instance:
r
++ lib.foldlAttrs (
r2: roleName: _role:
r2 ++ [ roleName ]
) [ ] instance.roles
) [ ] instances;
in
uniqueStrings (collectRoles machineScope.instances);
};
inherit (machineScope) instances;
# There are no machine settings.
# Settings are always role-specific; settings that apply to a machine globally would mean merging all role and all instance settings into a single module.
# That would likely cause conflicts, because it is inherently wrong.
settings = throw ''
'perMachine' doesn't have a 'settings' argument.
Alternatives:
- 'instances.<instanceName>.roles.<roleName>.settings' should be used instead.
- 'instances.<instanceName>.roles.<roleName>.machines.<machineName>.settings' should be used instead.
If that is insufficient, you might also consider using 'roles.<roleName>.perInstance' instead of 'perMachine'.
'';
};
modules = [ v ];
}).config;
};
# ---
# Place the result in _module.result to mark them as "internal" and discourage usage/overrides
#
# ---
# Intermediate result by mapping over the 'roles', 'instances', and 'machines'.
# During this step the 'perMachine' and 'perInstance' are applied.
# The result-set for a single machine can then be found by collecting all 'nixosModules' recursively.
result.allRoles = mkOption {
readOnly = true;
default = lib.mapAttrs (roleName: roleCfg: {
allInstances = lib.mapAttrs (instanceName: instanceCfg: {
allMachines = lib.mapAttrs (
machineName: _machineCfg: roleCfg.perInstance instanceName machineName
) instanceCfg.roles.${roleName}.machines or { };
}) config.instances;
}) config.roles;
};
result.allMachines = mkOption {
readOnly = true;
default =
let
collectMachinesFromInstance =
instance:
uniqueStrings (
lib.foldlAttrs (
acc: _roleName: role:
acc ++ (lib.attrNames role.machines)
) [ ] instance.roles
);
# The service machines are defined by collecting all instance machines
serviceMachines = lib.foldlAttrs (
acc: instanceName: instance:
acc
// lib.genAttrs (collectMachinesFromInstance instance) (machineName:
# Store information why this machine is part of the service
# MachineOrigin :: { instances :: [ string ]; }
{
# Helper attribute to track which instances include this machine
instances = [ instanceName ] ++ acc.${machineName}.instances or [ ];
# All roles of the machine ?
roles = lib.foldlAttrs (
acc2: roleName: role:
if builtins.elem machineName (lib.attrNames role.machines) then acc2 ++ [ roleName ] else acc2
) [ ] instance.roles;
})
) { } config.instances;
allMachines = lib.mapAttrs (_machineName: MachineOrigin: {
# Filter out instances that the machine is not part of
instances = lib.mapAttrs (_n: v: { roles = v; }) (
lib.filterAttrs (instanceName: _: builtins.elem instanceName MachineOrigin.instances) (
# Instances with evaluated settings
lib.mapAttrs applySettings config.instances
)
);
}) serviceMachines;
in
# allMachines;
lib.mapAttrs config.perMachine allMachines;
};
result.final = mkOption {
readOnly = true;
default = lib.mapAttrs (
machineName: machineResult:
let
# config.result.allRoles.client.allInstances.bar.allMachines.test
# instanceResults = config.result.allRoles.client.allInstances.bar.allMachines.${machineName};
instanceResults = lib.foldlAttrs (
acc: roleName: role:
acc
++ lib.foldlAttrs (
acc: instanceName: instance:
if instance.allMachines.${machineName}.nixosModule or { } != { } then
acc
++ [
(lib.setDefaultModuleLocation
"Via instances.${instanceName}.roles.${roleName}.machines.${machineName}"
instance.allMachines.${machineName}.nixosModule
)
]
else
acc
) [ ] role.allInstances
) [ ] config.result.allRoles;
in
{
inherit instanceResults;
nixosModule = {
imports = [
# For error backtracing. This module was produced by the 'perMachine' function
# TODO: check if we need this or if it leads to better errors if we pass the underlying module locations
(lib.setDefaultModuleLocation "clan.service: ${config.manifest.name} - via perMachine" machineResult.nixosModule)
] ++ instanceResults;
};
}
) config.result.allMachines;
};
};
}

View File

@@ -0,0 +1,327 @@
{
lib,
...
}:
let
inherit (lib)
evalModules
;
evalInventory =
m:
(evalModules {
# Static modules
modules = [
../../inventory/build-inventory/interface.nix
{
modules.test = { };
}
m
];
}).config;
flakeFixture = {
inputs = { };
};
callInventoryAdapter =
inventoryModule:
import ../inventory-adapter.nix {
inherit lib;
flake = flakeFixture;
inventory = evalInventory inventoryModule;
};
in
{
test_simple =
let
res = callInventoryAdapter {
# Authored module
# A minimal module looks like this
# It isn't exactly doing anything but it's a valid module that produces an output
modules."simple-module" = {
_class = "clan.service";
manifest = {
name = "netwitness";
};
};
# User config
instances."instance_foo" = {
module = {
name = "simple-module";
};
};
};
in
{
# Test that the module is mapped into the output
# We might change the attribute name in the future
expr = res.evals ? "self-simple-module";
expected = true;
};
# A module can be imported multiple times
# A module can also have multiple instances within the same module
# This means modules must be grouped together and imported once
# All instances should be included within one evaluation to make all of them available
test_module_grouping =
let
res = callInventoryAdapter {
# Authored module
# A minimal module looks like this
# It isn't exactly doing anything but it's a valid module that produces an output
modules."A" = {
_class = "clan.service";
manifest = {
name = "A-name";
};
perMachine = { }: { };
};
modules."B" = {
_class = "clan.service";
manifest = {
name = "B-name";
};
perMachine = { }: { };
};
# User config
instances."instance_foo" = {
module = {
name = "A";
};
};
instances."instance_bar" = {
module = {
name = "B";
};
};
instances."instance_baz" = {
module = {
name = "A";
};
};
};
in
{
# Test that the module is mapped into the output
# We might change the attribute name in the future
expr = lib.mapAttrs (_n: v: builtins.length v) res.grouped;
expected = {
self-A = 2;
self-B = 1;
};
};
test_creates_all_instances =
let
res = callInventoryAdapter {
# Authored module
# A minimal module looks like this
# It isn't exactly doing anything but it's a valid module that produces an output
modules."A" = {
_class = "clan.service";
manifest = {
name = "network";
};
perMachine = { }: { };
};
instances."instance_foo" = {
module = {
name = "A";
};
};
instances."instance_bar" = {
module = {
name = "A";
};
};
instances."instance_zaza" = {
module = {
name = "B";
};
};
};
in
{
# Test that the module is mapped into the output
# We might change the attribute name in the future
expr = lib.attrNames res.evals.self-A.config.instances;
expected = [
"instance_bar"
"instance_foo"
];
};
# Membership via roles
test_add_machines_directly =
let
res = callInventoryAdapter {
# Authored module
# A minimal module looks like this
# It isn't exactly doing anything but it's a valid module that produces an output
modules."A" = {
_class = "clan.service";
manifest = {
name = "network";
};
# Define a role without special behavior
roles.peer = { };
# perMachine = {}: {};
};
machines = {
jon = { };
sara = { };
hxi = { };
};
instances."instance_foo" = {
module = {
name = "A";
};
roles.peer.machines.jon = { };
};
instances."instance_bar" = {
module = {
name = "A";
};
roles.peer.machines.sara = { };
};
instances."instance_zaza" = {
module = {
name = "B";
};
roles.peer.tags.all = { };
};
};
in
{
# Test that the module is mapped into the output
# We might change the attribute name in the future
expr = lib.attrNames res.evals.self-A.config.result.allMachines;
expected = [
"jon"
"sara"
];
};
# Membership via tags
test_add_machines_via_tags =
let
res = callInventoryAdapter {
# Authored module
# A minimal module looks like this
# It isn't exactly doing anything but it's a valid module that produces an output
modules."A" = {
_class = "clan.service";
manifest = {
name = "network";
};
# Define a role without special behavior
roles.peer = { };
# perMachine = {}: {};
};
machines = {
jon = {
tags = [ "foo" ];
};
sara = {
tags = [ "foo" ];
};
hxi = { };
};
instances."instance_foo" = {
module = {
name = "A";
};
roles.peer.tags.foo = { };
};
instances."instance_zaza" = {
module = {
name = "B";
};
roles.peer.tags.all = { };
};
};
in
{
# Test that the module is mapped into the output
# We might change the attribute name in the future
expr = lib.attrNames res.evals.self-A.config.result.allMachines;
expected = [
"jon"
"sara"
];
};
per_machine_args = import ./per_machine_args.nix { inherit lib callInventoryAdapter; };
# test_per_machine_receives_instances =
# let
# res = callInventoryAdapter {
# # Authored module
# # A minimal module looks like this
# # It isn't exactly doing anything but it's a valid module that produces an output
# modules."A" = {
# _class = "clan.service";
# manifest = {
# name = "network";
# };
# # Define a role without special behavior
# roles.peer = { };
# perMachine =
# { instances, ... }:
# {
# nixosModule = instances;
# };
# };
# machines = {
# jon = { };
# sara = { };
# };
# instances."instance_foo" = {
# module = {
# name = "A";
# };
# roles.peer.machines.jon = { };
# };
# instances."instance_bar" = {
# module = {
# name = "A";
# };
# roles.peer.machines.sara = { };
# };
# instances."instance_zaza" = {
# module = {
# name = "B";
# };
# roles.peer.tags.all = { };
# };
# };
# in
# {
# expr = {
# hasMachineSettings =
# res.evals.self-A.config.result.allMachines.jon.nixosModule. # { {instanceName} :: { roles :: { {roleName} :: { machines :: { {machineName} :: { settings :: {} } } } } } }
# instance_foo.roles.peer.machines.jon ? settings;
# machineSettingsEmpty =
# lib.filterAttrs (n: _v: n != "__functor" ) res.evals.self-A.config.result.allMachines.jon.nixosModule. # { {instanceName} :: { roles :: { {roleName} :: { machines :: { {machineName} :: { settings :: {} } } } } } }
# instance_foo.roles.peer.machines.jon.settings;
# hasRoleSettings =
# res.evals.self-A.config.result.allMachines.jon.nixosModule. # { {instanceName} :: { roles :: { {roleName} :: { machines :: { {machineName} :: { settings :: {} } } } } } }
# instance_foo.roles.peer ? settings;
# roleSettingsEmpty =
# lib.filterAttrs (n: _v: n != "__functor" ) res.evals.self-A.config.result.allMachines.jon.nixosModule. # { {instanceName} :: { roles :: { {roleName} :: { machines :: { {machineName} :: { settings :: {} } } } } } }
# instance_foo.roles.peer.settings;
# };
# expected = {
# hasMachineSettings = true;
# machineSettingsEmpty = {};
# hasRoleSettings = true;
# roleSettingsEmpty = {};
# };
# };
}

View File

@@ -0,0 +1,107 @@
{ lib, callInventoryAdapter }:
let # Authored module
# A minimal module looks like this
# It isn't exactly doing anything but it's a valid module that produces an output
modules."A" = {
_class = "clan.service";
manifest = {
name = "network";
};
# Define two roles with unmergeable interfaces
# Both define some 'timeout' but with completely different types.
roles.peer.interface =
{ lib, ... }:
{
options.timeout = lib.mkOption {
type = lib.types.str;
};
};
roles.server.interface =
{ lib, ... }:
{
options.timeout = lib.mkOption {
type = lib.types.submodule;
};
};
perMachine =
{ instances, ... }:
{
nixosModule = instances;
};
};
machines = {
jon = { };
sara = { };
};
res = callInventoryAdapter {
inherit modules machines;
instances."instance_foo" = {
module = {
name = "A";
};
roles.peer.machines.jon = {
settings.timeout = lib.mkForce "foo-peer-jon";
};
roles.peer = {
settings.timeout = "foo-peer";
};
};
instances."instance_bar" = {
module = {
name = "A";
};
roles.peer.machines.jon = {
settings.timeout = "bar-peer-jon";
};
};
instances."instance_zaza" = {
module = {
name = "B";
};
roles.peer.tags.all = { };
};
};
filterInternals = lib.filterAttrs (n: _v: !lib.hasPrefix "_" n);
in
{
# settings should evaluate
test_per_machine_receives_instance_settings = {
expr = {
hasMachineSettings =
res.evals.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer.machines.jon
? settings;
# settings are specific.
# Below we access:
# instance = instance_foo
# roles = peer
# machines = jon
specificMachineSettings = filterInternals res.evals.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer.machines.jon.settings;
hasRoleSettings =
res.evals.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer ? settings;
# settings are specific.
# Below we access:
# instance = instance_foo
# roles = peer
# machines = *
specificRoleSettings = filterInternals res.evals.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer.settings;
};
expected = {
hasMachineSettings = true;
specificMachineSettings = {
timeout = "foo-peer-jon";
};
hasRoleSettings = true;
specificRoleSettings = {
timeout = "foo-peer";
};
};
};
}

View File

@@ -6,11 +6,14 @@
let
baseModule = {
imports = (import (pkgs.path + "/nixos/modules/module-list.nix")) ++ [
(
{ config, ... }:
{
nixpkgs.pkgs = pkgs;
clan.core.name = "dummy";
system.stateVersion = lib.version;
system.stateVersion = config.system.nixos.release;
}
)
];
};

View File

@@ -10,16 +10,20 @@ let
pathExists
;
in
{
rec {
# We should remove this.
# It would enforce treating at least 'lib' as a module as a whole
imports = filter pathExists [
./jsonschema/flake-module.nix
./inventory/flake-module.nix
./build-clan/flake-module.nix
./values/flake-module.nix
./distributed-service/flake-module.nix
];
flake.lib = import ./default.nix {
inherit lib inputs;
flake.clanLib = import ./default.nix {
inherit lib inputs self;
inherit (inputs) nixpkgs;
clan-core = self;
};
# TODO: remove this legacy alias
flake.lib = flake.clanLib;
}

View File

@@ -1,9 +1,9 @@
{ lib, self }:
{ lib, clanLib }:
let
# Trim the .nix extension from a filename
trimExtension = name: builtins.substring 0 (builtins.stringLength name - 4) name;
jsonWithoutHeader = self.lib.jsonschema {
jsonWithoutHeader = clanLib.jsonschema {
includeDefaults = true;
header = { };
};
@@ -13,7 +13,7 @@ let
lib.mapAttrs (
_moduleName: rolesOptions:
lib.mapAttrs (_roleName: options: jsonWithoutHeader.parseOptions options { }) rolesOptions
) (self.lib.evalClanModulesWithRoles modules);
) (clanLib.evalClan.evalClanModulesWithRoles modules);
evalFrontmatter =
{

View File

@@ -0,0 +1,272 @@
{
lib,
config,
clanLib,
...
}:
let
inherit (config) inventory directory;
resolveTags =
# Inventory, { machines :: [string], tags :: [string] }
{
serviceName,
instanceName,
roleName,
inventory,
members,
}:
{
machines =
members.machines or [ ]
++ (builtins.foldl' (
acc: tag:
let
# For error printing
availableTags = lib.foldlAttrs (
acc: _: v:
v.tags or [ ] ++ acc
) [ ] (inventory.machines);
tagMembers = builtins.attrNames (
lib.filterAttrs (_n: v: builtins.elem tag v.tags or [ ]) inventory.machines
);
in
if tagMembers == [ ] then
lib.warn ''
inventory.services.${serviceName}.${instanceName}: - ${roleName} tags: no machine with tag '${tag}' found.
Available tags: ${builtins.toJSON (lib.unique availableTags)}
'' [ ]
else
acc ++ tagMembers
) [ ] members.tags or [ ]);
};
checkService =
modulepath: serviceName:
builtins.elem "inventory" (clanLib.modules.getFrontmatter modulepath serviceName).features or [ ];
compileMachine =
{ machineConfig }:
{
machineImports = [
(lib.optionalAttrs (machineConfig.deploy.targetHost or null != null) {
config.clan.core.networking.targetHost = machineConfig.deploy.targetHost;
})
];
assertions = { };
};
legacyResolveImports =
{
supportedRoles,
resolvedRolesPerInstance,
serviceConfigs,
serviceName,
machineName,
getRoleFile,
}:
(lib.foldlAttrs (
# : [ Modules ] -> String -> ServiceConfig -> [ Modules ]
acc2: instanceName: serviceConfig:
let
resolvedRoles = resolvedRolesPerInstance.${instanceName};
isInService = builtins.any (members: builtins.elem machineName members.machines) (
builtins.attrValues resolvedRoles
);
# all roles where the machine is present
machineRoles = builtins.attrNames (
lib.filterAttrs (_role: roleConfig: builtins.elem machineName roleConfig.machines) resolvedRoles
);
machineServiceConfig = (serviceConfig.machines.${machineName} or { }).config or { };
globalConfig = serviceConfig.config or { };
globalExtraModules = serviceConfig.extraModules or [ ];
machineExtraModules = serviceConfig.machines.${machineName}.extraModules or [ ];
roleServiceExtraModules = builtins.foldl' (
acc: role: acc ++ serviceConfig.roles.${role}.extraModules or [ ]
) [ ] machineRoles;
# TODO: maybe optimize this; don't look up the role in inverse roles. Imports are not lazy
roleModules = builtins.map (
role:
if builtins.elem role supportedRoles && inventory.modules ? ${serviceName} then
getRoleFile role
else
throw "Module ${serviceName} doesn't have role: '${role}'. Role: ${
inventory.modules.${serviceName}
}/roles/${role}.nix not found."
) machineRoles;
roleServiceConfigs = builtins.filter (m: m != { }) (
builtins.map (role: serviceConfig.roles.${role}.config or { }) machineRoles
);
extraModules = map (s: if builtins.typeOf s == "string" then "${directory}/${s}" else s) (
globalExtraModules ++ machineExtraModules ++ roleServiceExtraModules
);
in
if !(serviceConfig.enabled or true) then
acc2
else if isInService then
acc2
++ [
{
imports = roleModules ++ extraModules;
clan.inventory.services.${serviceName}.${instanceName} = {
roles = resolvedRoles;
# TODO: Add inverseRoles to the service config if needed
# inherit inverseRoles;
};
}
(lib.optionalAttrs (globalConfig != { } || machineServiceConfig != { } || roleServiceConfigs != [ ])
{
clan.${serviceName} = lib.mkMerge (
[
globalConfig
machineServiceConfig
]
++ roleServiceConfigs
);
}
)
]
else
acc2
) [ ] (serviceConfigs));
in
{
imports = [
./interface.nix
];
config = {
machines = builtins.mapAttrs (
machineName: machineConfig: m:
let
compiledServices = lib.mapAttrs (
_: serviceConfigs:
(
{ config, ... }:
let
serviceName = config.serviceName;
getRoleFile = role: builtins.seq role inventory.modules.${serviceName} + "/roles/${role}.nix";
in
{
_module.args = {
inherit
resolveTags
inventory
clanLib
machineName
serviceConfigs
;
};
imports = [
./roles.nix
];
isClanModule =
let
firstRole = import (getRoleFile (builtins.head config.supportedRoles));
loadModuleForClassCheck =
m:
if lib.isFunction m then
let
args = lib.functionArgs m;
in
m args
else
m;
module = loadModuleForClassCheck (firstRole);
in
if (module) ? _class then module._class == "clan" else false;
# The actual result
machineImports =
if config.isClanModule then
throw "Clan modules are not supported yet."
else
legacyResolveImports {
supportedRoles = config.supportedRoles;
resolvedRolesPerInstance = config.resolvedRolesPerInstance;
inherit
serviceConfigs
serviceName
machineName
getRoleFile
;
};
# Assertions
assertions = {
"checkservice.${serviceName}" = {
assertion = checkService inventory.modules.${serviceName} serviceName;
message = ''
Service ${serviceName} cannot be used in inventory. It does not declare the 'inventory' feature.
To allow it add the following to the beginning of the README.md of the module:
---
...
features = [ "inventory" ]
---
Also make sure to test the module with the 'inventory' feature enabled.
'';
};
};
}
)
) (config.inventory.services or { });
compiledMachine = compileMachine {
inherit
machineConfig
;
};
machineImports = (
compiledMachine.machineImports
++ builtins.foldl' (
acc: service:
let
failedAssertions = (lib.filterAttrs (_: v: !v.assertion) service.assertions);
failedAssertionsImports =
if failedAssertions != { } then
[
{
clan.inventory.assertions = failedAssertions;
}
]
else
[
{
clan.inventory.assertions = {
"alive.assertion.inventory" = {
assertion = true;
message = ''
No failed assertions found for machine ${machineName}. This will never be displayed.
It is here for testing purposes.
'';
};
};
}
];
in
acc
++ service.machineImports
# Import failed assertions
++ failedAssertionsImports
) [ ] (builtins.attrValues m.config.compiledServices)
);
in
{
inherit machineImports compiledServices compiledMachine;
}
) (inventory.machines or { });
};
}

View File

@@ -0,0 +1,91 @@
{ lib, ... }:
let
inherit (lib) types mkOption;
submodule = m: types.submoduleWith { modules = [ m ]; };
in
{
options = {
directory = mkOption {
type = types.path;
};
inventory = mkOption {
type = types.raw;
};
machines = mkOption {
type = types.attrsOf (
submodule (
{ name, ... }:
let
machineName = name;
in
{
options = {
compiledMachine = mkOption {
type = types.raw;
};
compiledServices = mkOption {
# type = types.attrsOf;
type = types.attrsOf (
types.submoduleWith {
modules = [
(
{ name, ... }:
let
serviceName = name;
in
{
options = {
machineName = mkOption {
default = machineName;
readOnly = true;
};
serviceName = mkOption {
default = serviceName;
readOnly = true;
};
# Outputs
machineImports = mkOption {
type = types.listOf types.raw;
};
supportedRoles = mkOption {
type = types.listOf types.str;
};
matchedRoles = mkOption {
type = types.listOf types.str;
};
isClanModule = mkOption {
type = types.bool;
};
machinesRoles = mkOption {
type = types.attrsOf (types.listOf types.str);
};
resolvedRolesPerInstance = mkOption {
type = types.attrsOf (
types.attrsOf (submodule {
options.machines = mkOption {
type = types.listOf types.str;
};
})
);
};
assertions = mkOption {
type = types.attrsOf types.raw;
};
};
}
)
];
}
);
};
machineImports = mkOption {
type = types.listOf types.raw;
};
};
}
)
);
};
};
}

View File

@@ -0,0 +1,65 @@
{
lib,
config,
resolveTags,
inventory,
clanLib,
machineName,
serviceConfigs,
...
}:
let
serviceName = config.serviceName;
in
{
# Roles resolution
# : List String
supportedRoles = clanLib.modules.getRoles inventory.modules serviceName;
matchedRoles = builtins.attrNames (
lib.filterAttrs (_: ms: builtins.elem machineName ms) config.machinesRoles
);
resolvedRolesPerInstance = lib.mapAttrs (
instanceName: instanceConfig:
let
resolvedRoles = lib.genAttrs config.supportedRoles (
roleName:
resolveTags {
members = instanceConfig.roles.${roleName} or { };
inherit
instanceName
serviceName
roleName
inventory
;
}
);
usedRoles = builtins.attrNames instanceConfig.roles;
unmatchedRoles = builtins.filter (role: !builtins.elem role config.supportedRoles) usedRoles;
in
if unmatchedRoles != [ ] then
throw ''
Roles ${builtins.toJSON unmatchedRoles} are not defined in the service ${serviceName}.
Instance: '${instanceName}'
Please use one of available roles: ${builtins.toJSON config.supportedRoles}
''
else
resolvedRoles
) serviceConfigs;
machinesRoles = builtins.zipAttrsWith (
_n: vs:
let
flat = builtins.foldl' (acc: s: acc ++ s.machines) [ ] vs;
in
lib.unique flat
) (builtins.attrValues config.resolvedRolesPerInstance);
assertions = lib.concatMapAttrs (
instanceName: resolvedRoles:
clanLib.modules.checkConstraints {
moduleName = serviceName;
allModules = inventory.modules;
inherit resolvedRoles instanceName;
}
) config.resolvedRolesPerInstance;
}

View File

@@ -1,272 +1,7 @@
# Generate partial NixOS configurations for every machine in the inventory
# This function is responsible for generating the module configuration for every machine in the inventory.
{ lib, clan-core }:
{ lib, clanLib }:
let
resolveTags =
# Inventory, { machines :: [string], tags :: [string] }
{
serviceName,
instanceName,
roleName,
inventory,
members,
}:
{
machines =
members.machines or [ ]
++ (builtins.foldl' (
acc: tag:
let
# For error printing
availableTags = lib.foldlAttrs (
acc: _: v:
v.tags or [ ] ++ acc
) [ ] (inventory.machines);
tagMembers = builtins.attrNames (
lib.filterAttrs (_n: v: builtins.elem tag v.tags or [ ]) inventory.machines
);
in
if tagMembers == [ ] then
lib.warn ''
inventory.services.${serviceName}.${instanceName}: role '${roleName}': no machine with tag '${tag}' found.
Available tags: ${builtins.toJSON (lib.unique availableTags)}
'' [ ]
else
acc ++ tagMembers
) [ ] members.tags or [ ]);
};
checkService =
modulepath: serviceName:
builtins.elem "inventory"
(clan-core.lib.modules.getFrontmatter modulepath serviceName).features or [ ];
compileMachine =
{ machineConfig }:
{
machineImports = [
(lib.optionalAttrs (machineConfig.deploy.targetHost or null != null) {
config.clan.core.networking.targetHost = machineConfig.deploy.targetHost;
})
];
assertions = { };
};
compileServicesForMachine =
# Returns a NixOS configuration for the machine 'machineName'.
# Return Format: { imports = [ ... ]; config = { ... }; options = { ... } }
{
machineName,
inventory,
directory,
}:
let
compileServiceModules =
serviceName: serviceConfigs:
let
supportedRoles = clan-core.lib.modules.getRoles inventory.modules serviceName;
firstRole = import (getRoleFile (builtins.head supportedRoles));
loadModuleForClassCheck =
m:
if lib.isFunction m then
let
args = lib.functionArgs m;
in
m args
else
m;
isClanModule =
let
module = loadModuleForClassCheck firstRole;
in
if module ? _class then module._class == "clan" else false;
getRoleFile = role: builtins.seq role inventory.modules.${serviceName} + "/roles/${role}.nix";
resolvedRolesPerInstance = lib.mapAttrs (
instanceName: instanceConfig:
let
resolvedRoles = lib.genAttrs supportedRoles (
roleName:
resolveTags {
members = instanceConfig.roles.${roleName} or { };
inherit
instanceName
serviceName
roleName
inventory
;
}
);
usedRoles = builtins.attrNames instanceConfig.roles;
unmatchedRoles = builtins.filter (role: !builtins.elem role supportedRoles) usedRoles;
in
if unmatchedRoles != [ ] then
throw ''
Service: '${serviceName}' Instance: '${instanceName}'
The following roles do not exist: ${builtins.toJSON unmatchedRoles}
Please use one of the available roles: ${builtins.toJSON supportedRoles}
''
else
resolvedRoles
) serviceConfigs;
machinesRoles = builtins.zipAttrsWith (
_n: vs:
let
flat = builtins.foldl' (acc: s: acc ++ s.machines) [ ] vs;
in
lib.unique flat
) (builtins.attrValues resolvedRolesPerInstance);
matchedRoles = builtins.attrNames (
lib.filterAttrs (_: ms: builtins.elem machineName ms) machinesRoles
);
in
# roleImports = lib.mapAttrsToList (
# roleName: _: inventory.modules.${serviceName} + "/roles/${roleName}.nix"
# ) (lib.filterAttrs (_: ms: builtins.elem machineName ms) machinesRoles);
# CompiledService :: { machineImports :: []; machineRoles :: [ String ] }
{
inherit
machinesRoles
matchedRoles
resolvedRolesPerInstance
firstRole
isClanModule
supportedRoles
;
# TODO: Add other attributes
machineImports =
if isClanModule then
throw "Clan modules are not supported yet."
else
(lib.foldlAttrs (
# [ Modules ], String, ServiceConfig
acc2: instanceName: serviceConfig:
let
resolvedRoles = lib.genAttrs supportedRoles (
roleName:
resolveTags {
members = serviceConfig.roles.${roleName} or { };
inherit
serviceName
instanceName
roleName
inventory
;
}
);
isInService = builtins.any (members: builtins.elem machineName members.machines) (
builtins.attrValues resolvedRoles
);
# all roles where the machine is present
machineRoles = builtins.attrNames (
lib.filterAttrs (_role: roleConfig: builtins.elem machineName roleConfig.machines) resolvedRoles
);
machineServiceConfig = (serviceConfig.machines.${machineName} or { }).config or { };
globalConfig = serviceConfig.config or { };
globalExtraModules = serviceConfig.extraModules or [ ];
machineExtraModules = serviceConfig.machines.${machineName}.extraModules or [ ];
roleServiceExtraModules = builtins.foldl' (
acc: role: acc ++ serviceConfig.roles.${role}.extraModules or [ ]
) [ ] machineRoles;
# TODO: maybe optimize this; don't look up the role in inverse roles. Imports are not lazy.
roleModules = builtins.map (
role:
if builtins.elem role supportedRoles && inventory.modules ? ${serviceName} then
getRoleFile role
else
throw "Module ${serviceName} doesn't have role: '${role}'. Role: ${
inventory.modules.${serviceName}
}/roles/${role}.nix not found."
) machineRoles;
roleServiceConfigs = builtins.filter (m: m != { }) (
builtins.map (role: serviceConfig.roles.${role}.config or { }) machineRoles
);
extraModules = map (s: if builtins.typeOf s == "string" then "${directory}/${s}" else s) (
globalExtraModules ++ machineExtraModules ++ roleServiceExtraModules
);
nonExistingRoles = builtins.filter (role: !(builtins.elem role supportedRoles)) (
builtins.attrNames (serviceConfig.roles or { })
);
constraintAssertions = clan-core.lib.modules.checkConstraints {
moduleName = serviceName;
allModules = inventory.modules;
inherit resolvedRoles instanceName;
};
in
if (nonExistingRoles != [ ]) then
throw "Roles ${builtins.toString nonExistingRoles} are not defined in the service ${serviceName}."
else if !(serviceConfig.enabled or true) then
acc2
else if isInService then
acc2
++ [
{
imports = roleModules ++ extraModules;
clan.inventory.assertions = constraintAssertions;
clan.inventory.services.${serviceName}.${instanceName} = {
roles = resolvedRoles;
# TODO: Add inverseRoles to the service config if needed
# inherit inverseRoles;
};
}
(lib.optionalAttrs (globalConfig != { } || machineServiceConfig != { } || roleServiceConfigs != [ ])
{
clan.${serviceName} = lib.mkMerge (
[
globalConfig
machineServiceConfig
]
++ roleServiceConfigs
);
}
)
]
else
acc2
) [ ] (serviceConfigs));
assertions = lib.mapAttrs' (name: value: {
name = "checkservice.${serviceName}.${name}";
value = {
assertion = checkService inventory.modules.${serviceName} serviceName;
message = ''
Service ${serviceName} cannot be used in the inventory: it does not declare the 'inventory' feature.
To allow it, add the following to the beginning of the module's README.md:
---
...
features = [ "inventory" ]
---
Also make sure to test the module with the 'inventory' feature enabled.
'';
};
}) inventory.services;
};
in
lib.mapAttrs compileServiceModules inventory.services;
/*
Returns a set with NixOS configuration for every machine in the inventory.
@@ -276,57 +11,11 @@ let
{ inventory, directory }:
(lib.evalModules {
specialArgs = {
inherit directory inventory;
inherit clanLib;
};
modules = [
./internal.nix
(
{ ... }:
{
machines = builtins.mapAttrs (
machineName: machineConfig:
let
compiledServices = compileServicesForMachine {
inherit
machineName
inventory
directory
;
};
compiledMachine = compileMachine {
inherit
machineConfig
;
};
machineImports =
compiledMachine.machineImports
++ builtins.foldl' (
acc: service:
let
failedAssertions = (lib.filterAttrs (_: v: !v.assertion) service.assertions);
failedAssertionsImports =
if failedAssertions != { } then
[
{
clan.inventory.assertions = failedAssertions;
}
]
else
[ ];
in
acc
++ service.machineImports
# Import failed assertions
++ failedAssertionsImports
) [ ] (builtins.attrValues compiledServices);
in
{
inherit machineImports compiledServices compiledMachine;
}
) (inventory.machines or { });
}
)
./builder
{ inherit directory inventory; }
];
}).config;
in
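The buildInventory entry point keeps its call signature after the refactor; internally, directory and inventory now arrive as module config for ./builder, while clanLib is the only specialArg. A hedged usage sketch (lib and clanLib assumed to be supplied by clan-core; inventory contents invented):

# Hypothetical call site for buildInventory; not part of the diff.
{ lib, clanLib }:
let
  inherit (import ./build-inventory { inherit lib clanLib; }) buildInventory;
  result = buildInventory {
    directory = ./.;
    inventory = {
      machines.jon.tags = [ "backup" ];
      services = { };
      modules = { };
    };
  };
in
# Every machine ends up with its compiled module imports:
result.machines.jon.machineImports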

View File

@@ -103,7 +103,9 @@ in
default = options;
};
modules = lib.mkOption {
type = types.attrsOf types.path;
# Don't define the type yet
# We manually transform the values with types.deferredModule.merge later, to keep them serializable
type = types.attrsOf types.raw;
default = { };
defaultText = "clanModules of clan-core";
description = ''
@@ -275,7 +277,79 @@ in
)
);
};
instances = lib.mkOption {
# Keep as internal until all de-/serialization issues are resolved
visible = false;
internal = true;
description = "Multi host service module instances";
type = types.attrsOf (
types.submodule {
options = {
# ModuleSpec
module = lib.mkOption {
type = types.submodule {
options.input = lib.mkOption {
type = types.nullOr types.str;
default = null;
defaultText = "Name of the input. Default to 'null' which means the module is local";
description = ''
Name of the input. Default to 'null' which means the module is local
'';
};
options.name = lib.mkOption {
type = types.str;
};
};
};
roles = lib.mkOption {
default = { };
type = types.attrsOf (
types.submodule {
options = {
# TODO: deduplicate
machines = lib.mkOption {
type = types.attrsOf (
types.submodule {
options.settings = lib.mkOption {
default = { };
# Don't transform the value with `types.deferredModule`'s merge here; we need to keep it JSON-serializable
# TODO: We need a custom serializer for deferredModule
type = types.deferredModule;
};
}
);
default = { };
};
tags = lib.mkOption {
type = types.attrsOf (
types.submodule {
options.settings = lib.mkOption {
default = { };
type = types.deferredModule;
};
}
);
default = { };
};
settings = lib.mkOption {
default = { };
type = types.deferredModule;
};
};
}
);
};
};
}
);
default = { };
apply =
v:
if v == { } then
v
else
lib.warn "Inventory.instances and related features are still under development. Please use with care." v;
};
services = lib.mkOption {
description = ''
Services of the inventory.
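The instances option above is gated behind a warning while de-/serialization work is ongoing; a hedged sketch of a declaration matching its schema (module and machine names invented):

# Hypothetical instance declaration matching the `instances` schema above:
{
  inventory.instances."borgbackup-01" = {
    # ModuleSpec: a local module named "borgbackup"
    module = {
      name = "borgbackup";
      input = null;
    };
    roles.client = {
      machines.jon.settings = { }; # per-machine deferred-module settings
      tags.backup.settings = { }; # per-tag deferred-module settings
      settings = { }; # role-wide settings
    };
  };
}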

View File

@@ -1,24 +0,0 @@
{ lib, ... }:
let
inherit (lib) types mkOption;
submodule = m: types.submoduleWith { modules = [ m ]; };
in
{
options = {
machines = mkOption {
type = types.attrsOf (submodule {
options = {
compiledMachine = mkOption {
type = types.raw;
};
compiledServices = mkOption {
type = types.raw;
};
machineImports = mkOption {
type = types.raw;
};
};
});
};
};
}

View File

@@ -1,5 +1,5 @@
{ lib, clan-core }:
{ lib, clanLib }:
{
inherit (import ./build-inventory { inherit lib clan-core; }) buildInventory;
inherit (import ./build-inventory { inherit lib clanLib; }) buildInventory;
interface = ./build-inventory/interface.nix;
}

View File

@@ -50,10 +50,7 @@ in
self.filter {
include = [
"flakeModules"
"lib/default.nix"
"lib/flake-module.nix"
"lib/inventory"
"lib/frontmatter"
"lib"
"clanModules/flake-module.nix"
"clanModules/borgbackup"
];

View File

@@ -1,4 +0,0 @@
---
features = [ "inventory" ]
---
Description

Some files were not shown because too many files have changed in this diff.