Compare commits

...

92 Commits

Author SHA1 Message Date
8bef2e6b2e Drop macOS-specific remote-program param from nix copy command 2025-11-06 11:11:57 +08:00
clan-bot
8eaca289ad Merge pull request 'Update treefmt-nix' (#5745) from update-treefmt-nix into main 2025-11-05 20:08:44 +00:00
clan-bot
6f2d482187 Merge pull request 'Update treefmt-nix in devFlake' (#5756) from update-devFlake-treefmt-nix into main 2025-11-05 20:08:18 +00:00
clan-bot
4c30418f12 Update treefmt-nix in devFlake 2025-11-05 20:02:31 +00:00
clan-bot
3c66094d89 Update treefmt-nix 2025-11-05 20:02:02 +00:00
clan-bot
a8f180f8da Merge pull request 'Update treefmt-nix in devFlake' (#5753) from update-devFlake-treefmt-nix into main 2025-11-05 15:09:20 +00:00
clan-bot
e22218d589 Merge pull request 'Update nixpkgs-dev in devFlake' (#5752) from update-devFlake-nixpkgs-dev into main 2025-11-05 15:09:02 +00:00
clan-bot
228c60bcf7 Update treefmt-nix in devFlake 2025-11-05 15:02:30 +00:00
clan-bot
ed2b2d9df9 Update nixpkgs-dev in devFlake 2025-11-05 15:02:24 +00:00
Kenji Berthold
7e2a127d11 Merge pull request 'pkgs/clan-vm-manager: wrapGAppsHook -> wrapGAppsHook3' (#5748) from ke-wrap-gapps into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5748
2025-11-05 12:27:32 +00:00
a-kenji
8c8bacb1ab pkgs/clan-vm-manager: wrapGAppsHook -> wrapGAppsHook3 2025-11-05 12:50:48 +01:00
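As a sketch, the swap is a one-attribute change in a package expression; the derivation below is hypothetical, only the hook rename comes from the commit:
```nix
# hypothetical GTK3 package, shown only to illustrate the hook rename
{ stdenv, wrapGAppsHook3 }:

stdenv.mkDerivation {
  pname = "example-gtk3-app";
  version = "0.1.0";
  src = ./.;
  # formerly wrapGAppsHook; the GTK3 hook is now named explicitly
  nativeBuildInputs = [ wrapGAppsHook3 ];
}
```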
clan-bot
8ba71144b6 Merge pull request 'Update nix-darwin' (#5744) from update-nix-darwin into main 2025-11-05 10:04:33 +00:00
clan-bot
7f2d15c8a1 Update nix-darwin 2025-11-05 10:01:31 +00:00
clan-bot
486463c793 Merge pull request 'Update treefmt-nix in devFlake' (#5746) from update-devFlake-treefmt-nix into main 2025-11-05 05:16:48 +00:00
clan-bot
071603d688 Update treefmt-nix in devFlake 2025-11-05 05:02:33 +00:00
clan-bot
c612561ec3 Merge pull request 'Update disko' (#5742) from update-disko into main 2025-11-05 00:10:58 +00:00
clan-bot
a88cd2be40 Update disko 2025-11-05 00:01:25 +00:00
clan-bot
7140b417d3 Merge pull request 'Update nixos-facter-modules' (#5738) from update-nixos-facter-modules into main 2025-11-04 20:10:12 +00:00
clan-bot
c7a42cca7f Update nixos-facter-modules 2025-11-04 20:01:33 +00:00
clan-bot
29ca23c629 Merge pull request 'Update nixpkgs-dev in devFlake' (#5740) from update-devFlake-nixpkgs-dev into main 2025-11-04 15:08:00 +00:00
clan-bot
cd7210de1b Update nixpkgs-dev in devFlake 2025-11-04 15:02:30 +00:00
Mic92
c2ebafcf92 Merge pull request 'zfsUnstable -> zfs_unstable' (#5737) from zfs-fix into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5737
2025-11-04 14:46:19 +00:00
Jörg Thalheim
2a9e4e7860 zfsUnstable -> zfs_unstable
nixpkgs has a new path for this.
2025-11-04 15:41:50 +01:00
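As a sketch, a NixOS configuration pinning the unstable ZFS build now has to select the renamed attribute (the `boot.zfs.package` option here is illustrative; only the attribute rename comes from the commit):
```nix
{ pkgs, ... }:
{
  # formerly pkgs.zfsUnstable; nixpkgs renamed the attribute path
  boot.zfs.package = pkgs.zfs_unstable;
}
```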
hsjobeki
43a7652624 Merge pull request 'App: init delete machine' (#5734) from jpy-scene into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5734
2025-11-04 11:03:26 +00:00
Johannes Kirschbauer
65fd25bc2e App: init delete machine 2025-11-04 11:37:29 +01:00
Kenji Berthold
f89ea15749 Merge pull request 'pkgs/cli/vars: Add dependency validation' (#5727) from ke-vars-dependency-validation into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5727
Reviewed-by: Mic92 <joerg@thalheim.io>
2025-11-04 09:55:55 +00:00
hsjobeki
19d4833be8 Merge pull request 'UI: clean up unused scene code' (#5730) from jpy-scene into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5730
2025-11-04 08:39:04 +00:00
Johannes Kirschbauer
82f12eaf6f UI: clean up unused scene code 2025-11-04 09:34:17 +01:00
clan-bot
0b5a8e98de Merge pull request 'Update nix-darwin' (#5729) from update-nix-darwin into main 2025-11-04 05:05:29 +00:00
clan-bot
c5bddada05 Update nix-darwin 2025-11-04 05:01:02 +00:00
clan-bot
62b64c3b3e Merge pull request 'Update nixpkgs-dev in devFlake' (#5728) from update-devFlake-nixpkgs-dev into main 2025-11-03 15:07:53 +00:00
clan-bot
19a1ad6081 Update nixpkgs-dev in devFlake 2025-11-03 15:01:50 +00:00
Kenji Berthold
a2df5db3d6 Merge pull request 'docs/testing: Document requirements for our container testing system' (#5693) from ke-docs-testing-container into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5693
2025-11-03 13:13:53 +00:00
Kenji Berthold
ac46f890ea Merge branch 'main' into ke-docs-testing-container 2025-11-03 13:06:14 +00:00
a-kenji
83f78d9f59 pkgs/cli/vars: Add dependency validation
Add explicit dependency validation to vars, so that proper error
messages can be surfaced to the user.

Instead of:
```
Traceback (most recent call last):
  File "/home/lhebendanz/Projects/clan-core/pkgs/clan-cli/clan_lib/async_run/__init__.py", line 154, in run
    self.result = AsyncResult(_result=self.function(*self.args, **self.kwargs))
                                      ~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/lhebendanz/Projects/clan-core/pkgs/clan-cli/clan_cli/machines/update.py", line 62, in run_update_with_network
    run_machine_update(
    ~~~~~~~~~~~~~~~~~~^
        machine=machine,
        ^^^^^^^^^^^^^^^^
    ...<2 lines>...
        upload_inputs=upload_inputs,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/home/lhebendanz/Projects/clan-core/pkgs/clan-cli/clan_lib/machines/update.py", line 158, in run_machine_update
    run_generators([machine], generators=None, full_closure=False)
    ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/lhebendanz/Projects/clan-core/pkgs/clan-cli/clan_lib/vars/generate.py", line 156, in run_generators
    all_generators = get_generators(machines, full_closure=True)
  File "/home/lhebendanz/Projects/clan-core/pkgs/clan-cli/clan_lib/vars/generate.py", line 50, in get_generators
    all_generators_list = Generator.get_machine_generators(
        all_machines,
        flake,
        include_previous_values=include_previous_values,
    )
  File "/home/lhebendanz/Projects/clan-core/pkgs/clan-cli/clan_cli/vars/generator.py", line 246, in get_machine_generators
    if generators_data[dep]["share"]
       ~~~~~~~~~~~~~~~^^^^^
KeyError: 'bla'
```

We now get:
```
$> Generator 'my_generator' on machine 'my_machine' depends on generator 'non_existing_generator', but 'non_existing_generator' does not exist
```

Closes: #5698
2025-11-03 14:00:38 +01:00
clan-bot
19abf8d288 Merge pull request 'Update nixpkgs-dev in devFlake' (#5726) from update-devFlake-nixpkgs-dev into main 2025-11-03 10:06:31 +00:00
clan-bot
e5105e31c4 Update nixpkgs-dev in devFlake 2025-11-03 10:01:47 +00:00
clan-bot
0f847b4799 Merge pull request 'Update nixpkgs-dev in devFlake' (#5724) from update-devFlake-nixpkgs-dev into main 2025-11-02 20:06:24 +00:00
clan-bot
40a8a823b8 Update nixpkgs-dev in devFlake 2025-11-02 20:01:50 +00:00
Mic92
e3adb3fc71 Merge pull request 'Fix vars upload for public vars with neededFor activation/partitioning' (#5723) from vars into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5723
2025-11-02 16:00:13 +00:00
Jörg Thalheim
a569a1d147 Fix vars upload for public vars with neededFor activation/partitioning
When vars are marked with neededFor="activation" or "partitioning", they
need to be available early in the boot process. However, the populate_dir
methods in both the sops and password_store secret backends were calling
self.get(), which retrieves only secret vars from the .../secret path.

This caused public vars (stored at .../value) to fail with "Secret does
not exist" errors when trying to upload them.

The fix uses the file.value property instead, which properly delegates to the
correct store (SecretStore or FactStore) based on whether the file is
marked as secret or public.

The fix covers all neededFor phases in both backends:
- sops: activation and partitioning phases
- password_store: activation and partitioning phases
2025-11-02 16:49:49 +01:00
Mic92
64718b77ca Merge pull request 'readme fix' (#5722) from i18n/clan-core:main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5722
2025-11-02 15:49:16 +00:00
i18n
7b34c39736 Merge pull request 'Update docs/site/getting-started/creating-your-first-clan.md' (#1) from i18n-patch-1 into main
Reviewed-on: https://git.clan.lol/i18n/clan-core/pulls/1
2025-11-02 13:24:05 +00:00
i18n
4d6ab60793 Update docs/site/getting-started/creating-your-first-clan.md 2025-11-02 13:23:04 +00:00
clan-bot
35bffee544 Merge pull request 'Update nixpkgs-dev in devFlake' (#5721) from update-devFlake-nixpkgs-dev into main 2025-11-02 10:05:51 +00:00
clan-bot
16917fd79b Update nixpkgs-dev in devFlake 2025-11-02 10:01:50 +00:00
clan-bot
895c116c01 Merge pull request 'Update nix-darwin' (#5720) from update-nix-darwin into main 2025-11-02 05:06:00 +00:00
clan-bot
e67151f7b9 Merge pull request 'Update flake-parts' (#5719) from update-flake-parts into main 2025-11-02 05:05:06 +00:00
clan-bot
8d26ec1760 Update nix-darwin 2025-11-02 05:01:04 +00:00
clan-bot
7a9062b629 Update flake-parts 2025-11-02 05:01:01 +00:00
clan-bot
de07454a0a Merge pull request 'Update nix-darwin' (#5718) from update-nix-darwin into main 2025-11-01 20:06:16 +00:00
clan-bot
6fe60f61cf Update nix-darwin 2025-11-01 20:00:58 +00:00
clan-bot
3fa74847e4 Merge pull request 'Update nixpkgs-dev in devFlake' (#5716) from update-devFlake-nixpkgs-dev into main 2025-11-01 15:06:01 +00:00
clan-bot
fc37140b52 Update nixpkgs-dev in devFlake 2025-11-01 15:01:52 +00:00
hsjobeki
83406c61f3 Merge pull request 'services: update hello-world readme and tests' (#5714) from update-docs into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5714
2025-11-01 11:37:39 +00:00
hsjobeki
6d736e7e80 Merge pull request 'docs: update experimental notes as planned in release-notes' (#5715) from ex-docs into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5715
2025-11-01 11:35:29 +00:00
Johannes Kirschbauer
7b6cec4100 services: update hello-world readme and tests 2025-11-01 12:33:15 +01:00
Johannes Kirschbauer
e21a6516b5 docs: update experimental notes as planned in release-notes 2025-11-01 12:30:01 +01:00
Mic92
6ffe8ea5f6 Merge pull request 'treewide: replace pkgs.hostPlatform with pkgs.stdenv.hostPlatform' (#5713) from fix-eval into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5713
2025-10-31 17:57:38 +00:00
Jörg Thalheim
0a2fefd141 treewide: replace pkgs.hostPlatform with pkgs.stdenv.hostPlatform
nixpkgs now throws an error for this; the variant under stdenv also
exists in the previous release.
2025-10-31 18:52:31 +01:00
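The shape of the replacement, as it recurs throughout the diffs below:
```nix
# minimal sketch of the treewide rename; the `self`/`clan-cli` names mirror the diffs
{ pkgs, self }:
{
  # before (now an eval error): self.packages.${pkgs.hostPlatform.system}.clan-cli
  cli = self.packages.${pkgs.stdenv.hostPlatform.system}.clan-cli;
}
```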
Luis Hebendanz
0c885d05b6 Merge pull request 'clan_lib/flake: Improve select error message' (#5711) from Qubasa/clan-core:improve_clan_select_error_message into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5711
2025-10-31 15:11:29 +00:00
Qubasa
58d85b117a clan_lib/flake: Improve select error message 2025-10-31 16:05:54 +01:00
clan-bot
ad58d7b6e9 Merge pull request 'Update nixpkgs-dev in devFlake' (#5710) from update-devFlake-nixpkgs-dev into main 2025-10-31 15:05:13 +00:00
clan-bot
7a63cb9642 Update nixpkgs-dev in devFlake 2025-10-31 15:01:50 +00:00
clan-bot
196b98da36 Merge pull request 'Update disko' (#5707) from update-disko into main 2025-10-31 10:10:34 +00:00
clan-bot
42acbe95b8 Update disko 2025-10-31 10:00:58 +00:00
clan-bot
b6b065e365 Merge pull request 'Update nixpkgs-dev in devFlake' (#5706) from update-devFlake-nixpkgs-dev into main 2025-10-31 00:08:41 +00:00
clan-bot
4b1955b189 Update nixpkgs-dev in devFlake 2025-10-31 00:02:00 +00:00
clan-bot
ef7ef8b843 Merge pull request 'Update nixpkgs-dev in devFlake' (#5704) from update-devFlake-nixpkgs-dev into main 2025-10-30 20:05:49 +00:00
clan-bot
38c1367322 Update nixpkgs-dev in devFlake 2025-10-30 20:01:49 +00:00
clan-bot
8e72c086fd Merge pull request 'Update nixpkgs-dev in devFlake' (#5702) from update-devFlake-nixpkgs-dev into main 2025-10-30 15:06:22 +00:00
clan-bot
c454b1339d Update nixpkgs-dev in devFlake 2025-10-30 15:01:51 +00:00
hsjobeki
d1b2d43e5b Merge pull request 'services: move into clan submodule' (#5701) from unify-clan into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5701
2025-10-30 13:00:14 +00:00
Johannes Kirschbauer
da98ca0f1c clanLib: remove unused mapInstances 2025-10-30 13:54:05 +01:00
Johannes Kirschbauer
1953540d08 tests: update inventory tests to use whole clan modules 2025-10-30 13:54:05 +01:00
Johannes Kirschbauer
be31b9ce21 docs: remove service options from nuschtSearch
These hacks were blocking the flake-level vars and exports.
Maybe we bring this back later.
So far nobody seemed to be using nuschtSearch.
2025-10-30 13:54:05 +01:00
Johannes Kirschbauer
169b4016e6 docs: set self to clan-core for docs 2025-10-30 13:53:49 +01:00
Johannes Kirschbauer
2e55028a1b services: move into clan submodule 2025-10-30 13:53:49 +01:00
hsjobeki
1d228231f2 Merge pull request 'clan/services: Reduce surface of services wrapper function' (#5700) from unify-clan into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5700
2025-10-30 09:49:56 +00:00
Johannes Kirschbauer
affb926450 services: remove duplicate module args 2025-10-30 10:10:55 +01:00
Johannes Kirschbauer
c7f65e929f inventoryAdapter: replace importedModulesEvaluated by equivalent config 2025-10-30 10:10:31 +01:00
hsjobeki
ba4ff493e8 Merge pull request 'revert: uniqueStrings' (#5699) from hsjobeki-patch-1 into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5699
2025-10-30 08:34:59 +00:00
hsjobeki
eb08803e2a revert bfb30251e6
revert lib: replace uniqueStrings after upstreamed

TODO: Reapply after 25.11 release
2025-10-30 08:29:43 +00:00
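The local fallback reintroduced by this revert is a one-liner (the diffs below use `lib.id`; it is inlined here as `(x: x)` to stay self-contained):
```nix
let
  # O(n*log(n)) via builtins.groupBy, instead of lib.unique's O(n^2)
  uniqueStrings = list: builtins.attrNames (builtins.groupBy (x: x) list);
in
uniqueStrings [ "b" "a" "b" ]  # => [ "a" "b" ] (attrNames returns sorted keys)
```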
clan-bot
bbc9486f0e Merge pull request 'Update nixpkgs-dev in devFlake' (#5697) from update-devFlake-nixpkgs-dev into main 2025-10-29 20:06:16 +00:00
clan-bot
999d709350 Update nixpkgs-dev in devFlake 2025-10-29 20:01:48 +00:00
clan-bot
0b1a330cc2 Merge pull request 'Update nixpkgs-dev in devFlake' (#5696) from update-devFlake-nixpkgs-dev into main 2025-10-29 15:06:14 +00:00
clan-bot
995b7cf50d Update nixpkgs-dev in devFlake 2025-10-29 15:01:49 +00:00
a-kenji
bc290fe59f docs/testing: Document requirements for our container testing system
Document the requirements for our container testing system:
- uid-range
- auto-allocate-uids

Further document that the container tests are used by default and how to
switch to the more traditional, better-supported, and more featureful VM
testing framework.
2025-10-29 13:47:26 +01:00
clan-bot
5477b13233 Merge pull request 'Update nuschtos in devFlake' (#5690) from update-devFlake-nuschtos into main 2025-10-29 10:08:23 +00:00
clan-bot
d6170e5efb Update nuschtos in devFlake 2025-10-29 10:01:53 +00:00
clan-bot
18fe117363 Merge pull request 'Update nixpkgs-dev in devFlake' (#5689) from update-devFlake-nixpkgs-dev into main 2025-10-29 00:07:47 +00:00
clan-bot
33a868acc2 Update nixpkgs-dev in devFlake 2025-10-29 00:03:27 +00:00
56 changed files with 828 additions and 789 deletions

View File

@@ -58,51 +58,53 @@
pkgs.buildPackages.xorg.lndir
pkgs.glibcLocales
pkgs.kbd.out
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.ConfigIniFiles
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.FileSlurp
self.nixosConfigurations."test-flash-machine-${pkgs.stdenv.hostPlatform.system}".pkgs.perlPackages.ConfigIniFiles
self.nixosConfigurations."test-flash-machine-${pkgs.stdenv.hostPlatform.system}".pkgs.perlPackages.FileSlurp
pkgs.bubblewrap
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript.drvPath
self.nixosConfigurations."test-flash-machine-${pkgs.stdenv.hostPlatform.system}".config.system.build.toplevel
self.nixosConfigurations."test-flash-machine-${pkgs.stdenv.hostPlatform.system}".config.system.build.diskoScript
self.nixosConfigurations."test-flash-machine-${pkgs.stdenv.hostPlatform.system}".config.system.build.diskoScript.drvPath
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
# Skip flash test on aarch64-linux for now as it's too slow
checks = lib.optionalAttrs (pkgs.stdenv.isLinux && pkgs.hostPlatform.system != "aarch64-linux") {
nixos-test-flash = self.clanLib.test.baseTest {
name = "flash";
nodes.target = {
virtualisation.emptyDiskImages = [ 4096 ];
virtualisation.memorySize = 4096;
checks =
lib.optionalAttrs (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux")
{
nixos-test-flash = self.clanLib.test.baseTest {
name = "flash";
nodes.target = {
virtualisation.emptyDiskImages = [ 4096 ];
virtualisation.memorySize = 4096;
virtualisation.useNixStoreImage = true;
virtualisation.writableStore = true;
virtualisation.useNixStoreImage = true;
virtualisation.writableStore = true;
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
environment.etc."install-closure".source = "${closureInfo}/store-paths";
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
environment.etc."install-closure".source = "${closureInfo}/store-paths";
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = "";
experimental-features = [
"nix-command"
"flakes"
];
};
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = "";
experimental-features = [
"nix-command"
"flakes"
];
};
};
testScript = ''
start_all()
machine.succeed("echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIRWUusawhlIorx7VFeQJHmMkhl9X3QpnvOdhnV/bQNG root@target' > ./test_id_ed25519.pub")
# Some distros like to automount disks with spaces
machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdc && mount /dev/vdc "/mnt/with spaces"')
machine.succeed("clan flash write --ssh-pubkey ./test_id_ed25519.pub --keymap de --language de_DE.UTF-8 --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.stdenv.hostPlatform.system}")
'';
} { inherit pkgs self; };
};
testScript = ''
start_all()
machine.succeed("echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIRWUusawhlIorx7VFeQJHmMkhl9X3QpnvOdhnV/bQNG root@target' > ./test_id_ed25519.pub")
# Some distros like to automount disks with spaces
machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdc && mount /dev/vdc "/mnt/with spaces"')
machine.succeed("clan flash write --ssh-pubkey ./test_id_ed25519.pub --keymap de --language de_DE.UTF-8 --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.hostPlatform.system}")
'';
} { inherit pkgs self; };
};
};
}

View File

@@ -160,9 +160,9 @@
closureInfo = pkgs.closureInfo {
rootPaths = [
privateInputs.clan-core-for-checks
self.nixosConfigurations."test-install-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
self.nixosConfigurations."test-install-machine-${pkgs.hostPlatform.system}".config.system.build.initialRamdisk
self.nixosConfigurations."test-install-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
self.nixosConfigurations."test-install-machine-${pkgs.stdenv.hostPlatform.system}".config.system.build.toplevel
self.nixosConfigurations."test-install-machine-${pkgs.stdenv.hostPlatform.system}".config.system.build.initialRamdisk
self.nixosConfigurations."test-install-machine-${pkgs.stdenv.hostPlatform.system}".config.system.build.diskoScript
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.buildPackages.xorg.lndir
@@ -215,7 +215,7 @@
# Prepare test flake and Nix store
flake_dir = prepare_test_flake(
temp_dir,
"${self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks}",
"${self.checks.${pkgs.stdenv.hostPlatform.system}.clan-core-for-checks}",
"${closureInfo}"
)
@@ -296,7 +296,7 @@
# Prepare test flake and Nix store
flake_dir = prepare_test_flake(
temp_dir,
"${self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks}",
"${self.checks.${pkgs.stdenv.hostPlatform.system}.clan-core-for-checks}",
"${closureInfo}"
)

View File

@@ -2,7 +2,7 @@
let
cli = self.packages.${pkgs.hostPlatform.system}.clan-cli-full;
cli = self.packages.${pkgs.stdenv.hostPlatform.system}.clan-cli-full;
ollama-model = pkgs.callPackage ./qwen3-4b-instruct.nix { };
in
@@ -53,7 +53,7 @@ in
pytest
pytest-xdist
(cli.pythonRuntime.pkgs.toPythonModule cli)
self.legacyPackages.${pkgs.hostPlatform.system}.nixosTestLib
self.legacyPackages.${pkgs.stdenv.hostPlatform.system}.nixosTestLib
]
))
];

View File

@@ -2,7 +2,7 @@
let
cli = self.packages.${pkgs.hostPlatform.system}.clan-cli-full;
cli = self.packages.${pkgs.stdenv.hostPlatform.system}.clan-cli-full;
in
{
name = "systemd-abstraction";

View File

@@ -115,9 +115,9 @@
let
closureInfo = pkgs.closureInfo {
rootPaths = [
self.packages.${pkgs.hostPlatform.system}.clan-cli
self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-update-machine.config.system.build.toplevel
self.packages.${pkgs.stdenv.hostPlatform.system}.clan-cli
self.checks.${pkgs.stdenv.hostPlatform.system}.clan-core-for-checks
self.clanInternals.machines.${pkgs.stdenv.hostPlatform.system}.test-update-machine.config.system.build.toplevel
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.buildPackages.xorg.lndir
@@ -132,7 +132,7 @@
imports = [ self.nixosModules.test-update-machine ];
};
extraPythonPackages = _p: [
self.legacyPackages.${pkgs.hostPlatform.system}.nixosTestLib
self.legacyPackages.${pkgs.stdenv.hostPlatform.system}.nixosTestLib
];
testScript = ''
@@ -154,7 +154,7 @@
# Prepare test flake and Nix store
flake_dir = prepare_test_flake(
temp_dir,
"${self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks}",
"${self.checks.${pkgs.stdenv.hostPlatform.system}.clan-core-for-checks}",
"${closureInfo}"
)
(flake_dir / ".clan-flake").write_text("") # Ensure .clan-flake exists
@@ -226,7 +226,7 @@
"--to",
"ssh://root@192.168.1.1",
"--no-check-sigs",
f"${self.packages.${pkgs.hostPlatform.system}.clan-cli}",
f"${self.packages.${pkgs.stdenv.hostPlatform.system}.clan-cli}",
"--extra-experimental-features", "nix-command flakes",
],
check=True,
@@ -242,7 +242,7 @@
"-o", "UserKnownHostsFile=/dev/null",
"-o", "StrictHostKeyChecking=no",
f"root@192.168.1.1",
"${self.packages.${pkgs.hostPlatform.system}.clan-cli}/bin/clan",
"${self.packages.${pkgs.stdenv.hostPlatform.system}.clan-cli}/bin/clan",
"machines",
"update",
"--debug",
@@ -270,7 +270,7 @@
# Run clan update command
subprocess.run([
"${self.packages.${pkgs.hostPlatform.system}.clan-cli-full}/bin/clan",
"${self.packages.${pkgs.stdenv.hostPlatform.system}.clan-cli-full}/bin/clan",
"machines",
"update",
"--debug",
@@ -297,7 +297,7 @@
# Run clan update command with --build-host
subprocess.run([
"${self.packages.${pkgs.hostPlatform.system}.clan-cli-full}/bin/clan",
"${self.packages.${pkgs.stdenv.hostPlatform.system}.clan-cli-full}/bin/clan",
"machines",
"update",
"--debug",

View File

@@ -1,3 +1,6 @@
!!! Danger "Experimental"
    This service is experimental and will change in the future.
This service sets up a certificate authority (CA) that can issue certificates to
other machines in your clan. For this the `ca` role is used.
It additionally provides a `default` role, that can be applied to all machines

View File

@@ -1,3 +1,6 @@
!!! Danger "Experimental"
    This service is experimental and will change in the future.
This module enables hosting clan-internal services easily, which can be resolved
inside your VPN. This allows defining a custom top-level domain (e.g. `.clan`)
and exposing endpoints from a machine to others, which will be

View File

@@ -1 +1,83 @@
This a test README just to appease the eval warnings if we don't have one
!!! Danger "Experimental"
    This service is for demonstration purposes only and may change in the future.
The Hello-World Clan Service is a minimal example showing how to build and register your own service.
It serves as a reference implementation and is used in clan-core CI tests to ensure compatibility.
## What it demonstrates
- How to define a basic Clan-compatible service.
- How to structure your service for discovery and configuration.
- How Clan services interact with NixOS.
## Testing
This service demonstrates two levels of testing to ensure quality and stability across releases:
1. **Unit & Integration Testing** — via [`nix-unit`](https://github.com/nix-community/nix-unit)
2. **End-to-End Testing** — via **NixOS VM tests**, which we extended to support **container virtualization** for better performance.
We highly advocate following the [Practical Testing Pyramid](https://martinfowler.com/articles/practical-test-pyramid.html):
* Write **unit tests** for core logic and invariants.
* Add **one or two end-to-end (E2E)** tests to confirm your service starts and behaves correctly in a real NixOS environment.
NixOS is **untyped** and frequently changes; tests are the safest way to ensure long-term stability of services.
```
        / \
       /   \
      / E2E \
     /-------\
    /         \
   /Integration\
  /-------------\
 /               \
/    Unit Tests   \
-------------------
```
### nix-unit
We highly advocate the usage of
[nix-unit](https://github.com/nix-community/nix-unit)
Example in: tests/eval-tests.nix
If you use flake-parts you can use the [native integration](https://flake.parts/options/nix-unit.html)
If nix-unit succeeds, your NixOS evaluation should be mostly correct.
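A minimal nix-unit test file is just an attribute set of cases, each with an `expr` and an `expected` value that nix-unit compares (a sketch; the real example lives in tests/eval-tests.nix):
```nix
{
  test_list_length = {
    expr = builtins.length [ 1 2 3 ];
    expected = 3;
  };
}
```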
!!! Tip
    - Ensure the most used 'settings' and variants are tested.
    - Think about some important edge cases your system should handle.
### NixOS VM / Container Test
!!! Warning "Early Vars & clanTest"
    The testing system around vars is experimental.
    `clanTest` is still experimental and enables container virtualization by default.
    This is still early and might have some limitations.
Some minimal boilerplate is needed to use `clanTest`:
```nix
let
  nixosLib = import (inputs.nixpkgs + "/nixos/lib") { };
in
nixosLib.runTest (
  { ... }:
  {
    imports = [
      self.modules.nixosTest.clanTest
      # Example in tests/vm/default.nix
      testModule
    ];

    hostPkgs = pkgs;

    # Uncomment if you don't want or cannot use containers
    # test.useContainers = false;
  }
)
```

View File

@@ -8,7 +8,7 @@
{
_class = "clan.service";
manifest.name = "clan-core/hello-word";
manifest.description = "This is a test";
manifest.description = "Minimal example clan service that greets the world";
manifest.readme = builtins.readFile ./README.md;
# This service provides two roles: "morning" and "evening". Roles can be

View File

@@ -26,7 +26,7 @@ in
# The hello-world service being tested
../../clanServices/hello-world
# Required modules
../../nixosModules/clanCore
../../nixosModules
];
testName = "hello-world";
tests = ./tests/eval-tests.nix;

View File

@@ -4,7 +4,7 @@
...
}:
let
testFlake = clanLib.clan {
testClan = clanLib.clan {
self = { };
# Point to the folder of the module
# TODO: make this optional
@@ -33,10 +33,20 @@ let
};
in
{
test_simple = {
config = testFlake.config;
/**
We highly advocate the usage of:
https://github.com/nix-community/nix-unit
expr = { };
expected = { };
If you use flake-parts you can use the native integration: https://flake.parts/options/nix-unit.html
*/
test_simple = {
# Allows inspection via the nix-repl
# Ignored by nix-unit; it only looks at 'expr' and 'expected'
inherit testClan;
# Assert that jon has the
# configured greeting in 'environment.etc.hello.text'
expr = testClan.config.nixosConfigurations.jon.config.environment.etc."hello".text;
expected = "Good evening World!";
};
}

View File

@@ -1,8 +1,5 @@
🚧🚧🚧 Experimental 🚧🚧🚧
Use at your own risk.
We are still refining its interfaces, instability and breakages are expected.
!!! Danger "Experimental"
    This service is experimental and will change in the future.
---

View File

@@ -1,3 +1,6 @@
!!! Danger "Experimental"
    This service is experimental and will change in the future.
## Usage
```

View File

@@ -39,6 +39,7 @@
...
}:
let
uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list);
# Collect searchDomains from all servers in this instance
allServerSearchDomains = lib.flatten (
lib.mapAttrsToList (_name: machineConfig: machineConfig.settings.certificate.searchDomains or [ ]) (
@@ -46,7 +47,7 @@
)
);
# Merge client's searchDomains with all servers' searchDomains
searchDomains = lib.uniqueStrings (settings.certificate.searchDomains ++ allServerSearchDomains);
searchDomains = uniqueStrings (settings.certificate.searchDomains ++ allServerSearchDomains);
in
{
clan.core.vars.generators.openssh-ca = lib.mkIf (searchDomains != [ ]) {

View File

@@ -1,8 +1,5 @@
🚧🚧🚧 Experimental 🚧🚧🚧
Use at your own risk.
We are still refining its interfaces, instability and breakages are expected.
!!! Danger "Experimental"
    This service is experimental and will change in the future.
---

View File

@@ -41,14 +41,14 @@ let
# In this case it is 'self-zerotier-redux'
# This is usually only used internally, but we can use it to test the evaluation of service module in isolation
# evaluatedService =
# testFlake.clanInternals.inventoryClass.distributedServices.importedModulesEvaluated.self-zerotier-redux.config;
# testFlake.clanInternals.inventoryClass.distributedServices.servicesEval.config.mappedServices.self-zerotier-redux.config;
in
{
test_simple = {
inherit testFlake;
expr =
testFlake.config.clan.clanInternals.inventoryClass.distributedServices.importedModulesEvaluated.self-wifi.config;
testFlake.config.clan.clanInternals.inventoryClass.distributedServices.servicesEval.config.mappedServices.self-wifi.config;
expected = 1;
# expr = {

View File

@@ -1,8 +1,5 @@
🚧🚧🚧 Experimental 🚧🚧🚧
Use at your own risk.
We are still refining its interfaces, instability and breakages are expected.
!!! Danger "Experimental"
    This service is experimental and will change in the future.
---

View File

@@ -140,6 +140,9 @@
pkgs,
...
}:
let
uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list);
in
{
imports = [
(import ./shared.nix {
@@ -156,7 +159,7 @@
config = {
systemd.services.zerotier-inventory-autoaccept =
let
machines = lib.uniqueStrings (
machines = uniqueStrings (
(lib.optionals (roles ? moon) (lib.attrNames roles.moon.machines))
++ (lib.optionals (roles ? controller) (lib.attrNames roles.controller.machines))
++ (lib.optionals (roles ? peer) (lib.attrNames roles.peer.machines))

devFlake/flake.lock (generated, 18 lines changed)
View File

@@ -105,11 +105,11 @@
},
"nixpkgs-dev": {
"locked": {
"lastModified": 1761631514,
"narHash": "sha256-VsXz+2W4DFBozzppbF9SXD9pNcv17Z+c/lYXvPJi/eI=",
"lastModified": 1762328495,
"narHash": "sha256-IUZvw5kvLiExApP9+SK/styzEKSqfe0NPclu9/z85OQ=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "a0b0d4b52b5f375658ca8371dc49bff171dbda91",
"rev": "4c621660e393922cf68cdbfc40eb5a2d54d3989a",
"type": "github"
},
"original": {
@@ -128,11 +128,11 @@
]
},
"locked": {
"lastModified": 1760652422,
"narHash": "sha256-C88Pgz38QIl9JxQceexqL2G7sw9vodHWx1Uaq+NRJrw=",
"lastModified": 1761730856,
"narHash": "sha256-t1i5p/vSWwueZSC0Z2BImxx3BjoUDNKyC2mk24krcMY=",
"owner": "NuschtOS",
"repo": "search",
"rev": "3ebeebe8b6a49dfb11f771f761e0310f7c48d726",
"rev": "e29de6db0cb3182e9aee75a3b1fd1919d995d85b",
"type": "github"
},
"original": {
@@ -208,11 +208,11 @@
"nixpkgs": []
},
"locked": {
"lastModified": 1761311587,
"narHash": "sha256-Msq86cR5SjozQGCnC6H8C+0cD4rnx91BPltZ9KK613Y=",
"lastModified": 1762366246,
"narHash": "sha256-3xc/f/ZNb5ma9Fc9knIzEwygXotA+0BZFQ5V5XovSOQ=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "2eddae033e4e74bf581c2d1dfa101f9033dbd2dc",
"rev": "a82c779ca992190109e431d7d680860e6723e048",
"type": "github"
},
"original": {

View File

@@ -51,7 +51,7 @@ Make sure you have the following:
**Note:** This creates a new directory in your current location
```shellSession
nix run https://git.clan.lol/clan/clan-core/archive/main.tar.gz#clan-cli --refresh -- flakes create
nix run "https://git.clan.lol/clan/clan-core/archive/main.tar.gz#clan-cli" --refresh -- flakes create
```
3. Enter a **name** in the prompt:

View File

@@ -150,10 +150,61 @@ Those are very similar to NixOS VM tests, as in they run virtualized nixos machi
As of now the container test driver is a downstream development in clan-core.
Basically everything stated under the NixOS VM tests section applies here, except some limitations.
Limitations:
### Using Container Tests vs VM Tests
- Cannot run in interactive mode, however while the container test runs, it logs a nsenter command that can be used to log into each of the container.
- setuid binaries don't work
Container tests are **enabled by default** for all tests using the clan testing framework.
They offer significant performance advantages over VM tests:
- **Faster startup**
- **Lower resource usage**: No full kernel boot or hardware emulation overhead
To control whether a test uses containers or VMs, use the `clan.test.useContainers` option:
```nix
{
clan = {
directory = ./.;
test.useContainers = true; # Use containers (default)
# test.useContainers = false; # Use VMs instead
};
}
```
**When to use VM tests instead of container tests:**
- Testing kernel features, modules, or boot processes
- Testing hardware-specific features
- When you need full system isolation
### System Requirements for Container Tests
Container tests require the **`uid-range`** system feature in the Nix sandbox.
This feature allows Nix to allocate a range of UIDs for containers to use, enabling `systemd-nspawn` containers to run properly inside the Nix build sandbox.
**Configuration:**
The `uid-range` feature requires the `auto-allocate-uids` setting to be enabled in your Nix configuration.
To verify or enable it, add to your NixOS configuration (or the equivalent settings in `/etc/nix/nix.conf`):
```nix
nix.settings.experimental-features = [ "auto-allocate-uids" ];
nix.settings.auto-allocate-uids = true;
nix.settings.system-features = [ "uid-range" ];
```
**Technical details:**
- Container tests set `requiredSystemFeatures = [ "uid-range" ];` in their derivation (see `lib/test/container-test-driver/driver-module.nix:98`)
- Without this feature, containers cannot properly manage user namespaces and will fail to start
### Limitations
- Cannot run in interactive mode; however, while a container test runs, it logs an nsenter command that can be used to log into each of the containers.
- Early implementation with a limited feature set.
### Where to find examples for NixOS container tests

flake.lock (generated, 36 lines changed)
View File

@@ -31,11 +31,11 @@
]
},
"locked": {
"lastModified": 1760701190,
"narHash": "sha256-y7UhnWlER8r776JsySqsbTUh2Txf7K30smfHlqdaIQw=",
"lastModified": 1762276996,
"narHash": "sha256-TtcPgPmp2f0FAnc+DMEw4ardEgv1SGNR3/WFGH0N19M=",
"owner": "nix-community",
"repo": "disko",
"rev": "3a9450b26e69dcb6f8de6e2b07b3fc1c288d85f5",
"rev": "af087d076d3860760b3323f6b583f4d828c1ac17",
"type": "github"
},
"original": {
@@ -51,11 +51,11 @@
]
},
"locked": {
"lastModified": 1760948891,
"narHash": "sha256-TmWcdiUUaWk8J4lpjzu4gCGxWY6/Ok7mOK4fIFfBuU4=",
"lastModified": 1762040540,
"narHash": "sha256-z5PlZ47j50VNF3R+IMS9LmzI5fYRGY/Z5O5tol1c9I4=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "864599284fc7c0ba6357ed89ed5e2cd5040f0c04",
"rev": "0010412d62a25d959151790968765a70c436598b",
"type": "github"
},
"original": {
@@ -71,11 +71,11 @@
]
},
"locked": {
"lastModified": 1761339987,
"narHash": "sha256-IUaawVwItZKi64IA6kF6wQCLCzpXbk2R46dHn8sHkig=",
"lastModified": 1762304480,
"narHash": "sha256-ikVIPB/ea/BAODk6aksgkup9k2jQdrwr4+ZRXtBgmSs=",
"owner": "nix-darwin",
"repo": "nix-darwin",
"rev": "7cd9aac79ee2924a85c211d21fafd394b06a38de",
"rev": "b8c7ac030211f18bd1f41eae0b815571853db7a2",
"type": "github"
},
"original": {
@@ -99,11 +99,11 @@
},
"nixos-facter-modules": {
"locked": {
"lastModified": 1761137276,
"narHash": "sha256-4lDjGnWRBLwqKQ4UWSUq6Mvxu9r8DSqCCydodW/Jsi8=",
"lastModified": 1762264948,
"narHash": "sha256-iaRf6n0KPl9hndnIft3blm1YTAyxSREV1oX0MFZ6Tk4=",
"owner": "nix-community",
"repo": "nixos-facter-modules",
"rev": "70bcd64225d167c7af9b475c4df7b5abba5c7de8",
"rev": "fa695bff9ec37fd5bbd7ee3181dbeb5f97f53c96",
"type": "github"
},
"original": {
@@ -115,10 +115,10 @@
"nixpkgs": {
"locked": {
"lastModified": 315532800,
"narHash": "sha256-yDxtm0PESdgNetiJN5+MFxgubBcLDTiuSjjrJiyvsvM=",
"rev": "d7f52a7a640bc54c7bb414cca603835bf8dd4b10",
"narHash": "sha256-LDT9wuUZtjPfmviCcVWif5+7j4kBI2mWaZwjNNeg4eg=",
"rev": "a7fc11be66bdfb5cdde611ee5ce381c183da8386",
"type": "tarball",
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre871443.d7f52a7a640b/nixexprs.tar.xz"
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre887438.a7fc11be66bd/nixexprs.tar.xz"
},
"original": {
"type": "tarball",
@@ -181,11 +181,11 @@
]
},
"locked": {
"lastModified": 1761311587,
"narHash": "sha256-Msq86cR5SjozQGCnC6H8C+0cD4rnx91BPltZ9KK613Y=",
"lastModified": 1762366246,
"narHash": "sha256-3xc/f/ZNb5ma9Fc9knIzEwygXotA+0BZFQ5V5XovSOQ=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "2eddae033e4e74bf581c2d1dfa101f9033dbd2dc",
"rev": "a82c779ca992190109e431d7d680860e6723e048",
"type": "github"
},
"original": {

View File

@@ -2,11 +2,7 @@
lib,
clanLib,
}:
let
services = clanLib.callLib ./distributed-service/inventory-adapter.nix { };
in
{
inherit (services) mapInstances;
inventoryModule = {
_file = "clanLib.inventory.module";
imports = [

View File

@@ -28,19 +28,15 @@ in
elemType = submoduleWith {
class = "clan.service";
specialArgs = {
exports = config.exports;
directory = directory;
clanLib = specialArgs.clanLib;
exports = config.exports;
};
modules = [
(
{ name, ... }:
{
_module.args._ctx = [ name ];
_module.args.clanLib = specialArgs.clanLib;
_module.args.exports = config.exports;
_module.args.directory = directory;
}
)
./service-module.nix

View File

@@ -1,171 +0,0 @@
# Adapter function between the inventory.instances and the clan.service module
#
# Data flow:
# - inventory.instances -> Adapter -> clan.service module -> Service Resources (i.e. NixosModules per Machine, Vars per Service, etc.)
#
# What this file does:
#
# - Resolves the [Module] to an actual module-path and imports it.
# - Groups together all the same modules into a single import and creates all instances for it.
# - Resolves the inventory tags into machines. Tags don't exist at the service level.
# Also combines the settings for 'machines' and 'tags'.
{
lib,
clanLib,
...
}:
{
mapInstances =
{
# This is used to resolve the module imports from 'flake.inputs'
flakeInputs,
# The clan inventory
inventory,
directory,
clanCoreModules,
prefix ? [ ],
exportsModule,
}:
let
# machineHasTag = machineName: tagName: lib.elem tagName inventory.machines.${machineName}.tags;
# map the instances into the module
importedModuleWithInstances = lib.mapAttrs (
instanceName: instance:
let
resolvedModule = clanLib.resolveModule {
moduleSpec = instance.module;
inherit flakeInputs clanCoreModules;
};
# Every instance includes machines via roles
# :: { client :: ... }
instanceRoles = lib.mapAttrs (
roleName: role:
let
resolvedMachines = clanLib.inventory.resolveTags {
members = {
# Explicit members
machines = lib.attrNames role.machines;
# Resolved Members
tags = lib.attrNames role.tags;
};
inherit (inventory) machines;
inherit instanceName roleName;
};
in
# instances.<instanceName>.roles.<roleName> =
# Remove "tags", they are resolved into "machines"
(removeAttrs role [ "tags" ])
// {
machines = lib.genAttrs resolvedMachines.machines (
machineName:
let
machineSettings = instance.roles.${roleName}.machines.${machineName}.settings or { };
in
# TODO: tag settings
# Wait for this feature until option introspection for 'settings' is done.
# This might get too complex to handle otherwise.
# settingsViaTags = lib.filterAttrs (
# tagName: _: machineHasTag machineName tagName
# ) instance.roles.${roleName}.tags;
{
# TODO: Do we want to wrap settings with
# setDefaultModuleLocation "inventory.instances.${instanceName}.roles.${roleName}.tags.${tagName}";
settings = {
imports = [
machineSettings
]; # ++ lib.attrValues (lib.mapAttrs (_tagName: v: v.settings) settingsViaTags);
};
}
);
}
) instance.roles;
in
{
inherit (instance) module;
inherit resolvedModule instanceRoles;
}
) inventory.instances or { };
# Group the instances by the module they resolve to
# This is necessary to evaluate the module in a single pass
# :: { <module.input>_<module.name> :: [ { name, value } ] }
# Since 'perMachine' needs access to all the instances we should include them as a whole
grouped = lib.foldlAttrs (
acc: instanceName: instance:
let
inputName = if instance.module.input == null then "<clan-core>" else instance.module.input;
id = inputName + "-" + instance.module.name;
in
acc
// {
${id} = acc.${id} or [ ] ++ [
{
inherit instanceName instance;
}
];
}
) { } importedModuleWithInstances;
# servicesEval.config.mappedServices.self-A.result.final.jon.nixosModule
allMachines = lib.mapAttrs (machineName: _: {
# This is the list of nixosModules for each machine
machineImports = lib.foldlAttrs (
acc: _module_ident: serviceModule:
acc ++ [ serviceModule.result.final.${machineName}.nixosModule or { } ]
) [ ] servicesEval.config.mappedServices;
}) inventory.machines or { };
evalServices =
{ modules, prefix }:
lib.evalModules {
class = "clan";
specialArgs = {
inherit clanLib;
_ctx = prefix;
};
modules = [
(import ./all-services-wrapper.nix { inherit directory; })
]
++ modules;
};
servicesEval = evalServices {
inherit prefix;
modules = [
{
inherit exportsModule;
mappedServices = lib.mapAttrs (_module_ident: instances: {
imports = [
# Import the resolved module.
# i.e. clan.modules.admin
{
options.module = lib.mkOption {
type = lib.types.raw;
default = (builtins.head instances).instance.module;
};
}
(builtins.head instances).instance.resolvedModule
] # Include all the instances that correlate to the resolved module
++ (builtins.map (v: {
instances.${v.instanceName}.roles = v.instance.instanceRoles;
}) instances);
}) grouped;
}
];
};
importedModulesEvaluated = servicesEval.config.mappedServices;
in
{
inherit
servicesEval
importedModuleWithInstances
# Exposed for testing
grouped
allMachines
importedModulesEvaluated
;
};
}

View File

@@ -7,10 +7,14 @@
...
}:
let
inherit (lib) mkOption types uniqueStrings;
inherit (lib) mkOption types;
inherit (types) attrsWith submoduleWith;
errorContext = "Error context: ${lib.concatStringsSep "." _ctx}";
# TODO:
# Remove once this gets merged upstream; performs in O(n*log(n) instead of O(n^2))
# https://github.com/NixOS/nixpkgs/pull/355616/files
uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list);
/**
Merges the role- and machine-settings using the role interface

View File

@@ -4,63 +4,53 @@
...
}:
let
inherit (lib)
evalModules
;
evalInventory =
m:
(evalModules {
# Static modules
modules = [
clanLib.inventory.inventoryModule
{
_file = "test file";
tags.all = [ ];
tags.nixos = [ ];
tags.darwin = [ ];
}
{
modules.test = { };
}
m
];
}).config;
callInventoryAdapter =
inventoryModule:
let
inventory = evalInventory inventoryModule;
flakeInputsFixture = {
self.clan.modules = inventoryModule.modules or { };
# Example upstream module
upstream.clan.modules = {
uzzi = {
_class = "clan.service";
manifest = {
name = "uzzi-from-upstream";
};
};
flakeInputsFixture = {
upstream.clan.modules = {
uzzi = {
_class = "clan.service";
manifest = {
name = "uzzi-from-upstream";
};
};
in
clanLib.inventory.mapInstances {
directory = ./.;
clanCoreModules = { };
flakeInputs = flakeInputsFixture;
inherit inventory;
exportsModule = { };
};
};
createTestClan =
testClan:
let
res = clanLib.clan ({
# Static / mocked
specialArgs = {
clan-core = {
clan.modules = { };
};
};
self.inputs = flakeInputsFixture // {
self.clan = res.config;
};
directory = ./.;
exportsModule = { };
imports = [
testClan
];
});
in
res;
in
{
extraModules = import ./extraModules.nix { inherit clanLib; };
exports = import ./exports.nix { inherit lib clanLib; };
settings = import ./settings.nix { inherit lib callInventoryAdapter; };
specialArgs = import ./specialArgs.nix { inherit lib callInventoryAdapter; };
resolve_module_spec = import ./import_module_spec.nix { inherit lib callInventoryAdapter; };
settings = import ./settings.nix { inherit lib createTestClan; };
specialArgs = import ./specialArgs.nix { inherit lib createTestClan; };
resolve_module_spec = import ./import_module_spec.nix {
inherit lib createTestClan;
};
test_simple =
let
res = callInventoryAdapter {
res = createTestClan {
# Authored module
# A minimal module looks like this
# It isn't exactly doing anything but it's a valid module that produces an output
@@ -71,7 +61,7 @@ in
};
};
# User config
instances."instance_foo" = {
inventory.instances."instance_foo" = {
module = {
name = "simple-module";
};
@@ -81,7 +71,7 @@ in
{
# Test that the module is mapped into the output
# We might change the attribute name in the future
expr = res.importedModulesEvaluated ? "<clan-core>-simple-module";
expr = res.config._services.mappedServices ? "<clan-core>-simple-module";
expected = true;
inherit res;
};
@@ -92,7 +82,7 @@ in
# All instances should be included within one evaluation to make all of them available
test_module_grouping =
let
res = callInventoryAdapter {
res = createTestClan {
# Authored module
# A minimal module looks like this
# It isn't exactly doing anything but it's a valid module that produces an output
@@ -112,18 +102,19 @@ in
perMachine = { }: { };
};
# User config
instances."instance_foo" = {
inventory.instances."instance_foo" = {
module = {
name = "A";
};
};
instances."instance_bar" = {
inventory.instances."instance_bar" = {
module = {
name = "B";
};
};
instances."instance_baz" = {
inventory.instances."instance_baz" = {
module = {
name = "A";
};
@@ -133,16 +124,16 @@ in
{
# Test that the module is mapped into the output
# We might change the attribute name in the future
expr = lib.mapAttrs (_n: v: builtins.length v) res.grouped;
expected = {
"<clan-core>-A" = 2;
"<clan-core>-B" = 1;
};
expr = lib.attrNames res.config._services.mappedServices;
expected = [
"<clan-core>-A"
"<clan-core>-B"
];
};
test_creates_all_instances =
let
res = callInventoryAdapter {
res = createTestClan {
# Authored module
# A minimal module looks like this
# It isn't exactly doing anything but it's a valid module that produces an output
@@ -154,22 +145,24 @@ in
perMachine = { }: { };
};
instances."instance_foo" = {
module = {
name = "A";
input = "self";
inventory = {
instances."instance_foo" = {
module = {
name = "A";
input = "self";
};
};
};
instances."instance_bar" = {
module = {
name = "A";
input = "self";
instances."instance_bar" = {
module = {
name = "A";
input = "self";
};
};
};
instances."instance_zaza" = {
module = {
name = "B";
input = null;
instances."instance_zaza" = {
module = {
name = "B";
input = null;
};
};
};
};
@@ -177,7 +170,7 @@ in
{
# Test that the module is mapped into the output
# We might change the attribute name in the future
expr = lib.attrNames res.importedModulesEvaluated.self-A.instances;
expr = lib.attrNames res.config._services.mappedServices.self-A.instances;
expected = [
"instance_bar"
"instance_foo"
@@ -187,7 +180,7 @@ in
# Membership via roles
test_add_machines_directly =
let
res = callInventoryAdapter {
res = createTestClan {
# Authored module
# A minimal module looks like this
# It isn't exactly doing anything but it's a valid module that produces an output
@@ -202,38 +195,40 @@ in
# perMachine = {}: {};
};
machines = {
jon = { };
sara = { };
hxi = { };
};
instances."instance_foo" = {
module = {
name = "A";
input = "self";
inventory = {
machines = {
jon = { };
sara = { };
hxi = { };
};
roles.peer.machines.jon = { };
};
instances."instance_bar" = {
module = {
name = "A";
input = "self";
instances."instance_foo" = {
module = {
name = "A";
input = "self";
};
roles.peer.machines.jon = { };
};
roles.peer.machines.sara = { };
};
instances."instance_zaza" = {
module = {
name = "B";
input = null;
instances."instance_bar" = {
module = {
name = "A";
input = "self";
};
roles.peer.machines.sara = { };
};
instances."instance_zaza" = {
module = {
name = "B";
input = null;
};
roles.peer.tags.all = { };
};
roles.peer.tags.all = { };
};
};
in
{
# Test that the module is mapped into the output
# We might change the attribute name in the future
expr = lib.attrNames res.importedModulesEvaluated.self-A.result.allMachines;
expr = lib.attrNames res.config._services.mappedServices.self-A.result.allMachines;
expected = [
"jon"
"sara"
@@ -243,7 +238,7 @@ in
# Membership via tags
test_add_machines_via_tags =
let
res = callInventoryAdapter {
res = createTestClan {
# Authored module
# A minimal module looks like this
# It isn't exactly doing anything but it's a valid module that produces an output
@@ -257,35 +252,37 @@ in
# perMachine = {}: {};
};
machines = {
jon = {
tags = [ "foo" ];
inventory = {
machines = {
jon = {
tags = [ "foo" ];
};
sara = {
tags = [ "foo" ];
};
hxi = { };
};
sara = {
tags = [ "foo" ];
instances."instance_foo" = {
module = {
name = "A";
input = "self";
};
roles.peer.tags.foo = { };
};
hxi = { };
};
instances."instance_foo" = {
module = {
name = "A";
input = "self";
instances."instance_zaza" = {
module = {
name = "B";
input = null;
};
roles.peer.tags.all = { };
};
roles.peer.tags.foo = { };
};
instances."instance_zaza" = {
module = {
name = "B";
input = null;
};
roles.peer.tags.all = { };
};
};
in
{
# Test that the module is mapped into the output
# We might change the attribute name in the future
expr = lib.attrNames res.importedModulesEvaluated.self-A.result.allMachines;
expr = lib.attrNames res.config._services.mappedServices.self-A.result.allMachines;
expected = [
"jon"
"sara"
@@ -293,6 +290,9 @@ in
};
machine_imports = import ./machine_imports.nix { inherit lib clanLib; };
per_machine_args = import ./per_machine_args.nix { inherit lib callInventoryAdapter; };
per_instance_args = import ./per_instance_args.nix { inherit lib callInventoryAdapter; };
per_machine_args = import ./per_machine_args.nix { inherit lib createTestClan; };
per_instance_args = import ./per_instance_args.nix {
inherit lib;
callInventoryAdapter = createTestClan;
};
}

View File

@@ -1,4 +1,4 @@
{ callInventoryAdapter, ... }:
{ createTestClan, ... }:
let
# Authored module
# A minimal module looks like this
@@ -23,10 +23,13 @@ let
resolve =
spec:
callInventoryAdapter {
inherit modules machines;
instances."instance_foo" = {
module = spec;
createTestClan {
inherit modules;
inventory = {
inherit machines;
instances."instance_foo" = {
module = spec;
};
};
};
in
@@ -36,25 +39,16 @@ in
(resolve {
name = "A";
input = "self";
}).importedModuleWithInstances.instance_foo.resolvedModule;
expected = {
_class = "clan.service";
manifest = {
name = "network";
};
};
}).config._services.mappedServices.self-A.manifest.name;
expected = "network";
};
test_import_remote_module_by_name = {
expr =
(resolve {
name = "uzzi";
input = "upstream";
}).importedModuleWithInstances.instance_foo.resolvedModule;
expected = {
_class = "clan.service";
manifest = {
name = "uzzi-from-upstream";
};
};
}).config._services.mappedServices.upstream-uzzi.manifest.name;
expected = "uzzi-from-upstream";
};
}

View File

@@ -58,39 +58,43 @@ let
sara = { };
};
res = callInventoryAdapter {
inherit modules machines;
instances."instance_foo" = {
module = {
name = "A";
input = "self";
inherit modules;
inventory = {
inherit machines;
instances."instance_foo" = {
module = {
name = "A";
input = "self";
};
roles.peer.machines.jon = {
settings.timeout = lib.mkForce "foo-peer-jon";
};
roles.peer = {
settings.timeout = "foo-peer";
};
roles.controller.machines.jon = { };
};
roles.peer.machines.jon = {
settings.timeout = lib.mkForce "foo-peer-jon";
instances."instance_bar" = {
module = {
name = "A";
input = "self";
};
roles.peer.machines.jon = {
settings.timeout = "bar-peer-jon";
};
};
roles.peer = {
settings.timeout = "foo-peer";
# TODO: move this into a separate test.
# Separate out the check that this module is never imported
# import the module "B" (undefined)
# All machines have this instance
instances."instance_zaza" = {
module = {
name = "B";
input = null;
};
roles.peer.tags.all = { };
};
roles.controller.machines.jon = { };
};
instances."instance_bar" = {
module = {
name = "A";
input = "self";
};
roles.peer.machines.jon = {
settings.timeout = "bar-peer-jon";
};
};
# TODO: move this into a separate test.
# Separate out the check that this module is never imported
# import the module "B" (undefined)
# All machines have this instance
instances."instance_zaza" = {
module = {
name = "B";
input = null;
};
roles.peer.tags.all = { };
};
};
@@ -105,9 +109,10 @@ in
{
# settings should evaluate
test_per_instance_arguments = {
inherit res;
expr = {
instanceName =
res.importedModulesEvaluated.self-A.result.allRoles.peer.allInstances."instance_foo".allMachines.jon.passthru.instanceName;
res.config._services.mappedServices.self-A.result.allRoles.peer.allInstances."instance_foo".allMachines.jon.passthru.instanceName;
# settings are specific.
# Below we access:
@@ -115,11 +120,11 @@ in
# roles = peer
# machines = jon
settings =
res.importedModulesEvaluated.self-A.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.settings;
res.config._services.mappedServices.self-A.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.settings;
machine =
res.importedModulesEvaluated.self-A.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.machine;
res.config._services.mappedServices.self-A.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.machine;
roles =
res.importedModulesEvaluated.self-A.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.roles;
res.config._services.mappedServices.self-A.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.roles;
};
expected = {
instanceName = "instance_foo";
@@ -160,9 +165,9 @@ in
# TODO: Cannot be tested like this anymore
test_per_instance_settings_vendoring = {
x = res.importedModulesEvaluated.self-A;
x = res.config._services.mappedServices.self-A;
expr =
res.importedModulesEvaluated.self-A.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.vendoredSettings;
res.config._services.mappedServices.self-A.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.vendoredSettings;
expected = {
timeout = "config.thing";
};

View File

@@ -1,4 +1,4 @@
{ lib, callInventoryAdapter }:
{ lib, createTestClan }:
let
# Authored module
# A minimal module looks like this
@@ -39,36 +39,40 @@ let
jon = { };
sara = { };
};
res = callInventoryAdapter {
inherit modules machines;
instances."instance_foo" = {
module = {
name = "A";
input = "self";
res = createTestClan {
inherit modules;
inventory = {
inherit machines;
instances."instance_foo" = {
module = {
name = "A";
input = "self";
};
roles.peer.machines.jon = {
settings.timeout = lib.mkForce "foo-peer-jon";
};
roles.peer = {
settings.timeout = "foo-peer";
};
};
roles.peer.machines.jon = {
settings.timeout = lib.mkForce "foo-peer-jon";
instances."instance_bar" = {
module = {
name = "A";
input = "self";
};
roles.peer.machines.jon = {
settings.timeout = "bar-peer-jon";
};
};
roles.peer = {
settings.timeout = "foo-peer";
instances."instance_zaza" = {
module = {
name = "B";
input = null;
};
roles.peer.tags.all = { };
};
};
instances."instance_bar" = {
module = {
name = "A";
input = "self";
};
roles.peer.machines.jon = {
settings.timeout = "bar-peer-jon";
};
};
instances."instance_zaza" = {
module = {
name = "B";
input = null;
};
roles.peer.tags.all = { };
};
};
in
@@ -79,7 +83,7 @@ in
inherit res;
expr = {
hasMachineSettings =
res.importedModulesEvaluated.self-A.result.allMachines.jon.passthru.instances.instance_foo.roles.peer.machines.jon
res.config._services.mappedServices.self-A.result.allMachines.jon.passthru.instances.instance_foo.roles.peer.machines.jon
? settings;
# settings are specific.
@@ -88,10 +92,10 @@ in
# roles = peer
# machines = jon
specificMachineSettings =
res.importedModulesEvaluated.self-A.result.allMachines.jon.passthru.instances.instance_foo.roles.peer.machines.jon.settings;
res.config._services.mappedServices.self-A.result.allMachines.jon.passthru.instances.instance_foo.roles.peer.machines.jon.settings;
hasRoleSettings =
res.importedModulesEvaluated.self-A.result.allMachines.jon.passthru.instances.instance_foo.roles.peer
res.config._services.mappedServices.self-A.result.allMachines.jon.passthru.instances.instance_foo.roles.peer
? settings;
# settings are specific.
@@ -100,7 +104,7 @@ in
# roles = peer
# machines = *
specificRoleSettings =
res.importedModulesEvaluated.self-A.result.allMachines.jon.passthru.instances.instance_foo.roles.peer;
res.config._services.mappedServices.self-A.result.allMachines.jon.passthru.instances.instance_foo.roles.peer;
};
expected = {
hasMachineSettings = true;

View File

@@ -1,6 +1,6 @@
{ callInventoryAdapter, lib, ... }:
{ createTestClan, lib, ... }:
let
res = callInventoryAdapter {
res = createTestClan {
modules."A" = {
_class = "clan.service";
manifest = {
@@ -21,28 +21,31 @@ let
};
};
};
machines = {
jon = { };
sara = { };
};
instances."instance_foo" = {
module = {
name = "A";
input = "self";
inventory = {
machines = {
jon = { };
sara = { };
};
# Settings for both jon and sara
roles.peer.settings = {
timeout = 40;
instances."instance_foo" = {
module = {
name = "A";
input = "self";
};
# Settings for both jon and sara
roles.peer.settings = {
timeout = 40;
};
# Jon overrides timeout
roles.peer.machines.jon = {
settings.timeout = lib.mkForce 42;
};
roles.peer.machines.sara = { };
};
# Jon overrides timeout
roles.peer.machines.jon = {
settings.timeout = lib.mkForce 42;
};
roles.peer.machines.sara = { };
};
};
config = res.servicesEval.config.mappedServices.self-A;
config = res.config._services.mappedServices.self-A;
#
applySettings =

View File

@@ -1,6 +1,6 @@
{ callInventoryAdapter, lib, ... }:
{ createTestClan, lib, ... }:
let
res = callInventoryAdapter {
res = createTestClan {
modules."A" = m: {
_class = "clan.service";
config = {
@@ -14,19 +14,21 @@ let
default = m;
};
};
machines = {
jon = { };
};
instances."instance_foo" = {
module = {
name = "A";
input = "self";
inventory = {
machines = {
jon = { };
};
instances."instance_foo" = {
module = {
name = "A";
input = "self";
};
roles.peer.machines.jon = { };
};
roles.peer.machines.jon = { };
};
};
specialArgs = lib.attrNames res.servicesEval.config.mappedServices.self-A.test.specialArgs;
specialArgs = lib.attrNames res.config._services.mappedServices.self-A.test.specialArgs;
in
{
test_simple = {

View File

@@ -19,6 +19,7 @@
imports = [
./top-level-interface.nix
./module.nix
./distributed-services.nix
./checks.nix
];
}

View File

@@ -0,0 +1,163 @@
{
lib,
clanLib,
config,
clan-core,
...
}:
let
inherit (lib) mkOption types;
# Keep a reference to top-level
clanConfig = config;
inventory = clanConfig.inventory;
flakeInputs = clanConfig.self.inputs;
clanCoreModules = clan-core.clan.modules;
grouped = lib.foldlAttrs (
acc: instanceName: instance:
let
inputName = if instance.module.input == null then "<clan-core>" else instance.module.input;
id = inputName + "-" + instance.module.name;
in
acc
// {
${id} = acc.${id} or [ ] ++ [
{
inherit instanceName instance;
}
];
}
) { } importedModuleWithInstances;
importedModuleWithInstances = lib.mapAttrs (
instanceName: instance:
let
resolvedModule = clanLib.resolveModule {
moduleSpec = instance.module;
inherit flakeInputs clanCoreModules;
};
# Every instance includes machines via roles
# :: { client :: ... }
instanceRoles = lib.mapAttrs (
roleName: role:
let
resolvedMachines = clanLib.inventory.resolveTags {
members = {
# Explicit members
machines = lib.attrNames role.machines;
# Resolved Members
tags = lib.attrNames role.tags;
};
inherit (inventory) machines;
inherit instanceName roleName;
};
in
# instances.<instanceName>.roles.<roleName> =
# Remove "tags", they are resolved into "machines"
(removeAttrs role [ "tags" ])
// {
machines = lib.genAttrs resolvedMachines.machines (
machineName:
let
machineSettings = instance.roles.${roleName}.machines.${machineName}.settings or { };
in
# TODO: tag settings
# Wait for this feature until option introspection for 'settings' is done.
# This might get too complex to handle otherwise.
# settingsViaTags = lib.filterAttrs (
# tagName: _: machineHasTag machineName tagName
# ) instance.roles.${roleName}.tags;
{
# TODO: Do we want to wrap settings with
# setDefaultModuleLocation "inventory.instances.${instanceName}.roles.${roleName}.tags.${tagName}";
settings = {
imports = [
machineSettings
]; # ++ lib.attrValues (lib.mapAttrs (_tagName: v: v.settings) settingsViaTags);
};
}
);
}
) instance.roles;
in
{
inherit (instance) module;
inherit resolvedModule instanceRoles;
}
) inventory.instances or { };
in
{
_class = "clan";
options._services = mkOption {
visible = false;
description = ''
All service instances
!!! Danger "Internal API"
Do not rely on this API yet.
- Will be renamed to just 'services' in the future,
once the name can be claimed again.
- Structure will change.
The API will be declared public after being simplified.
'';
type = types.submoduleWith {
# TODO: Remove specialArgs
specialArgs = {
inherit clanLib;
};
modules = [
(import ../../lib/inventory/distributed-service/all-services-wrapper.nix {
inherit (clanConfig) directory;
})
# Dependencies
{
exportsModule = clanConfig.exportsModule;
}
{
# TODO: Rename to "allServices"
# All services
mappedServices = lib.mapAttrs (_module_ident: instances: {
imports = [
# Import the resolved module.
# i.e. clan.modules.admin
{
options.module = lib.mkOption {
type = lib.types.raw;
default = (builtins.head instances).instance.module;
};
}
(builtins.head instances).instance.resolvedModule
] # Include all the instances that correlate to the resolved module
++ (builtins.map (v: {
instances.${v.instanceName}.roles = v.instance.instanceRoles;
}) instances);
}) grouped;
}
];
};
default = { };
};
options._allMachines = mkOption {
internal = true;
type = types.raw;
default = lib.mapAttrs (machineName: _: {
# This is the list of nixosModules for each machine
machineImports = lib.foldlAttrs (
acc: _module_ident: serviceModule:
acc ++ [ serviceModule.result.final.${machineName}.nixosModule or { } ]
) [ ] config._services.mappedServices;
}) inventory.machines or { };
};
config = {
clanInternals.inventoryClass.machines = config._allMachines;
# clanInternals.inventoryClass.distributedServices = config._services;
# Exports from distributed services
exports = config._services.exports;
};
}
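This new module groups instances by their resolved module: the attribute key is inputName + "-" + module.name, and a null input is rendered as "<clan-core>". For the example inventory used in the tests above, the grouping would come out as follows (illustrative values, not repo output):

    # instance_foo, instance_bar -> { name = "A"; input = "self"; } -> "self-A"
    # instance_zaza              -> { name = "B"; input = null;   } -> "<clan-core>-B"
    lib.attrNames config._services.mappedServices
    # => [ "<clan-core>-B" "self-A" ]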

View File

@@ -3,12 +3,16 @@
lib,
clanModule,
clanLib,
clan-core,
}:
let
eval = lib.evalModules {
modules = [
clanModule
];
specialArgs = {
self = clan-core;
};
};
evalDocs = pkgs.nixosOptionsDoc {

View File

@@ -12,6 +12,7 @@ in
}:
let
jsonDocs = import ./eval-docs.nix {
clan-core = self;
inherit
pkgs
lib

View File

@@ -219,8 +219,6 @@ in
inherit nixosConfigurations;
inherit darwinConfigurations;
exports = config.clanInternals.inventoryClass.distributedServices.servicesEval.config.exports;
clanInternals = {
inventoryClass =
let
@@ -254,21 +252,9 @@ in
exportsModule = config.exportsModule;
}
(
{ config, ... }:
{ ... }:
{
staticModules = clan-core.clan.modules;
distributedServices = clanLib.inventory.mapInstances {
inherit (config)
inventory
directory
flakeInputs
exportsModule
;
clanCoreModules = clan-core.clan.modules;
prefix = [ "distributedServices" ];
};
machines = config.distributedServices.allMachines;
}
)
];

View File

@@ -67,9 +67,6 @@ in
type = types.raw;
};
distributedServices = mkOption {
type = types.raw;
};
inventory = mkOption {
type = types.raw;
};

View File

@@ -5,7 +5,7 @@
}:
{
# If we also need zfs, we can use the unstable version as we otherwise don't have a new enough kernel version
boot.zfs.package = pkgs.zfsUnstable;
boot.zfs.package = pkgs.zfs_unstable or pkgs.zfsUnstable;
# Enable bcachefs support
boot.supportedFilesystems.bcachefs = lib.mkDefault true;
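The replacement line leans on Nix's attribute fallback operator: pkgs.zfs_unstable or pkgs.zfsUnstable selects the new attribute when it exists and falls back to the old name otherwise, so the module evaluates on nixpkgs revisions from either side of the rename. The semantics in isolation (a sketch, not repo code):

    nix-repl> { zfs_unstable = "new"; }.zfs_unstable or "old"
    "new"
    nix-repl> { zfsUnstable = "new"; }.zfs_unstable or "old"
    "old"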

View File

@@ -18,7 +18,7 @@ let
inputs.data-mesher.nixosModules.data-mesher
];
config = {
clan.core.clanPkgs = lib.mkDefault self.packages.${pkgs.hostPlatform.system};
clan.core.clanPkgs = lib.mkDefault self.packages.${pkgs.stdenv.hostPlatform.system};
};
};
in

View File

@@ -6,7 +6,7 @@
}:
let
isUnstable = config.boot.zfs.package == pkgs.zfsUnstable;
isUnstable = config.boot.zfs.package == pkgs.zfs_unstable or pkgs.zfsUnstable;
zfsCompatibleKernelPackages = lib.filterAttrs (
name: kernelPackages:
(builtins.match "linux_[0-9]+_[0-9]+" name) != null
@@ -30,5 +30,5 @@ let
in
{
# Note this might jump back and forth as kernels get added or removed.
boot.kernelPackages = lib.mkIf (lib.meta.availableOn pkgs.hostPlatform pkgs.zfs) latestKernelPackage;
boot.kernelPackages = lib.mkIf (lib.meta.availableOn pkgs.stdenv.hostPlatform pkgs.zfs) latestKernelPackage;
}

View File

@@ -4,6 +4,7 @@
padding: 8px;
flex-direction: column;
align-items: flex-start;
gap: 4px;
border-radius: 5px;
border: 1px solid var(--clr-border-def-2, #d8e8eb);

View File

@@ -1,11 +1,13 @@
import { onCleanup, onMount } from "solid-js";
import styles from "./ContextMenu.module.css";
import { Typography } from "../Typography/Typography";
import { Divider } from "../Divider/Divider";
import Icon from "../Icon/Icon";
export const Menu = (props: {
x: number;
y: number;
onSelect: (option: "move") => void;
onSelect: (option: "move" | "delete") => void;
close: () => void;
intersect: string[];
}) => {
@@ -54,13 +56,31 @@ export const Menu = (props: {
>
<Typography
hierarchy="label"
size="s"
weight="bold"
color={currentMachine() ? "primary" : "quaternary"}
>
Move
</Typography>
</li>
<Divider />
<li
class={styles.item}
aria-disabled={!currentMachine()}
onClick={() => {
console.log("Delete clicked", currentMachine());
props.onSelect("delete");
props.close();
}}
>
<Typography
hierarchy="label"
color={currentMachine() ? "primary" : "quaternary"}
>
<span class="flex items-center gap-2">
Delete
<Icon icon="Trash" font-size="inherit" />
</span>
</Typography>
</li>
</ul>
);
};

View File

@@ -71,7 +71,7 @@ const Machines = () => {
}
const result = ctx.machinesQuery.data;
return Object.keys(result).length > 0 ? result : undefined;
return Object.keys(result).length > 0 ? result : [];
};
return (
@@ -117,7 +117,7 @@ const Machines = () => {
}
>
<nav>
<For each={Object.entries(machines()!)}>
<For each={Object.entries(machines())}>
{([id, machine]) => (
<MachineRoute
clanURI={clanURI}
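Returning an empty collection instead of undefined is what lets the <For> above drop the non-null assertion: Object.entries accepts both the populated record and the empty-array fallback (a sketch of the semantics, not repo code):

    Object.entries({ jon: {} })  // => [["jon", {}]]
    Object.entries([])           // => []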

View File

@@ -206,8 +206,8 @@ const ClanSceneController = (props: RouteSectionProps) => {
<AddMachine
onCreated={async (id) => {
const promise = currentPromise();
await ctx.machinesQuery.refetch();
if (promise) {
await ctx.machinesQuery.refetch();
promise.resolve({ id });
setCurrentPromise(null);
}

View File

@@ -18,12 +18,12 @@ export class MachineManager {
private disposeRoot: () => void;
private machinePositionsSignal: Accessor<SceneData>;
private machinePositionsSignal: Accessor<SceneData | undefined>;
constructor(
scene: THREE.Scene,
registry: ObjectRegistry,
machinePositionsSignal: Accessor<SceneData>,
machinePositionsSignal: Accessor<SceneData | undefined>,
machinesQueryResult: MachinesQueryResult,
selectedIds: Accessor<Set<string>>,
setMachinePos: (id: string, position: [number, number] | null) => void,
@@ -39,8 +39,9 @@ export class MachineManager {
if (!machinesQueryResult.data) return;
const actualIds = Object.keys(machinesQueryResult.data);
const machinePositions = machinePositionsSignal();
// Remove stale
const machinePositions = machinePositionsSignal() || {};
for (const id of Object.keys(machinePositions)) {
if (!actualIds.includes(id)) {
console.log("Removing stale machine", id);
@@ -61,8 +62,7 @@ export class MachineManager {
// Effect 2: sync store → scene
//
createEffect(() => {
const positions = machinePositionsSignal();
if (!positions) return;
const positions = machinePositionsSignal() || {};
// Remove machines from scene
for (const [id, repr] of this.machines) {
@@ -103,7 +103,7 @@ export class MachineManager {
nextGridPos(): [number, number] {
const occupiedPositions = new Set(
Object.values(this.machinePositionsSignal()).map((data) =>
Object.values(this.machinePositionsSignal() || {}).map((data) =>
keyFromPos(data.position),
),
);
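The changes in this file widen the position store to Accessor<SceneData | undefined> and make every consumer default to an empty record instead of early-returning. The pattern, reduced to a self-contained sketch (the SceneData shape is inferred from the position accesses above):

    type SceneData = Record<string, { position: [number, number] }>;
    declare const machinePositionsSignal: () => SceneData | undefined;

    // The accessor may legitimately yield undefined before the scene
    // store has loaded, so consumers substitute an empty map.
    const positions: SceneData = machinePositionsSignal() || {};
    for (const [id, data] of Object.entries(positions)) {
      // ...reconcile the scene object for `id` with data.position
    }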

View File

@@ -32,6 +32,9 @@ import {
} from "./highlightStore";
import { createMachineMesh } from "./MachineRepr";
import { useClanContext } from "@/src/routes/Clan/Clan";
import client from "@api/clan/client";
import { navigateToClan } from "../hooks/clan";
import { useNavigate } from "@solidjs/router";
function intersectMachines(
event: MouseEvent,
@@ -100,7 +103,7 @@ export function CubeScene(props: {
onCreate: () => Promise<{ id: string }>;
selectedIds: Accessor<Set<string>>;
onSelect: (v: Set<string>) => void;
sceneStore: Accessor<SceneData>;
sceneStore: Accessor<SceneData | undefined>;
setMachinePos: (machineId: string, pos: [number, number] | null) => void;
isLoading: boolean;
clanURI: string;
@@ -131,9 +134,6 @@ export function CubeScene(props: {
let machineManager: MachineManager;
const [positionMode, setPositionMode] = createSignal<"grid" | "circle">(
"grid",
);
// Managed by controls
const [isDragging, setIsDragging] = createSignal(false);
@@ -142,10 +142,6 @@ export function CubeScene(props: {
// TODO: Unify this with actionRepr position
const [cursorPosition, setCursorPosition] = createSignal<[number, number]>();
const [cameraInfo, setCameraInfo] = createSignal({
position: { x: 0, y: 0, z: 0 },
spherical: { radius: 0, theta: 0, phi: 0 },
});
// Context menu state
const [contextOpen, setContextOpen] = createSignal(false);
const [menuPos, setMenuPos] = createSignal<{ x: number; y: number }>();
@@ -157,7 +153,6 @@ export function CubeScene(props: {
const BASE_SIZE = 0.9; // Size of the base plate under each cube
const CUBE_SIZE = BASE_SIZE / 1.5; // Edge length of the cube itself
const BASE_HEIGHT = 0.05; // Thickness of the base plate
const CUBE_Y = 0 + CUBE_SIZE / 2 + BASE_HEIGHT / 2; // Y position of the cube above the ground
const CUBE_SEGMENT_HEIGHT = CUBE_SIZE / 1;
const FLOOR_COLOR = 0xcdd8d9;
@@ -201,6 +196,8 @@ export function CubeScene(props: {
const grid = new THREE.GridHelper(1000, 1000 / 1, 0xe1edef, 0xe1edef);
const navigate = useNavigate();
onMount(() => {
// Scene setup
scene = new THREE.Scene();
@@ -311,21 +308,12 @@ export function CubeScene(props: {
bgCamera,
);
// controls.addEventListener("start", (e) => {
// setIsDragging(true);
// });
// controls.addEventListener("end", (e) => {
// setIsDragging(false);
// });
// Lighting
const ambientLight = new THREE.AmbientLight(0xd9f2f7, 0.72);
scene.add(ambientLight);
const directionalLight = new THREE.DirectionalLight(0xffffff, 3.5);
// scene.add(new THREE.DirectionalLightHelper(directionalLight));
// scene.add(new THREE.CameraHelper(camera));
const lightPos = new THREE.Spherical(
15,
initialSphericalCameraPosition.phi - Math.PI / 8,
@@ -412,30 +400,6 @@ export function CubeScene(props: {
actionMachine = createActionMachine();
scene.add(actionMachine);
// const spherical = new THREE.Spherical();
// spherical.setFromVector3(camera.position);
// Function to update camera info
const updateCameraInfo = () => {
const spherical = new THREE.Spherical();
spherical.setFromVector3(camera.position);
setCameraInfo({
position: {
x: Math.round(camera.position.x * 100) / 100,
y: Math.round(camera.position.y * 100) / 100,
z: Math.round(camera.position.z * 100) / 100,
},
spherical: {
radius: Math.round(spherical.radius * 100) / 100,
theta: Math.round(spherical.theta * 100) / 100,
phi: Math.round(spherical.phi * 100) / 100,
},
});
};
// Initial camera info update
updateCameraInfo();
createEffect(
on(ctx.worldMode, (mode) => {
if (mode === "create") {
@@ -661,7 +625,8 @@ export function CubeScene(props: {
});
const snapToGrid = (point: THREE.Vector3) => {
if (!props.sceneStore) return;
const store = props.sceneStore() || {};
// Snap to grid
const snapped = new THREE.Vector3(
Math.round(point.x / GRID_SIZE) * GRID_SIZE,
@@ -670,7 +635,7 @@ export function CubeScene(props: {
);
// Skip snapping if there's already a cube at this position
const positions = Object.entries(props.sceneStore());
const positions = Object.entries(store);
const intersects = positions.some(
([_id, p]) => p.position[0] === snapped.x && p.position[1] === snapped.z,
);
@@ -694,7 +659,6 @@ export function CubeScene(props: {
};
const onAddClick = (event: MouseEvent) => {
setPositionMode("grid");
ctx.setWorldMode("create");
renderLoop.requestRender();
};
@@ -706,9 +670,6 @@ export function CubeScene(props: {
if (!actionRepr) return;
actionRepr.visible = true;
// (actionRepr.material as THREE.MeshPhongMaterial).emissive.set(
// worldMode() === "create" ? CREATE_BASE_EMISSIVE : MOVE_BASE_EMISSIVE,
// );
// Calculate mouse position in normalized device coordinates
// (-1 to +1) for both components
@@ -736,23 +697,38 @@ export function CubeScene(props: {
}
}
};
const handleMenuSelect = (mode: "move") => {
const handleMenuSelect = async (mode: "move" | "delete") => {
const firstId = menuIntersection()[0];
if (!firstId) {
return;
}
const machine = machineManager.machines.get(firstId);
if (mode === "delete") {
console.log("deleting machine", firstId);
await client.post("delete_machine", {
body: {
machine: { flake: { identifier: props.clanURI }, name: firstId },
},
});
navigateToClan(navigate, props.clanURI);
ctx.machinesQuery.refetch();
ctx.serviceInstancesQuery.refetch();
return;
}
// Else "move" mode
ctx.setWorldMode(mode);
setHighlightGroups({ move: new Set(menuIntersection()) });
// Find the position of the first selected machine
// Set the actionMachine position to that
const firstId = menuIntersection()[0];
if (firstId) {
const machine = machineManager.machines.get(firstId);
if (machine && actionMachine) {
actionMachine.position.set(
machine.group.position.x,
0,
machine.group.position.z,
);
setCursorPosition([machine.group.position.x, machine.group.position.z]);
}
if (machine && actionMachine) {
actionMachine.position.set(
machine.group.position.x,
0,
machine.group.position.z,
);
setCursorPosition([machine.group.position.x, machine.group.position.z]);
}
};

View File

@@ -766,6 +766,28 @@ def test_prompt(
assert sops_store.get(my_generator, "prompt_persist").decode() == "prompt_persist"
@pytest.mark.with_core
def test_non_existing_dependency_raises_error(
monkeypatch: pytest.MonkeyPatch,
flake_with_sops: ClanFlake,
) -> None:
"""Ensure that a generator with a non-existing dependency raises a clear error."""
flake = flake_with_sops
config = flake.machines["my_machine"] = create_test_machine_config()
my_generator = config["clan"]["core"]["vars"]["generators"]["my_generator"]
my_generator["files"]["my_value"]["secret"] = False
my_generator["script"] = 'echo "$RANDOM" > "$out"/my_value'
my_generator["dependencies"] = ["non_existing_generator"]
flake.refresh()
monkeypatch.chdir(flake.path)
with pytest.raises(
ClanError,
match="Generator 'my_generator' on machine 'my_machine' depends on generator 'non_existing_generator', but 'non_existing_generator' does not exist",
):
cli.run(["vars", "generate", "--flake", str(flake.path), "my_machine"])
@pytest.mark.with_core
def test_shared_vars_must_never_depend_on_machine_specific_vars(
monkeypatch: pytest.MonkeyPatch,

View File

@@ -66,6 +66,41 @@ class Generator:
_public_store: "StoreBase | None" = None
_secret_store: "StoreBase | None" = None
@staticmethod
def validate_dependencies(
generator_name: str,
machine_name: str,
dependencies: list[str],
generators_data: dict[str, dict],
) -> list[GeneratorKey]:
"""Validate and build dependency keys for a generator.
Args:
generator_name: Name of the generator that has dependencies
machine_name: Name of the machine the generator belongs to
dependencies: List of dependency generator names
generators_data: Dictionary of all available generators for this machine
Returns:
List of GeneratorKey objects
Raises:
ClanError: If a dependency does not exist
"""
deps_list = []
for dep in dependencies:
if dep not in generators_data:
msg = f"Generator '{generator_name}' on machine '{machine_name}' depends on generator '{dep}', but '{dep}' does not exist. Please check your configuration."
raise ClanError(msg)
deps_list.append(
GeneratorKey(
machine=None if generators_data[dep]["share"] else machine_name,
name=dep,
)
)
return deps_list
@property
def key(self) -> GeneratorKey:
if self.share:
@@ -240,15 +275,12 @@ class Generator:
name=gen_name,
share=share,
files=files,
dependencies=[
GeneratorKey(
machine=None
if generators_data[dep]["share"]
else machine_name,
name=dep,
)
for dep in gen_data["dependencies"]
],
dependencies=cls.validate_dependencies(
gen_name,
machine_name,
gen_data["dependencies"],
generators_data,
),
migrate_fact=gen_data.get("migrateFact"),
validation_hash=gen_data.get("validationHash"),
prompts=prompts,
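validate_dependencies pulls the dependency check out of the inline loop replaced in the second hunk. A hedged usage sketch (the generators_data shape is inferred from the code above; only the "share" key matters here):

    generators_data = {
        "wifi": {"share": False},          # machine-specific generator
        "root_password": {"share": True},  # shared generator
    }

    # Shared dependencies resolve with machine=None; machine-specific
    # ones keep the machine name in their GeneratorKey.
    keys = Generator.validate_dependencies(
        "my_generator", "my_machine", ["wifi", "root_password"], generators_data
    )

    # Unknown names now fail fast with a descriptive ClanError instead
    # of surfacing as a KeyError inside the old inline loop.
    Generator.validate_dependencies(
        "my_generator", "my_machine", ["missing"], generators_data
    )  # raises ClanError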

View File

@@ -245,7 +245,7 @@ class SecretStore(StoreBase):
output_dir / "activation" / generator.name / file.name
)
out_file.parent.mkdir(parents=True, exist_ok=True)
out_file.write_bytes(self.get(generator, file.name))
out_file.write_bytes(file.value)
if "partitioning" in phases:
for generator in vars_generators:
for file in generator.files:
@@ -254,7 +254,7 @@ class SecretStore(StoreBase):
output_dir / "partitioning" / generator.name / file.name
)
out_file.parent.mkdir(parents=True, exist_ok=True)
out_file.write_bytes(self.get(generator, file.name))
out_file.write_bytes(file.value)
hash_data = self.generate_hash(machine)
if hash_data:

View File

@@ -246,7 +246,7 @@ class SecretStore(StoreBase):
)
# chmod after in case it doesn't have u+w
target_path.touch(mode=0o600)
target_path.write_bytes(self.get(generator, file.name))
target_path.write_bytes(file.value)
target_path.chmod(file.mode)
if "partitioning" in phases:
@@ -260,7 +260,7 @@ class SecretStore(StoreBase):
)
# chmod after in case it doesn't have u+w
target_path.touch(mode=0o600)
target_path.write_bytes(self.get(generator, file.name))
target_path.write_bytes(file.value)
target_path.chmod(file.mode)
@override

View File

@@ -211,7 +211,7 @@ class ClanSelectError(ClanError):
def __str__(self) -> str:
if self.description:
return f"{self.msg} Reason: {self.description}"
return f"{self.msg} Reason: {self.description}. Use flag '--debug' to see full nix trace."
return self.msg
def __repr__(self) -> str:

View File

@@ -59,9 +59,7 @@ def upload_sources(machine: Machine, ssh: Host, upload_inputs: bool) -> str:
if not has_path_inputs and not upload_inputs:
# Just copy the flake to the remote machine; we can substitute other inputs there.
path = flake_data["path"]
if machine._class_ == "darwin":
remote_program_params = "?remote-program=bash -lc 'exec nix-daemon --stdio'"
remote_url = f"ssh-ng://{remote_url_base}{remote_program_params}"
remote_url = f"ssh-ng://{remote_url_base}"
cmd = nix_command(
[
"copy",

View File

@@ -17,7 +17,7 @@
runCommand,
setuptools,
webkitgtk_6_0,
wrapGAppsHook,
wrapGAppsHook3,
python,
lib,
stdenv,
@@ -87,7 +87,7 @@ buildPythonApplication rec {
nativeBuildInputs = [
setuptools
copyDesktopItems
wrapGAppsHook
wrapGAppsHook3
gobject-introspection
];

View File

@@ -64,6 +64,9 @@
'';
in
{
legacyPackages = {
inherit jsonDocs clanModulesViaService;
};
packages = {
inherit module-docs;
};

View File

@@ -11,151 +11,10 @@
...
}:
let
inherit (lib)
mapAttrsToList
mapAttrs
mkOption
types
splitString
stringLength
substring
;
inherit (self) clanLib;
serviceModules = self.clan.modules;
baseHref = "/option-search/";
getRoles =
module:
(clanLib.evalService {
modules = [ module ];
prefix = [ ];
}).config.roles;
getManifest =
module:
(clanLib.evalService {
modules = [ module ];
prefix = [ ];
}).config.manifest;
settingsModules = module: mapAttrs (_roleName: roleConfig: roleConfig.interface) (getRoles module);
# Map each letter to its capitalized version
capitalizeChar =
char:
{
a = "A";
b = "B";
c = "C";
d = "D";
e = "E";
f = "F";
g = "G";
h = "H";
i = "I";
j = "J";
k = "K";
l = "L";
m = "M";
n = "N";
o = "O";
p = "P";
q = "Q";
r = "R";
s = "S";
t = "T";
u = "U";
v = "V";
w = "W";
x = "X";
y = "Y";
z = "Z";
}
.${char};
title =
name:
let
# split by -
parts = splitString "-" name;
# capitalize first letter of each part
capitalize = part: (capitalizeChar (substring 0 1 part)) + substring 1 (stringLength part) part;
capitalizedParts = map capitalize parts;
in
builtins.concatStringsSep " " capitalizedParts;
fakeInstanceOptions =
name: module:
let
manifest = getManifest module;
description = ''
# ${title name} (Clan Service)
**${manifest.description}**
${lib.optionalString (manifest ? readme) manifest.readme}
${
if manifest.categories != [ ] then
"Categories: " + builtins.concatStringsSep ", " manifest.categories
else
"No categories defined"
}
'';
in
{
options = {
instances.${name} = lib.mkOption {
inherit description;
type = types.submodule {
options.roles = mapAttrs (
roleName: roleSettingsModule:
mkOption {
type = types.submodule {
_file = "docs flake-module";
imports = [
{ _module.args = { inherit clanLib; }; }
(import ../../modules/inventoryClass/role.nix {
nestedSettingsOption = mkOption {
type = types.raw;
description = ''
See [instances.${name}.roles.${roleName}.settings](${baseHref}?option_scope=0&option=inventory.instances.${name}.roles.${roleName}.settings)
'';
};
settingsOption = mkOption {
type = types.submoduleWith {
modules = [ roleSettingsModule ];
};
};
})
];
};
}
) (settingsModules module);
};
};
};
};
docModules = [
{
inherit self;
}
self.modules.clan.default
{
options.inventory = lib.mkOption {
type = types.submoduleWith {
modules = [
{ noInstanceOptions = true; }
]
++ mapAttrsToList fakeInstanceOptions serviceModules;
};
};
}
];
baseModule =
# Module
@@ -208,12 +67,6 @@
title = "Clan Options";
# scopes = mapAttrsToList mkScope serviceModules;
scopes = [
{
inherit baseHref;
name = "Flake Options (clan.nix file)";
modules = docModules;
urlPrefix = "https://git.clan.lol/clan/clan-core/src/branch/main/";
}
{
name = "Machine Options (clan.core NixOS options)";
optionsJSON = "${coreOptions}/share/doc/nixos/options.json";