Compare commits

...

39 Commits

Author SHA1 Message Date
55e343c43e some ai generated work to get services working for macos 2025-11-08 11:48:34 +08:00
8bef2e6b2e Drop macOS-specific remote-program param from nix copy command 2025-11-06 11:11:57 +08:00
clan-bot
8eaca289ad Merge pull request 'Update treefmt-nix' (#5745) from update-treefmt-nix into main 2025-11-05 20:08:44 +00:00
clan-bot
6f2d482187 Merge pull request 'Update treefmt-nix in devFlake' (#5756) from update-devFlake-treefmt-nix into main 2025-11-05 20:08:18 +00:00
clan-bot
4c30418f12 Update treefmt-nix in devFlake 2025-11-05 20:02:31 +00:00
clan-bot
3c66094d89 Update treefmt-nix 2025-11-05 20:02:02 +00:00
clan-bot
a8f180f8da Merge pull request 'Update treefmt-nix in devFlake' (#5753) from update-devFlake-treefmt-nix into main 2025-11-05 15:09:20 +00:00
clan-bot
e22218d589 Merge pull request 'Update nixpkgs-dev in devFlake' (#5752) from update-devFlake-nixpkgs-dev into main 2025-11-05 15:09:02 +00:00
clan-bot
228c60bcf7 Update treefmt-nix in devFlake 2025-11-05 15:02:30 +00:00
clan-bot
ed2b2d9df9 Update nixpkgs-dev in devFlake 2025-11-05 15:02:24 +00:00
Kenji Berthold
7e2a127d11 Merge pull request 'pkgs/clan-vm-manager: wrapGAppsHook -> wrapGAppsHook3' (#5748) from ke-wrap-gapps into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5748
2025-11-05 12:27:32 +00:00
a-kenji
8c8bacb1ab pkgs/clan-vm-manager: wrapGAppsHook -> wrapGAppsHook3 2025-11-05 12:50:48 +01:00
clan-bot
8ba71144b6 Merge pull request 'Update nix-darwin' (#5744) from update-nix-darwin into main 2025-11-05 10:04:33 +00:00
clan-bot
7f2d15c8a1 Update nix-darwin 2025-11-05 10:01:31 +00:00
clan-bot
486463c793 Merge pull request 'Update treefmt-nix in devFlake' (#5746) from update-devFlake-treefmt-nix into main 2025-11-05 05:16:48 +00:00
clan-bot
071603d688 Update treefmt-nix in devFlake 2025-11-05 05:02:33 +00:00
clan-bot
c612561ec3 Merge pull request 'Update disko' (#5742) from update-disko into main 2025-11-05 00:10:58 +00:00
clan-bot
a88cd2be40 Update disko 2025-11-05 00:01:25 +00:00
clan-bot
7140b417d3 Merge pull request 'Update nixos-facter-modules' (#5738) from update-nixos-facter-modules into main 2025-11-04 20:10:12 +00:00
clan-bot
c7a42cca7f Update nixos-facter-modules 2025-11-04 20:01:33 +00:00
clan-bot
29ca23c629 Merge pull request 'Update nixpkgs-dev in devFlake' (#5740) from update-devFlake-nixpkgs-dev into main 2025-11-04 15:08:00 +00:00
clan-bot
cd7210de1b Update nixpkgs-dev in devFlake 2025-11-04 15:02:30 +00:00
Mic92
c2ebafcf92 Merge pull request 'zfsUnstable -> zfs_unstable' (#5737) from zfs-fix into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5737
2025-11-04 14:46:19 +00:00
Jörg Thalheim
2a9e4e7860 zfsUnstable -> zfs_unstable
nixpkgs has a new path for this.
2025-11-04 15:41:50 +01:00
hsjobeki
43a7652624 Merge pull request 'App: init delete machine' (#5734) from jpy-scene into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5734
2025-11-04 11:03:26 +00:00
Johannes Kirschbauer
65fd25bc2e App: init delete machine 2025-11-04 11:37:29 +01:00
Kenji Berthold
f89ea15749 Merge pull request 'pkgs/cli/vars: Add dependency validation' (#5727) from ke-vars-dependency-validation into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5727
Reviewed-by: Mic92 <joerg@thalheim.io>
2025-11-04 09:55:55 +00:00
hsjobeki
19d4833be8 Merge pull request 'UI: clean up unused scene code' (#5730) from jpy-scene into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5730
2025-11-04 08:39:04 +00:00
Johannes Kirschbauer
82f12eaf6f UI: clean up unused scene code 2025-11-04 09:34:17 +01:00
clan-bot
0b5a8e98de Merge pull request 'Update nix-darwin' (#5729) from update-nix-darwin into main 2025-11-04 05:05:29 +00:00
clan-bot
c5bddada05 Update nix-darwin 2025-11-04 05:01:02 +00:00
clan-bot
62b64c3b3e Merge pull request 'Update nixpkgs-dev in devFlake' (#5728) from update-devFlake-nixpkgs-dev into main 2025-11-03 15:07:53 +00:00
clan-bot
19a1ad6081 Update nixpkgs-dev in devFlake 2025-11-03 15:01:50 +00:00
Kenji Berthold
a2df5db3d6 Merge pull request 'docs/testing: Document requirements for our container testing system' (#5693) from ke-docs-testing-container into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/5693
2025-11-03 13:13:53 +00:00
Kenji Berthold
ac46f890ea Merge branch 'main' into ke-docs-testing-container 2025-11-03 13:06:14 +00:00
a-kenji
83f78d9f59 pkgs/cli/vars: Add dependency validation
Add explicit dependency validation to vars, so that proper error
messages can be surfaced to the user.

Instead of:
```
Traceback (most recent call last):
  File "/home/lhebendanz/Projects/clan-core/pkgs/clan-cli/clan_lib/async_run/__init__.py", line 154, in run
    self.result = AsyncResult(_result=self.function(*self.args, **self.kwargs))
                                      ~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/lhebendanz/Projects/clan-core/pkgs/clan-cli/clan_cli/machines/update.py", line 62, in run_update_wit
h_network
    run_machine_update(
    ~~~~~~~~~~~~~~~~~~^
        machine=machine,
        ^^^^^^^^^^^^^^^^
    ...<2 lines>...
        upload_inputs=upload_inputs,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/home/lhebendanz/Projects/clan-core/pkgs/clan-cli/clan_lib/machines/update.py", line 158, in run_machine_u
pdate
    run_generators([machine], generators=None, full_closure=False)
    ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/lhebendanz/Projects/clan-core/pkgs/clan-cli/clan_lib/vars/generate.py", line 156, in run_generators
    all_generators = get_generators(machines, full_closure=True)
  File "/home/lhebendanz/Projects/clan-core/pkgs/clan-cli/clan_lib/vars/generate.py", line 50, in get_generators
    all_generators_list = Generator.get_machine_generators(
        all_machines,
        flake,
        include_previous_values=include_previous_values,
    )
  File "/home/lhebendanz/Projects/clan-core/pkgs/clan-cli/clan_cli/vars/generator.py", line 246, in get_machine_ge
nerators
    if generators_data[dep]["share"]
       ~~~~~~~~~~~~~~~^^^^^
KeyError: 'bla'
```

We now get:
```
$> Generator 'my_generator' on machine 'my_machine' depends on generator 'non_existing_generator', but 'non_existing_generator' does not exist
```

Closes: #5698
2025-11-03 14:00:38 +01:00
clan-bot
19abf8d288 Merge pull request 'Update nixpkgs-dev in devFlake' (#5726) from update-devFlake-nixpkgs-dev into main 2025-11-03 10:06:31 +00:00
clan-bot
e5105e31c4 Update nixpkgs-dev in devFlake 2025-11-03 10:01:47 +00:00
a-kenji
bc290fe59f docs/testing: Document requirements for our container testing system
Document the requirements for our container testing system:
- uid-range
- auto-allocate-uids

Further document that the container tests are used by default and how to
switch to the more traditional and more supported / featureful VM
testing framework.
2025-10-29 13:47:26 +01:00
20 changed files with 435 additions and 116 deletions

105
PLAN.md Normal file
View File

@@ -0,0 +1,105 @@
Title: Add nix-darwin Support to Clan Services (clan.service)
Summary
- Extend clan services so authors can ship a darwinModule alongside nixosModule.
- Wire service results into darwin machines the same way we already do for NixOS.
- Keep full backward compatibility: existing services that only export nixosModule continue to work unchanged.
Goals
- Service authors can return perInstance/perMachine darwinModule similarly to nixosModule.
- Darwin machines import the correct aggregated service module outputs.
- Documentation describes the new result attribute and authoring pattern.
Non-Goals (initial phase)
- No rework of service settings schema or UI beyond documenting darwinModule.
- No OS-specific extraModules handling (we will keep extraModules affecting only nixos aggregation initially to avoid breaking existing users).
- No sweeping updates of all services; we'll add a concrete example (users) and leave others to be migrated incrementally.
Design Overview
- Service result attributes gain darwinModule in both roles.<name>.perInstance and perMachine results.
- The service aggregator composes both nixosModule and darwinModule per machine.
- The machine wiring picks the correct module based on the machine's class (nixos vs darwin).
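As an illustration of the intended authoring pattern (a minimal sketch only; the option names follow this plan, the service body is hypothetical):
```nix
{
  # A service author returns a darwinModule next to the existing nixosModule.
  roles.default.perInstance =
    { settings, ... }:
    {
      # Imported by machines with machineClass "nixos" (existing behavior).
      nixosModule = { pkgs, ... }: { /* nixos-specific config */ };
      # Imported by machines with machineClass "darwin"; defaults to { } when omitted.
      darwinModule = { pkgs, ... }: { /* darwin-specific config */ };
    };
  perMachine =
    { ... }:
    {
      nixosModule = { };
      darwinModule = { };
    };
}
```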
Changes By File (with anchors)
- lib/inventory/distributed-service/service-module.nix
- Add darwinModule to per-instance return type next to nixosModule.
- Where: lib/inventory/distributed-service/service-module.nix:536 (options.nixosModule = mkOption { … })
- Action: Add sibling options.darwinModule = mkOption { type = types.deferredModule; default = { }; description = "A single nix-darwin module for the instance."; }.
- Add darwinModule to per-machine return type next to nixosModule.
- Where: lib/inventory/distributed-service/service-module.nix:666 (options.nixosModule = mkOption { … })
- Action: Add sibling options.darwinModule = mkOption { type = types.deferredModule; default = { }; description = "A single nix-darwin module for the machine."; }.
- Compose darwinModule per (role, instance, machine) similarly to nixosModule.
- Where: lib/inventory/distributed-service/service-module.nix:878–893 (wrapper that builds nixosModule = { imports = [ instanceRes.nixosModule ] ++ extraModules … })
- Action: Build darwinModule = { imports = [ instanceRes.darwinModule ]; }.
Note: Do NOT include roles.*.extraModules here for darwin initially to avoid importing nixos-specific modules into darwin eval.
- Aggregate darwinModules in final result.
- Where: lib/inventory/distributed-service/service-module.nix:958–993 (instanceResults builder and final nixosModule = { imports = [ machineResult.nixosModule ] ++ instanceResults.nixosModules; })
- Actions:
- Track instanceResults.darwinModules in parallel to instanceResults.nixosModules.
- Add final darwinModule = { imports = [ machineResult.darwinModule ] ++ instanceResults.darwinModules; }.
- modules/clan/distributed-services.nix
- Feed the right service module to each machine based on machineClass.
- Where: modules/clan/distributed-services.nix:147–152
- Current: machineImports = fold over services, collecting serviceModule.result.final.${machineName}.nixosModule
- Change: If inventory.machines.${machineName}.machineClass == "darwin" then collect .darwinModule else .nixosModule.
- modules/clan/module.nix
- Ensure machineImports are included for both nixos and darwin machines.
- Where: modules/clan/module.nix:195 (currently ++ lib.optionals (_class == "nixos") (v.machineImports or [ ]))
- Change: Include machineImports for darwin as well (or remove the conditional and always append v.machineImports).
- docs/site/decisions/01-Clan-Modules.md
- Document darwinModule as a result attribute.
- Where: docs/site/decisions/01-Clan-Modules.md:129–146 (Result attributes and perMachine text mentioning only nixosModule)
- Change: Add “darwinModule” to the Result attributes list and examples, mirroring nixosModule.
- Example service update: clanServices/users/default.nix
- Add perInstance.darwinModule and perMachine.darwinModule mirroring nixos behavior where feasible.
- Where: clanServices/users/default.nix:28–90 (roles.default.perInstance.nixosModule), 148–153 (perMachine.nixosModule)
- Change: Provide minimal darwinModule that sets users.users.<name> (and any safe, cross-platform bits). If some nixos-only settings (e.g., systemd hooks) exist, keep them nixos-only.
Implementation Steps
1) Service API extensions
- Add options.darwinModule to roles.*.perInstance and perMachine (see anchors above).
- Keep defaults to {} so services can omit it safely.
2) Aggregation logic
- result.allRoles: emit darwinModule wrapper from instanceRes.darwinModule.
- result.final:
- Collect instanceResults.darwinModules alongside instanceResults.nixosModules.
- Produce final darwinModule with [ machineResult.darwinModule ] ++ instanceResults.darwinModules.
- Leave exports logic unchanged.
3) Machine wiring
- modules/clan/distributed-services.nix: choose .darwinModule vs .nixosModule based on inventory.machines.<name>.machineClass.
- modules/clan/module.nix: include v.machineImports for both OS classes.
4) Example migration (users)
- Add darwinModule in clanServices/users/default.nix.
- Validate that users service evaluates for a darwin machine and does not reference nixos-specific options.
5) Documentation
- Update ADR docs to mention darwinModule in Result attributes and examples.
- Add a short “Authoring for Darwin” snippet showing perInstance/perMachine returning both modules.
6) Tests and verification
- Unit-level: extend lib/inventory/distributed-service/tests to assert presence of result.final.<machine>.darwinModule when perInstance/perMachine return it.
- Integration-level: evaluate a sample darwin machine (e.g., inventory.json has test-darwin-machine) and assert clan.darwinModules.<machine> includes the aggregated module.
- Sanity: ensure existing nixos-only services still evaluate unchanged.
Backward Compatibility
- Existing services that only return nixosModule continue to work.
- Darwin machines won't import service modules until services provide darwinModule, avoiding accidental breakage.
- extraModules remain applied only to nixos aggregation initially to prevent nixos-only modules from breaking darwin evaluation. We can add OS-specific extraModules in a follow-up (e.g., roles.*.extraModulesDarwin).
Acceptance Criteria
- Services can return darwinModule in perInstance/perMachine without errors.
- Darwin machines import aggregated darwinModule outputs from all participating services.
- nixos behavior remains unchanged for existing services.
- Documentation updated to reflect the new attribute and example.
Rollout Notes
- Start by updating clanServices/users as a working example.
- Encourage service authors to add darwinModule incrementally; no global migration is required.

View File

@@ -120,6 +120,63 @@
share = settings.share;
script =
(
if settings.prompt then
''
prompt_value=$(cat "$prompts"/user-password)
if [[ -n "''${prompt_value-}" ]]; then
echo "$prompt_value" | tr -d "\n" > "$out"/user-password
else
xkcdpass --numwords 4 --delimiter - --count 1 | tr -d "\n" > "$out"/user-password
fi
''
else
''
xkcdpass --numwords 4 --delimiter - --count 1 | tr -d "\n" > "$out"/user-password
''
)
+ ''
mkpasswd -s -m sha-512 < "$out"/user-password | tr -d "\n" > "$out"/user-password-hash
'';
};
};
darwinModule =
{
config,
pkgs,
lib,
...
}:
{
# For darwin, we currently only generate and manage the password secret.
# Hooking into actual macOS account management may be added later.
clan.core.vars.generators."user-password-${settings.user}" = {
files.user-password-hash.neededFor = "users";
files.user-password.deploy = false;
prompts.user-password = lib.mkIf settings.prompt {
display = {
group = settings.user;
label = "password";
required = false;
helperText = ''
Your password will be encrypted and stored securely using the secret store you've configured.
'';
};
type = "hidden";
persist = true;
description = "Leave empty to generate automatically";
};
runtimeInputs = [
pkgs.coreutils
pkgs.xkcdpass
pkgs.mkpasswd
];
share = settings.share;
script =
(
if settings.prompt then
@@ -149,5 +206,7 @@
# Immutable users to ensure that this module has exclusive control over the users.
users.mutableUsers = false;
};
# No-op for darwin by default; can be extended later if needed.
darwinModule = { };
};
}

12
devFlake/flake.lock generated
View File

@@ -105,11 +105,11 @@
},
"nixpkgs-dev": {
"locked": {
- "lastModified": 1762080734,
"lastModified": 1762328495,
- "narHash": "sha256-fFunzA7ITlPHRr7dECaFGTBucNiWYEVDNPBw/9gFmII=",
"narHash": "sha256-IUZvw5kvLiExApP9+SK/styzEKSqfe0NPclu9/z85OQ=",
"owner": "NixOS",
"repo": "nixpkgs",
- "rev": "bc7f6fa86de9b208edf4ea7bbf40bcd8cc7d70a5",
"rev": "4c621660e393922cf68cdbfc40eb5a2d54d3989a",
"type": "github"
},
"original": {
@@ -208,11 +208,11 @@
"nixpkgs": []
},
"locked": {
- "lastModified": 1761311587,
"lastModified": 1762366246,
- "narHash": "sha256-Msq86cR5SjozQGCnC6H8C+0cD4rnx91BPltZ9KK613Y=",
"narHash": "sha256-3xc/f/ZNb5ma9Fc9knIzEwygXotA+0BZFQ5V5XovSOQ=",
"owner": "numtide",
"repo": "treefmt-nix",
- "rev": "2eddae033e4e74bf581c2d1dfa101f9033dbd2dc",
"rev": "a82c779ca992190109e431d7d680860e6723e048",
"type": "github"
},
"original": {

View File

@@ -150,10 +150,61 @@ Those are very similar to NixOS VM tests, as in they run virtualized nixos machi
As of now the container test driver is a downstream development in clan-core.
Basically everything stated under the NixOS VM tests sections applies here, except some limitations.
- Limitations:
- - Cannot run in interactive mode, however while the container test runs, it logs a nsenter command that can be used to log into each of the container.
- - setuid binaries don't work
### Using Container Tests vs VM Tests
Container tests are **enabled by default** for all tests using the clan testing framework.
They offer significant performance advantages over VM tests:
- **Faster startup**
- **Lower resource usage**: No full kernel boot or hardware emulation overhead
To control whether a test uses containers or VMs, use the `clan.test.useContainers` option:
```nix
{
clan = {
directory = ./.;
test.useContainers = true; # Use containers (default)
# test.useContainers = false; # Use VMs instead
};
}
```
**When to use VM tests instead of container tests:**
- Testing kernel features, modules, or boot processes
- Testing hardware-specific features
- When you need full system isolation
### System Requirements for Container Tests
Container tests require the **`uid-range`** system feature in the Nix sandbox.
This feature allows Nix to allocate a range of UIDs for containers to use, enabling `systemd-nspawn` containers to run properly inside the Nix build sandbox.
**Configuration:**
The `uid-range` feature requires the `auto-allocate-uids` setting to be enabled in your Nix configuration.
To verify or enable it, add to your `/etc/nix/nix.conf` or NixOS configuration:
```nix
nix.settings.experimental-features = [
"auto-allocate-uids"
];
nix.settings.auto-allocate-uids = true;
nix.settings.system-features = [ "uid-range" ];
```
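For non-NixOS systems, the equivalent raw `/etc/nix/nix.conf` entries would look roughly like the sketch below (an assumption derived from the option names above; note that setting `system-features` replaces the default feature list, so merge in any features you already rely on):
```
experimental-features = auto-allocate-uids
auto-allocate-uids = true
system-features = uid-range
```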
**Technical details:**
- Container tests set `requiredSystemFeatures = [ "uid-range" ];` in their derivation (see `lib/test/container-test-driver/driver-module.nix:98`)
- Without this feature, containers cannot properly manage user namespaces and will fail to start
### Limitations
- Cannot run in interactive mode; however, while the container test runs, it logs an `nsenter` command that can be used to log into each of the containers.
- Early implementation with a limited feature set.
### Where to find examples for NixOS container tests

30
flake.lock generated
View File

@@ -31,11 +31,11 @@
]
},
"locked": {
- "lastModified": 1761899396,
"lastModified": 1762276996,
- "narHash": "sha256-XOpKBp6HLzzMCbzW50TEuXN35zN5WGQREC7n34DcNMM=",
"narHash": "sha256-TtcPgPmp2f0FAnc+DMEw4ardEgv1SGNR3/WFGH0N19M=",
"owner": "nix-community",
"repo": "disko",
- "rev": "6f4cf5abbe318e4cd1e879506f6eeafd83f7b998",
"rev": "af087d076d3860760b3323f6b583f4d828c1ac17",
"type": "github"
},
"original": {
@@ -71,11 +71,11 @@
]
},
"locked": {
- "lastModified": 1762039661,
"lastModified": 1762304480,
- "narHash": "sha256-oM5BwAGE78IBLZn+AqxwH/saqwq3e926rNq5HmOulkc=",
"narHash": "sha256-ikVIPB/ea/BAODk6aksgkup9k2jQdrwr4+ZRXtBgmSs=",
"owner": "nix-darwin",
"repo": "nix-darwin",
- "rev": "c3c8c9f2a5ed43175ac4dc030308756620e6e4e4",
"rev": "b8c7ac030211f18bd1f41eae0b815571853db7a2",
"type": "github"
},
"original": {
@@ -99,11 +99,11 @@
},
"nixos-facter-modules": {
"locked": {
- "lastModified": 1761137276,
"lastModified": 1762264948,
- "narHash": "sha256-4lDjGnWRBLwqKQ4UWSUq6Mvxu9r8DSqCCydodW/Jsi8=",
"narHash": "sha256-iaRf6n0KPl9hndnIft3blm1YTAyxSREV1oX0MFZ6Tk4=",
"owner": "nix-community",
"repo": "nixos-facter-modules",
- "rev": "70bcd64225d167c7af9b475c4df7b5abba5c7de8",
"rev": "fa695bff9ec37fd5bbd7ee3181dbeb5f97f53c96",
"type": "github"
},
"original": {
@@ -115,10 +115,10 @@
"nixpkgs": {
"locked": {
"lastModified": 315532800,
- "narHash": "sha256-yDxtm0PESdgNetiJN5+MFxgubBcLDTiuSjjrJiyvsvM=",
"narHash": "sha256-LDT9wuUZtjPfmviCcVWif5+7j4kBI2mWaZwjNNeg4eg=",
- "rev": "d7f52a7a640bc54c7bb414cca603835bf8dd4b10",
"rev": "a7fc11be66bdfb5cdde611ee5ce381c183da8386",
"type": "tarball",
- "url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre871443.d7f52a7a640b/nixexprs.tar.xz"
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre887438.a7fc11be66bd/nixexprs.tar.xz"
},
"original": {
"type": "tarball",
@@ -181,11 +181,11 @@
]
},
"locked": {
- "lastModified": 1761311587,
"lastModified": 1762366246,
- "narHash": "sha256-Msq86cR5SjozQGCnC6H8C+0cD4rnx91BPltZ9KK613Y=",
"narHash": "sha256-3xc/f/ZNb5ma9Fc9knIzEwygXotA+0BZFQ5V5XovSOQ=",
"owner": "numtide",
"repo": "treefmt-nix",
- "rev": "2eddae033e4e74bf581c2d1dfa101f9033dbd2dc",
"rev": "a82c779ca992190109e431d7d680860e6723e048",
"type": "github"
},
"original": {

View File

@@ -561,6 +561,15 @@ in
```
'';
};
options.darwinModule = mkOption {
type = types.deferredModule;
default = { };
description = ''
A single nix-darwin module for the instance.
This mirrors `nixosModule` but targets darwin machines.
'';
};
})
];
};
@@ -686,6 +695,15 @@ in
```
'';
};
options.darwinModule = mkOption {
type = types.deferredModule;
default = { };
description = ''
A single nix-darwin module for the machine.
This mirrors `nixosModule` but targets darwin machines.
'';
};
})
];
};
@@ -890,6 +908,11 @@ in
lib.setDefaultModuleLocation "via inventory.instances.${instanceName}.roles.${roleName}" s
) instanceCfg.roles.${roleName}.extraModules);
};
darwinModule = {
imports = [
instanceRes.darwinModule
];
};
}
) instanceCfg.roles.${roleName}.machines or { };
@@ -979,11 +1002,24 @@ in
else
instanceAcc.nixosModules
);
darwinModules = (
if instance.allMachines.${machineName}.darwinModule or { } != { } then
instanceAcc.darwinModules
++ [
(lib.setDefaultModuleLocation
"Via instances.${instanceName}.roles.${roleName}.machines.${machineName}"
instance.allMachines.${machineName}.darwinModule
)
]
else
instanceAcc.darwinModules
);
}
) roleAcc role.allInstances
)
{
nixosModules = [ ];
darwinModules = [ ];
# ...
}
config.result.allRoles;
@@ -1021,6 +1057,12 @@ in
]
++ instanceResults.nixosModules;
};
darwinModule = {
imports = [
(lib.setDefaultModuleLocation "Via ${config.manifest.name}.perMachine - machine='${machineName}';" machineResult.darwinModule)
]
++ instanceResults.darwinModules;
};
}
) config.result.allMachines;
};

View File

@@ -145,10 +145,23 @@ in
internal = true;
type = types.raw;
default = lib.mapAttrs (machineName: _: {
- # This is the list of nixosModules for each machine
# This is the list of service modules for each machine (nixos or darwin)
machineImports = lib.foldlAttrs (
acc: _module_ident: serviceModule:
- acc ++ [ serviceModule.result.final.${machineName}.nixosModule or { } ]
modName =
if inventory.machines.${machineName}.machineClass == "darwin" then
"darwinModule"
else
"nixosModule";
finalForMachine = serviceModule.result.final.${machineName} or { };
picked =
if builtins.hasAttr modName finalForMachine then
(builtins.getAttr modName finalForMachine)
else
{ };
in
acc ++ [ picked ]
) [ ] config._services.mappedServices;
}) inventory.machines or { };
};

View File

@@ -192,7 +192,7 @@ in
# - darwinModules (_class = darwin)
(lib.optionalAttrs (clan-core ? "${_class}Modules") clan-core."${_class}Modules".clanCore)
]
- ++ lib.optionals (_class == "nixos") (v.machineImports or [ ]);
++ (v.machineImports or [ ]);
# default hostname
networking.hostName = lib.mkDefault name;

View File

@@ -5,7 +5,7 @@
}:
{
# If we also need zfs, we can use the unstable version as we otherwise don't have a new enough kernel version
- boot.zfs.package = pkgs.zfsUnstable;
boot.zfs.package = pkgs.zfs_unstable or pkgs.zfsUnstable;
# Enable bcachefs support
boot.supportedFilesystems.bcachefs = lib.mkDefault true;

View File

@@ -6,7 +6,7 @@
}:
let
- isUnstable = config.boot.zfs.package == pkgs.zfsUnstable;
isUnstable = config.boot.zfs.package == pkgs.zfs_unstable or pkgs.zfsUnstable;
zfsCompatibleKernelPackages = lib.filterAttrs (
name: kernelPackages:
(builtins.match "linux_[0-9]+_[0-9]+" name) != null

View File

@@ -4,6 +4,7 @@
padding: 8px;
flex-direction: column;
align-items: flex-start;
gap: 4px;
border-radius: 5px;
border: 1px solid var(--clr-border-def-2, #d8e8eb);

View File

@@ -1,11 +1,13 @@
import { onCleanup, onMount } from "solid-js";
import styles from "./ContextMenu.module.css";
import { Typography } from "../Typography/Typography";
import { Divider } from "../Divider/Divider";
import Icon from "../Icon/Icon";
export const Menu = (props: {
x: number;
y: number;
- onSelect: (option: "move") => void;
onSelect: (option: "move" | "delete") => void;
close: () => void;
intersect: string[];
}) => {
@@ -54,13 +56,31 @@ export const Menu = (props: {
>
<Typography
hierarchy="label"
size="s"
weight="bold"
color={currentMachine() ? "primary" : "quaternary"}
>
Move
</Typography>
</li>
<Divider />
<li
class={styles.item}
aria-disabled={!currentMachine()}
onClick={() => {
console.log("Delete clicked", currentMachine());
props.onSelect("delete");
props.close();
}}
>
<Typography
hierarchy="label"
color={currentMachine() ? "primary" : "quaternary"}
>
<span class="flex items-center gap-2">
Delete
<Icon icon="Trash" font-size="inherit" />
</span>
</Typography>
</li>
</ul>
);
};

View File

@@ -71,7 +71,7 @@ const Machines = () => {
}
const result = ctx.machinesQuery.data;
- return Object.keys(result).length > 0 ? result : undefined;
return Object.keys(result).length > 0 ? result : [];
};
return (
@@ -117,7 +117,7 @@ const Machines = () => {
}
>
<nav>
- <For each={Object.entries(machines()!)}>
<For each={Object.entries(machines())}>
{([id, machine]) => (
<MachineRoute
clanURI={clanURI}

View File

@@ -206,8 +206,8 @@ const ClanSceneController = (props: RouteSectionProps) => {
<AddMachine
onCreated={async (id) => {
const promise = currentPromise();
- await ctx.machinesQuery.refetch();
if (promise) {
await ctx.machinesQuery.refetch();
promise.resolve({ id });
setCurrentPromise(null);
}

View File

@@ -18,12 +18,12 @@ export class MachineManager {
private disposeRoot: () => void;
- private machinePositionsSignal: Accessor<SceneData>;
private machinePositionsSignal: Accessor<SceneData | undefined>;
constructor(
scene: THREE.Scene,
registry: ObjectRegistry,
- machinePositionsSignal: Accessor<SceneData>,
machinePositionsSignal: Accessor<SceneData | undefined>,
machinesQueryResult: MachinesQueryResult,
selectedIds: Accessor<Set<string>>,
setMachinePos: (id: string, position: [number, number] | null) => void,
@@ -39,8 +39,9 @@ export class MachineManager {
if (!machinesQueryResult.data) return;
const actualIds = Object.keys(machinesQueryResult.data);
- const machinePositions = machinePositionsSignal();
- // Remove stale
const machinePositions = machinePositionsSignal() || {};
for (const id of Object.keys(machinePositions)) {
if (!actualIds.includes(id)) {
console.log("Removing stale machine", id);
@@ -61,8 +62,7 @@ export class MachineManager {
// Effect 2: sync store → scene
//
createEffect(() => {
- const positions = machinePositionsSignal();
const positions = machinePositionsSignal() || {};
- if (!positions) return;
// Remove machines from scene
for (const [id, repr] of this.machines) {
@@ -103,7 +103,7 @@ export class MachineManager {
nextGridPos(): [number, number] {
const occupiedPositions = new Set(
- Object.values(this.machinePositionsSignal()).map((data) =>
Object.values(this.machinePositionsSignal() || {}).map((data) =>
keyFromPos(data.position),
),
);

View File

@@ -32,6 +32,9 @@ import {
} from "./highlightStore"; } from "./highlightStore";
import { createMachineMesh } from "./MachineRepr"; import { createMachineMesh } from "./MachineRepr";
import { useClanContext } from "@/src/routes/Clan/Clan"; import { useClanContext } from "@/src/routes/Clan/Clan";
import client from "@api/clan/client";
import { navigateToClan } from "../hooks/clan";
import { useNavigate } from "@solidjs/router";
function intersectMachines(
event: MouseEvent,
@@ -100,7 +103,7 @@ export function CubeScene(props: {
onCreate: () => Promise<{ id: string }>;
selectedIds: Accessor<Set<string>>;
onSelect: (v: Set<string>) => void;
- sceneStore: Accessor<SceneData>;
sceneStore: Accessor<SceneData | undefined>;
setMachinePos: (machineId: string, pos: [number, number] | null) => void;
isLoading: boolean;
clanURI: string;
@@ -131,9 +134,6 @@ export function CubeScene(props: {
let machineManager: MachineManager;
- const [positionMode, setPositionMode] = createSignal<"grid" | "circle">(
-   "grid",
- );
// Managed by controls
const [isDragging, setIsDragging] = createSignal(false);
@@ -142,10 +142,6 @@ export function CubeScene(props: {
// TODO: Unify this with actionRepr position
const [cursorPosition, setCursorPosition] = createSignal<[number, number]>();
- const [cameraInfo, setCameraInfo] = createSignal({
-   position: { x: 0, y: 0, z: 0 },
-   spherical: { radius: 0, theta: 0, phi: 0 },
- });
// Context menu state
const [contextOpen, setContextOpen] = createSignal(false);
const [menuPos, setMenuPos] = createSignal<{ x: number; y: number }>();
@@ -157,7 +153,6 @@ export function CubeScene(props: {
const BASE_SIZE = 0.9; // Height of the cube above the ground
const CUBE_SIZE = BASE_SIZE / 1.5; //
const BASE_HEIGHT = 0.05; // Height of the cube above the ground
- const CUBE_Y = 0 + CUBE_SIZE / 2 + BASE_HEIGHT / 2; // Y position of the cube above the ground
const CUBE_SEGMENT_HEIGHT = CUBE_SIZE / 1;
const FLOOR_COLOR = 0xcdd8d9;
@@ -201,6 +196,8 @@ export function CubeScene(props: {
const grid = new THREE.GridHelper(1000, 1000 / 1, 0xe1edef, 0xe1edef);
const navigate = useNavigate();
onMount(() => {
// Scene setup
scene = new THREE.Scene();
@@ -311,21 +308,12 @@ export function CubeScene(props: {
bgCamera,
);
- // controls.addEventListener("start", (e) => {
- //   setIsDragging(true);
- // });
- // controls.addEventListener("end", (e) => {
- //   setIsDragging(false);
- // });
// Lighting
const ambientLight = new THREE.AmbientLight(0xd9f2f7, 0.72);
scene.add(ambientLight);
const directionalLight = new THREE.DirectionalLight(0xffffff, 3.5);
- // scene.add(new THREE.DirectionalLightHelper(directionalLight));
- // scene.add(new THREE.CameraHelper(camera));
const lightPos = new THREE.Spherical(
15,
initialSphericalCameraPosition.phi - Math.PI / 8,
@@ -412,30 +400,6 @@ export function CubeScene(props: {
actionMachine = createActionMachine();
scene.add(actionMachine);
- // const spherical = new THREE.Spherical();
- // spherical.setFromVector3(camera.position);
- // Function to update camera info
- const updateCameraInfo = () => {
-   const spherical = new THREE.Spherical();
-   spherical.setFromVector3(camera.position);
-   setCameraInfo({
-     position: {
-       x: Math.round(camera.position.x * 100) / 100,
-       y: Math.round(camera.position.y * 100) / 100,
-       z: Math.round(camera.position.z * 100) / 100,
-     },
-     spherical: {
-       radius: Math.round(spherical.radius * 100) / 100,
-       theta: Math.round(spherical.theta * 100) / 100,
-       phi: Math.round(spherical.phi * 100) / 100,
-     },
-   });
- };
- // Initial camera info update
- updateCameraInfo();
createEffect(
on(ctx.worldMode, (mode) => {
if (mode === "create") {
@@ -661,7 +625,8 @@ export function CubeScene(props: {
});
const snapToGrid = (point: THREE.Vector3) => {
- if (!props.sceneStore) return;
const store = props.sceneStore() || {};
// Snap to grid
const snapped = new THREE.Vector3(
Math.round(point.x / GRID_SIZE) * GRID_SIZE,
@@ -670,7 +635,7 @@ export function CubeScene(props: {
);
// Skip snapping if there's already a cube at this position
- const positions = Object.entries(props.sceneStore());
const positions = Object.entries(store);
const intersects = positions.some(
([_id, p]) => p.position[0] === snapped.x && p.position[1] === snapped.z,
);
@@ -694,7 +659,6 @@ export function CubeScene(props: {
};
const onAddClick = (event: MouseEvent) => {
- setPositionMode("grid");
ctx.setWorldMode("create");
renderLoop.requestRender();
};
@@ -706,9 +670,6 @@ export function CubeScene(props: {
if (!actionRepr) return;
actionRepr.visible = true;
- // (actionRepr.material as THREE.MeshPhongMaterial).emissive.set(
- //   worldMode() === "create" ? CREATE_BASE_EMISSIVE : MOVE_BASE_EMISSIVE,
- // );
// Calculate mouse position in normalized device coordinates
// (-1 to +1) for both components
@@ -736,23 +697,38 @@ export function CubeScene(props: {
}
}
};
- const handleMenuSelect = (mode: "move") => {
const handleMenuSelect = async (mode: "move" | "delete") => {
const firstId = menuIntersection()[0];
if (!firstId) {
return;
}
const machine = machineManager.machines.get(firstId);
if (mode === "delete") {
console.log("deleting machine", firstId);
await client.post("delete_machine", {
body: {
machine: { flake: { identifier: props.clanURI }, name: firstId },
},
});
navigateToClan(navigate, props.clanURI);
ctx.machinesQuery.refetch();
ctx.serviceInstancesQuery.refetch();
return;
}
// Else "move" mode
ctx.setWorldMode(mode);
setHighlightGroups({ move: new Set(menuIntersection()) });
// Find the position of the first selected machine
// Set the actionMachine position to that
- const firstId = menuIntersection()[0];
- if (firstId) {
-   const machine = machineManager.machines.get(firstId);
-   if (machine && actionMachine) {
-     actionMachine.position.set(
-       machine.group.position.x,
-       0,
-       machine.group.position.z,
-     );
-     setCursorPosition([machine.group.position.x, machine.group.position.z]);
-   }
- }
if (machine && actionMachine) {
actionMachine.position.set(
machine.group.position.x,
0,
machine.group.position.z,
);
setCursorPosition([machine.group.position.x, machine.group.position.z]);
}
};

View File

@@ -766,6 +766,28 @@ def test_prompt(
assert sops_store.get(my_generator, "prompt_persist").decode() == "prompt_persist"
@pytest.mark.with_core
def test_non_existing_dependency_raises_error(
monkeypatch: pytest.MonkeyPatch,
flake_with_sops: ClanFlake,
) -> None:
"""Ensure that a generator with a non-existing dependency raises a clear error."""
flake = flake_with_sops
config = flake.machines["my_machine"] = create_test_machine_config()
my_generator = config["clan"]["core"]["vars"]["generators"]["my_generator"]
my_generator["files"]["my_value"]["secret"] = False
my_generator["script"] = 'echo "$RANDOM" > "$out"/my_value'
my_generator["dependencies"] = ["non_existing_generator"]
flake.refresh()
monkeypatch.chdir(flake.path)
with pytest.raises(
ClanError,
match="Generator 'my_generator' on machine 'my_machine' depends on generator 'non_existing_generator', but 'non_existing_generator' does not exist",
):
cli.run(["vars", "generate", "--flake", str(flake.path), "my_machine"])
@pytest.mark.with_core
def test_shared_vars_must_never_depend_on_machine_specific_vars(
monkeypatch: pytest.MonkeyPatch,

View File

@@ -66,6 +66,41 @@ class Generator:
_public_store: "StoreBase | None" = None
_secret_store: "StoreBase | None" = None
@staticmethod
def validate_dependencies(
generator_name: str,
machine_name: str,
dependencies: list[str],
generators_data: dict[str, dict],
) -> list[GeneratorKey]:
"""Validate and build dependency keys for a generator.
Args:
generator_name: Name of the generator that has dependencies
machine_name: Name of the machine the generator belongs to
dependencies: List of dependency generator names
generators_data: Dictionary of all available generators for this machine
Returns:
List of GeneratorKey objects
Raises:
ClanError: If a dependency does not exist
"""
deps_list = []
for dep in dependencies:
if dep not in generators_data:
msg = f"Generator '{generator_name}' on machine '{machine_name}' depends on generator '{dep}', but '{dep}' does not exist. Please check your configuration."
raise ClanError(msg)
deps_list.append(
GeneratorKey(
machine=None if generators_data[dep]["share"] else machine_name,
name=dep,
)
)
return deps_list
@property
def key(self) -> GeneratorKey:
if self.share:
@@ -240,15 +275,12 @@ class Generator:
name=gen_name,
share=share,
files=files,
- dependencies=[
-   GeneratorKey(
-     machine=None
-     if generators_data[dep]["share"]
-     else machine_name,
-     name=dep,
-   )
-   for dep in gen_data["dependencies"]
- ],
dependencies=cls.validate_dependencies(
gen_name,
machine_name,
gen_data["dependencies"],
generators_data,
),
migrate_fact=gen_data.get("migrateFact"),
validation_hash=gen_data.get("validationHash"),
prompts=prompts,
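To illustrate how the new helper behaves (hypothetical generator names, not part of the diff), a dependency missing from `generators_data` now fails fast with the error shown in the commit message above:
```python
# Sketch: Generator.validate_dependencies as added in this diff.
generators_data = {"wifi": {"share": False}}

# Valid dependency: returns [GeneratorKey(machine="my_machine", name="wifi")].
keys = Generator.validate_dependencies(
    "my_generator", "my_machine", ["wifi"], generators_data
)

# Missing dependency: raises ClanError("Generator 'my_generator' on machine
# 'my_machine' depends on generator 'dns', but 'dns' does not exist. ...").
Generator.validate_dependencies(
    "my_generator", "my_machine", ["dns"], generators_data
)
```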

View File

@@ -59,9 +59,7 @@ def upload_sources(machine: Machine, ssh: Host, upload_inputs: bool) -> str:
if not has_path_inputs and not upload_inputs:
# Just copy the flake to the remote machine, we can substitute other inputs there.
path = flake_data["path"]
- if machine._class_ == "darwin":
-   remote_program_params = "?remote-program=bash -lc 'exec nix-daemon --stdio'"
-   remote_url = f"ssh-ng://{remote_url_base}{remote_program_params}"
remote_url = f"ssh-ng://{remote_url_base}"
cmd = nix_command(
[
"copy",

View File

@@ -17,7 +17,7 @@
runCommand,
setuptools,
webkitgtk_6_0,
- wrapGAppsHook,
wrapGAppsHook3,
python,
lib,
stdenv,
@@ -87,7 +87,7 @@ buildPythonApplication rec {
nativeBuildInputs = [
setuptools
copyDesktopItems
- wrapGAppsHook
wrapGAppsHook3
gobject-introspection
];