Compare commits

..

8 Commits

Author SHA1 Message Date
Jörg Thalheim
a2e4b98a93 clan-cli/secrets: suggest clan vars keygen instead 2025-10-07 11:12:04 +02:00
Louis Opter
25ce97dd5e clan-cli/secrets: update some error message in encrypt_secret
Found that while reading through some code.
2025-10-07 11:09:00 +02:00
a-kenji
bd361b2744 docs: Fix nixpkgs hierarchy 2025-10-07 11:09:00 +02:00
clan-bot
ac901f5656 Update nixpkgs-dev in devFlake 2025-10-07 11:09:00 +02:00
clan-bot
8339ad605c Update clan-core-for-checks in devFlake 2025-10-07 11:09:00 +02:00
clan-bot
3473aaa440 Update clan-core-for-checks in devFlake 2025-10-07 11:09:00 +02:00
clan-bot
e983b10331 Update clan-core-for-checks in devFlake 2025-10-07 11:09:00 +02:00
clan-bot
fcf5ccd115 Update nixpkgs-dev in devFlake 2025-10-07 11:09:00 +02:00
39 changed files with 388 additions and 1079 deletions

View File

@@ -1,12 +0,0 @@
## Description of the change
<!-- Brief summary of the change if not already clear from the title -->
## Checklist
- [ ] Updated Documentation
- [ ] Added tests
- [ ] Doesn't affect backwards compatibility - or check the next points
- [ ] Add the breaking change and migration details to docs/release-notes.md
- !!! Review from another person is required *BEFORE* merge !!!
- [ ] Add introduction of major feature to docs/release-notes.md

View File

@@ -120,7 +120,7 @@ in
) (self.darwinConfigurations or { })
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") (
if system == "aarch64-darwin" then
lib.filterAttrs (n: _: n != "docs" && n != "deploy-docs" && n != "option-search") packagesToBuild
lib.filterAttrs (n: _: n != "docs" && n != "deploy-docs" && n != "docs-options") packagesToBuild
else
packagesToBuild
)

12
devFlake/flake.lock generated
View File

@@ -3,10 +3,10 @@
"clan-core-for-checks": {
"flake": false,
"locked": {
"lastModified": 1760000589,
"narHash": "sha256-9xBwxeb8x5XOo3alaJvv2ZwL7UhW3/oYUUBK+odWGrk=",
"lastModified": 1759795610,
"narHash": "sha256-YFOK+aoJjWLfMHj2spvrQIe0ufIsv6P8o44NqoFPwp0=",
"ref": "main",
"rev": "e2f20b5ffcd4ff59e2528d29649056e3eb8d22bb",
"rev": "0de79962eacfe6f09d7aabca2a7305deef4fde0c",
"shallow": true,
"type": "git",
"url": "https://git.clan.lol/clan/clan-core"
@@ -105,11 +105,11 @@
},
"nixpkgs-dev": {
"locked": {
"lastModified": 1759989671,
"narHash": "sha256-3Wk0I5TYsd7cyIO8vYGxjOuQ8zraZEUFZqEhSSIhQLs=",
"lastModified": 1759794031,
"narHash": "sha256-Zruni/00BlDHSWVJf3mb0o+OHnxIvJNuXkPloY9c+PU=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "837076de579c67aa0c2ce2ab49948b24d907d449",
"rev": "09c221b2f0726da85b124efb60a1d123971dfa08",
"type": "github"
},
"original": {

2
docs/.gitignore vendored
View File

@@ -1,6 +1,6 @@
/site/reference
/site/services/official
/site/static
/site/option-search
/site/options
/site/openapi.json
!/site/static/extra.css

View File

@@ -1,5 +1,5 @@
# Serve documentation locally
```
nix develop .#docs -c mkdocs serve
$ nix develop .#docs -c mkdocs serve
```

41
docs/main.py Normal file
View File

@@ -0,0 +1,41 @@
from typing import Any
def define_env(env: Any) -> None:
static_dir = "/static/"
video_dir = "https://clan.lol/" + "videos/"
asciinema_dir = static_dir + "asciinema-player/"
@env.macro
def video(name: str) -> str:
return f"""<video loop muted autoplay id="{name}">
<source src={video_dir + name} type="video/webm">
Your browser does not support the video tag.
</video>"""
@env.macro
def asciinema(name: str) -> str:
return f"""<div id="{name}">
<script>
// Function to load the script and then create the Asciinema player
function loadAsciinemaPlayer() {{
var script = document.createElement('script');
script.src = "{asciinema_dir}/asciinema-player.min.js";
script.onload = function() {{
AsciinemaPlayer.create('{video_dir + name}', document.getElementById("{name}"), {{
loop: true,
autoPlay: true,
controls: false,
speed: 1.5,
theme: "solarized-light"
}});
}};
document.head.appendChild(script);
}}
// Load the Asciinema player script
loadAsciinemaPlayer();
</script>
<link rel="stylesheet" type="text/css" href="{asciinema_dir}/asciinema-player.css" />
</div>"""

View File

@@ -58,7 +58,7 @@ nav:
- getting-started/configure-disk.md
- getting-started/update-machines.md
- getting-started/continuous-integration.md
- Convert existing NixOS configurations: getting-started/convert-existing-NixOS-configuration.md
- getting-started/convert-existing-NixOS-configuration.md
- Guides:
- Inventory:
- Introduction to Inventory: guides/inventory/inventory.md
@@ -66,7 +66,6 @@ nav:
- Services:
- Introduction to Services: guides/services/introduction-to-services.md
- Author Your Own Service: guides/services/community.md
- Internal Services with SSL: guides/internal-ssl-services.md
- Vars:
- Introduction to Vars: guides/vars/vars-overview.md
- Minimal Example: guides/vars/vars-backend.md
@@ -180,7 +179,7 @@ nav:
- services/official/zerotier.md
- services/community.md
- Search Clan Options: "/option-search"
- Search Clan Options: "/options"
docs_dir: site
site_dir: out

View File

@@ -3,9 +3,11 @@
module-docs,
clan-cli-docs,
clan-lib-openapi,
asciinema-player-js,
asciinema-player-css,
roboto,
fira-code,
option-search,
docs-options,
...
}:
let
@@ -51,9 +53,13 @@ pkgs.stdenv.mkDerivation {
chmod -R +w ./site
echo "Generated API documentation in './site/reference/' "
rm -rf ./site/option-search
cp -r ${option-search} ./site/option-search
chmod -R +w ./site/option-search
rm -rf ./site/options
cp -r ${docs-options} ./site/options
chmod -R +w ./site/options
mkdir -p ./site/static/asciinema-player
ln -snf ${asciinema-player-js} ./site/static/asciinema-player/asciinema-player.min.js
ln -snf ${asciinema-player-css} ./site/static/asciinema-player/asciinema-player.css
# Link to fonts
ln -snf ${roboto}/share/fonts/truetype/Roboto-Regular.ttf ./site/static/

View File

@@ -1,5 +1,8 @@
{ inputs, ... }:
{ inputs, self, ... }:
{
imports = [
./options/flake-module.nix
];
perSystem =
{
config,
@@ -7,7 +10,83 @@
pkgs,
...
}:
let
# Simply evaluated options (JSON)
# { clanCore = «derivation JSON»; clanModules = { ${name} = «derivation JSON» }; }
jsonDocs = pkgs.callPackage ./get-module-docs.nix {
inherit (self) clanModules;
clan-core = self;
inherit pkgs;
};
# clan service options
clanModulesViaService = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModulesViaService);
# Simply evaluated options (JSON)
renderOptions =
pkgs.runCommand "render-options"
{
# TODO: ruff does not splice properly in nativeBuildInputs
depsBuildBuild = [ pkgs.ruff ];
nativeBuildInputs = [
pkgs.python3
pkgs.mypy
self'.packages.clan-cli
];
}
''
install -D -m755 ${./render_options}/__init__.py $out/bin/render-options
patchShebangs --build $out/bin/render-options
ruff format --check --diff $out/bin/render-options
ruff check --line-length 88 $out/bin/render-options
mypy --strict $out/bin/render-options
'';
asciinema-player-js = pkgs.fetchurl {
url = "https://github.com/asciinema/asciinema-player/releases/download/v3.7.0/asciinema-player.min.js";
sha256 = "sha256-Ymco/+FinDr5YOrV72ehclpp4amrczjo5EU3jfr/zxs=";
};
asciinema-player-css = pkgs.fetchurl {
url = "https://github.com/asciinema/asciinema-player/releases/download/v3.7.0/asciinema-player.css";
sha256 = "sha256-GZMeZFFGvP5GMqqh516mjJKfQaiJ6bL38bSYOXkaohc=";
};
module-docs =
pkgs.runCommand "rendered"
{
buildInputs = [
pkgs.python3
self'.packages.clan-cli
];
}
''
export CLAN_CORE_PATH=${
inputs.nixpkgs.lib.fileset.toSource {
root = ../..;
fileset = ../../clanModules;
}
}
export CLAN_CORE_DOCS=${jsonDocs.clanCore}/share/doc/nixos/options.json
# A file that contains the links to all clanModule docs
export CLAN_MODULES_VIA_SERVICE=${clanModulesViaService}
export CLAN_SERVICE_INTERFACE=${self'.legacyPackages.clan-service-module-interface}/share/doc/nixos/options.json
export CLAN_OPTIONS_PATH=${self'.legacyPackages.clan-options}/share/doc/nixos/options.json
mkdir $out
# The python script will place mkDocs files in the output directory
exec python3 ${renderOptions}/bin/render-options
'';
in
{
legacyPackages = {
inherit
jsonDocs
clanModulesViaService
;
};
devShells.docs = self'.packages.docs.overrideAttrs (_old: {
nativeBuildInputs = [
# Run: htmlproofer --disable-external
@@ -26,20 +105,22 @@
docs = pkgs.python3.pkgs.callPackage ./default.nix {
inherit (self'.packages)
clan-cli-docs
option-search
docs-options
inventory-api-docs
clan-lib-openapi
module-docs
;
inherit (inputs) nixpkgs;
inherit module-docs;
inherit asciinema-player-js;
inherit asciinema-player-css;
};
deploy-docs = pkgs.callPackage ./deploy-docs.nix { inherit (config.packages) docs; };
inherit module-docs;
};
checks.docs-integrity =
pkgs.runCommand "docs-integrity"
{
nativeBuildInputs = [ pkgs.html-proofer ];
LANG = "C.UTF-8";
}
''
# External links should be avoided in the docs, because they often break

View File

@@ -24,7 +24,7 @@
serviceModules = self.clan.modules;
baseHref = "/option-search/";
baseHref = "/options/";
getRoles =
module:
@@ -118,7 +118,7 @@
_file = "docs flake-module";
imports = [
{ _module.args = { inherit clanLib; }; }
(import ../../lib/modules/inventoryClass/roles-interface.nix {
(import ../../../lib/modules/inventoryClass/roles-interface.nix {
nestedSettingsOption = mkOption {
type = types.raw;
description = ''
@@ -201,7 +201,7 @@
# };
packages = {
option-search =
docs-options =
if privateInputs ? nuschtos then
privateInputs.nuschtos.packages.${pkgs.stdenv.hostPlatform.system}.mkMultiSearch {
inherit baseHref;

View File

@@ -1,9 +0,0 @@
# clan-core release notes 25.11
<!-- This is not rendered yet -->
## New features
## Breaking Changes
## Misc

View File

@@ -4,14 +4,14 @@ This guide will help you convert your existing NixOS configurations into a Clan.
Migrating instead of starting new can be trickier and might lead to bugs or
unexpected issues. We recommend reading the [Getting Started](../getting-started/creating-your-first-clan.md) guide first.
Once you have a working setup and understand the concepts transferring your NixOS configurations over is easy.
Once you have a working setup and understand the concepts transfering your NixOS configurations over is easy.
## Back up your existing configuration
Before you start, it is strongly recommended to back up your existing
configuration in any form you see fit. If you use version control to manage
your configuration changes, it is also a good idea to follow the migration
guide in a separate branch until everything works as expected.
guide in a separte branch until everything works as expected.
## Starting Point

View File

@@ -67,59 +67,6 @@ nix build .#checks.x86_64-linux.{test-attr-name}
```
(replace `{test-attr-name}` with the name of the test)
### Testing services with vars
Services that define their own vars (using `clan.core.vars.generators`) require generating test vars before running the tests.
#### Understanding the `clan.directory` setting
The `clan.directory` option is critical for vars generation and loading in tests. This setting determines:
1. **Where vars are generated**: When you run `update-vars`, it creates `vars/` and `sops/` directories inside the path specified by `clan.directory`
2. **Where vars are loaded from**: During test execution, machines look for their vars and secrets relative to `clan.directory`
#### Generating test vars
For services that define vars, you must first run:
```shellSession
nix run .#checks.x86_64-linux.{test-attr-name}.update-vars
```
This generates the necessary var files in the directory specified by `clan.directory`. After running this command, you can run the test normally:
```shellSession
nix run .#checks.x86_64-linux.{test-attr-name}
```
#### Example: service-dummy-test
The `service-dummy-test` is a good example of a test that uses vars. To run it:
```shellSession
# First, generate the test vars
nix run .#checks.x86_64-linux.service-dummy-test.update-vars
# Then run the test
nix run .#checks.x86_64-linux.service-dummy-test
```
#### Common issues
If `update-vars` fails, you may need to ensure that:
- **`clan.directory` is set correctly**: It should point to the directory where you want vars to be generated (typically `clan.directory = ./.;` in your test definition)
- **Your test defines machines**: Machines must be defined in `clan.inventory.machines` or through the inventory system
- **Machine definitions are complete**: Each machine should have the necessary service configuration that defines the vars generators
**If vars are not found during test execution:**
- Verify that `clan.directory` points to the same location where you ran `update-vars`
- Check that the `vars/` and `sops/` directories exist in that location
- Ensure the generated files match the machines and generators defined in your test
You can reference `/checks/service-dummy-test/` to see a complete working example of a test with vars, including the correct directory structure.
### Debugging VM tests
The following techniques can be used to debug a VM test:

View File

@@ -1,213 +0,0 @@
A common use case you might have is to host services and applications which are
only reachable within your clan.
This guide explains how to set up such secure, clan-internal web services using
a custom top-level domain (TLD) with SSL certificates.
Your services will be accessible only within your clan network and secured with
proper SSL certificates that all clan machines trust.
## Overview
By combining the `coredns` and `certificates` clan services, you can:
- Create a custom TLD for your clan (e.g. `.c`)
- Host internal web services accessible via HTTPS (e.g. `https://api.c`, `https://dashboard.c`)
- Automatically provision and trust SSL certificates across all clan machines
- Keep internal services secure and isolated from the public internet
The setup uses two clan services working together:
- **coredns service**: Provides DNS resolution for your custom TLD within the clan
- **certificates service**: Creates a certificate authority (CA) and issues SSL certificates for your TLD
### DNS Resolution Flow
1. A clan machine tries to access `https://service.c`
2. The machine queries its local DNS resolver (unbound)
3. For `.c` domains, the query is forwarded to your clan's CoreDNS server. All
other domains will be resolved as usual.
4. CoreDNS returns the IP address of the machine hosting the service
5. The machine connects directly to the service over HTTPS
6. The SSL certificate is trusted because all machines trust your clan's CA
## Step-by-Step Setup
The following setup assumes you have a VPN (e.g. Zerotier) already running. The
IPs configured in the options below will probably be the Zerotier-IPs of the
respective machines.
### Configure the CoreDNS Service
The CoreDNS service has two roles:
- `server`: Runs the DNS server for your custom TLD
- `default`: Makes machines use the DNS server for TLD resolution and allows exposing services
Add this to your inventory:
```nix
inventory = {
machines = {
dns-server = { }; # Machine that will run the DNS server
web-server = { }; # Machine that will host web services
client = { }; # Any other machines in your clan
};
instances = {
coredns = {
# Add the default role to all machines
roles.default.tags = [ "all" ];
# DNS server for the .c TLD
roles.server.machines.dns-server.settings = {
ip = "192.168.1.10"; # IP of your DNS server machine
tld = "c";
};
# Machine hosting services (example: ca.c and admin.c)
roles.default.machines.web-server.settings = {
ip = "192.168.1.20"; # IP of your web server
services = [ "ca" "admin" ];
};
};
};
};
```
### Configure the Certificates Service
The certificates service also has two roles:
- `ca`: Sets up the certificate authority on a server
- `default`: Makes machines trust the CA and allows them to request certificates
Add this to your inventory:
```nix
inventory = {
instances = {
# ... coredns configuration from above ...
certificates = {
# Set up CA for .c domain
roles.ca.machines.dns-server.settings = {
tlds = [ "c" ];
acmeEmail = "admin@example.com"; # Optional: your email
};
# Add default role to all machines to trust the CA
roles.default.tags = [ "all" ];
};
};
};
```
### Complete Example Configuration
Here's a complete working example:
```nix
inventory = {
machines = {
caserver = { }; # DNS server + CA + web services
webserver = { }; # Additional web services
client = { }; # Client machine
};
instances = {
coredns = {
# Add the default role to all machines
roles.default.tags = [ "all" ];
# DNS server for the .c TLD
roles.server.machines.caserver.settings = {
ip = "192.168.8.5";
tld = "c";
};
# machine hosting https://ca.c (our CA for SSL)
roles.default.machines.caserver.settings = {
ip = "192.168.8.5";
services = [ "ca" ];
};
# machine hosting https://blub.c (some internal web-service)
roles.default.machines.webserver.settings = {
ip = "192.168.8.6";
services = [ "blub" ];
};
};
# Provide https for the .c top-level domain
certificates = {
roles.ca.machines.caserver.settings = {
tlds = [ "c" ];
acmeEmail = "admin@example.com";
};
roles.default.tags = [ "all" ];
};
};
};
```
## Testing Your Configuration
DNS resolution can be tested with:
```bash
# On any clan machine, test DNS resolution
nslookup ca.c
nslookup blub.c
```
You should also now be able to visit `https://ca.c` to access the certificate authority or visit `https://blub.c` to access your web service.
## Troubleshooting
### DNS Resolution Issues
1. **Check if DNS server is running**:
```bash
# On the DNS server machine
systemctl status coredns
```
2. **Verify DNS configuration**:
```bash
# Check if the right nameservers are configured
cat /etc/resolv.conf
systemctl status systemd-resolved
```
3. **Test DNS directly**:
```bash
# Query the DNS server directly
dig @192.168.8.5 ca.c
```
### Certificate Issues
1. **Check CA status**:
```bash
# On the CA machine
systemctl status step-ca
systemctl status nginx
```
2. **Verify certificate trust**:
```bash
# Test certificate trust
curl -v https://ca.c
openssl s_client -connect ca.c:443 -verify_return_error
```
3. **Check ACME configuration**:
```bash
# View ACME certificates
ls /var/lib/acme/
journalctl -u acme-ca.c.service
```

View File

@@ -288,7 +288,7 @@ of their type.
In the inventory we the assign machines to a type, e.g. by using tags
```nix title="flake.nix"
instances.machine-type = {
instnaces.machine-type = {
module.input = "self";
module.name = "@pinpox/machine-type";
roles.desktop.tags.desktop = { };
@@ -303,4 +303,3 @@ instances.machine-type = {
- [Reference Documentation for Service Authors](../../reference/options/clan_service.md)
- [Migration Guide from ClanModules to ClanServices](../../guides/migrations/migrate-inventory-services.md)
- [Decision that lead to ClanServices](../../decisions/01-Clan-Modules.md)
- [Testing Guide for Services with Vars](../contributing/testing.md#testing-services-with-vars)

View File

@@ -70,8 +70,6 @@ hide:
.clamp-toggle:checked ~ .clamp-more::after { content: "Read less"; }
</style>
trivial change
<div class="clamp-wrap" style="--lines: 3;">
<input type="checkbox" id="clan-readmore" class="clamp-toggle" />
<div class="clamp-content">
@@ -124,7 +122,7 @@ trivial change
command line interface
- [Clan Options](./reference/options/clan.md)
- [Clan Options](/options)
---

View File

@@ -4,10 +4,10 @@ This section of the site provides an overview of available options and commands
---
- [Clan Configuration Option](/options) - for defining a Clan
- Learn how to use the [Clan CLI](../reference/cli/index.md)
- Explore available [services](../services/definition.md)
- [NixOS Configuration Options](../reference/clan.core/index.md) - Additional options available on a NixOS machine.
- [Search Clan Option](/option-search) - for defining a Clan
---

View File

@@ -77,8 +77,6 @@
};
};
};
# Allows downstream users to inject "unsupported" nixpkgs versions
checks.minNixpkgsVersion.ignore = true;
};
systems = import systems;
imports = [

View File

@@ -52,6 +52,8 @@
"checks/secrets/sops/groups/group/machines/machine"
"checks/syncthing/introducer/introducer_device_id"
"checks/syncthing/introducer/introducer_test_api"
"docs/site/static/asciinema-player/asciinema-player.css"
"docs/site/static/asciinema-player/asciinema-player.min.js"
"nixosModules/clanCore/vars/secret/sops/eval-tests/populated/vars/my_machine/my_generator/my_secret"
"pkgs/clan-cli/clan_cli/tests/data/gnupg.conf"
"pkgs/clan-cli/clan_cli/tests/data/password-store/.gpg-id"
@@ -92,6 +94,9 @@
"*.yaml"
"*.yml"
];
excludes = [
"*/asciinema-player/*"
];
};
treefmt.programs.mypy.directories = {
"clan-cli" = {

View File

@@ -149,13 +149,6 @@ let
# TODO: Add index support in nixpkgs first
# else if type.name == "listOf" then
# handleListOf meta.list
else if type.name == "either" then
# For either(oneOf) types, we skip introspection as we cannot
# determine which branch of the union was taken without more context
# This *should* be safe, as it can currently mostly be triggered through
# The `extraModules` setting of inventory modules and seems to be better
# than just aborting entirely.
{ }
else
throw "Yet Unsupported type: ${type.name}";
in

View File

@@ -1,8 +1,7 @@
# tests for the nixos options to jsonschema converter
# run these tests via `nix-unit ./test.nix`
{
lib ? import /home/johannes/git/nixpkgs/lib,
# lib ? (import <nixpkgs> { }).lib,
lib ? (import <nixpkgs> { }).lib,
slib ? (import ./. { inherit lib; }),
}:
let
@@ -68,38 +67,31 @@ in
};
};
};
test_no_default =
let
configuration = (
eval [
{
options.foo.bar = lib.mkOption {
type = lib.types.bool;
};
}
]
);
in
{
inherit configuration;
expr = stableView (
slib.getPrios {
options = configuration.options;
}
);
expected = {
foo = {
bar = {
__this = {
files = [ ];
prio = 9999;
total = false;
};
test_no_default = {
expr = stableView (
slib.getPrios {
options =
(eval [
{
options.foo.bar = lib.mkOption {
type = lib.types.bool;
};
}
]).options;
}
);
expected = {
foo = {
bar = {
__this = {
files = [ ];
prio = 9999;
total = false;
};
};
};
};
};
test_submodule = {
expr = stableView (
@@ -707,44 +699,4 @@ in
};
};
};
test_listOf_either =
let
evaluated = eval [
{
options.extraModules = lib.mkOption {
description = "List of modules that can be strings, paths, or attrsets";
default = [ ];
type = lib.types.listOf (
lib.types.oneOf [
lib.types.str
lib.types.path
(lib.types.attrsOf lib.types.anything)
]
);
};
}
({
_file = "config.nix";
extraModules = [
"modules/common.nix"
./some/path.nix
{ config = { }; }
];
})
];
result = slib.getPrios { options = evaluated.options; };
in
{
inherit evaluated;
# Test that either types in list items return empty objects
# This is a behavioral test and not necessarily the correct
# behavior. But this is better than crashing on people directly.
expr = result.extraModules.__list;
expected = [
{ }
{ }
{ }
];
};
}

View File

@@ -11,35 +11,28 @@ export default meta;
type Story = StoryObj<ClanSettingsModalProps>;
const props: ClanSettingsModalProps = {
onClose: fn(),
model: {
uri: "/home/foo/my-clan",
details: {
export const Default: Story = {
args: {
onClose: fn(),
model: {
uri: "/home/foo/my-clan",
name: "Sol",
description: null,
icon: null,
},
fieldsSchema: {
name: {
readonly: true,
reason: null,
readonly_members: [],
},
description: {
readonly: false,
reason: null,
readonly_members: [],
},
icon: {
readonly: false,
reason: null,
readonly_members: [],
fieldsSchema: {
name: {
readonly: true,
reason: null,
},
description: {
readonly: false,
reason: null,
},
icon: {
readonly: false,
reason: null,
},
},
},
},
};
export const Default: Story = {
args: props,
};

View File

@@ -75,14 +75,13 @@ class TestFlake(Flake):
def path(self) -> Path:
return self.test_dir
def machine_selector(self, machine_name: str, selector: str) -> str:
"""Create a selector for a specific machine.
def select_machine(self, machine_name: str, selector: str) -> Any:
"""Select a nix attribute for a specific machine.
Args:
machine_name: The name of the machine
selector: The attribute selector string relative to the machine config
Returns:
The full selector string for the machine
apply: Optional function to apply to the result
"""
config = nix_config()
@@ -90,7 +89,9 @@ class TestFlake(Flake):
test_system = system
if system.endswith("-darwin"):
test_system = system.rstrip("darwin") + "linux"
return f'checks."{test_system}".{self.check_attr}.machinesCross."{system}"."{machine_name}".{selector}'
full_selector = f'checks."{test_system}".{self.check_attr}.machinesCross.{system}."{machine_name}".{selector}'
return self.select(full_selector)
# we don't want to evaluate all machines of the flake. Only the ones defined in the test
def set_machine_names(self, machine_names: list[str]) -> None:

View File

@@ -355,10 +355,7 @@ def get_public_age_key_from_private_key(privkey: str) -> str:
cmd = nix_shell(["age"], ["age-keygen", "-y"])
error_msg = "Failed to get public key for age private key. Is the key malformed?"
res = run(
cmd,
RunOpts(input=privkey.encode(), error_msg=error_msg, sensitive_input=True),
)
res = run(cmd, RunOpts(input=privkey.encode(), error_msg=error_msg))
return res.stdout.rstrip(os.linesep).rstrip()

View File

@@ -166,16 +166,16 @@ def test_generate_public_and_secret_vars(
assert shared_value.startswith("shared")
vars_text = stringify_all_vars(machine)
flake_obj = Flake(str(flake.path))
my_generator = Generator("my_generator", machines=["my_machine"], _flake=flake_obj)
my_generator = Generator("my_generator", machine="my_machine", _flake=flake_obj)
shared_generator = Generator(
"my_shared_generator",
share=True,
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
dependent_generator = Generator(
"dependent_generator",
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
in_repo_store = in_repo.FactStore(flake=flake_obj)
@@ -340,12 +340,12 @@ def test_generate_secret_var_sops_with_default_group(
flake_obj = Flake(str(flake.path))
first_generator = Generator(
"first_generator",
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
second_generator = Generator(
"second_generator",
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
in_repo_store = in_repo.FactStore(flake=flake_obj)
@@ -375,13 +375,13 @@ def test_generate_secret_var_sops_with_default_group(
first_generator_with_share = Generator(
"first_generator",
share=False,
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
second_generator_with_share = Generator(
"second_generator",
share=False,
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
assert sops_store.user_has_access("user2", first_generator_with_share, "my_secret")
@@ -432,6 +432,7 @@ def test_generated_shared_secret_sops(
assert check_vars(machine1.name, machine1.flake)
cli.run(["vars", "generate", "--flake", str(flake.path), "machine2"])
assert check_vars(machine2.name, machine2.flake)
assert check_vars(machine2.name, machine2.flake)
m1_sops_store = sops.SecretStore(machine1.flake)
m2_sops_store = sops.SecretStore(machine2.flake)
# Create generators with machine context for testing
@@ -512,28 +513,28 @@ def test_generate_secret_var_password_store(
"my_generator",
share=False,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
my_generator_shared = Generator(
"my_generator",
share=True,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
my_shared_generator = Generator(
"my_shared_generator",
share=True,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
my_shared_generator_not_shared = Generator(
"my_shared_generator",
share=False,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
assert store.exists(my_generator, "my_secret")
@@ -545,7 +546,7 @@ def test_generate_secret_var_password_store(
name="my_generator",
share=False,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
assert store.get(generator, "my_secret").decode() == "hello\n"
@@ -556,7 +557,7 @@ def test_generate_secret_var_password_store(
"my_generator",
share=False,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
var_name = "my_secret"
@@ -569,7 +570,7 @@ def test_generate_secret_var_password_store(
"my_generator2",
share=False,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
var_name = "my_secret2"
@@ -581,7 +582,7 @@ def test_generate_secret_var_password_store(
"my_shared_generator",
share=True,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
var_name = "my_shared_secret"
@@ -628,8 +629,8 @@ def test_generate_secret_for_multiple_machines(
in_repo_store2 = in_repo.FactStore(flake=flake_obj)
# Create generators for each machine
gen1 = Generator("my_generator", machines=["machine1"], _flake=flake_obj)
gen2 = Generator("my_generator", machines=["machine2"], _flake=flake_obj)
gen1 = Generator("my_generator", machine="machine1", _flake=flake_obj)
gen2 = Generator("my_generator", machine="machine2", _flake=flake_obj)
assert in_repo_store1.exists(gen1, "my_value")
assert in_repo_store2.exists(gen2, "my_value")
@@ -693,12 +694,12 @@ def test_prompt(
# Set up objects for testing the results
flake_obj = Flake(str(flake.path))
my_generator = Generator("my_generator", machines=["my_machine"], _flake=flake_obj)
my_generator = Generator("my_generator", machine="my_machine", _flake=flake_obj)
my_generator_with_details = Generator(
name="my_generator",
share=False,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
@@ -783,10 +784,10 @@ def test_shared_vars_regeneration(
in_repo_store_2 = in_repo.FactStore(machine2.flake)
# Create generators with machine context for testing
child_gen_m1 = Generator(
"child_generator", share=False, machines=["machine1"], _flake=machine1.flake
"child_generator", share=False, machine="machine1", _flake=machine1.flake
)
child_gen_m2 = Generator(
"child_generator", share=False, machines=["machine2"], _flake=machine2.flake
"child_generator", share=False, machine="machine2", _flake=machine2.flake
)
# generate for machine 1
cli.run(["vars", "generate", "--flake", str(flake.path), "machine1"])
@@ -854,13 +855,13 @@ def test_multi_machine_shared_vars(
generator_m1 = Generator(
"shared_generator",
share=True,
machines=["machine1"],
machine="machine1",
_flake=machine1.flake,
)
generator_m2 = Generator(
"shared_generator",
share=True,
machines=["machine2"],
machine="machine2",
_flake=machine2.flake,
)
# generate for machine 1
@@ -916,9 +917,7 @@ def test_api_set_prompts(
)
machine = Machine(name="my_machine", flake=Flake(str(flake.path)))
store = in_repo.FactStore(machine.flake)
my_generator = Generator(
"my_generator", machines=["my_machine"], _flake=machine.flake
)
my_generator = Generator("my_generator", machine="my_machine", _flake=machine.flake)
assert store.exists(my_generator, "prompt1")
assert store.get(my_generator, "prompt1").decode() == "input1"
run_generators(
@@ -1062,10 +1061,10 @@ def test_migration(
assert "Migrated var my_generator/my_value" in caplog.text
assert "Migrated secret var my_generator/my_secret" in caplog.text
flake_obj = Flake(str(flake.path))
my_generator = Generator("my_generator", machines=["my_machine"], _flake=flake_obj)
my_generator = Generator("my_generator", machine="my_machine", _flake=flake_obj)
other_generator = Generator(
"other_generator",
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
in_repo_store = in_repo.FactStore(flake=flake_obj)
@@ -1211,7 +1210,7 @@ def test_share_mode_switch_regenerates_secret(
sops_store = sops.SecretStore(flake=flake_obj)
generator_not_shared = Generator(
"my_generator", share=False, machines=["my_machine"], _flake=flake_obj
"my_generator", share=False, machine="my_machine", _flake=flake_obj
)
initial_public = in_repo_store.get(generator_not_shared, "my_value").decode()
@@ -1230,7 +1229,7 @@ def test_share_mode_switch_regenerates_secret(
# Read the new values with shared generator
generator_shared = Generator(
"my_generator", share=True, machines=["my_machine"], _flake=flake_obj
"my_generator", share=True, machine="my_machine", _flake=flake_obj
)
new_public = in_repo_store.get(generator_shared, "my_value").decode()
@@ -1265,117 +1264,68 @@ def test_cache_misses_for_vars_operations(
flake: ClanFlake,
) -> None:
"""Test that vars operations result in minimal cache misses."""
# Set up first machine with two generators
config = flake.machines["my_machine"] = create_test_machine_config()
# Set up two generators with public values
gen1 = config["clan"]["core"]["vars"]["generators"]["gen1"]
gen1["files"]["value1"]["secret"] = False
gen1["script"] = 'echo -n "test_value1" > "$out"/value1'
gen2 = config["clan"]["core"]["vars"]["generators"]["gen2"]
gen2["files"]["value2"]["secret"] = False
gen2["script"] = 'echo -n "test_value2" > "$out"/value2'
# Add a second machine with the same generator configuration
flake.machines["other_machine"] = config.copy()
# Set up a simple generator with a public value
my_generator = config["clan"]["core"]["vars"]["generators"]["my_generator"]
my_generator["files"]["my_value"]["secret"] = False
my_generator["script"] = 'echo -n "test_value" > "$out"/my_value'
flake.refresh()
monkeypatch.chdir(flake.path)
# Create fresh machine objects to ensure clean cache state
flake_obj = Flake(str(flake.path))
machine1 = Machine(name="my_machine", flake=flake_obj)
machine2 = Machine(name="other_machine", flake=flake_obj)
# Create a fresh machine object to ensure clean cache state
machine = Machine(name="my_machine", flake=Flake(str(flake.path)))
# Test 1: Running vars generate for BOTH machines simultaneously should still result in exactly 2 cache misses
# Even though we have:
# - 2 machines (my_machine and other_machine)
# - 2 generators per machine (gen1 and gen2)
# We still only get 2 cache misses when generating for both machines:
# 1. One for getting the list of generators for both machines
# 2. One batched evaluation for getting all generator scripts for both machines
# The key insight: the system should batch ALL evaluations across ALL machines into a single nix eval
# Test 1: Running vars generate with a fresh cache should result in exactly 3 cache misses
# Expected cache misses:
# 1. One for getting the list of generators
# 2. One for getting the final script of our test generator (my_generator)
# 3. One for getting the final script of the state version generator (added by default)
# TODO: The third cache miss is undesired in tests. disable state version module for tests
run_generators(
machines=[machine1, machine2],
machines=[machine],
generators=None, # Generate all
)
# Print stack traces if we have more than 2 cache misses
if flake_obj._cache_misses != 2:
flake_obj.print_cache_miss_analysis(
# Print stack traces if we have more than 3 cache misses
if machine.flake._cache_misses != 3:
machine.flake.print_cache_miss_analysis(
title="Cache miss analysis for vars generate"
)
assert flake_obj._cache_misses == 2, (
f"Expected exactly 2 cache misses for vars generate, got {flake_obj._cache_misses}"
assert machine.flake._cache_misses == 2, (
f"Expected exactly 2 cache misses for vars generate, got {machine.flake._cache_misses}"
)
# Verify the value was generated correctly
var_value = get_machine_var(machine, "my_generator/my_value")
assert var_value.printable_value == "test_value"
# Test 2: List all vars should result in exactly 1 cache miss
# Force cache invalidation (this also resets cache miss tracking)
invalidate_flake_cache(flake.path)
flake_obj.invalidate_cache()
machine.flake.invalidate_cache()
stringify_all_vars(machine1)
assert flake_obj._cache_misses == 1, (
f"Expected exactly 1 cache miss for vars list, got {flake_obj._cache_misses}"
stringify_all_vars(machine)
assert machine.flake._cache_misses == 1, (
f"Expected exactly 1 cache miss for vars list, got {machine.flake._cache_misses}"
)
# Test 3: Getting a specific var with a fresh cache should result in exactly 1 cache miss
# Force cache invalidation (this also resets cache miss tracking)
invalidate_flake_cache(flake.path)
flake_obj.invalidate_cache()
machine.flake.invalidate_cache()
# Only test gen1 for the get operation
var_value = get_machine_var(machine1, "gen1/value1")
assert var_value.printable_value == "test_value1"
var_value = get_machine_var(machine, "my_generator/my_value")
assert var_value.printable_value == "test_value"
assert flake_obj._cache_misses == 1, (
f"Expected exactly 1 cache miss for vars get with fresh cache, got {flake_obj._cache_misses}"
assert machine.flake._cache_misses == 1, (
f"Expected exactly 1 cache miss for vars get with fresh cache, got {machine.flake._cache_misses}"
)
@pytest.mark.with_core
def test_shared_generator_conflicting_definition_raises_error(
monkeypatch: pytest.MonkeyPatch,
flake_with_sops: ClanFlake,
) -> None:
"""Test that vars generation raises an error when two machines have different
definitions for the same shared generator.
"""
flake = flake_with_sops
# Create machine1 with a shared generator
machine1_config = flake.machines["machine1"] = create_test_machine_config()
shared_gen1 = machine1_config["clan"]["core"]["vars"]["generators"][
"shared_generator"
]
shared_gen1["share"] = True
shared_gen1["files"]["file1"]["secret"] = False
shared_gen1["script"] = 'echo "test" > "$out"/file1'
# Create machine2 with the same shared generator but different files
machine2_config = flake.machines["machine2"] = create_test_machine_config()
shared_gen2 = machine2_config["clan"]["core"]["vars"]["generators"][
"shared_generator"
]
shared_gen2["share"] = True
shared_gen2["files"]["file2"]["secret"] = False # Different file name
shared_gen2["script"] = 'echo "test" > "$out"/file2'
flake.refresh()
monkeypatch.chdir(flake.path)
# Attempting to generate vars for both machines should raise an error
# because they have conflicting definitions for the same shared generator
with pytest.raises(
ClanError,
match=".*differ.*",
):
cli.run(["vars", "generate", "--flake", str(flake.path)])
@pytest.mark.with_core
def test_dynamic_invalidation(
monkeypatch: pytest.MonkeyPatch,

View File

@@ -40,15 +40,12 @@ class StoreBase(ABC):
def get_machine(self, generator: "Generator") -> str:
"""Get machine name from generator, asserting it's not None for now."""
if generator.share:
return "__shared"
if not generator.machines:
if generator.machine is None:
if generator.share:
return "__shared"
msg = f"Generator '{generator.name}' has no machine associated"
raise ClanError(msg)
if len(generator.machines) != 1:
msg = f"Generator '{generator.name}' has {len(generator.machines)} machines, expected exactly 1"
raise ClanError(msg)
return generator.machines[0]
return generator.machine
# get a single fact
@abstractmethod
@@ -150,7 +147,7 @@ class StoreBase(ABC):
prev_generator = dataclasses.replace(
generator,
share=not generator.share,
machines=[] if not generator.share else [machine],
machine=machine if generator.share else None,
)
if self.exists(prev_generator, var.name):
changed_files += self.delete(prev_generator, var.name)
@@ -168,12 +165,12 @@ class StoreBase(ABC):
new_file = self._set(generator, var, value, machine)
action_str = "Migrated" if is_migration else "Updated"
log_info: Callable
if generator.share:
if generator.machine is None:
log_info = log.info
else:
from clan_lib.machines.machines import Machine # noqa: PLC0415
machine_obj = Machine(name=generator.machines[0], flake=self.flake)
machine_obj = Machine(name=generator.machine, flake=self.flake)
log_info = machine_obj.info
if self.is_secret_store:
log.info(f"{action_str} secret var {generator.name}/{var.name}\n")

View File

@@ -61,22 +61,14 @@ class Generator:
migrate_fact: str | None = None
validation_hash: str | None = None
machines: list[str] = field(default_factory=list)
machine: str | None = None
_flake: "Flake | None" = None
_public_store: "StoreBase | None" = None
_secret_store: "StoreBase | None" = None
@property
def key(self) -> GeneratorKey:
if self.share:
# must be a shared generator
machine = None
elif len(self.machines) != 1:
msg = f"Shared generator {self.name} must have exactly one machine, but has {len(self.machines)}: {', '.join(self.machines)}"
raise ClanError(msg)
else:
machine = self.machines[0]
return GeneratorKey(machine=machine, name=self.name)
return GeneratorKey(machine=self.machine, name=self.name)
def __hash__(self) -> int:
return hash(self.key)
@@ -151,10 +143,7 @@ class Generator:
files_selector = "config.clan.core.vars.generators.*.files.*.{secret,deploy,owner,group,mode,neededFor}"
flake.precache(cls.get_machine_selectors(machine_names))
generators: list[Generator] = []
shared_generators_raw: dict[
str, tuple[str, dict, dict]
] = {} # name -> (machine_name, gen_data, files_data)
generators = []
for machine_name in machine_names:
# Get all generator metadata in one select (safe fields only)
@@ -176,38 +165,6 @@ class Generator:
sec_store = machine.secret_vars_store
for gen_name, gen_data in generators_data.items():
# Check for conflicts in shared generator definitions using raw data
if gen_data["share"]:
if gen_name in shared_generators_raw:
prev_machine, prev_gen_data, prev_files_data = (
shared_generators_raw[gen_name]
)
# Compare raw data
prev_gen_files = prev_files_data.get(gen_name, {})
curr_gen_files = files_data.get(gen_name, {})
# Build list of differences with details
differences = []
if prev_gen_files != curr_gen_files:
differences.append("files")
if prev_gen_data.get("prompts") != gen_data.get("prompts"):
differences.append("prompts")
if prev_gen_data.get("dependencies") != gen_data.get(
"dependencies"
):
differences.append("dependencies")
if prev_gen_data.get("validationHash") != gen_data.get(
"validationHash"
):
differences.append("validation_hash")
if differences:
msg = f"Machines {prev_machine} and {machine_name} have different definitions for shared generator '{gen_name}' (differ in: {', '.join(differences)})"
raise ClanError(msg)
else:
shared_generators_raw[gen_name] = (
machine_name,
gen_data,
files_data,
)
# Build files from the files_data
files = []
gen_files = files_data.get(gen_name, {})
@@ -252,27 +209,14 @@ class Generator:
migrate_fact=gen_data.get("migrateFact"),
validation_hash=gen_data.get("validationHash"),
prompts=prompts,
# shared generators can have multiple machines, machine-specific have one
machines=[machine_name],
# only set machine for machine-specific generators
# this is essential for the graph algorithms to work correctly
machine=None if share else machine_name,
_flake=flake,
_public_store=pub_store,
_secret_store=sec_store,
)
if share:
# For shared generators, check if we already created it
existing = next(
(g for g in generators if g.name == gen_name and g.share), None
)
if existing:
# Just append the machine to the existing generator
existing.machines.append(machine_name)
else:
# Add the new shared generator
generators.append(generator)
else:
# Always add per-machine generators
generators.append(generator)
generators.append(generator)
# TODO: This should be done in a non-mutable way.
if include_previous_values:
@@ -301,19 +245,15 @@ class Generator:
return sec_store.get(self, prompt.name).decode()
return None
def final_script_selector(self, machine_name: str) -> str:
if self._flake is None:
msg = "Flake cannot be None"
raise ClanError(msg)
return self._flake.machine_selector(
machine_name, f'config.clan.core.vars.generators."{self.name}".finalScript'
)
def final_script(self, machine: "Machine") -> Path:
if self._flake is None:
msg = "Flake cannot be None"
raise ClanError(msg)
output = Path(self._flake.select(self.final_script_selector(machine.name)))
output = Path(
machine.select(
f'config.clan.core.vars.generators."{self.name}".finalScript',
),
)
if tmp_store := nix_test_store():
output = tmp_store.joinpath(*output.parts[1:])
return output

View File

@@ -49,28 +49,28 @@ def test_required_generators() -> None:
gen_1 = Generator(
name="gen_1",
dependencies=[],
machines=[machine_name],
machine=machine_name,
_public_store=public_store,
_secret_store=secret_store,
)
gen_2 = Generator(
name="gen_2",
dependencies=[gen_1.key],
machines=[machine_name],
machine=machine_name,
_public_store=public_store,
_secret_store=secret_store,
)
gen_2a = Generator(
name="gen_2a",
dependencies=[gen_2.key],
machines=[machine_name],
machine=machine_name,
_public_store=public_store,
_secret_store=secret_store,
)
gen_2b = Generator(
name="gen_2b",
dependencies=[gen_2.key],
machines=[machine_name],
machine=machine_name,
_public_store=public_store,
_secret_store=secret_store,
)
@@ -118,22 +118,21 @@ def test_shared_generator_invalidates_multiple_machines_dependents() -> None:
shared_gen = Generator(
name="shared_gen",
dependencies=[],
share=True, # Mark as shared generator
machines=[machine_1, machine_2], # Shared across both machines
machine=None, # Shared generator
_public_store=public_store,
_secret_store=secret_store,
)
gen_1 = Generator(
name="gen_1",
dependencies=[shared_gen.key],
machines=[machine_1],
machine=machine_1,
_public_store=public_store,
_secret_store=secret_store,
)
gen_2 = Generator(
name="gen_2",
dependencies=[shared_gen.key],
machines=[machine_2],
machine=machine_2,
_public_store=public_store,
_secret_store=secret_store,
)

View File

@@ -294,8 +294,6 @@ class RunOpts:
# This is needed for GUI applications
graphical_perm: bool = False
trace: bool = True
# Mark input as sensitive to prevent it from being logged (e.g., private keys, passwords)
sensitive_input: bool = False
def cmd_with_root(cmd: list[str], graphical: bool = False) -> list[str]:
@@ -351,10 +349,7 @@ def run(
if cmdlog.isEnabledFor(logging.DEBUG) and options.trace:
if options.input and isinstance(options.input, bytes):
# Always redact sensitive input (e.g., private keys, passwords)
if options.sensitive_input:
filtered_input = "<<REDACTED>>"
elif any(
if any(
not ch.isprintable() for ch in options.input.decode("ascii", "replace")
):
filtered_input = "<<binary_blob>>"

View File

@@ -1132,20 +1132,6 @@ class Flake:
return self._cache.select(selector)
def machine_selector(self, machine_name: str, selector: str) -> str:
"""Create a selector for a specific machine.
Args:
machine_name: The name of the machine
selector: The attribute selector string relative to the machine config
Returns:
The full selector string for the machine
"""
config = nix_config()
system = config["system"]
return f'clanInternals.machines."{system}"."{machine_name}".{selector}'
def select_machine(self, machine_name: str, selector: str) -> Any:
"""Select a nix attribute for a specific machine.
@@ -1155,7 +1141,11 @@ class Flake:
apply: Optional function to apply to the result
"""
return self.select(self.machine_selector(machine_name, selector))
config = nix_config()
system = config["system"]
full_selector = f'clanInternals.machines."{system}"."{machine_name}".{selector}'
return self.select(full_selector)
def list_machines(
self,

View File

@@ -136,123 +136,92 @@ def networks_from_flake(flake: Flake) -> dict[str, Network]:
return networks
class BestRemoteContext:
"""Class-based context manager for establishing and maintaining network connections."""
@contextmanager
def get_best_remote(machine: "Machine") -> Iterator["Remote"]:
"""Context manager that yields the best remote connection for a machine following this priority:
1. If machine has targetHost in inventory, return a direct connection
2. Return the highest priority network where machine is reachable
3. If no network works, try to get targetHost from machine nixos config
def __init__(self, machine: "Machine") -> None:
self.machine = machine
self._network_ctx: Any = None
self._remote: Remote | None = None
Args:
machine: Machine instance to connect to
def __enter__(self) -> "Remote":
"""Establish the best remote connection for a machine following this priority:
1. If machine has targetHost in inventory, return a direct connection
2. Return the highest priority network where machine is reachable
3. If no network works, try to get targetHost from machine nixos config
Yields:
Remote object for connecting to the machine
Returns:
Remote object for connecting to the machine
Raises:
ClanError: If no connection method works
Raises:
ClanError: If no connection method works
"""
# Step 1: Check if targetHost is set in inventory
inv_machine = machine.get_inv_machine()
target_host = inv_machine.get("deploy", {}).get("targetHost")
"""
# Step 1: Check if targetHost is set in inventory
inv_machine = self.machine.get_inv_machine()
target_host = inv_machine.get("deploy", {}).get("targetHost")
if target_host:
log.debug(f"Using targetHost from inventory for {machine.name}: {target_host}")
# Create a direct network with just this machine
remote = Remote.from_ssh_uri(machine_name=machine.name, address=target_host)
yield remote
return
if target_host:
log.debug(
f"Using targetHost from inventory for {self.machine.name}: {target_host}"
)
self._remote = Remote.from_ssh_uri(
machine_name=self.machine.name, address=target_host
)
return self._remote
# Step 2: Try existing networks by priority
try:
networks = networks_from_flake(machine.flake)
# Step 2: Try existing networks by priority
try:
networks = networks_from_flake(self.machine.flake)
sorted_networks = sorted(networks.items(), key=lambda x: -x[1].priority)
sorted_networks = sorted(networks.items(), key=lambda x: -x[1].priority)
for network_name, network in sorted_networks:
if self.machine.name not in network.peers:
continue
for network_name, network in sorted_networks:
if machine.name not in network.peers:
continue
log.debug(f"trying to connect via {network_name}")
if network.is_running():
try:
ping_time = network.ping(self.machine.name)
# Check if network is running and machine is reachable
log.debug(f"trying to connect via {network_name}")
if network.is_running():
try:
ping_time = network.ping(machine.name)
if ping_time is not None:
log.info(
f"Machine {machine.name} reachable via {network_name} network",
)
yield network.remote(machine.name)
return
except ClanError as e:
log.debug(f"Failed to reach {machine.name} via {network_name}: {e}")
else:
try:
log.debug(f"Establishing connection for network {network_name}")
with network.module.connection(network) as connected_network:
ping_time = connected_network.ping(machine.name)
if ping_time is not None:
log.info(
f"Machine {self.machine.name} reachable via {network_name} network",
f"Machine {machine.name} reachable via {network_name} network after connection",
)
self._remote = remote = network.remote(self.machine.name)
return remote
except ClanError as e:
log.debug(
f"Failed to reach {self.machine.name} via {network_name}: {e}"
)
else:
try:
log.debug(f"Establishing connection for network {network_name}")
# Enter the network context and keep it alive
self._network_ctx = network.module.connection(network)
connected_network = self._network_ctx.__enter__()
ping_time = connected_network.ping(self.machine.name)
if ping_time is not None:
log.info(
f"Machine {self.machine.name} reachable via {network_name} network after connection",
)
self._remote = remote = connected_network.remote(
self.machine.name
)
return remote
# Ping failed, clean up this connection attempt
self._network_ctx.__exit__(None, None, None)
self._network_ctx = None
except ClanError as e:
# Clean up failed connection attempt
if self._network_ctx is not None:
self._network_ctx.__exit__(None, None, None)
self._network_ctx = None
log.debug(
f"Failed to establish connection to {self.machine.name} via {network_name}: {e}",
)
except (ImportError, AttributeError, KeyError) as e:
log.debug(
f"Failed to use networking modules to determine machines remote: {e}"
)
yield connected_network.remote(machine.name)
return
except ClanError as e:
log.debug(
f"Failed to establish connection to {machine.name} via {network_name}: {e}",
)
except (ImportError, AttributeError, KeyError) as e:
log.debug(f"Failed to use networking modules to determine machines remote: {e}")
# Step 3: Try targetHost from machine nixos config
target_host = self.machine.select('config.clan.core.networking."targetHost"')
if target_host:
log.debug(
f"Using targetHost from machine config for {self.machine.name}: {target_host}",
)
self._remote = Remote.from_ssh_uri(
machine_name=self.machine.name,
address=target_host,
)
return self._remote
# Step 3: Try targetHost from machine nixos config
target_host = machine.select('config.clan.core.networking."targetHost"')
if target_host:
log.debug(
f"Using targetHost from machine config for {machine.name}: {target_host}",
)
# Check if reachable
remote = Remote.from_ssh_uri(
machine_name=machine.name,
address=target_host,
)
yield remote
return
# No connection method found
msg = f"Could not find any way to connect to machine '{self.machine.name}'. No targetHost configured and machine not reachable via any network."
raise ClanError(msg)
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: object,
) -> None:
"""Clean up network connection if one was established."""
if self._network_ctx is not None:
self._network_ctx.__exit__(exc_type, exc_val, exc_tb)
def get_best_remote(machine: "Machine") -> BestRemoteContext:
return BestRemoteContext(machine)
# No connection method found
msg = f"Could not find any way to connect to machine '{machine.name}'. No targetHost configured and machine not reachable via any network."
raise ClanError(msg)
def get_network_overview(networks: dict[str, Network]) -> dict:

View File

@@ -1,53 +0,0 @@
let
lib = import /home/johannes/git/nixpkgs/lib;
clanLib = import ../../../../lib { inherit lib; };
inherit (lib) evalModules mkOption types;
eval = evalModules {
modules = [
{
options.foos = mkOption {
type = types.attrsOf (
types.submodule {
options.bar = mkOption { };
}
);
};
# config.foos = lib.mkForce { this.bar = 42; };
config.instances.a = { };
# config.foo = lib.mkForce {
# bar = 42;
# };
}
{
_file = "inventory.json";
# instances.a = { setting = };
}
# {
# options.foo = mkOption {
# type = types.attrsOf (types.attrsOf (types.submoduleWith { modules = [
# {
# options.bar = mkOption {};
# }
# ]; }));
# default = { bar = { }; };
# };
# }
# {
# _file = "static.nix";
# foo.static.thing = { bar = 1; }; # <- Can: Op.Modify
# }
# {
# _file = "inventory.json";
# foo.managed.thing = { bar = 1; }; # <- Can: Op.Delete, Op.Modify
# #
# }
];
};
in
{
inherit clanLib eval;
}

View File

@@ -1,11 +1,7 @@
from enum import Enum
from typing import Any, TypedDict
from clan_lib.errors import ClanError
from clan_lib.persist.path_utils import (
PathTuple,
path_to_string,
)
from clan_lib.persist.path_utils import PathTuple, path_to_string
WRITABLE_PRIORITY_THRESHOLD = 100 # Values below this are not writeable
@@ -193,69 +189,3 @@ def compute_write_map(
"""
return _determine_writeability_recursive(priorities, all_values, persisted)
class RawAttributes(TypedDict):
headType: str
nullable: bool
prio: int
total: bool
files: list[str]
class OpType(Enum):
MODIFY = "modify"
DELETE = "delete"
def transform_attribute_properties(
introspection: dict[str, Any],
all_values: dict[str, Any],
persisted: dict[str, Any],
# Passthrough for recursion
curr_path: PathTuple = (),
parent_attributes: RawAttributes | None = None,
) -> dict[PathTuple, set[OpType]]:
"""Transform attribute properties to ensure correct types and defaults."""
results: dict[PathTuple, set[OpType]] = {}
for key, key_meta in introspection.items():
if key in {"__this", "__list"}:
continue
path = (*curr_path, key)
results[path] = set()
local_attributes: RawAttributes = key_meta.get("__this")
key_priority = local_attributes["prio"] or None
effective_priority = key_priority or (
parent_attributes["prio"] if parent_attributes else None
)
if effective_priority is None:
msg = f"Priority for path '{path_to_string(path)}' is not defined and no parent to inherit from. Cannot determine effective priority."
raise ClanError(msg)
if isinstance(key_meta, dict):
subattrs = transform_attribute_properties(
key_meta,
all_values.get(key, {}),
persisted.get(key, {}),
curr_path=path,
parent_attributes=local_attributes,
)
results.update(dict(subattrs.items()))
return results
# Only defined in inventory.json -> We might be able to delete it, because we defined it.
# But we could also have some option default somewhere else, so we cannot be sure.
# if all(f.endswith("inventory.json") for f in raw_attributes["files"]):
# operations.add(OpType.DELETE)
# if (
# raw_attributes["prio"] >= WRITABLE_PRIORITY_THRESHOLD
# or ".json" in raw_attributes["files"]
# ):
# operations.add(OpType.MODIFY)

View File

@@ -5,110 +5,11 @@ import pytest
from clan_lib.flake.flake import Flake
from clan_lib.persist.inventory_store import InventoryStore
from clan_lib.persist.write_rules import (
compute_write_map,
transform_attribute_properties,
)
from clan_lib.persist.write_rules import compute_write_map
if TYPE_CHECKING:
from clan_lib.nix_models.clan import Clan
# foos.this = lib.mkForce { bar = 42; };
# ->
# {
# foos = {
# __this = {
# files = [
# "inventory.json"
# "<unknown-file>"
# ];
# headType = "attrsOf";
# nullable = false;
# prio = 100;
# total = false;
# };
# this = {
# __this = {
# files = [ "<unknown-file>" ];
# headType = "submodule";
# nullable = false;
# prio = 50;
# total = true;
# };
# bar = {
# __this = {
# files = [ "<unknown-file>" ];
# headType = "unspecified";
# nullable = false;
# prio = 100;
# total = false;
# };
# };
# };
# };
# }
def test_write_new() -> None:
all_data: dict = {"foo": {"bar": 42}}
persisted_data: dict = {}
introspection: dict = {
"foo": {
"__this": {
"files": ["/dir/file.nix"],
"headType": "unspecified",
"nullable": False,
"prio": 100, # <- default prio
"total": False,
},
"bar": {
"__this": {
"files": ["/dir/file.nix"],
"headType": "int",
"nullable": False,
"prio": 100, # <- default prio
"total": False,
}
},
}
}
res = transform_attribute_properties(introspection, all_data, persisted_data)
breakpoint()
# No operations allowed, because mkForce
# We cannot modify this value in ANY possible way.
# inventory.json definitions and children definition are filtered out by the module system
# assert attributes == {"operations": set(), "path": ["foo", "bar"]}
# normal_prio_attrs: RawAttributes = {
# "files": ["/dir/file.nix"],
# "headType": "attrsOf",
# "nullable": False,
# "prio": 100, # <- default prio
# "total": False,
# }
# attributes = transform_attribute_properties(("foo", "bar"), normal_prio_attrs)
# # We can modify this value, because its a normal prio
# # This means keys can be added/removed/changed respecting their individual local constraints
# assert attributes == {"operations": { OpType.MODIFY }, "path": ["foo", "bar"]}
# default_prio_attrs: RawAttributes = {
# "files": ["/dir/file.nix"],
# "headType": "attrsOf",
# "nullable": False,
# "prio": 100, # <- default prio
# "total": False,
# }
# attributes = transform_attribute_properties(("foo", "bar"), default_prio_attrs)
# # We can modify this value, because its a normal prio
# # This means keys can be added/removed/changed respecting their individual local constraints
# assert attributes == {"operations": { OpType.MODIFY, OpType.DELETE }, "path": ["foo", "bar"]}
# Integration test
@pytest.mark.with_core

View File

@@ -93,21 +93,21 @@ def _ensure_healthy(
if generators is None:
generators = Generator.get_machine_generators([machine.name], machine.flake)
public_health_check_msg = machine.public_vars_store.health_check(
pub_healtcheck_msg = machine.public_vars_store.health_check(
machine.name,
generators,
)
secret_health_check_msg = machine.secret_vars_store.health_check(
sec_healtcheck_msg = machine.secret_vars_store.health_check(
machine.name,
generators,
)
if public_health_check_msg or secret_health_check_msg:
if pub_healtcheck_msg or sec_healtcheck_msg:
msg = f"Health check failed for machine {machine.name}:\n"
if public_health_check_msg:
msg += f"Public vars store: {public_health_check_msg}\n"
if secret_health_check_msg:
msg += f"Secret vars store: {secret_health_check_msg}"
if pub_healtcheck_msg:
msg += f"Public vars store: {pub_healtcheck_msg}\n"
if sec_healtcheck_msg:
msg += f"Secret vars store: {sec_healtcheck_msg}"
raise ClanError(msg)
@@ -177,25 +177,13 @@ def run_generators(
for machine in machines:
_ensure_healthy(machine=machine)
# get the flake via any machine (they are all the same)
flake = machines[0].flake
def get_generator_machine(generator: Generator) -> Machine:
if generator.share:
# return first machine if generator is shared
return machines[0]
return Machine(name=generator.machines[0], flake=flake)
# preheat the select cache, to reduce repeated calls during execution
selectors = []
for generator in generator_objects:
machine = get_generator_machine(generator)
selectors.append(generator.final_script_selector(machine.name))
flake.precache(selectors)
# execute generators
for generator in generator_objects:
machine = get_generator_machine(generator)
machine = (
machines[0]
if generator.machine is None
else Machine(name=generator.machine, flake=machines[0].flake)
)
if check_can_migrate(machine, generator):
migrate_files(machine, generator)
else:

View File

@@ -1,71 +0,0 @@
{ self, inputs, ... }:
{
perSystem =
{ pkgs, self', ... }:
let
# Simply evaluated options (JSON)
# { clanCore = «derivation JSON»; clanModules = { ${name} = «derivation JSON» }; }
jsonDocs = pkgs.callPackage ./get-module-docs.nix {
inherit (self) clanModules;
clan-core = self;
inherit pkgs;
};
# clan service options
clanModulesViaService = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModulesViaService);
# Simply evaluated options (JSON)
renderOptions =
pkgs.runCommand "render-options"
{
# TODO: ruff does not splice properly in nativeBuildInputs
depsBuildBuild = [ pkgs.ruff ];
nativeBuildInputs = [
pkgs.python3
pkgs.mypy
self'.packages.clan-cli
];
}
''
install -D -m755 ${./generate}/__init__.py $out/bin/render-options
patchShebangs --build $out/bin/render-options
ruff format --check --diff $out/bin/render-options
ruff check --line-length 88 $out/bin/render-options
mypy --strict $out/bin/render-options
'';
module-docs =
pkgs.runCommand "rendered"
{
buildInputs = [
pkgs.python3
self'.packages.clan-cli
];
}
''
export CLAN_CORE_PATH=${
inputs.nixpkgs.lib.fileset.toSource {
root = ../..;
fileset = ../../clanModules;
}
}
export CLAN_CORE_DOCS=${jsonDocs.clanCore}/share/doc/nixos/options.json
# A file that contains the links to all clanModule docs
export CLAN_MODULES_VIA_SERVICE=${clanModulesViaService}
export CLAN_SERVICE_INTERFACE=${self'.legacyPackages.clan-service-module-interface}/share/doc/nixos/options.json
export CLAN_OPTIONS_PATH=${self'.legacyPackages.clan-options}/share/doc/nixos/options.json
mkdir $out
# The python script will place mkDocs files in the output directory
exec python3 ${renderOptions}/bin/render-options
'';
in
{
packages = {
inherit module-docs;
};
};
}

View File

@@ -2,14 +2,12 @@
{
imports = [
./clan-app/flake-module.nix
./clan-cli/flake-module.nix
./clan-core-flake/flake-module.nix
./clan-vm-manager/flake-module.nix
./icon-update/flake-module.nix
./installer/flake-module.nix
./option-search/flake-module.nix
./docs-from-code/flake-module.nix
./icon-update/flake-module.nix
./clan-core-flake/flake-module.nix
./clan-app/flake-module.nix
./testing/flake-module.nix
];