Compare commits

..

8 Commits

Author SHA1 Message Date
Jörg Thalheim
a2e4b98a93 clan-cli/secrets: suggest clan vars keygen instead 2025-10-07 11:12:04 +02:00
Louis Opter
25ce97dd5e clan-cli/secrets: update some error message in encrypt_secret
Found that while reading through some code.
2025-10-07 11:09:00 +02:00
a-kenji
bd361b2744 docs: Fix nixpkgs hierarchy 2025-10-07 11:09:00 +02:00
clan-bot
ac901f5656 Update nixpkgs-dev in devFlake 2025-10-07 11:09:00 +02:00
clan-bot
8339ad605c Update clan-core-for-checks in devFlake 2025-10-07 11:09:00 +02:00
clan-bot
3473aaa440 Update clan-core-for-checks in devFlake 2025-10-07 11:09:00 +02:00
clan-bot
e983b10331 Update clan-core-for-checks in devFlake 2025-10-07 11:09:00 +02:00
clan-bot
fcf5ccd115 Update nixpkgs-dev in devFlake 2025-10-07 11:09:00 +02:00
87 changed files with 853 additions and 1402 deletions

View File

@@ -1,12 +0,0 @@
## Description of the change
<!-- Brief summary of the change if not already clear from the title -->
## Checklist
- [ ] Updated Documentation
- [ ] Added tests
- [ ] Doesn't affect backwards compatibility - or check the next points
- [ ] Add the breaking change and migration details to docs/release-notes.md
- !!! Review from another person is required *BEFORE* merge !!!
- [ ] Add introduction of major feature to docs/release-notes.md

View File

@@ -19,19 +19,28 @@ let
nixosLib = import (self.inputs.nixpkgs + "/nixos/lib") { };
in
{
imports = filter pathExists [
./devshell/flake-module.nix
./flash/flake-module.nix
./installation/flake-module.nix
./update/flake-module.nix
./morph/flake-module.nix
./nixos-documentation/flake-module.nix
./dont-depend-on-repo-root.nix
# clan core submodule tests
../nixosModules/clanCore/machine-id/tests/flake-module.nix
../nixosModules/clanCore/postgresql/tests/flake-module.nix
../nixosModules/clanCore/state-version/tests/flake-module.nix
];
imports =
let
clanCoreModulesDir = ../nixosModules/clanCore;
getClanCoreTestModules =
let
moduleNames = attrNames (builtins.readDir clanCoreModulesDir);
testPaths = map (
moduleName: clanCoreModulesDir + "/${moduleName}/tests/flake-module.nix"
) moduleNames;
in
filter pathExists testPaths;
in
getClanCoreTestModules
++ filter pathExists [
./devshell/flake-module.nix
./flash/flake-module.nix
./installation/flake-module.nix
./update/flake-module.nix
./morph/flake-module.nix
./nixos-documentation/flake-module.nix
./dont-depend-on-repo-root.nix
];
flake.check = genAttrs [ "x86_64-linux" "aarch64-darwin" ] (
system:
let
@@ -111,7 +120,7 @@ in
) (self.darwinConfigurations or { })
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") (
if system == "aarch64-darwin" then
lib.filterAttrs (n: _: n != "docs" && n != "deploy-docs" && n != "option-search") packagesToBuild
lib.filterAttrs (n: _: n != "docs" && n != "deploy-docs" && n != "docs-options") packagesToBuild
else
packagesToBuild
)

View File

@@ -15,6 +15,7 @@ let
networking.useNetworkd = true;
services.openssh.enable = true;
services.openssh.settings.UseDns = false;
services.openssh.settings.PasswordAuthentication = false;
system.nixos.variant_id = "installer";
environment.systemPackages = [
pkgs.nixos-facter

View File

@@ -50,13 +50,13 @@
dns =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.nettools ];
environment.systemPackages = [ pkgs.net-tools ];
};
client =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.nettools ];
environment.systemPackages = [ pkgs.net-tools ];
};
server01 = {

View File

@@ -1,39 +1,91 @@
The `sshd` Clan service manages SSH to make it easy to securely access your
machines over the internet. The service uses `vars` to store the SSH host keys
for each machine to ensure they remain stable across deployments.
# Clan service: sshd
What it does
- Generates and persists SSH host keys via `vars`.
- Optionally issues CA-signed host certificates for servers.
- Installs the `server` CA public key into clients' `known_hosts` for TOFU-less verification.
`sshd` also generates SSH certificates for both servers and clients allowing for
certificate-based authentication for SSH.
The service also disables password-based authentication over SSH, to access your
machines you'll need to use public key authentication or certificate-based
authentication.
When to use it
- Zero-TOFU SSH for dynamic fleets: admins/CI can connect to frequently rebuilt hosts (e.g., server-1.example.com) without prompts or per-host `known_hosts` churn.
## Usage
Roles
- Server: runs sshd, presents a CA-signed host certificate for `<machine>.<domain>`.
- Client: trusts the CA for the given domains to verify servers' certificates.
Tip: assign both roles to a machine if it should both present a cert and verify others.
Quick start (with host certificates)
Useful if you never want to get a prompt about trusting the ssh fingerprint.
```nix
{
inventory.instances = {
sshd-with-certs = {
module = { name = "sshd"; input = "clan-core"; };
# Servers present certificates for <machine>.example.com
roles.server.tags.all = { };
roles.server.settings = {
certificate.searchDomains = [ "example.com" ];
# Optional: also add RSA host keys
# hostKeys.rsa.enable = true;
};
# Clients trust the CA for *.example.com
roles.client.tags.all = { };
roles.client.settings = {
certificate.searchDomains = [ "example.com" ];
};
};
};
}
```
Basic: only add persistent host keys (ed25519), no certificates
Useful if you want to get an ssh "trust this server" prompt once and then never again.
```nix
{
inventory.instances = {
# By default this service only generates ed25519 host keys
sshd-basic = {
module = {
name = "sshd";
input = "clan-core";
};
roles.server.tags.all = { };
roles.client.tags.all = { };
};
# Also generate RSA host keys for all servers
sshd-with-rsa = {
module = {
name = "sshd";
input = "clan-core";
};
roles.server.tags.all = { };
roles.server.settings = {
hostKeys.rsa.enable = true;
};
roles.client.tags.all = { };
};
};
}
```
Example: selective trust per environment
Admins should trust only production; CI should trust prod and staging. Servers are reachable under both domains.
```nix
{
inventory.instances = {
sshd-env-scoped = {
module = { name = "sshd"; input = "clan-core"; };
# Servers present certs for both prod and staging FQDNs
roles.server.tags.all = { };
roles.server.settings = {
certificate.searchDomains = [ "prod.example.com" "staging.example.com" ];
};
# Admin laptop: trust prod only
roles.client.machines."admin-laptop".settings = {
certificate.searchDomains = [ "prod.example.com" ];
};
# CI runner: trust prod and staging
roles.client.machines."ci-runner-1".settings = {
certificate.searchDomains = [ "prod.example.com" "staging.example.com" ];
};
};
};
}
```
- Admin -> server1.prod.example.com: zero-TOFU (verified via cert).
- Admin -> server1.staging.example.com: falls back to TOFU (or is blocked by policy).
- CI -> either prod or staging: zero-TOFU for both.
Note: server and client searchDomains don't have to be identical; they only need to overlap for the hostnames you actually use.
Notes
- Connect using a name that matches a cert principal (e.g., `server1.example.com`); wildcards are not allowed inside the certificate.
- CA private key stays in `vars` (not deployed); only the CA public key is distributed.
- Logins still require your user SSH keys on the server (passwords are disabled).

View File

@@ -11,7 +11,9 @@
pkgs.syncthing
];
script = ''
syncthing generate --config "$out"
export TMPDIR=/tmp
TEMPORARY=$(mktemp -d)
syncthing generate --config "$out" --data "$TEMPORARY"
mv "$out"/key.pem "$out"/key
mv "$out"/cert.pem "$out"/cert
cat "$out"/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > "$out"/id

24
devFlake/flake.lock generated
View File

@@ -3,16 +3,16 @@
"clan-core-for-checks": {
"flake": false,
"locked": {
"lastModified": 1760368011,
"narHash": "sha256-mLK2nwbfklfOGIVAKVNDwGyYz8mPh4fzsAqSK3BlCiI=",
"ref": "clan-25.05",
"rev": "1b3c129aa9741d99b27810652ca888b3fbfc3a11",
"lastModified": 1759795610,
"narHash": "sha256-YFOK+aoJjWLfMHj2spvrQIe0ufIsv6P8o44NqoFPwp0=",
"ref": "main",
"rev": "0de79962eacfe6f09d7aabca2a7305deef4fde0c",
"shallow": true,
"type": "git",
"url": "https://git.clan.lol/clan/clan-core"
},
"original": {
"ref": "clan-25.05",
"ref": "main",
"shallow": true,
"type": "git",
"url": "https://git.clan.lol/clan/clan-core"
@@ -105,16 +105,16 @@
},
"nixpkgs-dev": {
"locked": {
"lastModified": 1760309387,
"narHash": "sha256-e0lvQ7+B1Y8zjykYHAj9tBv10ggLqK0nmxwvMU3J0Eo=",
"lastModified": 1759794031,
"narHash": "sha256-Zruni/00BlDHSWVJf3mb0o+OHnxIvJNuXkPloY9c+PU=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "6cd95994a9c8f7c6f8c1f1161be94119afdcb305",
"rev": "09c221b2f0726da85b124efb60a1d123971dfa08",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-25.05-small",
"ref": "nixos-unstable-small",
"repo": "nixpkgs",
"type": "github"
}
@@ -208,11 +208,11 @@
"nixpkgs": []
},
"locked": {
"lastModified": 1760120816,
"narHash": "sha256-gq9rdocpmRZCwLS5vsHozwB6b5nrOBDNc2kkEaTXHfg=",
"lastModified": 1758728421,
"narHash": "sha256-ySNJ008muQAds2JemiyrWYbwbG+V7S5wg3ZVKGHSFu8=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "761ae7aff00907b607125b2f57338b74177697ed",
"rev": "5eda4ee8121f97b218f7cc73f5172098d458f1d1",
"type": "github"
},
"original": {

View File

@@ -2,7 +2,7 @@
description = "private dev inputs";
# Dev dependencies
inputs.nixpkgs-dev.url = "github:NixOS/nixpkgs/nixos-25.05-small";
inputs.nixpkgs-dev.url = "github:NixOS/nixpkgs/nixos-unstable-small";
inputs.flake-utils.url = "github:numtide/flake-utils";
inputs.flake-utils.inputs.systems.follows = "systems";
@@ -15,7 +15,7 @@
inputs.systems.url = "github:nix-systems/default";
inputs.clan-core-for-checks.url = "git+https://git.clan.lol/clan/clan-core?ref=clan-25.05&shallow=1";
inputs.clan-core-for-checks.url = "git+https://git.clan.lol/clan/clan-core?ref=main&shallow=1";
inputs.clan-core-for-checks.flake = false;
inputs.test-fixtures.url = "git+https://git.clan.lol/clan/test-fixtures";

2
docs/.gitignore vendored
View File

@@ -1,6 +1,6 @@
/site/reference
/site/services/official
/site/static
/site/option-search
/site/options
/site/openapi.json
!/site/static/extra.css

View File

@@ -1,5 +1,5 @@
# Serve documentation locally
```
nix develop .#docs -c mkdocs serve
$ nix develop .#docs -c mkdocs serve
```

41
docs/main.py Normal file
View File

@@ -0,0 +1,41 @@
from typing import Any
def define_env(env: Any) -> None:
    """Register documentation macros on the mkdocs-macros environment.

    Adds two macros callable from documentation pages:
    - ``video(name)``: embeds a looping, muted, autoplaying webm video
      hosted at ``https://clan.lol/videos/``.
    - ``asciinema(name)``: embeds an asciinema-player cast, loading the
      player script/css from the locally vendored ``/static/`` assets.

    NOTE(review): `env` is assumed to be the mkdocs-macros plugin
    environment (must provide the ``.macro`` decorator) — confirm against
    the mkdocs-macros plugin configuration.
    """
    # Base locations: site-relative static assets and the hosted video CDN.
    static_dir = "/static/"
    video_dir = "https://clan.lol/" + "videos/"
    asciinema_dir = static_dir + "asciinema-player/"

    @env.macro
    def video(name: str) -> str:
        # HTML <video> snippet pointing at the hosted webm file.
        return f"""<video loop muted autoplay id="{name}">
        <source src={video_dir + name} type="video/webm">
        Your browser does not support the video tag.
        </video>"""

    @env.macro
    def asciinema(name: str) -> str:
        # HTML snippet that lazily injects the asciinema-player script and
        # then creates a player bound to the div with id `name`.
        # Note: {{ and }} are f-string escapes for literal JS braces.
        return f"""<div id="{name}">
        <script>
        // Function to load the script and then create the Asciinema player
        function loadAsciinemaPlayer() {{
            var script = document.createElement('script');
            script.src = "{asciinema_dir}/asciinema-player.min.js";
            script.onload = function() {{
                AsciinemaPlayer.create('{video_dir + name}', document.getElementById("{name}"), {{
                    loop: true,
                    autoPlay: true,
                    controls: false,
                    speed: 1.5,
                    theme: "solarized-light"
                }});
            }};
            document.head.appendChild(script);
        }}
        // Load the Asciinema player script
        loadAsciinemaPlayer();
        </script>
        <link rel="stylesheet" type="text/css" href="{asciinema_dir}/asciinema-player.css" />
        </div>"""

View File

@@ -58,7 +58,7 @@ nav:
- getting-started/configure-disk.md
- getting-started/update-machines.md
- getting-started/continuous-integration.md
- Convert existing NixOS configurations: getting-started/convert-existing-NixOS-configuration.md
- getting-started/convert-existing-NixOS-configuration.md
- Guides:
- Inventory:
- Introduction to Inventory: guides/inventory/inventory.md
@@ -66,7 +66,6 @@ nav:
- Services:
- Introduction to Services: guides/services/introduction-to-services.md
- Author Your Own Service: guides/services/community.md
- Internal Services with SSL: guides/internal-ssl-services.md
- Vars:
- Introduction to Vars: guides/vars/vars-overview.md
- Minimal Example: guides/vars/vars-backend.md
@@ -180,7 +179,7 @@ nav:
- services/official/zerotier.md
- services/community.md
- Search Clan Options: "/option-search"
- Search Clan Options: "/options"
docs_dir: site
site_dir: out

View File

@@ -3,9 +3,11 @@
module-docs,
clan-cli-docs,
clan-lib-openapi,
asciinema-player-js,
asciinema-player-css,
roboto,
fira-code,
option-search,
docs-options,
...
}:
let
@@ -51,9 +53,13 @@ pkgs.stdenv.mkDerivation {
chmod -R +w ./site
echo "Generated API documentation in './site/reference/' "
rm -rf ./site/option-search
cp -r ${option-search} ./site/option-search
chmod -R +w ./site/option-search
rm -rf ./site/options
cp -r ${docs-options} ./site/options
chmod -R +w ./site/options
mkdir -p ./site/static/asciinema-player
ln -snf ${asciinema-player-js} ./site/static/asciinema-player/asciinema-player.min.js
ln -snf ${asciinema-player-css} ./site/static/asciinema-player/asciinema-player.css
# Link to fonts
ln -snf ${roboto}/share/fonts/truetype/Roboto-Regular.ttf ./site/static/

View File

@@ -1,5 +1,8 @@
{ inputs, ... }:
{ inputs, self, ... }:
{
imports = [
./options/flake-module.nix
];
perSystem =
{
config,
@@ -7,7 +10,83 @@
pkgs,
...
}:
let
# Simply evaluated options (JSON)
# { clanCore = «derivation JSON»; clanModules = { ${name} = «derivation JSON» }; }
jsonDocs = pkgs.callPackage ./get-module-docs.nix {
inherit (self) clanModules;
clan-core = self;
inherit pkgs;
};
# clan service options
clanModulesViaService = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModulesViaService);
# Simply evaluated options (JSON)
renderOptions =
pkgs.runCommand "render-options"
{
# TODO: ruff does not splice properly in nativeBuildInputs
depsBuildBuild = [ pkgs.ruff ];
nativeBuildInputs = [
pkgs.python3
pkgs.mypy
self'.packages.clan-cli
];
}
''
install -D -m755 ${./render_options}/__init__.py $out/bin/render-options
patchShebangs --build $out/bin/render-options
ruff format --check --diff $out/bin/render-options
ruff check --line-length 88 $out/bin/render-options
mypy --strict $out/bin/render-options
'';
asciinema-player-js = pkgs.fetchurl {
url = "https://github.com/asciinema/asciinema-player/releases/download/v3.7.0/asciinema-player.min.js";
sha256 = "sha256-Ymco/+FinDr5YOrV72ehclpp4amrczjo5EU3jfr/zxs=";
};
asciinema-player-css = pkgs.fetchurl {
url = "https://github.com/asciinema/asciinema-player/releases/download/v3.7.0/asciinema-player.css";
sha256 = "sha256-GZMeZFFGvP5GMqqh516mjJKfQaiJ6bL38bSYOXkaohc=";
};
module-docs =
pkgs.runCommand "rendered"
{
buildInputs = [
pkgs.python3
self'.packages.clan-cli
];
}
''
export CLAN_CORE_PATH=${
inputs.nixpkgs.lib.fileset.toSource {
root = ../..;
fileset = ../../clanModules;
}
}
export CLAN_CORE_DOCS=${jsonDocs.clanCore}/share/doc/nixos/options.json
# A file that contains the links to all clanModule docs
export CLAN_MODULES_VIA_SERVICE=${clanModulesViaService}
export CLAN_SERVICE_INTERFACE=${self'.legacyPackages.clan-service-module-interface}/share/doc/nixos/options.json
export CLAN_OPTIONS_PATH=${self'.legacyPackages.clan-options}/share/doc/nixos/options.json
mkdir $out
# The python script will place mkDocs files in the output directory
exec python3 ${renderOptions}/bin/render-options
'';
in
{
legacyPackages = {
inherit
jsonDocs
clanModulesViaService
;
};
devShells.docs = self'.packages.docs.overrideAttrs (_old: {
nativeBuildInputs = [
# Run: htmlproofer --disable-external
@@ -26,20 +105,22 @@
docs = pkgs.python3.pkgs.callPackage ./default.nix {
inherit (self'.packages)
clan-cli-docs
option-search
docs-options
inventory-api-docs
clan-lib-openapi
module-docs
;
inherit (inputs) nixpkgs;
inherit module-docs;
inherit asciinema-player-js;
inherit asciinema-player-css;
};
deploy-docs = pkgs.callPackage ./deploy-docs.nix { inherit (config.packages) docs; };
inherit module-docs;
};
checks.docs-integrity =
pkgs.runCommand "docs-integrity"
{
nativeBuildInputs = [ pkgs.html-proofer ];
LANG = "C.UTF-8";
}
''
# External links should be avoided in the docs, because they often break

View File

@@ -24,7 +24,7 @@
serviceModules = self.clan.modules;
baseHref = "/option-search/";
baseHref = "/options/";
getRoles =
module:
@@ -118,7 +118,7 @@
_file = "docs flake-module";
imports = [
{ _module.args = { inherit clanLib; }; }
(import ../../lib/modules/inventoryClass/roles-interface.nix {
(import ../../../lib/modules/inventoryClass/roles-interface.nix {
nestedSettingsOption = mkOption {
type = types.raw;
description = ''
@@ -201,7 +201,7 @@
# };
packages = {
option-search =
docs-options =
if privateInputs ? nuschtos then
privateInputs.nuschtos.packages.${pkgs.stdenv.hostPlatform.system}.mkMultiSearch {
inherit baseHref;

View File

@@ -1,9 +0,0 @@
# clan-core release notes 25.11
<!-- This is not rendered yet -->
## New features
## Breaking Changes
## Misc

View File

@@ -4,14 +4,14 @@ This guide will help you convert your existing NixOS configurations into a Clan.
Migrating instead of starting new can be trickier and might lead to bugs or
unexpected issues. We recommend reading the [Getting Started](../getting-started/creating-your-first-clan.md) guide first.
Once you have a working setup and understand the concepts transferring your NixOS configurations over is easy.
Once you have a working setup and understand the concepts transfering your NixOS configurations over is easy.
## Back up your existing configuration
Before you start, it is strongly recommended to back up your existing
configuration in any form you see fit. If you use version control to manage
your configuration changes, it is also a good idea to follow the migration
guide in a separate branch until everything works as expected.
guide in a separte branch until everything works as expected.
## Starting Point

View File

@@ -67,59 +67,6 @@ nix build .#checks.x86_64-linux.{test-attr-name}
```
(replace `{test-attr-name}` with the name of the test)
### Testing services with vars
Services that define their own vars (using `clan.core.vars.generators`) require generating test vars before running the tests.
#### Understanding the `clan.directory` setting
The `clan.directory` option is critical for vars generation and loading in tests. This setting determines:
1. **Where vars are generated**: When you run `update-vars`, it creates `vars/` and `sops/` directories inside the path specified by `clan.directory`
2. **Where vars are loaded from**: During test execution, machines look for their vars and secrets relative to `clan.directory`
#### Generating test vars
For services that define vars, you must first run:
```shellSession
nix run .#checks.x86_64-linux.{test-attr-name}.update-vars
```
This generates the necessary var files in the directory specified by `clan.directory`. After running this command, you can run the test normally:
```shellSession
nix run .#checks.x86_64-linux.{test-attr-name}
```
#### Example: service-dummy-test
The `service-dummy-test` is a good example of a test that uses vars. To run it:
```shellSession
# First, generate the test vars
nix run .#checks.x86_64-linux.service-dummy-test.update-vars
# Then run the test
nix run .#checks.x86_64-linux.service-dummy-test
```
#### Common issues
If `update-vars` fails, you may need to ensure that:
- **`clan.directory` is set correctly**: It should point to the directory where you want vars to be generated (typically `clan.directory = ./.;` in your test definition)
- **Your test defines machines**: Machines must be defined in `clan.inventory.machines` or through the inventory system
- **Machine definitions are complete**: Each machine should have the necessary service configuration that defines the vars generators
**If vars are not found during test execution:**
- Verify that `clan.directory` points to the same location where you ran `update-vars`
- Check that the `vars/` and `sops/` directories exist in that location
- Ensure the generated files match the machines and generators defined in your test
You can reference `/checks/service-dummy-test/` to see a complete working example of a test with vars, including the correct directory structure.
### Debugging VM tests
The following techniques can be used to debug a VM test:

View File

@@ -1,213 +0,0 @@
A common use case you might have is to host services and applications which are
only reachable within your clan.
This guide explains how to set up such secure, clan-internal web services using
a custom top-level domain (TLD) with SSL certificates.
Your services will be accessible only within your clan network and secured with
proper SSL certificates that all clan machines trust.
## Overview
By combining the `coredns` and `certificates` clan services, you can:
- Create a custom TLD for your clan (e.g. `.c`)
- Host internal web services accessible via HTTPS (e.g. `https://api.c`, `https://dashboard.c`)
- Automatically provision and trust SSL certificates across all clan machines
- Keep internal services secure and isolated from the public internet
The setup uses two clan services working together:
- **coredns service**: Provides DNS resolution for your custom TLD within the clan
- **certificates service**: Creates a certificate authority (CA) and issues SSL certificates for your TLD
### DNS Resolution Flow
1. A clan machine tries to access `https://service.c`
2. The machine queries its local DNS resolver (unbound)
3. For `.c` domains, the query is forwarded to your clan's CoreDNS server. All
other domains will be resolved as usual.
4. CoreDNS returns the IP address of the machine hosting the service
5. The machine connects directly to the service over HTTPS
6. The SSL certificate is trusted because all machines trust your clan's CA
## Step-by-Step Setup
The following setup assumes you have a VPN (e.g. Zerotier) already running. The
IPs configured in the options below will probably the Zerotier-IPs of the
respective machines.
### Configure the CoreDNS Service
The CoreDNS service has two roles:
- `server`: Runs the DNS server for your custom TLD
- `default`: Makes machines use the DNS server for TLD resolution and allows exposing services
Add this to your inventory:
```nix
inventory = {
machines = {
dns-server = { }; # Machine that will run the DNS server
web-server = { }; # Machine that will host web services
client = { }; # Any other machines in your clan
};
instances = {
coredns = {
# Add the default role to all machines
roles.default.tags = [ "all" ];
# DNS server for the .c TLD
roles.server.machines.dns-server.settings = {
ip = "192.168.1.10"; # IP of your DNS server machine
tld = "c";
};
# Machine hosting services (example: ca.c and admin.c)
roles.default.machines.web-server.settings = {
ip = "192.168.1.20"; # IP of your web server
services = [ "ca" "admin" ];
};
};
};
};
```
### Configure the Certificates Service
The certificates service also has two roles:
- `ca`: Sets up the certificate authority on a server
- `default`: Makes machines trust the CA and allows them to request certificates
Add this to your inventory:
```nix
inventory = {
instances = {
# ... coredns configuration from above ...
certificates = {
# Set up CA for .c domain
roles.ca.machines.dns-server.settings = {
tlds = [ "c" ];
acmeEmail = "admin@example.com"; # Optional: your email
};
# Add default role to all machines to trust the CA
roles.default.tags = [ "all" ];
};
};
};
```
### Complete Example Configuration
Here's a complete working example:
```nix
nventory = {
machines = {
caserver = { }; # DNS server + CA + web services
webserver = { }; # Additional web services
client = { }; # Client machine
};
instances = {
coredns = {
# Add the default role to all machines
roles.default.tags = [ "all" ];
# DNS server for the .c TLD
roles.server.machines.caserver.settings = {
ip = "192.168.8.5";
tld = "c";
};
# machine hosting https://ca.c (our CA for SSL)
roles.default.machines.caserver.settings = {
ip = "192.168.8.5";
services = [ "ca" ];
};
# machine hosting https://blub.c (some internal web-service)
roles.default.machines.webserver.settings = {
ip = "192.168.8.6";
services = [ "blub" ];
};
};
# Provide https for the .c top-level domain
certificates = {
roles.ca.machines.caserver.settings = {
tlds = [ "c" ];
acmeEmail = "admin@example.com";
};
roles.default.tags = [ "all" ];
};
};
};
```
## Testing Your Configuration
DNS resolution can be tested with:
```bash
# On any clan machine, test DNS resolution
nslookup ca.c
nslookup blub.c
```
You should also now be able to visit `https://ca.c` to access the certificate authority or visit `https://blub.c` to access your web service.
## Troubleshooting
### DNS Resolution Issues
1. **Check if DNS server is running**:
```bash
# On the DNS server machine
systemctl status coredns
```
2. **Verify DNS configuration**:
```bash
# Check if the right nameservers are configured
cat /etc/resolv.conf
systemctl status systemd-resolved
```
3. **Test DNS directly**:
```bash
# Query the DNS server directly
dig @192.168.8.5 ca.c
```
### Certificate Issues
1. **Check CA status**:
```bash
# On the CA machine
systemctl status step-ca
systemctl status nginx
```
2. **Verify certificate trust**:
```bash
# Test certificate trust
curl -v https://ca.c
openssl s_client -connect ca.c:443 -verify_return_error
```
3. **Check ACME configuration**:
```bash
# View ACME certificates
ls /var/lib/acme/
journalctl -u acme-ca.c.service
```

View File

@@ -288,7 +288,7 @@ of their type.
In the inventory we the assign machines to a type, e.g. by using tags
```nix title="flake.nix"
instances.machine-type = {
instnaces.machine-type = {
module.input = "self";
module.name = "@pinpox/machine-type";
roles.desktop.tags.desktop = { };
@@ -303,4 +303,3 @@ instances.machine-type = {
- [Reference Documentation for Service Authors](../../reference/options/clan_service.md)
- [Migration Guide from ClanModules to ClanServices](../../guides/migrations/migrate-inventory-services.md)
- [Decision that lead to ClanServices](../../decisions/01-Clan-Modules.md)
- [Testing Guide for Services with Vars](../contributing/testing.md#testing-services-with-vars)

View File

@@ -122,7 +122,7 @@ hide:
command line interface
- [Clan Options](./reference/options/clan.md)
- [Clan Options](/options)
---

View File

@@ -4,10 +4,10 @@ This section of the site provides an overview of available options and commands
---
- [Clan Configuration Option](/options) - for defining a Clan
- Learn how to use the [Clan CLI](../reference/cli/index.md)
- Explore available [services](../services/definition.md)
- [NixOS Configuration Options](../reference/clan.core/index.md) - Additional options available on a NixOS machine.
- [Search Clan Option](/option-search) - for defining a Clan
---

23
flake.lock generated
View File

@@ -71,16 +71,15 @@
]
},
"locked": {
"lastModified": 1759509947,
"narHash": "sha256-4XifSIHfpJKcCf5bZZRhj8C4aCpjNBaE3kXr02s4rHU=",
"lastModified": 1758805352,
"narHash": "sha256-BHdc43Lkayd+72W/NXRKHzX5AZ+28F3xaUs3a88/Uew=",
"owner": "nix-darwin",
"repo": "nix-darwin",
"rev": "000eadb231812ad6ea6aebd7526974aaf4e79355",
"rev": "c48e963a5558eb1c3827d59d21c5193622a1477c",
"type": "github"
},
"original": {
"owner": "nix-darwin",
"ref": "nix-darwin-25.05",
"repo": "nix-darwin",
"type": "github"
}
@@ -115,15 +114,15 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1760324802,
"narHash": "sha256-VWlJtLQ5EQQj45Wj0yTExtSjwRyZ59/qMqEwus/Exlg=",
"rev": "7e297ddff44a3cc93673bb38d0374df8d0ad73e4",
"lastModified": 315532800,
"narHash": "sha256-1tUpklZsKzMGI3gjo/dWD+hS8cf+5Jji8TF5Cfz7i3I=",
"rev": "08b8f92ac6354983f5382124fef6006cade4a1c1",
"type": "tarball",
"url": "https://releases.nixos.org/nixos/25.05/nixos-25.05.811135.7e297ddff44a/nixexprs.tar.xz"
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre862603.08b8f92ac635/nixexprs.tar.xz"
},
"original": {
"type": "tarball",
"url": "https://nixos.org/channels/nixos-25.05/nixexprs.tar.xz"
"url": "https://nixos.org/channels/nixpkgs-unstable/nixexprs.tar.xz"
}
},
"root": {
@@ -182,11 +181,11 @@
]
},
"locked": {
"lastModified": 1760120816,
"narHash": "sha256-gq9rdocpmRZCwLS5vsHozwB6b5nrOBDNc2kkEaTXHfg=",
"lastModified": 1758728421,
"narHash": "sha256-ySNJ008muQAds2JemiyrWYbwbG+V7S5wg3ZVKGHSFu8=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "761ae7aff00907b607125b2f57338b74177697ed",
"rev": "5eda4ee8121f97b218f7cc73f5172098d458f1d1",
"type": "github"
},
"original": {

View File

@@ -2,9 +2,9 @@
description = "clan.lol base operating system";
inputs = {
nixpkgs.url = "https://nixos.org/channels/nixos-25.05/nixexprs.tar.xz";
nixpkgs.url = "https://nixos.org/channels/nixpkgs-unstable/nixexprs.tar.xz";
nix-darwin.url = "github:nix-darwin/nix-darwin/nix-darwin-25.05";
nix-darwin.url = "github:nix-darwin/nix-darwin";
nix-darwin.inputs.nixpkgs.follows = "nixpkgs";
flake-parts.url = "github:hercules-ci/flake-parts";
@@ -77,8 +77,6 @@
};
};
};
# Allows downstream users to inject "unsupported" nixpkgs versions
checks.minNixpkgsVersion.ignore = true;
};
systems = import systems;
imports = [

View File

@@ -11,6 +11,8 @@
treefmt.programs.nixfmt.enable = true;
treefmt.programs.nixfmt.package = pkgs.nixfmt-rfc-style;
treefmt.programs.deadnix.enable = true;
treefmt.programs.sizelint.enable = true;
treefmt.programs.sizelint.failOnWarn = true;
treefmt.programs.clang-format.enable = true;
treefmt.settings.global.excludes = [
"*.png"
@@ -50,6 +52,8 @@
"checks/secrets/sops/groups/group/machines/machine"
"checks/syncthing/introducer/introducer_device_id"
"checks/syncthing/introducer/introducer_test_api"
"docs/site/static/asciinema-player/asciinema-player.css"
"docs/site/static/asciinema-player/asciinema-player.min.js"
"nixosModules/clanCore/vars/secret/sops/eval-tests/populated/vars/my_machine/my_generator/my_secret"
"pkgs/clan-cli/clan_cli/tests/data/gnupg.conf"
"pkgs/clan-cli/clan_cli/tests/data/password-store/.gpg-id"
@@ -90,6 +94,9 @@
"*.yaml"
"*.yml"
];
excludes = [
"*/asciinema-player/*"
];
};
treefmt.programs.mypy.directories = {
"clan-cli" = {

View File

@@ -1,51 +0,0 @@
{ lib }:
let
sanitizePath =
rootPath: path:
let
storePrefix = builtins.unsafeDiscardStringContext ("${rootPath}");
pathStr = lib.removePrefix "/" (
lib.removePrefix storePrefix (builtins.unsafeDiscardStringContext (toString path))
);
in
pathStr;
mkFunctions = rootPath: passthru: virtual_fs: {
# Some functions to override lib functions
pathExists =
path:
let
pathStr = sanitizePath rootPath path;
isPassthru = builtins.any (exclude: (builtins.match exclude pathStr) != null) passthru;
in
if isPassthru then
builtins.pathExists path
else
let
res = virtual_fs ? ${pathStr};
in
lib.trace "pathExists: '${pathStr}' -> '${lib.generators.toPretty { } res}'" res;
readDir =
path:
let
pathStr = sanitizePath rootPath path;
base = (pathStr + "/");
res = lib.mapAttrs' (name: fileInfo: {
name = lib.removePrefix base name;
value = fileInfo.type;
}) (lib.filterAttrs (n: _: lib.hasPrefix base n) virtual_fs);
isPassthru = builtins.any (exclude: (builtins.match exclude pathStr) != null) passthru;
in
if isPassthru then
builtins.readDir path
else
lib.trace "readDir: '${pathStr}' -> '${lib.generators.toPretty { } res}'" res;
};
in
{
virtual_fs,
rootPath,
# Patterns
passthru ? [ ],
}:
mkFunctions rootPath passthru virtual_fs

View File

@@ -28,6 +28,7 @@ lib.fix (
# Plain imports.
introspection = import ./introspection { inherit lib; };
jsonschema = import ./jsonschema { inherit lib; };
facts = import ./facts.nix { inherit lib; };
docs = import ./docs.nix { inherit lib; };
# flakes
@@ -35,10 +36,6 @@ lib.fix (
# TODO: Flatten our lib functions like this:
resolveModule = clanLib.callLib ./resolve-module { };
fs = {
inherit (builtins) pathExists readDir;
};
};
in
f

71
lib/facts.nix Normal file
View File

@@ -0,0 +1,71 @@
{ lib, ... }:
clanDir:
let
allMachineNames = lib.mapAttrsToList (name: _: name) (builtins.readDir clanDir);
getFactPath = machine: fact: "${clanDir}/machines/${machine}/facts/${fact}";
readFact =
machine: fact:
let
path = getFactPath machine fact;
in
if builtins.pathExists path then builtins.readFile path else null;
# Example:
#
# readFactFromAllMachines zerotier-ip
# => {
# machineA = "1.2.3.4";
# machineB = "5.6.7.8";
# };
readFactFromAllMachines =
fact:
let
machines = allMachineNames;
facts = lib.genAttrs machines (machine: readFact machine fact);
filteredFacts = lib.filterAttrs (_machine: fact: fact != null) facts;
in
filteredFacts;
# all given facts are are set and factvalues are never null.
#
# Example:
#
# readFactsFromAllMachines [ "zerotier-ip" "syncthing.pub" ]
# => {
# machineA =
# {
# "zerotier-ip" = "1.2.3.4";
# "synching.pub" = "1234";
# };
# machineB =
# {
# "zerotier-ip" = "5.6.7.8";
# "synching.pub" = "23456719";
# };
# };
readFactsFromAllMachines =
facts:
let
# machine -> fact -> factvalue
machinesFactsAttrs = lib.genAttrs allMachineNames (
machine: lib.genAttrs facts (fact: readFact machine fact)
);
# remove all machines which don't have all facts set
filteredMachineFactAttrs = lib.filterAttrs (
_machine: values: builtins.all (fact: values.${fact} != null) facts
) machinesFactsAttrs;
in
filteredMachineFactAttrs;
in
{
inherit
allMachineNames
getFactPath
readFact
readFactFromAllMachines
readFactsFromAllMachines
;
}

View File

@@ -149,13 +149,6 @@ let
# TODO: Add index support in nixpkgs first
# else if type.name == "listOf" then
# handleListOf meta.list
else if type.name == "either" then
# For either(oneOf) types, we skip introspection as we cannot
# determine which branch of the union was taken without more context
# This *should* be safe, as it can currently mostly be triggered through
# The `extraModules` setting of inventory modules and seems to be better
# than just aborting entirely.
{ }
else
throw "Yet Unsupported type: ${type.name}";
in

View File

@@ -699,44 +699,4 @@ in
};
};
};
test_listOf_either =
let
evaluated = eval [
{
options.extraModules = lib.mkOption {
description = "List of modules that can be strings, paths, or attrsets";
default = [ ];
type = lib.types.listOf (
lib.types.oneOf [
lib.types.str
lib.types.path
(lib.types.attrsOf lib.types.anything)
]
);
};
}
({
_file = "config.nix";
extraModules = [
"modules/common.nix"
./some/path.nix
{ config = { }; }
];
})
];
result = slib.getPrios { options = evaluated.options; };
in
{
inherit evaluated;
# Test that either types in list items return empty objects
# This is a behavioral test and not necessarily the correct
# behavior. But this is better than crashing on people directly.
expr = result.extraModules.__list;
expected = [
{ }
{ }
{ }
];
};
}

View File

@@ -133,13 +133,12 @@ in
}
)
{
# Note: we use clanLib.fs here, so that we can override it in tests
inventory = lib.optionalAttrs (clanLib.fs.pathExists "${directory}/machines") ({
imports = lib.mapAttrsToList (name: _t: {
_file = "${directory}/machines/${name}";
machines.${name} = { };
}) ((lib.filterAttrs (_: t: t == "directory") (clanLib.fs.readDir "${directory}/machines")));
});
# TODO: Figure out why this causes infinite recursion
inventory.machines = lib.optionalAttrs (builtins.pathExists "${directory}/machines") (
builtins.mapAttrs (_n: _v: { }) (
lib.filterAttrs (_: t: t == "directory") (builtins.readDir "${directory}/machines")
)
);
}
{
inventory.machines = lib.mapAttrs (_n: _: { }) config.machines;

View File

@@ -1,108 +0,0 @@
{
lib ? import <nixpkgs/lib>,
}:
let
clanLibOrig = (import ./.. { inherit lib; }).__unfix__;
clanLibWithFs =
{ virtual_fs }:
lib.fix (
lib.extends (
final: _:
let
clan-core = {
clanLib = final;
modules.clan.default = lib.modules.importApply ./clan { inherit clan-core; };
# Note: Can add other things to "clan-core"
# ... Not needed for this test
};
in
{
clan = import ../clan {
inherit lib clan-core;
};
# Override clanLib.fs for unit-testing against a virtual filesystem
fs = import ../clanTest/virtual-fs.nix { inherit lib; } {
inherit rootPath virtual_fs;
# Example of a passthru
# passthru = [
# ".*inventory\.json$"
# ];
};
}
) clanLibOrig
);
rootPath = ./.;
in
{
test_autoload_directories =
let
vclan =
(clanLibWithFs {
virtual_fs = {
"machines" = {
type = "directory";
};
"machines/foo-machine" = {
type = "directory";
};
"machines/bar-machine" = {
type = "directory";
};
};
}).clan
{ config.directory = rootPath; };
in
{
inherit vclan;
expr = {
machines = lib.attrNames vclan.config.inventory.machines;
definedInMachinesDir = map (
p: lib.hasInfix "/machines/" p
) vclan.options.inventory.valueMeta.configuration.options.machines.files;
};
expected = {
machines = [
"bar-machine"
"foo-machine"
];
definedInMachinesDir = [
true # /machines/foo-machine
true # /machines/bar-machine
false # <clan-core>/module.nix defines "machines" without members
];
};
};
# Could probably be unified with the previous test
# This is here for the sake to show that 'virtual_fs' is a test parameter
test_files_are_not_machines =
let
vclan =
(clanLibWithFs {
virtual_fs = {
"machines" = {
type = "directory";
};
"machines/foo.nix" = {
type = "file";
};
"machines/bar.nix" = {
type = "file";
};
};
}).clan
{ config.directory = rootPath; };
in
{
inherit vclan;
expr = {
machines = lib.attrNames vclan.config.inventory.machines;
};
expected = {
machines = [ ];
};
};
}

View File

@@ -12,7 +12,6 @@ let
in
#######
{
autoloading = import ./dir_test.nix { inherit lib; };
test_missing_self =
let
eval = clan {

View File

@@ -164,25 +164,13 @@
config = lib.mkIf (config.clan.core.secrets != { }) {
clan.core.facts.services = lib.mapAttrs' (
name: service:
lib.warn
''
###############################################################################
# #
# clan.core.secrets.${name} clan.core.facts.services.${name} is deprecated #
# in favor of "vars" #
# #
# Refer to https://docs.clan.lol/guides/migrations/migration-facts-vars/ #
# for migration instructions. #
# #
###############################################################################
''
(
lib.nameValuePair name ({
secret = service.secrets;
public = service.facts;
generator = service.generator;
})
)
lib.warn "clan.core.secrets.${name} is deprecated, use clan.core.facts.services.${name} instead" (
lib.nameValuePair name ({
secret = service.secrets;
public = service.facts;
generator = service.generator;
})
)
) config.clan.core.secrets;
};
}

View File

@@ -6,17 +6,7 @@
}:
{
config.warnings = lib.optionals (config.clan.core.facts.services != { }) [
''
###############################################################################
# #
# Facts are deprecated please migrate any usages to vars instead #
# #
# #
# Refer to https://docs.clan.lol/guides/migrations/migration-facts-vars/ #
# for migration instructions. #
# #
###############################################################################
''
"Facts are deprecated, please migrate them to vars instead, see: https://docs.clan.lol/guides/migrations/migration-facts-vars/"
];
options.clan.core.facts = {

View File

@@ -5,31 +5,33 @@
let
inherit (lib)
filterAttrs
flatten
mapAttrsToList
;
relevantFiles = filterAttrs (
_name: f: f.secret && f.deploy && (f.neededFor == "users" || f.neededFor == "services")
);
collectFiles =
generators:
builtins.concatLists (
mapAttrsToList (
gen_name: generator:
mapAttrsToList (fname: file: {
name = fname;
generator = gen_name;
neededForUsers = file.neededFor == "users";
inherit (generator) share;
inherit (file)
owner
group
mode
restartUnits
;
}) (relevantFiles generator.files)
) generators
);
in
collectFiles
generators:
let
relevantFiles =
generator:
filterAttrs (
_name: f: f.secret && f.deploy && (f.neededFor == "users" || f.neededFor == "services")
) generator.files;
allFiles = flatten (
mapAttrsToList (
gen_name: generator:
mapAttrsToList (fname: file: {
name = fname;
generator = gen_name;
neededForUsers = file.neededFor == "users";
inherit (generator) share;
inherit (file)
owner
group
mode
restartUnits
;
}) (relevantFiles generator)
) generators
);
in
allFiles

View File

@@ -41,7 +41,7 @@ class ApiBridge(Protocol):
def process_request(self, request: BackendRequest) -> None:
"""Process an API request through the middleware chain."""
from clan_app.middleware.base import MiddlewareContext
from clan_app.middleware.base import MiddlewareContext # noqa: PLC0415
with ExitStack() as stack:
# Capture the current call stack up to this point
@@ -62,7 +62,7 @@ class ApiBridge(Protocol):
)
middleware.process(context)
except Exception as e:
from clan_app.middleware.base import (
from clan_app.middleware.base import ( # noqa: PLC0415
MiddlewareError,
)

View File

@@ -191,13 +191,13 @@ class HttpBridge(ApiBridge, BaseHTTPRequestHandler):
return file_data
def do_OPTIONS(self) -> None: # noqa: N802
def do_OPTIONS(self) -> None:
"""Handle CORS preflight requests."""
self.send_response_only(200)
self._send_cors_headers()
self.end_headers()
def do_GET(self) -> None: # noqa: N802
def do_GET(self) -> None:
"""Handle GET requests."""
parsed_url = urlparse(self.path)
path = parsed_url.path
@@ -211,7 +211,7 @@ class HttpBridge(ApiBridge, BaseHTTPRequestHandler):
else:
self.send_api_error_response("info", "Not Found", ["http_bridge", "GET"])
def do_POST(self) -> None: # noqa: N802
def do_POST(self) -> None:
"""Handle POST requests."""
parsed_url = urlparse(self.path)
path = parsed_url.path

View File

@@ -34,7 +34,7 @@ class WebviewBridge(ApiBridge):
log.debug(f"Sending response: {serialized}")
# Import FuncStatus locally to avoid circular import
from .webview import FuncStatus
from .webview import FuncStatus # noqa: PLC0415
self.webview.return_(response._op_key, FuncStatus.SUCCESS, serialized) # noqa: SLF001

View File

@@ -113,27 +113,15 @@ mkShell {
# todo darwin support needs some work
(lib.optionalString stdenv.hostPlatform.isLinux ''
# configure playwright for storybook snapshot testing
# we only want webkit as that matches what the app is rendered with
export PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD=1
export PLAYWRIGHT_BROWSERS_PATH=${
playwright-driver.browsers.override {
withFfmpeg = false;
withFirefox = false;
withWebkit = true;
withChromium = false;
withChromiumHeadlessShell = false;
withChromiumHeadlessShell = true;
}
}
# stop playwright from trying to validate it has downloaded the necessary browsers
# we are providing them manually via nix
export PLAYWRIGHT_SKIP_VALIDATE_HOST_REQUIREMENTS=true
# playwright browser drivers are versioned e.g. webkit-2191
# this helps us avoid having to update the playwright js dependency everytime we update nixpkgs and vice versa
# see vitest.config.js for corresponding launch configuration
export PLAYWRIGHT_WEBKIT_EXECUTABLE=$(find -L "$PLAYWRIGHT_BROWSERS_PATH" -type f -name "pw_run.sh")
export PLAYWRIGHT_HOST_PLATFORM_OVERRIDE="ubuntu-24.04"
'');
}

View File

@@ -53,7 +53,7 @@
"jsdom": "^26.1.0",
"knip": "^5.61.2",
"markdown-to-jsx": "^7.7.10",
"playwright": "~1.55.1",
"playwright": "~1.53.2",
"postcss": "^8.4.38",
"postcss-url": "^10.1.3",
"prettier": "^3.2.5",
@@ -6956,13 +6956,13 @@
}
},
"node_modules/playwright": {
"version": "1.55.1",
"resolved": "https://registry.npmjs.org/playwright/-/playwright-1.55.1.tgz",
"integrity": "sha512-cJW4Xd/G3v5ovXtJJ52MAOclqeac9S/aGGgRzLabuF8TnIb6xHvMzKIa6JmrRzUkeXJgfL1MhukP0NK6l39h3A==",
"version": "1.53.2",
"resolved": "https://registry.npmjs.org/playwright/-/playwright-1.53.2.tgz",
"integrity": "sha512-6K/qQxVFuVQhRQhFsVZ9fGeatxirtrpPgxzBYWyZLEXJzqYwuL4fuNmfOfD5et1tJE4GScKyPNeLhZeRwuTU3A==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
"playwright-core": "1.55.1"
"playwright-core": "1.53.2"
},
"bin": {
"playwright": "cli.js"
@@ -6975,9 +6975,9 @@
}
},
"node_modules/playwright-core": {
"version": "1.55.1",
"resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.55.1.tgz",
"integrity": "sha512-Z6Mh9mkwX+zxSlHqdr5AOcJnfp+xUWLCt9uKV18fhzA8eyxUd8NUWzAjxUh55RZKSYwDGX0cfaySdhZJGMoJ+w==",
"version": "1.53.2",
"resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.53.2.tgz",
"integrity": "sha512-ox/OytMy+2w1jcYEYlOo1Hhp8hZkLCximMTUTMBXjGUA1KoFfiSZ+DU+3a739jsPY0yoKH2TFy9S2fsJas8yAw==",
"dev": true,
"license": "Apache-2.0",
"bin": {

View File

@@ -48,7 +48,7 @@
"jsdom": "^26.1.0",
"knip": "^5.61.2",
"markdown-to-jsx": "^7.7.10",
"playwright": "~1.55.1",
"playwright": "~1.53.2",
"postcss": "^8.4.38",
"postcss-url": "^10.1.3",
"prettier": "^3.2.5",

View File

@@ -1,7 +1,7 @@
import type { Meta, StoryObj } from "@kachurun/storybook-solid";
import { Button, ButtonProps } from "./Button";
import { Component } from "solid-js";
import { expect, fn, waitFor, within } from "storybook/test";
import { expect, fn, waitFor } from "storybook/test";
import { StoryContext } from "@kachurun/storybook-solid-vite";
const getCursorStyle = (el: Element) => window.getComputedStyle(el).cursor;
@@ -216,11 +216,17 @@ const timeout = process.env.NODE_ENV === "test" ? 500 : 2000;
export const Primary: Story = {
args: {
hierarchy: "primary",
onClick: fn(),
onAction: fn(async () => {
// wait 500 ms to simulate an action
await new Promise((resolve) => setTimeout(resolve, timeout));
// randomly fail to check that the loading state still returns to normal
if (Math.random() > 0.5) {
throw new Error("Action failure");
}
}),
},
play: async ({ canvasElement, step, userEvent, args }: StoryContext) => {
const canvas = within(canvasElement);
play: async ({ canvas, step, userEvent, args }: StoryContext) => {
const buttons = await canvas.findAllByRole("button");
for (const button of buttons) {
@@ -232,6 +238,14 @@ export const Primary: Story = {
}
await step(`Click on ${testID}`, async () => {
// check for the loader
const loaders = button.getElementsByClassName("loader");
await expect(loaders.length).toEqual(1);
// assert its width is 0 before we click
const [loader] = loaders;
await expect(loader.clientWidth).toEqual(0);
// move the mouse over the button
await userEvent.hover(button);
@@ -241,8 +255,33 @@ export const Primary: Story = {
// click the button
await userEvent.click(button);
// the click handler should have been called
await expect(args.onClick).toHaveBeenCalled();
// check the button has changed
await waitFor(
async () => {
// the action handler should have been called
await expect(args.onAction).toHaveBeenCalled();
// the button should have a loading class
await expect(button).toHaveClass("loading");
// the loader should be visible
await expect(loader.clientWidth).toBeGreaterThan(0);
// the pointer should have changed to wait
await expect(getCursorStyle(button)).toEqual("wait");
},
{ timeout: timeout + 500 },
);
// wait for the action handler to finish
await waitFor(
async () => {
// the loading class should be removed
await expect(button).not.toHaveClass("loading");
// the loader should be hidden
await expect(loader.clientWidth).toEqual(0);
// the pointer should be normal
await expect(getCursorStyle(button)).toEqual("pointer");
},
{ timeout: timeout + 500 },
);
});
}
},

View File

@@ -57,7 +57,6 @@ export const Button = (props: ButtonProps) => {
return (
<KobalteButton
role="button"
class={cx(
styles.button, // default button class
local.size != "default" && styles[local.size],

View File

@@ -160,47 +160,47 @@ const mockFetcher = <K extends OperationNames>(
},
}) satisfies ApiCall<K>;
// export const Default: Story = {
// args: {},
// decorators: [
// (Story: StoryObj) => {
// const queryClient = new QueryClient({
// defaultOptions: {
// queries: {
// retry: false,
// staleTime: Infinity,
// },
// },
// });
//
// Object.entries(queryData).forEach(([clanURI, clan]) => {
// queryClient.setQueryData(
// ["clans", encodeBase64(clanURI), "details"],
// clan.details,
// );
//
// const machines = clan.machines || {};
//
// queryClient.setQueryData(
// ["clans", encodeBase64(clanURI), "machines"],
// machines,
// );
//
// Object.entries(machines).forEach(([name, machine]) => {
// queryClient.setQueryData(
// ["clans", encodeBase64(clanURI), "machine", name, "state"],
// machine.state,
// );
// });
// });
//
// return (
// <ApiClientProvider client={{ fetch: mockFetcher }}>
// <QueryClientProvider client={queryClient}>
// <Story />
// </QueryClientProvider>
// </ApiClientProvider>
// );
// },
// ],
// };
export const Default: Story = {
args: {},
decorators: [
(Story: StoryObj) => {
const queryClient = new QueryClient({
defaultOptions: {
queries: {
retry: false,
staleTime: Infinity,
},
},
});
Object.entries(queryData).forEach(([clanURI, clan]) => {
queryClient.setQueryData(
["clans", encodeBase64(clanURI), "details"],
clan.details,
);
const machines = clan.machines || {};
queryClient.setQueryData(
["clans", encodeBase64(clanURI), "machines"],
machines,
);
Object.entries(machines).forEach(([name, machine]) => {
queryClient.setQueryData(
["clans", encodeBase64(clanURI), "machine", name, "state"],
machine.state,
);
});
});
return (
<ApiClientProvider client={{ fetch: mockFetcher }}>
<QueryClientProvider client={queryClient}>
<Story />
</QueryClientProvider>
</ApiClientProvider>
);
},
],
};

View File

@@ -11,35 +11,28 @@ export default meta;
type Story = StoryObj<ClanSettingsModalProps>;
const props: ClanSettingsModalProps = {
onClose: fn(),
model: {
uri: "/home/foo/my-clan",
details: {
export const Default: Story = {
args: {
onClose: fn(),
model: {
uri: "/home/foo/my-clan",
name: "Sol",
description: null,
icon: null,
},
fieldsSchema: {
name: {
readonly: true,
reason: null,
readonly_members: [],
},
description: {
readonly: false,
reason: null,
readonly_members: [],
},
icon: {
readonly: false,
reason: null,
readonly_members: [],
fieldsSchema: {
name: {
readonly: true,
reason: null,
},
description: {
readonly: false,
reason: null,
},
icon: {
readonly: false,
reason: null,
},
},
},
},
};
export const Default: Story = {
args: props,
};

View File

@@ -22,9 +22,9 @@ import { Alert } from "@/src/components/Alert/Alert";
import { removeClanURI } from "@/src/stores/clan";
const schema = v.object({
name: v.string(),
description: v.optional(v.string()),
icon: v.optional(v.string()),
name: v.pipe(v.optional(v.string())),
description: v.nullish(v.string()),
icon: v.pipe(v.nullish(v.string())),
});
export interface ClanSettingsModalProps {

View File

@@ -0,0 +1,15 @@
import { Meta, StoryObj } from "@kachurun/storybook-solid";
import { CubeScene } from "./cubes";
const meta: Meta = {
title: "scene/cubes",
component: CubeScene,
};
export default meta;
type Story = StoryObj;
export const Default: Story = {
args: {},
};

View File

@@ -304,10 +304,11 @@ const FlashProgress = () => {
const [store, set] = getStepStore<InstallStoreType>(stepSignal);
onMount(async () => {
const result = await store.flash?.progress?.result;
if (result?.status == "success") {
stepSignal.next();
const result = await store.flash.progress.result;
if (result.status == "success") {
console.log("Flashing Success");
}
stepSignal.next();
});
const handleCancel = async () => {

View File

@@ -165,23 +165,23 @@ export default meta;
type Story = StoryObj<typeof ServiceWorkflow>;
// export const Default: Story = {
// args: {},
// };
//
// export const SelectRoleMembers: Story = {
// render: () => (
// <ServiceWorkflow
// handleSubmit={(instance) => {
// console.log("Submitted instance:", instance);
// }}
// onClose={() => {
// console.log("Closed");
// }}
// initialStep="select:members"
// initialStore={{
// currentRole: "peer",
// }}
// />
// ),
// };
export const Default: Story = {
args: {},
};
export const SelectRoleMembers: Story = {
render: () => (
<ServiceWorkflow
handleSubmit={(instance) => {
console.log("Submitted instance:", instance);
}}
onClose={() => {
console.log("Closed");
}}
initialStep="select:members"
initialStore={{
currentRole: "peer",
}}
/>
),
};

View File

@@ -9,11 +9,7 @@
"esModuleInterop": true,
"jsx": "preserve",
"jsxImportSource": "solid-js",
"types": [
"vite/client",
"vite-plugin-solid-svg/types-component-solid",
"@vitest/browser/providers/playwright"
],
"types": ["vite/client", "vite-plugin-solid-svg/types-component-solid"],
"noEmit": true,
"resolveJsonModule": true,
"allowJs": true,

View File

@@ -40,14 +40,7 @@ export default mergeConfig(
enabled: true,
headless: true,
provider: "playwright",
instances: [
{
browser: "webkit",
launch: {
executablePath: process.env.PLAYWRIGHT_WEBKIT_EXECUTABLE,
},
},
],
instances: [{ browser: "chromium" }],
},
// This setup file applies Storybook project annotations for Vitest
// More info at: https://storybook.js.org/docs/api/portable-stories/portable-stories-vitest#setprojectannotations

View File

@@ -9,7 +9,7 @@ def main() -> None:
load_in_all_api_functions()
# import lazily since we otherwise we do not have all api functions loaded according to Qubasa
from clan_lib.api import API
from clan_lib.api import API # noqa: PLC0415
schema = API.to_json_schema()
print(f"""{json.dumps(schema, indent=2)}""")

View File

@@ -75,14 +75,13 @@ class TestFlake(Flake):
def path(self) -> Path:
return self.test_dir
def machine_selector(self, machine_name: str, selector: str) -> str:
"""Create a selector for a specific machine.
def select_machine(self, machine_name: str, selector: str) -> Any:
"""Select a nix attribute for a specific machine.
Args:
machine_name: The name of the machine
selector: The attribute selector string relative to the machine config
Returns:
The full selector string for the machine
apply: Optional function to apply to the result
"""
config = nix_config()
@@ -90,7 +89,9 @@ class TestFlake(Flake):
test_system = system
if system.endswith("-darwin"):
test_system = system.rstrip("darwin") + "linux"
return f'checks."{test_system}".{self.check_attr}.machinesCross."{system}"."{machine_name}".{selector}'
full_selector = f'checks."{test_system}".{self.check_attr}.machinesCross.{system}."{machine_name}".{selector}'
return self.select(full_selector)
# we don't want to evaluate all machines of the flake. Only the ones defined in the test
def set_machine_names(self, machine_names: list[str]) -> None:
@@ -102,7 +103,7 @@ class TestFlake(Flake):
opts: "ListOptions | None" = None, # noqa: ARG002
) -> "dict[str, MachineResponse]":
"""List machines of a clan"""
from clan_lib.machines.actions import (
from clan_lib.machines.actions import ( # noqa: PLC0415
InventoryMachine,
MachineResponse,
)

View File

@@ -231,7 +231,7 @@ def remove_machine_command(args: argparse.Namespace) -> None:
def add_group_argument(parser: argparse.ArgumentParser) -> None:
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_groups,
)
@@ -334,7 +334,7 @@ def register_groups_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the machines to add",
type=machine_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_machines,
)
@@ -353,7 +353,7 @@ def register_groups_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the machines to remove",
type=machine_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_machines,
)
@@ -369,7 +369,7 @@ def register_groups_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the user to add",
type=user_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_users,
)
@@ -388,7 +388,7 @@ def register_groups_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the user to remove",
type=user_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_users,
)
@@ -407,7 +407,7 @@ def register_groups_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the secret",
type=secret_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_secrets,
)
@@ -426,7 +426,7 @@ def register_groups_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the secret",
type=secret_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_secrets,
)

View File

@@ -69,7 +69,7 @@ def register_import_sops_parser(parser: argparse.ArgumentParser) -> None:
default=[],
help="the group to import the secrets to",
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_groups,
)
@@ -82,7 +82,7 @@ def register_import_sops_parser(parser: argparse.ArgumentParser) -> None:
default=[],
help="the machine to import the secrets to",
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_machines,
)
@@ -95,7 +95,7 @@ def register_import_sops_parser(parser: argparse.ArgumentParser) -> None:
default=[],
help="the user to import the secrets to",
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_users,
)

View File

@@ -172,7 +172,7 @@ def register_machines_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the machine",
type=machine_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_machines,
)
@@ -192,7 +192,7 @@ def register_machines_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the machine",
type=machine_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_machines,
)
@@ -207,7 +207,7 @@ def register_machines_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the machine",
type=machine_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_machines,
)
@@ -225,7 +225,7 @@ def register_machines_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the machine",
type=machine_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_machines,
complete_secrets,
@@ -250,7 +250,7 @@ def register_machines_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the machine",
type=machine_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_machines,
complete_secrets,

View File

@@ -255,7 +255,7 @@ def add_secret_argument(parser: argparse.ArgumentParser, autocomplete: bool) ->
type=secret_name_type,
)
if autocomplete:
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_secrets,
)
@@ -467,7 +467,7 @@ def register_secrets_parser(subparser: argparse._SubParsersAction) -> None:
default=[],
help="the group to import the secrets to (can be repeated)",
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_groups,
)
@@ -480,7 +480,7 @@ def register_secrets_parser(subparser: argparse._SubParsersAction) -> None:
default=[],
help="the machine to import the secrets to (can be repeated)",
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_machines,
)
@@ -493,7 +493,7 @@ def register_secrets_parser(subparser: argparse._SubParsersAction) -> None:
default=[],
help="the user to import the secrets to (can be repeated)",
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_users,
)

View File

@@ -355,10 +355,7 @@ def get_public_age_key_from_private_key(privkey: str) -> str:
cmd = nix_shell(["age"], ["age-keygen", "-y"])
error_msg = "Failed to get public key for age private key. Is the key malformed?"
res = run(
cmd,
RunOpts(input=privkey.encode(), error_msg=error_msg, sensitive_input=True),
)
res = run(cmd, RunOpts(input=privkey.encode(), error_msg=error_msg))
return res.stdout.rstrip(os.linesep).rstrip()

View File

@@ -281,7 +281,7 @@ def register_users_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the user",
type=user_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_users,
)
@@ -295,7 +295,7 @@ def register_users_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the user",
type=user_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_users,
)
@@ -312,7 +312,7 @@ def register_users_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the user",
type=user_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_secrets,
complete_users,
@@ -336,7 +336,7 @@ def register_users_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the group",
type=user_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_secrets,
complete_users,
@@ -360,7 +360,7 @@ def register_users_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the user",
type=user_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_users,
)
@@ -378,7 +378,7 @@ def register_users_parser(parser: argparse.ArgumentParser) -> None:
help="the name of the user",
type=user_name_type,
)
from clan_cli.completions import (
from clan_cli.completions import ( # noqa: PLC0415
add_dynamic_completer,
complete_users,
)

View File

@@ -0,0 +1,24 @@
{
# Use this path to our repo root e.g. for UI test
# inputs.clan-core.url = "../../../../.";
# this placeholder is replaced by the path to nixpkgs
inputs.clan-core.url = "__CLAN_CORE__";
outputs =
{ self, clan-core }:
let
clan = clan-core.lib.clan {
inherit self;
meta.name = "test_flake_with_core_dynamic_machines";
machines =
let
machineModules = builtins.readDir (self + "/machines");
in
builtins.mapAttrs (name: _type: import (self + "/machines/${name}")) machineModules;
};
in
{
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
};
}

View File

@@ -1,6 +1,5 @@
import json
import logging
import os
import shutil
import subprocess
import time
@@ -167,16 +166,16 @@ def test_generate_public_and_secret_vars(
assert shared_value.startswith("shared")
vars_text = stringify_all_vars(machine)
flake_obj = Flake(str(flake.path))
my_generator = Generator("my_generator", machines=["my_machine"], _flake=flake_obj)
my_generator = Generator("my_generator", machine="my_machine", _flake=flake_obj)
shared_generator = Generator(
"my_shared_generator",
share=True,
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
dependent_generator = Generator(
"dependent_generator",
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
in_repo_store = in_repo.FactStore(flake=flake_obj)
@@ -341,12 +340,12 @@ def test_generate_secret_var_sops_with_default_group(
flake_obj = Flake(str(flake.path))
first_generator = Generator(
"first_generator",
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
second_generator = Generator(
"second_generator",
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
in_repo_store = in_repo.FactStore(flake=flake_obj)
@@ -376,13 +375,13 @@ def test_generate_secret_var_sops_with_default_group(
first_generator_with_share = Generator(
"first_generator",
share=False,
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
second_generator_with_share = Generator(
"second_generator",
share=False,
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
assert sops_store.user_has_access("user2", first_generator_with_share, "my_secret")
@@ -430,43 +429,10 @@ def test_generated_shared_secret_sops(
machine1 = Machine(name="machine1", flake=Flake(str(flake.path)))
machine2 = Machine(name="machine2", flake=Flake(str(flake.path)))
cli.run(["vars", "generate", "--flake", str(flake.path), "machine1"])
# Get the initial state of the flake directory after generation
def get_file_mtimes(path: str) -> dict[str, float]:
"""Get modification times of all files in a directory tree."""
mtimes = {}
for root, _dirs, files in os.walk(path):
# Skip .git directory
if ".git" in root:
continue
for file in files:
filepath = Path(root) / file
mtimes[str(filepath)] = filepath.stat().st_mtime
return mtimes
initial_mtimes = get_file_mtimes(str(flake.path))
# First check_vars should not write anything
assert check_vars(machine1.name, machine1.flake), (
"machine1 has already generated vars, so check_vars should return True\n"
f"Check result:\n{check_vars(machine1.name, machine1.flake)}"
)
# Verify no files were modified
after_check_mtimes = get_file_mtimes(str(flake.path))
assert initial_mtimes == after_check_mtimes, (
"check_vars should not modify any files when vars are already valid"
)
assert not check_vars(machine2.name, machine2.flake), (
"machine2 has not generated vars yet, so check_vars should return False"
)
# Verify no files were modified
after_check_mtimes_2 = get_file_mtimes(str(flake.path))
assert initial_mtimes == after_check_mtimes_2, (
"check_vars should not modify any files when vars are not valid"
)
assert check_vars(machine1.name, machine1.flake)
cli.run(["vars", "generate", "--flake", str(flake.path), "machine2"])
assert check_vars(machine2.name, machine2.flake)
assert check_vars(machine2.name, machine2.flake)
m1_sops_store = sops.SecretStore(machine1.flake)
m2_sops_store = sops.SecretStore(machine2.flake)
# Create generators with machine context for testing
@@ -547,28 +513,28 @@ def test_generate_secret_var_password_store(
"my_generator",
share=False,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
my_generator_shared = Generator(
"my_generator",
share=True,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
my_shared_generator = Generator(
"my_shared_generator",
share=True,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
my_shared_generator_not_shared = Generator(
"my_shared_generator",
share=False,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
assert store.exists(my_generator, "my_secret")
@@ -580,7 +546,7 @@ def test_generate_secret_var_password_store(
name="my_generator",
share=False,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
assert store.get(generator, "my_secret").decode() == "hello\n"
@@ -591,7 +557,7 @@ def test_generate_secret_var_password_store(
"my_generator",
share=False,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
var_name = "my_secret"
@@ -604,7 +570,7 @@ def test_generate_secret_var_password_store(
"my_generator2",
share=False,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
var_name = "my_secret2"
@@ -616,7 +582,7 @@ def test_generate_secret_var_password_store(
"my_shared_generator",
share=True,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
var_name = "my_shared_secret"
@@ -663,8 +629,8 @@ def test_generate_secret_for_multiple_machines(
in_repo_store2 = in_repo.FactStore(flake=flake_obj)
# Create generators for each machine
gen1 = Generator("my_generator", machines=["machine1"], _flake=flake_obj)
gen2 = Generator("my_generator", machines=["machine2"], _flake=flake_obj)
gen1 = Generator("my_generator", machine="machine1", _flake=flake_obj)
gen2 = Generator("my_generator", machine="machine2", _flake=flake_obj)
assert in_repo_store1.exists(gen1, "my_value")
assert in_repo_store2.exists(gen2, "my_value")
@@ -728,12 +694,12 @@ def test_prompt(
# Set up objects for testing the results
flake_obj = Flake(str(flake.path))
my_generator = Generator("my_generator", machines=["my_machine"], _flake=flake_obj)
my_generator = Generator("my_generator", machine="my_machine", _flake=flake_obj)
my_generator_with_details = Generator(
name="my_generator",
share=False,
files=[],
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
@@ -818,10 +784,10 @@ def test_shared_vars_regeneration(
in_repo_store_2 = in_repo.FactStore(machine2.flake)
# Create generators with machine context for testing
child_gen_m1 = Generator(
"child_generator", share=False, machines=["machine1"], _flake=machine1.flake
"child_generator", share=False, machine="machine1", _flake=machine1.flake
)
child_gen_m2 = Generator(
"child_generator", share=False, machines=["machine2"], _flake=machine2.flake
"child_generator", share=False, machine="machine2", _flake=machine2.flake
)
# generate for machine 1
cli.run(["vars", "generate", "--flake", str(flake.path), "machine1"])
@@ -889,13 +855,13 @@ def test_multi_machine_shared_vars(
generator_m1 = Generator(
"shared_generator",
share=True,
machines=["machine1"],
machine="machine1",
_flake=machine1.flake,
)
generator_m2 = Generator(
"shared_generator",
share=True,
machines=["machine2"],
machine="machine2",
_flake=machine2.flake,
)
# generate for machine 1
@@ -951,9 +917,7 @@ def test_api_set_prompts(
)
machine = Machine(name="my_machine", flake=Flake(str(flake.path)))
store = in_repo.FactStore(machine.flake)
my_generator = Generator(
"my_generator", machines=["my_machine"], _flake=machine.flake
)
my_generator = Generator("my_generator", machine="my_machine", _flake=machine.flake)
assert store.exists(my_generator, "prompt1")
assert store.get(my_generator, "prompt1").decode() == "input1"
run_generators(
@@ -1097,10 +1061,10 @@ def test_migration(
assert "Migrated var my_generator/my_value" in caplog.text
assert "Migrated secret var my_generator/my_secret" in caplog.text
flake_obj = Flake(str(flake.path))
my_generator = Generator("my_generator", machines=["my_machine"], _flake=flake_obj)
my_generator = Generator("my_generator", machine="my_machine", _flake=flake_obj)
other_generator = Generator(
"other_generator",
machines=["my_machine"],
machine="my_machine",
_flake=flake_obj,
)
in_repo_store = in_repo.FactStore(flake=flake_obj)
@@ -1246,7 +1210,7 @@ def test_share_mode_switch_regenerates_secret(
sops_store = sops.SecretStore(flake=flake_obj)
generator_not_shared = Generator(
"my_generator", share=False, machines=["my_machine"], _flake=flake_obj
"my_generator", share=False, machine="my_machine", _flake=flake_obj
)
initial_public = in_repo_store.get(generator_not_shared, "my_value").decode()
@@ -1265,7 +1229,7 @@ def test_share_mode_switch_regenerates_secret(
# Read the new values with shared generator
generator_shared = Generator(
"my_generator", share=True, machines=["my_machine"], _flake=flake_obj
"my_generator", share=True, machine="my_machine", _flake=flake_obj
)
new_public = in_repo_store.get(generator_shared, "my_value").decode()
@@ -1300,117 +1264,68 @@ def test_cache_misses_for_vars_operations(
flake: ClanFlake,
) -> None:
"""Test that vars operations result in minimal cache misses."""
# Set up first machine with two generators
config = flake.machines["my_machine"] = create_test_machine_config()
# Set up two generators with public values
gen1 = config["clan"]["core"]["vars"]["generators"]["gen1"]
gen1["files"]["value1"]["secret"] = False
gen1["script"] = 'echo -n "test_value1" > "$out"/value1'
gen2 = config["clan"]["core"]["vars"]["generators"]["gen2"]
gen2["files"]["value2"]["secret"] = False
gen2["script"] = 'echo -n "test_value2" > "$out"/value2'
# Add a second machine with the same generator configuration
flake.machines["other_machine"] = config.copy()
# Set up a simple generator with a public value
my_generator = config["clan"]["core"]["vars"]["generators"]["my_generator"]
my_generator["files"]["my_value"]["secret"] = False
my_generator["script"] = 'echo -n "test_value" > "$out"/my_value'
flake.refresh()
monkeypatch.chdir(flake.path)
# Create fresh machine objects to ensure clean cache state
flake_obj = Flake(str(flake.path))
machine1 = Machine(name="my_machine", flake=flake_obj)
machine2 = Machine(name="other_machine", flake=flake_obj)
# Create a fresh machine object to ensure clean cache state
machine = Machine(name="my_machine", flake=Flake(str(flake.path)))
# Test 1: Running vars generate for BOTH machines simultaneously should still result in exactly 2 cache misses
# Even though we have:
# - 2 machines (my_machine and other_machine)
# - 2 generators per machine (gen1 and gen2)
# We still only get 2 cache misses when generating for both machines:
# 1. One for getting the list of generators for both machines
# 2. One batched evaluation for getting all generator scripts for both machines
# The key insight: the system should batch ALL evaluations across ALL machines into a single nix eval
# Test 1: Running vars generate with a fresh cache should result in exactly 3 cache misses
# Expected cache misses:
# 1. One for getting the list of generators
# 2. One for getting the final script of our test generator (my_generator)
# 3. One for getting the final script of the state version generator (added by default)
# TODO: The third cache miss is undesired in tests. disable state version module for tests
run_generators(
machines=[machine1, machine2],
machines=[machine],
generators=None, # Generate all
)
# Print stack traces if we have more than 2 cache misses
if flake_obj._cache_misses != 2:
flake_obj.print_cache_miss_analysis(
# Print stack traces if we have more than 3 cache misses
if machine.flake._cache_misses != 3:
machine.flake.print_cache_miss_analysis(
title="Cache miss analysis for vars generate"
)
assert flake_obj._cache_misses == 2, (
f"Expected exactly 2 cache misses for vars generate, got {flake_obj._cache_misses}"
assert machine.flake._cache_misses == 2, (
f"Expected exactly 2 cache misses for vars generate, got {machine.flake._cache_misses}"
)
# Verify the value was generated correctly
var_value = get_machine_var(machine, "my_generator/my_value")
assert var_value.printable_value == "test_value"
# Test 2: List all vars should result in exactly 1 cache miss
# Force cache invalidation (this also resets cache miss tracking)
invalidate_flake_cache(flake.path)
flake_obj.invalidate_cache()
machine.flake.invalidate_cache()
stringify_all_vars(machine1)
assert flake_obj._cache_misses == 1, (
f"Expected exactly 1 cache miss for vars list, got {flake_obj._cache_misses}"
stringify_all_vars(machine)
assert machine.flake._cache_misses == 1, (
f"Expected exactly 1 cache miss for vars list, got {machine.flake._cache_misses}"
)
# Test 3: Getting a specific var with a fresh cache should result in exactly 1 cache miss
# Force cache invalidation (this also resets cache miss tracking)
invalidate_flake_cache(flake.path)
flake_obj.invalidate_cache()
machine.flake.invalidate_cache()
# Only test gen1 for the get operation
var_value = get_machine_var(machine1, "gen1/value1")
assert var_value.printable_value == "test_value1"
var_value = get_machine_var(machine, "my_generator/my_value")
assert var_value.printable_value == "test_value"
assert flake_obj._cache_misses == 1, (
f"Expected exactly 1 cache miss for vars get with fresh cache, got {flake_obj._cache_misses}"
assert machine.flake._cache_misses == 1, (
f"Expected exactly 1 cache miss for vars get with fresh cache, got {machine.flake._cache_misses}"
)
@pytest.mark.with_core
def test_shared_generator_conflicting_definition_raises_error(
monkeypatch: pytest.MonkeyPatch,
flake_with_sops: ClanFlake,
) -> None:
"""Test that vars generation raises an error when two machines have different
definitions for the same shared generator.
"""
flake = flake_with_sops
# Create machine1 with a shared generator
machine1_config = flake.machines["machine1"] = create_test_machine_config()
shared_gen1 = machine1_config["clan"]["core"]["vars"]["generators"][
"shared_generator"
]
shared_gen1["share"] = True
shared_gen1["files"]["file1"]["secret"] = False
shared_gen1["script"] = 'echo "test" > "$out"/file1'
# Create machine2 with the same shared generator but different files
machine2_config = flake.machines["machine2"] = create_test_machine_config()
shared_gen2 = machine2_config["clan"]["core"]["vars"]["generators"][
"shared_generator"
]
shared_gen2["share"] = True
shared_gen2["files"]["file2"]["secret"] = False # Different file name
shared_gen2["script"] = 'echo "test" > "$out"/file2'
flake.refresh()
monkeypatch.chdir(flake.path)
# Attempting to generate vars for both machines should raise an error
# because they have conflicting definitions for the same shared generator
with pytest.raises(
ClanError,
match=".*differ.*",
):
cli.run(["vars", "generate", "--flake", str(flake.path)])
@pytest.mark.with_core
def test_dynamic_invalidation(
monkeypatch: pytest.MonkeyPatch,

View File

@@ -40,15 +40,12 @@ class StoreBase(ABC):
def get_machine(self, generator: "Generator") -> str:
"""Get machine name from generator, asserting it's not None for now."""
if generator.share:
return "__shared"
if not generator.machines:
if generator.machine is None:
if generator.share:
return "__shared"
msg = f"Generator '{generator.name}' has no machine associated"
raise ClanError(msg)
if len(generator.machines) != 1:
msg = f"Generator '{generator.name}' has {len(generator.machines)} machines, expected exactly 1"
raise ClanError(msg)
return generator.machines[0]
return generator.machine
# get a single fact
@abstractmethod
@@ -150,7 +147,7 @@ class StoreBase(ABC):
prev_generator = dataclasses.replace(
generator,
share=not generator.share,
machines=[] if not generator.share else [machine],
machine=machine if generator.share else None,
)
if self.exists(prev_generator, var.name):
changed_files += self.delete(prev_generator, var.name)
@@ -168,12 +165,12 @@ class StoreBase(ABC):
new_file = self._set(generator, var, value, machine)
action_str = "Migrated" if is_migration else "Updated"
log_info: Callable
if generator.share:
if generator.machine is None:
log_info = log.info
else:
from clan_lib.machines.machines import Machine
from clan_lib.machines.machines import Machine # noqa: PLC0415
machine_obj = Machine(name=generator.machines[0], flake=self.flake)
machine_obj = Machine(name=generator.machine, flake=self.flake)
log_info = machine_obj.info
if self.is_secret_store:
log.info(f"{action_str} secret var {generator.name}/{var.name}\n")

View File

@@ -3,7 +3,6 @@ import logging
from typing import TYPE_CHECKING
from clan_cli.completions import add_dynamic_completer, complete_machines
from clan_cli.vars.secret_modules import sops
from clan_lib.errors import ClanError
from clan_lib.flake import Flake, require_flake
from clan_lib.machines.machines import Machine
@@ -27,33 +26,13 @@ class VarStatus:
self.unfixed_secret_vars = unfixed_secret_vars
self.invalid_generators = invalid_generators
def text(self) -> str:
log = ""
if self.missing_secret_vars:
log += "Missing secret vars:\n"
for var in self.missing_secret_vars:
log += f" - {var.id}\n"
if self.missing_public_vars:
log += "Missing public vars:\n"
for var in self.missing_public_vars:
log += f" - {var.id}\n"
if self.unfixed_secret_vars:
log += "Unfixed secret vars:\n"
for var in self.unfixed_secret_vars:
log += f" - {var.id}\n"
if self.invalid_generators:
log += "Invalid generators (outdated invalidation hash):\n"
for gen in self.invalid_generators:
log += f" - {gen}\n"
return log if log else "All vars are present and valid."
def vars_status(
machine_name: str,
flake: Flake,
generator_name: None | str = None,
) -> VarStatus:
from clan_cli.vars.generator import Generator
from clan_cli.vars.generator import Generator # noqa: PLC0415
machine = Machine(name=machine_name, flake=flake)
missing_secret_vars = []
@@ -87,32 +66,15 @@ def vars_status(
f"Secret var '{file.name}' for service '{generator.name}' in machine {machine.name} is missing.",
)
missing_secret_vars.append(file)
if (
isinstance(machine.secret_vars_store, sops.SecretStore)
and generator.share
and file.exists
and not machine.secret_vars_store.machine_has_access(
generator=generator,
secret_name=file.name,
machine=machine.name,
)
):
msg = (
f"Secret var '{generator.name}/{file.name}' is marked for deployment to machine '{machine.name}', but the machine does not have access to it.\n"
f"Run 'clan vars generate {machine.name}' to fix this.\n"
)
machine.info(msg)
missing_secret_vars.append(file)
else:
health_msg = machine.secret_vars_store.health_check(
msg = machine.secret_vars_store.health_check(
machine=machine.name,
generators=[generator],
file_name=file.name,
)
if health_msg is not None:
if msg:
machine.info(
f"Secret var '{file.name}' for service '{generator.name}' in machine {machine.name} needs update: {health_msg}",
f"Secret var '{file.name}' for service '{generator.name}' in machine {machine.name} needs update: {msg}",
)
unfixed_secret_vars.append(file)
@@ -144,7 +106,6 @@ def check_vars(
generator_name: None | str = None,
) -> bool:
status = vars_status(machine_name, flake, generator_name=generator_name)
log.info(f"Check results for machine '{machine_name}': \n{status.text()}")
return not (
status.missing_secret_vars
or status.missing_public_vars

View File

@@ -61,22 +61,14 @@ class Generator:
migrate_fact: str | None = None
validation_hash: str | None = None
machines: list[str] = field(default_factory=list)
machine: str | None = None
_flake: "Flake | None" = None
_public_store: "StoreBase | None" = None
_secret_store: "StoreBase | None" = None
@property
def key(self) -> GeneratorKey:
if self.share:
# must be a shared generator
machine = None
elif len(self.machines) != 1:
msg = f"Shared generator {self.name} must have exactly one machine, but has {len(self.machines)}: {', '.join(self.machines)}"
raise ClanError(msg)
else:
machine = self.machines[0]
return GeneratorKey(machine=machine, name=self.name)
return GeneratorKey(machine=self.machine, name=self.name)
def __hash__(self) -> int:
return hash(self.key)
@@ -151,10 +143,7 @@ class Generator:
files_selector = "config.clan.core.vars.generators.*.files.*.{secret,deploy,owner,group,mode,neededFor}"
flake.precache(cls.get_machine_selectors(machine_names))
generators: list[Generator] = []
shared_generators_raw: dict[
str, tuple[str, dict, dict]
] = {} # name -> (machine_name, gen_data, files_data)
generators = []
for machine_name in machine_names:
# Get all generator metadata in one select (safe fields only)
@@ -176,38 +165,6 @@ class Generator:
sec_store = machine.secret_vars_store
for gen_name, gen_data in generators_data.items():
# Check for conflicts in shared generator definitions using raw data
if gen_data["share"]:
if gen_name in shared_generators_raw:
prev_machine, prev_gen_data, prev_files_data = (
shared_generators_raw[gen_name]
)
# Compare raw data
prev_gen_files = prev_files_data.get(gen_name, {})
curr_gen_files = files_data.get(gen_name, {})
# Build list of differences with details
differences = []
if prev_gen_files != curr_gen_files:
differences.append("files")
if prev_gen_data.get("prompts") != gen_data.get("prompts"):
differences.append("prompts")
if prev_gen_data.get("dependencies") != gen_data.get(
"dependencies"
):
differences.append("dependencies")
if prev_gen_data.get("validationHash") != gen_data.get(
"validationHash"
):
differences.append("validation_hash")
if differences:
msg = f"Machines {prev_machine} and {machine_name} have different definitions for shared generator '{gen_name}' (differ in: {', '.join(differences)})"
raise ClanError(msg)
else:
shared_generators_raw[gen_name] = (
machine_name,
gen_data,
files_data,
)
# Build files from the files_data
files = []
gen_files = files_data.get(gen_name, {})
@@ -252,31 +209,14 @@ class Generator:
migrate_fact=gen_data.get("migrateFact"),
validation_hash=gen_data.get("validationHash"),
prompts=prompts,
# shared generators can have multiple machines, machine-specific have one
machines=[machine_name],
# only set machine for machine-specific generators
# this is essential for the graph algorithms to work correctly
machine=None if share else machine_name,
_flake=flake,
_public_store=pub_store,
_secret_store=sec_store,
)
# link generator to its files
for file in files:
file.generator(generator)
if share:
# For shared generators, check if we already created it
existing = next(
(g for g in generators if g.name == gen_name and g.share), None
)
if existing:
# Just append the machine to the existing generator
existing.machines.append(machine_name)
else:
# Add the new shared generator
generators.append(generator)
else:
# Always add per-machine generators
generators.append(generator)
generators.append(generator)
# TODO: This should be done in a non-mutable way.
if include_previous_values:
@@ -305,19 +245,15 @@ class Generator:
return sec_store.get(self, prompt.name).decode()
return None
def final_script_selector(self, machine_name: str) -> str:
if self._flake is None:
msg = "Flake cannot be None"
raise ClanError(msg)
return self._flake.machine_selector(
machine_name, f'config.clan.core.vars.generators."{self.name}".finalScript'
)
def final_script(self, machine: "Machine") -> Path:
if self._flake is None:
msg = "Flake cannot be None"
raise ClanError(msg)
output = Path(self._flake.select(self.final_script_selector(machine.name)))
output = Path(
machine.select(
f'config.clan.core.vars.generators."{self.name}".finalScript',
),
)
if tmp_store := nix_test_store():
output = tmp_store.joinpath(*output.parts[1:])
return output
@@ -482,7 +418,7 @@ class Generator:
if sys.platform == "linux" and bwrap.bubblewrap_works():
cmd = bubblewrap_cmd(str(final_script), tmpdir)
elif sys.platform == "darwin":
from clan_lib.sandbox_exec import sandbox_exec_cmd
from clan_lib.sandbox_exec import sandbox_exec_cmd # noqa: PLC0415
cmd = stack.enter_context(sandbox_exec_cmd(str(final_script), tmpdir))
else:

View File

@@ -49,28 +49,28 @@ def test_required_generators() -> None:
gen_1 = Generator(
name="gen_1",
dependencies=[],
machines=[machine_name],
machine=machine_name,
_public_store=public_store,
_secret_store=secret_store,
)
gen_2 = Generator(
name="gen_2",
dependencies=[gen_1.key],
machines=[machine_name],
machine=machine_name,
_public_store=public_store,
_secret_store=secret_store,
)
gen_2a = Generator(
name="gen_2a",
dependencies=[gen_2.key],
machines=[machine_name],
machine=machine_name,
_public_store=public_store,
_secret_store=secret_store,
)
gen_2b = Generator(
name="gen_2b",
dependencies=[gen_2.key],
machines=[machine_name],
machine=machine_name,
_public_store=public_store,
_secret_store=secret_store,
)
@@ -118,22 +118,21 @@ def test_shared_generator_invalidates_multiple_machines_dependents() -> None:
shared_gen = Generator(
name="shared_gen",
dependencies=[],
share=True, # Mark as shared generator
machines=[machine_1, machine_2], # Shared across both machines
machine=None, # Shared generator
_public_store=public_store,
_secret_store=secret_store,
)
gen_1 = Generator(
name="gen_1",
dependencies=[shared_gen.key],
machines=[machine_1],
machine=machine_1,
_public_store=public_store,
_secret_store=secret_store,
)
gen_2 = Generator(
name="gen_2",
dependencies=[shared_gen.key],
machines=[machine_2],
machine=machine_2,
_public_store=public_store,
_secret_store=secret_store,
)

View File

@@ -54,7 +54,7 @@ class SecretStore(StoreBase):
def ensure_machine_key(self, machine: str) -> None:
"""Ensure machine has sops keys initialized."""
# no need to generate keys if we don't manage secrets
from clan_cli.vars.generator import Generator
from clan_cli.vars.generator import Generator # noqa: PLC0415
vars_generators = Generator.get_machine_generators([machine], self.flake)
if not vars_generators:
@@ -98,8 +98,7 @@ class SecretStore(StoreBase):
def machine_has_access(
self, generator: Generator, secret_name: str, machine: str
) -> bool:
if not has_machine(self.flake.path, machine):
return False
self.ensure_machine_key(machine)
key_dir = sops_machines_folder(self.flake.path) / machine
return self.key_has_access(key_dir, generator, secret_name)
@@ -143,7 +142,7 @@ class SecretStore(StoreBase):
"""
if generators is None:
from clan_cli.vars.generator import Generator
from clan_cli.vars.generator import Generator # noqa: PLC0415
generators = Generator.get_machine_generators([machine], self.flake)
file_found = False
@@ -157,6 +156,8 @@ class SecretStore(StoreBase):
else:
continue
if file.secret and self.exists(generator, file.name):
if file.deploy:
self.ensure_machine_has_access(generator, file.name, machine)
needs_update, msg = self.needs_fix(generator, file.name, machine)
if needs_update:
outdated.append((generator.name, file.name, msg))
@@ -218,7 +219,7 @@ class SecretStore(StoreBase):
return [store_folder]
def populate_dir(self, machine: str, output_dir: Path, phases: list[str]) -> None:
from clan_cli.vars.generator import Generator
from clan_cli.vars.generator import Generator # noqa: PLC0415
vars_generators = Generator.get_machine_generators([machine], self.flake)
if "users" in phases or "services" in phases:
@@ -282,7 +283,6 @@ class SecretStore(StoreBase):
) -> None:
if self.machine_has_access(generator, name, machine):
return
self.ensure_machine_key(machine)
secret_folder = self.secret_path(generator, name)
add_secret(
self.flake.path,
@@ -292,7 +292,7 @@ class SecretStore(StoreBase):
)
def collect_keys_for_secret(self, machine: str, path: Path) -> set[sops.SopsKey]:
from clan_cli.secrets.secrets import (
from clan_cli.secrets.secrets import ( # noqa: PLC0415
collect_keys_for_path,
collect_keys_for_type,
)
@@ -354,10 +354,10 @@ class SecretStore(StoreBase):
ClanError: If the specified file_name is not found
"""
from clan_cli.secrets.secrets import update_keys
from clan_cli.secrets.secrets import update_keys # noqa: PLC0415
if generators is None:
from clan_cli.vars.generator import Generator
from clan_cli.vars.generator import Generator # noqa: PLC0415
generators = Generator.get_machine_generators([machine], self.flake)
file_found = False

View File

@@ -319,9 +319,9 @@ def load_in_all_api_functions() -> None:
We have to make sure python loads every wrapped function at least once.
This is done by importing all modules from the clan_lib and clan_cli packages.
"""
import clan_cli # Avoid circular imports - many modules import from clan_lib.api
import clan_cli # noqa: PLC0415 # Avoid circular imports - many modules import from clan_lib.api
import clan_lib # Avoid circular imports - many modules import from clan_lib.api
import clan_lib # noqa: PLC0415 # Avoid circular imports - many modules import from clan_lib.api
import_all_modules_from_package(clan_lib)
import_all_modules_from_package(clan_cli)

View File

@@ -88,7 +88,7 @@ def list_system_storage_devices() -> Blockdevices:
A list of detected block devices with metadata like size, path, type, etc.
"""
from clan_lib.nix import nix_shell
from clan_lib.nix import nix_shell # noqa: PLC0415
cmd = nix_shell(
["util-linux"],
@@ -124,7 +124,7 @@ def get_clan_directory_relative(flake: Flake) -> str:
ClanError: If the flake evaluation fails or directories cannot be found
"""
from clan_lib.dirs import get_clan_directories
from clan_lib.dirs import get_clan_directories # noqa: PLC0415
_, relative_dir = get_clan_directories(flake)
return relative_dir

View File

@@ -294,8 +294,6 @@ class RunOpts:
# This is needed for GUI applications
graphical_perm: bool = False
trace: bool = True
# Mark input as sensitive to prevent it from being logged (e.g., private keys, passwords)
sensitive_input: bool = False
def cmd_with_root(cmd: list[str], graphical: bool = False) -> list[str]:
@@ -351,10 +349,7 @@ def run(
if cmdlog.isEnabledFor(logging.DEBUG) and options.trace:
if options.input and isinstance(options.input, bytes):
# Always redact sensitive input (e.g., private keys, passwords)
if options.sensitive_input:
filtered_input = "<<REDACTED>>"
elif any(
if any(
not ch.isprintable() for ch in options.input.decode("ascii", "replace")
):
filtered_input = "<<binary_blob>>"

View File

@@ -1132,20 +1132,6 @@ class Flake:
return self._cache.select(selector)
def machine_selector(self, machine_name: str, selector: str) -> str:
"""Create a selector for a specific machine.
Args:
machine_name: The name of the machine
selector: The attribute selector string relative to the machine config
Returns:
The full selector string for the machine
"""
config = nix_config()
system = config["system"]
return f'clanInternals.machines."{system}"."{machine_name}".{selector}'
def select_machine(self, machine_name: str, selector: str) -> Any:
"""Select a nix attribute for a specific machine.
@@ -1155,14 +1141,18 @@ class Flake:
apply: Optional function to apply to the result
"""
return self.select(self.machine_selector(machine_name, selector))
config = nix_config()
system = config["system"]
full_selector = f'clanInternals.machines."{system}"."{machine_name}".{selector}'
return self.select(full_selector)
def list_machines(
self,
opts: "ListOptions | None" = None,
) -> "dict[str, MachineResponse]":
"""List machines of a clan"""
from clan_lib.machines.actions import list_machines
from clan_lib.machines.actions import list_machines # noqa: PLC0415
return list_machines(self, opts)

View File

@@ -18,14 +18,14 @@ def locked_open(filename: Path, mode: str = "r") -> Generator:
def write_history_file(data: Any) -> None:
from clan_lib.dirs import user_history_file
from clan_lib.dirs import user_history_file # noqa: PLC0415
with locked_open(user_history_file(), "w+") as f:
f.write(json.dumps(data, cls=ClanJSONEncoder, indent=4))
def read_history_file() -> list[dict]:
from clan_lib.dirs import user_history_file
from clan_lib.dirs import user_history_file # noqa: PLC0415
with locked_open(user_history_file(), "r") as f:
content: str = f.read()

View File

@@ -119,9 +119,6 @@ def run_machine_hardware_info_init(
if opts.debug:
cmd += ["--debug"]
# Add nix options to nixos-anywhere
cmd.extend(opts.machine.flake.nix_options or [])
cmd += [target_host.target]
cmd = nix_shell(
["nixos-anywhere"],

View File

@@ -33,7 +33,7 @@ class Machine:
def get_inv_machine(self) -> "InventoryMachine":
# Import on demand to avoid circular imports
from clan_lib.machines.actions import get_machine
from clan_lib.machines.actions import get_machine # noqa: PLC0415
return get_machine(self.flake, self.name)
@@ -95,7 +95,7 @@ class Machine:
@cached_property
def secret_vars_store(self) -> StoreBase:
from clan_cli.vars.secret_modules import password_store
from clan_cli.vars.secret_modules import password_store # noqa: PLC0415
secret_module = self.select("config.clan.core.vars.settings.secretModule")
module = importlib.import_module(secret_module)
@@ -126,7 +126,7 @@ class Machine:
return self.flake.path
def target_host(self) -> Remote:
from clan_lib.network.network import get_best_remote
from clan_lib.network.network import get_best_remote # noqa: PLC0415
with get_best_remote(self) as remote:
return remote

View File

@@ -42,7 +42,7 @@ def _suggest_similar_names(
def get_available_machines(flake: Flake) -> list[str]:
from clan_lib.machines.list import list_machines
from clan_lib.machines.list import list_machines # noqa: PLC0415
machines = list_machines(flake)
return list(machines.keys())

View File

@@ -34,7 +34,7 @@ class Peer:
_var: dict[str, str] = self._host["var"]
machine_name = _var["machine"]
generator = _var["generator"]
from clan_lib.machines.machines import Machine
from clan_lib.machines.machines import Machine # noqa: PLC0415
machine = Machine(name=machine_name, flake=self.flake)
var = get_machine_var(
@@ -136,123 +136,92 @@ def networks_from_flake(flake: Flake) -> dict[str, Network]:
return networks
class BestRemoteContext:
"""Class-based context manager for establishing and maintaining network connections."""
@contextmanager
def get_best_remote(machine: "Machine") -> Iterator["Remote"]:
"""Context manager that yields the best remote connection for a machine following this priority:
1. If machine has targetHost in inventory, return a direct connection
2. Return the highest priority network where machine is reachable
3. If no network works, try to get targetHost from machine nixos config
def __init__(self, machine: "Machine") -> None:
self.machine = machine
self._network_ctx: Any = None
self._remote: Remote | None = None
Args:
machine: Machine instance to connect to
def __enter__(self) -> "Remote":
"""Establish the best remote connection for a machine following this priority:
1. If machine has targetHost in inventory, return a direct connection
2. Return the highest priority network where machine is reachable
3. If no network works, try to get targetHost from machine nixos config
Yields:
Remote object for connecting to the machine
Returns:
Remote object for connecting to the machine
Raises:
ClanError: If no connection method works
Raises:
ClanError: If no connection method works
"""
# Step 1: Check if targetHost is set in inventory
inv_machine = machine.get_inv_machine()
target_host = inv_machine.get("deploy", {}).get("targetHost")
"""
# Step 1: Check if targetHost is set in inventory
inv_machine = self.machine.get_inv_machine()
target_host = inv_machine.get("deploy", {}).get("targetHost")
if target_host:
log.debug(f"Using targetHost from inventory for {machine.name}: {target_host}")
# Create a direct network with just this machine
remote = Remote.from_ssh_uri(machine_name=machine.name, address=target_host)
yield remote
return
if target_host:
log.debug(
f"Using targetHost from inventory for {self.machine.name}: {target_host}"
)
self._remote = Remote.from_ssh_uri(
machine_name=self.machine.name, address=target_host
)
return self._remote
# Step 2: Try existing networks by priority
try:
networks = networks_from_flake(machine.flake)
# Step 2: Try existing networks by priority
try:
networks = networks_from_flake(self.machine.flake)
sorted_networks = sorted(networks.items(), key=lambda x: -x[1].priority)
sorted_networks = sorted(networks.items(), key=lambda x: -x[1].priority)
for network_name, network in sorted_networks:
if self.machine.name not in network.peers:
continue
for network_name, network in sorted_networks:
if machine.name not in network.peers:
continue
log.debug(f"trying to connect via {network_name}")
if network.is_running():
try:
ping_time = network.ping(self.machine.name)
# Check if network is running and machine is reachable
log.debug(f"trying to connect via {network_name}")
if network.is_running():
try:
ping_time = network.ping(machine.name)
if ping_time is not None:
log.info(
f"Machine {machine.name} reachable via {network_name} network",
)
yield network.remote(machine.name)
return
except ClanError as e:
log.debug(f"Failed to reach {machine.name} via {network_name}: {e}")
else:
try:
log.debug(f"Establishing connection for network {network_name}")
with network.module.connection(network) as connected_network:
ping_time = connected_network.ping(machine.name)
if ping_time is not None:
log.info(
f"Machine {self.machine.name} reachable via {network_name} network",
f"Machine {machine.name} reachable via {network_name} network after connection",
)
self._remote = remote = network.remote(self.machine.name)
return remote
except ClanError as e:
log.debug(
f"Failed to reach {self.machine.name} via {network_name}: {e}"
)
else:
try:
log.debug(f"Establishing connection for network {network_name}")
# Enter the network context and keep it alive
self._network_ctx = network.module.connection(network)
connected_network = self._network_ctx.__enter__()
ping_time = connected_network.ping(self.machine.name)
if ping_time is not None:
log.info(
f"Machine {self.machine.name} reachable via {network_name} network after connection",
)
self._remote = remote = connected_network.remote(
self.machine.name
)
return remote
# Ping failed, clean up this connection attempt
self._network_ctx.__exit__(None, None, None)
self._network_ctx = None
except ClanError as e:
# Clean up failed connection attempt
if self._network_ctx is not None:
self._network_ctx.__exit__(None, None, None)
self._network_ctx = None
log.debug(
f"Failed to establish connection to {self.machine.name} via {network_name}: {e}",
)
except (ImportError, AttributeError, KeyError) as e:
log.debug(
f"Failed to use networking modules to determine machines remote: {e}"
)
yield connected_network.remote(machine.name)
return
except ClanError as e:
log.debug(
f"Failed to establish connection to {machine.name} via {network_name}: {e}",
)
except (ImportError, AttributeError, KeyError) as e:
log.debug(f"Failed to use networking modules to determine machines remote: {e}")
# Step 3: Try targetHost from machine nixos config
target_host = self.machine.select('config.clan.core.networking."targetHost"')
if target_host:
log.debug(
f"Using targetHost from machine config for {self.machine.name}: {target_host}",
)
self._remote = Remote.from_ssh_uri(
machine_name=self.machine.name,
address=target_host,
)
return self._remote
# Step 3: Try targetHost from machine nixos config
target_host = machine.select('config.clan.core.networking."targetHost"')
if target_host:
log.debug(
f"Using targetHost from machine config for {machine.name}: {target_host}",
)
# Check if reachable
remote = Remote.from_ssh_uri(
machine_name=machine.name,
address=target_host,
)
yield remote
return
# No connection method found
msg = f"Could not find any way to connect to machine '{self.machine.name}'. No targetHost configured and machine not reachable via any network."
raise ClanError(msg)
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: object,
) -> None:
"""Clean up network connection if one was established."""
if self._network_ctx is not None:
self._network_ctx.__exit__(exc_type, exc_val, exc_tb)
def get_best_remote(machine: "Machine") -> BestRemoteContext:
return BestRemoteContext(machine)
# No connection method found
msg = f"Could not find any way to connect to machine '{machine.name}'. No targetHost configured and machine not reachable via any network."
raise ClanError(msg)
def get_network_overview(networks: dict[str, Network]) -> dict:

View File

@@ -88,7 +88,7 @@ def nix_eval(flags: list[str]) -> list[str]:
],
)
if os.environ.get("IN_NIX_SANDBOX"):
from clan_lib.dirs import nixpkgs_source
from clan_lib.dirs import nixpkgs_source # noqa: PLC0415
return [
*default_flags,
@@ -169,7 +169,7 @@ def nix_shell(packages: list[str], cmd: list[str]) -> list[str]:
if not missing_packages:
return cmd
from clan_lib.dirs import nixpkgs_flake
from clan_lib.dirs import nixpkgs_flake # noqa: PLC0415
return [
*nix_command(["shell", "--inputs-from", f"{nixpkgs_flake()!s}"]),

View File

@@ -464,12 +464,12 @@ class Remote:
self,
opts: "ConnectionOptions | None" = None,
) -> None:
from clan_lib.network.check import check_machine_ssh_reachable
from clan_lib.network.check import check_machine_ssh_reachable # noqa: PLC0415
return check_machine_ssh_reachable(self, opts)
def check_machine_ssh_login(self) -> None:
from clan_lib.network.check import check_machine_ssh_login
from clan_lib.network.check import check_machine_ssh_login # noqa: PLC0415
return check_machine_ssh_login(self)

View File

@@ -5,7 +5,6 @@ from clan_cli.vars import graph
from clan_cli.vars.generator import Generator
from clan_cli.vars.graph import requested_closure
from clan_cli.vars.migration import check_can_migrate, migrate_files
from clan_cli.vars.secret_modules import sops
from clan_lib.api import API
from clan_lib.errors import ClanError
@@ -94,21 +93,21 @@ def _ensure_healthy(
if generators is None:
generators = Generator.get_machine_generators([machine.name], machine.flake)
public_health_check_msg = machine.public_vars_store.health_check(
pub_healtcheck_msg = machine.public_vars_store.health_check(
machine.name,
generators,
)
secret_health_check_msg = machine.secret_vars_store.health_check(
sec_healtcheck_msg = machine.secret_vars_store.health_check(
machine.name,
generators,
)
if public_health_check_msg or secret_health_check_msg:
if pub_healtcheck_msg or sec_healtcheck_msg:
msg = f"Health check failed for machine {machine.name}:\n"
if public_health_check_msg:
msg += f"Public vars store: {public_health_check_msg}\n"
if secret_health_check_msg:
msg += f"Secret vars store: {secret_health_check_msg}"
if pub_healtcheck_msg:
msg += f"Public vars store: {pub_healtcheck_msg}\n"
if sec_healtcheck_msg:
msg += f"Secret vars store: {sec_healtcheck_msg}"
raise ClanError(msg)
@@ -153,15 +152,15 @@ def run_generators(
if not machines:
msg = "At least one machine must be provided"
raise ClanError(msg)
all_generators = get_generators(machines, full_closure=True)
if isinstance(generators, list):
# List of generator names - use them exactly as provided
if len(generators) == 0:
return
generators_to_run = [g for g in all_generators if g.key.name in generators]
all_generators = get_generators(machines, full_closure=True)
generator_objects = [g for g in all_generators if g.key.name in generators]
else:
# None or single string - use get_generators with closure parameter
generators_to_run = get_generators(
generator_objects = get_generators(
machines,
full_closure=full_closure,
generator_name=generators,
@@ -171,49 +170,20 @@ def run_generators(
# TODO: make this more lazy and ask for every generator on execution
if callable(prompt_values):
prompt_values = {
generator.name: prompt_values(generator) for generator in generators_to_run
generator.name: prompt_values(generator) for generator in generator_objects
}
# execute health check
for machine in machines:
_ensure_healthy(machine=machine)
# ensure all selected machines have access to all selected shared generators
for machine in machines:
# This is only relevant for the sops store
# TODO: improve store abstraction to use Protocols and introduce a proper SecretStore interface
if not isinstance(machine.secret_vars_store, sops.SecretStore):
continue
for generator in all_generators:
if generator.share:
for file in generator.files:
if not file.secret or not file.exists:
continue
machine.secret_vars_store.ensure_machine_has_access(
generator,
file.name,
machine.name,
)
# get the flake via any machine (they are all the same)
flake = machines[0].flake
def get_generator_machine(generator: Generator) -> Machine:
if generator.share:
# return first machine if generator is shared
return machines[0]
return Machine(name=generator.machines[0], flake=flake)
# preheat the select cache, to reduce repeated calls during execution
selectors = []
for generator in generators_to_run:
machine = get_generator_machine(generator)
selectors.append(generator.final_script_selector(machine.name))
flake.precache(selectors)
# execute generators
for generator in generators_to_run:
machine = get_generator_machine(generator)
for generator in generator_objects:
machine = (
machines[0]
if generator.machine is None
else Machine(name=generator.machine, flake=machines[0].flake)
)
if check_can_migrate(machine, generator):
migrate_files(machine, generator)
else:

View File

@@ -290,7 +290,9 @@ def collect_commands() -> list[Category]:
# 3. sort by title alphabetically
return (c.title.split(" ")[0], c.title, weight)
return sorted(result, key=weight_cmd_groups)
result = sorted(result, key=weight_cmd_groups)
return result
def build_command_reference() -> None:

View File

@@ -36,7 +36,7 @@ class MPProcess:
def _set_proc_name(name: str) -> None:
if sys.platform != "linux":
return
import ctypes
import ctypes # noqa: PLC0415
# Define the prctl function with the appropriate arguments and return type
libc = ctypes.CDLL("libc.so.6")

View File

@@ -759,12 +759,12 @@ class Win32Implementation(BaseImplementation):
SM_CXSMICON = 49
if sys.platform == "win32":
from ctypes import Structure
from ctypes import Structure # noqa: PLC0415
class WNDCLASSW(Structure):
"""Windows class structure for window registration."""
from ctypes import CFUNCTYPE, wintypes
from ctypes import CFUNCTYPE, wintypes # noqa: PLC0415
LPFN_WND_PROC = CFUNCTYPE(
wintypes.INT,
@@ -789,7 +789,7 @@ class Win32Implementation(BaseImplementation):
class MENUITEMINFOW(Structure):
"""Windows menu item information structure."""
from ctypes import wintypes
from ctypes import wintypes # noqa: PLC0415
_fields_: ClassVar = [
("cb_size", wintypes.UINT),
@@ -809,7 +809,7 @@ class Win32Implementation(BaseImplementation):
class NOTIFYICONDATAW(Structure):
"""Windows notification icon data structure."""
from ctypes import wintypes
from ctypes import wintypes # noqa: PLC0415
_fields_: ClassVar = [
("cb_size", wintypes.DWORD),
@@ -1061,7 +1061,7 @@ class Win32Implementation(BaseImplementation):
if sys.platform != "win32":
return
from ctypes import wintypes
from ctypes import wintypes # noqa: PLC0415
if self._menu is None:
self.update_menu()
@@ -1110,7 +1110,7 @@ class Win32Implementation(BaseImplementation):
if sys.platform != "win32":
return 0
from ctypes import wintypes
from ctypes import wintypes # noqa: PLC0415
if msg == self.WM_TRAYICON:
if l_param == self.WM_RBUTTONUP:

View File

@@ -1,71 +0,0 @@
{ self, inputs, ... }:
{
perSystem =
{ pkgs, self', ... }:
let
# Simply evaluated options (JSON)
# { clanCore = «derivation JSON»; clanModules = { ${name} = «derivation JSON» }; }
jsonDocs = pkgs.callPackage ./get-module-docs.nix {
inherit (self) clanModules;
clan-core = self;
inherit pkgs;
};
# clan service options
clanModulesViaService = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModulesViaService);
# Simply evaluated options (JSON)
renderOptions =
pkgs.runCommand "render-options"
{
# TODO: ruff does not splice properly in nativeBuildInputs
depsBuildBuild = [ pkgs.ruff ];
nativeBuildInputs = [
pkgs.python3
pkgs.mypy
self'.packages.clan-cli
];
}
''
install -D -m755 ${./generate}/__init__.py $out/bin/render-options
patchShebangs --build $out/bin/render-options
ruff format --check --diff $out/bin/render-options
ruff check --line-length 88 $out/bin/render-options
mypy --strict $out/bin/render-options
'';
module-docs =
pkgs.runCommand "rendered"
{
buildInputs = [
pkgs.python3
self'.packages.clan-cli
];
}
''
export CLAN_CORE_PATH=${
inputs.nixpkgs.lib.fileset.toSource {
root = ../..;
fileset = ../../clanModules;
}
}
export CLAN_CORE_DOCS=${jsonDocs.clanCore}/share/doc/nixos/options.json
# A file that contains the links to all clanModule docs
export CLAN_MODULES_VIA_SERVICE=${clanModulesViaService}
export CLAN_SERVICE_INTERFACE=${self'.legacyPackages.clan-service-module-interface}/share/doc/nixos/options.json
export CLAN_OPTIONS_PATH=${self'.legacyPackages.clan-options}/share/doc/nixos/options.json
mkdir $out
# The python script will place mkDocs files in the output directory
exec python3 ${renderOptions}/bin/render-options
'';
in
{
packages = {
inherit module-docs;
};
};
}

View File

@@ -2,14 +2,12 @@
{
imports = [
./clan-app/flake-module.nix
./clan-cli/flake-module.nix
./clan-core-flake/flake-module.nix
./clan-vm-manager/flake-module.nix
./icon-update/flake-module.nix
./installer/flake-module.nix
./option-search/flake-module.nix
./docs-from-code/flake-module.nix
./icon-update/flake-module.nix
./clan-core-flake/flake-module.nix
./clan-app/flake-module.nix
./testing/flake-module.nix
];