Compare commits


3 Commits

Author          SHA1        Message                        Date
pinpox          b37fa18f1b  Remove clanModules             2025-08-18 14:37:20 +02:00
Jörg Thalheim   f539d00e9a  waypipe: disable gpu for now   2025-08-18 14:35:53 +02:00
Jörg Thalheim   2d22eecd32  waypipe: disable gpu for now   2025-08-18 14:35:53 +02:00
562 changed files with 7536 additions and 24456 deletions

View File

@@ -0,0 +1,9 @@
name: checks
on:
pull_request:
jobs:
checks-impure:
runs-on: nix
steps:
- uses: actions/checkout@v4
- run: nix run .#impure-checks

View File

@@ -10,7 +10,7 @@ jobs:
if: github.repository_owner == 'clan-lol'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: actions/create-github-app-token@v2

View File

@@ -1,20 +0,0 @@
clanServices/.* @pinpox @kenji
lib/test/container-test-driver/.* @DavHau @mic92
lib/modules/inventory/.* @hsjobeki
lib/modules/inventoryClass/.* @hsjobeki
pkgs/clan-app/ui/.* @hsjobeki @brianmcgee
pkgs/clan-app/clan_app/.* @qubasa @hsjobeki
pkgs/clan-cli/clan_cli/.* @lassulus @mic92 @kenji
pkgs/clan-cli/clan_cli/(secrets|vars)/.* @DavHau @lassulus
pkgs/clan-cli/clan_lib/log_machines/.* @Qubasa
pkgs/clan-cli/clan_lib/ssh/.* @Qubasa @Mic92 @lassulus
pkgs/clan-cli/clan_lib/tags/.* @hsjobeki
pkgs/clan-cli/clan_lib/persist/.* @hsjobeki
pkgs/clan-cli/clan_lib/flake/.* @lassulus
pkgs/clan-cli/api.py @hsjobeki
pkgs/clan-cli/openapi.py @hsjobeki

View File

@@ -8,7 +8,7 @@ Our mission is simple: to democratize computing by providing tools that empower
## Features of Clan
- **Full-Stack System Deployment:** Utilize Clan's toolkit alongside Nix's reliability to build and manage systems effortlessly.
- **Full-Stack System Deployment:** Utilize Clans toolkit alongside Nix's reliability to build and manage systems effortlessly.
- **Overlay Networks:** Secure, private communication channels between devices.
- **Virtual Machine Integration:** Seamless operation of VM applications within the main operating system.
- **Robust Backup Management:** Long-term, self-hosted data preservation.

View File

@@ -0,0 +1,6 @@
{ fetchgit }:
fetchgit {
url = "https://git.clan.lol/clan/clan-core.git";
rev = "5d884cecc2585a29b6a3596681839d081b4de192";
sha256 = "09is1afmncamavb2q88qac37vmsijxzsy1iz1vr6gsyjq2rixaxc";
}

View File

@@ -36,6 +36,7 @@ in
++ filter pathExists [
./devshell/flake-module.nix
./flash/flake-module.nix
./impure/flake-module.nix
./installation/flake-module.nix
./update/flake-module.nix
./morph/flake-module.nix

View File

@@ -50,14 +50,12 @@
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript.drvPath
(import ../installation/facter-report.nix pkgs.hostPlatform.system)
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
# Skip flash test on aarch64-linux for now as it's too slow
checks = lib.optionalAttrs (pkgs.stdenv.isLinux && pkgs.hostPlatform.system != "aarch64-linux") {
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
nixos-test-flash = self.clanLib.test.baseTest {
name = "flash";
nodes.target = {

View File

@@ -0,0 +1,51 @@
{
perSystem =
{
pkgs,
lib,
self',
...
}:
{
# a script that executes all other checks
packages.impure-checks = pkgs.writeShellScriptBin "impure-checks" ''
#!${pkgs.bash}/bin/bash
set -euo pipefail
unset CLAN_DIR
export PATH="${
lib.makeBinPath (
[
pkgs.gitMinimal
pkgs.nix
pkgs.coreutils
pkgs.rsync # needed to have rsync installed on the dummy ssh server
]
++ self'.packages.clan-cli-full.runtimeDependencies
)
}"
ROOT=$(git rev-parse --show-toplevel)
cd "$ROOT/pkgs/clan-cli"
# Set up custom git configuration for tests
export GIT_CONFIG_GLOBAL=$(mktemp)
git config --file "$GIT_CONFIG_GLOBAL" user.name "Test User"
git config --file "$GIT_CONFIG_GLOBAL" user.email "test@example.com"
export GIT_CONFIG_SYSTEM=/dev/null
# this disables dynamic dependency loading in clan-cli
export CLAN_NO_DYNAMIC_DEPS=1
jobs=$(nproc)
# Spawning workers in pytest is relatively slow, so we cap the number of
# parallel jobs at 6
jobs="$((jobs > 6 ? 6 : jobs))"
nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -n $jobs -m impure ./clan_cli $@"
# Clean up temporary git config
rm -f "$GIT_CONFIG_GLOBAL"
'';
};
}

View File

@@ -1,10 +0,0 @@
system:
builtins.fetchurl {
url = "https://git.clan.lol/clan/test-fixtures/raw/commit/4a2bc56d886578124b05060d3fb7eddc38c019f8/nixos-vm-facter-json/${system}.json";
sha256 =
{
aarch64-linux = "sha256:1rlfymk03rmfkm2qgrc8l5kj5i20srx79n1y1h4nzlpwaz0j7hh2";
x86_64-linux = "sha256:16myh0ll2gdwsiwkjw5ba4dl23ppwbsanxx214863j7nvzx42pws";
}
.${system};
}

View File

@@ -18,23 +18,27 @@
fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";
imports = [
self.nixosModules.test-install-machine-without-system
];
imports = [ self.nixosModules.test-install-machine-without-system ];
};
clan.machines.test-install-machine-with-system =
{ pkgs, ... }:
{
# https://git.clan.lol/clan/test-fixtures
facter.reportPath = import ./facter-report.nix pkgs.hostPlatform.system;
facter.reportPath = builtins.fetchurl {
url = "https://git.clan.lol/clan/test-fixtures/raw/commit/4a2bc56d886578124b05060d3fb7eddc38c019f8/nixos-vm-facter-json/${pkgs.hostPlatform.system}.json";
sha256 =
{
aarch64-linux = "sha256:1rlfymk03rmfkm2qgrc8l5kj5i20srx79n1y1h4nzlpwaz0j7hh2";
x86_64-linux = "sha256:16myh0ll2gdwsiwkjw5ba4dl23ppwbsanxx214863j7nvzx42pws";
}
.${pkgs.hostPlatform.system};
};
fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";
imports = [ self.nixosModules.test-install-machine-without-system ];
};
flake.nixosModules = {
test-install-machine-without-system =
{ lib, modulesPath, ... }:
@@ -155,7 +159,6 @@
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.buildPackages.xorg.lndir
(import ./facter-report.nix pkgs.hostPlatform.system)
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
};
@@ -229,7 +232,6 @@
"-i", ssh_conn.ssh_key,
"--option", "store", os.environ['CLAN_TEST_STORE'],
"--update-hardware-config", "nixos-facter",
"--no-persist-state",
]
subprocess.run(clan_cmd, check=True)
@@ -273,7 +275,7 @@
"${self.checks.x86_64-linux.clan-core-for-checks}",
"${closureInfo}"
)
# Set up SSH connection
ssh_conn = setup_ssh_connection(
target,
@@ -299,8 +301,7 @@
"test-install-machine-without-system",
"-i", ssh_conn.ssh_key,
"--option", "store", os.environ['CLAN_TEST_STORE'],
"--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
"--yes"
f"nonrootuser@localhost:{ssh_conn.host_port}"
]
result = subprocess.run(clan_cmd, capture_output=True, cwd=flake_dir)
@@ -324,9 +325,7 @@
"test-install-machine-without-system",
"-i", ssh_conn.ssh_key,
"--option", "store", os.environ['CLAN_TEST_STORE'],
"--target-host",
f"nonrootuser@localhost:{ssh_conn.host_port}",
"--yes"
f"nonrootuser@localhost:{ssh_conn.host_port}"
]
result = subprocess.run(clan_cmd, capture_output=True, cwd=flake_dir)

View File

@@ -35,7 +35,6 @@
pkgs.stdenv.drvPath
pkgs.stdenvNoCC
self.nixosConfigurations.test-morph-machine.config.system.build.toplevel
(import ../installation/facter-report.nix pkgs.hostPlatform.system)
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };

View File

@@ -112,7 +112,6 @@
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.buildPackages.xorg.lndir
(import ../installation/facter-report.nix pkgs.hostPlatform.system)
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
};

View File

@@ -1,62 +0,0 @@
{ ... }:
let
error = builtins.throw ''
###############################################################################
# #
# Clan modules (clanModules) have been deprecated and removed in favor of #
# Clan services! #
# #
# Refer to https://docs.clan.lol/guides/migrations/migrate-inventory-services #
# for migration instructions. #
# #
###############################################################################
'';
modnames = [
"admin"
"borgbackup"
"borgbackup-static"
"deltachat"
"disk-id"
"dyndns"
"ergochat"
"garage"
"heisenbridge"
"iwd"
"localbackup"
"localsend"
"matrix-synapse"
"moonlight"
"mumble"
"nginx"
"packages"
"postgresql"
"root-password"
"single-disk"
"sshd"
"state-version"
"static-hosts"
"sunshine"
"syncthing"
"syncthing-static-peers"
"thelounge"
"trusted-nix-caches"
"user-password"
"vaultwarden"
"xfce"
"zerotier-static-peers"
"zt-tcp-relay"
];
in
{
flake.clanModules = builtins.listToAttrs (
map (name: {
inherit name;
value = error;
}) modnames
);
}

View File

@@ -1,55 +0,0 @@
# We don't have a way of specifying dependencies between clanServices for now.
# When it gets added, this file should be removed and the users module used instead.
{
roles.default.perInstance =
{ ... }:
{
nixosModule =
{
config,
pkgs,
...
}:
{
users.mutableUsers = false;
users.users.root.hashedPasswordFile =
config.clan.core.vars.generators.root-password.files.password-hash.path;
clan.core.vars.generators.root-password = {
files.password-hash.neededFor = "users";
files.password.deploy = false;
runtimeInputs = [
pkgs.coreutils
pkgs.mkpasswd
pkgs.xkcdpass
];
prompts.password.display = {
group = "Root User";
label = "Password";
required = false;
helperText = ''
Your password will be encrypted and stored securely using the secret store you've configured.
'';
};
prompts.password.type = "hidden";
prompts.password.persist = true;
prompts.password.description = "Leave empty to generate automatically";
script = ''
prompt_value="$(cat "$prompts"/password)"
if [[ -n "''${prompt_value-}" ]]; then
echo "$prompt_value" | tr -d "\n" > "$out"/password
else
xkcdpass --numwords 5 --delimiter - --count 1 | tr -d "\n" > "$out"/password
fi
mkpasswd -s -m sha-512 < "$out"/password | tr -d "\n" > "$out"/password-hash
'';
};
};
};
}

View File

@@ -1,32 +0,0 @@
This service sets up a certificate authority (CA) that can issue certificates to
other machines in your clan. For this, the `ca` role is used.
It additionally provides a `default` role that can be applied to all machines
in your clan and will make sure they trust your CA.
## Example Usage
The following configuration would add a CA for the top level domain `.foo`. If
the machine `server` now hosts a webservice at `https://something.foo`, it will
get a certificate from `ca` which is valid inside your clan. The machine
`client` will trust this certificate if it makes a request to
`https://something.foo`.
This clan service can be combined with the `coredns` service for easy-to-deploy,
SSL-secured clan-internal service hosting.
```nix
inventory = {
machines.ca = { };
machines.client = { };
machines.server = { };
instances."certificates" = {
module.name = "certificates";
module.input = "self";
roles.ca.machines.ca.settings.tlds = [ "foo" ];
roles.default.machines.client = { };
roles.default.machines.server = { };
};
};
```

View File

@@ -1,245 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "certificates";
manifest.description = "Sets up a certificates internal to your Clan";
manifest.categories = [ "Network" ];
manifest.readme = builtins.readFile ./README.md;
roles.ca = {
interface =
{ lib, ... }:
{
options.acmeEmail = lib.mkOption {
type = lib.types.str;
default = "none@none.tld";
description = ''
Email address for account creation and correspondence from the CA.
It is recommended to use the same email for all certs to avoid account
creation limits.
'';
};
options.tlds = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "Top level domain for this CA. Certificates will be issued and trusted for *.<tld>";
};
options.expire = lib.mkOption {
type = lib.types.nullOr lib.types.str;
description = "When the certificate should expire.";
default = "8760h";
example = "8760h";
};
};
perInstance =
{ settings, ... }:
{
nixosModule =
{
config,
pkgs,
lib,
...
}:
let
domains = map (tld: "ca.${tld}") settings.tlds;
in
{
security.acme.defaults.email = settings.acmeEmail;
security.acme = {
certs = builtins.listToAttrs (
map (domain: {
name = domain;
value = {
server = "https://${domain}:1443/acme/acme/directory";
};
}) domains
);
};
networking.firewall.allowedTCPPorts = [
80
443
];
services.nginx = {
enable = true;
recommendedProxySettings = true;
virtualHosts = builtins.listToAttrs (
map (domain: {
name = domain;
value = {
addSSL = true;
enableACME = true;
locations."/".proxyPass = "https://localhost:1443";
locations."= /ca.crt".alias =
config.clan.core.vars.generators.step-intermediate-cert.files."intermediate.crt".path;
};
}) domains
);
};
clan.core.vars.generators = {
# Intermediate key generator
"step-intermediate-key" = {
files."intermediate.key" = {
secret = true;
deploy = true;
owner = "step-ca";
group = "step-ca";
};
runtimeInputs = [ pkgs.step-cli ];
script = ''
step crypto keypair --kty EC --curve P-256 --no-password --insecure $out/intermediate.pub $out/intermediate.key
'';
};
# Intermediate certificate generator
"step-intermediate-cert" = {
files."intermediate.crt".secret = false;
dependencies = [
"step-ca"
"step-intermediate-key"
];
runtimeInputs = [ pkgs.step-cli ];
script = ''
# Create intermediate certificate
step certificate create \
--ca $in/step-ca/ca.crt \
--ca-key $in/step-ca/ca.key \
--ca-password-file /dev/null \
--key $in/step-intermediate-key/intermediate.key \
--template ${pkgs.writeText "intermediate.tmpl" ''
{
"subject": {{ toJson .Subject }},
"keyUsage": ["certSign", "crlSign"],
"basicConstraints": {
"isCA": true,
"maxPathLen": 0
},
"nameConstraints": {
"critical": true,
"permittedDNSDomains": [${
(lib.strings.concatStringsSep "," (map (tld: ''"${tld}"'') settings.tlds))
}]
}
}
''} ${lib.optionalString (settings.expire != null) "--not-after ${settings.expire}"} \
--not-before=-12h \
--no-password --insecure \
"Clan Intermediate CA" \
$out/intermediate.crt
'';
};
};
services.step-ca = {
enable = true;
intermediatePasswordFile = "/dev/null";
address = "0.0.0.0";
port = 1443;
settings = {
root = config.clan.core.vars.generators.step-ca.files."ca.crt".path;
crt = config.clan.core.vars.generators.step-intermediate-cert.files."intermediate.crt".path;
key = config.clan.core.vars.generators.step-intermediate-key.files."intermediate.key".path;
dnsNames = domains;
logger.format = "text";
db = {
type = "badger";
dataSource = "/var/lib/step-ca/db";
};
authority = {
provisioners = [
{
type = "ACME";
name = "acme";
forceCN = true;
}
];
claims = {
maxTLSCertDuration = "2160h";
defaultTLSCertDuration = "2160h";
};
backdate = "1m0s";
};
tls = {
cipherSuites = [
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
];
minVersion = 1.2;
maxVersion = 1.3;
renegotiation = false;
};
};
};
};
};
};
# Empty role, so we can add non-CA machines to the instance to trust the CA
roles.default = {
interface =
{ lib, ... }:
{
options.acmeEmail = lib.mkOption {
type = lib.types.str;
default = "none@none.tld";
description = ''
Email address for account creation and correspondence from the CA.
It is recommended to use the same email for all certs to avoid account
creation limits.
'';
};
};
perInstance =
{ settings, ... }:
{
nixosModule.security.acme.defaults.email = settings.acmeEmail;
};
};
# All machines (independent of role) will trust the CA
perMachine.nixosModule =
{ pkgs, config, ... }:
{
# Root CA generator
clan.core.vars.generators = {
"step-ca" = {
share = true;
files."ca.key" = {
secret = true;
deploy = false;
};
files."ca.crt".secret = false;
runtimeInputs = [ pkgs.step-cli ];
script = ''
step certificate create --template ${pkgs.writeText "root.tmpl" ''
{
"subject": {{ toJson .Subject }},
"issuer": {{ toJson .Subject }},
"keyUsage": ["certSign", "crlSign"],
"basicConstraints": {
"isCA": true,
"maxPathLen": 1
}
}
''} "Clan Root CA" $out/ca.crt $out/ca.key \
--kty EC --curve P-256 \
--not-after=8760h \
--not-before=-12h \
--no-password --insecure
'';
};
};
security.pki.certificateFiles = [ config.clan.core.vars.generators."step-ca".files."ca.crt".path ];
environment.systemPackages = [ pkgs.openssl ];
security.acme.acceptTerms = true;
};
}

View File

@@ -1,21 +0,0 @@
{
self,
lib,
...
}:
let
module = lib.modules.importApply ./default.nix {
inherit (self) packages;
};
in
{
clan.modules.certificates = module;
perSystem =
{ ... }:
{
clan.nixosTests.certificates = {
imports = [ ./tests/vm/default.nix ];
clan.modules.certificates = module;
};
};
}

View File

@@ -1,84 +0,0 @@
{
name = "certificates";
clan = {
directory = ./.;
inventory = {
machines.ca = { }; # 192.168.1.1
machines.client = { }; # 192.168.1.2
machines.server = { }; # 192.168.1.3
instances."certificates" = {
module.name = "certificates";
module.input = "self";
roles.ca.machines.ca.settings.tlds = [ "foo" ];
roles.default.machines.client = { };
roles.default.machines.server = { };
};
};
};
nodes =
let
hostConfig = ''
192.168.1.1 ca.foo
192.168.1.3 test.foo
'';
in
{
client.networking.extraHosts = hostConfig;
ca.networking.extraHosts = hostConfig;
server = {
networking.extraHosts = hostConfig;
# TODO: Could this be set automatically?
# I would like to get this information from the coredns module, but we
# cannot model dependencies yet
security.acme.certs."test.foo".server = "https://ca.foo/acme/acme/directory";
# Host a simple service on 'server', with SSL provided via our CA. 'client'
# should be able to curl it via https and accept the certificates
# presented
networking.firewall.allowedTCPPorts = [
80
443
];
services.nginx = {
enable = true;
virtualHosts."test.foo" = {
enableACME = true;
forceSSL = true;
locations."/" = {
return = "200 'test server response'";
extraConfig = "add_header Content-Type text/plain;";
};
};
};
};
};
testScript = ''
start_all()
import time
time.sleep(3)
ca.succeed("systemctl restart acme-order-renew-ca.foo.service ")
time.sleep(3)
server.succeed("systemctl restart acme-test.foo.service")
# It takes a while for the correct certs to appear (before that, self-signed
# ones are presented by nginx), so we wait for a bit.
client.wait_until_succeeds("curl -v https://test.foo")
# Show certificate information for debugging
client.succeed("openssl s_client -connect test.foo:443 -servername test.foo </dev/null 2>/dev/null | openssl x509 -text -noout 1>&2")
'';
}

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1yd2cden7jav8x4nzx2fwze2fsa5j0qm2m3t7zum765z3u4gj433q7dqj43",
"type": "age"
}
]

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1js225d8jc507sgcg0fdfv2x3xv3asm4ds5c6s4hp37nq8spxu95sc5x3ce",
"type": "age"
}
]

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1nwuh8lc604mnz5r8ku8zswyswnwv02excw237c0cmtlejp7xfp8sdrcwfa",
"type": "age"
}
]

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:6+XilULKRuWtAZ6B8Lj9UqCfi1T6dmqrDqBNXqS4SvBwM1bIWiL6juaT1Q7ByOexzID7tY740gmQBqTey54uLydh8mW0m4ZtUqw=,iv:9kscsrMPBGkutTnxrc5nrc7tQXpzLxw+929pUDKqTu0=,tag:753uIjm8ZRs0xsjiejEY8g==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA1d3kycldZRXhmR0FqTXJp\nWWU0MDBYNmxxbFE5M2xKYm5KWnQ0MXBHNEM4CjN4RFFVcFlkd3pjTFVDQ3Vackdj\nVTVhMWoxdFpsWHp5S1p4L05kYk5LUkkKLS0tIENtZFZZTjY2amFVQmZLZFplQzBC\nZm1vWFI4MXR1ZHIxTTQ5VXdSYUhvOTQKte0bKjXQ0xA8FrpuChjDUvjVqp97D8kT\n3tVh6scdjxW48VSBZP1GRmqcMqCdj75GvJTbWeNEV4PDBW7GI0UW+Q==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:39Z",
"mac": "ENC[AES256_GCM,data:AftMorrH7qX5ctVu5evYHn5h9pC4Mmm2VYaAV8Hy0PKTc777jNsL6DrxFVV3NVqtecpwrzZFWKgzukcdcRJe4veVeBrusmoZYtifH0AWZTEVpVlr2UXYYxCDmNZt1WHfVUo40bT//X6QM0ye6a/2Y1jYPbMbryQNcGmnpk9PDvU=,iv:5nk+d8hzA05LQp7ZHRbIgiENg2Ha6J6YzyducM6zcNU=,tag:dy1hqWVzMu/+fSK57h9ZCA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:jdTuGQUYvT1yXei1RHKsOCsABmMlkcLuziHDVhA7NequZeNu0fSbrJTXQDCHsDGhlYRcjU5EsEDT750xdleXuD3Gs9zWvPVobI4=,iv:YVow3K1j6fzRF9bRfIEpuOkO/nRpku/UQxWNGC+UJQQ=,tag:cNLM5R7uu6QpwPB9K6MYzg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvOVF2WXRSL0NpQzFZR01I\nNU85TGcyQmVDazN1dmpuRFVTZEg5NDRKTGhrCk1IVjFSU1V6WHBVRnFWcHkyVERr\nTjFKbW1mQ2FWOWhjN2VPamMxVEQ5VkkKLS0tIENVUGlhanhuWGtDKzBzRmk2dE4v\nMXZBRXNMa3IrOTZTNHRUWVE3UXEwSWMK2cBLoL/H/Vxd/klVrqVLdX9Mww5j7gw/\nEWc5/hN+km6XoW+DiJxVG4qaJ7qqld6u5ZnKgJT+2h9CfjA04I2akg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:51Z",
"mac": "ENC[AES256_GCM,data:zOBQVM2Ydu4v0+Fw3p3cEU+5+7eKaadV0tKro1JVOxclG1Vs6Myq57nw2eWf5JxIl0ulL+FavPKY26qOQ3aqcGOT3PMRlCda9z+0oSn9Im9bE/DzAGmoH/bp76kFkgTTOCZTMUoqJ+UJqv0qy1BH/92sSSKmYshEX6d1vr5ISrw=,iv:i9ZW4sLxOCan4UokHlySVr1CW39nCTusG4DmEPj/gIw=,tag:iZBDPHDkE3Vt5mFcFu1TPQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:5CJuHcxJMXZJ8GqAeG3BrbWtT1kade4kxgJsn1cRpmr1UgN0ZVYnluPEiBscClNSOzcc6vcrBpfTI3dj1tASKTLP58M+GDBFQDo=,iv:gsK7XqBGkYCoqAvyFlIXuJ27PKSbTmy7f6cgTmT2gow=,tag:qG5KejkBvy9ytfhGXa/Mnw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBxbzVqYkplTzJKN1pwS3VM\naFFIK2VsR3lYUVExYW9ieERBL0tlcFZtVzJRCkpiLzdmWmFlOUZ5QUJ4WkhXZ2tQ\nZm92YXBCV0RpYnIydUdEVTRiamI4bjAKLS0tIG93a2htS1hFcjBOeVFnNCtQTHVr\na2FPYjVGbWtORjJVWXE5bndPU1RWcXMKikMEB7X+kb7OtiyqXn3HRpLYkCdoayDh\n7cjGnplk17q25/lRNHM4JVS5isFfuftCl01enESqkvgq+cwuFwa9DQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:59Z",
"mac": "ENC[AES256_GCM,data:xybV2D0xukZnH2OwRpIugPnS7LN9AbgGKwFioPJc1FQWx9TxMUVDwgMN6V5WrhWkXgF2zP4krtDYpEz4Vq+LbOjcnTUteuCc+7pMHubuRuip7j+M32MH1kuf4bVZuXbCfvm7brGxe83FzjoioLqzA8g/X6Q1q7/ErkNeFjluC3Q=,iv:QEW3EUKSRZY3fbXlP7z+SffWkQeXwMAa5K8RQW7NvPE=,tag:DhFxY7xr7H1Wbd527swD0Q==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,4 +0,0 @@
{
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"type": "age"
}

View File

@@ -1,12 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIBsDCCAVegAwIBAgIQbT1Ivm+uwyf0HNkJfan2BTAKBggqhkjOPQQDAjAXMRUw
EwYDVQQDEwxDbGFuIFJvb3QgQ0EwHhcNMjUwOTAxMjA0MzAzWhcNMjYwOTAyMDg0
MzAzWjAfMR0wGwYDVQQDExRDbGFuIEludGVybWVkaWF0ZSBDQTBZMBMGByqGSM49
AgEGCCqGSM49AwEHA0IABDXCNrUIotju9P1U6JxLV43sOxLlRphQJS4dM+lvjTZc
aQ+HwQg0AHVlQNRwS3JqKrJJtJVyKbZklh6eFaDPoj6jfTB7MA4GA1UdDwEB/wQE
AwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBRKHaccHgP2ccSWVBWN
zGoDdTg7aTAfBgNVHSMEGDAWgBSfsnz4phMJx9su/kgeF/FbZQCBgzAVBgNVHR4B
Af8ECzAJoAcwBYIDZm9vMAoGCCqGSM49BAMCA0cAMEQCICiUDk1zGNzpS/iVKLfW
zUGaCagpn2mCx4xAXQM9UranAiAn68nVYGWjkzhU31wyCAupxOjw7Bt96XXqIAz9
hLLtMA==
-----END CERTIFICATE-----

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:Auonh9fa7jSkld1Zyxw74x5ydj6Xc+0SOgiqumVETNCfner9K96Rmv1PkREuHNGWPsnzyEM3pRT8ijvu3QoKvy9QPCCewyT07Wqe4G74+bk1iMeAHsV3To6kHs6M8OISvE+CmG0+hlLmdfRSabTzyWPLHbOjvFTEEuA5G7xiryacSYOE++eeEHdn+oUDh/IMTcfLjCGMjsXFikx1Hb+ofeRTlCg47+0w4MXVvQkOzQB5V2C694jZXvZ19jd/ioqr8YASz2xatGvqwW6cpZxqOWyZJ0UAj/6yFk6tZWifqVB3wgU=,iv:ITFCrDkeWl4GWCebVq15ei9QmkOLDwUIYojKZ2TU6JU=,tag:8k4iYbCIusUykY79H86WUQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBsT25UbjJTQ2tzbnQyUm9p\neWx1UlZIeVpocnBqUCt0YnFlN2FOU25Lb0hNCmdXUUsyalRTbHRRQ0NLSGc1YllV\nUXRwaENhaXU1WmdnVDE0UWprUUUyeDAKLS0tIHV3dHU3aG5JclM0V3FadzN0SU14\ndFptbEJUNXQ4QVlqbkJ1TjAvdDQwSGsKcKPWUjhK7wzIpdIdksMShF2fpLdDTUBS\nZiU7P1T+3psxad9qhapvU0JrAY+9veFaYVEHha2aN/XKs8HqUcTp3A==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1yd2cden7jav8x4nzx2fwze2fsa5j0qm2m3t7zum765z3u4gj433q7dqj43",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBjZFVteVZwVGVmRE9NT3hG\nNGMyS3FSaXluM1FpeUp6SDVMUEpwYzg5SmdvCkRPU0QyU1JicGNkdlMyQWVkT0k3\nL2YrbDhWeGk4WFhxcUFmTmhZQ0pEQncKLS0tIG85Ui9rKzBJQ2VkMFBUQTMvSTlu\nbm8rZ09Wa24rQkNvTTNtYTZBN3MrZlkK7cjNhlUKZdOrRq/nKUsbUQgNTzX8jO+0\nzADpz6WCMvsJ15xazc10BGh03OtdMWl5tcoWMaZ71HWtI9Gip5DH0w==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:42Z",
"mac": "ENC[AES256_GCM,data:9xlO5Yis8DG/y8GjvP63NltD4xEL7zqdHL2cQE8gAoh/ZamAmK5ZL0ld80mB3eIYEPKZYvmUYI4Lkrge2ZdqyDoubrW+eJ3dxn9+StxA9FzXYwUE0t+bbsNJfOOp/kDojf060qLGsu0kAGKd2ca4WiDccR0Cieky335C7Zzhi/Q=,iv:bWQ4wr0CJHSN+6ipUbkYTDWZJyFQjDKszfpVX9EEUsY=,tag:kADIFgJBEGCvr5fPbbdEDA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1,10 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIBcTCCARigAwIBAgIRAIix99+AE7Y+uyiLGaRHEhUwCgYIKoZIzj0EAwIwFzEV
MBMGA1UEAxMMQ2xhbiBSb290IENBMB4XDTI1MDkwMTIwNDI1N1oXDTI2MDkwMjA4
NDI1N1owFzEVMBMGA1UEAxMMQ2xhbiBSb290IENBMFkwEwYHKoZIzj0CAQYIKoZI
zj0DAQcDQgAEk7nn9kzxI+xkRmNMlxD+7T78UqV3aqus0foJh6uu1CHC+XaebMcw
JN95nAe3oYA3yZG6Mnq9nCxsYha4EhzGYqNFMEMwDgYDVR0PAQH/BAQDAgEGMBIG
A1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFJ+yfPimEwnH2y7+SB4X8VtlAIGD
MAoGCCqGSM49BAMCA0cAMEQCIBId/CcbT5MPFL90xa+XQz+gVTdRwsu6Bg7ehMso
Bj0oAiBjSlttd5yeuZGXBm+O0Gl+WdKV60QlrWutNewXFS4UpQ==
-----END CERTIFICATE-----

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:PnEXteU3I7U0OKgE+oR3xjHdLWYTpJjM/jlzxtGU0uP2pUBuQv3LxtEz+cP0ZsafHLNq2iNJ7xpUEE0g4d3M296S56oSocK3fREWBiJFiaC7SAEUiil1l3UCwHn7LzmdEmn8Kq7T+FK89wwqtVWIASLo2gZC/yHE5eEanEATTchGLSNiHJRzZ8n0Ekm8EFUA6czOqA5nPQHaSmeLzu1g80lSSi1ICly6dJksa6DVucwOyVFYFEeq8Dfyc1eyP8L1ee0D7QFYBMduYOXTKPtNnyDmdaQMj7cMMvE7fn04idIiAqw=,iv:nvLmAfFk2GXnnUy+Afr648R60Ou13eu9UKykkiA8Y+4=,tag:lTTAxfG0EDCU6u7xlW6xSQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBEMjNWUm5NbktQeTRWRjJE\nWWFZc2Rsa3I5aitPSno1WnhORENNcng5OHprCjNUQVhBVHFBcWFjaW5UdmxKTnZw\nQlI4MDk5Wkp0RElCeWgzZ2dFQkF2dkkKLS0tIDVreTkydnJ0RDdHSHlQeVV6bGlP\nTmpJOVBSb2dkVS9TZG5SRmFjdnQ1b3cKQ5XvwH1jD4XPVs5RzOotBDq8kiE6S5k2\nDBv6ugjsM5qV7/oGP9H69aSB4jKPZjEn3yiNw++Oorc8uXd5kSGh7w==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:43:00Z",
"mac": "ENC[AES256_GCM,data:3jFf66UyZUWEtPdPu809LCS3K/Hc6zbnluystl3eXS+KGI+dCoYmN9hQruRNBRxf6jli2RIlArmmEPBDQVt67gG/qugTdT12krWnYAZ78iocmOnkf44fWxn/pqVnn4JYpjEYRgy8ueGDnUkwvpGWVZpcXw5659YeDQuYOJ2mq0U=,iv:3k7fBPrABdLItQ2Z+Mx8Nx0eIEKo93zG/23K+Q5Hl3I=,tag:aehAObdx//DEjbKlOeM7iQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../../../sops/users/admin

View File

@@ -1,68 +0,0 @@
This module makes it easy to host clan-internal services that can be resolved
inside your VPN. It allows defining a custom top-level domain (e.g. `.clan`)
and exposing endpoints from a machine to others, which will then be
accessible under `http://<service>.clan` in your browser.
The service consists of two roles:
- A `server` role: This is the DNS-server that will be queried when trying to
resolve clan-internal services. It defines the top-level domain.
- A `default` role: This does two things. First, it sets up the nameservers so
that clan-internal queries are resolved via the `server` machine, while
external queries are resolved as normal via DHCP. Second, it allows exposing
services (see example below).
## Example Usage
Here the machine `dnsserver` is designated as internal DNS-server for the TLD
`.foo`. `server01` will host an application that shall be reachable at
`http://one.foo` and `server02` is going to be reachable at `http://two.foo`.
`client` is any other machine that is part of the clan but does not host any
services.
When `client` tries to resolve `http://one.foo`, the DNS query will be
routed to `dnsserver`, which will answer with `192.168.1.3`. If it tries to
resolve some external domain (e.g. `https://clan.lol`), the query will not be
routed to `dnsserver` but resolved as before, via the nameservers advertised by
DHCP.
```nix
inventory = {
machines = {
dnsserver = { }; # 192.168.1.2
server01 = { }; # 192.168.1.3
server02 = { }; # 192.168.1.4
client = { }; # 192.168.1.5
};
instances = {
coredns = {
module.name = "@clan/coredns";
module.input = "self";
# Add the default role to all machines, including `client`
roles.default.tags.all = { };
# DNS server
roles.server.machines."dnsserver".settings = {
ip = "192.168.1.2";
tld = "foo";
};
# First service
roles.default.machines."server01".settings = {
ip = "192.168.1.3";
services = [ "one" ];
};
# Second service
roles.default.machines."server02".settings = {
ip = "192.168.1.4";
services = [ "two" ];
};
};
};
};
```

View File

@@ -1,176 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "coredns";
manifest.description = "Clan-internal DNS and service exposure";
manifest.categories = [ "Network" ];
manifest.readme = builtins.readFile ./README.md;
roles.server = {
interface =
{ lib, ... }:
{
options.tld = lib.mkOption {
type = lib.types.str;
default = "clan";
description = ''
Top-level domain for this instance. All services below this will be
resolved internally.
'';
};
options.ip = lib.mkOption {
type = lib.types.str;
# TODO: Set a default
description = "IP for the DNS to listen on";
};
options.dnsPort = lib.mkOption {
type = lib.types.int;
default = 1053;
description = "Port of the clan-internal DNS server";
};
};
perInstance =
{
roles,
settings,
...
}:
{
nixosModule =
{
lib,
pkgs,
...
}:
{
networking.firewall.allowedTCPPorts = [ settings.dnsPort ];
networking.firewall.allowedUDPPorts = [ settings.dnsPort ];
services.coredns =
let
# Get all service entries for one host
hostServiceEntries =
host:
lib.strings.concatStringsSep "\n" (
map (
service: "${service} IN A ${roles.default.machines.${host}.settings.ip} ; ${host}"
) roles.default.machines.${host}.settings.services
);
zonefile = pkgs.writeTextFile {
name = "db.${settings.tld}";
text = ''
$TTL 3600
@ IN SOA ns.${settings.tld}. admin.${settings.tld}. 1 7200 3600 1209600 3600
IN NS ns.${settings.tld}.
ns IN A ${settings.ip} ; DNS server
''
+ (lib.strings.concatStringsSep "\n" (
map (host: hostServiceEntries host) (lib.attrNames roles.default.machines)
));
};
in
{
enable = true;
config =
let
dnsPort = builtins.toString settings.dnsPort;
in
''
.:${dnsPort} {
forward . 1.1.1.1
cache 30
}
${settings.tld}:${dnsPort} {
file ${zonefile}
}
'';
};
};
};
};
roles.default = {
interface =
{ lib, ... }:
{
options.services = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = ''
Service endpoints this host exposes (without TLD). Each entry will
be resolved to <entry>.<tld> using the configured top-level domain.
'';
};
options.ip = lib.mkOption {
type = lib.types.str;
# TODO: Set a default
description = "IP on which the services will listen";
};
options.dnsPort = lib.mkOption {
type = lib.types.int;
default = 1053;
description = "Port of the clan-internal DNS server";
};
};
perInstance =
{ roles, settings, ... }:
{
nixosModule =
{ lib, ... }:
{
networking.nameservers = map (m: "127.0.0.1:5353#${roles.server.machines.${m}.settings.tld}") (
lib.attrNames roles.server.machines
);
services.resolved.domains = map (m: "~${roles.server.machines.${m}.settings.tld}") (
lib.attrNames roles.server.machines
);
services.unbound = {
enable = true;
settings = {
server = {
port = 5353;
verbosity = 2;
interface = [ "127.0.0.1" ];
access-control = [ "127.0.0.0/8 allow" ];
do-not-query-localhost = "no";
domain-insecure = map (m: "${roles.server.machines.${m}.settings.tld}.") (
lib.attrNames roles.server.machines
);
};
# Default: forward everything else to DHCP-provided resolvers
forward-zone = [
{
name = ".";
forward-addr = "127.0.0.53@53"; # Forward to systemd-resolved
}
];
stub-zone = map (m: {
name = "${roles.server.machines.${m}.settings.tld}.";
stub-addr = "${roles.server.machines.${m}.settings.ip}@${builtins.toString settings.dnsPort}";
}) (lib.attrNames roles.server.machines);
};
};
};
};
};
}
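
Note on the zonefile assembly in the `server` role above: with the README's example inventory (TLD `foo`, DNS server at `192.168.1.2`, `server01` exposing `one` at `192.168.1.3`, `server02` exposing `two` at `192.168.1.4`), the generated `db.foo` should come out roughly as follows (whitespace aside):

```
$TTL 3600
@ IN SOA ns.foo. admin.foo. 1 7200 3600 1209600 3600
  IN NS ns.foo.
ns IN A 192.168.1.2 ; DNS server
one IN A 192.168.1.3 ; server01
two IN A 192.168.1.4 ; server02
```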

View File

@@ -1,110 +0,0 @@
{
...
}:
{
name = "coredns";
clan = {
directory = ./.;
test.useContainers = true;
inventory = {
machines = {
dns = { }; # 192.168.1.2
server01 = { }; # 192.168.1.3
server02 = { }; # 192.168.1.4
client = { }; # 192.168.1.1
};
instances = {
coredns = {
module.name = "@clan/coredns";
module.input = "self";
roles.default.tags.all = { };
# First service
roles.default.machines."server01".settings = {
ip = "192.168.1.3";
services = [ "one" ];
};
# Second service
roles.default.machines."server02".settings = {
ip = "192.168.1.4";
services = [ "two" ];
};
# DNS server
roles.server.machines."dns".settings = {
ip = "192.168.1.2";
tld = "foo";
};
};
};
};
};
nodes = {
dns =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.net-tools ];
};
client =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.net-tools ];
};
server01 = {
services.nginx = {
enable = true;
virtualHosts."one.foo" = {
locations."/" = {
return = "200 'test server response one'";
extraConfig = "add_header Content-Type text/plain;";
};
};
};
};
server02 = {
services.nginx = {
enable = true;
virtualHosts."two.foo" = {
locations."/" = {
return = "200 'test server response two'";
extraConfig = "add_header Content-Type text/plain;";
};
};
};
};
};
testScript = ''
import json
start_all()
machines = [server01, server02, dns, client]
for m in machines:
m.systemctl("start network-online.target")
for m in machines:
m.wait_for_unit("network-online.target")
# This should work, but is broken in tests, I think. Instead we dig directly
# client.succeed("curl -k -v http://one.foo")
# client.succeed("curl -k -v http://two.foo")
answer = client.succeed("dig @192.168.1.2 -p 1053 one.foo")
assert "192.168.1.3" in answer, "IP not found"
answer = client.succeed("dig @192.168.1.2 -p 1053 two.foo")
assert "192.168.1.4" in answer, "IP not found"
'';
}

View File

@@ -1,4 +0,0 @@
{
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"type": "age"
}

View File

@@ -18,4 +18,11 @@
imports = map (name: ./. + "/${name}/flake-module.nix") validModuleDirs;
in
imports;
flake.clanModules = builtins.throw ''
clanModules have been removed!
Refer to https://docs.clan.lol/guides/migrations/migrate-inventory-services for migration.
'';
}

View File

@@ -10,34 +10,22 @@
lib,
...
}:
let
jsonpath = "/tmp/telegraf.json";
auth_user = "prometheus";
in
{
networking.firewall.interfaces = lib.mkIf (settings.allowAllInterfaces == false) (
builtins.listToAttrs (
map (name: {
inherit name;
value.allowedTCPPorts = [
9273
9990
];
value.allowedTCPPorts = [ 9273 ];
}) settings.interfaces
)
);
networking.firewall.allowedTCPPorts = lib.mkIf (settings.allowAllInterfaces == true) [
9273
9990
];
networking.firewall.allowedTCPPorts = lib.mkIf (settings.allowAllInterfaces == true) [ 9273 ];
clan.core.vars.generators."telegraf" = {
files.password.restartUnits = [ "telegraf.service" ];
files.password-env.restartUnits = [ "telegraf.service" ];
files.miniserve-auth.restartUnits = [ "telegraf.service" ];
clan.core.vars.generators."telegraf-password" = {
files.telegraf-password.neededFor = "users";
files.telegraf-password.restartUnits = [ "telegraf.service" ];
runtimeInputs = [
pkgs.coreutils
@@ -47,27 +35,16 @@
script = ''
PASSWORD=$(xkcdpass --numwords 4 --delimiter - --count 1 | tr -d "\n")
echo "BASIC_AUTH_PWD=$PASSWORD" > "$out"/password-env
echo "${auth_user}:$PASSWORD" > "$out"/miniserve-auth
echo "$PASSWORD" | tr -d "\n" > "$out"/password
echo "BASIC_AUTH_PWD=$PASSWORD" > "$out"/telegraf-password
'';
};
systemd.services.telegraf-json = {
enable = true;
wantedBy = [ "multi-user.target" ];
after = [ "telegraf.service" ];
wants = [ "telegraf.service" ];
serviceConfig = {
Restart = "on-failure";
};
script = "${pkgs.miniserve}/bin/miniserve -p 9990 ${jsonpath} --auth-file ${config.clan.core.vars.generators.telegraf.files.miniserve-auth.path}";
};
services.telegraf = {
enable = true;
environmentFiles = [
(builtins.toString config.clan.core.vars.generators.telegraf.files.password-env.path)
(builtins.toString
config.clan.core.vars.generators."telegraf-password".files.telegraf-password.path
)
];
extraConfig = {
agent.interval = "60s";
@@ -82,35 +59,25 @@
exec =
let
nixosSystems = pkgs.writeShellScript "current-system" ''
printf "nixos_systems,current_system=%s,booted_system=%s,current_kernel=%s,booted_kernel=%s present=0\n" \
"$(readlink /run/current-system)" "$(readlink /run/booted-system)" \
"$(basename $(echo /run/current-system/kernel-modules/lib/modules/*))" \
"$(basename $(echo /run/booted-system/kernel-modules/lib/modules/*))"
currentSystemScript = pkgs.writeShellScript "current-system" ''
printf "current_system,path=%s present=0\n" $(readlink /run/current-system)
'';
in
[
{
# Expose the path to current-system as metric. We use
# this to check if the machine is up-to-date.
commands = [ nixosSystems ];
commands = [ currentSystemScript ];
data_format = "influx";
}
];
};
# sadly there doesn't seem to be a telegraf http_client output plugin
outputs.prometheus_client = {
listen = ":9273";
metric_version = 2;
basic_username = "${auth_user}";
basic_username = "prometheus";
basic_password = "$${BASIC_AUTH_PWD}";
};
outputs.file = {
files = [ jsonpath ];
data_format = "json";
json_timestamp_units = "1s";
};
};
};
};

View File

@@ -0,0 +1,37 @@
This service automatically generates the `system.stateVersion` of the NixOS
installation.
Possible values:
[system.stateVersion](https://search.nixos.org/options?channel=unstable&show=system.stateVersion&from=0&size=50&sort=relevance&type=packages&query=stateVersion)
## Usage
The following configuration will set `stateVersion` for all machines:
```
inventory.instances = {
state-version = {
module = {
name = "state-version";
input = "clan";
};
roles.default.tags.all = { };
};
};
```
## Migration
If you are already setting `system.stateVersion`, either let the automatic
generation happen, or trigger the generation manually for the machine. The
service will use the specified version if one is already supplied through the
config.
To manually generate the version for a specific machine, run:
```
clan vars generate [MACHINE]
```
If the setting was already set, you can then remove `system.stateVersion` from
your machine configuration. For new machines, just import the service as shown
above.

View File

@@ -0,0 +1,50 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "clan-core/state-version";
manifest.description = "Automatically generate the state version of the nixos installation.";
manifest.categories = [ "System" ];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
perInstance =
{ ... }:
{
nixosModule =
{
config,
lib,
...
}:
let
var = config.clan.core.vars.generators.state-version.files.version or { };
in
{
warnings = [
''
The clan.state-version service is deprecated and will be
removed on 2025-07-15 in favor of a nix option.
Please migrate your configuration to use `clan.core.settings.state-version.enable = true` instead.
''
];
system.stateVersion = lib.mkDefault (lib.removeSuffix "\n" var.value);
clan.core.vars.generators.state-version = {
files.version = {
secret = false;
value = lib.mkDefault config.system.nixos.release;
};
runtimeInputs = [ ];
script = ''
echo -n ${config.system.stateVersion} > "$out"/version
'';
};
};
};
};
}

View File

@@ -3,16 +3,14 @@ let
module = lib.modules.importApply ./default.nix { };
in
{
clan.modules = {
coredns = module;
};
clan.modules.state-version = module;
perSystem =
{ ... }:
{
clan.nixosTests.coredns = {
clan.nixosTests.state-version = {
imports = [ ./tests/vm/default.nix ];
clan.modules."@clan/coredns" = module;
clan.modules."@clan/state-version" = module;
};
};
}

View File

@@ -0,0 +1,22 @@
{ lib, ... }:
{
name = "service-state-version";
clan = {
directory = ./.;
inventory = {
machines.server = { };
instances.default = {
module.name = "@clan/state-version";
module.input = "self";
roles.default.machines."server" = { };
};
};
};
nodes.server = { };
testScript = lib.mkDefault ''
start_all()
'';
}

View File

@@ -17,20 +17,6 @@
};
};
# Deploy user Carol on all machines. Prompt only once and use the
# same password on all machines. (`share = true`)
user-carol = {
module = {
name = "users";
input = "clan";
};
roles.default.tags.all = { };
roles.default.settings = {
user = "carol";
share = true;
};
};
# Deploy user bob only on his laptop. Prompt for a password.
user-bob = {
module = {
@@ -43,44 +29,3 @@
};
}
```
## Migration from `root-password` module
The deprecated `clan.root-password` module has been replaced by the `users` module. Here's how to migrate:
### 1. Update your flake configuration
Replace the `root-password` module import with a `users` service instance:
```nix
# OLD - Remove this from your nixosModules:
imports = [
self.inputs.clan-core.clanModules.root-password
];
# NEW - Add to inventory.instances or machines/flake-module.nix:
instances = {
users-root = {
module.name = "users";
module.input = "clan-core";
roles.default.tags.nixos = { };
roles.default.settings = {
user = "root";
prompt = false; # Set to true if you want to be prompted
groups = [ ];
};
};
};
```
### 2. Migrate vars
The vars structure has changed from `root-password` to `user-password-root`:
```bash
# For each machine, rename the vars directories:
cd vars/per-machine/<machine-name>/
mv root-password user-password-root
mv user-password-root/password-hash user-password-root/user-password-hash
mv user-password-root/password user-password-root/user-password
```

View File

@@ -59,17 +59,6 @@
- "input" - Allows the user to access input devices.
'';
};
share = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = ''
Whether the user should have the same password on all machines.
By default, you will be prompted for a new password for every host,
unless `generate` is set to `true`.
'';
};
};
};
@@ -93,6 +82,7 @@
};
clan.core.vars.generators."user-password-${settings.user}" = {
files.user-password-hash.neededFor = "users";
files.user-password-hash.restartUnits = lib.optional (config.services.userborn.enable) "userborn.service";
files.user-password.deploy = false;
@@ -117,8 +107,6 @@
pkgs.mkpasswd
];
share = settings.share;
script =
(
if settings.prompt then

View File

@@ -1,5 +1,6 @@
#!/usr/bin/env python3
"""IPv6 address allocator for WireGuard networks.
"""
IPv6 address allocator for WireGuard networks.
Network layout:
- Base network: /40 ULA prefix (fd00::/8 + 32 bits from hash)
@@ -12,11 +13,6 @@ import ipaddress
import sys
from pathlib import Path
# Constants for argument count validation
MIN_ARGS_BASE = 4
MIN_ARGS_CONTROLLER = 5
MIN_ARGS_PEER = 5
def hash_string(s: str) -> str:
"""Generate SHA256 hash of string."""
@@ -24,7 +20,8 @@ def hash_string(s: str) -> str:
def generate_ula_prefix(instance_name: str) -> ipaddress.IPv6Network:
"""Generate a /40 ULA prefix from instance name.
"""
Generate a /40 ULA prefix from instance name.
Format: fd{32-bit hash}/40
This gives us fd00:0000:0000::/40 through fdff:ffff:ff00::/40
@@ -44,14 +41,15 @@ def generate_ula_prefix(instance_name: str) -> ipaddress.IPv6Network:
prefix = f"fd{prefix_bits:08x}"
prefix_formatted = f"{prefix[:4]}:{prefix[4:8]}::/40"
return ipaddress.IPv6Network(prefix_formatted)
network = ipaddress.IPv6Network(prefix_formatted)
return network
def generate_controller_subnet(
base_network: ipaddress.IPv6Network,
controller_name: str,
base_network: ipaddress.IPv6Network, controller_name: str
) -> ipaddress.IPv6Network:
"""Generate a /56 subnet for a controller from the base /40 network.
"""
Generate a /56 subnet for a controller from the base /40 network.
We have 16 bits (40 to 56) to allocate controller subnets.
This allows for 65,536 possible controller subnets.
@@ -64,11 +62,14 @@ def generate_controller_subnet(
# The controller subnet is at base_prefix:controller_id::/56
base_int = int(base_network.network_address)
controller_subnet_int = base_int | (controller_id << (128 - 56))
return ipaddress.IPv6Network((controller_subnet_int, 56))
controller_subnet = ipaddress.IPv6Network((controller_subnet_int, 56))
return controller_subnet
def generate_peer_suffix(peer_name: str) -> str:
"""Generate a unique 64-bit host suffix for a peer.
"""
Generate a unique 64-bit host suffix for a peer.
This suffix will be used in all controller subnets to create unique addresses.
Format: :xxxx:xxxx:xxxx:xxxx (64 bits)
@@ -78,13 +79,14 @@ def generate_peer_suffix(peer_name: str) -> str:
suffix_bits = h[:16]
# Format as IPv6 suffix without leading colon
return f"{suffix_bits[0:4]}:{suffix_bits[4:8]}:{suffix_bits[8:12]}:{suffix_bits[12:16]}"
suffix = f"{suffix_bits[0:4]}:{suffix_bits[4:8]}:{suffix_bits[8:12]}:{suffix_bits[12:16]}"
return suffix
def main() -> None:
if len(sys.argv) < MIN_ARGS_BASE:
if len(sys.argv) < 4:
print(
"Usage: ipv6_allocator.py <output_dir> <instance_name> <controller|peer> <machine_name>",
"Usage: ipv6_allocator.py <output_dir> <instance_name> <controller|peer> <machine_name>"
)
sys.exit(1)
@@ -96,7 +98,7 @@ def main() -> None:
base_network = generate_ula_prefix(instance_name)
if node_type == "controller":
if len(sys.argv) < MIN_ARGS_CONTROLLER:
if len(sys.argv) < 5:
print("Controller name required")
sys.exit(1)
@@ -112,7 +114,7 @@ def main() -> None:
(output_dir / "prefix").write_text(prefix_str)
elif node_type == "peer":
if len(sys.argv) < MIN_ARGS_PEER:
if len(sys.argv) < 5:
print("Peer name required")
sys.exit(1)
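
For readers following the allocator changes above, here is a minimal sketch of how the derivations compose. It assumes `hash_string` is a plain SHA-256 hex digest (as its docstring states) and that `prefix_bits` and `controller_id` are taken from the leading hash bits, which these hunks imply but do not show:

```python
import hashlib
import ipaddress

def hash_string(s: str) -> str:
    # Matches the docstring above: SHA256 hash of the input string.
    return hashlib.sha256(s.encode()).hexdigest()

# /40 ULA base network, as in generate_ula_prefix.
h = hash_string("my-wg-instance")          # hypothetical instance name
prefix_bits = int(h[:8], 16)               # assumption: first 32 hash bits
prefix = f"fd{prefix_bits:08x}"
base = ipaddress.IPv6Network(f"{prefix[:4]}:{prefix[4:8]}::/40")

# /56 controller subnet, as in generate_controller_subnet (16 bits, 40..56).
controller_id = int(hash_string("controller1")[:4], 16)  # assumption
subnet_int = int(base.network_address) | (controller_id << (128 - 56))
controller_subnet = ipaddress.IPv6Network((subnet_int, 56))

# 64-bit peer suffix, as in generate_peer_suffix.
sh = hash_string("peer1")[:16]
peer_suffix = f"{sh[0:4]}:{sh[4:8]}:{sh[8:12]}:{sh[12:16]}"

print(base, controller_subnet, peer_suffix)
```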

devFlake/flake.lock (generated, 24 changed lines)
View File

@@ -3,10 +3,10 @@
"clan-core-for-checks": {
"flake": false,
"locked": {
"lastModified": 1756166884,
"narHash": "sha256-skg4rwpbCjhpLlrv/Pndd43FoEgrJz98WARtGLhCSzo=",
"lastModified": 1755093452,
"narHash": "sha256-NKBss7QtNnOqYVyJmYCgaCvYZK0mpQTQc9fLgE1mGyk=",
"ref": "main",
"rev": "f7414d7e6e58709af27b6fe16eb530278e81eaaf",
"rev": "7e97734797f0c6bd3c2d3a51cf54a2a6b371c222",
"shallow": true,
"type": "git",
"url": "https://git.clan.lol/clan/clan-core"
@@ -84,11 +84,11 @@
},
"nixpkgs-dev": {
"locked": {
"lastModified": 1757007868,
"narHash": "sha256-zekS8JUSNEiphLnjWJBFoaX4Kb8GxiiD6FvoKZI+8b0=",
"lastModified": 1755375481,
"narHash": "sha256-43PgCQFgFD1nM/7dncytV0c5heNHe/gXrEud18ZWcZU=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "36420cc41abb467f89082432cfe139f5fdbdcea3",
"rev": "35f1742e4f1470817ff8203185e2ce0359947f12",
"type": "github"
},
"original": {
@@ -107,11 +107,11 @@
]
},
"locked": {
"lastModified": 1756738487,
"narHash": "sha256-8QX7Ab5CcICp7zktL47VQVS+QeaU4YDNAjzty7l7TQE=",
"lastModified": 1754869408,
"narHash": "sha256-G1zNuxiCDfqNQVoL9j5v+ZYfUER7AI158ev98/JC8LI=",
"owner": "NuschtOS",
"repo": "search",
"rev": "5feeaeefb571e6ca2700888b944f436f7c05149b",
"rev": "2f5478267557a0f7a70d953b6c0867a5b4282739",
"type": "github"
},
"original": {
@@ -165,11 +165,11 @@
"nixpkgs": []
},
"locked": {
"lastModified": 1756662192,
"narHash": "sha256-F1oFfV51AE259I85av+MAia221XwMHCOtZCMcZLK2Jk=",
"lastModified": 1754847726,
"narHash": "sha256-2vX8QjO5lRsDbNYvN9hVHXLU6oMl+V/PsmIiJREG4rE=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "1aabc6c05ccbcbf4a635fb7a90400e44282f61c4",
"rev": "7d81f6fb2e19bf84f1c65135d1060d829fae2408",
"type": "github"
},
"original": {

View File

@@ -33,6 +33,7 @@
self'.packages.tea-create-pr
self'.packages.merge-after-ci
self'.packages.pending-reviews
self'.packages.agit
# treefmt with config defined in ./flake-parts/formatting.nix
config.treefmt.build.wrapper
];
@@ -45,7 +46,7 @@
ln -sfT ${inputs.nix-select} "$PRJ_ROOT/pkgs/clan-cli/clan_lib/select"
# Generate classes.py from schemas
${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.clanSchemaJson}/schema.json $PRJ_ROOT/pkgs/clan-cli/clan_lib/nix_models/clan.py
${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.clan-schema-abstract}/schema.json $PRJ_ROOT/pkgs/clan-cli/clan_lib/nix_models/clan.py
'';
};
};

docs/.gitignore (vendored, 2 changed lines)
View File

@@ -1,5 +1,5 @@
/site/reference
/site/static
/site/options
/site/options-page
/site/openapi.json
!/site/static/extra.css

View File

@@ -1,11 +1,13 @@
{
lib,
config,
...
}:
let
suffix = config.clan.core.vars.generators.disk-id.files.diskId.value;
mirrorBoot = idx: {
# suffix is to prevent disk name collisions
name = idx;
name = idx + suffix;
type = "disk";
device = "/dev/disk/by-id/${idx}";
content = {

View File

@@ -1,11 +1,13 @@
{
lib,
config,
...
}:
let
suffix = config.clan.core.vars.generators.disk-id.files.diskId.value;
mirrorBoot = idx: {
# suffix is to prevent disk name collisions
name = idx;
name = idx + suffix;
type = "disk";
device = "/dev/disk/by-id/${idx}";
content = {

View File

@@ -2,11 +2,11 @@ site_name: Clan Documentation
site_url: https://docs.clan.lol
repo_url: https://git.clan.lol/clan/clan-core/
repo_name: "_>"
edit_uri: _edit/main/docs/site/
edit_uri: _edit/main/docs/docs/
validation:
omitted_files: warn
absolute_links: ignore
absolute_links: warn
unrecognized_links: warn
markdown_extensions:
@@ -59,15 +59,14 @@ nav:
- Configure Disk Config: guides/getting-started/choose-disk.md
- Update Machine: guides/getting-started/update.md
- Continuous Integration: guides/getting-started/flake-check.md
- Convert Existing NixOS Config: guides/getting-started/convert-flake.md
- ClanServices: guides/clanServices.md
- Using Services: guides/clanServices.md
- Backup & Restore: guides/backups.md
- Disk Encryption: guides/disk-encryption.md
- Age Plugins: guides/age-plugins.md
- Secrets management: guides/secrets.md
- Networking: guides/networking.md
- Target Host: guides/target-host.md
- Zerotier VPN: guides/mesh-vpn.md
- How to disable Secure Boot: guides/secure-boot.md
- Secure Boot: guides/secure-boot.md
- Flake-parts: guides/flake-parts.md
- macOS: guides/macos.md
- Contributing:
@@ -78,7 +77,8 @@ nav:
- Writing a Service Module: guides/services/community.md
- Writing a Disko Template: guides/disko-templates/community.md
- Migrations:
- Migrate from clan modules to services: guides/migrations/migrate-inventory-services.md
- Migrate existing Flakes: guides/migrations/migration-guide.md
- Migrate inventory Services: guides/migrations/migrate-inventory-services.md
- Facts Vars Migration: guides/migrations/migration-facts-vars.md
- Disk id: guides/migrations/disk-id.md
- Concepts:
@@ -88,14 +88,12 @@ nav:
- Templates: concepts/templates.md
- Reference:
- Overview: reference/index.md
- Browse Options: "/options"
- Clan Options: options.md
- Services:
- Overview:
- reference/clanServices/index.md
- reference/clanServices/admin.md
- reference/clanServices/borgbackup.md
- reference/clanServices/certificates.md
- reference/clanServices/coredns.md
- reference/clanServices/data-mesher.md
- reference/clanServices/dyndns.md
- reference/clanServices/emergency-access.md
@@ -108,6 +106,7 @@ nav:
- reference/clanServices/monitoring.md
- reference/clanServices/packages.md
- reference/clanServices/sshd.md
- reference/clanServices/state-version.md
- reference/clanServices/syncthing.md
- reference/clanServices/trusted-nix-caches.md
- reference/clanServices/users.md
@@ -156,7 +155,6 @@ nav:
- 05-deployment-parameters: decisions/05-deployment-parameters.md
- Template: decisions/_template.md
- Glossary: reference/glossary.md
- Browse Options: "/options"
docs_dir: site
site_dir: out
@@ -174,7 +172,6 @@ theme:
- content.code.annotate
- content.code.copy
- content.tabs.link
- content.action.edit
icon:
repo: fontawesome/brands/git
custom_dir: overrides

View File

@@ -54,9 +54,9 @@ pkgs.stdenv.mkDerivation {
chmod -R +w ./site/reference
echo "Generated API documentation in './site/reference/' "
rm -rf ./site/options
cp -r ${docs-options} ./site/options
chmod -R +w ./site/options
rm -r ./site/options-page || true
cp -r ${docs-options} ./site/options-page
chmod -R +w ./site/options-page
mkdir -p ./site/static/asciinema-player
ln -snf ${asciinema-player-js} ./site/static/asciinema-player/asciinema-player.min.js

View File

@@ -25,7 +25,7 @@
serviceModules = self.clan.modules;
baseHref = "/options/";
baseHref = "/options-page/";
getRoles =
module:
@@ -126,7 +126,7 @@
nestedSettingsOption = mkOption {
type = types.raw;
description = ''
See [instances.${name}.roles.${roleName}.settings](${baseHref}?option_scope=0&option=inventory.instances.${name}.roles.${roleName}.settings)
See [instances.${name}.roles.${roleName}.settings](${baseHref}?option_scope=0&option=instances.${name}.roles.${roleName}.settings)
'';
};
settingsOption = mkOption {
@@ -161,42 +161,6 @@
}
];
baseModule =
# Module
{ config, ... }:
{
imports = (import (pkgs.path + "/nixos/modules/module-list.nix"));
nixpkgs.pkgs = pkgs;
clan.core.name = "dummy";
system.stateVersion = config.system.nixos.release;
# Set this to work around a bug where `clan.core.settings.machine.name`
# is forced due to `networking.interfaces` being forced
# somewhere in the nixpkgs options
facter.detected.dhcp.enable = lib.mkForce false;
};
evalClanModules =
let
evaled = lib.evalModules {
class = "nixos";
modules = [
baseModule
{
clan.core.settings.directory = self;
}
self.nixosModules.clanCore
];
};
in
evaled;
coreOptions =
(pkgs.nixosOptionsDoc {
options = (evalClanModules.options).clan.core or { };
warningsAreErrors = true;
transformOptions = self.clanLib.docs.stripStorePathsFromDeclarations;
}).optionsJSON;
in
{
# Uncomment for debugging
@@ -211,17 +175,10 @@
# scopes = mapAttrsToList mkScope serviceModules;
scopes = [
{
inherit baseHref;
name = "Flake Options (clan.nix file)";
name = "Clan";
modules = docModules;
urlPrefix = "https://git.clan.lol/clan/clan-core/src/branch/main/";
}
{
name = "Machine Options (clan.core NixOS options)";
optionsJSON = "${coreOptions}/share/doc/nixos/options.json";
urlPrefix = "https://git.clan.lol/clan/clan-core/src/branch/main/";
}
];
};
};

View File

@@ -1,5 +1,3 @@
"""Module for rendering NixOS options documentation from JSON format."""
# Options are available in the following format:
# https://github.com/nixos/nixpkgs/blob/master/nixos/lib/make-options-doc/default.nix
#
@@ -34,7 +32,7 @@ from typing import Any
from clan_lib.errors import ClanError
from clan_lib.services.modules import (
CategoryInfo,
ModuleManifest,
Frontmatter,
)
# Get environment variables
@@ -48,7 +46,7 @@ CLAN_SERVICE_INTERFACE = os.environ.get("CLAN_SERVICE_INTERFACE")
CLAN_MODULES_VIA_SERVICE = os.environ.get("CLAN_MODULES_VIA_SERVICE")
OUT = os.environ.get("out") # noqa: SIM112
OUT = os.environ.get("out")
def sanitize(text: str) -> str:
@@ -68,7 +66,8 @@ def render_option_header(name: str) -> str:
def join_lines_with_indentation(lines: list[str], indent: int = 4) -> str:
"""Joins multiple lines with a specified number of whitespace characters as indentation.
"""
Joins multiple lines with a specified number of whitespace characters as indentation.
Args:
lines (list of str): The lines of text to join.
@@ -76,7 +75,6 @@ def join_lines_with_indentation(lines: list[str], indent: int = 4) -> str:
Returns:
str: The indented and concatenated string.
"""
# Create the indentation string (e.g., four spaces)
indent_str = " " * indent
@@ -163,10 +161,7 @@ def render_option(
def print_options(
options_file: str,
head: str,
no_options: str,
replace_prefix: str | None = None,
options_file: str, head: str, no_options: str, replace_prefix: str | None = None
) -> str:
res = ""
with (Path(options_file) / "share/doc/nixos/options.json").open() as f:
@@ -175,16 +170,15 @@ def print_options(
res += head if len(options.items()) else no_options
for option_name, info in options.items():
if replace_prefix:
display_name = option_name.replace(replace_prefix + ".", "")
else:
display_name = option_name
option_name = option_name.replace(replace_prefix + ".", "")
res += render_option(display_name, info, 4)
res += render_option(option_name, info, 4)
return res
def module_header(module_name: str) -> str:
return f"# {module_name}\n\n"
def module_header(module_name: str, has_inventory_feature: bool = False) -> str:
indicator = " 🔹" if has_inventory_feature else ""
return f"# {module_name}{indicator}\n\n"
clan_core_descr = """
@@ -242,7 +236,7 @@ def produce_clan_core_docs() -> None:
for submodule_name, split_options in split.items():
outfile = f"{module_name}/{submodule_name}.md"
print(
f"[clan_core.{submodule_name}] Rendering option of: {submodule_name}... {outfile}",
f"[clan_core.{submodule_name}] Rendering option of: {submodule_name}... {outfile}"
)
init_level = 1
root = options_to_tree(split_options, debug=True)
@@ -277,9 +271,56 @@ def produce_clan_core_docs() -> None:
of.write(output)
def render_roles(roles: list[str] | None, module_name: str) -> str:
if roles:
roles_list = "\n".join([f"- `{r}`" for r in roles])
return (
f"""
### Roles
This module can be used via predefined roles
{roles_list}
"""
"""
Every role has its own configuration options, which are each listed below.
For more information, see the [inventory guide](../../concepts/inventory.md).
??? Example
For example the `admin` module adds the following options globally to all machines where it is used.
`clan.admin.allowedkeys`
```nix
clan-core.lib.clan {
inventory.services = {
admin.me = {
roles.default.machines = [ "jon" ];
config.allowedkeys = [ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQD..." ];
};
};
};
```
"""
)
return ""
clan_modules_descr = """
Clan modules are [NixOS modules](https://wiki.nixos.org/wiki/NixOS_modules)
which have been enhanced with additional features provided by Clan, with
certain option types restricted to enable configuration through a graphical
interface.
!!! note "🔹"
Modules with this indicator support the [inventory](../../concepts/inventory.md) feature.
"""
def render_categories(
categories: list[str],
categories_info: dict[str, CategoryInfo],
categories: list[str], categories_info: dict[str, CategoryInfo]
) -> str:
res = """<div style="display: grid; grid-template-columns: repeat(3, 1fr); gap: 10px;">"""
for cat in categories:
@@ -344,10 +385,10 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
# output += f"`clan.modules.{module_name}`\n"
output += f"*{module_info['manifest']['description']}*\n"
fm = Frontmatter("")
# output += "## Categories\n\n"
output += render_categories(
module_info["manifest"]["categories"],
ModuleManifest.categories_info(),
module_info["manifest"]["categories"], fm.categories_info
)
output += f"{module_info['manifest']['readme']}\n"
@@ -356,7 +397,7 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
output += f"The {module_name} module has the following roles:\n\n"
for role_name in module_info["roles"]:
for role_name, _ in module_info["roles"].items():
output += f"- {role_name}\n"
for role_name, role_filename in module_info["roles"].items():
@@ -376,8 +417,35 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
of.write(output)
def build_option_card(module_name: str, frontmatter: Frontmatter) -> str:
"""
Build the overview index card for each reference target option.
"""
def indent_all(text: str, indent_size: int = 4) -> str:
"""
Indent all lines in a string.
"""
indent = " " * indent_size
lines = text.split("\n")
indented_text = indent + ("\n" + indent).join(lines)
return indented_text
def to_md_li(module_name: str, frontmatter: Frontmatter) -> str:
md_li = (
f"""- **[{module_name}](./{"-".join(module_name.split(" "))}.md)**\n\n"""
)
md_li += f"""{indent_all("---", 4)}\n\n"""
fmd = f"\n{frontmatter.description.strip()}" if frontmatter.description else ""
md_li += f"""{indent_all(fmd, 4)}"""
return md_li
return f"{to_md_li(module_name, frontmatter)}\n\n"
def split_options_by_root(options: dict[str, Any]) -> dict[str, dict[str, Any]]:
"""Split the flat dictionary of options into a dict of which each entry will construct complete option trees.
"""
Split the flat dictionary of options into a dict of which each entry will construct complete option trees.
{
"a": { Data }
"a.b": { Data }
@@ -461,7 +529,9 @@ def option_short_name(option_name: str) -> str:
def options_to_tree(options: dict[str, Any], debug: bool = False) -> Option:
"""Convert the options dictionary to a tree structure."""
"""
Convert the options dictionary to a tree structure.
"""
# Helper function to create nested structure
def add_to_tree(path_parts: list[str], info: Any, current_node: Option) -> None:
@@ -513,24 +583,22 @@ def options_to_tree(options: dict[str, Any], debug: bool = False) -> Option:
def options_docs_from_tree(
root: Option,
init_level: int = 1,
prefix: list[str] | None = None,
root: Option, init_level: int = 1, prefix: list[str] | None = None
) -> str:
"""Eender the options from the tree structure.
"""
eender the options from the tree structure.
Args:
root (Option): The root option node.
init_level (int): The initial level of indentation.
prefix (list str): Will be printed as common prefix of all attribute names.
"""
def render_tree(option: Option, level: int = init_level) -> str:
output = ""
should_render = not option.name.startswith("<") and not option.name.startswith(
"_",
"_"
)
if should_render:
# short_name = option_short_name(option.name)
@@ -551,10 +619,11 @@ def options_docs_from_tree(
return output
return render_tree(root)
md = render_tree(root)
return md
if __name__ == "__main__":
if __name__ == "__main__": #
produce_clan_core_docs()
produce_clan_service_author_docs()

View File

@@ -1,33 +1,15 @@
# Auto-included Files
Clan automatically imports specific files from each machine directory and registers them, reducing the need for manual configuration.
Clan automatically imports the following files from a directory and registers them.
## Machine Registration
## Machine registration
Every folder under `machines/{machineName}` is automatically registered as a Clan machine.
Every folder `machines/{machineName}` will be registered automatically as a Clan machine.
!!! info "Files loaded automatically for each machine"
!!! info "Automatically loaded files"
The following files are detected and imported for every Clan machine:
The following files are loaded automatically for each Clan machine:
- [x] `machines/{machineName}/configuration.nix`
Main configuration file for the machine.
- [x] `machines/{machineName}/hardware-configuration.nix`
Hardware-specific configuration generated by NixOS.
- [x] `machines/{machineName}/facter.json`
Contains system facts. Automatically generated — see [nixos-facter](https://clan.lol/blog/nixos-facter/) for details.
- [x] `machines/{machineName}/disko.nix`
Disk layout configuration. See the [disko quickstart](https://github.com/nix-community/disko/blob/master/docs/quickstart.md) for more info.
## Other Auto-included Files
* **`inventory.json`**
Managed by Clan's API.
Merges with `clan.inventory` to extend the inventory.
* **`.clan-flake`**
Sentinel file to be used to locate the root of a Clan repository.
Falls back to `.git`, `.hg`, `.svn`, or `flake.nix` if not found.
- [x] `machines/{machineName}/configuration.nix`
- [x] `machines/{machineName}/hardware-configuration.nix`
- [x] `machines/{machineName}/facter.json` Automatically configured, for further information see [nixos-facter](https://clan.lol/blog/nixos-facter/)
- [x] `machines/{machineName}/disko.nix` Automatically loaded, for further information see the [disko docs](https://github.com/nix-community/disko/blob/master/docs/quickstart.md).

View File

@@ -1,22 +1,16 @@
# Using the Inventory
# Using `clanServices`
Clan's inventory system is a composable way to define and deploy services across
machines.
Clans `clanServices` system is a composable way to define and deploy services across machines.
This guide shows how to **instantiate** a `clanService`, explains how service
definitions are structured in your inventory, and how to pick or create services
from modules exposed by flakes.
This guide shows how to **instantiate** a `clanService`, explains how service definitions are structured in your inventory, and how to pick or create services from modules exposed by flakes.
The term **Multi-host-modules** was introduced previously in the [nixus
repository](https://github.com/infinisil/nixus) and represents a similar
concept.
The term **Multi-host-modules** was introduced previously in the [nixus repository](https://github.com/infinisil/nixus) and represents a similar concept.
______________________________________________________________________
---
## Overview
Services are used in `inventory.instances`, and assigned to *roles* and
*machines* -- meaning you decide which machines run which part of the service.
Services are used in `inventory.instances`, and then they attach to *roles* and *machines* — meaning you decide which machines run which part of the service.
For example:
@@ -24,138 +18,119 @@ For example:
inventory.instances = {
borgbackup = {
roles.client.machines."laptop" = {};
roles.client.machines."workstation" = {};
roles.client.machines."server1" = {};
roles.server.machines."backup-box" = {};
};
}
```
This says: "Run borgbackup as a *client* on my *laptop* and *workstation*, and
as a *server* on *backup-box*". `client` and `server` are roles defined by the
`borgbackup` service.
This says: “Run borgbackup as a *client* on my *laptop* and *server1*, and as a *server* on *backup-box*.”
## Module source specification
Each instance includes a reference to a **module specification** -- this is how
Clan knows which service module to use and where it came from.
Each instance includes a reference to a **module specification** this is how Clan knows which service module to use and where it came from.
Usually one would just use `imports`, but we needed to make the `module source` configurable via the Python API.
By default it is not required to specify the `module`, in which case it defaults to the pre-provided services of clan-core.
It is not required to specify the `module.input` parameter, in which case it
defaults to the pre-provided services of clan-core. In a similar fashion, the
`module.name` parameter can also be omitted, it will default to the name of the
instance.
---
## Override Example
Example of instantiating a `borgbackup` service using `clan-core`:
```nix
inventory.instances = {
borgbackup = { # <- Instance name
# This can be partially/fully specified,
# - If the instance name is not the name of the module
# - If the input is not clan-core
# module = {
# name = "borgbackup"; # Name of the module (optional)
# input = "clan-core"; # The flake input where the service is defined (optional)
# };
# Instance Name: Different name for this 'borgbackup' instance
borgbackup = {
# Since this is instances."borgbackup" the whole `module = { ... }` below is equivalent and optional.
module = {
name = "borgbackup"; # <-- Name of the module (optional)
input = "clan-core"; # <-- The flake input where the service is defined (optional)
};
# Participation of the machines is defined via roles
# Right side needs to be an attribute set. Its purpose will become clear later
roles.client.machines."machine-a" = {};
roles.server.machines."backup-host" = {};
};
}
```
## Module Settings
If you used `clan-core` as an input attribute for your flake:
Each role might expose configurable options. See clan's [clanServices
reference](../reference/clanServices/index.md) for all available options.
```nix
# ↓ module.input = "clan-core"
inputs.clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
```
Settings can be set in per-machine or per-role. The latter is applied to all
machines that are assigned to that role.
## Simplified Example
If only one instance is needed for a service and the service is a clan core service, the `module` definition can be omitted.
```nix
# Simplified way of specifying a single instance
inventory.instances = {
# instance name is `borgbackup` -> clan core module `borgbackup` will be loaded.
borgbackup = {
# Participation of the machines is defined via roles
# Right side needs to be an attribute set. Its purpose will become clear later
roles.client.machines."machine-a" = {};
roles.server.machines."backup-host" = {};
};
}
```
## Configuration Example
Each role might expose configurable options
See clan's [clanServices reference](../reference/clanServices/index.md) for available options
```nix
inventory.instances = {
borgbackup = {
# Settings for 'machine-a'
borgbackup-example = {
module = {
name = "borgbackup";
input = "clan-core";
};
roles.client.machines."machine-a" = {
# 'client' -Settings of 'machine-a'
settings = {
backupFolders = [
/home
/var
];
};
# ---------------------------
};
# Settings for all machines of the role "server"
roles.server.settings = {
directory = "/var/lib/borgbackup";
};
roles.server.machines."backup-host" = {};
};
}
```
## Tags
Tags can be used to assign multiple machines to a role at once. It can be thought of as a grouping mechanism.
For example using the `all` tag for services that you want to be configured on all
your machines is a common pattern.
The following example could be used to backup all your machines to a common
backup server
Multiple members can be defined using tags as follows
```nix
inventory.instances = {
borgbackup = {
# "All" machines are assigned to the borgbackup 'client' role
roles.client.tags = [ "all" ];
# But only one specific machine (backup-host) is assigned to the 'server' role
roles.server.machines."backup-host" = {};
};
}
```
## Sharing additional Nix configuration
Sometimes you need to add custom NixOS configuration alongside your clan
services. The `extraModules` option allows you to include additional NixOS
configuration that is applied for every machine assigned to that role.
There are multiple valid syntaxes for specifying modules:
```nix
inventory.instances = {
borgbackup = {
roles.client = {
# Direct module reference
extraModules = [ ../nixosModules/borgbackup.nix ];
# Or using self (needs to be json serializable)
# See next example, for a workaround.
extraModules = [ self.nixosModules.borgbackup ];
# Or inline module definition, (needs to be json compatible)
extraModules = [
{
# Your module configuration here
# ...
#
# If the module needs to contain non-serializable expressions:
imports = [ ./path/to/non-serializable.nix ];
}
];
borgbackup-example = {
module = {
name = "borgbackup";
input = "clan-core";
};
#
# The 'all' -tag targets all machines
roles.client.tags."all" = {};
# ---------------------------
roles.server.machines."backup-host" = {};
};
}
```
## Picking a clanService
You can use services exposed by Clan's core module library, `clan-core`.
You can use services exposed by Clans core module library, `clan-core`.
🔗 See: [List of Available Services in clan-core](../reference/clanServices/index.md)
@@ -167,19 +142,18 @@ You can also author your own `clanService` modules.
You might expose your service module from your flake — this makes it easy for other people to also use your module in their clan.
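As a rough sketch, assuming service modules are exposed under the `clan.modules` flake output (the attribute name `machine-type` and the path are hypothetical):

```nix
{
  # Expose the service module as a flake output so that other clans
  # can select it via `module.input` (your flake) and `module.name`.
  clan.modules = {
    machine-type = ./machine-type.nix;
  };
}
```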
______________________________________________________________________
---
## 💡 Tips for Working with clanServices
- You can add multiple inputs to your flake (`clan-core`, `your-org-modules`, etc.) to mix and match services.
- Each service instance is isolated by its key in `inventory.instances`, allowing you to deploy multiple versions or roles of the same service type.
- Roles can target different machines or be scoped dynamically.
* You can add multiple inputs to your flake (`clan-core`, `your-org-modules`, etc.) to mix and match services.
* Each service instance is isolated by its key in `inventory.instances`, allowing you to deploy multiple versions or roles of the same service type, as sketched below.
* Roles can target different machines or be scoped dynamically.
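To illustrate that isolation, here is a minimal sketch assuming the `borgbackup` module from clan-core (instance and machine names are made up):

```nix
inventory.instances = {
  # Two independent instances of the same service module,
  # isolated by their keys in `inventory.instances`.
  backup-local = {
    module.name = "borgbackup";
    roles.server.machines."backup-host" = { };
    roles.client.tags = [ "all" ];
  };
  backup-offsite = {
    module.name = "borgbackup";
    roles.server.machines."offsite-host" = { };
    roles.client.machines."server1" = { };
  };
};
```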
______________________________________________________________________
---
## What's Next?
- [Author your own clanService →](../guides/services/community.md)
- [Migrate from clanModules →](../guides/migrations/migrate-inventory-services.md)
## Whats Next?
* [Author your own clanService →](../guides/services/community.md)
* [Migrate from clanModules →](../guides/migrations/migrate-inventory-services.md)
<!-- TODO: * [Understand the architecture →](../explanation/clan-architecture.md) -->

View File

@@ -90,10 +90,13 @@ export CLAN_DEBUG_COMMANDS=1
These options help you pinpoint the source and context of print messages and debug logs during development.
## Analyzing Performance
To understand what's causing slow performance, set the environment variable `export CLAN_CLI_PERF=1`. When you complete a clan command, you'll see a summary of various performance metrics, helping you identify what's taking up time.
## See all possible packages and tests
To quickly show all possible packages and tests execute:
@@ -152,16 +155,28 @@ To test the CLI locally in a development environment and set breakpoints for deb
## Test Locally in a Nix Sandbox
To run tests in a Nix sandbox:
To run tests in a Nix sandbox, you have two options depending on whether your test functions have been marked as impure or not:
### Running Tests Marked as Impure
If your test functions need to execute `nix build` and have been marked as impure because you can't execute `nix build` inside a Nix sandbox, use the following command:
```bash
nix build .#checks.x86_64-linux.clan-pytest-with-core
nix run .#impure-checks -L
```
This command will run the impure test functions.
### Running Pure Tests
For test functions that have not been marked as impure and don't require executing `nix build`, you can use the following command:
```bash
nix build .#checks.x86_64-linux.clan-pytest-without-core
nix build .#checks.x86_64-linux.clan-pytest --rebuild
```
This command will run all pure test functions.
### Inspecting the Nix Sandbox
If you need to inspect the Nix sandbox while running tests, follow these steps:

View File

@@ -27,7 +27,7 @@ inputs = {
## Import the Clan flake-parts Module
After updating your flake inputs, the next step is to import the Clan flake-parts module. This will make the [Clan options](/options) available within `mkFlake`.
After updating your flake inputs, the next step is to import the Clan flake-parts module. This will make the [Clan options](../options.md) available within `mkFlake`.
```nix
{

View File

@@ -2,9 +2,9 @@
Machines can be added using the following methods:
- Create a file `machines/{machine_name}/configuration.nix` (See: [File Autoincludes](../../concepts/autoincludes.md))
- Imperative via cli command: `clan machines create`
- Editing nix expressions in flake.nix. See [`clan-core.lib.clan`](/options/?scope=Flake Options (clan.nix file))
- Editing nix expressions in flake.nix (i.e. via `clan-core.lib.clan`)
- Editing machines/`machine_name`/configuration.nix (automatically included if it exists)
- `clan machines create` (imperative)
See the complete [list](../../concepts/autoincludes.md) of auto-loaded files.
@@ -39,6 +39,7 @@ See the complete [list](../../concepts/autoincludes.md) of auto-loaded files.
The imperative command might create a machine folder in `machines/jon` and persist information in `inventory.json`.
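For the declarative route, a minimal sketch assuming a machine called `jon` (the name is only an example):

```nix
clan-core.lib.clan {
  # Declaratively registers the machine "jon"
  inventory.machines.jon = { };
  # Optional inline NixOS configuration for "jon";
  # machines/jon/configuration.nix is still auto-included if present
  machines.jon = { };
}
```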
### Configuring a machine
!!! Note

View File

@@ -1,15 +1,12 @@
# Update Machines
# Update Your Machines
The Clan command line interface enables you to update machines remotely over SSH.
In this guide we will teach you how to set a `targetHost` in Nix,
and how to define a remote builder for your machine closures.
Clan CLI enables you to remotely update your machines over SSH. This requires setting up a target address for each target machine.
### Setting `targetHost`
## Setting `targetHost`
In your Nix files, set the `targetHost` to the reachable IP address of your new machine. This eliminates the need to specify `--target-host` with every command.
Set the machine's `targetHost` to the reachable IP address of the new machine.
This eliminates the need to specify `--target-host` in CLI commands.
```{.nix title="clan.nix" hl_lines="9"}
{
@@ -26,42 +23,15 @@ inventory.machines = {
# [...]
}
```
The use of `root@` in the target address implies SSH access as the `root` user.
Ensure that the root login is secured and only used when necessary.
## Multiple Target Hosts
You can now experiment with a new interface that allows you to define multiple `targetHost` addresses for different VPNs. Learn more and try it out in our [networking guide](../networking.md).
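A minimal sketch of such a setup, assuming the `internet` and `tor` services from clan-core (machine and host names are placeholders):

```nix
inventory.instances = {
  # Priority 1: direct SSH over the public internet
  internet.roles.default.machines.jon = {
    settings.host = "jon.example.com";
  };
  # Fallback: reach the machine via Tor if the direct route fails
  tor.roles.server.tags.nixos = { };
};
```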
### Setting a Build Host
## Updating Machine Configurations
Execute the following command to update the specified machine:
```bash
clan machines update jon
```
All machines can be updated simultaneously by omitting the machine name:
```bash
clan machines update
```
---
## Advanced Usage
The following options are only needed for special cases, such as limited resources, mixed environments, or private flakes.
### Setting `buildHost`
If the machine does not have enough resources to run the NixOS **evaluation** or **build** itself,
it is also possible to specify a `buildHost` instead.
During an update, clan will ssh into the `buildHost` and run `nixos-rebuild` from there.
!!! Note
The `buildHost` option should be set directly within your machine's Nix configuration, **not** under `inventory.machines`.
If the machine does not have enough resources to run the NixOS evaluation or build itself,
it is also possible to specify a build host instead.
During an update, the cli will ssh into the build host and run `nixos-rebuild` from there.
```{.nix hl_lines="5" .no-copy}
@@ -75,11 +45,7 @@ buildClan {
};
```
### Overriding configuration with CLI flags
`buildHost`, `targetHost`, and other network settings can be temporarily overridden for a single command.
For the full list of flags, refer to the [Clan CLI reference](../../reference/cli/index.md).
You can also override the build host via the command line:
```bash
# Build on a remote host
@@ -90,9 +56,23 @@ clan machines update jon --build-host local
```
!!! Note
Make sure the CPU architecture of the `buildHost` matches that of the `targetHost`
Make sure that the CPU architecture is the same for the buildHost as for the targetHost.
Example:
If you want to deploy to a macOS machine, your architecture is an ARM64-Darwin, that means you need a second macOS machine to build it.
For example, if deploying to a macOS machine with an ARM64-Darwin architecture, you need a second macOS machine with the same architecture to build it.
### Updating Machine Configurations
Execute the following command to update the specified machine:
```bash
clan machines update jon
```
You can also update all configured machines simultaneously by omitting the machine name:
```bash
clan machines update
```
### Excluding a machine from `clan machine update`
@@ -116,15 +96,14 @@ This is useful for machines that are not always online or are not part of the re
### Uploading Flake Inputs
When updating remote machines, flake inputs are usually fetched by the build host.
However, if flake inputs require authentication (e.g., private repositories),
use the `--upload-inputs` flag to upload all inputs from your local machine:
However, if your flake inputs require authentication (e.g., private repositories),
you can use the `--upload-inputs` flag to upload all inputs from your local machine:
```bash
clan machines update jon --upload-inputs
```
This is particularly useful when:
- The flake references private Git repositories
- Authentication credentials are only available on the local machine
- Your flake references private Git repositories
- Authentication credentials are only available on your local machine
- The build host doesn't have access to certain network resources

View File

@@ -254,7 +254,7 @@ The following table shows the migration status of each deprecated clanModule:
| `data-mesher` | ✅ [Migrated](../../reference/clanServices/data-mesher.md) | |
| `deltachat` | ❌ Removed | |
| `disk-id` | ❌ Removed | |
| `dyndns` | ✅ [Migrated](../../reference/clanServices/dyndns.md) | |
| `dyndns` | [Being Migrated](https://git.clan.lol/clan/clan-core/pulls/4390) | |
| `ergochat` | ❌ Removed | |
| `garage` | ✅ [Migrated](../../reference/clanServices/garage.md) | |
| `golem-provider` | ❌ Removed | |
@@ -263,18 +263,18 @@ The following table shows the migration status of each deprecated clanModule:
| `iwd` | ❌ Removed | Use [wifi service](../../reference/clanServices/wifi.md) instead |
| `localbackup` | ✅ [Migrated](../../reference/clanServices/localbackup.md) | |
| `localsend` | ❌ Removed | |
| `machine-id` | ✅ [Migrated](../../reference/clan.core/settings.md) | Now an [option](../../reference/clan.core/settings.md) |
| `machine-id` | ❌ Removed | Now an [option](../../reference/clan.core/settings.md) |
| `matrix-synapse` | ✅ [Migrated](../../reference/clanServices/matrix-synapse.md) | |
| `moonlight` | ❌ Removed | |
| `mumble` | ❌ Removed | |
| `mycelium` | ✅ [Migrated](../../reference/clanServices/mycelium.md) | |
| `nginx` | ❌ Removed | |
| `packages` | ✅ [Migrated](../../reference/clanServices/packages.md) | |
| `postgresql` | ✅ [Migrated](../../reference/clan.core/settings.md) | Now an [option](../../reference/clan.core/settings.md) |
| `root-password` | ✅ [Migrated](../../reference/clanServices/users.md) | See [migration guide](../../reference/clanServices/users.md#migration-from-root-password-module) |
| `postgresql` | ❌ Removed | Now an [option](../../reference/clan.core/settings.md) |
| `root-password` | ✅ [Migrated](../../reference/clanServices/users.md) | |
| `single-disk` | ❌ Removed | |
| `sshd` | ✅ [Migrated](../../reference/clanServices/sshd.md) | |
| `state-version` | ✅ [Migrated](../../reference/clan.core/settings.md) | Now an [option](../../reference/clan.core/settings.md) |
| `state-version` | ✅ [Migrated](../../reference/clanServices/state-version.md) | |
| `static-hosts` | ❌ Removed | |
| `sunshine` | ❌ Removed | |
| `syncthing-static-peers` | ❌ Removed | |

View File

@@ -1,20 +1,18 @@
# Convert existing NixOS configurations
# Migrate existing NixOS configurations
This guide will help you convert your existing NixOS configurations into a Clan.
This guide will help you migrate your existing NixOS configurations into Clan.
!!! Warning
Migrating instead of starting new can be trickier and might lead to bugs or
unexpected issues. We recommend reading the [Getting Started](./index.md) guide first.
Once you have a working setup and understand the concepts, transferring your NixOS configurations over is easy.
## Back up your existing configuration
unexpected issues. We recommend following the [Getting Started](../getting-started/index.md) guide first. Once you have a working setup, you can easily transfer your NixOS configurations over.
## Back up your existing configuration!
Before you start, it is strongly recommended to back up your existing
configuration in any form you see fit. If you use version control to manage
your configuration changes, it is also a good idea to follow the migration
guide in a separate branch until everything works as expected.
## Starting Point
We assume you are already using NixOS flakes to manage your configuration. If
@@ -45,9 +43,10 @@ have two hosts: **berlin** and **cologne**.
}
```
## 1. Add `clan-core` to `inputs`
## Add clan-core Input
Add `clan-core` to your flake as input.
Add `clan-core` to your flake as input. It will provide everything we need to
manage your configurations with clan.
```nix
inputs.clan-core = {
@@ -57,7 +56,7 @@ inputs.clan-core = {
}
```
## 2. Update Outputs
## Update Outputs
To be able to access our newly added dependency, it has to be added to the
output parameters.
@@ -104,23 +103,26 @@ For the provide flake example, your flake should now look like this:
};
in
{
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
clan = clan.config;
nixosConfigurations = clan.nixosConfigurations;
inherit (clan) clanInternals;
clan = {
inherit (clan) templates;
};
};
}
```
Et voilà! Your existing hosts are now part of a clan.
Existing Nix tooling
Et voilà! Your existing hosts are now part of a clan. Existing Nix tooling
should still work as normal. To check that you didn't make any errors, run `nix
flake show` and verify both hosts are still recognized as if nothing had
changed. You should also see the new `clan` output.
changed. You should also see the new `clanInternals` output.
```
nix flake show
git+file:///my-nixos-config
├───clan: unknown
├───clanInternals: unknown
└───nixosConfigurations
├───berlin: NixOS configuration
└───cologne: NixOS configuration
@@ -129,7 +131,7 @@ git+file:///my-nixos-config
Of course you can also rebuild your configuration using `nixos-rebuild` and
verify everything still works.
## 3. Add `clan-cli` to your `devShells`
## Add Clan CLI devShell
At this point Clan is set up, but you can't use the CLI yet. To do so, it is
recommended to expose it via a `devShell` in your flake. It is also possible to
@@ -161,8 +163,8 @@ cologne
## Specify Targets
Clan needs to know where it can reach your hosts. For testing purpose set
`clan.core.networking.targetHost` to the machine's address or hostname.
Clan needs to know where it can reach your hosts. For each of your hosts, set
`clan.core.networking.targetHost` to its address or hostname.
```nix
# machines/berlin/configuration.nix
@@ -171,8 +173,6 @@ Clan needs to know where it can reach your hosts. For testing purpose set
}
```
See our guide on properly [configuring machine networking](../networking.md)
## Next Steps
You are now fully set up. Use the CLI to manage your hosts or proceed to

View File

@@ -1,184 +0,0 @@
# Connecting to Your Machines
Clan provides automatic networking with fallback mechanisms to reliably connect to your machines.
## Option 1: Automatic Networking with Fallback (Recommended)
Clan's networking module automatically manages connections through various network technologies with intelligent fallback. When you run `clan ssh` or `clan machines update`, Clan tries each configured network by priority until one succeeds.
### Basic Setup with Internet Service
For machines with public IPs or DNS names, use the `internet` service to configure direct SSH while keeping fallback options:
```{.nix title="flake.nix" hl_lines="7-10 14-16"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
inventory.instances = {
# Direct SSH with fallback support
internet = {
roles.default.machines.server1 = {
settings.host = "server1.example.com";
};
roles.default.machines.server2 = {
settings.host = "192.168.1.100";
};
};
# Fallback: Secure connections via Tor
tor = {
roles.server.tags.nixos = { };
};
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
### Advanced Setup with Multiple Networks
```{.nix title="flake.nix" hl_lines="7-10 13-16 19-21"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
inventory.instances = {
# Priority 1: Try direct connection first
internet = {
roles.default.machines.publicserver = {
settings.host = "public.example.com";
};
};
# Priority 2: VPN for internal machines
zerotier = {
roles.controller.machines."controller" = { };
roles.peer.tags.nixos = { };
};
# Priority 3: Tor as universal fallback
tor = {
roles.server.tags.nixos = { };
};
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
### How It Works
Clan automatically tries networks in order of priority:
1. Direct internet connections (if configured)
2. VPN networks (ZeroTier, Tailscale, etc.)
3. Tor hidden services
4. Any other configured networks
If one network fails, Clan automatically tries the next.
### Useful Commands
```bash
# View all configured networks and their status
clan network list
# Test connectivity through all networks
clan network ping machine1
# Show complete network topology
clan network overview
```
## Option 2: Manual targetHost (Bypasses Fallback!)
!!! warning
Setting `targetHost` directly **disables all automatic networking and fallback**. Only use this if you need complete control and don't want Clan's intelligent connection management.
### Using Inventory (For Static Addresses)
Use inventory-level `targetHost` when the address is **static** and doesn't depend on NixOS configuration:
```{.nix title="flake.nix" hl_lines="8"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
inventory.machines.server = {
# WARNING: This bypasses all networking modules!
# Use for: Static IPs, DNS names, known hostnames
deploy.targetHost = "root@192.168.1.100";
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
**When to use inventory-level:**
- Static IP addresses: `"root@192.168.1.100"`
- DNS names: `"user@server.example.com"`
- Any address that doesn't change based on machine configuration
### Using NixOS Configuration (For Dynamic Addresses)
Use machine-level `targetHost` when you need to **interpolate values from the NixOS configuration**:
```{.nix title="flake.nix" hl_lines="7"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
machines.server = { config, ... }: {
# WARNING: This also bypasses all networking modules!
# REQUIRED for: Addresses that depend on NixOS config
clan.core.networking.targetHost = "root@${config.networking.hostName}.local";
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
**When to use machine-level (NixOS config):**
- Using hostName from config: `"root@${config.networking.hostName}.local"`
- Building from multiple config values: `"${config.users.users.deploy.name}@${config.networking.hostName}"`
- Any address that depends on evaluated NixOS configuration
!!! info "Key Difference"
**Inventory-level** (`deploy.targetHost`) is evaluated immediately and works with static strings.
**Machine-level** (`clan.core.networking.targetHost`) is evaluated after NixOS configuration and can access `config.*` values.
## Quick Decision Guide
| Scenario | Recommended Approach | Why |
|----------|---------------------|-----|
| Public servers | `internet` service | Keeps fallback options |
| Mixed infrastructure | Multiple networks | Automatic failover |
| Machines behind NAT | ZeroTier/Tor | NAT traversal with fallback |
| Testing/debugging | Manual targetHost | Full control, no magic |
| Single static machine | Manual targetHost | Simple, no overhead |
## Command-Line Override
The `--target-host` flag bypasses ALL networking configuration:
```bash
# Emergency access - ignores all networking config
clan machines update server --target-host root@backup-ip.com
# Direct SSH - no fallback attempted
clan ssh laptop --target-host user@10.0.0.5
```
Use this for debugging or emergency access when automatic networking isn't working.

View File

@@ -255,50 +255,11 @@ outputs = inputs: flake-parts.lib.mkFlake { inherit inputs; } ({self, lib, ...}:
})
```
The benefit of this approach is that downstream users can override the value of
`myClan` by using `mkForce` or other priority modifiers.
## Example: A machine-type service
Users often have different types of machines. These could be any classification
you like, for example "servers" and "desktops". Having such distinctions allows
reusing parts of your configuration that should be applied to a class of
machines. Since this is such a common pattern, here is how to write such a
service.
For this example we have two roles: `server` and `desktop`. Additionally, we
can use the `perMachine` section to add configuration to all machines regardless
of their type.
```nix title="machine-type.nix"
{
_class = "clan.service";
manifest.name = "machine-type";
roles.server.perInstance.nixosModule = ./server.nix;
roles.desktop.perInstance.nixosModule = ./desktop.nix;
perMachine.nixosModule = {
# Configuration for all machines (any type)
};
}
```
In the inventory we then assign machines to a type, e.g. by using tags
```nix title="flake.nix"
instances.machine-type = {
module.input = "self";
module.name = "@pinpox/machine-type";
roles.desktop.tags.desktop = { };
roles.server.tags.server = { };
};
```
The benefit of this approach is that downstream users can override the value of `myClan` by using `mkForce` or other priority modifiers.
---
## Further Reading
## Further
- [Reference Documentation for Service Authors](../../reference/clanServices/clan-service-author-interface.md)
- [Migration Guide from ClanModules to ClanServices](../../guides/migrations/migrate-inventory-services.md)

View File

@@ -0,0 +1,84 @@
# How to Set `targetHost` for a Machine
The `targetHost` defines where the machine can be reached for operations like SSH or deployment. You can set it in two ways, depending on your use case.
---
## ✅ Option 1: Use the Inventory (Recommended for Static Hosts)
If the hostname is **static**, like `server.example.com`, set it in the **inventory**:
```{.nix title="flake.nix" hl_lines="8"}
{
# elided
outputs =
{ self, clan-core, ... }:
let
# Sometimes this attribute set is defined in clan.nix
clan = clan-core.lib.clan {
inventory.machines.jon = {
deploy.targetHost = "root@server.example.com";
};
};
in
{
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
# elided
};
}
```
This is fast, simple and explicit, and doesn’t require evaluating the NixOS config. It can also be displayed in the clan-cli or clan-app.
---
## ✅ Option 2: Use NixOS (Only for Dynamic Hosts)
If your target host depends on a **dynamic expression** (like using the machine’s evaluated FQDN), set it inside the NixOS module:
```{.nix title="flake.nix" hl_lines="8"}
{
# elided
outputs =
{ self, clan-core, ... }:
let
# Sometimes this attribute set is defined in clan.nix
clan = clan-core.lib.clan {
machines.jon = {config, ...}: {
clan.core.networking.targetHost = "jon@${config.networking.fqdn}";
};
};
in
{
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
# elided
};
}
```
Use this **only if the value cannot be made static**, because its slower and won't be displayed in the clan-cli or clan-app yet.
---
## 📝 TL;DR
| Use Case | Use Inventory? | Example |
| ------------------------- | -------------- | -------------------------------- |
| Static hostname | ✅ Yes | `root@server.example.com` |
| Dynamic config expression | ❌ No | `jon@${config.networking.fqdn}` |
---
## 🚀 Coming Soon: Unified Networking Module
Were working on a new networking module that will automatically do all of this for you.
- Easier to use
- Sane defaults: Youll always be able to reach the machine — no need to worry about hostnames.
- ✨ Migration from **either method** will be supported and simple.
## Summary
- Ask: *Does this hostname dynamically change based on NixOS config?*
- If **no**, use the inventory.
- If **yes**, then use NixOS config.

6
docs/site/options.md Normal file
View File

@@ -0,0 +1,6 @@
---
template: options.html
---
<iframe src="/options-page/" height="1000" width="100%"></iframe>

View File

@@ -4,7 +4,7 @@ This section of the site provides an overview of available options and commands
---
- [Clan Configuration Option](/options) - for defining a Clan
- [Clan Configuration Option](../options.md) - for defining a Clan
- Learn how to use the [Clan CLI](./cli/index.md)
- Explore available [services](./clanServices/index.md)
- [NixOS Configuration Options](./clan.core/index.md) - Additional options available on a NixOS machine.

52
flake.lock generated
View File

@@ -13,11 +13,11 @@
]
},
"locked": {
"lastModified": 1756695982,
"narHash": "sha256-dyLhOSDzxZtRgi5aj/OuaZJUsuvo+8sZ9CU/qieZ15c=",
"rev": "cc8f26e7e6c2dc985526ba59b286ae5a83168cdb",
"lastModified": 1753067306,
"narHash": "sha256-jyoEbaXa8/MwVQ+PajUdT63y3gYhgD9o7snO/SLaikw=",
"rev": "18dfd42bdb2cfff510b8c74206005f733e38d8b9",
"type": "tarball",
"url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/cc8f26e7e6c2dc985526ba59b286ae5a83168cdb.tar.gz"
"url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/18dfd42bdb2cfff510b8c74206005f733e38d8b9.tar.gz"
},
"original": {
"type": "tarball",
@@ -31,11 +31,11 @@
]
},
"locked": {
"lastModified": 1756733629,
"narHash": "sha256-dwWGlDhcO5SMIvMSTB4mjQ5Pvo2vtxvpIknhVnSz2I8=",
"lastModified": 1754971456,
"narHash": "sha256-p04ZnIBGzerSyiY2dNGmookCldhldWAu03y0s3P8CB0=",
"owner": "nix-community",
"repo": "disko",
"rev": "a5c4f2ab72e3d1ab43e3e65aa421c6f2bd2e12a1",
"rev": "8246829f2e675a46919718f9a64b71afe3bfb22d",
"type": "github"
},
"original": {
@@ -51,11 +51,11 @@
]
},
"locked": {
"lastModified": 1756770412,
"narHash": "sha256-+uWLQZccFHwqpGqr2Yt5VsW/PbeJVTn9Dk6SHWhNRPw=",
"lastModified": 1754487366,
"narHash": "sha256-pHYj8gUBapuUzKV/kN/tR3Zvqc7o6gdFB9XKXIp1SQ8=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "4524271976b625a4a605beefd893f270620fd751",
"rev": "af66ad14b28a127c5c0f3bbb298218fc63528a18",
"type": "github"
},
"original": {
@@ -71,11 +71,11 @@
]
},
"locked": {
"lastModified": 1757015938,
"narHash": "sha256-1qBXNK/QxEjCqIoA2DxWn5gqM8rVxt+OxKodXu1GLTY=",
"lastModified": 1755275010,
"narHash": "sha256-lEApCoWUEWh0Ifc3k1JdVjpMtFFXeL2gG1qvBnoRc2I=",
"owner": "nix-darwin",
"repo": "nix-darwin",
"rev": "eaacfa1101b84225491d2ceae9549366d74dc214",
"rev": "7220b01d679e93ede8d7b25d6f392855b81dd475",
"type": "github"
},
"original": {
@@ -86,11 +86,11 @@
},
"nix-select": {
"locked": {
"lastModified": 1755887746,
"narHash": "sha256-lzWbpHKX0WAn/jJDoCijIDss3rqYIPawe46GDaE6U3g=",
"rev": "92c2574c5e113281591be01e89bb9ddb31d19156",
"lastModified": 1745005516,
"narHash": "sha256-IVaoOGDIvAa/8I0sdiiZuKptDldrkDWUNf/+ezIRhyc=",
"rev": "69d8bf596194c5c35a4e90dd02c52aa530caddf8",
"type": "tarball",
"url": "https://git.clan.lol/api/v1/repos/clan/nix-select/archive/92c2574c5e113281591be01e89bb9ddb31d19156.tar.gz"
"url": "https://git.clan.lol/api/v1/repos/clan/nix-select/archive/69d8bf596194c5c35a4e90dd02c52aa530caddf8.tar.gz"
},
"original": {
"type": "tarball",
@@ -99,11 +99,11 @@
},
"nixos-facter-modules": {
"locked": {
"lastModified": 1756491981,
"narHash": "sha256-lXyDAWPw/UngVtQfgQ8/nrubs2r+waGEYIba5UX62+k=",
"lastModified": 1750412875,
"narHash": "sha256-uP9Xxw5XcFwjX9lNoYRpybOnIIe1BHfZu5vJnnPg3Jc=",
"owner": "nix-community",
"repo": "nixos-facter-modules",
"rev": "c1b29520945d3e148cd96618c8a0d1f850965d8c",
"rev": "14df13c84552a7d1f33c1cd18336128fbc43f920",
"type": "github"
},
"original": {
@@ -115,10 +115,10 @@
"nixpkgs": {
"locked": {
"lastModified": 315532800,
"narHash": "sha256-h8Sx4S+/0FpodZji6W9lHzwY5BcuUG85Aj3GfhvGC2o=",
"rev": "a650b5d0de99158323597f048667c4d914243224",
"narHash": "sha256-moy1MfcGj+Pd+lU3PHYQUJq9OP0Evv9me8MjtmHlnRM=",
"rev": "32f313e49e42f715491e1ea7b306a87c16fe0388",
"type": "tarball",
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre845298.a650b5d0de99/nixexprs.tar.xz"
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre844992.32f313e49e42/nixexprs.tar.xz"
},
"original": {
"type": "tarball",
@@ -181,11 +181,11 @@
]
},
"locked": {
"lastModified": 1756662192,
"narHash": "sha256-F1oFfV51AE259I85av+MAia221XwMHCOtZCMcZLK2Jk=",
"lastModified": 1754847726,
"narHash": "sha256-2vX8QjO5lRsDbNYvN9hVHXLU6oMl+V/PsmIiJREG4rE=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "1aabc6c05ccbcbf4a635fb7a90400e44282f61c4",
"rev": "7d81f6fb2e19bf84f1c65135d1060d829fae2408",
"type": "github"
},
"original": {

View File

@@ -96,7 +96,6 @@
./nixosModules/flake-module.nix
./pkgs/flake-module.nix
./templates/flake-module.nix
./pkgs/clan-cli/clan_cli/tests/flake-module.nix
]
++ [
(if pathExists ./flakeModules/clan.nix then import ./flakeModules/clan.nix inputs.self else { })

View File

@@ -46,8 +46,6 @@
"checks/lib/ssh/privkey"
"checks/lib/ssh/pubkey"
"checks/matrix-synapse/synapse-registration_shared_secret"
"checks/mumble/machines/peer1/facts/mumble-cert"
"checks/mumble/machines/peer2/facts/mumble-cert"
"checks/secrets/clan-secrets"
"checks/secrets/sops/groups/group/machines/machine"
"checks/syncthing/introducer/introducer_device_id"

View File

@@ -87,8 +87,6 @@ in
relativeDir = removePrefix "${self}/" (toString config.clan.directory);
update-vars = hostPkgs.writeShellScriptBin "update-vars" ''
set -x
export PRJ_ROOT=$(git rev-parse --show-toplevel)
${update-vars-script} $PRJ_ROOT/${relativeDir} ${testName}
'';

View File

@@ -328,7 +328,7 @@ rec {
# To get the type of a deferred module we need to know the interface of the place where it is evaluated.
# i.e. in case of a clan.service this is the interface of the service which dynamically changes depending on the service
# We assign "type" = []
# This means any value is valid — or like TypeScript's unknown.
# This means any value is valid — or like TypeScripts unknown.
# We can assign the type later, when we know the exact interface.
# tsType = "unknown" is a type that we preload for json2ts, such that it gets the correct type in typescript
(option.type.name == "deferredModule")

View File

@@ -245,8 +245,6 @@ in
in
{ config, ... }:
{
staticModules = clan-core.clan.modules;
distributedServices = clanLib.inventory.mapInstances {
inherit (clanConfig) inventory exportsModule;
inherit flakeInputs directory;

View File

@@ -639,7 +639,7 @@ in
Exports are used to share and expose information between instances.
Define exports in the [`perInstance`](#roles.perInstance) or [`perMachine`](#perMachine) scope.
Define exports in the [`perInstance`](#perInstance) or [`perMachine`](#perMachine) scope.
Accessing the exports:

View File

@@ -21,14 +21,14 @@ let
"secrets"
"templates"
];
clanSchemaNix = jsonLib.parseOptions (lib.filterAttrs (n: _v: lib.elem n include) clanOpts) { };
clanSchema = jsonLib.parseOptions (lib.filterAttrs (n: _v: lib.elem n include) clanOpts) { };
clanSchemaJson = pkgs.stdenv.mkDerivation {
clan-schema-abstract = pkgs.stdenv.mkDerivation {
name = "clan-schema-files";
buildInputs = [ pkgs.cue ];
src = ./.;
buildPhase = ''
export SCHEMA=${builtins.toFile "clan-schema.json" (builtins.toJSON clanSchemaNix)}
export SCHEMA=${builtins.toFile "clan-schema.json" (builtins.toJSON clanSchema)}
cp $SCHEMA schema.json
# Also generate a CUE schema version that is derived from the JSON schema
cue import -f -p compose -l '#Root:' schema.json
@@ -41,7 +41,7 @@ in
{
inherit
flakeOptions
clanSchemaNix
clanSchemaJson
clanSchema
clan-schema-abstract
;
}

View File

@@ -255,16 +255,6 @@ in
'';
};
installedAt = lib.mkOption {
type = types.nullOr types.int;
default = null;
description = ''
Indicates when the machine was first installed.
Timestamp is in unix time (seconds since epoch).
'';
};
tags = lib.mkOption {
description = ''
List of tags for the machine.

View File

@@ -27,9 +27,7 @@ in
default = { };
};
tags = lib.mkOption {
type = types.coercedTo (types.listOf types.str) (t: lib.genAttrs t (_: { })) (
types.attrsOf (types.submodule { })
);
type = types.attrsOf (types.submodule { });
default = { };
};
settings =

View File

@@ -23,12 +23,6 @@ let
};
in
{
options.staticModules = lib.mkOption {
readOnly = true;
type = lib.types.raw;
apply = moduleSet: lib.mapAttrs (inspectModule "<clan-core>") moduleSet;
};
options.modulesPerSource = lib.mkOption {
# { sourceName :: { moduleName :: {} }}
readOnly = true;

View File

@@ -1,5 +1,3 @@
"""Test driver for container-based NixOS testing."""
import argparse
import ctypes
import os
@@ -13,7 +11,7 @@ import uuid
from collections.abc import Callable
from contextlib import _GeneratorContextManager
from dataclasses import dataclass
from functools import cache, cached_property
from functools import cached_property
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import Any
@@ -22,21 +20,23 @@ from colorama import Fore, Style
from .logger import AbstractLogger, CompositeLogger, TerminalLogger
# Global flag to track if test environment has been initialized
_test_env_initialized = False
@cache
def init_test_environment() -> None:
"""Set up the test environment (network bridge, /etc/passwd) once."""
global _test_env_initialized
if _test_env_initialized:
return
# Set up network bridge
subprocess.run(
["ip", "link", "add", "br0", "type", "bridge"],
check=True,
text=True,
["ip", "link", "add", "br0", "type", "bridge"], check=True, text=True
)
subprocess.run(["ip", "link", "set", "br0", "up"], check=True, text=True)
subprocess.run(
["ip", "addr", "add", "192.168.1.254/24", "dev", "br0"],
check=True,
text=True,
["ip", "addr", "add", "192.168.1.254/24", "dev", "br0"], check=True, text=True
)
# Set up minimal passwd file for unprivileged operations
@@ -44,7 +44,7 @@ def init_test_environment() -> None:
passwd_content = """root:x:0:0:Root:/root:/bin/sh
nixbld:x:1000:100:Nix build user:/tmp:/bin/sh
nobody:x:65534:65534:Nobody:/:/bin/sh
""" # noqa: S105 - This is not a password, it's a Unix passwd file format for testing
"""
with NamedTemporaryFile(mode="w", delete=False, prefix="test-passwd-") as f:
f.write(passwd_content)
@@ -84,6 +84,8 @@ nogroup:x:65534:
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno), "Failed to mount group")
_test_env_initialized = True
# Load the C library
libc = ctypes.CDLL("libc.so.6", use_errno=True)
@@ -109,7 +111,8 @@ def mount(
mountflags: int = 0,
data: str | None = None,
) -> None:
"""A Python wrapper for the mount system call.
"""
A Python wrapper for the mount system call.
:param source: The source of the file system (e.g., device name, remote filesystem).
:param target: The mount point (an existing directory).
@@ -126,11 +129,7 @@ def mount(
# Call the mount system call
result = libc.mount(
source_c,
target_c,
fstype_c,
ctypes.c_ulong(mountflags),
data_c,
source_c, target_c, fstype_c, ctypes.c_ulong(mountflags), data_c
)
if result != 0:
@@ -142,11 +141,11 @@ class Error(Exception):
pass
def prepare_machine_root(root: Path) -> None:
def prepare_machine_root(machinename: str, root: Path) -> None:
root.mkdir(parents=True, exist_ok=True)
root.joinpath("etc").mkdir(parents=True, exist_ok=True)
root.joinpath(".env").write_text(
"\n".join(f"{k}={v}" for k, v in os.environ.items()),
"\n".join(f"{k}={v}" for k, v in os.environ.items())
)
@@ -158,6 +157,7 @@ def retry(fn: Callable, timeout: int = 900) -> None:
"""Call the given function repeatedly, with 1 second intervals,
until it returns True or a timeout is reached.
"""
for _ in range(timeout):
if fn(False):
return
@@ -189,7 +189,7 @@ class Machine:
return self.get_systemd_process()
def start(self) -> None:
prepare_machine_root(self.rootdir)
prepare_machine_root(self.name, self.rootdir)
init_test_environment()
cmd = [
"systemd-nspawn",
@@ -212,12 +212,8 @@ class Machine:
self.process = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True, env=env)
def get_systemd_process(self) -> int:
if self.process is None:
msg = "Machine not started"
raise RuntimeError(msg)
if self.process.stdout is None:
msg = "Machine has no stdout"
raise RuntimeError(msg)
assert self.process is not None, "Machine not started"
assert self.process.stdout is not None, "Machine has no stdout"
for line in self.process.stdout:
print(line, end="")
@@ -234,9 +230,9 @@ class Machine:
.read_text()
.split()
)
if len(childs) != 1:
msg = f"Expected exactly one child process for systemd-nspawn, got {childs}"
raise RuntimeError(msg)
assert len(childs) == 1, (
f"Expected exactly one child process for systemd-nspawn, got {childs}"
)
try:
return int(childs[0])
except ValueError as e:
@@ -256,9 +252,7 @@ class Machine:
def tuple_from_line(line: str) -> tuple[str, str]:
match = line_pattern.match(line)
if match is None:
msg = f"Failed to parse line: {line}"
raise RuntimeError(msg)
assert match is not None
return match[1], match[2]
return dict(
@@ -268,14 +262,8 @@ class Machine:
)
def nsenter_command(self, command: str) -> list[str]:
nsenter = shutil.which("nsenter")
if not nsenter:
msg = "nsenter command not found"
raise RuntimeError(msg)
return [
nsenter,
"nsenter",
"--target",
str(self.container_pid),
"--mount",
@@ -292,11 +280,12 @@ class Machine:
def execute(
self,
command: str,
check_return: bool = True, # noqa: ARG002
check_output: bool = True, # noqa: ARG002
check_return: bool = True,
check_output: bool = True,
timeout: int | None = 900,
) -> subprocess.CompletedProcess:
"""Execute a shell command, returning a list `(status, stdout)`.
"""
Execute a shell command, returning a list `(status, stdout)`.
Commands are run with `set -euo pipefail` set:
@@ -327,22 +316,21 @@ class Machine:
`timeout` parameter, e.g., `execute(cmd, timeout=10)` or
`execute(cmd, timeout=None)`. The default is 900 seconds.
"""
# Always run command with shell opts
command = f"set -eo pipefail; source /etc/profile; set -xu; {command}"
return subprocess.run(
proc = subprocess.run(
self.nsenter_command(command),
env={},
timeout=timeout,
check=False,
stdout=subprocess.PIPE,
text=True,
)
return proc
def nested(
self,
msg: str,
attrs: dict[str, str] | None = None,
self, msg: str, attrs: dict[str, str] | None = None
) -> _GeneratorContextManager:
if attrs is None:
attrs = {}
@@ -351,7 +339,8 @@ class Machine:
return self.logger.nested(msg, my_attrs)
def systemctl(self, q: str) -> subprocess.CompletedProcess:
"""Runs `systemctl` commands with optional support for
"""
Runs `systemctl` commands with optional support for
`systemctl --user`
```py
@@ -366,7 +355,8 @@ class Machine:
return self.execute(f"systemctl {q}")
def wait_until_succeeds(self, command: str, timeout: int = 900) -> str:
"""Repeat a shell command with 1-second intervals until it succeeds.
"""
Repeat a shell command with 1-second intervals until it succeeds.
Has a default timeout of 900 seconds which can be modified, e.g.
`wait_until_succeeds(cmd, timeout=10)`. See `execute` for details on
command execution.
@@ -384,17 +374,18 @@ class Machine:
return output
def wait_for_open_port(
self,
port: int,
addr: str = "localhost",
timeout: int = 900,
self, port: int, addr: str = "localhost", timeout: int = 900
) -> None:
"""Wait for a port to be open on the given address."""
"""
Wait for a port to be open on the given address.
"""
command = f"nc -z {shlex.quote(addr)} {port}"
self.wait_until_succeeds(command, timeout=timeout)
def wait_for_file(self, filename: str, timeout: int = 30) -> None:
"""Waits until the file exists in the machine's file system."""
"""
Waits until the file exists in the machine's file system.
"""
def check_file(_last_try: bool) -> bool:
result = self.execute(f"test -e {filename}")
@@ -404,7 +395,8 @@ class Machine:
retry(check_file, timeout)
def wait_for_unit(self, unit: str, timeout: int = 900) -> None:
"""Wait for a systemd unit to get into "active" state.
"""
Wait for a systemd unit to get into "active" state.
Throws exceptions on "failed" and "inactive" states as well as after
timing out.
"""
@@ -449,7 +441,9 @@ class Machine:
return res.stdout
def shutdown(self) -> None:
"""Shut down the machine, waiting for the VM to exit."""
"""
Shut down the machine, waiting for the VM to exit.
"""
if self.process:
self.process.terminate()
self.process.wait()
@@ -563,7 +557,7 @@ class Driver:
rootdir=tempdir_path / container.name,
out_dir=self.out_dir,
logger=self.logger,
),
)
)
def start_all(self) -> None:
@@ -581,15 +575,13 @@ class Driver:
# We launch a sleep here, so we can pgrep the process cmdline for
# the uuid
sleep = shutil.which("sleep")
if sleep is None:
msg = "sleep command not found"
raise RuntimeError(msg)
assert sleep is not None, "sleep command not found"
machine.execute(
f"systemd-run /bin/sh -c '{sleep} 999999999 && echo {nspawn_uuid}'",
)
print(
f"To attach to container {machine.name} run on the same machine that runs the test:",
f"To attach to container {machine.name} run on the same machine that runs the test:"
)
print(
" ".join(
@@ -611,8 +603,8 @@ class Driver:
"-c",
"bash",
Style.RESET_ALL,
],
),
]
)
)
def test_symbols(self) -> dict[str, Any]:
@@ -631,13 +623,13 @@ class Driver:
"additionally exposed symbols:\n "
+ ", ".join(m.name for m in self.machines)
+ ",\n "
+ ", ".join(list(general_symbols.keys())),
+ ", ".join(list(general_symbols.keys()))
)
return {**general_symbols, **machine_symbols}
def test_script(self) -> None:
"""Run the test script"""
exec(self.testscript, self.test_symbols(), None) # noqa: S102
exec(self.testscript, self.test_symbols(), None)
def run_tests(self) -> None:
"""Run the test script (for non-interactive test runs)"""

View File

@@ -25,31 +25,27 @@ class AbstractLogger(ABC):
@abstractmethod
@contextmanager
def subtest(
self,
name: str,
attributes: dict[str, str] | None = None,
self, name: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
pass
@abstractmethod
@contextmanager
def nested(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
pass
@abstractmethod
def info(self, *args: Any, **kwargs: Any) -> None:
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore
pass
@abstractmethod
def warning(self, *args: Any, **kwargs: Any) -> None:
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore
pass
@abstractmethod
def error(self, *args: Any, **kwargs: Any) -> None:
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore
pass
@abstractmethod
@@ -63,8 +59,6 @@ class AbstractLogger(ABC):
class JunitXMLLogger(AbstractLogger):
class TestCaseState:
"""State tracking for individual test cases in JUnit XML reports."""
def __init__(self) -> None:
self.stdout = ""
self.stderr = ""
@@ -72,7 +66,7 @@ class JunitXMLLogger(AbstractLogger):
def __init__(self, outfile: Path) -> None:
self.tests: dict[str, JunitXMLLogger.TestCaseState] = {
"main": self.TestCaseState(),
"main": self.TestCaseState()
}
self.currentSubtest = "main"
self.outfile: Path = outfile
@@ -80,16 +74,12 @@ class JunitXMLLogger(AbstractLogger):
atexit.register(self.close)
def log(self, message: str, attributes: dict[str, str] | None = None) -> None:
del attributes # Unused but kept for API compatibility
self.tests[self.currentSubtest].stdout += message + os.linesep
@contextmanager
def subtest(
self,
name: str,
attributes: dict[str, str] | None = None,
self, name: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
del attributes # Unused but kept for API compatibility
old_test = self.currentSubtest
self.tests.setdefault(name, self.TestCaseState())
self.currentSubtest = name
@@ -100,24 +90,18 @@ class JunitXMLLogger(AbstractLogger):
@contextmanager
def nested(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
del attributes # Unused but kept for API compatibility
self.log(message)
yield
def info(self, *args: Any, **kwargs: Any) -> None:
del kwargs # Unused but kept for API compatibility
self.tests[self.currentSubtest].stdout += args[0] + os.linesep
def warning(self, *args: Any, **kwargs: Any) -> None:
del kwargs # Unused but kept for API compatibility
self.tests[self.currentSubtest].stdout += args[0] + os.linesep
def error(self, *args: Any, **kwargs: Any) -> None:
del kwargs # Unused but kept for API compatibility
self.tests[self.currentSubtest].stderr += args[0] + os.linesep
self.tests[self.currentSubtest].failure = True
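Taken together, the concrete loggers are meant to be stacked behind CompositeLogger; a hedged sketch of wiring them up (TerminalLogger's constructor arguments are an assumption):

from pathlib import Path

logger = CompositeLogger([TerminalLogger(), JunitXMLLogger(Path("report.xml"))])
with logger.subtest("boot"):
    logger.info("machine booted")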
@@ -160,9 +144,7 @@ class CompositeLogger(AbstractLogger):
@contextmanager
def subtest(
self,
name: str,
attributes: dict[str, str] | None = None,
self, name: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
with ExitStack() as stack:
for logger in self.logger_list:
@@ -171,24 +153,22 @@ class CompositeLogger(AbstractLogger):
@contextmanager
def nested(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
with ExitStack() as stack:
for logger in self.logger_list:
stack.enter_context(logger.nested(message, attributes))
yield
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore
for logger in self.logger_list:
logger.info(*args, **kwargs)
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore
for logger in self.logger_list:
logger.warning(*args, **kwargs)
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore
for logger in self.logger_list:
logger.error(*args, **kwargs)
sys.exit(1)
@@ -220,24 +200,19 @@ class TerminalLogger(AbstractLogger):
@contextmanager
def subtest(
self,
name: str,
attributes: dict[str, str] | None = None,
self, name: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
with self.nested("subtest: " + name, attributes):
yield
@contextmanager
def nested(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
self._eprint(
self.maybe_prefix(
Style.BRIGHT + Fore.GREEN + message + Style.RESET_ALL,
attributes,
),
Style.BRIGHT + Fore.GREEN + message + Style.RESET_ALL, attributes
)
)
tic = time.time()
@@ -245,13 +220,13 @@ class TerminalLogger(AbstractLogger):
toc = time.time()
self.log(f"(finished: {message}, in {toc - tic:.2f} seconds)")
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore
self.log(*args, **kwargs)
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore
self.log(*args, **kwargs)
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore
self.log(*args, **kwargs)
def print_serial_logs(self, enable: bool) -> None:
@@ -284,9 +259,7 @@ class XMLLogger(AbstractLogger):
return "".join(ch for ch in message if unicodedata.category(ch)[0] != "C")
def maybe_prefix(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> str:
if attributes and "machine" in attributes:
return f"{attributes['machine']}: {message}"
@@ -297,13 +270,13 @@ class XMLLogger(AbstractLogger):
self.xml.characters(message)
self.xml.endElement("line")
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore
self.log(*args, **kwargs)
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore
self.log(*args, **kwargs)
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore
self.log(*args, **kwargs)
def log(self, message: str, attributes: dict[str, str] | None = None) -> None:
@@ -336,18 +309,14 @@ class XMLLogger(AbstractLogger):
@contextmanager
def subtest(
self,
name: str,
attributes: dict[str, str] | None = None,
self, name: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
with self.nested("subtest: " + name, attributes):
yield
@contextmanager
def nested(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
if attributes is None:
attributes = {}

View File

@@ -8,10 +8,6 @@
{
imports = lib.optional (_class == "nixos") (
lib.mkIf config.clan.core.enableRecommendedDefaults {
# Enable automatic state-version generation.
clan.core.settings.state-version.enable = lib.mkDefault true;
# Use systemd during boot as well except:
# - systems with raids, as these currently require manual configuration: https://github.com/NixOS/nixpkgs/issues/210210
# - for containers we currently rely on the `stage-2` init script that sets up our /etc
@@ -41,7 +37,6 @@
};
config = lib.mkIf config.clan.core.enableRecommendedDefaults {
# This disables the HTML manual and `nixos-help` command but leaves
# `man configuration.nix`
documentation.doc.enable = lib.mkDefault false;

View File

@@ -1,17 +1,40 @@
{ ... }:
{
perSystem.clan.nixosTests.machine-id = {
perSystem =
{ ... }:
{
clan.nixosTests.machine-id = {
name = "service-machine-id";
name = "machine-id";
clan = {
directory = ./.;
machines.server = {
clan.core.settings.machine-id.enable = true;
clan = {
directory = ./.;
# Workaround until we can use nodes.server = { };
modules."@clan/importer" = ../../../../clanServices/importer;
inventory = {
machines.server = { };
instances.importer = {
module.name = "@clan/importer";
module.input = "self";
roles.default.tags.all = { };
roles.default.extraModules = [
{
# Test machine ID generation
clan.core.settings.machine-id.enable = true;
}
];
};
};
};
# TODO: Broken. Use this instead of the importer once fixed.
# nodes.server = { };
# This is not an actual VM test; it is a workaround to
# generate the needed vars for the eval test.
testScript = "";
};
};
# This is not an actual VM test; it is a workaround to
# generate the needed vars for the eval test.
testScript = "";
};
}

View File

@@ -1,236 +0,0 @@
{
lib,
config,
pkgs,
...
}:
let
cfg = config.clan.core.postgresql;
createDatabaseState =
db:
let
folder = "/var/backup/postgres/${db.name}";
current = "${folder}/pg-dump";
compression = lib.optionalString (lib.versionAtLeast config.services.postgresql.package.version "16") "--compress=zstd";
in
{
folders = [ folder ];
preBackupScript = ''
export PATH=${
lib.makeBinPath [
config.services.postgresql.package
config.systemd.package
pkgs.coreutils
pkgs.util-linux
pkgs.zstd
]
}
while [[ "$(systemctl is-active postgresql)" == activating ]]; do
sleep 1
done
mkdir -p "${folder}"
runuser -u postgres -- pg_dump ${compression} --dbname=${db.name} -Fc -c > "${current}.tmp"
mv "${current}.tmp" ${current}
'';
postRestoreScript = ''
export PATH=${
lib.makeBinPath [
config.services.postgresql.package
config.systemd.package
pkgs.coreutils
pkgs.util-linux
pkgs.zstd
pkgs.gnugrep
]
}
while [[ "$(systemctl is-active postgresql)" == activating ]]; do
sleep 1
done
echo "Waiting for postgres to be ready..."
while ! runuser -u postgres -- psql --port=${builtins.toString config.services.postgresql.settings.port} -d postgres -c "" ; do
if ! systemctl is-active postgresql; then exit 1; fi
sleep 0.1
done
if [[ -e "${current}" ]]; then
(
systemctl stop ${lib.concatStringsSep " " db.restore.stopOnRestore}
trap "systemctl start ${lib.concatStringsSep " " db.restore.stopOnRestore}" EXIT
mkdir -p "${folder}"
if runuser -u postgres -- psql -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${db.name}'" | grep -q 1; then
runuser -u postgres -- dropdb "${db.name}"
fi
runuser -u postgres -- pg_restore -C -d postgres "${current}"
)
else
echo No database backup found, skipping restore
fi
'';
};
createDatabase = db: ''
CREATE DATABASE "${db.name}" ${
lib.concatStringsSep " " (
lib.mapAttrsToList (name: value: "${name} = '${value}'") db.create.options
)
}
'';
userClauses = lib.mapAttrsToList (
_: user:
''$PSQL -tAc "SELECT 1 FROM pg_roles WHERE rolname='${user.name}'" | grep -q 1 || $PSQL -tAc 'CREATE USER "${user.name}"' ''
) cfg.users;
databaseClauses = lib.mapAttrsToList (
name: db:
lib.optionalString db.create.enable ''$PSQL -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${name}'" | grep -q 1 || $PSQL -d postgres -c ${lib.escapeShellArg (createDatabase db)} ''
) cfg.databases;
in
{
options.clan.core.postgresql = {
enable = lib.mkEnableOption "Whether to enable PostgreSQL Server";
# we are reimplementing the ensureDatabase and ensureUser options here to allow creating databases with options
databases = lib.mkOption {
description = "Databases to create";
default = { };
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
description = "Database name.";
};
service = lib.mkOption {
type = lib.types.str;
default = name;
description = "Service name that we associate with the database.";
};
# set to false in case the upstream module uses the ensureDatabase option
create.enable = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Create the database if it does not exist.";
};
create.options = lib.mkOption {
description = "Options to pass to the CREATE DATABASE command.";
type = lib.types.lazyAttrsOf lib.types.str;
default = { };
example = {
TEMPLATE = "template0";
LC_COLLATE = "C";
LC_CTYPE = "C";
ENCODING = "UTF8";
OWNER = "foo";
};
};
restore.stopOnRestore = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = "List of systemd services to stop before restoring the database.";
};
};
}
)
);
};
users = lib.mkOption {
description = "Users to create";
default = { };
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options.name = lib.mkOption {
description = "User name";
type = lib.types.str;
default = name;
};
}
)
);
};
};
config = lib.mkIf (config.clan.core.postgresql.enable) {
clan.core.settings.state-version.enable = true;
# services.postgresql.package = lib.mkDefault pkgs.postgresql_16;
services.postgresql.enable = true;
services.postgresql.settings = {
wal_level = "replica";
max_wal_senders = 3;
};
# We duplicate parts of the upstream module here to allow creating databases with options
systemd.services.postgresql.postStart = ''
PSQL="psql --port=${builtins.toString config.services.postgresql.settings.port}"
while ! $PSQL -d postgres -c "" 2> /dev/null; do
if ! kill -0 "$MAINPID"; then exit 1; fi
sleep 0.1
done
${lib.concatStringsSep "\n" userClauses}
${lib.concatStringsSep "\n" databaseClauses}
'';
clan.core.state = lib.mapAttrs' (
_: db: lib.nameValuePair db.service (createDatabaseState db)
) config.clan.core.postgresql.databases;
environment.systemPackages = builtins.map (
db:
let
folder = "/var/backup/postgres/${db.name}";
current = "${folder}/pg-dump";
in
pkgs.writeShellScriptBin "postgres-db-restore-command-${db.name}" ''
export PATH=${
lib.makeBinPath [
config.services.postgresql.package
config.systemd.package
pkgs.coreutils
pkgs.util-linux
pkgs.zstd
pkgs.gnugrep
]
}
while [[ "$(systemctl is-active postgresql)" == activating ]]; do
sleep 1
done
echo "Waiting for postgres to be ready..."
while ! runuser -u postgres -- psql --port=${builtins.toString config.services.postgresql.settings.port} -d postgres -c "" ; do
if ! systemctl is-active postgresql; then exit 1; fi
sleep 0.1
done
if [[ -e "${current}" ]]; then
(
${lib.optionalString (db.restore.stopOnRestore != [ ]) ''
systemctl stop ${builtins.toString db.restore.stopOnRestore}
trap "systemctl start ${builtins.toString db.restore.stopOnRestore}" EXIT
''}
mkdir -p "${folder}"
if runuser -u postgres -- psql -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${db.name}'" | grep -q 1; then
runuser -u postgres -- dropdb "${db.name}"
fi
runuser -u postgres -- pg_restore -C -d postgres "${current}"
)
else
echo No database backup found, skipping restore
fi
''
) (builtins.attrValues config.clan.core.postgresql.databases);
};
}

View File

@@ -10,14 +10,30 @@
clan = {
directory = ./.;
machines.machine = {
clan.core.postgresql.enable = true;
clan.core.postgresql.users.test = { };
clan.core.postgresql.databases.test.create.options.OWNER = "test";
clan.core.settings.directory = ./.;
# Workaround until we can use nodes.machine = { };
modules."@clan/importer" = ../../../../clanServices/importer;
inventory = {
machines.machine = { };
instances.importer = {
module.name = "@clan/importer";
module.input = "self";
roles.default.tags.all = { };
roles.default.extraModules = [
{
clan.core.postgresql.enable = true;
clan.core.postgresql.users.test = { };
clan.core.postgresql.databases.test.create.options.OWNER = "test";
clan.core.settings.directory = ./.;
}
];
};
};
};
# TODO: Broken. Use this instead of the importer once fixed.
# nodes.machine = { };
testScript =
let
runpg = "runuser -u postgres -- /run/current-system/sw/bin/psql";

View File

@@ -9,11 +9,28 @@
clan = {
directory = ./.;
machines.server = {
clan.core.settings.state-version.enable = true;
# Workaround until we can use nodes.server = { };
modules."@clan/importer" = ../../../../clanServices/importer;
inventory = {
machines.server = { };
instances.importer = {
module.name = "@clan/importer";
module.input = "self";
roles.default.tags.all = { };
roles.default.extraModules = [
{
clan.core.settings.state-version.enable = true;
}
];
};
};
};
# TODO: Broken. Use this instead of the importer once fixed.
# nodes.server = { };
# This is not an actual VM test; it is a workaround to
# generate the needed vars for the eval test.
testScript = "";

View File

@@ -290,11 +290,9 @@ in
};
owner = mkOption {
description = "The user name or id that will own the file.";
type = str;
default = "root";
};
group = mkOption {
type = str;
description = "The group name or id that will own the file.";
default = if _class == "darwin" then "wheel" else "root";
defaultText = lib.literalExpression ''if _class == "darwin" then "wheel" else "root"'';
@@ -304,15 +302,6 @@ in
description = "The unix file mode of the file. Must be a 4-digit octal number.";
default = "0400";
};
exists = mkOption {
description = ''
Returns true if the file exists. This is used to guard against reading values that are not set, during evaluation.
This currently only works for non-secret files.
'';
type = bool;
default = if file.config.secret then throw "Cannot determine existence of secret file" else false;
defaultText = "Throws an error because the existence of a secret file cannot be determined";
};
value =
mkOption {
description = ''

View File

@@ -25,7 +25,7 @@ in
);
value = mkIf (file.config.secret == false) (
# dynamically adjust priority to allow overriding with mkDefault in case the file is not found
if file.config.exists then
if (pathExists file.config.flakePath) then
# if the file is found it should have normal priority
readFile file.config.flakePath
else
@@ -34,7 +34,6 @@ in
throw "Please run `clan vars generate ${config.clan.core.settings.machine.name}` as file was not found: ${file.config.path}"
)
);
exists = mkIf (file.config.secret == false) (pathExists file.config.flakePath);
};
};
}

View File

@@ -1,116 +0,0 @@
# Standalone VM base module that can be imported independently
# This module contains the core VM configuration without the system extension
{
lib,
config,
pkgs,
modulesPath,
...
}:
let
# Flatten the list of state folders into a single list
stateFolders = lib.flatten (
lib.mapAttrsToList (_item: attrs: attrs.folders) config.clan.core.state
);
in
{
imports = [
(modulesPath + "/virtualisation/qemu-vm.nix")
./serial.nix
./waypipe.nix
];
clan.core.state.HOME.folders = [ "/home" ];
clan.services.waypipe = {
inherit (config.clan.core.vm.inspect.waypipe) enable command;
};
# required for issuing shell commands via qga
services.qemuGuest.enable = true;
# required to react to system_powerdown qmp command
# Some desktop managers like xfce override the poweroff signal and therefore
# make it impossible to handle it via 'logind' directly.
services.acpid.enable = true;
services.acpid.handlers.power.event = "button/power.*";
services.acpid.handlers.power.action = "poweroff";
# only works on x11
services.spice-vdagentd.enable = config.services.xserver.enable;
boot.initrd.systemd.enable = true;
boot.initrd.systemd.storePaths = [
pkgs.util-linux
pkgs.e2fsprogs
];
boot.initrd.systemd.emergencyAccess = true;
# userborn would be faster because it doesn't need perl, but it cannot create normal users
services.userborn.enable = true;
users.mutableUsers = false;
users.allowNoPasswordLogin = true;
boot.initrd.kernelModules = [ "virtiofs" ];
virtualisation.writableStore = false;
virtualisation.fileSystems = lib.mkForce (
{
"/nix/store" = {
device = "nix-store";
options = [
"x-systemd.requires=systemd-modules-load.service"
"ro"
];
fsType = "virtiofs";
};
"/" = {
device = "/dev/vda";
fsType = "ext4";
options = [
"defaults"
"x-systemd.makefs"
"nobarrier"
"noatime"
"nodiratime"
"data=writeback"
"discard"
];
};
"/vmstate" = {
device = "/dev/vdb";
options = [
"x-systemd.makefs"
"noatime"
"nodiratime"
"discard"
];
noCheck = true;
fsType = "ext4";
};
${config.clan.core.facts.secretUploadDirectory} = {
device = "secrets";
fsType = "9p";
neededForBoot = true;
options = [
"trans=virtio"
"version=9p2000.L"
"cache=loose"
];
};
}
// lib.listToAttrs (
map (
folder:
lib.nameValuePair folder {
device = "/vmstate${folder}";
fsType = "none";
options = [ "bind" ];
}
) stateFolders
)
);
}

View File

@@ -4,11 +4,116 @@
pkgs,
options,
extendModules,
modulesPath,
...
}:
let
# Import the standalone VM base module
vmModule = import ./vm-base.nix;
# Flatten the list of state folders into a single list
stateFolders = lib.flatten (
lib.mapAttrsToList (_item: attrs: attrs.folders) config.clan.core.state
);
vmModule = {
imports = [
(modulesPath + "/virtualisation/qemu-vm.nix")
./serial.nix
./waypipe.nix
];
clan.core.state.HOME.folders = [ "/home" ];
clan.services.waypipe = {
inherit (config.clan.core.vm.inspect.waypipe) enable command;
};
# required for issuing shell commands via qga
services.qemuGuest.enable = true;
# required to react to system_powerdown qmp command
# Some desktop managers like xfce override the poweroff signal and therefore
# make it impossible to handle it via 'logind' directly.
services.acpid.enable = true;
services.acpid.handlers.power.event = "button/power.*";
services.acpid.handlers.power.action = "poweroff";
# only works on x11
services.spice-vdagentd.enable = config.services.xserver.enable;
boot.initrd.systemd.enable = true;
boot.initrd.systemd.storePaths = [
pkgs.util-linux
pkgs.e2fsprogs
];
boot.initrd.systemd.emergencyAccess = true;
# userborn would be faster because it doesn't need perl, but it cannot create normal users
services.userborn.enable = true;
users.mutableUsers = false;
users.allowNoPasswordLogin = true;
boot.initrd.kernelModules = [ "virtiofs" ];
virtualisation.writableStore = false;
virtualisation.fileSystems = lib.mkForce (
{
"/nix/store" = {
device = "nix-store";
options = [
"x-systemd.requires=systemd-modules-load.service"
"ro"
];
fsType = "virtiofs";
};
"/" = {
device = "/dev/vda";
fsType = "ext4";
options = [
"defaults"
"x-systemd.makefs"
"nobarrier"
"noatime"
"nodiratime"
"data=writeback"
"discard"
];
};
"/vmstate" = {
device = "/dev/vdb";
options = [
"x-systemd.makefs"
"noatime"
"nodiratime"
"discard"
];
noCheck = true;
fsType = "ext4";
};
${config.clan.core.facts.secretUploadDirectory} = {
device = "secrets";
fsType = "9p";
neededForBoot = true;
options = [
"trans=virtio"
"version=9p2000.L"
"cache=loose"
];
};
}
// lib.listToAttrs (
map (
folder:
lib.nameValuePair folder {
device = "/vmstate${folder}";
fsType = "none";
options = [ "bind" ];
}
) stateFolders
)
);
};
# We cannot simply merge the VM config into the current system config, because
# it is not necessarily a VM.

View File

@@ -16,10 +16,6 @@ from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any
# Constants
NODE_ID_LENGTH = 10
NETWORK_ID_LENGTH = 16
class ClanError(Exception):
pass
@@ -59,9 +55,9 @@ class Identity:
def node_id(self) -> str:
nid = self.public.split(":")[0]
if len(nid) != NODE_ID_LENGTH:
msg = f"node_id must be {NODE_ID_LENGTH} characters long, got {len(nid)}: {nid}"
raise ClanError(msg)
assert len(nid) == 10, (
f"node_id must be 10 characters long, got {len(nid)}: {nid}"
)
return nid
@@ -88,10 +84,9 @@ class ZerotierController:
headers["Content-Type"] = "application/json"
headers["X-ZT1-AUTH"] = self.authtoken
url = f"http://127.0.0.1:{self.port}{path}"
# Safe: only connecting to localhost zerotier API
req = urllib.request.Request(url, headers=headers, method=method, data=body) # noqa: S310
with urllib.request.urlopen(req, timeout=5) as resp: # noqa: S310
return json.load(resp)
req = urllib.request.Request(url, headers=headers, method=method, data=body)
resp = urllib.request.urlopen(req)
return json.load(resp)
def status(self) -> dict[str, Any]:
return self._http_request("/status")
@@ -177,9 +172,9 @@ def create_identity() -> Identity:
def compute_zerotier_ip(network_id: str, identity: Identity) -> ipaddress.IPv6Address:
if len(network_id) != NETWORK_ID_LENGTH:
msg = f"network_id must be {NETWORK_ID_LENGTH} characters long, got '{network_id}'"
raise ClanError(msg)
assert len(network_id) == 16, (
f"network_id must be 16 characters long, got '{network_id}'"
)
nwid = int(network_id, 16)
node_id = int(identity.node_id(), 16)
addr_parts = bytearray(
@@ -200,7 +195,7 @@ def compute_zerotier_ip(network_id: str, identity: Identity) -> ipaddress.IPv6Ad
(node_id >> 16) & 0xFF,
(node_id >> 8) & 0xFF,
(node_id) & 0xFF,
],
]
)
return ipaddress.IPv6Address(bytes(addr_parts))
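A worked example of the layout computed above (ZeroTier's RFC4193 scheme: one 0xfd byte, the 8-byte network id, the marker bytes 0x99 0x93, then the 5-byte node id); the ids below are illustrative:

import ipaddress

network_id = "8056c2e21c000001"  # illustrative 16-hex-digit network id
node_id = 0x1234567890  # illustrative 10-hex-digit (5-byte) node id
addr = (
    bytes([0xFD])
    + int(network_id, 16).to_bytes(8, "big")
    + bytes([0x99, 0x93])
    + node_id.to_bytes(5, "big")
)
print(ipaddress.IPv6Address(addr))  # fd80:56c2:e21c:0:199:9312:3456:7890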
@@ -208,10 +203,7 @@ def compute_zerotier_ip(network_id: str, identity: Identity) -> ipaddress.IPv6Ad
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
"--mode",
choices=["network", "identity"],
required=True,
type=str,
"--mode", choices=["network", "identity"], required=True, type=str
)
parser.add_argument("--ip", type=Path, required=True)
parser.add_argument("--identity-secret", type=Path, required=True)

nixosModules/clanCore/zerotier/genmoon.py (Executable file → Normal file)
View File

@@ -6,12 +6,9 @@ import sys
from pathlib import Path
from tempfile import NamedTemporaryFile
# Constants
REQUIRED_ARGS = 4
def main() -> None:
if len(sys.argv) != REQUIRED_ARGS:
if len(sys.argv) != 4:
print("Usage: genmoon.py <moon.json> <endpoint.json> <moons.d>")
sys.exit(1)
moon_json_path = sys.argv[1]
@@ -20,7 +17,7 @@ def main() -> None:
moon_json = json.loads(Path(moon_json_path).read_text())
moon_json["roots"][0]["stableEndpoints"] = json.loads(
Path(endpoint_config).read_text(),
Path(endpoint_config).read_text()
)
with NamedTemporaryFile("w") as f:

View File

@@ -34,7 +34,4 @@ in
flake.nixosModules.clanCore = clanCore;
flake.darwinModules.clanCore = clanCore;
# Standalone VM base module that can be imported for VM testing
flake.nixosModules.clan-vm-base = ./clanCore/vm-base.nix;
}

View File

@@ -12,14 +12,8 @@ let
(builtins.match "linux_[0-9]+_[0-9]+" name) != null
&& (builtins.tryEval kernelPackages).success
&& (
let
zfsPackage =
if isUnstable then
kernelPackages.zfs_unstable
else
kernelPackages.${pkgs.zfs.kernelModuleAttribute};
in
!(zfsPackage.meta.broken or false)
(!isUnstable && !kernelPackages.zfs.meta.broken)
|| (isUnstable && !kernelPackages.zfs_unstable.meta.broken)
)
) pkgs.linuxKernel.packages;
latestKernelPackage = lib.last (
@@ -30,5 +24,5 @@ let
in
{
# Note: this might jump back and forth as kernels get added or removed.
boot.kernelPackages = lib.mkIf (lib.meta.availableOn pkgs.hostPlatform pkgs.zfs) latestKernelPackage;
boot.kernelPackages = latestKernelPackage;
}

Some files were not shown because too many files have changed in this diff.