Compare commits


1 Commit

Author  SHA1        Message                            Date
pinpox  99270051cd  Add prometheus role to monitoring  2025-08-15 11:22:15 +02:00
594 changed files with 10142 additions and 22307 deletions

View File

@@ -0,0 +1,9 @@
name: checks
on:
pull_request:
jobs:
checks-impure:
runs-on: nix
steps:
- uses: actions/checkout@v4
- run: nix run .#impure-checks

View File

@@ -10,7 +10,7 @@ jobs:
if: github.repository_owner == 'clan-lol'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: actions/create-github-app-token@v2

1 .gitignore vendored
View File

@@ -39,6 +39,7 @@ select
# Generated files
pkgs/clan-app/ui/api/API.json
pkgs/clan-app/ui/api/API.ts
pkgs/clan-app/ui/api/Inventory.ts
pkgs/clan-app/ui/api/modules_schemas.json
pkgs/clan-app/ui/api/schema.json
pkgs/clan-app/ui/.fonts

View File

@@ -1,20 +0,0 @@
clanServices/.* @pinpox @kenji
lib/test/container-test-driver/.* @DavHau @mic92
lib/modules/inventory/.* @hsjobeki
lib/modules/inventoryClass/.* @hsjobeki
pkgs/clan-app/ui/.* @hsjobeki @brianmcgee
pkgs/clan-app/clan_app/.* @qubasa @hsjobeki
pkgs/clan-cli/clan_cli/.* @lassulus @mic92 @kenji
pkgs/clan-cli/clan_cli/(secrets|vars)/.* @DavHau @lassulus
pkgs/clan-cli/clan_lib/log_machines/.* @Qubasa
pkgs/clan-cli/clan_lib/ssh/.* @Qubasa @Mic92 @lassulus
pkgs/clan-cli/clan_lib/tags/.* @hsjobeki
pkgs/clan-cli/clan_lib/persist/.* @hsjobeki
pkgs/clan-cli/clan_lib/flake/.* @lassulus
pkgs/clan-cli/api.py @hsjobeki
pkgs/clan-cli/openapi.py @hsjobeki

View File

@@ -8,7 +8,7 @@ Our mission is simple: to democratize computing by providing tools that empower
## Features of Clan
- **Full-Stack System Deployment:** Utilize Clan's toolkit alongside Nix's reliability to build and manage systems effortlessly.
- **Full-Stack System Deployment:** Utilize Clans toolkit alongside Nix's reliability to build and manage systems effortlessly.
- **Overlay Networks:** Secure, private communication channels between devices.
- **Virtual Machine Integration:** Seamless operation of VM applications within the main operating system.
- **Robust Backup Management:** Long-term, self-hosted data preservation.

View File

@@ -36,6 +36,7 @@ in
++ filter pathExists [
./devshell/flake-module.nix
./flash/flake-module.nix
./impure/flake-module.nix
./installation/flake-module.nix
./update/flake-module.nix
./morph/flake-module.nix
@@ -138,6 +139,33 @@ in
nixosTests
// flakeOutputs
// {
# TODO: Automatically provide this check to downstream users to check their modules
clan-modules-json-compatible =
let
allSchemas = lib.mapAttrs (
_n: m:
let
schema =
(self.clanLib.evalService {
modules = [ m ];
prefix = [
"checks"
system
];
}).config.result.api.schema;
in
schema
) self.clan.modules;
in
pkgs.runCommand "combined-result"
{
schemaFile = builtins.toFile "schemas.json" (builtins.toJSON allSchemas);
}
''
mkdir -p $out
cat $schemaFile > $out/allSchemas.json
'';
clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
cp -r ${privateInputs.clan-core-for-checks} $out
chmod -R +w $out
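Like any flake check, the combined schema artifact can be built directly, e.g. `nix build .#checks.<system>.clan-modules-json-compatible` (the path is assumed from the standard flake checks layout).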

View File

@@ -55,8 +55,7 @@
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
# Skip flash test on aarch64-linux for now as it's too slow
checks = lib.optionalAttrs (pkgs.stdenv.isLinux && pkgs.hostPlatform.system != "aarch64-linux") {
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
nixos-test-flash = self.clanLib.test.baseTest {
name = "flash";
nodes.target = {

View File

@@ -0,0 +1,51 @@
{
perSystem =
{
pkgs,
lib,
self',
...
}:
{
# a script that executes all other checks
packages.impure-checks = pkgs.writeShellScriptBin "impure-checks" ''
#!${pkgs.bash}/bin/bash
set -euo pipefail
unset CLAN_DIR
export PATH="${
lib.makeBinPath (
[
pkgs.gitMinimal
pkgs.nix
pkgs.coreutils
pkgs.rsync # needed to have rsync installed on the dummy ssh server
]
++ self'.packages.clan-cli-full.runtimeDependencies
)
}"
ROOT=$(git rev-parse --show-toplevel)
cd "$ROOT/pkgs/clan-cli"
# Set up custom git configuration for tests
export GIT_CONFIG_GLOBAL=$(mktemp)
git config --file "$GIT_CONFIG_GLOBAL" user.name "Test User"
git config --file "$GIT_CONFIG_GLOBAL" user.email "test@example.com"
export GIT_CONFIG_SYSTEM=/dev/null
# this disables dynamic dependency loading in clan-cli
export CLAN_NO_DYNAMIC_DEPS=1
jobs=$(nproc)
# Spawning workers in pytest is relatively slow, so we limit the number of jobs to 13
# (current number of impure tests)
jobs="$((jobs > 13 ? 13 : jobs))"
nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -n $jobs -m impure ./clan_cli $@"
# Clean up temporary git config
rm -f "$GIT_CONFIG_GLOBAL"
'';
};
}
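This package provides the `impure-checks` entry point that the new `checks` workflow above invokes via `nix run .#impure-checks`; running the same command locally reproduces the CI job.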

View File

@@ -232,7 +232,6 @@
"-i", ssh_conn.ssh_key,
"--option", "store", os.environ['CLAN_TEST_STORE'],
"--update-hardware-config", "nixos-facter",
"--no-persist-state",
]
subprocess.run(clan_cmd, check=True)
@@ -242,7 +241,7 @@
target.shutdown()
except BrokenPipeError:
# qemu has already exited
target.connected = False
pass
# Create a new machine instance that boots from the installed system
installed_machine = create_test_machine(target, "${pkgs.qemu_test}", name="after_install")
@@ -276,7 +275,7 @@
"${self.checks.x86_64-linux.clan-core-for-checks}",
"${closureInfo}"
)
# Set up SSH connection
ssh_conn = setup_ssh_connection(
target,
@@ -302,8 +301,7 @@
"test-install-machine-without-system",
"-i", ssh_conn.ssh_key,
"--option", "store", os.environ['CLAN_TEST_STORE'],
"--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
"--yes"
f"nonrootuser@localhost:{ssh_conn.host_port}"
]
result = subprocess.run(clan_cmd, capture_output=True, cwd=flake_dir)
@@ -327,9 +325,7 @@
"test-install-machine-without-system",
"-i", ssh_conn.ssh_key,
"--option", "store", os.environ['CLAN_TEST_STORE'],
"--target-host",
f"nonrootuser@localhost:{ssh_conn.host_port}",
"--yes"
f"nonrootuser@localhost:{ssh_conn.host_port}"
]
result = subprocess.run(clan_cmd, capture_output=True, cwd=flake_dir)

View File

@@ -1,32 +0,0 @@
This service sets up a certificate authority (CA) that can issue certificates to
other machines in your clan. For this the `ca` role is used.
It additionally provides a `default` role that can be applied to all machines
in your clan and will make sure they trust your CA.
## Example Usage
The following configuration would add a CA for the top level domain `.foo`. If
the machine `server` now hosts a webservice at `https://something.foo`, it will
get a certificate from `ca` which is valid inside your clan. The machine
`client` will trust this certificate if it makes a request to
`https://something.foo`.
This clan service can be combined with the `coredns` service for easy-to-deploy,
SSL-secured clan-internal service hosting.
```nix
inventory = {
machines.ca = { };
machines.client = { };
machines.server = { };
instances."certificates" = {
module.name = "certificates";
module.input = "self";
roles.ca.machines.ca.settings.tlds = [ "foo" ];
roles.default.machines.client = { };
roles.default.machines.server = { };
};
};
```
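A minimal sketch of such a combination, reusing the machines above together with the `coredns` service from this repository (the instance names, IP, and shared TLD `foo` are illustrative):

```nix
inventory = {
  machines.ca = { };
  machines.client = { };
  machines.server = { };
  instances."certificates" = {
    module.name = "certificates";
    module.input = "self";
    roles.ca.machines.ca.settings.tlds = [ "foo" ];
    roles.default.machines.client = { };
    roles.default.machines.server = { };
  };
  instances."coredns" = {
    module.name = "@clan/coredns";
    module.input = "self";
    # serve DNS for the same TLD the CA issues certificates for
    roles.server.machines.ca.settings = {
      ip = "192.168.1.1";
      tld = "foo";
    };
    roles.default.tags.all = { };
  };
};
```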

View File

@@ -1,245 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "certificates";
manifest.description = "Sets up certificates internal to your Clan";
manifest.categories = [ "Network" ];
manifest.readme = builtins.readFile ./README.md;
roles.ca = {
interface =
{ lib, ... }:
{
options.acmeEmail = lib.mkOption {
type = lib.types.str;
default = "none@none.tld";
description = ''
Email address for account creation and correspondence from the CA.
It is recommended to use the same email for all certs to avoid account
creation limits.
'';
};
options.tlds = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "Top level domain for this CA. Certificates will be issued and trusted for *.<tld>";
};
options.expire = lib.mkOption {
type = lib.types.nullOr lib.types.str;
description = "When the certificate should expire.";
default = "8760h";
example = "8760h";
};
};
perInstance =
{ settings, ... }:
{
nixosModule =
{
config,
pkgs,
lib,
...
}:
let
domains = map (tld: "ca.${tld}") settings.tlds;
in
{
security.acme.defaults.email = settings.acmeEmail;
security.acme = {
certs = builtins.listToAttrs (
map (domain: {
name = domain;
value = {
server = "https://${domain}:1443/acme/acme/directory";
};
}) domains
);
};
networking.firewall.allowedTCPPorts = [
80
443
];
services.nginx = {
enable = true;
recommendedProxySettings = true;
virtualHosts = builtins.listToAttrs (
map (domain: {
name = domain;
value = {
addSSL = true;
enableACME = true;
locations."/".proxyPass = "https://localhost:1443";
locations."= /ca.crt".alias =
config.clan.core.vars.generators.step-intermediate-cert.files."intermediate.crt".path;
};
}) domains
);
};
clan.core.vars.generators = {
# Intermediate key generator
"step-intermediate-key" = {
files."intermediate.key" = {
secret = true;
deploy = true;
owner = "step-ca";
group = "step-ca";
};
runtimeInputs = [ pkgs.step-cli ];
script = ''
step crypto keypair --kty EC --curve P-256 --no-password --insecure $out/intermediate.pub $out/intermediate.key
'';
};
# Intermediate certificate generator
"step-intermediate-cert" = {
files."intermediate.crt".secret = false;
dependencies = [
"step-ca"
"step-intermediate-key"
];
runtimeInputs = [ pkgs.step-cli ];
script = ''
# Create intermediate certificate
step certificate create \
--ca $in/step-ca/ca.crt \
--ca-key $in/step-ca/ca.key \
--ca-password-file /dev/null \
--key $in/step-intermediate-key/intermediate.key \
--template ${pkgs.writeText "intermediate.tmpl" ''
{
"subject": {{ toJson .Subject }},
"keyUsage": ["certSign", "crlSign"],
"basicConstraints": {
"isCA": true,
"maxPathLen": 0
},
"nameConstraints": {
"critical": true,
"permittedDNSDomains": [${
(lib.strings.concatStringsSep "," (map (tld: ''"${tld}"'') settings.tlds))
}]
}
}
''} ${lib.optionalString (settings.expire != null) "--not-after ${settings.expire}"} \
--not-before=-12h \
--no-password --insecure \
"Clan Intermediate CA" \
$out/intermediate.crt
'';
};
};
services.step-ca = {
enable = true;
intermediatePasswordFile = "/dev/null";
address = "0.0.0.0";
port = 1443;
settings = {
root = config.clan.core.vars.generators.step-ca.files."ca.crt".path;
crt = config.clan.core.vars.generators.step-intermediate-cert.files."intermediate.crt".path;
key = config.clan.core.vars.generators.step-intermediate-key.files."intermediate.key".path;
dnsNames = domains;
logger.format = "text";
db = {
type = "badger";
dataSource = "/var/lib/step-ca/db";
};
authority = {
provisioners = [
{
type = "ACME";
name = "acme";
forceCN = true;
}
];
claims = {
maxTLSCertDuration = "2160h";
defaultTLSCertDuration = "2160h";
};
backdate = "1m0s";
};
tls = {
cipherSuites = [
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
];
minVersion = 1.2;
maxVersion = 1.3;
renegotiation = false;
};
};
};
};
};
};
# Empty role, so we can add non-CA machines to the instance to trust the CA
roles.default = {
interface =
{ lib, ... }:
{
options.acmeEmail = lib.mkOption {
type = lib.types.str;
default = "none@none.tld";
description = ''
Email address for account creation and correspondence from the CA.
It is recommended to use the same email for all certs to avoid account
creation limits.
'';
};
};
perInstance =
{ settings, ... }:
{
nixosModule.security.acme.defaults.email = settings.acmeEmail;
};
};
# All machines (independent of role) will trust the CA
perMachine.nixosModule =
{ pkgs, config, ... }:
{
# Root CA generator
clan.core.vars.generators = {
"step-ca" = {
share = true;
files."ca.key" = {
secret = true;
deploy = false;
};
files."ca.crt".secret = false;
runtimeInputs = [ pkgs.step-cli ];
script = ''
step certificate create --template ${pkgs.writeText "root.tmpl" ''
{
"subject": {{ toJson .Subject }},
"issuer": {{ toJson .Subject }},
"keyUsage": ["certSign", "crlSign"],
"basicConstraints": {
"isCA": true,
"maxPathLen": 1
}
}
''} "Clan Root CA" $out/ca.crt $out/ca.key \
--kty EC --curve P-256 \
--not-after=8760h \
--not-before=-12h \
--no-password --insecure
'';
};
};
security.pki.certificateFiles = [ config.clan.core.vars.generators."step-ca".files."ca.crt".path ];
environment.systemPackages = [ pkgs.openssl ];
security.acme.acceptTerms = true;
};
}

View File

@@ -1,21 +0,0 @@
{
self,
lib,
...
}:
let
module = lib.modules.importApply ./default.nix {
inherit (self) packages;
};
in
{
clan.modules.certificates = module;
perSystem =
{ ... }:
{
clan.nixosTests.certificates = {
imports = [ ./tests/vm/default.nix ];
clan.modules.certificates = module;
};
};
}

View File

@@ -1,84 +0,0 @@
{
name = "certificates";
clan = {
directory = ./.;
inventory = {
machines.ca = { }; # 192.168.1.1
machines.client = { }; # 192.168.1.2
machines.server = { }; # 192.168.1.3
instances."certificates" = {
module.name = "certificates";
module.input = "self";
roles.ca.machines.ca.settings.tlds = [ "foo" ];
roles.default.machines.client = { };
roles.default.machines.server = { };
};
};
};
nodes =
let
hostConfig = ''
192.168.1.1 ca.foo
192.168.1.3 test.foo
'';
in
{
client.networking.extraHosts = hostConfig;
ca.networking.extraHosts = hostConfig;
server = {
networking.extraHosts = hostConfig;
# TODO: Could this be set automatically?
# I would like to get this information from the coredns module, but we
# cannot model dependencies yet
security.acme.certs."test.foo".server = "https://ca.foo/acme/acme/directory";
# Host a simple service on 'server', with SSL provided via our CA. 'client'
# should be able to curl it via https and accept the certificates
# presented
networking.firewall.allowedTCPPorts = [
80
443
];
services.nginx = {
enable = true;
virtualHosts."test.foo" = {
enableACME = true;
forceSSL = true;
locations."/" = {
return = "200 'test server response'";
extraConfig = "add_header Content-Type text/plain;";
};
};
};
};
};
testScript = ''
start_all()
import time
time.sleep(3)
ca.succeed("systemctl restart acme-order-renew-ca.foo.service ")
time.sleep(3)
server.succeed("systemctl restart acme-test.foo.service")
# It takes a while for the correct certs to appear (before that, self-signed
# certs are presented by nginx), so we wait for a bit.
client.wait_until_succeeds("curl -v https://test.foo")
# Show certificate information for debugging
client.succeed("openssl s_client -connect test.foo:443 -servername test.foo </dev/null 2>/dev/null | openssl x509 -text -noout 1>&2")
'';
}

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1yd2cden7jav8x4nzx2fwze2fsa5j0qm2m3t7zum765z3u4gj433q7dqj43",
"type": "age"
}
]

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1js225d8jc507sgcg0fdfv2x3xv3asm4ds5c6s4hp37nq8spxu95sc5x3ce",
"type": "age"
}
]

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1nwuh8lc604mnz5r8ku8zswyswnwv02excw237c0cmtlejp7xfp8sdrcwfa",
"type": "age"
}
]

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:6+XilULKRuWtAZ6B8Lj9UqCfi1T6dmqrDqBNXqS4SvBwM1bIWiL6juaT1Q7ByOexzID7tY740gmQBqTey54uLydh8mW0m4ZtUqw=,iv:9kscsrMPBGkutTnxrc5nrc7tQXpzLxw+929pUDKqTu0=,tag:753uIjm8ZRs0xsjiejEY8g==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA1d3kycldZRXhmR0FqTXJp\nWWU0MDBYNmxxbFE5M2xKYm5KWnQ0MXBHNEM4CjN4RFFVcFlkd3pjTFVDQ3Vackdj\nVTVhMWoxdFpsWHp5S1p4L05kYk5LUkkKLS0tIENtZFZZTjY2amFVQmZLZFplQzBC\nZm1vWFI4MXR1ZHIxTTQ5VXdSYUhvOTQKte0bKjXQ0xA8FrpuChjDUvjVqp97D8kT\n3tVh6scdjxW48VSBZP1GRmqcMqCdj75GvJTbWeNEV4PDBW7GI0UW+Q==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:39Z",
"mac": "ENC[AES256_GCM,data:AftMorrH7qX5ctVu5evYHn5h9pC4Mmm2VYaAV8Hy0PKTc777jNsL6DrxFVV3NVqtecpwrzZFWKgzukcdcRJe4veVeBrusmoZYtifH0AWZTEVpVlr2UXYYxCDmNZt1WHfVUo40bT//X6QM0ye6a/2Y1jYPbMbryQNcGmnpk9PDvU=,iv:5nk+d8hzA05LQp7ZHRbIgiENg2Ha6J6YzyducM6zcNU=,tag:dy1hqWVzMu/+fSK57h9ZCA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:jdTuGQUYvT1yXei1RHKsOCsABmMlkcLuziHDVhA7NequZeNu0fSbrJTXQDCHsDGhlYRcjU5EsEDT750xdleXuD3Gs9zWvPVobI4=,iv:YVow3K1j6fzRF9bRfIEpuOkO/nRpku/UQxWNGC+UJQQ=,tag:cNLM5R7uu6QpwPB9K6MYzg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvOVF2WXRSL0NpQzFZR01I\nNU85TGcyQmVDazN1dmpuRFVTZEg5NDRKTGhrCk1IVjFSU1V6WHBVRnFWcHkyVERr\nTjFKbW1mQ2FWOWhjN2VPamMxVEQ5VkkKLS0tIENVUGlhanhuWGtDKzBzRmk2dE4v\nMXZBRXNMa3IrOTZTNHRUWVE3UXEwSWMK2cBLoL/H/Vxd/klVrqVLdX9Mww5j7gw/\nEWc5/hN+km6XoW+DiJxVG4qaJ7qqld6u5ZnKgJT+2h9CfjA04I2akg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:51Z",
"mac": "ENC[AES256_GCM,data:zOBQVM2Ydu4v0+Fw3p3cEU+5+7eKaadV0tKro1JVOxclG1Vs6Myq57nw2eWf5JxIl0ulL+FavPKY26qOQ3aqcGOT3PMRlCda9z+0oSn9Im9bE/DzAGmoH/bp76kFkgTTOCZTMUoqJ+UJqv0qy1BH/92sSSKmYshEX6d1vr5ISrw=,iv:i9ZW4sLxOCan4UokHlySVr1CW39nCTusG4DmEPj/gIw=,tag:iZBDPHDkE3Vt5mFcFu1TPQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:5CJuHcxJMXZJ8GqAeG3BrbWtT1kade4kxgJsn1cRpmr1UgN0ZVYnluPEiBscClNSOzcc6vcrBpfTI3dj1tASKTLP58M+GDBFQDo=,iv:gsK7XqBGkYCoqAvyFlIXuJ27PKSbTmy7f6cgTmT2gow=,tag:qG5KejkBvy9ytfhGXa/Mnw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBxbzVqYkplTzJKN1pwS3VM\naFFIK2VsR3lYUVExYW9ieERBL0tlcFZtVzJRCkpiLzdmWmFlOUZ5QUJ4WkhXZ2tQ\nZm92YXBCV0RpYnIydUdEVTRiamI4bjAKLS0tIG93a2htS1hFcjBOeVFnNCtQTHVr\na2FPYjVGbWtORjJVWXE5bndPU1RWcXMKikMEB7X+kb7OtiyqXn3HRpLYkCdoayDh\n7cjGnplk17q25/lRNHM4JVS5isFfuftCl01enESqkvgq+cwuFwa9DQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:59Z",
"mac": "ENC[AES256_GCM,data:xybV2D0xukZnH2OwRpIugPnS7LN9AbgGKwFioPJc1FQWx9TxMUVDwgMN6V5WrhWkXgF2zP4krtDYpEz4Vq+LbOjcnTUteuCc+7pMHubuRuip7j+M32MH1kuf4bVZuXbCfvm7brGxe83FzjoioLqzA8g/X6Q1q7/ErkNeFjluC3Q=,iv:QEW3EUKSRZY3fbXlP7z+SffWkQeXwMAa5K8RQW7NvPE=,tag:DhFxY7xr7H1Wbd527swD0Q==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,12 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIBsDCCAVegAwIBAgIQbT1Ivm+uwyf0HNkJfan2BTAKBggqhkjOPQQDAjAXMRUw
EwYDVQQDEwxDbGFuIFJvb3QgQ0EwHhcNMjUwOTAxMjA0MzAzWhcNMjYwOTAyMDg0
MzAzWjAfMR0wGwYDVQQDExRDbGFuIEludGVybWVkaWF0ZSBDQTBZMBMGByqGSM49
AgEGCCqGSM49AwEHA0IABDXCNrUIotju9P1U6JxLV43sOxLlRphQJS4dM+lvjTZc
aQ+HwQg0AHVlQNRwS3JqKrJJtJVyKbZklh6eFaDPoj6jfTB7MA4GA1UdDwEB/wQE
AwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBRKHaccHgP2ccSWVBWN
zGoDdTg7aTAfBgNVHSMEGDAWgBSfsnz4phMJx9su/kgeF/FbZQCBgzAVBgNVHR4B
Af8ECzAJoAcwBYIDZm9vMAoGCCqGSM49BAMCA0cAMEQCICiUDk1zGNzpS/iVKLfW
zUGaCagpn2mCx4xAXQM9UranAiAn68nVYGWjkzhU31wyCAupxOjw7Bt96XXqIAz9
hLLtMA==
-----END CERTIFICATE-----

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:Auonh9fa7jSkld1Zyxw74x5ydj6Xc+0SOgiqumVETNCfner9K96Rmv1PkREuHNGWPsnzyEM3pRT8ijvu3QoKvy9QPCCewyT07Wqe4G74+bk1iMeAHsV3To6kHs6M8OISvE+CmG0+hlLmdfRSabTzyWPLHbOjvFTEEuA5G7xiryacSYOE++eeEHdn+oUDh/IMTcfLjCGMjsXFikx1Hb+ofeRTlCg47+0w4MXVvQkOzQB5V2C694jZXvZ19jd/ioqr8YASz2xatGvqwW6cpZxqOWyZJ0UAj/6yFk6tZWifqVB3wgU=,iv:ITFCrDkeWl4GWCebVq15ei9QmkOLDwUIYojKZ2TU6JU=,tag:8k4iYbCIusUykY79H86WUQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBsT25UbjJTQ2tzbnQyUm9p\neWx1UlZIeVpocnBqUCt0YnFlN2FOU25Lb0hNCmdXUUsyalRTbHRRQ0NLSGc1YllV\nUXRwaENhaXU1WmdnVDE0UWprUUUyeDAKLS0tIHV3dHU3aG5JclM0V3FadzN0SU14\ndFptbEJUNXQ4QVlqbkJ1TjAvdDQwSGsKcKPWUjhK7wzIpdIdksMShF2fpLdDTUBS\nZiU7P1T+3psxad9qhapvU0JrAY+9veFaYVEHha2aN/XKs8HqUcTp3A==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1yd2cden7jav8x4nzx2fwze2fsa5j0qm2m3t7zum765z3u4gj433q7dqj43",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBjZFVteVZwVGVmRE9NT3hG\nNGMyS3FSaXluM1FpeUp6SDVMUEpwYzg5SmdvCkRPU0QyU1JicGNkdlMyQWVkT0k3\nL2YrbDhWeGk4WFhxcUFmTmhZQ0pEQncKLS0tIG85Ui9rKzBJQ2VkMFBUQTMvSTlu\nbm8rZ09Wa24rQkNvTTNtYTZBN3MrZlkK7cjNhlUKZdOrRq/nKUsbUQgNTzX8jO+0\nzADpz6WCMvsJ15xazc10BGh03OtdMWl5tcoWMaZ71HWtI9Gip5DH0w==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:42Z",
"mac": "ENC[AES256_GCM,data:9xlO5Yis8DG/y8GjvP63NltD4xEL7zqdHL2cQE8gAoh/ZamAmK5ZL0ld80mB3eIYEPKZYvmUYI4Lkrge2ZdqyDoubrW+eJ3dxn9+StxA9FzXYwUE0t+bbsNJfOOp/kDojf060qLGsu0kAGKd2ca4WiDccR0Cieky335C7Zzhi/Q=,iv:bWQ4wr0CJHSN+6ipUbkYTDWZJyFQjDKszfpVX9EEUsY=,tag:kADIFgJBEGCvr5fPbbdEDA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1,10 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIBcTCCARigAwIBAgIRAIix99+AE7Y+uyiLGaRHEhUwCgYIKoZIzj0EAwIwFzEV
MBMGA1UEAxMMQ2xhbiBSb290IENBMB4XDTI1MDkwMTIwNDI1N1oXDTI2MDkwMjA4
NDI1N1owFzEVMBMGA1UEAxMMQ2xhbiBSb290IENBMFkwEwYHKoZIzj0CAQYIKoZI
zj0DAQcDQgAEk7nn9kzxI+xkRmNMlxD+7T78UqV3aqus0foJh6uu1CHC+XaebMcw
JN95nAe3oYA3yZG6Mnq9nCxsYha4EhzGYqNFMEMwDgYDVR0PAQH/BAQDAgEGMBIG
A1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFJ+yfPimEwnH2y7+SB4X8VtlAIGD
MAoGCCqGSM49BAMCA0cAMEQCIBId/CcbT5MPFL90xa+XQz+gVTdRwsu6Bg7ehMso
Bj0oAiBjSlttd5yeuZGXBm+O0Gl+WdKV60QlrWutNewXFS4UpQ==
-----END CERTIFICATE-----

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:PnEXteU3I7U0OKgE+oR3xjHdLWYTpJjM/jlzxtGU0uP2pUBuQv3LxtEz+cP0ZsafHLNq2iNJ7xpUEE0g4d3M296S56oSocK3fREWBiJFiaC7SAEUiil1l3UCwHn7LzmdEmn8Kq7T+FK89wwqtVWIASLo2gZC/yHE5eEanEATTchGLSNiHJRzZ8n0Ekm8EFUA6czOqA5nPQHaSmeLzu1g80lSSi1ICly6dJksa6DVucwOyVFYFEeq8Dfyc1eyP8L1ee0D7QFYBMduYOXTKPtNnyDmdaQMj7cMMvE7fn04idIiAqw=,iv:nvLmAfFk2GXnnUy+Afr648R60Ou13eu9UKykkiA8Y+4=,tag:lTTAxfG0EDCU6u7xlW6xSQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBEMjNWUm5NbktQeTRWRjJE\nWWFZc2Rsa3I5aitPSno1WnhORENNcng5OHprCjNUQVhBVHFBcWFjaW5UdmxKTnZw\nQlI4MDk5Wkp0RElCeWgzZ2dFQkF2dkkKLS0tIDVreTkydnJ0RDdHSHlQeVV6bGlP\nTmpJOVBSb2dkVS9TZG5SRmFjdnQ1b3cKQ5XvwH1jD4XPVs5RzOotBDq8kiE6S5k2\nDBv6ugjsM5qV7/oGP9H69aSB4jKPZjEn3yiNw++Oorc8uXd5kSGh7w==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:43:00Z",
"mac": "ENC[AES256_GCM,data:3jFf66UyZUWEtPdPu809LCS3K/Hc6zbnluystl3eXS+KGI+dCoYmN9hQruRNBRxf6jli2RIlArmmEPBDQVt67gG/qugTdT12krWnYAZ78iocmOnkf44fWxn/pqVnn4JYpjEYRgy8ueGDnUkwvpGWVZpcXw5659YeDQuYOJ2mq0U=,iv:3k7fBPrABdLItQ2Z+Mx8Nx0eIEKo93zG/23K+Q5Hl3I=,tag:aehAObdx//DEjbKlOeM7iQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../../../sops/users/admin

View File

@@ -1,68 +0,0 @@
This module enables hosting clan-internal services easily, which can be resolved
inside your VPN. This allows defining a custom top-level domain (e.g. `.clan`)
and exposing endpoints from a machine to others, which will be
accessible under `http://<service>.clan` in your browser.
The service consists of two roles:
- A `server` role: This is the DNS-server that will be queried when trying to
resolve clan-internal services. It defines the top-level domain.
- A `default` role: This does two things. First, it sets up the nameservers so
that clan-internal queries are resolved via the `server` machine, while
external queries are resolved as normal via DHCP. Second, it allows exposing
services (see example below).
## Example Usage
Here the machine `dnsserver` is designated as internal DNS-server for the TLD
`.foo`. `server01` will host an application that shall be reachable at
`http://one.foo` and `server02` is going to be reachable at `http://two.foo`.
`client` is any other machine that is part of the clan but does not host any
services.
When `client` tries to resolve `http://one.foo`, the DNS query will be
routed to `dnsserver`, which will answer with `192.168.1.3`. If it tries to
resolve some external domain (e.g. `https://clan.lol`), the query will not be
routed to `dnsserver` but resolved as before, via the nameservers advertised by
DHCP.
```nix
inventory = {
machines = {
dnsserver = { }; # 192.168.1.2
server01 = { }; # 192.168.1.3
server02 = { }; # 192.168.1.4
client = { }; # 192.168.1.5
};
instances = {
coredns = {
module.name = "@clan/coredns";
module.input = "self";
# Add the default role to all machines, including `client`
roles.default.tags.all = { };
# DNS server
roles.server.machines."dnsserver".settings = {
ip = "192.168.1.2";
tld = "foo";
};
# First service
roles.default.machines."server01".settings = {
ip = "192.168.1.3";
services = [ "one" ];
};
# Second service
roles.default.machines."server02".settings = {
ip = "192.168.1.4";
services = [ "two" ];
};
};
};
};
```
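For the inventory above, the `server` role renders roughly the following zone (derived from the zonefile template and `hostServiceEntries` in `default.nix` below; serial and timer values are taken verbatim from that template):

```
$TTL 3600
@   IN SOA ns.foo. admin.foo. 1 7200 3600 1209600 3600
    IN NS ns.foo.
ns  IN A 192.168.1.2 ; DNS server
one IN A 192.168.1.3 ; server01
two IN A 192.168.1.4 ; server02
```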

View File

@@ -1,157 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "coredns";
manifest.description = "Clan-internal DNS and service exposure";
manifest.categories = [ "Network" ];
manifest.readme = builtins.readFile ./README.md;
roles.server = {
interface =
{ lib, ... }:
{
options.tld = lib.mkOption {
type = lib.types.str;
default = "clan";
description = ''
Top-level domain for this instance. All services below this will be
resolved internally.
'';
};
options.ip = lib.mkOption {
type = lib.types.str;
# TODO: Set a default
description = "IP for the DNS to listen on";
};
};
perInstance =
{
roles,
settings,
...
}:
{
nixosModule =
{
lib,
pkgs,
...
}:
{
networking.firewall.allowedTCPPorts = [ 53 ];
networking.firewall.allowedUDPPorts = [ 53 ];
services.coredns =
let
# Get all service entries for one host
hostServiceEntries =
host:
lib.strings.concatStringsSep "\n" (
map (
service: "${service} IN A ${roles.default.machines.${host}.settings.ip} ; ${host}"
) roles.default.machines.${host}.settings.services
);
zonefile = pkgs.writeTextFile {
name = "db.${settings.tld}";
text = ''
$TTL 3600
@ IN SOA ns.${settings.tld}. admin.${settings.tld}. 1 7200 3600 1209600 3600
IN NS ns.${settings.tld}.
ns IN A ${settings.ip} ; DNS server
''
+ (lib.strings.concatStringsSep "\n" (
map (host: hostServiceEntries host) (lib.attrNames roles.default.machines)
));
};
in
{
enable = true;
config = ''
. {
forward . 1.1.1.1
cache 30
}
${settings.tld} {
file ${zonefile}
}
'';
};
};
};
};
roles.default = {
interface =
{ lib, ... }:
{
options.services = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = ''
Service endpoints this host exposes (without TLD). Each entry will
be resolved to <entry>.<tld> using the configured top-level domain.
'';
};
options.ip = lib.mkOption {
type = lib.types.str;
# TODO: Set a default
description = "IP on which the services will listen";
};
};
perInstance =
{ roles, ... }:
{
nixosModule =
{ lib, ... }:
{
networking.nameservers = map (m: "127.0.0.1:5353#${roles.server.machines.${m}.settings.tld}") (
lib.attrNames roles.server.machines
);
services.resolved.domains = map (m: "~${roles.server.machines.${m}.settings.tld}") (
lib.attrNames roles.server.machines
);
services.unbound = {
enable = true;
settings = {
server = {
port = 5353;
verbosity = 2;
interface = [ "127.0.0.1" ];
access-control = [ "127.0.0.0/8 allow" ];
do-not-query-localhost = "no";
domain-insecure = map (m: "${roles.server.machines.${m}.settings.tld}.") (
lib.attrNames roles.server.machines
);
};
# Default: forward everything else to DHCP-provided resolvers
forward-zone = [
{
name = ".";
forward-addr = "127.0.0.53@53"; # Forward to systemd-resolved
}
];
stub-zone = map (m: {
name = "${roles.server.machines.${m}.settings.tld}.";
stub-addr = "${roles.server.machines.${m}.settings.ip}";
}) (lib.attrNames roles.server.machines);
};
};
};
};
};
}

View File

@@ -1,113 +0,0 @@
{
...
}:
{
name = "coredns";
clan = {
directory = ./.;
test.useContainers = true;
inventory = {
machines = {
dns = { }; # 192.168.1.2
server01 = { }; # 192.168.1.3
server02 = { }; # 192.168.1.4
client = { }; # 192.168.1.1
};
instances = {
coredns = {
module.name = "@clan/coredns";
module.input = "self";
roles.default.tags.all = { };
# First service
roles.default.machines."server01".settings = {
ip = "192.168.1.3";
services = [ "one" ];
};
# Second service
roles.default.machines."server02".settings = {
ip = "192.168.1.4";
services = [ "two" ];
};
# DNS server
roles.server.machines."dns".settings = {
ip = "192.168.1.2";
tld = "foo";
};
};
};
};
};
nodes = {
dns =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.net-tools ];
};
client =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.net-tools ];
};
server01 = {
services.nginx = {
enable = true;
virtualHosts."one.foo" = {
locations."/" = {
return = "200 'test server response one'";
extraConfig = "add_header Content-Type text/plain;";
};
};
};
};
server02 = {
services.nginx = {
enable = true;
virtualHosts."two.foo" = {
locations."/" = {
return = "200 'test server response two'";
extraConfig = "add_header Content-Type text/plain;";
};
};
};
};
};
testScript = ''
import json
start_all()
machines = [server01, server02, dns, client]
for m in machines:
m.systemctl("start network-online.target")
for m in machines:
m.wait_for_unit("network-online.target")
# import time
# time.sleep(2333333)
# This should work, but is broken in tests, I think? Instead we dig directly
# client.succeed("curl -k -v http://one.foo")
# client.succeed("curl -k -v http://two.foo")
answer = client.succeed("dig @192.168.1.2 one.foo")
assert "192.168.1.3" in answer, "IP not found"
answer = client.succeed("dig @192.168.1.2 two.foo")
assert "192.168.1.4" in answer, "IP not found"
'';
}

View File

@@ -1,4 +0,0 @@
{
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"type": "age"
}

View File

@@ -24,5 +24,12 @@
};
};
imports = [ ./telegraf.nix ];
# roles.prometheus = {
# interface = { lib, ... }: { };
# };
imports = [
./telegraf.nix
./prometheus.nix
];
}

View File

@@ -0,0 +1,182 @@
{
roles.prometheus.perInstance =
{ settings, roles, ... }:
{
nixosModule =
{ pkgs, lib, ... }:
{
# imports = [
# # ./matrix-alertmanager.nix
# # ./irc-alertmanager.nix
# # ./rules.nix
# ];
services.prometheus = {
# webExternalUrl = "https://prometheus.thalheim.io";
extraFlags = [ "--storage.tsdb.retention.time=30d" ];
scrapeConfigs = [
{
job_name = "telegraf";
scrape_interval = "60s";
metrics_path = "/metrics";
# static_configs expects a list of { labels, targets } entries, one per host
static_configs = map (host: {
labels.host = host;
# labels.org = "TODO";
targets = [ "${host}.clan:9273" ];
}) (lib.attrNames roles.telegraf.machines);
# {
# # labels.host = "rauter.r:9273";
# # labels.org = "TODO";
# targets = map (host: "${host}.clan:9273") (lib.attrNames roles.telegraf.machines);
# }
}
# {
# job_name = "gitea";
# scrape_interval = "60s";
# metrics_path = "/metrics";
#
# scheme = "https";
# static_configs = [ { targets = [ "git.thalheim.io:443" ]; } ];
# }
];
alertmanagers = [ { static_configs = [ { targets = [ "localhost:9093" ]; } ]; } ];
};
services.prometheus.alertmanager = {
enable = true;
# environmentFile = config.sops.secrets.alertmanager.path;
# webExternalUrl = "https://alertmanager.thalheim.io";
# listenAddress = "[::1]";
# configuration = {
# global = {
# # The smarthost and SMTP sender used for mail notifications.
# smtp_smarthost = "mail.thalheim.io:587";
# smtp_from = "alertmanager@thalheim.io";
# smtp_auth_username = "alertmanager@thalheim.io";
# smtp_auth_password = "$SMTP_PASSWORD";
# };
# route = {
# receiver = "default";
# routes = [
# {
# group_by = [ "host" ];
# match_re.org = "krebs";
# group_wait = "5m";
# group_interval = "5m";
# repeat_interval = "4h";
# receiver = "krebs";
# }
# {
# group_by = [ "host" ];
# match_re.org = "nixos-wiki";
# group_wait = "5m";
# group_interval = "5m";
# repeat_interval = "4h";
# receiver = "nixos-wiki";
# }
# {
# group_by = [ "host" ];
# match_re.org = "numtide";
# group_wait = "5m";
# group_interval = "5m";
# repeat_interval = "4h";
# receiver = "numtide";
# }
# {
# group_by = [ "host" ];
# match_re.org = "clan-lol";
# group_wait = "5m";
# group_interval = "5m";
# repeat_interval = "4h";
# receiver = "clan-lol";
# }
# {
# group_by = [ "host" ];
# match_re.org = "dave";
# group_wait = "5m";
# group_interval = "5m";
# repeat_interval = "4h";
# receiver = "dave";
# }
# {
# group_by = [ "host" ];
# group_wait = "30s";
# group_interval = "2m";
# repeat_interval = "2h";
# receiver = "all";
# }
# ];
# };
# receivers = [
# {
# name = "krebs";
# webhook_configs = [
# {
# url = "http://127.0.0.1:9223/";
# max_alerts = 5;
# }
# ];
# }
# {
# name = "numtide";
# webhook_configs = [
# # TODO
# #{
# # send_resolved = true;
# # url = "https://chat.ntd.one/plugins/alertmanager/api/webhook?token='xxxxxxxxxxxxxxxxxxx-yyyyyyy'";
# #}
# ];
# }
# {
# name = "nixos-wiki";
# webhook_configs = [
# {
# url = "http://localhost:9088/alert";
# max_alerts = 5;
# }
# ];
# }
# {
# name = "clan-lol";
# webhook_configs = [
# # TODO
# #{
# # url = "http://localhost:4050/services/hooks/YWxlcnRtYW5hZ2VyX3NlcnZpY2U";
# # max_alerts = 5;
# #}
# ];
# }
# {
# name = "dave";
# telegram_configs = [
# {
# chat_id = 42927997;
# bot_token = "$TELEGRAM_BOT_TOKEN";
# }
# ];
# }
# {
# name = "all";
# # pushover_configs = [
# # {
# # user_key = "$PUSHOVER_USER_KEY";
# # token = "$PUSHOVER_TOKEN";
# # priority = "0";
# # }
# # ];
# }
# { name = "default"; }
# ];
# };
};
};
};
}

View File

@@ -4,66 +4,22 @@
{
nixosModule =
{
config,
pkgs,
lib,
...
}:
let
jsonpath = "/tmp/telegraf.json";
auth_user = "prometheus";
in
{ pkgs, lib, ... }:
{
networking.firewall.interfaces = lib.mkIf (settings.allowAllInterfaces == false) (
builtins.listToAttrs (
map (name: {
inherit name;
value.allowedTCPPorts = [
9273
9990
];
value.allowedTCPPorts = [ 9273 ];
}) settings.interfaces
)
);
networking.firewall.allowedTCPPorts = lib.mkIf (settings.allowAllInterfaces == true) [
9273
9990
];
clan.core.vars.generators."telegraf" = {
files.password.restartUnits = [ "telegraf.service" ];
files.password-env.restartUnits = [ "telegraf.service" ];
files.miniserve-auth.restartUnits = [ "telegraf.service" ];
runtimeInputs = [
pkgs.coreutils
pkgs.xkcdpass
pkgs.mkpasswd
];
script = ''
PASSWORD=$(xkcdpass --numwords 4 --delimiter - --count 1 | tr -d "\n")
echo "BASIC_AUTH_PWD=$PASSWORD" > "$out"/password-env
echo "${auth_user}:$PASSWORD" > "$out"/miniserve-auth
echo "$PASSWORD" | tr -d "\n" > "$out"/password
'';
};
systemd.services.telegraf-json = {
enable = true;
wantedBy = [ "multi-user.target" ];
script = "${pkgs.miniserve}/bin/miniserve -p 9990 ${jsonpath} --auth-file ${config.clan.core.vars.generators.telegraf.files.miniserve-auth.path}";
};
networking.firewall.allowedTCPPorts = lib.mkIf (settings.allowAllInterfaces == true) [ 9273 ];
services.telegraf = {
enable = true;
environmentFiles = [
(builtins.toString config.clan.core.vars.generators.telegraf.files.password-env.path)
];
extraConfig = {
agent.interval = "60s";
inputs = {
@@ -77,34 +33,22 @@
exec =
let
nixosSystems = pkgs.writeShellScript "current-system" ''
printf "nixos_systems,current_system=%s,booted_system=%s,current_kernel=%s,booted_kernel=%s present=0\n" \
"$(readlink /run/current-system)" "$(readlink /run/booted-system)" \
"$(basename $(echo /run/current-system/kernel-modules/lib/modules/*))" \
"$(basename $(echo /run/booted-system/kernel-modules/lib/modules/*))"
currentSystemScript = pkgs.writeShellScript "current-system" ''
printf "current_system,path=%s present=0\n" $(readlink /run/current-system)
'';
in
[
{
# Expose the path to current-system as metric. We use
# this to check if the machine is up-to-date.
commands = [ nixosSystems ];
commands = [ currentSystemScript ];
data_format = "influx";
}
];
};
# sadly there doesn't seem to be a telegraf http_client output plugin
outputs.prometheus_client = {
listen = ":9273";
metric_version = 2;
basic_username = "${auth_user}";
basic_password = "$${BASIC_AUTH_PWD}";
};
outputs.file = {
files = [ jsonpath ];
data_format = "json";
json_timestamp_units = "1s";
};
};
};

View File

@@ -0,0 +1,37 @@
This service generates the `system.stateVersion` of the NixOS installation
automatically.
Possible values:
[system.stateVersion](https://search.nixos.org/options?channel=unstable&show=system.stateVersion&from=0&size=50&sort=relevance&type=packages&query=stateVersion)
## Usage
The following configuration will set `stateVersion` for all machines:
```
inventory.instances = {
state-version = {
module = {
name = "state-version";
input = "clan";
};
roles.default.tags.all = { };
};
```
## Migration
If you are already setting `system.stateVersion`, either let the automatic
generation happen, or trigger the generation manually for the machine. The
service will use the specified version if one is already supplied through the
config.
To manually generate the version for a specified machine run:
```
clan vars generate [MACHINE]
```
If the setting was already set, you can then remove `system.stateVersion` from
your machine configuration. For new machines, just import the service as shown
above.
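Since this service is deprecated in favor of a Nix option (see the warning in `default.nix` below), new configurations can enable generation directly instead; a minimal sketch:

```nix
# per the deprecation warning in this service's default.nix
clan.core.settings.state-version.enable = true;
```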

View File

@@ -0,0 +1,50 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "clan-core/state-version";
manifest.description = "Automatically generate the state version of the nixos installation.";
manifest.categories = [ "System" ];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
perInstance =
{ ... }:
{
nixosModule =
{
config,
lib,
...
}:
let
var = config.clan.core.vars.generators.state-version.files.version or { };
in
{
warnings = [
''
The clan.state-version service is deprecated and will be
removed on 2025-07-15 in favor of a nix option.
Please migrate your configuration to use `clan.core.settings.state-version.enable = true` instead.
''
];
system.stateVersion = lib.mkDefault (lib.removeSuffix "\n" var.value);
clan.core.vars.generators.state-version = {
files.version = {
secret = false;
value = lib.mkDefault config.system.nixos.release;
};
runtimeInputs = [ ];
script = ''
echo -n ${config.system.stateVersion} > "$out"/version
'';
};
};
};
};
}

View File

@@ -3,16 +3,14 @@ let
module = lib.modules.importApply ./default.nix { };
in
{
clan.modules = {
coredns = module;
};
clan.modules.state-version = module;
perSystem =
{ ... }:
{
clan.nixosTests.coredns = {
clan.nixosTests.state-version = {
imports = [ ./tests/vm/default.nix ];
clan.modules."@clan/coredns" = module;
clan.modules."@clan/state-version" = module;
};
};
}

View File

@@ -0,0 +1,22 @@
{ lib, ... }:
{
name = "service-state-version";
clan = {
directory = ./.;
inventory = {
machines.server = { };
instances.default = {
module.name = "@clan/state-version";
module.input = "self";
roles.default.machines."server" = { };
};
};
};
nodes.server = { };
testScript = lib.mkDefault ''
start_all()
'';
}

View File

@@ -17,20 +17,6 @@
};
};
# Deploy user Carol on all machines. Prompt only once and use the
# same password on all machines. (`share = true`)
user-carol = {
module = {
name = "users";
input = "clan";
};
roles.default.tags.all = { };
roles.default.settings = {
user = "carol";
share = true;
};
};
# Deploy user bob only on his laptop. Prompt for a password.
user-bob = {
module = {
@@ -43,44 +29,3 @@
};
}
```
## Migration from `root-password` module
The deprecated `clan.root-password` module has been replaced by the `users` module. Here's how to migrate:
### 1. Update your flake configuration
Replace the `root-password` module import with a `users` service instance:
```nix
# OLD - Remove this from your nixosModules:
imports = [
self.inputs.clan-core.clanModules.root-password
];
# NEW - Add to inventory.instances or machines/flake-module.nix:
instances = {
users-root = {
module.name = "users";
module.input = "clan-core";
roles.default.tags.nixos = { };
roles.default.settings = {
user = "root";
prompt = false; # Set to true if you want to be prompted
groups = [ ];
};
};
};
```
### 2. Migrate vars
The vars structure has changed from `root-password` to `user-password-root`:
```bash
# For each machine, rename the vars directories:
cd vars/per-machine/<machine-name>/
mv root-password user-password-root
mv user-password-root/password-hash user-password-root/user-password-hash
mv user-password-root/password user-password-root/user-password
```

View File

@@ -59,17 +59,6 @@
- "input" - Allows the user to access input devices.
'';
};
share = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = ''
Whether the user should have the same password on all machines.
By default, you will be prompted for a new password for every host,
unless `generate` is set to `true`.
'';
};
};
};
@@ -93,6 +82,7 @@
};
clan.core.vars.generators."user-password-${settings.user}" = {
files.user-password-hash.neededFor = "users";
files.user-password-hash.restartUnits = lib.optional (config.services.userborn.enable) "userborn.service";
files.user-password.deploy = false;
@@ -117,8 +107,6 @@
pkgs.mkpasswd
];
share = settings.share;
script =
(
if settings.prompt then

View File

@@ -1,5 +1,6 @@
#!/usr/bin/env python3
"""IPv6 address allocator for WireGuard networks.
"""
IPv6 address allocator for WireGuard networks.
Network layout:
- Base network: /40 ULA prefix (fd00::/8 + 32 bits from hash)
@@ -12,11 +13,6 @@ import ipaddress
import sys
from pathlib import Path
# Constants for argument count validation
MIN_ARGS_BASE = 4
MIN_ARGS_CONTROLLER = 5
MIN_ARGS_PEER = 5
def hash_string(s: str) -> str:
"""Generate SHA256 hash of string."""
@@ -24,7 +20,8 @@ def hash_string(s: str) -> str:
def generate_ula_prefix(instance_name: str) -> ipaddress.IPv6Network:
"""Generate a /40 ULA prefix from instance name.
"""
Generate a /40 ULA prefix from instance name.
Format: fd{32-bit hash}/40
This gives us fd00:0000:0000::/40 through fdff:ffff:ff00::/40
@@ -44,14 +41,15 @@ def generate_ula_prefix(instance_name: str) -> ipaddress.IPv6Network:
prefix = f"fd{prefix_bits:08x}"
prefix_formatted = f"{prefix[:4]}:{prefix[4:8]}::/40"
return ipaddress.IPv6Network(prefix_formatted)
network = ipaddress.IPv6Network(prefix_formatted)
return network
def generate_controller_subnet(
base_network: ipaddress.IPv6Network,
controller_name: str,
base_network: ipaddress.IPv6Network, controller_name: str
) -> ipaddress.IPv6Network:
"""Generate a /56 subnet for a controller from the base /40 network.
"""
Generate a /56 subnet for a controller from the base /40 network.
We have 16 bits (40 to 56) to allocate controller subnets.
This allows for 65,536 possible controller subnets.
@@ -64,11 +62,14 @@ def generate_controller_subnet(
# The controller subnet is at base_prefix:controller_id::/56
base_int = int(base_network.network_address)
controller_subnet_int = base_int | (controller_id << (128 - 56))
return ipaddress.IPv6Network((controller_subnet_int, 56))
controller_subnet = ipaddress.IPv6Network((controller_subnet_int, 56))
return controller_subnet
def generate_peer_suffix(peer_name: str) -> str:
"""Generate a unique 64-bit host suffix for a peer.
"""
Generate a unique 64-bit host suffix for a peer.
This suffix will be used in all controller subnets to create unique addresses.
Format: :xxxx:xxxx:xxxx:xxxx (64 bits)
@@ -78,13 +79,14 @@ def generate_peer_suffix(peer_name: str) -> str:
suffix_bits = h[:16]
# Format as IPv6 suffix without leading colon
return f"{suffix_bits[0:4]}:{suffix_bits[4:8]}:{suffix_bits[8:12]}:{suffix_bits[12:16]}"
suffix = f"{suffix_bits[0:4]}:{suffix_bits[4:8]}:{suffix_bits[8:12]}:{suffix_bits[12:16]}"
return suffix
def main() -> None:
if len(sys.argv) < MIN_ARGS_BASE:
if len(sys.argv) < 4:
print(
"Usage: ipv6_allocator.py <output_dir> <instance_name> <controller|peer> <machine_name>",
"Usage: ipv6_allocator.py <output_dir> <instance_name> <controller|peer> <machine_name>"
)
sys.exit(1)
@@ -96,7 +98,7 @@ def main() -> None:
base_network = generate_ula_prefix(instance_name)
if node_type == "controller":
if len(sys.argv) < MIN_ARGS_CONTROLLER:
if len(sys.argv) < 5:
print("Controller name required")
sys.exit(1)
@@ -112,7 +114,7 @@ def main() -> None:
(output_dir / "prefix").write_text(prefix_str)
elif node_type == "peer":
if len(sys.argv) < MIN_ARGS_PEER:
if len(sys.argv) < 5:
print("Peer name required")
sys.exit(1)

View File

@@ -1,33 +0,0 @@
This module sets up [yggdrasil](https://yggdrasil-network.github.io/) across
your clan.
Yggdrasil is designed to be a future-proof and decentralised alternative to
the structured routing protocols commonly used today on the internet. Inside
your clan, it will allow you to reach all of your machines.
## Example Usage
While you can specify statically configured peers for each host, yggdrasil does
auto-discovery of local peers.
```nix
inventory = {
machines = {
peer1 = { };
peer2 = { };
};
instances = {
yggdrasil = {
# Deploy on all machines
roles.default.tags.all = { };
# Or individual hosts
roles.default.machines.peer1 = { };
roles.default.machines.peer2 = { };
};
};
};
```
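Where auto-discovery is not available (e.g. machines on different networks), static peers can be set per role; a sketch based on the `peers` option this module defines (the address is a placeholder):

```nix
inventory.instances.yggdrasil = {
  roles.default.tags.all = { };
  # fall back to a statically configured peer when multicast discovery fails
  roles.default.settings.peers = [
    "tls://192.168.1.1:6443"
  ];
};
```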

View File

@@ -1,116 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "clan-core/yggdrasil";
manifest.description = "Yggdrasil encrypted IPv6 routing overlay network";
roles.default = {
interface =
{ lib, ... }:
{
options.extraMulticastInterfaces = lib.mkOption {
type = lib.types.listOf lib.types.attrs;
default = [ ];
description = ''
Additional interfaces to use for Multicast. See
https://yggdrasil-network.github.io/configurationref.html#multicastinterfaces
for reference.
'';
example = [
{
Regex = "(wg).*";
Beacon = true;
Listen = true;
Port = 5400;
Priority = 1020;
}
];
};
options.peers = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = ''
Static peers to configure for this host.
If not set, local peers will be auto-discovered.
'';
example = [
"tcp://192.168.1.1:6443"
"quic://192.168.1.1:6443"
"tls://192.168.1.1:6443"
"ws://192.168.1.1:6443"
];
};
};
perInstance =
{ settings, ... }:
{
nixosModule =
{
config,
pkgs,
...
}:
{
clan.core.vars.generators.yggdrasil = {
files.privateKey = { };
files.publicKey.secret = false;
files.address.secret = false;
runtimeInputs = with pkgs; [
yggdrasil
jq
openssl
];
script = ''
# Generate private key
openssl genpkey -algorithm Ed25519 -out $out/privateKey
# Generate corresponding public key
openssl pkey -in $out/privateKey -pubout -out $out/publicKey
# Derive IPv6 address from key
echo "{ \"PrivateKeyPath\": \"$out/privateKey\"}" | yggdrasil -useconf -address > $out/address
'';
};
systemd.services.yggdrasil.serviceConfig.BindReadOnlyPaths = [
"${config.clan.core.vars.generators.yggdrasil.files.privateKey.path}:/var/lib/yggdrasil/key"
];
services.yggdrasil = {
enable = true;
openMulticastPort = true;
persistentKeys = true;
settings = {
PrivateKeyPath = "/var/lib/yggdrasil/key";
IfName = "ygg";
Peers = settings.peers;
MulticastInterfaces = [
# Ethernet is preferred over WIFI
{
Regex = "(eth|en).*";
Beacon = true;
Listen = true;
Port = 5400;
Priority = 1024;
}
{
Regex = "(wl).*";
Beacon = true;
Listen = true;
Port = 5400;
Priority = 1025;
}
]
++ settings.extraMulticastInterfaces;
};
};
networking.firewall.allowedTCPPorts = [ 5400 ];
};
};
};
}

View File

@@ -1,24 +0,0 @@
{
self,
lib,
...
}:
let
module = lib.modules.importApply ./default.nix {
inherit (self) packages;
};
in
{
clan.modules = {
yggdrasil = module;
};
perSystem =
{ ... }:
{
clan.nixosTests.yggdrasil = {
imports = [ ./tests/vm/default.nix ];
clan.modules.yggdrasil = module;
};
};
}

View File

@@ -1,93 +0,0 @@
{
name = "yggdrasil";
clan = {
test.useContainers = false;
directory = ./.;
inventory = {
machines.peer1 = { };
machines.peer2 = { };
instances."yggdrasil" = {
module.name = "yggdrasil";
module.input = "self";
# Assign the roles to the two machines
roles.default.machines.peer1 = { };
roles.default.machines.peer2 = { };
};
};
};
# TODO remove after testing, this is just to make @pinpox' life easier
nodes =
let
c =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [ net-tools ];
console = {
font = "Lat2-Terminus16";
keyMap = "colemak";
};
};
in
{
peer1 = c;
peer2 = c;
};
testScript = ''
start_all()
# Wait for both machines to be ready
peer1.wait_for_unit("multi-user.target")
peer2.wait_for_unit("multi-user.target")
# Check that yggdrasil service is running on both machines
peer1.wait_for_unit("yggdrasil")
peer2.wait_for_unit("yggdrasil")
peer1.succeed("systemctl is-active yggdrasil")
peer2.succeed("systemctl is-active yggdrasil")
# Check that both machines have yggdrasil network interfaces
# Yggdrasil creates a tun interface (usually tun0)
peer1.wait_until_succeeds("ip link show | grep -E 'ygg'", 30)
peer2.wait_until_succeeds("ip link show | grep -E 'ygg'", 30)
# Get yggdrasil IPv6 addresses from both machines
peer1_ygg_ip = peer1.succeed("yggdrasilctl -json getself | jq -r '.address'").strip()
peer2_ygg_ip = peer2.succeed("yggdrasilctl -json getself | jq -r '.address'").strip()
# TODO: enable this check. Values don't match up yet, but I can't
# update-vars to test, because the script is broken.
# Compare runtime addresses with saved addresses from vars
# expected_peer1_ip = "${builtins.readFile ./vars/per-machine/peer1/yggdrasil/address/value}"
# expected_peer2_ip = "${builtins.readFile ./vars/per-machine/peer2/yggdrasil/address/value}"
print(f"peer1 yggdrasil IP: {peer1_ygg_ip}")
print(f"peer2 yggdrasil IP: {peer2_ygg_ip}")
# print(f"peer1 expected IP: {expected_peer1_ip}")
# print(f"peer2 expected IP: {expected_peer2_ip}")
#
# # Verify that runtime addresses match expected addresses
# assert peer1_ygg_ip == expected_peer1_ip, f"peer1 runtime IP {peer1_ygg_ip} != expected IP {expected_peer1_ip}"
# assert peer2_ygg_ip == expected_peer2_ip, f"peer2 runtime IP {peer2_ygg_ip} != expected IP {expected_peer2_ip}"
# Wait a bit for the yggdrasil network to establish connectivity
import time
time.sleep(10)
# Test connectivity: peer1 should be able to ping peer2 via yggdrasil
peer1.succeed(f"ping -6 -c 3 {peer2_ygg_ip}")
# Test connectivity: peer2 should be able to ping peer1 via yggdrasil
peer2.succeed(f"ping -6 -c 3 {peer1_ygg_ip}")
'';
}

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1r264u9yngfq8qkrveh4tn0rhfes02jfgrtqufdx4n4m3hs4rla2qx0rk4d",
"type": "age"
}
]

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1p8kuf8s0nfekwreh4g38cgghp4nzszenx0fraeyky2me0nly2scstqunx8",
"type": "age"
}
]

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:3dolkgdLC4y5fps4gGb9hf4QhwkUUBodlMOKT+/+erO70FB/pzYBg0mQjQy/uqjINzfIiM32iwVDnx3/Yyz5BDRo2CK+83UGEi4=,iv:FRp1HqlU06JeyEXXFO5WxJWxeLnmUJRWGuFKcr4JFOM=,tag:rbi30HJuqPHdU/TqInGXmg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBoYXBxS1JuNW9NeC9YU0xY\nK2xQWDhUYjZ4VzZmeUw1aG9UN2trVnBGQ0J3Ckk0V3d0UFBkT0RnZjBoYjNRVEVW\nN2VEdCtUTUUwenhJSEErT0MyWDA2bHMKLS0tIHJJSzVtR3NCVXozbzREWjltN2ZG\nZm44Y1c4MWNIblcxbmt2YkdxVE10Z1UKmJKEjiYZ9U47QACkbacNTirQIcCvFjM/\nwVxSEVq524sK8LCyIEvsG4e3I3Kn0ybZjoth7J/jg7J4gb8MVw+leQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-16T08:13:06Z",
"mac": "ENC[AES256_GCM,data:6HJDkg0AWz+zx5niSIyBAGGaeemwPOqTCA/Fa6VjjyCh1wOav3OTzy/DRBOCze4V52hMGV3ULrI2V7G7DdvQy6LqiKBTQX5ZbWm3IxLASamJBjUJ1LvTm97WvyL54u/l2McYlaUIC8bYDl1UQUqDMo9pN4GwdjsRNCIl4O0Z7KY=,iv:zkWfYuhqwKpZk/16GlpKdAi2qS6LiPvadRJmxp2ZW+w=,tag:qz1gxVnT3OjWxKRKss5W8w==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:BW15ydnNpr0NIXu92nMsD/Y52BDEOsdZg2/fiM8lwSTJN3lEymrIBYsRrcPAnGpFb52d7oN8zdNz9WoW3f/Xwl136sWDz/sc0k4=,iv:7m77nOR/uXLMqXB5QmegtoYVqByJVFFqZIVOtlAonzg=,tag:8sUo9DRscNRajrk+CzHzHw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBLVWpnSlJOTVU4NWRMSCto\nS0RaR2RCTUJjT1J0VzRPVTdPL2N5Yjl3c0EwCmlabm1aSzdlV29nb3lrZFBEZXR6\nRjI2TGZUNW1KQ3pLbDFscUlKSnVBNWcKLS0tIDlLR1VFSTRHeWNiQ29XK1pUUnlr\nVkVHOXdJeHhpcldYNVhpK1V6Nng0eW8KSsqJejY1kll6bUBUngiolCB7OhjyI0Gc\nH+9OrORt/nLnc51eo/4Oh9vp/dvSZzuW9MOF9m0f6B3WOFRVMAbukQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-16T08:13:15Z",
"mac": "ENC[AES256_GCM,data:dyLnGXBC4nGOgX2TrGhf8kI/+Et0PRy+Ppr228y3LYzgcmUunZl9R8+QXJN51OJSQ63gLun5TBw0v+3VnRVBodlhqTDtfACJ7eILCiArPJqeZoh5MR6HkF31yfqTRlXl1i6KHRPVWvjRIdwJ9yZVN1XNAUsxc7xovqS6kkkGPsA=,iv:7yXnpbU7Zf7GH1+Uimq8eXDUX1kO/nvTaGx4nmTrKdM=,tag:WNn9CUOdCAlksC0Qln5rVg==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,4 +0,0 @@
{
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"type": "age"
}

View File

@@ -1 +0,0 @@
200:91bb:f1ec:c580:6d52:70b3:4d60:7bf2

View File

@@ -1 +0,0 @@
../../../../../../sops/machines/peer1

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:/YoEoYY8CmqK4Yk4fmZieIHIvRn779aikoo3+6SWI5SxuU8TLJVY9+Q7mRmnbCso/8RPMICWkZMIkfbxYi6Dwc4UFmLwPqCoeAYsFBiHsJ6QUoTm1qtDDfXcruFs8Mo93ZmJb7oJIC0a+sVbB5L1NsGmG3g+a+g=,iv:KrMjRIQXutv9WdNzI5VWD6SMDnGzs9LFWcG2d9a6XDg=,tag:x5gQN9FaatRBcHOyS2cicw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBwQ0FNU1c4RDNKTHRtMy8z\nSEtQRzFXTVFvcitMWjVlMURPVkxsZC9wU25nCmt4TS81bnJidzFVZkxEY0ovWUtm\nVk5PMjZEWVJCei9rVTJ2bG1ZNWJoZGMKLS0tIHgyTEhIdUQ3YnlKVi9lNVpUZ0dI\nd3BLL05oMXFldGVKbkpoaklscDJMR3MKpUl/KNPrtyt4/bu3xXUAQIkugQXWjlPf\nFqFc1Vnqxynd+wJkkd/zYs4XcOraogOUj/WIRXkqXgdDDoEqb/VIBg==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1r264u9yngfq8qkrveh4tn0rhfes02jfgrtqufdx4n4m3hs4rla2qx0rk4d",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSArOUdkd3VVSTU3NHZ6aURB\na2dYMXhyMmVLMDVlM0dzVHpxbUw3K3BFcVNzCm1LczFyd3BubGwvRVUwQ1Q0aWZR\nL1hlb1VpZ3JnTVQ4Zm9wVnlJYVNuL00KLS0tIHlMRVMyNW9rWG45bVVtczF3MVNq\nL2d2RXhEeVcyRVNmSUF6cks5VStxVkUKugI1iDei32852wNV/zPlyVwKJH1UXOlY\nFQq7dqMJMWI6a5F+z4UdaHvzyKxF2CWBG7DVnaUSpq7Q3uGmibsSOQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-16T08:13:07Z",
"mac": "ENC[AES256_GCM,data:LIlgQgiQt9aHXagpXphxSnpju+DOxuBvPpz5Rr43HSwgbWFgZ8tqlH2C1xo2xsJIexWkc823J9txpy+PLFXSm4/NbQGbKSymjHNEIYaU1tBSQ0KZ+s22X3/ku3Hug7/MkEKv5JsroTEcu3FK6Fv7Mo0VWqUggenl9AsJ5BocUO4=,iv:LGOnpWsod1ek4isWVrHrS+ZOCPrhwlPliPOTiMVY0zY=,tag:tRuHBSd9HxOswNcqjvzg0w==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../../../../sops/users/admin

View File

@@ -1,3 +0,0 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAtyIHCZ0/yVbHpllPwgaWIFQ3Kb4fYMcOujgVmttA7gM=
-----END PUBLIC KEY-----

View File

@@ -1 +0,0 @@
200:bb1f:6f1c:1852:173a:cb5e:5726:870

View File

@@ -1 +0,0 @@
../../../../../../sops/machines/peer2

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:b1dbaJQGr8mnISch0iej+FhMnYOIFxOJYCvWDQseiczltXsBetbYr+89co5Sp7wmhQrH3tlWaih3HZe294Y9j8XvwpNUtmW3RZHsU/6EWA50LKcToFGFCcEBM/Nz9RStQXnjwLbRSLFuMlfoQttUATB2XYSm+Ng=,iv:YCeE3KbHaBhR0q10qO8Og1LBT5OUjsIDxfclpcLJh6I=,tag:M7y9HAC+fh8Fe8HoqQrnbg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1p8kuf8s0nfekwreh4g38cgghp4nzszenx0fraeyky2me0nly2scstqunx8",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA3NTVOT2MxaDJsTXloVVcv\nellUdnVxSVdnZ1NBUGEwLzBiTGoyZENJdm1RClp5eHY3dkdVSzVJYk52dWFCQnlG\nclIrQUJ5RXRYTythWTFHR1NhVHlyMVkKLS0tIEFza3YwcUNiYUV5VWJQcTljY2ZR\nUnc3U1VubmZRTCtTTC9rd1kydnNYa00KqdwV3eRHA6Y865JXQ7lxbS6aTIGf/kQM\nqDFdiUdvEDqo19Df3QBJ7amQ1YjPqSIRbO8CJNPI8JqQJKTaBOgm9g==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBzTmV0Skd5Zzk1SXc4ZDc3\nRi9wTVdDM1lTc3N0MXpNNVZjUWJ6VDZHd3hzCkpRZnNtSU14clkybWxvSEhST2py\nR29jcHdXSCtFRE02ejB0dzN1eGVQZ1kKLS0tIE9YVjJBRTg1SGZ5S3lYdFRUM3RW\nOGZjUEhURnJIVTBnZG43UFpTZkdseFUKOgHC10Rqf/QnzfCHUMEPb1PVo9E6qlpo\nW/F1I8ZqkFI8sWh54nilXeR8i8w+QCthliBxsxdDTv2FSxdnKNHu3A==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-16T08:13:15Z",
"mac": "ENC[AES256_GCM,data:0byytsY3tFK3r4qhM1+iYe9KYYKJ8cJO/HonYflbB0iTD+oRBnnDUuChPdBK50tQxH8aInlvgIGgi45OMk7IrFBtBYQRgFBUR5zDujzel9hJXQvpvqgvRMkzA542ngjxYmZ74mQB+pIuFhlVJCfdTN+smX6N4KyDRj9d8aKK0Qs=,iv:DC8nwgUAUSdOCr8TlgJX21SxOPOoJKYeNoYvwj5b9OI=,tag:cbJ8M+UzaghkvtEnRCp+GA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../../../../sops/users/admin

View File

@@ -1,3 +0,0 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAonBIcfPW9GKaUNRs+8epsgQOShNbR9v26+3H80an2/c=
-----END PUBLIC KEY-----

24 devFlake/flake.lock generated
View File

@@ -3,10 +3,10 @@
"clan-core-for-checks": {
"flake": false,
"locked": {
"lastModified": 1756166884,
"narHash": "sha256-skg4rwpbCjhpLlrv/Pndd43FoEgrJz98WARtGLhCSzo=",
"lastModified": 1755093452,
"narHash": "sha256-NKBss7QtNnOqYVyJmYCgaCvYZK0mpQTQc9fLgE1mGyk=",
"ref": "main",
"rev": "f7414d7e6e58709af27b6fe16eb530278e81eaaf",
"rev": "7e97734797f0c6bd3c2d3a51cf54a2a6b371c222",
"shallow": true,
"type": "git",
"url": "https://git.clan.lol/clan/clan-core"
@@ -84,11 +84,11 @@
},
"nixpkgs-dev": {
"locked": {
"lastModified": 1756662818,
"narHash": "sha256-Opggp4xiucQ5gBceZ6OT2vWAZOjQb3qULv39scGZ9Nw=",
"lastModified": 1755166611,
"narHash": "sha256-sk8pK8kWz4IE4ErAjKE1d8tMChY6VQR32U4yS68FIog=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "2e6aeede9cb4896693434684bb0002ab2c0cfc09",
"rev": "1a341e3c908f4a3105e737bd13af0318dc06fbe3",
"type": "github"
},
"original": {
@@ -107,11 +107,11 @@
]
},
"locked": {
"lastModified": 1755555503,
"narHash": "sha256-WiOO7GUOsJ4/DoMy2IC5InnqRDSo2U11la48vCCIjjY=",
"lastModified": 1754869408,
"narHash": "sha256-G1zNuxiCDfqNQVoL9j5v+ZYfUER7AI158ev98/JC8LI=",
"owner": "NuschtOS",
"repo": "search",
"rev": "6f3efef888b92e6520f10eae15b86ff537e1d2ea",
"rev": "2f5478267557a0f7a70d953b6c0867a5b4282739",
"type": "github"
},
"original": {
@@ -165,11 +165,11 @@
"nixpkgs": []
},
"locked": {
"lastModified": 1756662192,
"narHash": "sha256-F1oFfV51AE259I85av+MAia221XwMHCOtZCMcZLK2Jk=",
"lastModified": 1754847726,
"narHash": "sha256-2vX8QjO5lRsDbNYvN9hVHXLU6oMl+V/PsmIiJREG4rE=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "1aabc6c05ccbcbf4a635fb7a90400e44282f61c4",
"rev": "7d81f6fb2e19bf84f1c65135d1060d829fae2408",
"type": "github"
},
"original": {

View File

@@ -33,6 +33,7 @@
self'.packages.tea-create-pr
self'.packages.merge-after-ci
self'.packages.pending-reviews
self'.packages.agit
# treefmt with config defined in ./flake-parts/formatting.nix
config.treefmt.build.wrapper
];
@@ -45,7 +46,7 @@
ln -sfT ${inputs.nix-select} "$PRJ_ROOT/pkgs/clan-cli/clan_lib/select"
# Generate classes.py from schemas
${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.clanSchemaJson}/schema.json $PRJ_ROOT/pkgs/clan-cli/clan_lib/nix_models/clan.py
${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.clan-schema-abstract}/schema.json $PRJ_ROOT/pkgs/clan-cli/clan_lib/nix_models/clan.py
'';
};
};

2
docs/.gitignore vendored
View File

@@ -1,5 +1,5 @@
/site/reference
/site/static
/site/options
/site/options-page
/site/openapi.json
!/site/static/extra.css

View File

@@ -1,11 +1,13 @@
{
lib,
config,
...
}:
let
suffix = config.clan.core.vars.generators.disk-id.files.diskId.value;
mirrorBoot = idx: {
# suffix is to prevent disk name collisions
name = idx;
name = idx + suffix;
type = "disk";
device = "/dev/disk/by-id/${idx}";
content = {

View File

@@ -1,11 +1,13 @@
{
lib,
config,
...
}:
let
suffix = config.clan.core.vars.generators.disk-id.files.diskId.value;
mirrorBoot = idx: {
# suffix is to prevent disk name collisions
name = idx;
name = idx + suffix;
type = "disk";
device = "/dev/disk/by-id/${idx}";
content = {

View File

@@ -2,11 +2,11 @@ site_name: Clan Documentation
site_url: https://docs.clan.lol
repo_url: https://git.clan.lol/clan/clan-core/
repo_name: "_>"
edit_uri: _edit/main/docs/site/
edit_uri: _edit/main/docs/docs/
validation:
omitted_files: warn
absolute_links: ignore
absolute_links: warn
unrecognized_links: warn
markdown_extensions:
@@ -59,15 +59,14 @@ nav:
- Configure Disk Config: guides/getting-started/choose-disk.md
- Update Machine: guides/getting-started/update.md
- Continuous Integration: guides/getting-started/flake-check.md
- Convert Existing NixOS Config: guides/getting-started/convert-flake.md
- ClanServices: guides/clanServices.md
- Using Services: guides/clanServices.md
- Backup & Restore: guides/backups.md
- Disk Encryption: guides/disk-encryption.md
- Age Plugins: guides/age-plugins.md
- Secrets management: guides/secrets.md
- Networking: guides/networking.md
- Target Host: guides/target-host.md
- Zerotier VPN: guides/mesh-vpn.md
- How to disable Secure Boot: guides/secure-boot.md
- Secure Boot: guides/secure-boot.md
- Flake-parts: guides/flake-parts.md
- macOS: guides/macos.md
- Contributing:
@@ -78,7 +77,8 @@ nav:
- Writing a Service Module: guides/services/community.md
- Writing a Disko Template: guides/disko-templates/community.md
- Migrations:
- Migrate from clan modules to services: guides/migrations/migrate-inventory-services.md
- Migrate existing Flakes: guides/migrations/migration-guide.md
- Migrate inventory Services: guides/migrations/migrate-inventory-services.md
- Facts Vars Migration: guides/migrations/migration-facts-vars.md
- Disk id: guides/migrations/disk-id.md
- Concepts:
@@ -88,14 +88,12 @@ nav:
- Templates: concepts/templates.md
- Reference:
- Overview: reference/index.md
- Browse Options: "/options"
- Clan Options: options.md
- Services:
- Overview:
- reference/clanServices/index.md
- reference/clanServices/admin.md
- reference/clanServices/borgbackup.md
- reference/clanServices/certificates.md
- reference/clanServices/coredns.md
- reference/clanServices/data-mesher.md
- reference/clanServices/dyndns.md
- reference/clanServices/emergency-access.md
@@ -108,12 +106,12 @@ nav:
- reference/clanServices/monitoring.md
- reference/clanServices/packages.md
- reference/clanServices/sshd.md
- reference/clanServices/state-version.md
- reference/clanServices/syncthing.md
- reference/clanServices/trusted-nix-caches.md
- reference/clanServices/users.md
- reference/clanServices/wifi.md
- reference/clanServices/wireguard.md
- reference/clanServices/yggdrasil.md
- reference/clanServices/zerotier.md
- API: reference/clanServices/clan-service-author-interface.md
@@ -157,7 +155,6 @@ nav:
- 05-deployment-parameters: decisions/05-deployment-parameters.md
- Template: decisions/_template.md
- Glossary: reference/glossary.md
- Browse Options: "/options"
docs_dir: site
site_dir: out
@@ -175,7 +172,6 @@ theme:
- content.code.annotate
- content.code.copy
- content.tabs.link
- content.action.edit
icon:
repo: fontawesome/brands/git
custom_dir: overrides

View File

@@ -54,9 +54,9 @@ pkgs.stdenv.mkDerivation {
chmod -R +w ./site/reference
echo "Generated API documentation in './site/reference/' "
rm -rf ./site/options
cp -r ${docs-options} ./site/options
chmod -R +w ./site/options
rm -r ./site/options-page || true
cp -r ${docs-options} ./site/options-page
chmod -R +w ./site/options-page
mkdir -p ./site/static/asciinema-player
ln -snf ${asciinema-player-js} ./site/static/asciinema-player/asciinema-player.min.js

View File

@@ -40,7 +40,6 @@ writeShellScriptBin "deploy-docs" ''
rsync \
--checksum \
--delete \
-e "ssh -o StrictHostKeyChecking=no $sshExtraArgs" \
-a ${docs}/ \
www@clan.lol:/var/www/docs.clan.lol

View File

@@ -18,8 +18,27 @@
inherit (self) clanModules;
clan-core = self;
inherit pkgs;
evalClanModules = self.clanLib.evalClan.evalClanModules;
modulesRolesOptions = self.clanLib.evalClan.evalClanModulesWithRoles {
allModules = self.clanModules;
inherit pkgs;
clan-core = self;
};
};
# Frontmatter for clanModules
clanModulesFrontmatter =
let
docs = pkgs.nixosOptionsDoc {
options = self.clanLib.modules.frontmatterOptions;
transformOptions = self.clanLib.docs.stripStorePathsFromDeclarations;
};
in
docs.optionsJSON;
# Options available when imported via ` inventory.${moduleName}....${rolesName} `
clanModulesViaRoles = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModulesViaRoles);
# clan service options
clanModulesViaService = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModulesViaService);
@@ -69,10 +88,12 @@
}
}
export CLAN_CORE_DOCS=${jsonDocs.clanCore}/share/doc/nixos/options.json
# A file that contains the links to all clanModule docs
export CLAN_MODULES_VIA_ROLES=${clanModulesViaRoles}
export CLAN_MODULES_VIA_SERVICE=${clanModulesViaService}
export CLAN_SERVICE_INTERFACE=${self'.legacyPackages.clan-service-module-interface}/share/doc/nixos/options.json
# Frontmatter format for clanModules
export CLAN_MODULES_FRONTMATTER_DOCS=${clanModulesFrontmatter}/share/doc/nixos/options.json
export BUILD_CLAN_PATH=${buildClanOptions}/share/doc/nixos/options.json
@@ -86,6 +107,7 @@
legacyPackages = {
inherit
jsonDocs
clanModulesViaRoles
clanModulesViaService
;
};

View File

@@ -1,5 +1,7 @@
{
modulesRolesOptions,
nixosOptionsDoc,
evalClanModules,
lib,
pkgs,
clan-core,
@@ -8,36 +10,21 @@
let
inherit (clan-core.clanLib.docs) stripStorePathsFromDeclarations;
transformOptions = stripStorePathsFromDeclarations;
nixosConfigurationWithClan =
let
evaled = lib.evalModules {
class = "nixos";
modules = [
# Basemodule
(
{ config, ... }:
{
imports = (import (pkgs.path + "/nixos/modules/module-list.nix"));
nixpkgs.pkgs = pkgs;
clan.core.name = "dummy";
system.stateVersion = config.system.nixos.release;
# Set this to work around a bug where `clan.core.settings.machine.name`
# is forced due to `networking.interfaces` being forced
# somewhere in the nixpkgs options
facter.detected.dhcp.enable = lib.mkForce false;
}
)
{
clan.core.settings.directory = clan-core;
}
clan-core.nixosModules.clanCore
];
};
in
evaled;
in
{
clanModulesViaRoles = lib.mapAttrs (
_moduleName: rolesOptions:
lib.mapAttrs (
_roleName: options:
(nixosOptionsDoc {
inherit options;
warningsAreErrors = true;
inherit transformOptions;
}).optionsJSON
) rolesOptions
) modulesRolesOptions;
# Test with:
# nix build .\#legacyPackages.x86_64-linux.clanModulesViaService
clanModulesViaService = lib.mapAttrs (
@@ -51,6 +38,7 @@ in
{
roles = lib.mapAttrs (
_roleName: role:
(nixosOptionsDoc {
transformOptions =
opt:
@@ -66,13 +54,20 @@ in
warningsAreErrors = true;
}).optionsJSON
) evaluatedService.config.roles;
manifest = evaluatedService.config.manifest;
}
) clan-core.clan.modules;
clanCore =
(nixosOptionsDoc {
options = nixosConfigurationWithClan.options.clan.core;
options =
((evalClanModules {
modules = [ ];
inherit pkgs clan-core;
}).options
).clan.core or { };
warningsAreErrors = true;
inherit transformOptions;
}).optionsJSON;

View File

@@ -25,7 +25,7 @@
serviceModules = self.clan.modules;
baseHref = "/options/";
baseHref = "/options-page/";
getRoles =
module:
@@ -126,7 +126,7 @@
nestedSettingsOption = mkOption {
type = types.raw;
description = ''
See [instances.${name}.roles.${roleName}.settings](${baseHref}?option_scope=0&option=inventory.instances.${name}.roles.${roleName}.settings)
See [instances.${name}.roles.${roleName}.settings](${baseHref}?option_scope=0&option=instances.${name}.roles.${roleName}.settings)
'';
};
settingsOption = mkOption {
@@ -161,42 +161,6 @@
}
];
baseModule =
# Module
{ config, ... }:
{
imports = (import (pkgs.path + "/nixos/modules/module-list.nix"));
nixpkgs.pkgs = pkgs;
clan.core.name = "dummy";
system.stateVersion = config.system.nixos.release;
# Set this to work around a bug where `clan.core.settings.machine.name`
# is forced due to `networking.interfaces` being forced
# somewhere in the nixpkgs options
facter.detected.dhcp.enable = lib.mkForce false;
};
evalClanModules =
let
evaled = lib.evalModules {
class = "nixos";
modules = [
baseModule
{
clan.core.settings.directory = self;
}
self.nixosModules.clanCore
];
};
in
evaled;
coreOptions =
(pkgs.nixosOptionsDoc {
options = (evalClanModules.options).clan.core or { };
warningsAreErrors = true;
transformOptions = self.clanLib.docs.stripStorePathsFromDeclarations;
}).optionsJSON;
in
{
# Uncomment for debugging
@@ -211,17 +175,10 @@
# scopes = mapAttrsToList mkScope serviceModules;
scopes = [
{
inherit baseHref;
name = "Flake Options (clan.nix file)";
name = "Clan";
modules = docModules;
urlPrefix = "https://git.clan.lol/clan/clan-core/src/branch/main/";
}
{
name = "Machine Options (clan.core NixOS options)";
optionsJSON = "${coreOptions}/share/doc/nixos/options.json";
urlPrefix = "https://git.clan.lol/clan/clan-core/src/branch/main/";
}
];
};
};

View File

@@ -1,5 +1,3 @@
"""Module for rendering NixOS options documentation from JSON format."""
# Options are available in the following format:
# https://github.com/nixos/nixpkgs/blob/master/nixos/lib/make-options-doc/default.nix
#
@@ -34,21 +32,30 @@ from typing import Any
from clan_lib.errors import ClanError
from clan_lib.services.modules import (
CategoryInfo,
ModuleManifest,
Frontmatter,
extract_frontmatter,
get_roles,
)
# Get environment variables
CLAN_CORE_PATH = Path(os.environ["CLAN_CORE_PATH"])
CLAN_CORE_DOCS = Path(os.environ["CLAN_CORE_DOCS"])
CLAN_MODULES_FRONTMATTER_DOCS = os.environ.get("CLAN_MODULES_FRONTMATTER_DOCS")
BUILD_CLAN_PATH = os.environ.get("BUILD_CLAN_PATH")
## Clan modules ##
# Some modules can be imported via nix natively
CLAN_MODULES_VIA_NIX = os.environ.get("CLAN_MODULES_VIA_NIX")
# Some modules can be imported via inventory
CLAN_MODULES_VIA_ROLES = os.environ.get("CLAN_MODULES_VIA_ROLES")
# Options how to author clan.modules
# perInstance, perMachine, ...
CLAN_SERVICE_INTERFACE = os.environ.get("CLAN_SERVICE_INTERFACE")
CLAN_MODULES_VIA_SERVICE = os.environ.get("CLAN_MODULES_VIA_SERVICE")
OUT = os.environ.get("out") # noqa: SIM112
OUT = os.environ.get("out")
def sanitize(text: str) -> str:
@@ -68,7 +75,8 @@ def render_option_header(name: str) -> str:
def join_lines_with_indentation(lines: list[str], indent: int = 4) -> str:
"""Joins multiple lines with a specified number of whitespace characters as indentation.
"""
Joins multiple lines with a specified number of whitespace characters as indentation.
Args:
lines (list of str): The lines of text to join.
@@ -76,7 +84,6 @@ def join_lines_with_indentation(lines: list[str], indent: int = 4) -> str:
Returns:
str: The indented and concatenated string.
"""
# Create the indentation string (e.g., four spaces)
indent_str = " " * indent
@@ -163,10 +170,7 @@ def render_option(
def print_options(
options_file: str,
head: str,
no_options: str,
replace_prefix: str | None = None,
options_file: str, head: str, no_options: str, replace_prefix: str | None = None
) -> str:
res = ""
with (Path(options_file) / "share/doc/nixos/options.json").open() as f:
@@ -175,16 +179,32 @@ def print_options(
res += head if len(options.items()) else no_options
for option_name, info in options.items():
if replace_prefix:
display_name = option_name.replace(replace_prefix + ".", "")
else:
display_name = option_name
option_name = option_name.replace(replace_prefix + ".", "")
res += render_option(display_name, info, 4)
res += render_option(option_name, info, 4)
return res
def module_header(module_name: str) -> str:
return f"# {module_name}\n\n"
def module_header(module_name: str, has_inventory_feature: bool = False) -> str:
indicator = " 🔹" if has_inventory_feature else ""
return f"# {module_name}{indicator}\n\n"
def module_nix_usage(module_name: str) -> str:
return f"""## Usage via Nix
**This module can also be imported directly in your nixos configuration, although it is recommended to use the [inventory](../../concepts/inventory.md) interface if available.**
Some modules are considered 'low-level' or 'expert modules' and are not available via the inventory interface.
```nix
{{config, lib, inputs, ...}}: {{
imports = [ inputs.clan-core.clanModules.{module_name} ];
# ...
}}
```
"""
clan_core_descr = """
@@ -203,6 +223,68 @@ The following options are available for this module.
"""
def produce_clan_modules_frontmatter_docs() -> None:
if not CLAN_MODULES_FRONTMATTER_DOCS:
msg = f"Environment variables are not set correctly: $CLAN_MODULES_FRONTMATTER_DOCS={CLAN_MODULES_FRONTMATTER_DOCS}"
raise ClanError(msg)
if not OUT:
msg = f"Environment variables are not set correctly: $out={OUT}"
raise ClanError(msg)
with Path(CLAN_MODULES_FRONTMATTER_DOCS).open() as f:
options: dict[str, dict[str, Any]] = json.load(f)
# header
output = """# Frontmatter
Every clan module has a `frontmatter` section within its readme. It provides
machine readable metadata about the module.
!!! example
The used format is `TOML`
The content is separated by `---` and the frontmatter must be placed at the very top of the `README.md` file.
```toml
---
description = "A description of the module"
categories = ["category1", "category2"]
[constraints]
roles.client.max = 10
roles.server.min = 1
---
# Readme content
...
```
"""
output += """## Overview
This provides an overview of the available attributes of the `frontmatter`
within the `README.md` of a clan module.
"""
# for option_name, info in options.items():
# if option_name == "_module.args":
# continue
# output += render_option(option_name, info)
root = options_to_tree(options, debug=True)
for option in root.suboptions:
output += options_docs_from_tree(option, init_level=2)
outfile = Path(OUT) / "clanModules/frontmatter/index.md"
outfile.parent.mkdir(
parents=True,
exist_ok=True,
)
with outfile.open("w") as of:
of.write(output)
def produce_clan_core_docs() -> None:
if not CLAN_CORE_DOCS:
msg = f"Environment variables are not set correctly: $CLAN_CORE_DOCS={CLAN_CORE_DOCS}"
@@ -242,7 +324,7 @@ def produce_clan_core_docs() -> None:
for submodule_name, split_options in split.items():
outfile = f"{module_name}/{submodule_name}.md"
print(
f"[clan_core.{submodule_name}] Rendering option of: {submodule_name}... {outfile}",
f"[clan_core.{submodule_name}] Rendering option of: {submodule_name}... {outfile}"
)
init_level = 1
root = options_to_tree(split_options, debug=True)
@@ -277,9 +359,56 @@ def produce_clan_core_docs() -> None:
of.write(output)
def render_roles(roles: list[str] | None, module_name: str) -> str:
if roles:
roles_list = "\n".join([f"- `{r}`" for r in roles])
return (
f"""
### Roles
This module can be used via predefined roles
{roles_list}
"""
"""
Every role has its own configuration options, which are each listed below.
For more information, see the [inventory guide](../../concepts/inventory.md).
??? Example
For example the `admin` module adds the following options globally to all machines where it is used.
`clan.admin.allowedkeys`
```nix
clan-core.lib.clan {
inventory.services = {
admin.me = {
roles.default.machines = [ "jon" ];
config.allowedkeys = [ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQD..." ];
};
};
};
```
"""
)
return ""
clan_modules_descr = """
Clan modules are [NixOS modules](https://wiki.nixos.org/wiki/NixOS_modules)
which have been enhanced with additional features provided by Clan, with
certain option types restricted to enable configuration through a graphical
interface.
!!! note "🔹"
Modules with this indicator support the [inventory](../../concepts/inventory.md) feature.
"""
def render_categories(
categories: list[str],
categories_info: dict[str, CategoryInfo],
categories: list[str], categories_info: dict[str, CategoryInfo]
) -> str:
res = """<div style="display: grid; grid-template-columns: repeat(3, 1fr); gap: 10px;">"""
for cat in categories:
@@ -344,10 +473,10 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
# output += f"`clan.modules.{module_name}`\n"
output += f"*{module_info['manifest']['description']}*\n"
fm = Frontmatter("")
# output += "## Categories\n\n"
output += render_categories(
module_info["manifest"]["categories"],
ModuleManifest.categories_info(),
module_info["manifest"]["categories"], fm.categories_info
)
output += f"{module_info['manifest']['readme']}\n"
@@ -356,7 +485,7 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
output += f"The {module_name} module has the following roles:\n\n"
for role_name in module_info["roles"]:
for role_name, _ in module_info["roles"].items():
output += f"- {role_name}\n"
for role_name, role_filename in module_info["roles"].items():
@@ -376,8 +505,183 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
of.write(output)
def produce_clan_modules_docs() -> None:
if not CLAN_MODULES_VIA_NIX:
msg = f"Environment variables are not set correctly: $CLAN_MODULES_VIA_NIX={CLAN_MODULES_VIA_NIX}"
raise ClanError(msg)
if not CLAN_MODULES_VIA_ROLES:
msg = f"Environment variables are not set correctly: $CLAN_MODULES_VIA_ROLES={CLAN_MODULES_VIA_ROLES}"
raise ClanError(msg)
if not CLAN_CORE_PATH:
msg = f"Environment variables are not set correctly: $CLAN_CORE_PATH={CLAN_CORE_PATH}"
raise ClanError(msg)
if not OUT:
msg = f"Environment variables are not set correctly: $out={OUT}"
raise ClanError(msg)
modules_index = "# Modules Overview\n\n"
modules_index += clan_modules_descr
modules_index += "## Overview\n\n"
modules_index += '<div class="grid cards" markdown>\n\n'
with Path(CLAN_MODULES_VIA_ROLES).open() as f2:
role_links: dict[str, dict[str, str]] = json.load(f2)
with Path(CLAN_MODULES_VIA_NIX).open() as f:
links: dict[str, str] = json.load(f)
for module_name, options_file in links.items():
print(f"Rendering ClanModule: {module_name}")
readme_file = CLAN_CORE_PATH / "clanModules" / module_name / "README.md"
with readme_file.open() as f:
readme = f.read()
frontmatter: Frontmatter
frontmatter, readme_content = extract_frontmatter(readme, str(readme_file))
# skip if experimental feature enabled
if "experimental" in frontmatter.features:
print(f"Skipping {module_name}: Experimental feature")
continue
modules_index += build_option_card(module_name, frontmatter)
##### Print module documentation #####
# 1. Header
output = module_header(module_name, "inventory" in frontmatter.features)
# 2. Description from README.md
if frontmatter.description:
output += f"*{frontmatter.description}*\n\n"
# 2. Deprecation note if the module is deprecated
if "deprecated" in frontmatter.features:
output += f"""
!!! Warning "Deprecated"
The `{module_name}` module is deprecated.
Use 'clanServices/{module_name}' or a similar successor instead
"""
else:
output += f"""
!!! Warning "Will be deprecated"
The `{module_name}` module might eventually be migrated to 'clanServices'
See: [clanServices](../../guides/clanServices.md)
"""
# 3. Categories from README.md
output += "## Categories\n\n"
output += render_categories(frontmatter.categories, frontmatter.categories_info)
output += "\n---\n\n"
# 3. README.md content
output += f"{readme_content}\n"
# 4. Usage
##### Print usage via Inventory #####
# get_roles(str) -> list[str] | None
# if not isinstance(options_file, str):
roles = get_roles(CLAN_CORE_PATH / "clanModules" / module_name)
if roles:
# Render inventory usage
output += """## Usage via Inventory\n\n"""
output += render_roles(roles, module_name)
for role in roles:
role_options_file = role_links[module_name][role]
# Abort if the options file is not found
if not isinstance(role_options_file, str):
print(
f"Error: module: {module_name} in role: {role} - options file not found, Got {role_options_file}"
)
exit(1)
no_options = f"""### Options of `{role}` role
**The `{module_name}` `{role}` doesn't offer / require any options to be set.**
"""
heading = f"""### Options of `{role}` role
The following options are available when using the `{role}` role.
"""
output += print_options(
role_options_file,
heading,
no_options,
replace_prefix=f"clan.{module_name}",
)
else:
# No roles means no inventory usage
output += """## Usage via Inventory
**This module cannot be used via the inventory interface.**
"""
##### Print usage via Nix / nixos #####
if not isinstance(options_file, str):
print(
f"Skipping {module_name}: Cannot be used via import clanModules.{module_name}"
)
output += """## Usage via Nix
**This module cannot be imported directly in your nixos configuration.**
"""
else:
output += module_nix_usage(module_name)
no_options = "** This module doesn't require any options to be set.**"
output += print_options(options_file, options_head, no_options)
outfile = Path(OUT) / f"clanModules/{module_name}.md"
outfile.parent.mkdir(
parents=True,
exist_ok=True,
)
with outfile.open("w") as of:
of.write(output)
modules_index += "</div>"
modules_index += "\n"
modules_outfile = Path(OUT) / "clanModules/index.md"
with modules_outfile.open("w") as of:
of.write(modules_index)
def build_option_card(module_name: str, frontmatter: Frontmatter) -> str:
"""
Build the overview index card for each reference target option.
"""
def indent_all(text: str, indent_size: int = 4) -> str:
"""
Indent all lines in a string.
"""
indent = " " * indent_size
lines = text.split("\n")
indented_text = indent + ("\n" + indent).join(lines)
return indented_text
def to_md_li(module_name: str, frontmatter: Frontmatter) -> str:
md_li = (
f"""- **[{module_name}](./{"-".join(module_name.split(" "))}.md)**\n\n"""
)
md_li += f"""{indent_all("---", 4)}\n\n"""
fmd = f"\n{frontmatter.description.strip()}" if frontmatter.description else ""
md_li += f"""{indent_all(fmd, 4)}"""
return md_li
return f"{to_md_li(module_name, frontmatter)}\n\n"
def split_options_by_root(options: dict[str, Any]) -> dict[str, dict[str, Any]]:
"""Split the flat dictionary of options into a dict of which each entry will construct complete option trees.
"""
Split the flat dictionary of options into a dict of which each entry will construct complete option trees.
{
"a": { Data }
"a.b": { Data }
@@ -461,7 +765,9 @@ def option_short_name(option_name: str) -> str:
def options_to_tree(options: dict[str, Any], debug: bool = False) -> Option:
"""Convert the options dictionary to a tree structure."""
"""
Convert the options dictionary to a tree structure.
"""
# Helper function to create nested structure
def add_to_tree(path_parts: list[str], info: Any, current_node: Option) -> None:
@@ -513,24 +819,22 @@ def options_to_tree(options: dict[str, Any], debug: bool = False) -> Option:
def options_docs_from_tree(
root: Option,
init_level: int = 1,
prefix: list[str] | None = None,
root: Option, init_level: int = 1, prefix: list[str] | None = None
) -> str:
"""Render the options from the tree structure.
"""
Render the options from the tree structure.
Args:
root (Option): The root option node.
init_level (int): The initial level of indentation.
prefix (list str): Will be printed as common prefix of all attribute names.
"""
def render_tree(option: Option, level: int = init_level) -> str:
output = ""
should_render = not option.name.startswith("<") and not option.name.startswith(
"_",
"_"
)
if should_render:
# short_name = option_short_name(option.name)
@@ -551,11 +855,16 @@ def options_docs_from_tree(
return output
return render_tree(root)
md = render_tree(root)
return md
if __name__ == "__main__":
if __name__ == "__main__": #
produce_clan_core_docs()
produce_clan_service_author_docs()
# produce_clan_modules_docs()
produce_clan_service_docs()
# produce_clan_modules_frontmatter_docs()

View File

@@ -1,33 +1,15 @@
# Auto-included Files
Clan automatically imports specific files from each machine directory and registers them, reducing the need for manual configuration.
Clan automatically imports the following files from a directory and registers them.
## Machine Registration
## Machine registration
Every folder under `machines/{machineName}` is automatically registered as a Clan machine.
Every folder `machines/{machineName}` will be registered automatically as a Clan machine.
!!! info "Files loaded automatically for each machine"
!!! info "Automatically loaded files"
The following files are detected and imported for every Clan machine:
The following files are loaded automatically for each Clan machine:
- [x] `machines/{machineName}/configuration.nix`
Main configuration file for the machine.
- [x] `machines/{machineName}/hardware-configuration.nix`
Hardware-specific configuration generated by NixOS.
- [x] `machines/{machineName}/facter.json`
Contains system facts. Automatically generated — see [nixos-facter](https://clan.lol/blog/nixos-facter/) for details.
- [x] `machines/{machineName}/disko.nix`
Disk layout configuration. See the [disko quickstart](https://github.com/nix-community/disko/blob/master/docs/quickstart.md) for more info.
## Other Auto-included Files
* **`inventory.json`**
Managed by Clan's API.
Merges with `clan.inventory` to extend the inventory.
* **`.clan-flake`**
Sentinel file used to locate the root of a Clan repository.
Falls back to `.git`, `.hg`, `.svn`, or `flake.nix` if not found.
- [x] `machines/{machineName}/configuration.nix`
- [x] `machines/{machineName}/hardware-configuration.nix`
- [x] `machines/{machineName}/facter.json` Automatically configured, for further information see [nixos-facter](https://clan.lol/blog/nixos-facter/)
- [x] `machines/{machineName}/disko.nix` Automatically loaded, for further information see the [disko docs](https://github.com/nix-community/disko/blob/master/docs/quickstart.md).
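For example, a fully populated machine directory might look like this (a sketch; the machine name `jon` and the exact file set are illustrative):

```shellSession
$ ls machines/jon/
configuration.nix  disko.nix  facter.json  hardware-configuration.nix
```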

View File

@@ -1,22 +1,16 @@
# Using the Inventory
# Using `clanServices`
Clan's inventory system is a composable way to define and deploy services across
machines.
Clan's `clanServices` system is a composable way to define and deploy services across machines.
This guide shows how to **instantiate** a `clanService`, explains how service
definitions are structured in your inventory, and how to pick or create services
from modules exposed by flakes.
This guide shows how to **instantiate** a `clanService`, explains how service definitions are structured in your inventory, and how to pick or create services from modules exposed by flakes.
The term **Multi-host-modules** was introduced previously in the [nixus
repository](https://github.com/infinisil/nixus) and represents a similar
concept.
The term **Multi-host-modules** was introduced previously in the [nixus repository](https://github.com/infinisil/nixus) and represents a similar concept.
______________________________________________________________________
---
## Overview
Services are used in `inventory.instances`, and assigned to *roles* and
*machines* -- meaning you decide which machines run which part of the service.
Services are used in `inventory.instances`, and then they attach to *roles* and *machines* — meaning you decide which machines run which part of the service.
For example:
@@ -24,138 +18,119 @@ For example:
inventory.instances = {
borgbackup = {
roles.client.machines."laptop" = {};
roles.client.machines."workstation" = {};
roles.client.machines."server1" = {};
roles.server.machines."backup-box" = {};
};
}
```
This says: "Run borgbackup as a *client* on my *laptop* and *workstation*, and
as a *server* on *backup-box*". `client` and `server` are roles defined by the
`borgbackup` service.
This says: "Run borgbackup as a *client* on my *laptop* and *server1*, and as a *server* on *backup-box*."
## Module source specification
Each instance includes a reference to a **module specification** -- this is how
Clan knows which service module to use and where it came from.
Each instance includes a reference to a **module specification** this is how Clan knows which service module to use and where it came from.
Usually one would just use `imports` but we need to make the `module source` configurable via the Python API.
By default it is not required to specify the `module`, in which case it defaults to the pre-provided services of clan-core.
It is not required to specify the `module.input` parameter, in which case it
defaults to the pre-provided services of clan-core. In a similar fashion, the
`module.name` parameter can also be omitted; it will default to the name of the
instance.
---
## Override Example
Example of instantiating a `borgbackup` service using `clan-core`:
```nix
inventory.instances = {
borgbackup = { # <- Instance name
# This can be partially/fully specified,
# - If the instance name is not the name of the module
# - If the input is not clan-core
# module = {
# name = "borgbackup"; # Name of the module (optional)
# input = "clan-core"; # The flake input where the service is defined (optional)
# };
# Instance Name: Different name for this 'borgbackup' instance
borgbackup = {
# Since this is instances."borgbackup" the whole `module = { ... }` below is equivalent and optional.
module = {
name = "borgbackup"; # <-- Name of the module (optional)
input = "clan-core"; # <-- The flake input where the service is defined (optional)
};
# Participation of the machines is defined via roles
# Right side needs to be an attribute set. Its purpose will become clear later
roles.client.machines."machine-a" = {};
roles.server.machines."backup-host" = {};
};
}
```
## Module Settings
If you used `clan-core` as an input attribute for your flake:
Each role might expose configurable options. See clan's [clanServices
reference](../reference/clanServices/index.md) for all available options.
```nix
# ↓ module.input = "clan-core"
inputs.clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
```
Settings can be set per-machine or per-role. The latter is applied to all
machines that are assigned to that role.
## Simplified Example
If only one instance is needed for a service and the service is a clan core service, the `module` definition can be omitted.
```nix
# Simplified way of specifying a single instance
inventory.instances = {
# instance name is `borgbackup` -> clan core module `borgbackup` will be loaded.
borgbackup = {
# Participation of the machines is defined via roles
# Right side needs to be an attribute set. Its purpose will become clear later
roles.client.machines."machine-a" = {};
roles.server.machines."backup-host" = {};
};
}
```
## Configuration Example
Each role might expose configurable options
See clan's [clanServices reference](../reference/clanServices/index.md) for available options
```nix
inventory.instances = {
borgbackup = {
# Settings for 'machine-a'
borgbackup-example = {
module = {
name = "borgbackup";
input = "clan-core";
};
roles.client.machines."machine-a" = {
# 'client' -Settings of 'machine-a'
settings = {
backupFolders = [
/home
/var
];
};
# ---------------------------
};
# Settings for all machines of the role "server"
roles.server.settings = {
directory = "/var/lib/borgbackup";
};
roles.server.machines."backup-host" = {};
};
}
```
## Tags
Tags can be used to assign multiple machines to a role at once. It can be thought of as a grouping mechanism.
For example, using the `all` tag for services that you want configured on all
your machines is a common pattern.
The following example could be used to back up all your machines to a common
backup server.
Multiple members can be defined using tags as follows
```nix
inventory.instances = {
borgbackup = {
# "All" machines are assigned to the borgbackup 'client' role
roles.client.tags = [ "all" ];
# But only one specific machine (backup-host) is assigned to the 'server' role
roles.server.machines."backup-host" = {};
};
}
```
## Sharing additional Nix configuration
Sometimes you need to add custom NixOS configuration alongside your clan
services. The `extraModules` option allows you to include additional NixOS
configuration that is applied for every machine assigned to that role.
There are multiple valid syntaxes for specifying modules:
```nix
inventory.instances = {
borgbackup = {
roles.client = {
# Direct module reference
extraModules = [ ../nixosModules/borgbackup.nix ];
# Or using self (needs to be json serializable)
# See the next example for a workaround.
extraModules = [ self.nixosModules.borgbackup ];
# Or inline module definition, (needs to be json compatible)
extraModules = [
{
# Your module configuration here
# ...
#
# If the module needs to contain non-serializable expressions:
imports = [ ./path/to/non-serializable.nix ];
}
];
borgbackup-example = {
module = {
name = "borgbackup";
input = "clan-core";
};
#
# The 'all' -tag targets all machines
roles.client.tags."all" = {};
# ---------------------------
roles.server.machines."backup-host" = {};
};
}
```
## Picking a clanService
You can use services exposed by Clan's core module library, `clan-core`.
You can use services exposed by Clan's core module library, `clan-core`.
🔗 See: [List of Available Services in clan-core](../reference/clanServices/index.md)
@@ -167,19 +142,18 @@ You can also author your own `clanService` modules.
You might expose your service module from your flake — this makes it easy for other people to also use your module in their clan.
______________________________________________________________________
---
## 💡 Tips for Working with clanServices
- You can add multiple inputs to your flake (`clan-core`, `your-org-modules`, etc.) to mix and match services.
- Each service instance is isolated by its key in `inventory.instances`, allowing you to deploy multiple versions or roles of the same service type.
- Roles can target different machines or be scoped dynamically.
* You can add multiple inputs to your flake (`clan-core`, `your-org-modules`, etc.) to mix and match services.
* Each service instance is isolated by its key in `inventory.instances`, allowing you to deploy multiple versions or roles of the same service type.
* Roles can target different machines or be scoped dynamically.
______________________________________________________________________
---
## What's Next?
- [Author your own clanService →](../guides/services/community.md)
- [Migrate from clanModules →](../guides/migrations/migrate-inventory-services.md)
## What's Next?
* [Author your own clanService →](../guides/services/community.md)
* [Migrate from clanModules →](../guides/migrations/migrate-inventory-services.md)
<!-- TODO: * [Understand the architecture →](../explanation/clan-architecture.md) -->

View File

@@ -90,10 +90,13 @@ export CLAN_DEBUG_COMMANDS=1
These options help you pinpoint the source and context of print messages and debug logs during development.
## Analyzing Performance
To understand what's causing slow performance, set the environment variable `export CLAN_CLI_PERF=1`. When you complete a clan command, you'll see a summary of various performance metrics, helping you identify what's taking up time.
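For example (a minimal sketch; any clan command works here):

```bash
export CLAN_CLI_PERF=1
clan show   # a performance summary is printed once the command completes
```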
## See all possible packages and tests
To quickly show all possible packages and tests execute:
@@ -152,16 +155,28 @@ To test the CLI locally in a development environment and set breakpoints for deb
## Test Locally in a Nix Sandbox
To run tests in a Nix sandbox:
To run tests in a Nix sandbox, you have two options depending on whether your test functions have been marked as impure or not:
### Running Tests Marked as Impure
If your test functions need to execute `nix build` and have been marked as impure because you can't execute `nix build` inside a Nix sandbox, use the following command:
```bash
nix build .#checks.x86_64-linux.clan-pytest-with-core
nix run .#impure-checks -L
```
This command will run the impure test functions.
### Running Pure Tests
For test functions that have not been marked as impure and don't require executing `nix build`, you can use the following command:
```bash
nix build .#checks.x86_64-linux.clan-pytest-without-core
nix build .#checks.x86_64-linux.clan-pytest --rebuild
```
This command will run all pure test functions.
### Inspecting the Nix Sandbox
If you need to inspect the Nix sandbox while running tests, follow these steps:

View File

@@ -27,7 +27,7 @@ inputs = {
## Import the Clan flake-parts Module
After updating your flake inputs, the next step is to import the Clan flake-parts module. This will make the [Clan options](/options) available within `mkFlake`.
After updating your flake inputs, the next step is to import the Clan flake-parts module. This will make the [Clan options](../options.md) available within `mkFlake`.
```nix
{

View File

@@ -2,9 +2,9 @@
Machines can be added using the following methods
- Create a file `machines/{machine_name}/configuration.nix` (See: [File Autoincludes](../../concepts/autoincludes.md))
- Imperative via cli command: `clan machines create`
- Editing nix expressions in flake.nix. See [`clan-core.lib.clan`](/options/?scope=Flake Options (clan.nix file))
- Editing nix expressions in flake.nix (i.e. via `clan-core.lib.clan`)
- Editing machines/`machine_name`/configuration.nix (automatically included if it exists)
- `clan machines create` (imperative)
See the complete [list](../../concepts/autoincludes.md) of auto-loaded files.
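For instance, the imperative route is a single command (using the machine name `jon` from the example below):

```bash
clan machines create jon
```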
@@ -39,6 +39,7 @@ See the complete [list](../../concepts/autoincludes.md) of auto-loaded files.
The imperative command might create a machine folder in `machines/jon`
and might persist information in `inventory.json`.
### Configuring a machine
!!! Note

View File

@@ -1,129 +1,110 @@
# :material-clock-fast: Getting Started
Ready to manage your fleet of machines?
Ready to create your own Clan and manage a fleet of machines? Follow these simple steps to get started.
We will create a declarative infrastructure using **clan**, **git**, and **nix flakes**.
This guide walks you through setting up your own declarative infrastructure using clan, git and flakes. By the end of this, you will have one or more machines integrated and installed. You can then import your existing NixOS configuration into this setup if you wish.
You'll finish with a centrally managed fleet, ready to import your existing NixOS configuration.
The following steps are meant to be executed on the machine from which you administer the infrastructure.
In order to get started you should have at least one machine with either physical or ssh access available as an installation target. Your local machine can also be used as an installation target if it is already running NixOS.
## Prerequisites
Make sure you have the following:
=== "**Linux**"
* 💻 **Administration Machine**: Run the setup commands from this machine.
* 🛠️ **Nix**: The Nix package manager, installed on your administration machine.
??? info "**How to install Nix (Linux / MacOS / NixOS)**"
**On Linux or macOS:**
1. Run the recommended installer:
```shellSession
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
2. After installation, ensure flakes are enabled by adding this line to `~/.config/nix/nix.conf`:
```
experimental-features = nix-command flakes
```
**On NixOS:**
Nix is already installed. You only need to enable flakes for your user in your `configuration.nix`:
```nix
{
nix.settings.experimental-features = [ "nix-command" "flakes" ];
}
```
Then, run `nixos-rebuild switch` to apply the changes.
* 🎯 **Target Machine(s)**: A remote machine with SSH, or your local machine (if NixOS).
## Create a New Clan
1. Navigate to your desired directory:
```shellSession
cd <your-directory>
```
2. Create a new clan flake:
**Note:** This creates a new directory in your current location
Clan requires Nix to be installed on your system. Run the following command to install Nix:
```shellSession
nix run https://git.clan.lol/clan/clan-core/archive/main.tar.gz#clan-cli --refresh -- flakes create
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
3. Enter a **name** in the prompt:
If you have previously installed Nix, make sure `experimental-features = nix-command flakes` is present in `~/.config/nix/nix.conf` or `/etc/nix/nix.conf`. If this is not the case, please add it to `~/.config/nix/nix.conf`.
```terminalSession
Enter a name for the new clan: my-clan
=== "**NixOS**"
If you run NixOS the `nix` binary is already installed.
You will also need to enable the `nix-command` and `flakes` experimental features in your `configuration.nix`:
```nix
{ nix.settings.experimental-features = [ "nix-command" "flakes" ]; }
```
## Project Structure
=== "**macOS**"
Your new directory, `my-clan`, should contain the following structure:
Clan requires Nix to be installed on your system. Run the following command to install Nix:
```
my-clan/
├── clan.nix
├── flake.lock
├── flake.nix
├── modules/
└── sops/
```
```shellSession
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
!!! note "Templates"
This is the structure for the `default` template.
If you have previously installed Nix, make sure `experimental-features = nix-command flakes` is present in `~/.config/nix/nix.conf` or `/etc/nix/nix.conf`. If this is not the case, please add it to `~/.config/nix/nix.conf`.
Use `clan templates list` and `clan templates --help` for available templates & more. Keep in mind that the exact files may change as templates evolve.
## Create a new clan
## Activate the Environment
To get started, `cd` into your new project directory.
Initialize a new clan flake
```shellSession
cd my-clan
nix run https://git.clan.lol/clan/clan-core/archive/main.tar.gz#clan-cli --refresh -- flakes create
```
Now, activate the environment using one of the following methods.
This should prompt for a *name*:
```terminalSession
Enter a name for the new clan: my-clan
```
Enter a *name*, confirm with *enter*. A directory with that name will be created and initialized.
!!! Note
This command uses the `default` template
See `clan templates list` and the `--help` reference for how to use other templates.
## Explore the Project Structure
Take a look at all project files:
For example, you might see something like:
```{ .console .no-copy }
$ cd my-clan
$ ls
clan.nix flake.lock flake.nix modules sops
```
Don't worry if your output looks different — Clan templates evolve over time.
To interact with your newly created clan, you need to load the `clan` CLI package into your environment by running:
=== "Automatic (direnv, recommended)"
**Prerequisite**: You must have [nix-direnv](https://github.com/nix-community/nix-direnv) installed.
- prerequisite: [install nix-direnv](https://github.com/nix-community/nix-direnv)
Run `direnv allow` to automatically load the environment whenever you enter this directory.
```shellSession
direnv allow
```
=== "Manual (nix develop)"
Run `nix develop` to load the environment for your current shell session.
```shellSession
nix develop
```
## Verify the Setup
Once your environment is active, verify that the clan command is available by running:
verify that you can run `clan` commands:
```shellSession
clan show
```
You should see the default metadata for your new clan:
You should see something like this:
```shellSession
Name: __CHANGE_ME__
Description: None
```
This confirms your setup is working correctly.
You can now change the default name by editing the `meta.name` field in your `clan.nix` file.
To change the name of your clan, edit `meta.name` in the `clan.nix` or `flake.nix` file
```{.nix title="clan.nix" hl_lines="3"}
{

View File

@@ -1,15 +1,12 @@
# Update Machines
# Update Your Machines
The Clan command line interface enables you to update machines remotely over SSH.
In this guide we will teach you how to set a `targetHost` in Nix,
and how to define a remote builder for your machine closures.
Clan CLI enables you to remotely update your machines over SSH. This requires setting up a target address for each target machine.
### Setting `targetHost`
## Setting `targetHost`
In your Nix files, set the `targetHost` to the reachable IP address of your new machine. This eliminates the need to specify `--target-host` with every command.
Set the machines `targetHost` to the reachable IP address of the new machine.
This eliminates the need to specify `--target-host` in CLI commands.
```{.nix title="clan.nix" hl_lines="9"}
{
@@ -26,42 +23,15 @@ inventory.machines = {
# [...]
}
```
The use of `root@` in the target address implies SSH access as the `root` user.
Ensure that the root login is secured and only used when necessary.
## Multiple Target Hosts
You can now experiment with a new interface that allows you to define multiple `targetHost` addresses for different VPNs. Learn more and try it out in our [networking guide](../networking.md).
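To see what clan already knows about your networks, the commands from that guide can be run directly (shown here as a quick sketch):

```bash
clan network list      # view all configured networks and their status
clan network overview  # show the complete network topology
```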
### Setting a Build Host
## Updating Machine Configurations
Execute the following command to update the specified machine:
```bash
clan machines update jon
```
All machines can be updated simultaneously by omitting the machine name:
```bash
clan machines update
```
---
## Advanced Usage
The following options are only needed for special cases, such as limited resources, mixed environments, or private flakes.
### Setting `buildHost`
If the machine does not have enough resources to run the NixOS **evaluation** or **build** itself,
it is also possible to specify a `buildHost` instead.
During an update, clan will ssh into the `buildHost` and run `nixos-rebuild` from there.
!!! Note
The `buildHost` option should be set directly within your machine's Nix configuration, **not** under `inventory.machines`.
If the machine does not have enough resources to run the NixOS evaluation or build itself,
it is also possible to specify a build host instead.
During an update, the cli will ssh into the build host and run `nixos-rebuild` from there.
```{.nix hl_lines="5" .no-copy}
@@ -75,11 +45,7 @@ buildClan {
};
```
### Overriding configuration with CLI flags
`buildHost` / `targetHost`, and other network settings can be temporarily overridden for a single command:
For the full list of flags refer to the [Clan CLI](../../reference/cli/index.md)
You can also override the build host via the command line:
```bash
# Build on a remote host
@@ -90,9 +56,23 @@ clan machines update jon --build-host local
```
!!! Note
Make sure the CPU architecture of the `buildHost` matches that of the `targetHost`
Make sure that the CPU architecture is the same for the buildHost as for the targetHost.
Example:
If you want to deploy to a macOS machine, your architecture is ARM64-Darwin, which means you need a second macOS machine to build it.
For example, if deploying to a macOS machine with an ARM64-Darwin architecture, you need a second macOS machine with the same architecture to build it.
### Updating Machine Configurations
Execute the following command to update the specified machine:
```bash
clan machines update jon
```
You can also update all configured machines simultaneously by omitting the machine name:
```bash
clan machines update
```
### Excluding a machine from `clan machine update`
@@ -116,15 +96,14 @@ This is useful for machines that are not always online or are not part of the re
### Uploading Flake Inputs
When updating remote machines, flake inputs are usually fetched by the build host.
However, if flake inputs require authentication (e.g., private repositories),
use the `--upload-inputs` flag to upload all inputs from your local machine:
However, if your flake inputs require authentication (e.g., private repositories),
you can use the `--upload-inputs` flag to upload all inputs from your local machine:
```bash
clan machines update jon --upload-inputs
```
This is particularly useful when:
- The flake references private Git repositories
- Authentication credentials are only available on the local machine
- Your flake references private Git repositories
- Authentication credentials are only available on your local machine
- The build host doesn't have access to certain network resources

View File

@@ -254,7 +254,7 @@ The following table shows the migration status of each deprecated clanModule:
| `data-mesher` | ✅ [Migrated](../../reference/clanServices/data-mesher.md) | |
| `deltachat` | ❌ Removed | |
| `disk-id` | ❌ Removed | |
| `dyndns` | ✅ [Migrated](../../reference/clanServices/dyndns.md) | |
| `dyndns` | [Being Migrated](https://git.clan.lol/clan/clan-core/pulls/4390) | |
| `ergochat` | ❌ Removed | |
| `garage` | ✅ [Migrated](../../reference/clanServices/garage.md) | |
| `golem-provider` | ❌ Removed | |
@@ -263,18 +263,18 @@ The following table shows the migration status of each deprecated clanModule:
| `iwd` | ❌ Removed | Use [wifi service](../../reference/clanServices/wifi.md) instead |
| `localbackup` | ✅ [Migrated](../../reference/clanServices/localbackup.md) | |
| `localsend` | ❌ Removed | |
| `machine-id` | ✅ [Migrated](../../reference/clan.core/settings.md) | Now an [option](../../reference/clan.core/settings.md) |
| `machine-id` | ❌ Removed | Now an [option](../../reference/clan.core/settings.md) |
| `matrix-synapse` | ✅ [Migrated](../../reference/clanServices/matrix-synapse.md) | |
| `moonlight` | ❌ Removed | |
| `mumble` | ❌ Removed | |
| `mycelium` | ✅ [Migrated](../../reference/clanServices/mycelium.md) | |
| `nginx` | ❌ Removed | |
| `packages` | ✅ [Migrated](../../reference/clanServices/packages.md) | |
| `postgresql` | ✅ [Migrated](../../reference/clan.core/settings.md) | Now an [option](../../reference/clan.core/settings.md) |
| `root-password` | ✅ [Migrated](../../reference/clanServices/users.md) | See [migration guide](../../reference/clanServices/users.md#migration-from-root-password-module) |
| `postgresql` | ❌ Removed | Now an [option](../../reference/clan.core/settings.md) |
| `root-password` | ✅ [Migrated](../../reference/clanServices/users.md) | |
| `single-disk` | ❌ Removed | |
| `sshd` | ✅ [Migrated](../../reference/clanServices/sshd.md) | |
| `state-version` | ✅ [Migrated](../../reference/clan.core/settings.md) | Now an [option](../../reference/clan.core/settings.md) |
| `state-version` | ✅ [Migrated](../../reference/clanServices/state-version.md) | |
| `static-hosts` | ❌ Removed | |
| `sunshine` | ❌ Removed | |
| `syncthing-static-peers` | ❌ Removed | |

View File

@@ -1,20 +1,18 @@
# Convert existing NixOS configurations
# Migrate existing NixOS configurations
This guide will help you convert your existing NixOS configurations into a Clan.
This guide will help you migrate your existing NixOS configurations into Clan.
!!! Warning
Migrating instead of starting new can be trickier and might lead to bugs or
unexpected issues. We recommend reading the [Getting Started](./index.md) guide first.
Once you have a working setup and understand the concepts, transferring your NixOS configurations over is easy.
## Back up your existing configuration
unexpected issues. We recommend following the [Getting Started](../getting-started/index.md) guide first. Once you have a working setup, you can easily transfer your NixOS configurations over.
## Back up your existing configuration!
Before you start, it is strongly recommended to back up your existing
configuration in any form you see fit. If you use version control to manage
your configuration changes, it is also a good idea to follow the migration
guide in a separate branch until everything works as expected.
## Starting Point
We assume you are already using NixOS flakes to manage your configuration. If
have two hosts: **berlin** and **cologne**.
}
```
## 1. Add `clan-core` to `inputs`
## Add clan-core Input
Add `clan-core` to your flake as input.
Add `clan-core` to your flake as input. It will provide everything we need to
manage your configurations with clan.
```nix
inputs.clan-core = {
@@ -57,7 +56,7 @@ inputs.clan-core = {
}
```
## 2. Update Outputs
## Update Outputs
To be able to access our newly added dependency, it has to be added to the
output parameters.
@@ -104,23 +103,26 @@ For the provide flake example, your flake should now look like this:
};
in
{
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
clan = clan.config;
nixosConfigurations = clan.nixosConfigurations;
inherit (clan) clanInternals;
clan = {
inherit (clan) templates;
};
};
}
```
Et voilà! Your existing hosts are now part of a clan.
Existing Nix tooling
Et voilà! Your existing hosts are now part of a clan. Existing Nix tooling
should still work as normal. To check that you didn't make any errors, run `nix
flake show` and verify both hosts are still recognized as if nothing had
changed. You should also see the new `clan` output.
changed. You should also see the new `clanInternals` output.
```
nix flake show
git+file:///my-nixos-config
├───clan: unknown
├───clanInternals: unknown
└───nixosConfigurations
├───berlin: NixOS configuration
└───cologne: NixOS configuration
@@ -129,7 +131,7 @@ git+file:///my-nixos-config
Of course you can also rebuild your configuration using `nixos-rebuild` and
verify everything still works.
## 3. Add `clan-cli` to your `devShells`
## Add Clan CLI devShell
At this point Clan is set up, but you can't use the CLI yet. To do so, it is
recommended to expose it via a `devShell` in your flake. It is also possible to
@@ -161,8 +163,8 @@ cologne
## Specify Targets
Clan needs to know where it can reach your hosts. For testing purposes, set
`clan.core.networking.targetHost` to the machine's address or hostname.
Clan needs to know where it can reach your hosts. For each of your hosts, set
`clan.core.networking.targetHost` to its address or hostname.
```nix
# machines/berlin/configuration.nix
@@ -171,8 +173,6 @@ Clan needs to know where it can reach your hosts. For testing purpose set
}
```
See our guide on properly [configuring machine networking](../networking.md)
## Next Steps
You are now fully set up. Use the CLI to manage your hosts or proceed to

View File

@@ -1,184 +0,0 @@
# Connecting to Your Machines
Clan provides automatic networking with fallback mechanisms to reliably connect to your machines.
## Option 1: Automatic Networking with Fallback (Recommended)
Clan's networking module automatically manages connections through various network technologies with intelligent fallback. When you run `clan ssh` or `clan machines update`, Clan tries each configured network by priority until one succeeds.
### Basic Setup with Internet Service
For machines with public IPs or DNS names, use the `internet` service to configure direct SSH while keeping fallback options:
```{.nix title="flake.nix" hl_lines="7-10 14-16"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
inventory.instances = {
# Direct SSH with fallback support
internet = {
roles.default.machines.server1 = {
settings.address = "server1.example.com";
};
roles.default.machines.server2 = {
settings.address = "192.168.1.100";
};
};
# Fallback: Secure connections via Tor
tor = {
roles.server.tags.nixos = { };
};
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
### Advanced Setup with Multiple Networks
```{.nix title="flake.nix" hl_lines="7-10 13-16 19-21"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
inventory.instances = {
# Priority 1: Try direct connection first
internet = {
roles.default.machines.publicserver = {
settings.address = "public.example.com";
};
};
# Priority 2: VPN for internal machines
zerotier = {
roles.controller.machines."controller" = { };
roles.peer.tags.nixos = { };
};
# Priority 3: Tor as universal fallback
tor = {
roles.server.tags.nixos = { };
};
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
### How It Works
Clan automatically tries networks in order of priority:
1. Direct internet connections (if configured)
2. VPN networks (ZeroTier, Tailscale, etc.)
3. Tor hidden services
4. Any other configured networks
If one network fails, Clan automatically tries the next.
### Useful Commands
```bash
# View all configured networks and their status
clan network list
# Test connectivity through all networks
clan network ping machine1
# Show complete network topology
clan network overview
```
## Option 2: Manual targetHost (Bypasses Fallback!)
!!! warning
Setting `targetHost` directly **disables all automatic networking and fallback**. Only use this if you need complete control and don't want Clan's intelligent connection management.
### Using Inventory (For Static Addresses)
Use inventory-level `targetHost` when the address is **static** and doesn't depend on NixOS configuration:
```{.nix title="flake.nix" hl_lines="8"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
inventory.machines.server = {
# WARNING: This bypasses all networking modules!
# Use for: Static IPs, DNS names, known hostnames
deploy.targetHost = "root@192.168.1.100";
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
**When to use inventory-level:**
- Static IP addresses: `"root@192.168.1.100"`
- DNS names: `"user@server.example.com"`
- Any address that doesn't change based on machine configuration
### Using NixOS Configuration (For Dynamic Addresses)
Use machine-level `targetHost` when you need to **interpolate values from the NixOS configuration**:
```{.nix title="flake.nix" hl_lines="7"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
machines.server = { config, ... }: {
# WARNING: This also bypasses all networking modules!
# REQUIRED for: Addresses that depend on NixOS config
clan.core.networking.targetHost = "root@${config.networking.hostName}.local";
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
**When to use machine-level (NixOS config):**
- Using hostName from config: `"root@${config.networking.hostName}.local"`
- Building from multiple config values: `"${config.users.users.deploy.name}@${config.networking.hostName}"`
- Any address that depends on evaluated NixOS configuration
!!! info "Key Difference"
**Inventory-level** (`deploy.targetHost`) is evaluated immediately and works with static strings.
**Machine-level** (`clan.core.networking.targetHost`) is evaluated after NixOS configuration and can access `config.*` values.
## Quick Decision Guide
| Scenario | Recommended Approach | Why |
|----------|---------------------|-----|
| Public servers | `internet` service | Keeps fallback options |
| Mixed infrastructure | Multiple networks | Automatic failover |
| Machines behind NAT | ZeroTier/Tor | NAT traversal with fallback |
| Testing/debugging | Manual targetHost | Full control, no magic |
| Single static machine | Manual targetHost | Simple, no overhead |
## Command-Line Override
The `--target-host` flag bypasses ALL networking configuration:
```bash
# Emergency access - ignores all networking config
clan machines update server --target-host root@backup-ip.com
# Direct SSH - no fallback attempted
clan ssh laptop --target-host user@10.0.0.5
```
Use this for debugging or emergency access when automatic networking isn't working.

View File

@@ -255,50 +255,11 @@ outputs = inputs: flake-parts.lib.mkFlake { inherit inputs; } ({self, lib, ...}:
})
```
The benefit of this approach is that downstream users can override the value of
`myClan` by using `mkForce` or other priority modifiers.
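For example, a downstream flake-parts module could swap in its own clan definition (a minimal sketch; `myClan` is the hypothetical option from the example above):
```nix
# downstream flake-parts module (sketch)
{ lib, inputs, ... }:
{
  # mkForce outranks the upstream default priority, so this definition wins
  myClan = lib.mkForce (inputs.clan-core.lib.clan {
    inventory.machines.myhost = { };
  });
}
```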
## Example: A machine-type service
Users often have different types of machines. These could be any classification
you like, for example "servers" and "desktops". Having such distinctions allows
reusing parts of your configuration that should be applied to a class of
machines. Since this is such a common pattern, here is how to write such a
service.
For this example we have two roles: `server` and `desktop`. Additionally, we
can use the `perMachine` section to add configuration to all machines regardless
of their type.
```nix title="machine-type.nix"
{
_class = "clan.service";
manifest.name = "machine-type";
roles.server.perInstance.nixosModule = ./server.nix;
roles.desktop.perInstance.nixosModule = ./desktop.nix;
perMachine.nixosModule = {
# Configuration for all machines (any type)
};
}
```
In the inventory we then assign machines to a type, e.g. by using tags:
```nix title="flake.nix"
instances.machine-type = {
module.input = "self";
module.name = "@pinpox/machine-type";
roles.desktop.tags.desktop = { };
roles.server.tags.server = { };
};
```
The benefit of this approach is that downstream users can override the value of `myClan` by using `mkForce` or other priority modifiers.
---
## Further Reading
## Further
- [Reference Documentation for Service Authors](../../reference/clanServices/clan-service-author-interface.md)
- [Migration Guide from ClanModules to ClanServices](../../guides/migrations/migrate-inventory-services.md)

View File

@@ -0,0 +1,84 @@
# How to Set `targetHost` for a Machine
The `targetHost` defines where the machine can be reached for operations like SSH or deployment. You can set it in two ways, depending on your use case.
---
## ✅ Option 1: Use the Inventory (Recommended for Static Hosts)
If the hostname is **static**, like `server.example.com`, set it in the **inventory**:
```{.nix title="flake.nix" hl_lines="8"}
{
# elided
outputs =
{ self, clan-core, ... }:
let
# Sometimes this attribute set is defined in clan.nix
clan = clan-core.lib.clan {
inventory.machines.jon = {
deploy.targetHost = "root@server.example.com";
};
};
in
{
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
# elided
};
}
```
This is fast, simple, and explicit, and doesn't require evaluating the NixOS config. It can also be displayed in the clan-cli or clan-app.
---
## ✅ Option 2: Use NixOS (Only for Dynamic Hosts)
If your target host depends on a **dynamic expression** (like using the machines evaluated FQDN), set it inside the NixOS module:
```{.nix title="flake.nix" hl_lines="8"}
{
# elided
outputs =
{ self, clan-core, ... }:
let
# Sometimes this attribute set is defined in clan.nix
clan = clan-core.lib.clan {
machines.jon = {config, ...}: {
clan.core.networking.targetHost = "jon@${config.networking.fqdn}";
};
};
in
{
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
# elided
};
}
```
Use this **only if the value cannot be made static**, because its slower and won't be displayed in the clan-cli or clan-app yet.
---
## 📝 TL;DR
| Use Case | Use Inventory? | Example |
| ------------------------- | -------------- | -------------------------------- |
| Static hostname | ✅ Yes | `root@server.example.com` |
| Dynamic config expression | ❌ No | `jon@${config.networking.fqdn}` |
---
## 🚀 Coming Soon: Unified Networking Module
Were working on a new networking module that will automatically do all of this for you.
- Easier to use
- Sane defaults: Youll always be able to reach the machine — no need to worry about hostnames.
- ✨ Migration from **either method** will be supported and simple.
## Summary
- Ask: *Does this hostname dynamically change based on NixOS config?*
- If **no**, use the inventory.
- If **yes**, then use NixOS config.

6
docs/site/options.md Normal file
View File

@@ -0,0 +1,6 @@
---
template: options.html
---
<iframe src="/options-page/" height="1000" width="100%"></iframe>

View File

@@ -4,7 +4,7 @@ This section of the site provides an overview of available options and commands
---
- [Clan Configuration Option](/options) - for defining a Clan
- [Clan Configuration Option](../options.md) - for defining a Clan
- Learn how to use the [Clan CLI](./cli/index.md)
- Explore available [services](./clanServices/index.md)
- [NixOS Configuration Options](./clan.core/index.md) - Additional options available on a NixOS machine.

46
flake.lock generated
View File

@@ -13,11 +13,11 @@
]
},
"locked": {
"lastModified": 1756695982,
"narHash": "sha256-dyLhOSDzxZtRgi5aj/OuaZJUsuvo+8sZ9CU/qieZ15c=",
"rev": "cc8f26e7e6c2dc985526ba59b286ae5a83168cdb",
"lastModified": 1753067306,
"narHash": "sha256-jyoEbaXa8/MwVQ+PajUdT63y3gYhgD9o7snO/SLaikw=",
"rev": "18dfd42bdb2cfff510b8c74206005f733e38d8b9",
"type": "tarball",
"url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/cc8f26e7e6c2dc985526ba59b286ae5a83168cdb.tar.gz"
"url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/18dfd42bdb2cfff510b8c74206005f733e38d8b9.tar.gz"
},
"original": {
"type": "tarball",
@@ -31,11 +31,11 @@
]
},
"locked": {
"lastModified": 1756115622,
"narHash": "sha256-iv8xVtmLMNLWFcDM/HcAPLRGONyTRpzL9NS09RnryRM=",
"lastModified": 1754971456,
"narHash": "sha256-p04ZnIBGzerSyiY2dNGmookCldhldWAu03y0s3P8CB0=",
"owner": "nix-community",
"repo": "disko",
"rev": "bafad29f89e83b2d861b493aa23034ea16595560",
"rev": "8246829f2e675a46919718f9a64b71afe3bfb22d",
"type": "github"
},
"original": {
@@ -71,11 +71,11 @@
]
},
"locked": {
"lastModified": 1755825449,
"narHash": "sha256-XkiN4NM9Xdy59h69Pc+Vg4PxkSm9EWl6u7k6D5FZ5cM=",
"lastModified": 1751313918,
"narHash": "sha256-HsJM3XLa43WpG+665aGEh8iS8AfEwOIQWk3Mke3e7nk=",
"owner": "nix-darwin",
"repo": "nix-darwin",
"rev": "8df64f819698c1fee0c2969696f54a843b2231e8",
"rev": "e04a388232d9a6ba56967ce5b53a8a6f713cdfcf",
"type": "github"
},
"original": {
@@ -86,11 +86,11 @@
},
"nix-select": {
"locked": {
"lastModified": 1755887746,
"narHash": "sha256-lzWbpHKX0WAn/jJDoCijIDss3rqYIPawe46GDaE6U3g=",
"rev": "92c2574c5e113281591be01e89bb9ddb31d19156",
"lastModified": 1745005516,
"narHash": "sha256-IVaoOGDIvAa/8I0sdiiZuKptDldrkDWUNf/+ezIRhyc=",
"rev": "69d8bf596194c5c35a4e90dd02c52aa530caddf8",
"type": "tarball",
"url": "https://git.clan.lol/api/v1/repos/clan/nix-select/archive/92c2574c5e113281591be01e89bb9ddb31d19156.tar.gz"
"url": "https://git.clan.lol/api/v1/repos/clan/nix-select/archive/69d8bf596194c5c35a4e90dd02c52aa530caddf8.tar.gz"
},
"original": {
"type": "tarball",
@@ -99,11 +99,11 @@
},
"nixos-facter-modules": {
"locked": {
"lastModified": 1756491981,
"narHash": "sha256-lXyDAWPw/UngVtQfgQ8/nrubs2r+waGEYIba5UX62+k=",
"lastModified": 1750412875,
"narHash": "sha256-uP9Xxw5XcFwjX9lNoYRpybOnIIe1BHfZu5vJnnPg3Jc=",
"owner": "nix-community",
"repo": "nixos-facter-modules",
"rev": "c1b29520945d3e148cd96618c8a0d1f850965d8c",
"rev": "14df13c84552a7d1f33c1cd18336128fbc43f920",
"type": "github"
},
"original": {
@@ -115,10 +115,10 @@
"nixpkgs": {
"locked": {
"lastModified": 315532800,
"narHash": "sha256-h8Sx4S+/0FpodZji6W9lHzwY5BcuUG85Aj3GfhvGC2o=",
"rev": "a650b5d0de99158323597f048667c4d914243224",
"narHash": "sha256-2ILJtWugqmMyZnaWnHh+5yyw8RZWbKu9rVdeWmrBVhY=",
"rev": "a595dde4d0d31606e19dcec73db02279db59d201",
"type": "tarball",
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre845298.a650b5d0de99/nixexprs.tar.xz"
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre844295.a595dde4d0d3/nixexprs.tar.xz"
},
"original": {
"type": "tarball",
@@ -181,11 +181,11 @@
]
},
"locked": {
"lastModified": 1756662192,
"narHash": "sha256-F1oFfV51AE259I85av+MAia221XwMHCOtZCMcZLK2Jk=",
"lastModified": 1754847726,
"narHash": "sha256-2vX8QjO5lRsDbNYvN9hVHXLU6oMl+V/PsmIiJREG4rE=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "1aabc6c05ccbcbf4a635fb7a90400e44282f61c4",
"rev": "7d81f6fb2e19bf84f1c65135d1060d829fae2408",
"type": "github"
},
"original": {

View File

@@ -67,6 +67,7 @@
clan = {
meta.name = "clan-core";
inventory = {
services = { };
machines = {
"test-darwin-machine" = {
machineClass = "darwin";
@@ -96,7 +97,6 @@
./nixosModules/flake-module.nix
./pkgs/flake-module.nix
./templates/flake-module.nix
./pkgs/clan-cli/clan_cli/tests/flake-module.nix
]
++ [
(if pathExists ./flakeModules/clan.nix then import ./flakeModules/clan.nix inputs.self else { })

View File

@@ -87,8 +87,6 @@ in
relativeDir = removePrefix "${self}/" (toString config.clan.directory);
update-vars = hostPkgs.writeShellScriptBin "update-vars" ''
set -x
export PRJ_ROOT=$(git rev-parse --show-toplevel)
${update-vars-script} $PRJ_ROOT/${relativeDir} ${testName}
'';

View File

@@ -33,6 +33,7 @@ lib.fix (
evalService = clanLib.callLib ./modules/inventory/distributed-service/evalService.nix { };
# ------------------------------------
# ClanLib functions
evalClan = clanLib.callLib ./modules/inventory/eval-clan-modules { };
inventory = clanLib.callLib ./modules/inventory { };
modules = clanLib.callLib ./modules/inventory/frontmatter { };
test = clanLib.callLib ./test { };

Some files were not shown because too many files have changed in this diff