Compare commits

1 commit

Author: pinpox
SHA1: 3ac9167f18
Message: expose metrics as json
Date: 2025-08-20 11:04:01 +02:00

512 changed files with 6244 additions and 12592 deletions


@@ -10,7 +10,7 @@ jobs:
if: github.repository_owner == 'clan-lol'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: actions/create-github-app-token@v2


@@ -8,7 +8,7 @@ Our mission is simple: to democratize computing by providing tools that empower
## Features of Clan
- **Full-Stack System Deployment:** Utilize Clan's toolkit alongside Nix's reliability to build and manage systems effortlessly.
- **Full-Stack System Deployment:** Utilize Clans toolkit alongside Nix's reliability to build and manage systems effortlessly.
- **Overlay Networks:** Secure, private communication channels between devices.
- **Virtual Machine Integration:** Seamless operation of VM applications within the main operating system.
- **Robust Backup Management:** Long-term, self-hosted data preservation.


@@ -55,8 +55,7 @@
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
# Skip flash test on aarch64-linux for now as it's too slow
checks = lib.optionalAttrs (pkgs.stdenv.isLinux && pkgs.hostPlatform.system != "aarch64-linux") {
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
nixos-test-flash = self.clanLib.test.baseTest {
name = "flash";
nodes.target = {


@@ -232,7 +232,6 @@
"-i", ssh_conn.ssh_key,
"--option", "store", os.environ['CLAN_TEST_STORE'],
"--update-hardware-config", "nixos-facter",
"--no-persist-state",
]
subprocess.run(clan_cmd, check=True)
@@ -302,8 +301,7 @@
"test-install-machine-without-system",
"-i", ssh_conn.ssh_key,
"--option", "store", os.environ['CLAN_TEST_STORE'],
"--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
"--yes"
f"nonrootuser@localhost:{ssh_conn.host_port}"
]
result = subprocess.run(clan_cmd, capture_output=True, cwd=flake_dir)
@@ -327,9 +325,7 @@
"test-install-machine-without-system",
"-i", ssh_conn.ssh_key,
"--option", "store", os.environ['CLAN_TEST_STORE'],
"--target-host",
f"nonrootuser@localhost:{ssh_conn.host_port}",
"--yes"
f"nonrootuser@localhost:{ssh_conn.host_port}"
]
result = subprocess.run(clan_cmd, capture_output=True, cwd=flake_dir)


@@ -1,32 +0,0 @@
This service sets up a certificate authority (CA) that can issue certificates to
other machines in your clan. For this, the `ca` role is used.
It additionally provides a `default` role that can be applied to all machines
in your clan and will make sure they trust your CA.
## Example Usage
The following configuration would add a CA for the top-level domain `.foo`. If
the machine `server` now hosts a webservice at `https://something.foo`, it will
get a certificate from `ca` which is valid inside your clan. The machine
`client` will trust this certificate if it makes a request to
`https://something.foo`.
This clan service can be combined with the `coredns` service for easy-to-deploy,
SSL-secured clan-internal service hosting.
```nix
inventory = {
machines.ca = { };
machines.client = { };
machines.server = { };
instances."certificates" = {
module.name = "certificates";
module.input = "self";
roles.ca.machines.ca.settings.tlds = [ "foo" ];
roles.default.machines.client = { };
roles.default.machines.server = { };
};
};
```
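Once deployed, any machine with the `default` role trusts the clan CA via its system trust store, so a TLS connection to `https://something.foo` from `client` should validate. A minimal sketch of such a check (host name taken from the example above; the script itself is illustrative, not part of the service):

```python
import socket
import ssl

# create_default_context() uses the system trust store, which on
# default-role machines includes the clan root CA.
ctx = ssl.create_default_context()
with socket.create_connection(("something.foo", 443)) as sock:
    with ctx.wrap_socket(sock, server_hostname="something.foo") as tls:
        # Print the subject of the certificate issued by the clan CA
        print(tls.getpeercert()["subject"])
```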


@@ -1,245 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "certificates";
manifest.description = "Sets up certificates internal to your Clan";
manifest.categories = [ "Network" ];
manifest.readme = builtins.readFile ./README.md;
roles.ca = {
interface =
{ lib, ... }:
{
options.acmeEmail = lib.mkOption {
type = lib.types.str;
default = "none@none.tld";
description = ''
Email address for account creation and correspondence from the CA.
It is recommended to use the same email for all certs to avoid account
creation limits.
'';
};
options.tlds = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "Top-level domains for this CA. Certificates will be issued and trusted for *.<tld>";
};
options.expire = lib.mkOption {
type = lib.types.nullOr lib.types.str;
description = "When the certificate should expire.";
default = "8760h";
example = "8760h";
};
};
perInstance =
{ settings, ... }:
{
nixosModule =
{
config,
pkgs,
lib,
...
}:
let
domains = map (tld: "ca.${tld}") settings.tlds;
in
{
security.acme.defaults.email = settings.acmeEmail;
security.acme = {
certs = builtins.listToAttrs (
map (domain: {
name = domain;
value = {
server = "https://${domain}:1443/acme/acme/directory";
};
}) domains
);
};
networking.firewall.allowedTCPPorts = [
80
443
];
services.nginx = {
enable = true;
recommendedProxySettings = true;
virtualHosts = builtins.listToAttrs (
map (domain: {
name = domain;
value = {
addSSL = true;
enableACME = true;
locations."/".proxyPass = "https://localhost:1443";
locations."= /ca.crt".alias =
config.clan.core.vars.generators.step-intermediate-cert.files."intermediate.crt".path;
};
}) domains
);
};
clan.core.vars.generators = {
# Intermediate key generator
"step-intermediate-key" = {
files."intermediate.key" = {
secret = true;
deploy = true;
owner = "step-ca";
group = "step-ca";
};
runtimeInputs = [ pkgs.step-cli ];
script = ''
step crypto keypair --kty EC --curve P-256 --no-password --insecure $out/intermediate.pub $out/intermediate.key
'';
};
# Intermediate certificate generator
"step-intermediate-cert" = {
files."intermediate.crt".secret = false;
dependencies = [
"step-ca"
"step-intermediate-key"
];
runtimeInputs = [ pkgs.step-cli ];
script = ''
# Create intermediate certificate
step certificate create \
--ca $in/step-ca/ca.crt \
--ca-key $in/step-ca/ca.key \
--ca-password-file /dev/null \
--key $in/step-intermediate-key/intermediate.key \
--template ${pkgs.writeText "intermediate.tmpl" ''
{
"subject": {{ toJson .Subject }},
"keyUsage": ["certSign", "crlSign"],
"basicConstraints": {
"isCA": true,
"maxPathLen": 0
},
"nameConstraints": {
"critical": true,
"permittedDNSDomains": [${
(lib.strings.concatStringsSep "," (map (tld: ''"${tld}"'') settings.tlds))
}]
}
}
''} ${lib.optionalString (settings.expire != null) "--not-after ${settings.expire}"} \
--not-before=-12h \
--no-password --insecure \
"Clan Intermediate CA" \
$out/intermediate.crt
'';
};
};
services.step-ca = {
enable = true;
intermediatePasswordFile = "/dev/null";
address = "0.0.0.0";
port = 1443;
settings = {
root = config.clan.core.vars.generators.step-ca.files."ca.crt".path;
crt = config.clan.core.vars.generators.step-intermediate-cert.files."intermediate.crt".path;
key = config.clan.core.vars.generators.step-intermediate-key.files."intermediate.key".path;
dnsNames = domains;
logger.format = "text";
db = {
type = "badger";
dataSource = "/var/lib/step-ca/db";
};
authority = {
provisioners = [
{
type = "ACME";
name = "acme";
forceCN = true;
}
];
claims = {
maxTLSCertDuration = "2160h";
defaultTLSCertDuration = "2160h";
};
backdate = "1m0s";
};
tls = {
cipherSuites = [
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
];
minVersion = 1.2;
maxVersion = 1.3;
renegotiation = false;
};
};
};
};
};
};
# Empty role, so we can add non-ca machines to the instance to trust the CA
roles.default = {
interface =
{ lib, ... }:
{
options.acmeEmail = lib.mkOption {
type = lib.types.str;
default = "none@none.tld";
description = ''
Email address for account creation and correspondence from the CA.
It is recommended to use the same email for all certs to avoid account
creation limits.
'';
};
};
perInstance =
{ settings, ... }:
{
nixosModule.security.acme.defaults.email = settings.acmeEmail;
};
};
# All machines (independent of role) will trust the CA
perMachine.nixosModule =
{ pkgs, config, ... }:
{
# Root CA generator
clan.core.vars.generators = {
"step-ca" = {
share = true;
files."ca.key" = {
secret = true;
deploy = false;
};
files."ca.crt".secret = false;
runtimeInputs = [ pkgs.step-cli ];
script = ''
step certificate create --template ${pkgs.writeText "root.tmpl" ''
{
"subject": {{ toJson .Subject }},
"issuer": {{ toJson .Subject }},
"keyUsage": ["certSign", "crlSign"],
"basicConstraints": {
"isCA": true,
"maxPathLen": 1
}
}
''} "Clan Root CA" $out/ca.crt $out/ca.key \
--kty EC --curve P-256 \
--not-after=8760h \
--not-before=-12h \
--no-password --insecure
'';
};
};
security.pki.certificateFiles = [ config.clan.core.vars.generators."step-ca".files."ca.crt".path ];
environment.systemPackages = [ pkgs.openssl ];
security.acme.acceptTerms = true;
};
}


@@ -1,21 +0,0 @@
{
self,
lib,
...
}:
let
module = lib.modules.importApply ./default.nix {
inherit (self) packages;
};
in
{
clan.modules.certificates = module;
perSystem =
{ ... }:
{
clan.nixosTests.certificates = {
imports = [ ./tests/vm/default.nix ];
clan.modules.certificates = module;
};
};
}


@@ -1,84 +0,0 @@
{
name = "certificates";
clan = {
directory = ./.;
inventory = {
machines.ca = { }; # 192.168.1.1
machines.client = { }; # 192.168.1.2
machines.server = { }; # 192.168.1.3
instances."certificates" = {
module.name = "certificates";
module.input = "self";
roles.ca.machines.ca.settings.tlds = [ "foo" ];
roles.default.machines.client = { };
roles.default.machines.server = { };
};
};
};
nodes =
let
hostConfig = ''
192.168.1.1 ca.foo
192.168.1.3 test.foo
'';
in
{
client.networking.extraHosts = hostConfig;
ca.networking.extraHosts = hostConfig;
server = {
networking.extraHosts = hostConfig;
# TODO: Could this be set automatically?
# I would like to get this information from the coredns module, but we
# cannot model dependencies yet
security.acme.certs."test.foo".server = "https://ca.foo/acme/acme/directory";
# Host a simple service on 'server', with SSL provided via our CA. 'client'
# should be able to curl it via https and accept the certificates
# presented
networking.firewall.allowedTCPPorts = [
80
443
];
services.nginx = {
enable = true;
virtualHosts."test.foo" = {
enableACME = true;
forceSSL = true;
locations."/" = {
return = "200 'test server response'";
extraConfig = "add_header Content-Type text/plain;";
};
};
};
};
};
testScript = ''
start_all()
import time
time.sleep(3)
ca.succeed("systemctl restart acme-order-renew-ca.foo.service ")
time.sleep(3)
server.succeed("systemctl restart acme-test.foo.service")
# It takes a while for the correct certs to appear (before that self-signed
# are presented by nginx) so we wait for a bit.
client.wait_until_succeeds("curl -v https://test.foo")
# Show certificate information for debugging
client.succeed("openssl s_client -connect test.foo:443 -servername test.foo </dev/null 2>/dev/null | openssl x509 -text -noout 1>&2")
'';
}


@@ -1,6 +0,0 @@
[
{
"publickey": "age1yd2cden7jav8x4nzx2fwze2fsa5j0qm2m3t7zum765z3u4gj433q7dqj43",
"type": "age"
}
]


@@ -1,6 +0,0 @@
[
{
"publickey": "age1js225d8jc507sgcg0fdfv2x3xv3asm4ds5c6s4hp37nq8spxu95sc5x3ce",
"type": "age"
}
]


@@ -1,6 +0,0 @@
[
{
"publickey": "age1nwuh8lc604mnz5r8ku8zswyswnwv02excw237c0cmtlejp7xfp8sdrcwfa",
"type": "age"
}
]


@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:6+XilULKRuWtAZ6B8Lj9UqCfi1T6dmqrDqBNXqS4SvBwM1bIWiL6juaT1Q7ByOexzID7tY740gmQBqTey54uLydh8mW0m4ZtUqw=,iv:9kscsrMPBGkutTnxrc5nrc7tQXpzLxw+929pUDKqTu0=,tag:753uIjm8ZRs0xsjiejEY8g==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA1d3kycldZRXhmR0FqTXJp\nWWU0MDBYNmxxbFE5M2xKYm5KWnQ0MXBHNEM4CjN4RFFVcFlkd3pjTFVDQ3Vackdj\nVTVhMWoxdFpsWHp5S1p4L05kYk5LUkkKLS0tIENtZFZZTjY2amFVQmZLZFplQzBC\nZm1vWFI4MXR1ZHIxTTQ5VXdSYUhvOTQKte0bKjXQ0xA8FrpuChjDUvjVqp97D8kT\n3tVh6scdjxW48VSBZP1GRmqcMqCdj75GvJTbWeNEV4PDBW7GI0UW+Q==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:39Z",
"mac": "ENC[AES256_GCM,data:AftMorrH7qX5ctVu5evYHn5h9pC4Mmm2VYaAV8Hy0PKTc777jNsL6DrxFVV3NVqtecpwrzZFWKgzukcdcRJe4veVeBrusmoZYtifH0AWZTEVpVlr2UXYYxCDmNZt1WHfVUo40bT//X6QM0ye6a/2Y1jYPbMbryQNcGmnpk9PDvU=,iv:5nk+d8hzA05LQp7ZHRbIgiENg2Ha6J6YzyducM6zcNU=,tag:dy1hqWVzMu/+fSK57h9ZCA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}


@@ -1 +0,0 @@
../../../users/admin


@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:jdTuGQUYvT1yXei1RHKsOCsABmMlkcLuziHDVhA7NequZeNu0fSbrJTXQDCHsDGhlYRcjU5EsEDT750xdleXuD3Gs9zWvPVobI4=,iv:YVow3K1j6fzRF9bRfIEpuOkO/nRpku/UQxWNGC+UJQQ=,tag:cNLM5R7uu6QpwPB9K6MYzg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvOVF2WXRSL0NpQzFZR01I\nNU85TGcyQmVDazN1dmpuRFVTZEg5NDRKTGhrCk1IVjFSU1V6WHBVRnFWcHkyVERr\nTjFKbW1mQ2FWOWhjN2VPamMxVEQ5VkkKLS0tIENVUGlhanhuWGtDKzBzRmk2dE4v\nMXZBRXNMa3IrOTZTNHRUWVE3UXEwSWMK2cBLoL/H/Vxd/klVrqVLdX9Mww5j7gw/\nEWc5/hN+km6XoW+DiJxVG4qaJ7qqld6u5ZnKgJT+2h9CfjA04I2akg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:51Z",
"mac": "ENC[AES256_GCM,data:zOBQVM2Ydu4v0+Fw3p3cEU+5+7eKaadV0tKro1JVOxclG1Vs6Myq57nw2eWf5JxIl0ulL+FavPKY26qOQ3aqcGOT3PMRlCda9z+0oSn9Im9bE/DzAGmoH/bp76kFkgTTOCZTMUoqJ+UJqv0qy1BH/92sSSKmYshEX6d1vr5ISrw=,iv:i9ZW4sLxOCan4UokHlySVr1CW39nCTusG4DmEPj/gIw=,tag:iZBDPHDkE3Vt5mFcFu1TPQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}


@@ -1 +0,0 @@
../../../users/admin


@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:5CJuHcxJMXZJ8GqAeG3BrbWtT1kade4kxgJsn1cRpmr1UgN0ZVYnluPEiBscClNSOzcc6vcrBpfTI3dj1tASKTLP58M+GDBFQDo=,iv:gsK7XqBGkYCoqAvyFlIXuJ27PKSbTmy7f6cgTmT2gow=,tag:qG5KejkBvy9ytfhGXa/Mnw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBxbzVqYkplTzJKN1pwS3VM\naFFIK2VsR3lYUVExYW9ieERBL0tlcFZtVzJRCkpiLzdmWmFlOUZ5QUJ4WkhXZ2tQ\nZm92YXBCV0RpYnIydUdEVTRiamI4bjAKLS0tIG93a2htS1hFcjBOeVFnNCtQTHVr\na2FPYjVGbWtORjJVWXE5bndPU1RWcXMKikMEB7X+kb7OtiyqXn3HRpLYkCdoayDh\n7cjGnplk17q25/lRNHM4JVS5isFfuftCl01enESqkvgq+cwuFwa9DQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:59Z",
"mac": "ENC[AES256_GCM,data:xybV2D0xukZnH2OwRpIugPnS7LN9AbgGKwFioPJc1FQWx9TxMUVDwgMN6V5WrhWkXgF2zP4krtDYpEz4Vq+LbOjcnTUteuCc+7pMHubuRuip7j+M32MH1kuf4bVZuXbCfvm7brGxe83FzjoioLqzA8g/X6Q1q7/ErkNeFjluC3Q=,iv:QEW3EUKSRZY3fbXlP7z+SffWkQeXwMAa5K8RQW7NvPE=,tag:DhFxY7xr7H1Wbd527swD0Q==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}


@@ -1 +0,0 @@
../../../users/admin


@@ -1,12 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIBsDCCAVegAwIBAgIQbT1Ivm+uwyf0HNkJfan2BTAKBggqhkjOPQQDAjAXMRUw
EwYDVQQDEwxDbGFuIFJvb3QgQ0EwHhcNMjUwOTAxMjA0MzAzWhcNMjYwOTAyMDg0
MzAzWjAfMR0wGwYDVQQDExRDbGFuIEludGVybWVkaWF0ZSBDQTBZMBMGByqGSM49
AgEGCCqGSM49AwEHA0IABDXCNrUIotju9P1U6JxLV43sOxLlRphQJS4dM+lvjTZc
aQ+HwQg0AHVlQNRwS3JqKrJJtJVyKbZklh6eFaDPoj6jfTB7MA4GA1UdDwEB/wQE
AwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBRKHaccHgP2ccSWVBWN
zGoDdTg7aTAfBgNVHSMEGDAWgBSfsnz4phMJx9su/kgeF/FbZQCBgzAVBgNVHR4B
Af8ECzAJoAcwBYIDZm9vMAoGCCqGSM49BAMCA0cAMEQCICiUDk1zGNzpS/iVKLfW
zUGaCagpn2mCx4xAXQM9UranAiAn68nVYGWjkzhU31wyCAupxOjw7Bt96XXqIAz9
hLLtMA==
-----END CERTIFICATE-----


@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:Auonh9fa7jSkld1Zyxw74x5ydj6Xc+0SOgiqumVETNCfner9K96Rmv1PkREuHNGWPsnzyEM3pRT8ijvu3QoKvy9QPCCewyT07Wqe4G74+bk1iMeAHsV3To6kHs6M8OISvE+CmG0+hlLmdfRSabTzyWPLHbOjvFTEEuA5G7xiryacSYOE++eeEHdn+oUDh/IMTcfLjCGMjsXFikx1Hb+ofeRTlCg47+0w4MXVvQkOzQB5V2C694jZXvZ19jd/ioqr8YASz2xatGvqwW6cpZxqOWyZJ0UAj/6yFk6tZWifqVB3wgU=,iv:ITFCrDkeWl4GWCebVq15ei9QmkOLDwUIYojKZ2TU6JU=,tag:8k4iYbCIusUykY79H86WUQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBsT25UbjJTQ2tzbnQyUm9p\neWx1UlZIeVpocnBqUCt0YnFlN2FOU25Lb0hNCmdXUUsyalRTbHRRQ0NLSGc1YllV\nUXRwaENhaXU1WmdnVDE0UWprUUUyeDAKLS0tIHV3dHU3aG5JclM0V3FadzN0SU14\ndFptbEJUNXQ4QVlqbkJ1TjAvdDQwSGsKcKPWUjhK7wzIpdIdksMShF2fpLdDTUBS\nZiU7P1T+3psxad9qhapvU0JrAY+9veFaYVEHha2aN/XKs8HqUcTp3A==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1yd2cden7jav8x4nzx2fwze2fsa5j0qm2m3t7zum765z3u4gj433q7dqj43",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBjZFVteVZwVGVmRE9NT3hG\nNGMyS3FSaXluM1FpeUp6SDVMUEpwYzg5SmdvCkRPU0QyU1JicGNkdlMyQWVkT0k3\nL2YrbDhWeGk4WFhxcUFmTmhZQ0pEQncKLS0tIG85Ui9rKzBJQ2VkMFBUQTMvSTlu\nbm8rZ09Wa24rQkNvTTNtYTZBN3MrZlkK7cjNhlUKZdOrRq/nKUsbUQgNTzX8jO+0\nzADpz6WCMvsJ15xazc10BGh03OtdMWl5tcoWMaZ71HWtI9Gip5DH0w==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:42Z",
"mac": "ENC[AES256_GCM,data:9xlO5Yis8DG/y8GjvP63NltD4xEL7zqdHL2cQE8gAoh/ZamAmK5ZL0ld80mB3eIYEPKZYvmUYI4Lkrge2ZdqyDoubrW+eJ3dxn9+StxA9FzXYwUE0t+bbsNJfOOp/kDojf060qLGsu0kAGKd2ca4WiDccR0Cieky335C7Zzhi/Q=,iv:bWQ4wr0CJHSN+6ipUbkYTDWZJyFQjDKszfpVX9EEUsY=,tag:kADIFgJBEGCvr5fPbbdEDA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}


@@ -1,10 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIBcTCCARigAwIBAgIRAIix99+AE7Y+uyiLGaRHEhUwCgYIKoZIzj0EAwIwFzEV
MBMGA1UEAxMMQ2xhbiBSb290IENBMB4XDTI1MDkwMTIwNDI1N1oXDTI2MDkwMjA4
NDI1N1owFzEVMBMGA1UEAxMMQ2xhbiBSb290IENBMFkwEwYHKoZIzj0CAQYIKoZI
zj0DAQcDQgAEk7nn9kzxI+xkRmNMlxD+7T78UqV3aqus0foJh6uu1CHC+XaebMcw
JN95nAe3oYA3yZG6Mnq9nCxsYha4EhzGYqNFMEMwDgYDVR0PAQH/BAQDAgEGMBIG
A1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFJ+yfPimEwnH2y7+SB4X8VtlAIGD
MAoGCCqGSM49BAMCA0cAMEQCIBId/CcbT5MPFL90xa+XQz+gVTdRwsu6Bg7ehMso
Bj0oAiBjSlttd5yeuZGXBm+O0Gl+WdKV60QlrWutNewXFS4UpQ==
-----END CERTIFICATE-----


@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:PnEXteU3I7U0OKgE+oR3xjHdLWYTpJjM/jlzxtGU0uP2pUBuQv3LxtEz+cP0ZsafHLNq2iNJ7xpUEE0g4d3M296S56oSocK3fREWBiJFiaC7SAEUiil1l3UCwHn7LzmdEmn8Kq7T+FK89wwqtVWIASLo2gZC/yHE5eEanEATTchGLSNiHJRzZ8n0Ekm8EFUA6czOqA5nPQHaSmeLzu1g80lSSi1ICly6dJksa6DVucwOyVFYFEeq8Dfyc1eyP8L1ee0D7QFYBMduYOXTKPtNnyDmdaQMj7cMMvE7fn04idIiAqw=,iv:nvLmAfFk2GXnnUy+Afr648R60Ou13eu9UKykkiA8Y+4=,tag:lTTAxfG0EDCU6u7xlW6xSQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBEMjNWUm5NbktQeTRWRjJE\nWWFZc2Rsa3I5aitPSno1WnhORENNcng5OHprCjNUQVhBVHFBcWFjaW5UdmxKTnZw\nQlI4MDk5Wkp0RElCeWgzZ2dFQkF2dkkKLS0tIDVreTkydnJ0RDdHSHlQeVV6bGlP\nTmpJOVBSb2dkVS9TZG5SRmFjdnQ1b3cKQ5XvwH1jD4XPVs5RzOotBDq8kiE6S5k2\nDBv6ugjsM5qV7/oGP9H69aSB4jKPZjEn3yiNw++Oorc8uXd5kSGh7w==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:43:00Z",
"mac": "ENC[AES256_GCM,data:3jFf66UyZUWEtPdPu809LCS3K/Hc6zbnluystl3eXS+KGI+dCoYmN9hQruRNBRxf6jli2RIlArmmEPBDQVt67gG/qugTdT12krWnYAZ78iocmOnkf44fWxn/pqVnn4JYpjEYRgy8ueGDnUkwvpGWVZpcXw5659YeDQuYOJ2mq0U=,iv:3k7fBPrABdLItQ2Z+Mx8Nx0eIEKo93zG/23K+Q5Hl3I=,tag:aehAObdx//DEjbKlOeM7iQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}


@@ -1 +0,0 @@
../../../../../sops/users/admin


@@ -1,68 +0,0 @@
This module makes it easy to host clan-internal services that can be resolved
inside your VPN. It allows defining a custom top-level domain (e.g. `.clan`)
and exposing endpoints from a machine to others, which will be
accessible under `http://<service>.clan` in your browser.
The service consists of two roles:
- A `server` role: This is the DNS server that will be queried when trying to
resolve clan-internal services. It defines the top-level domain.
- A `default` role: This does two things. First, it sets up the nameservers so
that clan-internal queries are resolved via the `server` machine, while
external queries are resolved as normal via DHCP. Second, it allows exposing
services (see example below).
## Example Usage
Here, the machine `dnsserver` is designated as the internal DNS server for the TLD
`.foo`. `server01` will host an application reachable at
`http://one.foo`, and `server02` will be reachable at `http://two.foo`.
`client` is any other machine that is part of the clan but does not host any
services.
When `client` tries to resolve `http://one.foo`, the DNS query will be
routed to `dnsserver`, which will answer with `192.168.1.3`. If it tries to
resolve some external domain (e.g. `https://clan.lol`), the query will not be
routed to `dnsserver` but resolved as before, via the nameservers advertised by
DHCP.
```nix
inventory = {
machines = {
dnsserver = { }; # 192.168.1.2
server01 = { }; # 192.168.1.3
server02 = { }; # 192.168.1.4
client = { }; # 192.168.1.5
};
instances = {
coredns = {
module.name = "@clan/coredns";
module.input = "self";
# Add the default role to all machines, including `client`
roles.default.tags.all = { };
# DNS server
roles.server.machines."dnsserver".settings = {
ip = "192.168.1.2";
tld = "foo";
};
# First service
roles.default.machines."server01".settings = {
ip = "192.168.1.3";
services = [ "one" ];
};
# Second service
roles.default.machines."server02".settings = {
ip = "192.168.1.4";
services = [ "two" ];
};
};
};
};
```
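A quick sanity check of the resolution behaviour described above, run from `client` with the example inventory deployed (IPs as in the comments; a sketch, not part of the module):

```python
import socket

# Internal names are answered by dnsserver (192.168.1.2)
print(socket.gethostbyname("one.foo"))  # expected: 192.168.1.3
print(socket.gethostbyname("two.foo"))  # expected: 192.168.1.4

# External names keep resolving via the DHCP-provided resolvers
print(socket.gethostbyname("clan.lol"))
```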


@@ -1,157 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "coredns";
manifest.description = "Clan-internal DNS and service exposure";
manifest.categories = [ "Network" ];
manifest.readme = builtins.readFile ./README.md;
roles.server = {
interface =
{ lib, ... }:
{
options.tld = lib.mkOption {
type = lib.types.str;
default = "clan";
description = ''
Top-level domain for this instance. All services below this will be
resolved internally.
'';
};
options.ip = lib.mkOption {
type = lib.types.str;
# TODO: Set a default
description = "IP for the DNS to listen on";
};
};
perInstance =
{
roles,
settings,
...
}:
{
nixosModule =
{
lib,
pkgs,
...
}:
{
networking.firewall.allowedTCPPorts = [ 53 ];
networking.firewall.allowedUDPPorts = [ 53 ];
services.coredns =
let
# Get all service entries for one host
hostServiceEntries =
host:
lib.strings.concatStringsSep "\n" (
map (
service: "${service} IN A ${roles.default.machines.${host}.settings.ip} ; ${host}"
) roles.default.machines.${host}.settings.services
);
zonefile = pkgs.writeTextFile {
name = "db.${settings.tld}";
text = ''
$TTL 3600
@ IN SOA ns.${settings.tld}. admin.${settings.tld}. 1 7200 3600 1209600 3600
IN NS ns.${settings.tld}.
ns IN A ${settings.ip} ; DNS server
''
+ (lib.strings.concatStringsSep "\n" (
map (host: hostServiceEntries host) (lib.attrNames roles.default.machines)
));
};
in
{
enable = true;
config = ''
. {
forward . 1.1.1.1
cache 30
}
${settings.tld} {
file ${zonefile}
}
'';
};
};
};
};
roles.default = {
interface =
{ lib, ... }:
{
options.services = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = ''
Service endpoints this host exposes (without TLD). Each entry will
be resolved to <entry>.<tld> using the configured top-level domain.
'';
};
options.ip = lib.mkOption {
type = lib.types.str;
# TODO: Set a default
description = "IP on which the services will listen";
};
};
perInstance =
{ roles, ... }:
{
nixosModule =
{ lib, ... }:
{
networking.nameservers = map (m: "127.0.0.1:5353#${roles.server.machines.${m}.settings.tld}") (
lib.attrNames roles.server.machines
);
services.resolved.domains = map (m: "~${roles.server.machines.${m}.settings.tld}") (
lib.attrNames roles.server.machines
);
services.unbound = {
enable = true;
settings = {
server = {
port = 5353;
verbosity = 2;
interface = [ "127.0.0.1" ];
access-control = [ "127.0.0.0/8 allow" ];
do-not-query-localhost = "no";
domain-insecure = map (m: "${roles.server.machines.${m}.settings.tld}.") (
lib.attrNames roles.server.machines
);
};
# Default: forward everything else to DHCP-provided resolvers
forward-zone = [
{
name = ".";
forward-addr = "127.0.0.53@53"; # Forward to systemd-resolved
}
];
stub-zone = map (m: {
name = "${roles.server.machines.${m}.settings.tld}.";
stub-addr = "${roles.server.machines.${m}.settings.ip}";
}) (lib.attrNames roles.server.machines);
};
};
};
};
};
}


@@ -1,113 +0,0 @@
{
...
}:
{
name = "coredns";
clan = {
directory = ./.;
test.useContainers = true;
inventory = {
machines = {
dns = { }; # 192.168.1.2
server01 = { }; # 192.168.1.3
server02 = { }; # 192.168.1.4
client = { }; # 192.168.1.1
};
instances = {
coredns = {
module.name = "@clan/coredns";
module.input = "self";
roles.default.tags.all = { };
# First service
roles.default.machines."server01".settings = {
ip = "192.168.1.3";
services = [ "one" ];
};
# Second service
roles.default.machines."server02".settings = {
ip = "192.168.1.4";
services = [ "two" ];
};
# DNS server
roles.server.machines."dns".settings = {
ip = "192.168.1.2";
tld = "foo";
};
};
};
};
};
nodes = {
dns =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.net-tools ];
};
client =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.net-tools ];
};
server01 = {
services.nginx = {
enable = true;
virtualHosts."one.foo" = {
locations."/" = {
return = "200 'test server response one'";
extraConfig = "add_header Content-Type text/plain;";
};
};
};
};
server02 = {
services.nginx = {
enable = true;
virtualHosts."two.foo" = {
locations."/" = {
return = "200 'test server response two'";
extraConfig = "add_header Content-Type text/plain;";
};
};
};
};
};
testScript = ''
import json
start_all()
machines = [server01, server02, dns, client]
for m in machines:
m.systemctl("start network-online.target")
for m in machines:
m.wait_for_unit("network-online.target")
# import time
# time.sleep(2333333)
# This should work, but is broken in tests, I think. Instead we dig directly
# client.succeed("curl -k -v http://one.foo")
# client.succeed("curl -k -v http://two.foo")
answer = client.succeed("dig @192.168.1.2 one.foo")
assert "192.168.1.3" in answer, "IP not found"
answer = client.succeed("dig @192.168.1.2 two.foo")
assert "192.168.1.4" in answer, "IP not found"
'';
}


@@ -1,4 +0,0 @@
{
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"type": "age"
}


@@ -11,8 +11,7 @@
...
}:
let
jsonpath = "/tmp/telegraf.json";
auth_user = "prometheus";
jsonpath = /tmp/telegraf.json;
in
{
@@ -20,24 +19,18 @@
builtins.listToAttrs (
map (name: {
inherit name;
value.allowedTCPPorts = [
9273
9990
];
value.allowedTCPPorts = [ 9273 9990 ];
}) settings.interfaces
)
);
networking.firewall.allowedTCPPorts = lib.mkIf (settings.allowAllInterfaces == true) [
9273
9990
];
systemd.services.telegsaf-json.script = "${pkgs.miniserve}/bin/miniserve -p 9990 ${jsonpath}";
clan.core.vars.generators."telegraf" = {
networking.firewall.allowedTCPPorts = lib.mkIf (settings.allowAllInterfaces == true) [ 9273 ];
files.password.restartUnits = [ "telegraf.service" ];
files.password-env.restartUnits = [ "telegraf.service" ];
files.miniserve-auth.restartUnits = [ "telegraf.service" ];
clan.core.vars.generators."telegraf-password" = {
files.telegraf-password.neededFor = "users";
files.telegraf-password.restartUnits = [ "telegraf.service" ];
runtimeInputs = [
pkgs.coreutils
@@ -47,22 +40,16 @@
script = ''
PASSWORD=$(xkcdpass --numwords 4 --delimiter - --count 1 | tr -d "\n")
echo "BASIC_AUTH_PWD=$PASSWORD" > "$out"/password-env
echo "${auth_user}:$PASSWORD" > "$out"/miniserve-auth
echo "$PASSWORD" | tr -d "\n" > "$out"/password
echo "BASIC_AUTH_PWD=$PASSWORD" > "$out"/telegraf-password
'';
};
systemd.services.telegraf-json = {
enable = true;
wantedBy = [ "multi-user.target" ];
script = "${pkgs.miniserve}/bin/miniserve -p 9990 ${jsonpath} --auth-file ${config.clan.core.vars.generators.telegraf.files.miniserve-auth.path}";
};
services.telegraf = {
enable = true;
environmentFiles = [
(builtins.toString config.clan.core.vars.generators.telegraf.files.password-env.path)
(builtins.toString
config.clan.core.vars.generators."telegraf-password".files.telegraf-password.path
)
];
extraConfig = {
agent.interval = "60s";
@@ -77,35 +64,32 @@
exec =
let
nixosSystems = pkgs.writeShellScript "current-system" ''
printf "nixos_systems,current_system=%s,booted_system=%s,current_kernel=%s,booted_kernel=%s present=0\n" \
"$(readlink /run/current-system)" "$(readlink /run/booted-system)" \
"$(basename $(echo /run/current-system/kernel-modules/lib/modules/*))" \
"$(basename $(echo /run/booted-system/kernel-modules/lib/modules/*))"
currentSystemScript = pkgs.writeShellScript "current-system" ''
printf "current_system,path=%s present=0\n" $(readlink /run/current-system)
'';
in
[
{
# Expose the path to current-system as metric. We use
# this to check if the machine is up-to-date.
commands = [ nixosSystems ];
commands = [ currentSystemScript ];
data_format = "influx";
}
];
};
# Sadly, there doesn't seem to be a telegraf http_client output plugin
outputs.prometheus_client = {
listen = ":9273";
metric_version = 2;
basic_username = "${auth_user}";
basic_password = "$${BASIC_AUTH_PWD}";
};
outputs.file = {
files = [ jsonpath ];
data_format = "json";
json_timestamp_units = "1s";
};
outputs.prometheus_client = {
listen = ":9273";
metric_version = 2;
basic_username = "prometheus";
basic_password = "$${BASIC_AUTH_PWD}";
};
};
};
};
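Taken together, this change (the commit's "expose metrics as json") has each machine write its telegraf metrics as JSON to `/tmp/telegraf.json` and serve that file on port 9990 via miniserve, behind basic auth as user `prometheus` with the generated password. A minimal client sketch; the host name and URL path are assumptions for illustration:

```python
import base64
import json
import urllib.request

host = "machine.example"         # hypothetical clan machine
password = "generated-password"  # the password written by the telegraf vars generator
token = base64.b64encode(f"prometheus:{password}".encode()).decode()

req = urllib.request.Request(
    f"http://{host}:9990/telegraf.json",  # path assumed; miniserve serves the file it is given
    headers={"Authorization": f"Basic {token}"},
)
with urllib.request.urlopen(req) as resp:
    # telegraf's file output appends one JSON document per flush interval
    for line in resp.read().decode().splitlines():
        if line.strip():
            print(json.loads(line))
```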


@@ -0,0 +1,37 @@
This service automatically generates the `system.stateVersion` of the NixOS
installation.
Possible values:
[system.stateVersion](https://search.nixos.org/options?channel=unstable&show=system.stateVersion&from=0&size=50&sort=relevance&type=packages&query=stateVersion)
## Usage
The following configuration will set `stateVersion` for all machines:
```
inventory.instances = {
state-version = {
module = {
name = "state-version";
input = "clan";
};
roles.default.tags.all = { };
};
};
```
## Migration
If you are already setting `system.stateVersion`, either let the automatic
generation happen, or trigger the generation manually for the machine. The
service will use the specified version if one is already supplied through the
config.
To manually generate the version for a specified machine run:
```
clan vars generate [MACHINE]
```
If the setting was already set, you can then remove `system.stateVersion` from
your machine configuration. For new machines, just import the service as shown
above.


@@ -0,0 +1,50 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "clan-core/state-version";
manifest.description = "Automatically generate the state version of the nixos installation.";
manifest.categories = [ "System" ];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
perInstance =
{ ... }:
{
nixosModule =
{
config,
lib,
...
}:
let
var = config.clan.core.vars.generators.state-version.files.version or { };
in
{
warnings = [
''
The clan.state-version service is deprecated and will be
removed on 2025-07-15 in favor of a nix option.
Please migrate your configuration to use `clan.core.settings.state-version.enable = true` instead.
''
];
system.stateVersion = lib.mkDefault (lib.removeSuffix "\n" var.value);
clan.core.vars.generators.state-version = {
files.version = {
secret = false;
value = lib.mkDefault config.system.nixos.release;
};
runtimeInputs = [ ];
script = ''
echo -n ${config.system.stateVersion} > "$out"/version
'';
};
};
};
};
}


@@ -3,16 +3,14 @@ let
module = lib.modules.importApply ./default.nix { };
in
{
clan.modules = {
coredns = module;
};
clan.modules.state-version = module;
perSystem =
{ ... }:
{
clan.nixosTests.coredns = {
clan.nixosTests.state-version = {
imports = [ ./tests/vm/default.nix ];
clan.modules."@clan/coredns" = module;
clan.modules."@clan/state-version" = module;
};
};
}


@@ -0,0 +1,22 @@
{ lib, ... }:
{
name = "service-state-version";
clan = {
directory = ./.;
inventory = {
machines.server = { };
instances.default = {
module.name = "@clan/state-version";
module.input = "self";
roles.default.machines."server" = { };
};
};
};
nodes.server = { };
testScript = lib.mkDefault ''
start_all()
'';
}


@@ -17,20 +17,6 @@
};
};
# Deploy user Carol on all machines. Prompt only once and use the
# same password on all machines. (`share = true`)
user-carol = {
module = {
name = "users";
input = "clan";
};
roles.default.tags.all = { };
roles.default.settings = {
user = "carol";
share = true;
};
};
# Deploy user bob only on his laptop. Prompt for a password.
user-bob = {
module = {
@@ -43,44 +29,3 @@
};
}
```
## Migration from `root-password` module
The deprecated `clan.root-password` module has been replaced by the `users` module. Here's how to migrate:
### 1. Update your flake configuration
Replace the `root-password` module import with a `users` service instance:
```nix
# OLD - Remove this from your nixosModules:
imports = [
self.inputs.clan-core.clanModules.root-password
];
# NEW - Add to inventory.instances or machines/flake-module.nix:
instances = {
users-root = {
module.name = "users";
module.input = "clan-core";
roles.default.tags.nixos = { };
roles.default.settings = {
user = "root";
prompt = false; # Set to true if you want to be prompted
groups = [ ];
};
};
};
```
### 2. Migrate vars
The vars structure has changed from `root-password` to `user-password-root`:
```bash
# For each machine, rename the vars directories:
cd vars/per-machine/<machine-name>/
mv root-password user-password-root
mv user-password-root/password-hash user-password-root/user-password-hash
mv user-password-root/password user-password-root/user-password
```


@@ -59,17 +59,6 @@
- "input" - Allows the user to access input devices.
'';
};
share = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = ''
Whether the user should have the same password on all machines.
By default, you will be prompted for a new password for every host,
unless `generate` is set to `true`.
'';
};
};
};
@@ -93,6 +82,7 @@
};
clan.core.vars.generators."user-password-${settings.user}" = {
files.user-password-hash.neededFor = "users";
files.user-password-hash.restartUnits = lib.optional (config.services.userborn.enable) "userborn.service";
files.user-password.deploy = false;
@@ -117,8 +107,6 @@
pkgs.mkpasswd
];
share = settings.share;
script =
(
if settings.prompt then


@@ -1,5 +1,6 @@
#!/usr/bin/env python3
"""IPv6 address allocator for WireGuard networks.
"""
IPv6 address allocator for WireGuard networks.
Network layout:
- Base network: /40 ULA prefix (fd00::/8 + 32 bits from hash)
@@ -12,11 +13,6 @@ import ipaddress
import sys
from pathlib import Path
# Constants for argument count validation
MIN_ARGS_BASE = 4
MIN_ARGS_CONTROLLER = 5
MIN_ARGS_PEER = 5
def hash_string(s: str) -> str:
"""Generate SHA256 hash of string."""
@@ -24,7 +20,8 @@ def hash_string(s: str) -> str:
def generate_ula_prefix(instance_name: str) -> ipaddress.IPv6Network:
"""Generate a /40 ULA prefix from instance name.
"""
Generate a /40 ULA prefix from instance name.
Format: fd{32-bit hash}/40
This gives us fd00:0000:0000::/40 through fdff:ffff:ff00::/40
@@ -44,14 +41,15 @@ def generate_ula_prefix(instance_name: str) -> ipaddress.IPv6Network:
prefix = f"fd{prefix_bits:08x}"
prefix_formatted = f"{prefix[:4]}:{prefix[4:8]}::/40"
return ipaddress.IPv6Network(prefix_formatted)
network = ipaddress.IPv6Network(prefix_formatted)
return network
def generate_controller_subnet(
base_network: ipaddress.IPv6Network,
controller_name: str,
base_network: ipaddress.IPv6Network, controller_name: str
) -> ipaddress.IPv6Network:
"""Generate a /56 subnet for a controller from the base /40 network.
"""
Generate a /56 subnet for a controller from the base /40 network.
We have 16 bits (40 to 56) to allocate controller subnets.
This allows for 65,536 possible controller subnets.
@@ -64,11 +62,14 @@ def generate_controller_subnet(
# The controller subnet is at base_prefix:controller_id::/56
base_int = int(base_network.network_address)
controller_subnet_int = base_int | (controller_id << (128 - 56))
return ipaddress.IPv6Network((controller_subnet_int, 56))
controller_subnet = ipaddress.IPv6Network((controller_subnet_int, 56))
return controller_subnet
def generate_peer_suffix(peer_name: str) -> str:
"""Generate a unique 64-bit host suffix for a peer.
"""
Generate a unique 64-bit host suffix for a peer.
This suffix will be used in all controller subnets to create unique addresses.
Format: :xxxx:xxxx:xxxx:xxxx (64 bits)
@@ -78,13 +79,14 @@ def generate_peer_suffix(peer_name: str) -> str:
suffix_bits = h[:16]
# Format as IPv6 suffix without leading colon
return f"{suffix_bits[0:4]}:{suffix_bits[4:8]}:{suffix_bits[8:12]}:{suffix_bits[12:16]}"
suffix = f"{suffix_bits[0:4]}:{suffix_bits[4:8]}:{suffix_bits[8:12]}:{suffix_bits[12:16]}"
return suffix
def main() -> None:
if len(sys.argv) < MIN_ARGS_BASE:
if len(sys.argv) < 4:
print(
"Usage: ipv6_allocator.py <output_dir> <instance_name> <controller|peer> <machine_name>",
"Usage: ipv6_allocator.py <output_dir> <instance_name> <controller|peer> <machine_name>"
)
sys.exit(1)
@@ -96,7 +98,7 @@ def main() -> None:
base_network = generate_ula_prefix(instance_name)
if node_type == "controller":
if len(sys.argv) < MIN_ARGS_CONTROLLER:
if len(sys.argv) < 5:
print("Controller name required")
sys.exit(1)
@@ -112,7 +114,7 @@ def main() -> None:
(output_dir / "prefix").write_text(prefix_str)
elif node_type == "peer":
if len(sys.argv) < MIN_ARGS_PEER:
if len(sys.argv) < 5:
print("Peer name required")
sys.exit(1)
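Stripped of the argument-count changes this diff is concerned with, the allocation scheme itself is compact. A condensed sketch of the two derivations, assuming SHA-256 for `hash_string` as its docstring states:

```python
import hashlib
import ipaddress

def hash_string(s: str) -> str:
    return hashlib.sha256(s.encode()).hexdigest()

def ula_prefix(instance_name: str) -> ipaddress.IPv6Network:
    # fd00::/8 plus 32 bits of the hash gives a deterministic /40 ULA network
    prefix_bits = int(hash_string(instance_name)[:8], 16)
    prefix = f"fd{prefix_bits:08x}"
    return ipaddress.IPv6Network(f"{prefix[:4]}:{prefix[4:8]}::/40")

def controller_subnet(base: ipaddress.IPv6Network, controller_name: str) -> ipaddress.IPv6Network:
    # 16 bits (bits 40..56) select one of 65,536 possible controller /56 subnets
    controller_id = int(hash_string(controller_name)[:4], 16)
    subnet_int = int(base.network_address) | (controller_id << (128 - 56))
    return ipaddress.IPv6Network((subnet_int, 56))

base = ula_prefix("my-wireguard-instance")
print(base, controller_subnet(base, "controller1"))
```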


@@ -1,33 +0,0 @@
This module sets up [yggdrasil](https://yggdrasil-network.github.io/) across
your clan.
Yggdrasil is designed to be a future-proof and decentralised alternative to
the structured routing protocols commonly used today on the internet. Inside
your clan, it allows you to reach all of your machines.
## Example Usage
While you can specify statically configured peers for each host, yggdrasil does
auto-discovery of local peers.
```nix
inventory = {
machines = {
peer1 = { };
peer2 = { };
};
instances = {
yggdrasil = {
# Deploy on all machines
roles.default.tags.all = { };
# Or individual hosts
roles.default.machines.peer1 = { };
roles.default.machines.peer2 = { };
};
};
};
```


@@ -1,116 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "clan-core/yggdrasil";
manifest.description = "Yggdrasil encrypted IPv6 routing overlay network";
roles.default = {
interface =
{ lib, ... }:
{
options.extraMulticastInterfaces = lib.mkOption {
type = lib.types.listOf lib.types.attrs;
default = [ ];
description = ''
Additional interfaces to use for Multicast. See
https://yggdrasil-network.github.io/configurationref.html#multicastinterfaces
for reference.
'';
example = [
{
Regex = "(wg).*";
Beacon = true;
Listen = true;
Port = 5400;
Priority = 1020;
}
];
};
options.peers = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = ''
Static peers to configure for this host.
If not set, local peers will be auto-discovered
'';
example = [
"tcp://192.168.1.1:6443"
"quic://192.168.1.1:6443"
"tls://192.168.1.1:6443"
"ws://192.168.1.1:6443"
];
};
};
perInstance =
{ settings, ... }:
{
nixosModule =
{
config,
pkgs,
...
}:
{
clan.core.vars.generators.yggdrasil = {
files.privateKey = { };
files.publicKey.secret = false;
files.address.secret = false;
runtimeInputs = with pkgs; [
yggdrasil
jq
openssl
];
script = ''
# Generate private key
openssl genpkey -algorithm Ed25519 -out $out/privateKey
# Generate corresponding public key
openssl pkey -in $out/privateKey -pubout -out $out/publicKey
# Derive IPv6 address from key
echo "{ \"PrivateKeyPath\": \"$out/privateKey\"}" | yggdrasil -useconf -address > $out/address
'';
};
systemd.services.yggdrasil.serviceConfig.BindReadOnlyPaths = [
"${config.clan.core.vars.generators.yggdrasil.files.privateKey.path}:/var/lib/yggdrasil/key"
];
services.yggdrasil = {
enable = true;
openMulticastPort = true;
persistentKeys = true;
settings = {
PrivateKeyPath = "/var/lib/yggdrasil/key";
IfName = "ygg";
Peers = settings.peers;
MulticastInterfaces = [
# Ethernet is preferred over WIFI
{
Regex = "(eth|en).*";
Beacon = true;
Listen = true;
Port = 5400;
Priority = 1024;
}
{
Regex = "(wl).*";
Beacon = true;
Listen = true;
Port = 5400;
Priority = 1025;
}
]
++ settings.extraMulticastInterfaces;
};
};
networking.firewall.allowedTCPPorts = [ 5400 ];
};
};
};
}


@@ -1,24 +0,0 @@
{
self,
lib,
...
}:
let
module = lib.modules.importApply ./default.nix {
inherit (self) packages;
};
in
{
clan.modules = {
yggdrasil = module;
};
perSystem =
{ ... }:
{
clan.nixosTests.yggdrasil = {
imports = [ ./tests/vm/default.nix ];
clan.modules.yggdrasil = module;
};
};
}


@@ -1,93 +0,0 @@
{
name = "yggdrasil";
clan = {
test.useContainers = false;
directory = ./.;
inventory = {
machines.peer1 = { };
machines.peer2 = { };
instances."yggdrasil" = {
module.name = "yggdrasil";
module.input = "self";
# Assign the roles to the two machines
roles.default.machines.peer1 = { };
roles.default.machines.peer2 = { };
};
};
};
# TODO: remove after testing, this is just to make @pinpox's life easier
nodes =
let
c =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [ net-tools ];
console = {
font = "Lat2-Terminus16";
keyMap = "colemak";
};
};
in
{
peer1 = c;
peer2 = c;
};
testScript = ''
start_all()
# Wait for both machines to be ready
peer1.wait_for_unit("multi-user.target")
peer2.wait_for_unit("multi-user.target")
# Check that yggdrasil service is running on both machines
peer1.wait_for_unit("yggdrasil")
peer2.wait_for_unit("yggdrasil")
peer1.succeed("systemctl is-active yggdrasil")
peer2.succeed("systemctl is-active yggdrasil")
# Check that both machines have yggdrasil network interfaces
# Yggdrasil creates a tun interface (usually tun0)
peer1.wait_until_succeeds("ip link show | grep -E 'ygg'", 30)
peer2.wait_until_succeeds("ip link show | grep -E 'ygg'", 30)
# Get yggdrasil IPv6 addresses from both machines
peer1_ygg_ip = peer1.succeed("yggdrasilctl -json getself | jq -r '.address'").strip()
peer2_ygg_ip = peer2.succeed("yggdrasilctl -json getself | jq -r '.address'").strip()
# TODO: enable this check. Values don't match up yet, but I can't
# update-vars to test, because the script is broken.
# Compare runtime addresses with saved addresses from vars
# expected_peer1_ip = "${builtins.readFile ./vars/per-machine/peer1/yggdrasil/address/value}"
# expected_peer2_ip = "${builtins.readFile ./vars/per-machine/peer2/yggdrasil/address/value}"
print(f"peer1 yggdrasil IP: {peer1_ygg_ip}")
print(f"peer2 yggdrasil IP: {peer2_ygg_ip}")
# print(f"peer1 expected IP: {expected_peer1_ip}")
# print(f"peer2 expected IP: {expected_peer2_ip}")
#
# # Verify that runtime addresses match expected addresses
# assert peer1_ygg_ip == expected_peer1_ip, f"peer1 runtime IP {peer1_ygg_ip} != expected IP {expected_peer1_ip}"
# assert peer2_ygg_ip == expected_peer2_ip, f"peer2 runtime IP {peer2_ygg_ip} != expected IP {expected_peer2_ip}"
# Wait a bit for the yggdrasil network to establish connectivity
import time
time.sleep(10)
# Test connectivity: peer1 should be able to ping peer2 via yggdrasil
peer1.succeed(f"ping -6 -c 3 {peer2_ygg_ip}")
# Test connectivity: peer2 should be able to ping peer1 via yggdrasil
peer2.succeed(f"ping -6 -c 3 {peer1_ygg_ip}")
'';
}


@@ -1,6 +0,0 @@
[
{
"publickey": "age1r264u9yngfq8qkrveh4tn0rhfes02jfgrtqufdx4n4m3hs4rla2qx0rk4d",
"type": "age"
}
]


@@ -1,6 +0,0 @@
[
{
"publickey": "age1p8kuf8s0nfekwreh4g38cgghp4nzszenx0fraeyky2me0nly2scstqunx8",
"type": "age"
}
]


@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:3dolkgdLC4y5fps4gGb9hf4QhwkUUBodlMOKT+/+erO70FB/pzYBg0mQjQy/uqjINzfIiM32iwVDnx3/Yyz5BDRo2CK+83UGEi4=,iv:FRp1HqlU06JeyEXXFO5WxJWxeLnmUJRWGuFKcr4JFOM=,tag:rbi30HJuqPHdU/TqInGXmg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBoYXBxS1JuNW9NeC9YU0xY\nK2xQWDhUYjZ4VzZmeUw1aG9UN2trVnBGQ0J3Ckk0V3d0UFBkT0RnZjBoYjNRVEVW\nN2VEdCtUTUUwenhJSEErT0MyWDA2bHMKLS0tIHJJSzVtR3NCVXozbzREWjltN2ZG\nZm44Y1c4MWNIblcxbmt2YkdxVE10Z1UKmJKEjiYZ9U47QACkbacNTirQIcCvFjM/\nwVxSEVq524sK8LCyIEvsG4e3I3Kn0ybZjoth7J/jg7J4gb8MVw+leQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-16T08:13:06Z",
"mac": "ENC[AES256_GCM,data:6HJDkg0AWz+zx5niSIyBAGGaeemwPOqTCA/Fa6VjjyCh1wOav3OTzy/DRBOCze4V52hMGV3ULrI2V7G7DdvQy6LqiKBTQX5ZbWm3IxLASamJBjUJ1LvTm97WvyL54u/l2McYlaUIC8bYDl1UQUqDMo9pN4GwdjsRNCIl4O0Z7KY=,iv:zkWfYuhqwKpZk/16GlpKdAi2qS6LiPvadRJmxp2ZW+w=,tag:qz1gxVnT3OjWxKRKss5W8w==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}


@@ -1 +0,0 @@
../../../users/admin


@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:BW15ydnNpr0NIXu92nMsD/Y52BDEOsdZg2/fiM8lwSTJN3lEymrIBYsRrcPAnGpFb52d7oN8zdNz9WoW3f/Xwl136sWDz/sc0k4=,iv:7m77nOR/uXLMqXB5QmegtoYVqByJVFFqZIVOtlAonzg=,tag:8sUo9DRscNRajrk+CzHzHw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBLVWpnSlJOTVU4NWRMSCto\nS0RaR2RCTUJjT1J0VzRPVTdPL2N5Yjl3c0EwCmlabm1aSzdlV29nb3lrZFBEZXR6\nRjI2TGZUNW1KQ3pLbDFscUlKSnVBNWcKLS0tIDlLR1VFSTRHeWNiQ29XK1pUUnlr\nVkVHOXdJeHhpcldYNVhpK1V6Nng0eW8KSsqJejY1kll6bUBUngiolCB7OhjyI0Gc\nH+9OrORt/nLnc51eo/4Oh9vp/dvSZzuW9MOF9m0f6B3WOFRVMAbukQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-16T08:13:15Z",
"mac": "ENC[AES256_GCM,data:dyLnGXBC4nGOgX2TrGhf8kI/+Et0PRy+Ppr228y3LYzgcmUunZl9R8+QXJN51OJSQ63gLun5TBw0v+3VnRVBodlhqTDtfACJ7eILCiArPJqeZoh5MR6HkF31yfqTRlXl1i6KHRPVWvjRIdwJ9yZVN1XNAUsxc7xovqS6kkkGPsA=,iv:7yXnpbU7Zf7GH1+Uimq8eXDUX1kO/nvTaGx4nmTrKdM=,tag:WNn9CUOdCAlksC0Qln5rVg==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}


@@ -1 +0,0 @@
../../../users/admin


@@ -1,4 +0,0 @@
{
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"type": "age"
}


@@ -1 +0,0 @@
200:91bb:f1ec:c580:6d52:70b3:4d60:7bf2


@@ -1 +0,0 @@
../../../../../../sops/machines/peer1


@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:/YoEoYY8CmqK4Yk4fmZieIHIvRn779aikoo3+6SWI5SxuU8TLJVY9+Q7mRmnbCso/8RPMICWkZMIkfbxYi6Dwc4UFmLwPqCoeAYsFBiHsJ6QUoTm1qtDDfXcruFs8Mo93ZmJb7oJIC0a+sVbB5L1NsGmG3g+a+g=,iv:KrMjRIQXutv9WdNzI5VWD6SMDnGzs9LFWcG2d9a6XDg=,tag:x5gQN9FaatRBcHOyS2cicw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBwQ0FNU1c4RDNKTHRtMy8z\nSEtQRzFXTVFvcitMWjVlMURPVkxsZC9wU25nCmt4TS81bnJidzFVZkxEY0ovWUtm\nVk5PMjZEWVJCei9rVTJ2bG1ZNWJoZGMKLS0tIHgyTEhIdUQ3YnlKVi9lNVpUZ0dI\nd3BLL05oMXFldGVKbkpoaklscDJMR3MKpUl/KNPrtyt4/bu3xXUAQIkugQXWjlPf\nFqFc1Vnqxynd+wJkkd/zYs4XcOraogOUj/WIRXkqXgdDDoEqb/VIBg==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1r264u9yngfq8qkrveh4tn0rhfes02jfgrtqufdx4n4m3hs4rla2qx0rk4d",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSArOUdkd3VVSTU3NHZ6aURB\na2dYMXhyMmVLMDVlM0dzVHpxbUw3K3BFcVNzCm1LczFyd3BubGwvRVUwQ1Q0aWZR\nL1hlb1VpZ3JnTVQ4Zm9wVnlJYVNuL00KLS0tIHlMRVMyNW9rWG45bVVtczF3MVNq\nL2d2RXhEeVcyRVNmSUF6cks5VStxVkUKugI1iDei32852wNV/zPlyVwKJH1UXOlY\nFQq7dqMJMWI6a5F+z4UdaHvzyKxF2CWBG7DVnaUSpq7Q3uGmibsSOQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-16T08:13:07Z",
"mac": "ENC[AES256_GCM,data:LIlgQgiQt9aHXagpXphxSnpju+DOxuBvPpz5Rr43HSwgbWFgZ8tqlH2C1xo2xsJIexWkc823J9txpy+PLFXSm4/NbQGbKSymjHNEIYaU1tBSQ0KZ+s22X3/ku3Hug7/MkEKv5JsroTEcu3FK6Fv7Mo0VWqUggenl9AsJ5BocUO4=,iv:LGOnpWsod1ek4isWVrHrS+ZOCPrhwlPliPOTiMVY0zY=,tag:tRuHBSd9HxOswNcqjvzg0w==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}


@@ -1 +0,0 @@
../../../../../../sops/users/admin


@@ -1,3 +0,0 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAtyIHCZ0/yVbHpllPwgaWIFQ3Kb4fYMcOujgVmttA7gM=
-----END PUBLIC KEY-----


@@ -1 +0,0 @@
200:bb1f:6f1c:1852:173a:cb5e:5726:870


@@ -1 +0,0 @@
../../../../../../sops/machines/peer2


@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:b1dbaJQGr8mnISch0iej+FhMnYOIFxOJYCvWDQseiczltXsBetbYr+89co5Sp7wmhQrH3tlWaih3HZe294Y9j8XvwpNUtmW3RZHsU/6EWA50LKcToFGFCcEBM/Nz9RStQXnjwLbRSLFuMlfoQttUATB2XYSm+Ng=,iv:YCeE3KbHaBhR0q10qO8Og1LBT5OUjsIDxfclpcLJh6I=,tag:M7y9HAC+fh8Fe8HoqQrnbg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1p8kuf8s0nfekwreh4g38cgghp4nzszenx0fraeyky2me0nly2scstqunx8",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA3NTVOT2MxaDJsTXloVVcv\nellUdnVxSVdnZ1NBUGEwLzBiTGoyZENJdm1RClp5eHY3dkdVSzVJYk52dWFCQnlG\nclIrQUJ5RXRYTythWTFHR1NhVHlyMVkKLS0tIEFza3YwcUNiYUV5VWJQcTljY2ZR\nUnc3U1VubmZRTCtTTC9rd1kydnNYa00KqdwV3eRHA6Y865JXQ7lxbS6aTIGf/kQM\nqDFdiUdvEDqo19Df3QBJ7amQ1YjPqSIRbO8CJNPI8JqQJKTaBOgm9g==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBzTmV0Skd5Zzk1SXc4ZDc3\nRi9wTVdDM1lTc3N0MXpNNVZjUWJ6VDZHd3hzCkpRZnNtSU14clkybWxvSEhST2py\nR29jcHdXSCtFRE02ejB0dzN1eGVQZ1kKLS0tIE9YVjJBRTg1SGZ5S3lYdFRUM3RW\nOGZjUEhURnJIVTBnZG43UFpTZkdseFUKOgHC10Rqf/QnzfCHUMEPb1PVo9E6qlpo\nW/F1I8ZqkFI8sWh54nilXeR8i8w+QCthliBxsxdDTv2FSxdnKNHu3A==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-16T08:13:15Z",
"mac": "ENC[AES256_GCM,data:0byytsY3tFK3r4qhM1+iYe9KYYKJ8cJO/HonYflbB0iTD+oRBnnDUuChPdBK50tQxH8aInlvgIGgi45OMk7IrFBtBYQRgFBUR5zDujzel9hJXQvpvqgvRMkzA542ngjxYmZ74mQB+pIuFhlVJCfdTN+smX6N4KyDRj9d8aKK0Qs=,iv:DC8nwgUAUSdOCr8TlgJX21SxOPOoJKYeNoYvwj5b9OI=,tag:cbJ8M+UzaghkvtEnRCp+GA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}


@@ -1 +0,0 @@
../../../../../../sops/users/admin


@@ -1,3 +0,0 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAonBIcfPW9GKaUNRs+8epsgQOShNbR9v26+3H80an2/c=
-----END PUBLIC KEY-----

devFlake/flake.lock (generated)

@@ -3,10 +3,10 @@
"clan-core-for-checks": {
"flake": false,
"locked": {
"lastModified": 1756166884,
"narHash": "sha256-skg4rwpbCjhpLlrv/Pndd43FoEgrJz98WARtGLhCSzo=",
"lastModified": 1755649112,
"narHash": "sha256-Tk/Sjlb7W+5usS4upH0la4aGYs2velkHADnhhn2xO88=",
"ref": "main",
"rev": "f7414d7e6e58709af27b6fe16eb530278e81eaaf",
"rev": "bdab3e23af4a9715dfa6346f344f1bd428508672",
"shallow": true,
"type": "git",
"url": "https://git.clan.lol/clan/clan-core"
@@ -84,11 +84,11 @@
},
"nixpkgs-dev": {
"locked": {
"lastModified": 1756662818,
"narHash": "sha256-Opggp4xiucQ5gBceZ6OT2vWAZOjQb3qULv39scGZ9Nw=",
"lastModified": 1755628699,
"narHash": "sha256-IAM29K+Cz9pu90lDDkJpmOpWzQ9Ed0FNkRJcereY+rM=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "2e6aeede9cb4896693434684bb0002ab2c0cfc09",
"rev": "01b1b3809cfa3b0fbca8d75c9cf4b2efdb8182cf",
"type": "github"
},
"original": {
@@ -165,11 +165,11 @@
"nixpkgs": []
},
"locked": {
"lastModified": 1756662192,
"narHash": "sha256-F1oFfV51AE259I85av+MAia221XwMHCOtZCMcZLK2Jk=",
"lastModified": 1754847726,
"narHash": "sha256-2vX8QjO5lRsDbNYvN9hVHXLU6oMl+V/PsmIiJREG4rE=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "1aabc6c05ccbcbf4a635fb7a90400e44282f61c4",
"rev": "7d81f6fb2e19bf84f1c65135d1060d829fae2408",
"type": "github"
},
"original": {


@@ -33,6 +33,7 @@
self'.packages.tea-create-pr
self'.packages.merge-after-ci
self'.packages.pending-reviews
self'.packages.agit
# treefmt with config defined in ./flake-parts/formatting.nix
config.treefmt.build.wrapper
];
@@ -45,7 +46,7 @@
ln -sfT ${inputs.nix-select} "$PRJ_ROOT/pkgs/clan-cli/clan_lib/select"
# Generate classes.py from schemas
${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.clanSchemaJson}/schema.json $PRJ_ROOT/pkgs/clan-cli/clan_lib/nix_models/clan.py
${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.clan-schema-abstract}/schema.json $PRJ_ROOT/pkgs/clan-cli/clan_lib/nix_models/clan.py
'';
};
};

docs/.gitignore (vendored)

@@ -1,5 +1,5 @@
/site/reference
/site/static
/site/options
/site/options-page
/site/openapi.json
!/site/static/extra.css


@@ -1,11 +1,13 @@
{
lib,
config,
...
}:
let
suffix = config.clan.core.vars.generators.disk-id.files.diskId.value;
mirrorBoot = idx: {
# suffix is to prevent disk name collisions
name = idx;
name = idx + suffix;
type = "disk";
device = "/dev/disk/by-id/${idx}";
content = {


@@ -1,11 +1,13 @@
{
lib,
config,
...
}:
let
suffix = config.clan.core.vars.generators.disk-id.files.diskId.value;
mirrorBoot = idx: {
# suffix is to prevent disk name collisions
name = idx;
name = idx + suffix;
type = "disk";
device = "/dev/disk/by-id/${idx}";
content = {


@@ -2,11 +2,11 @@ site_name: Clan Documentation
site_url: https://docs.clan.lol
repo_url: https://git.clan.lol/clan/clan-core/
repo_name: "_>"
edit_uri: _edit/main/docs/site/
edit_uri: _edit/main/docs/docs/
validation:
omitted_files: warn
absolute_links: ignore
absolute_links: warn
unrecognized_links: warn
markdown_extensions:
@@ -59,15 +59,14 @@ nav:
- Configure Disk Config: guides/getting-started/choose-disk.md
- Update Machine: guides/getting-started/update.md
- Continuous Integration: guides/getting-started/flake-check.md
- Convert Existing NixOS Config: guides/getting-started/convert-flake.md
- ClanServices: guides/clanServices.md
- Using Services: guides/clanServices.md
- Backup & Restore: guides/backups.md
- Disk Encryption: guides/disk-encryption.md
- Age Plugins: guides/age-plugins.md
- Secrets management: guides/secrets.md
- Networking: guides/networking.md
- Target Host: guides/target-host.md
- Zerotier VPN: guides/mesh-vpn.md
- How to disable Secure Boot: guides/secure-boot.md
- Secure Boot: guides/secure-boot.md
- Flake-parts: guides/flake-parts.md
- macOS: guides/macos.md
- Contributing:
@@ -78,7 +77,8 @@ nav:
- Writing a Service Module: guides/services/community.md
- Writing a Disko Template: guides/disko-templates/community.md
- Migrations:
- Migrate from clan modules to services: guides/migrations/migrate-inventory-services.md
- Migrate existing Flakes: guides/migrations/migration-guide.md
- Migrate inventory Services: guides/migrations/migrate-inventory-services.md
- Facts Vars Migration: guides/migrations/migration-facts-vars.md
- Disk id: guides/migrations/disk-id.md
- Concepts:
@@ -88,14 +88,12 @@ nav:
- Templates: concepts/templates.md
- Reference:
- Overview: reference/index.md
- Browse Options: "/options"
- Clan Options: options.md
- Services:
- Overview:
- reference/clanServices/index.md
- reference/clanServices/admin.md
- reference/clanServices/borgbackup.md
- reference/clanServices/certificates.md
- reference/clanServices/coredns.md
- reference/clanServices/data-mesher.md
- reference/clanServices/dyndns.md
- reference/clanServices/emergency-access.md
@@ -108,12 +106,12 @@ nav:
- reference/clanServices/monitoring.md
- reference/clanServices/packages.md
- reference/clanServices/sshd.md
- reference/clanServices/state-version.md
- reference/clanServices/syncthing.md
- reference/clanServices/trusted-nix-caches.md
- reference/clanServices/users.md
- reference/clanServices/wifi.md
- reference/clanServices/wireguard.md
- reference/clanServices/yggdrasil.md
- reference/clanServices/zerotier.md
- API: reference/clanServices/clan-service-author-interface.md
@@ -157,7 +155,6 @@ nav:
- 05-deployment-parameters: decisions/05-deployment-parameters.md
- Template: decisions/_template.md
- Glossary: reference/glossary.md
- Browse Options: "/options"
docs_dir: site
site_dir: out
@@ -175,7 +172,6 @@ theme:
- content.code.annotate
- content.code.copy
- content.tabs.link
- content.action.edit
icon:
repo: fontawesome/brands/git
custom_dir: overrides


@@ -54,9 +54,9 @@ pkgs.stdenv.mkDerivation {
chmod -R +w ./site/reference
echo "Generated API documentation in './site/reference/' "
rm -rf ./site/options
cp -r ${docs-options} ./site/options
chmod -R +w ./site/options
rm -r ./site/options-page || true
cp -r ${docs-options} ./site/options-page
chmod -R +w ./site/options-page
mkdir -p ./site/static/asciinema-player
ln -snf ${asciinema-player-js} ./site/static/asciinema-player/asciinema-player.min.js


@@ -25,7 +25,7 @@
serviceModules = self.clan.modules;
baseHref = "/options/";
baseHref = "/options-page/";
getRoles =
module:
@@ -126,7 +126,7 @@
nestedSettingsOption = mkOption {
type = types.raw;
description = ''
See [instances.${name}.roles.${roleName}.settings](${baseHref}?option_scope=0&option=inventory.instances.${name}.roles.${roleName}.settings)
See [instances.${name}.roles.${roleName}.settings](${baseHref}?option_scope=0&option=instances.${name}.roles.${roleName}.settings)
'';
};
settingsOption = mkOption {
@@ -161,42 +161,6 @@
}
];
baseModule =
# Module
{ config, ... }:
{
imports = (import (pkgs.path + "/nixos/modules/module-list.nix"));
nixpkgs.pkgs = pkgs;
clan.core.name = "dummy";
system.stateVersion = config.system.nixos.release;
# Set this to work around a bug where `clan.core.settings.machine.name`
# is forced due to `networking.interfaces` being forced
# somewhere in the nixpkgs options
facter.detected.dhcp.enable = lib.mkForce false;
};
evalClanModules =
let
evaled = lib.evalModules {
class = "nixos";
modules = [
baseModule
{
clan.core.settings.directory = self;
}
self.nixosModules.clanCore
];
};
in
evaled;
coreOptions =
(pkgs.nixosOptionsDoc {
options = (evalClanModules.options).clan.core or { };
warningsAreErrors = true;
transformOptions = self.clanLib.docs.stripStorePathsFromDeclarations;
}).optionsJSON;
in
{
# Uncomment for debugging
@@ -211,17 +175,10 @@
# scopes = mapAttrsToList mkScope serviceModules;
scopes = [
{
inherit baseHref;
name = "Flake Options (clan.nix file)";
name = "Clan";
modules = docModules;
urlPrefix = "https://git.clan.lol/clan/clan-core/src/branch/main/";
}
{
name = "Machine Options (clan.core NixOS options)";
optionsJSON = "${coreOptions}/share/doc/nixos/options.json";
urlPrefix = "https://git.clan.lol/clan/clan-core/src/branch/main/";
}
];
};
};

View File

@@ -1,5 +1,3 @@
"""Module for rendering NixOS options documentation from JSON format."""
# Options are available in the following format:
# https://github.com/nixos/nixpkgs/blob/master/nixos/lib/make-options-doc/default.nix
#
@@ -34,7 +32,7 @@ from typing import Any
from clan_lib.errors import ClanError
from clan_lib.services.modules import (
CategoryInfo,
ModuleManifest,
Frontmatter,
)
# Get environment variables
@@ -48,7 +46,7 @@ CLAN_SERVICE_INTERFACE = os.environ.get("CLAN_SERVICE_INTERFACE")
CLAN_MODULES_VIA_SERVICE = os.environ.get("CLAN_MODULES_VIA_SERVICE")
OUT = os.environ.get("out") # noqa: SIM112
OUT = os.environ.get("out")
def sanitize(text: str) -> str:
@@ -68,7 +66,8 @@ def render_option_header(name: str) -> str:
def join_lines_with_indentation(lines: list[str], indent: int = 4) -> str:
"""Joins multiple lines with a specified number of whitespace characters as indentation.
"""
Joins multiple lines with a specified number of whitespace characters as indentation.
Args:
lines (list of str): The lines of text to join.
@@ -76,7 +75,6 @@ def join_lines_with_indentation(lines: list[str], indent: int = 4) -> str:
Returns:
str: The indented and concatenated string.
"""
# Create the indentation string (e.g., four spaces)
indent_str = " " * indent
@@ -163,10 +161,7 @@ def render_option(
def print_options(
options_file: str,
head: str,
no_options: str,
replace_prefix: str | None = None,
options_file: str, head: str, no_options: str, replace_prefix: str | None = None
) -> str:
res = ""
with (Path(options_file) / "share/doc/nixos/options.json").open() as f:
@@ -175,16 +170,15 @@ def print_options(
res += head if len(options.items()) else no_options
for option_name, info in options.items():
if replace_prefix:
display_name = option_name.replace(replace_prefix + ".", "")
else:
display_name = option_name
option_name = option_name.replace(replace_prefix + ".", "")
res += render_option(display_name, info, 4)
res += render_option(option_name, info, 4)
return res
def module_header(module_name: str) -> str:
return f"# {module_name}\n\n"
def module_header(module_name: str, has_inventory_feature: bool = False) -> str:
indicator = " 🔹" if has_inventory_feature else ""
return f"# {module_name}{indicator}\n\n"
clan_core_descr = """
@@ -242,7 +236,7 @@ def produce_clan_core_docs() -> None:
for submodule_name, split_options in split.items():
outfile = f"{module_name}/{submodule_name}.md"
print(
f"[clan_core.{submodule_name}] Rendering option of: {submodule_name}... {outfile}",
f"[clan_core.{submodule_name}] Rendering option of: {submodule_name}... {outfile}"
)
init_level = 1
root = options_to_tree(split_options, debug=True)
@@ -277,9 +271,56 @@ def produce_clan_core_docs() -> None:
of.write(output)
def render_roles(roles: list[str] | None, module_name: str) -> str:
if roles:
roles_list = "\n".join([f"- `{r}`" for r in roles])
return (
f"""
### Roles
This module can be used via predefined roles
{roles_list}
"""
"""
Every role has its own configuration options, which are each listed below.
For more information, see the [inventory guide](../../concepts/inventory.md).
??? Example
For example the `admin` module adds the following options globally to all machines where it is used.
`clan.admin.allowedkeys`
```nix
clan-core.lib.clan {
inventory.services = {
admin.me = {
roles.default.machines = [ "jon" ];
config.allowedkeys = [ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQD..." ];
};
};
};
```
"""
)
return ""
clan_modules_descr = """
Clan modules are [NixOS modules](https://wiki.nixos.org/wiki/NixOS_modules)
which have been enhanced with additional features provided by Clan, with
certain option types restricted to enable configuration through a graphical
interface.
!!! note "🔹"
Modules with this indicator support the [inventory](../../concepts/inventory.md) feature.
"""
def render_categories(
categories: list[str],
categories_info: dict[str, CategoryInfo],
categories: list[str], categories_info: dict[str, CategoryInfo]
) -> str:
res = """<div style="display: grid; grid-template-columns: repeat(3, 1fr); gap: 10px;">"""
for cat in categories:
@@ -344,10 +385,10 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
# output += f"`clan.modules.{module_name}`\n"
output += f"*{module_info['manifest']['description']}*\n"
fm = Frontmatter("")
# output += "## Categories\n\n"
output += render_categories(
module_info["manifest"]["categories"],
ModuleManifest.categories_info(),
module_info["manifest"]["categories"], fm.categories_info
)
output += f"{module_info['manifest']['readme']}\n"
@@ -356,7 +397,7 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
output += f"The {module_name} module has the following roles:\n\n"
for role_name in module_info["roles"]:
for role_name, _ in module_info["roles"].items():
output += f"- {role_name}\n"
for role_name, role_filename in module_info["roles"].items():
@@ -376,8 +417,35 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
of.write(output)
def build_option_card(module_name: str, frontmatter: Frontmatter) -> str:
"""
Build the overview index card for each reference target option.
"""
def indent_all(text: str, indent_size: int = 4) -> str:
"""
Indent all lines in a string.
"""
indent = " " * indent_size
lines = text.split("\n")
indented_text = indent + ("\n" + indent).join(lines)
return indented_text
def to_md_li(module_name: str, frontmatter: Frontmatter) -> str:
md_li = (
f"""- **[{module_name}](./{"-".join(module_name.split(" "))}.md)**\n\n"""
)
md_li += f"""{indent_all("---", 4)}\n\n"""
fmd = f"\n{frontmatter.description.strip()}" if frontmatter.description else ""
md_li += f"""{indent_all(fmd, 4)}"""
return md_li
return f"{to_md_li(module_name, frontmatter)}\n\n"
def split_options_by_root(options: dict[str, Any]) -> dict[str, dict[str, Any]]:
"""Split the flat dictionary of options into a dict of which each entry will construct complete option trees.
"""
Split the flat dictionary of options into a dict in which each entry will construct complete option trees.
{
"a": { Data }
"a.b": { Data }
@@ -461,7 +529,9 @@ def option_short_name(option_name: str) -> str:
def options_to_tree(options: dict[str, Any], debug: bool = False) -> Option:
"""Convert the options dictionary to a tree structure."""
"""
Convert the options dictionary to a tree structure.
"""
# Helper function to create nested structure
def add_to_tree(path_parts: list[str], info: Any, current_node: Option) -> None:
@@ -513,24 +583,22 @@ def options_to_tree(options: dict[str, Any], debug: bool = False) -> Option:
def options_docs_from_tree(
root: Option,
init_level: int = 1,
prefix: list[str] | None = None,
root: Option, init_level: int = 1, prefix: list[str] | None = None
) -> str:
"""Eender the options from the tree structure.
"""
Render the options from the tree structure.
Args:
root (Option): The root option node.
init_level (int): The initial level of indentation.
prefix (list str): Will be printed as common prefix of all attribute names.
"""
def render_tree(option: Option, level: int = init_level) -> str:
output = ""
should_render = not option.name.startswith("<") and not option.name.startswith(
"_",
"_"
)
if should_render:
# short_name = option_short_name(option.name)
@@ -551,10 +619,11 @@ def options_docs_from_tree(
return output
return render_tree(root)
md = render_tree(root)
return md
if __name__ == "__main__":
if __name__ == "__main__": #
produce_clan_core_docs()
produce_clan_service_author_docs()

View File

@@ -1,33 +1,15 @@
# Auto-included Files
Clan automatically imports specific files from each machine directory and registers them, reducing the need for manual configuration.
Clan automatically imports the following files from a directory and registers them.
## Machine Registration
## Machine registration
Every folder under `machines/{machineName}` is automatically registered as a Clan machine.
Every folder `machines/{machineName}` will be registered automatically as a Clan machine.
!!! info "Files loaded automatically for each machine"
!!! info "Automatically loaded files"
The following files are detected and imported for every Clan machine:
The following files are loaded automatically for each Clan machine:
- [x] `machines/{machineName}/configuration.nix`
Main configuration file for the machine.
- [x] `machines/{machineName}/hardware-configuration.nix`
Hardware-specific configuration generated by NixOS.
- [x] `machines/{machineName}/facter.json`
Contains system facts. Automatically generated — see [nixos-facter](https://clan.lol/blog/nixos-facter/) for details.
- [x] `machines/{machineName}/disko.nix`
Disk layout configuration. See the [disko quickstart](https://github.com/nix-community/disko/blob/master/docs/quickstart.md) for more info.
## Other Auto-included Files
* **`inventory.json`**
Managed by Clan's API.
Merges with `clan.inventory` to extend the inventory.
* **`.clan-flake`**
Sentinel file used to locate the root of a Clan repository.
Falls back to `.git`, `.hg`, `.svn`, or `flake.nix` if not found.
- [x] `machines/{machineName}/configuration.nix`
- [x] `machines/{machineName}/hardware-configuration.nix`
- [x] `machines/{machineName}/facter.json` Automatically configured, for further information see [nixos-facter](https://clan.lol/blog/nixos-facter/)
- [x] `machines/{machineName}/disko.nix` Automatically loaded, for further information see the [disko docs](https://github.com/nix-community/disko/blob/master/docs/quickstart.md).
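
For orientation, a machine directory that uses all of the auto-loaded files listed above might look like this (illustrative layout only):

```
machines/
└── jon/
    ├── configuration.nix
    ├── hardware-configuration.nix
    ├── facter.json
    └── disko.nix
```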

View File

@@ -1,22 +1,16 @@
# Using the Inventory
# Using `clanServices`
Clan's inventory system is a composable way to define and deploy services across
machines.
Clan's `clanServices` system is a composable way to define and deploy services across machines.
This guide shows how to **instantiate** a `clanService`, explains how service
definitions are structured in your inventory, and how to pick or create services
from modules exposed by flakes.
This guide shows how to **instantiate** a `clanService`, explains how service definitions are structured in your inventory, and how to pick or create services from modules exposed by flakes.
The term **Multi-host-modules** was introduced previously in the [nixus
repository](https://github.com/infinisil/nixus) and represents a similar
concept.
The term **Multi-host-modules** was introduced previously in the [nixus repository](https://github.com/infinisil/nixus) and represents a similar concept.
______________________________________________________________________
---
## Overview
Services are used in `inventory.instances`, and assigned to *roles* and
*machines* -- meaning you decide which machines run which part of the service.
Services are used in `inventory.instances`, and then they attach to *roles* and *machines* — meaning you decide which machines run which part of the service.
For example:
@@ -24,138 +18,119 @@ For example:
inventory.instances = {
borgbackup = {
roles.client.machines."laptop" = {};
roles.client.machines."workstation" = {};
roles.client.machines."server1" = {};
roles.server.machines."backup-box" = {};
};
}
```
This says: "Run borgbackup as a *client* on my *laptop* and *workstation*, and
as a *server* on *backup-box*". `client` and `server` are roles defined by the
`borgbackup` service.
This says: “Run borgbackup as a *client* on my *laptop* and *server1*, and as a *server* on *backup-box*.”
## Module source specification
Each instance includes a reference to a **module specification** -- this is how
Clan knows which service module to use and where it came from.
Each instance includes a reference to a **module specification** — this is how Clan knows which service module to use and where it came from.
Usually one would just use `imports`, but we need to make the module source configurable via the Python API.
By default it is not required to specify the `module`, in which case it defaults to the pre-provided services of clan-core.
It is not required to specify the `module.input` parameter, in which case it
defaults to the pre-provided services of clan-core. In a similar fashion, the
`module.name` parameter can also be omitted; it will default to the name of the
instance.
---
## Override Example
Example of instantiating a `borgbackup` service using `clan-core`:
```nix
inventory.instances = {
borgbackup = { # <- Instance name
# This can be partially/fully specified,
# - If the instance name is not the name of the module
# - If the input is not clan-core
# module = {
# name = "borgbackup"; # Name of the module (optional)
# input = "clan-core"; # The flake input where the service is defined (optional)
# };
# Instance Name: Different name for this 'borgbackup' instance
borgbackup = {
# Since this is instances."borgbackup" the whole `module = { ... }` below is equivalent and optional.
module = {
name = "borgbackup"; # <-- Name of the module (optional)
input = "clan-core"; # <-- The flake input where the service is defined (optional)
};
# Participation of the machines is defined via roles
# Right side needs to be an attribute set. Its purpose will become clear later
roles.client.machines."machine-a" = {};
roles.server.machines."backup-host" = {};
};
}
```
## Module Settings
If you used `clan-core` as an input attribute for your flake:
Each role might expose configurable options. See clan's [clanServices
reference](../reference/clanServices/index.md) for all available options.
```nix
# ↓ module.input = "clan-core"
inputs.clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
```
Settings can be set per-machine or per-role. The latter applies to all
machines assigned to that role.
## Simplified Example
If only one instance is needed for a service and the service is a clan core service, the `module` definition can be omitted.
```nix
# Simplified way of specifying a single instance
inventory.instances = {
# instance name is `borgbackup` -> clan core module `borgbackup` will be loaded.
borgbackup = {
# Participation of the machines is defined via roles
# Right side needs to be an attribute set. Its purpose will become clear later
roles.client.machines."machine-a" = {};
roles.server.machines."backup-host" = {};
};
}
```
## Configuration Example
Each role might expose configurable options
See clan's [clanServices reference](../reference/clanServices/index.md) for available options
```nix
inventory.instances = {
borgbackup = {
# Settings for 'machine-a'
borgbackup-example = {
module = {
name = "borgbackup";
input = "clan-core";
};
roles.client.machines."machine-a" = {
# 'client' settings for 'machine-a'
settings = {
backupFolders = [
/home
/var
];
};
# ---------------------------
};
# Settings for all machines of the role "server"
roles.server.settings = {
directory = "/var/lib/borgbackup";
};
roles.server.machines."backup-host" = {};
};
}
```
## Tags
Tags can be used to assign multiple machines to a role at once. It can be thought of as a grouping mechanism.
For example, using the `all` tag for services that you want configured on all
your machines is a common pattern.
The following example could be used to back up all your machines to a common
backup server.
Multiple members can be defined using tags as follows
```nix
inventory.instances = {
borgbackup = {
# "All" machines are assigned to the borgbackup 'client' role
roles.client.tags = [ "all" ];
# But only one specific machine (backup-host) is assigned to the 'server' role
roles.server.machines."backup-host" = {};
};
}
```
## Sharing additional Nix configuration
Sometimes you need to add custom NixOS configuration alongside your clan
services. The `extraModules` option allows you to include additional NixOS
configuration that is applied for every machine assigned to that role.
There are multiple valid syntaxes for specifying modules:
```nix
inventory.instances = {
borgbackup = {
roles.client = {
# Direct module reference
extraModules = [ ../nixosModules/borgbackup.nix ];
# Or using self (needs to be json serializable)
# See next example, for a workaround.
extraModules = [ self.nixosModules.borgbackup ];
# Or inline module definition, (needs to be json compatible)
extraModules = [
{
# Your module configuration here
# ...
#
# If the module needs to contain non-serializable expressions:
imports = [ ./path/to/non-serializable.nix ];
}
];
borgbackup-example = {
module = {
name = "borgbackup";
input = "clan-core";
};
#
# The 'all' -tag targets all machines
roles.client.tags."all" = {};
# ---------------------------
roles.server.machines."backup-host" = {};
};
}
```
## Picking a clanService
You can use services exposed by Clan's core module library, `clan-core`.
You can use services exposed by Clan's core module library, `clan-core`.
🔗 See: [List of Available Services in clan-core](../reference/clanServices/index.md)
@@ -167,19 +142,18 @@ You can also author your own `clanService` modules.
You might expose your service module from your flake — this makes it easy for other people to also use your module in their clan.
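
As a rough sketch (assuming the module lives at `./modules/my-service` and is published under the `clan.modules` flake output, the same attribute set clan-core uses for its own services), this could look like:

```nix
{
  outputs = { self, ... }: {
    # Other clans can then reference this service as
    #   module.name = "my-service";
    #   module.input = "<name-of-your-flake-input>";
    clan.modules.my-service = ./modules/my-service;
  };
}
```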
______________________________________________________________________
---
## 💡 Tips for Working with clanServices
- You can add multiple inputs to your flake (`clan-core`, `your-org-modules`, etc.) to mix and match services.
- Each service instance is isolated by its key in `inventory.instances`, allowing you to deploy multiple versions or roles of the same service type.
- Roles can target different machines or be scoped dynamically.
* You can add multiple inputs to your flake (`clan-core`, `your-org-modules`, etc.) to mix and match services.
* Each service instance is isolated by its key in `inventory.instances`, allowing you to deploy multiple versions or roles of the same service type.
* Roles can target different machines or be scoped dynamically.
______________________________________________________________________
---
## What's Next?
- [Author your own clanService →](../guides/services/community.md)
- [Migrate from clanModules →](../guides/migrations/migrate-inventory-services.md)
## What's Next?
* [Author your own clanService →](../guides/services/community.md)
* [Migrate from clanModules →](../guides/migrations/migrate-inventory-services.md)
<!-- TODO: * [Understand the architecture →](../explanation/clan-architecture.md) -->

View File

@@ -27,7 +27,7 @@ inputs = {
## Import the Clan flake-parts Module
After updating your flake inputs, the next step is to import the Clan flake-parts module. This will make the [Clan options](/options) available within `mkFlake`.
After updating your flake inputs, the next step is to import the Clan flake-parts module. This will make the [Clan options](../options.md) available within `mkFlake`.
```nix
{

View File

@@ -2,9 +2,9 @@
Machines can be added using the following methods
- Create a file `machines/{machine_name}/configuration.nix` (See: [File Autoincludes](../../concepts/autoincludes.md))
- Imperative via cli command: `clan machines create`
- Editing nix expressions in flake.nix. See [`clan-core.lib.clan`](/options/?scope=Flake Options (clan.nix file))
- Editing nix expressions in flake.nix (i.e. via `clan-core.lib.clan`)
- Editing machines/`machine_name`/configuration.nix (automatically included if it exists)
- `clan machines create` (imperative)
See the complete [list](../../concepts/autoincludes.md) of auto-loaded files.
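
A minimal sketch of the declarative variant (the machine name `jon` and its configuration are illustrative):

```nix
clan-core.lib.clan {
  # Register the machine in the inventory
  inventory.machines.jon = { };
  # Add NixOS configuration for it; machines/jon/configuration.nix
  # would also be picked up automatically if it exists
  machines.jon = { pkgs, ... }: {
    environment.systemPackages = [ pkgs.vim ];
  };
}
```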
@@ -39,6 +39,7 @@ See the complete [list](../../concepts/autoincludes.md) of auto-loaded files.
The imperative command might create a machine folder in `machines/jon`
and might persist information in `inventory.json`.
### Configuring a machine
!!! Note

View File

@@ -1,15 +1,12 @@
# Update Machines
# Update Your Machines
The Clan command line interface enables you to update machines remotely over SSH.
In this guide we will teach you how to set a `targetHost` in Nix,
and how to define a remote builder for your machine closures.
Clan CLI enables you to remotely update your machines over SSH. This requires setting up a target address for each target machine.
### Setting `targetHost`
## Setting `targetHost`
In your Nix files, set the `targetHost` to the reachable IP address of your new machine. This eliminates the need to specify `--target-host` with every command.
Set the machine's `targetHost` to the reachable IP address of the new machine.
This eliminates the need to specify `--target-host` in CLI commands.
```{.nix title="clan.nix" hl_lines="9"}
{
@@ -26,42 +23,15 @@ inventory.machines = {
# [...]
}
```
The use of `root@` in the target address implies SSH access as the `root` user.
Ensure that the root login is secured and only used when necessary.
## Multiple Target Hosts
You can now experiment with a new interface that allows you to define multiple `targetHost` addresses for different VPNs. Learn more and try it out in our [networking guide](../networking.md).
### Setting a Build Host
## Updating Machine Configurations
Execute the following command to update the specified machine:
```bash
clan machines update jon
```
All machines can be updated simultaneously by omitting the machine name:
```bash
clan machines update
```
---
## Advanced Usage
The following options are only needed for special cases, such as limited resources, mixed environments, or private flakes.
### Setting `buildHost`
If the machine does not have enough resources to run the NixOS **evaluation** or **build** itself,
it is also possible to specify a `buildHost` instead.
During an update, clan will ssh into the `buildHost` and run `nixos-rebuild` from there.
!!! Note
The `buildHost` option should be set directly within your machine's Nix configuration, **not** under `inventory.machines`.
If the machine does not have enough resources to run the NixOS evaluation or build itself,
it is also possible to specify a build host instead.
During an update, the CLI will SSH into the build host and run `nixos-rebuild` from there.
```{.nix hl_lines="5" .no-copy}
@@ -75,11 +45,7 @@ buildClan {
};
```
### Overriding configuration with CLI flags
`buildHost`, `targetHost`, and other network settings can be temporarily overridden for a single command.
For the full list of flags, refer to the [Clan CLI](../../reference/cli/index.md).
You can also override the build host via the command line:
```bash
# Build on a remote host
@@ -90,9 +56,23 @@ clan machines update jon --build-host local
```
!!! Note
Make sure the CPU architecture of the `buildHost` matches that of the `targetHost`.
Make sure that the CPU architecture is the same for the buildHost as for the targetHost.
Example:
If you want to deploy to a macOS machine, your architecture is ARM64-Darwin, which means you need a second macOS machine to build it.
For example, if deploying to a macOS machine with an ARM64-Darwin architecture, you need a second macOS machine with the same architecture to build it.
### Updating Machine Configurations
Execute the following command to update the specified machine:
```bash
clan machines update jon
```
You can also update all configured machines simultaneously by omitting the machine name:
```bash
clan machines update
```
### Excluding a machine from `clan machine update`
@@ -116,15 +96,14 @@ This is useful for machines that are not always online or are not part of the re
### Uploading Flake Inputs
When updating remote machines, flake inputs are usually fetched by the build host.
However, if flake inputs require authentication (e.g., private repositories),
use the `--upload-inputs` flag to upload all inputs from your local machine:
However, if your flake inputs require authentication (e.g., private repositories),
you can use the `--upload-inputs` flag to upload all inputs from your local machine:
```bash
clan machines update jon --upload-inputs
```
This is particularly useful when:
- The flake references private Git repositories
- Authentication credentials are only available on the local machine
- Your flake references private Git repositories
- Authentication credentials are only available on your local machine
- The build host doesn't have access to certain network resources

View File

@@ -254,7 +254,7 @@ The following table shows the migration status of each deprecated clanModule:
| `data-mesher` | ✅ [Migrated](../../reference/clanServices/data-mesher.md) | |
| `deltachat` | ❌ Removed | |
| `disk-id` | ❌ Removed | |
| `dyndns` | [Migrated](../../reference/clanServices/dyndns.md) | |
| `dyndns` | [Being Migrated](https://git.clan.lol/clan/clan-core/pulls/4390) | |
| `ergochat` | ❌ Removed | |
| `garage` | ✅ [Migrated](../../reference/clanServices/garage.md) | |
| `golem-provider` | ❌ Removed | |
@@ -263,18 +263,18 @@ The following table shows the migration status of each deprecated clanModule:
| `iwd` | ❌ Removed | Use [wifi service](../../reference/clanServices/wifi.md) instead |
| `localbackup` | ✅ [Migrated](../../reference/clanServices/localbackup.md) | |
| `localsend` | ❌ Removed | |
| `machine-id` | ✅ [Migrated](../../reference/clan.core/settings.md) | Now an [option](../../reference/clan.core/settings.md) |
| `machine-id` | ❌ Removed | Now an [option](../../reference/clan.core/settings.md) |
| `matrix-synapse` | ✅ [Migrated](../../reference/clanServices/matrix-synapse.md) | |
| `moonlight` | ❌ Removed | |
| `mumble` | ❌ Removed | |
| `mycelium` | ✅ [Migrated](../../reference/clanServices/mycelium.md) | |
| `nginx` | ❌ Removed | |
| `packages` | ✅ [Migrated](../../reference/clanServices/packages.md) | |
| `postgresql` | ✅ [Migrated](../../reference/clan.core/settings.md) | Now an [option](../../reference/clan.core/settings.md) |
| `root-password` | ✅ [Migrated](../../reference/clanServices/users.md) | See [migration guide](../../reference/clanServices/users.md#migration-from-root-password-module) |
| `postgresql` | ❌ Removed | Now an [option](../../reference/clan.core/settings.md) |
| `root-password` | ✅ [Migrated](../../reference/clanServices/users.md) | |
| `single-disk` | ❌ Removed | |
| `sshd` | ✅ [Migrated](../../reference/clanServices/sshd.md) | |
| `state-version` | ✅ [Migrated](../../reference/clan.core/settings.md) | Now an [option](../../reference/clan.core/settings.md) |
| `state-version` | ✅ [Migrated](../../reference/clanServices/state-version.md) | |
| `static-hosts` | ❌ Removed | |
| `sunshine` | ❌ Removed | |
| `syncthing-static-peers` | ❌ Removed | |

View File

@@ -1,20 +1,18 @@
# Convert existing NixOS configurations
# Migrate existing NixOS configurations
This guide will help you convert your existing NixOS configurations into a Clan.
This guide will help you migrate your existing NixOS configurations into Clan.
!!! Warning
Migrating instead of starting new can be trickier and might lead to bugs or
unexpected issues. We recommend reading the [Getting Started](./index.md) guide first.
Once you have a working setup and understand the concepts, transferring your NixOS configurations over is easy.
## Back up your existing configuration
unexpected issues. We recommend following the [Getting Started](../getting-started/index.md) guide first. Once you have a working setup, you can easily transfer your NixOS configurations over.
## Back up your existing configuration!
Before you start, it is strongly recommended to back up your existing
configuration in any form you see fit. If you use version control to manage
your configuration changes, it is also a good idea to follow the migration
guide in a separate branch until everything works as expected.
## Starting Point
We assume you are already using NixOS flakes to manage your configuration. If
@@ -45,9 +43,10 @@ have two hosts: **berlin** and **cologne**.
}
```
## 1. Add `clan-core` to `inputs`
## Add clan-core Input
Add `clan-core` to your flake as input.
Add `clan-core` to your flake as input. It will provide everything we need to
manage your configurations with clan.
```nix
inputs.clan-core = {
@@ -57,7 +56,7 @@ inputs.clan-core = {
}
```
## 2. Update Outputs
## Update Outputs
To be able to access our newly added dependency, it has to be added to the
output parameters.
@@ -104,23 +103,26 @@ For the provided flake example, your flake should now look like this:
};
in
{
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
clan = clan.config;
nixosConfigurations = clan.nixosConfigurations;
inherit (clan) clanInternals;
clan = {
inherit (clan) templates;
};
};
}
```
Et voilà! Your existing hosts are now part of a clan.
Existing Nix tooling
Et voilà! Your existing hosts are now part of a clan. Existing Nix tooling
should still work as normal. To check that you didn't make any errors, run `nix
flake show` and verify both hosts are still recognized as if nothing had
changed. You should also see the new `clan` output.
changed. You should also see the new `clanInternals` output.
```
nix flake show
git+file:///my-nixos-config
├───clan: unknown
├───clanInternals: unknown
└───nixosConfigurations
├───berlin: NixOS configuration
└───cologne: NixOS configuration
@@ -129,7 +131,7 @@ git+file:///my-nixos-config
Of course you can also rebuild your configuration using `nixos-rebuild` and
verify everything still works.
## 3. Add `clan-cli` to your `devShells`
## Add Clan CLI devShell
At this point Clan is set up, but you can't use the CLI yet. To do so, it is
recommended to expose it via a `devShell` in your flake. It is also possible to
@@ -161,8 +163,8 @@ cologne
## Specify Targets
Clan needs to know where it can reach your hosts. For testing purposes, set
`clan.core.networking.targetHost` to the machine's address or hostname.
Clan needs to know where it can reach your hosts. For each of your hosts, set
`clan.core.networking.targetHost` to its address or hostname.
```nix
# machines/berlin/configuration.nix
@@ -171,8 +173,6 @@ Clan needs to know where it can reach your hosts. For testing purpose set
}
```
See our guide on properly [configuring machine networking](../networking.md).
## Next Steps
You are now fully set up. Use the CLI to manage your hosts or proceed to

View File

@@ -1,184 +0,0 @@
# Connecting to Your Machines
Clan provides automatic networking with fallback mechanisms to reliably connect to your machines.
## Option 1: Automatic Networking with Fallback (Recommended)
Clan's networking module automatically manages connections through various network technologies with intelligent fallback. When you run `clan ssh` or `clan machines update`, Clan tries each configured network by priority until one succeeds.
### Basic Setup with Internet Service
For machines with public IPs or DNS names, use the `internet` service to configure direct SSH while keeping fallback options:
```{.nix title="flake.nix" hl_lines="7-10 14-16"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
inventory.instances = {
# Direct SSH with fallback support
internet = {
roles.default.machines.server1 = {
settings.address = "server1.example.com";
};
roles.default.machines.server2 = {
settings.address = "192.168.1.100";
};
};
# Fallback: Secure connections via Tor
tor = {
roles.server.tags.nixos = { };
};
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
### Advanced Setup with Multiple Networks
```{.nix title="flake.nix" hl_lines="7-10 13-16 19-21"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
inventory.instances = {
# Priority 1: Try direct connection first
internet = {
roles.default.machines.publicserver = {
settings.address = "public.example.com";
};
};
# Priority 2: VPN for internal machines
zerotier = {
roles.controller.machines."controller" = { };
roles.peer.tags.nixos = { };
};
# Priority 3: Tor as universal fallback
tor = {
roles.server.tags.nixos = { };
};
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
### How It Works
Clan automatically tries networks in order of priority:
1. Direct internet connections (if configured)
2. VPN networks (ZeroTier, Tailscale, etc.)
3. Tor hidden services
4. Any other configured networks
If one network fails, Clan automatically tries the next.
### Useful Commands
```bash
# View all configured networks and their status
clan network list
# Test connectivity through all networks
clan network ping machine1
# Show complete network topology
clan network overview
```
## Option 2: Manual targetHost (Bypasses Fallback!)
!!! warning
Setting `targetHost` directly **disables all automatic networking and fallback**. Only use this if you need complete control and don't want Clan's intelligent connection management.
### Using Inventory (For Static Addresses)
Use inventory-level `targetHost` when the address is **static** and doesn't depend on NixOS configuration:
```{.nix title="flake.nix" hl_lines="8"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
inventory.machines.server = {
# WARNING: This bypasses all networking modules!
# Use for: Static IPs, DNS names, known hostnames
deploy.targetHost = "root@192.168.1.100";
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
**When to use inventory-level:**
- Static IP addresses: `"root@192.168.1.100"`
- DNS names: `"user@server.example.com"`
- Any address that doesn't change based on machine configuration
### Using NixOS Configuration (For Dynamic Addresses)
Use machine-level `targetHost` when you need to **interpolate values from the NixOS configuration**:
```{.nix title="flake.nix" hl_lines="7"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
machines.server = { config, ... }: {
# WARNING: This also bypasses all networking modules!
# REQUIRED for: Addresses that depend on NixOS config
clan.core.networking.targetHost = "root@${config.networking.hostName}.local";
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
**When to use machine-level (NixOS config):**
- Using hostName from config: `"root@${config.networking.hostName}.local"`
- Building from multiple config values: `"${config.users.users.deploy.name}@${config.networking.hostName}"`
- Any address that depends on evaluated NixOS configuration
!!! info "Key Difference"
**Inventory-level** (`deploy.targetHost`) is evaluated immediately and works with static strings.
**Machine-level** (`clan.core.networking.targetHost`) is evaluated after NixOS configuration and can access `config.*` values.
## Quick Decision Guide
| Scenario | Recommended Approach | Why |
|----------|---------------------|-----|
| Public servers | `internet` service | Keeps fallback options |
| Mixed infrastructure | Multiple networks | Automatic failover |
| Machines behind NAT | ZeroTier/Tor | NAT traversal with fallback |
| Testing/debugging | Manual targetHost | Full control, no magic |
| Single static machine | Manual targetHost | Simple, no overhead |
## Command-Line Override
The `--target-host` flag bypasses ALL networking configuration:
```bash
# Emergency access - ignores all networking config
clan machines update server --target-host root@backup-ip.com
# Direct SSH - no fallback attempted
clan ssh laptop --target-host user@10.0.0.5
```
Use this for debugging or emergency access when automatic networking isn't working.

View File

@@ -255,50 +255,11 @@ outputs = inputs: flake-parts.lib.mkFlake { inherit inputs; } ({self, lib, ...}:
})
```
The benefit of this approach is that downstream users can override the value of
`myClan` by using `mkForce` or other priority modifiers.
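
A minimal sketch of such an override, assuming `myClan` is declared as an ordinary flake-parts option in the snippet above:

```nix
# downstream flake-parts module (hypothetical)
{ lib, inputs, ... }:
{
  myClan = lib.mkForce (inputs.clan-core.lib.clan {
    # alternative clan definition supplied by the downstream user
  });
}
```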
## Example: A machine-type service
Users often have different types of machines. These could be any classification
you like, for example "servers" and "desktops". Having such distictions, allows
reusing parts of your configuration that should be appplied to a class of
machines. Since this is such a common pattern, here is how to write such a
service.
For this example we have two roles: `server` and `desktop`. Additionally, we
can use the `perMachine` section to add configuration to all machines regardless
of their type.
```nix title="machine-type.nix"
{
_class = "clan.service";
manifest.name = "machine-type";
roles.server.perInstance.nixosModule = ./server.nix;
roles.desktop.perInstance.nixosModule = ./desktop.nix;
perMachine.nixosModule = {
# Configuration for all machines (any type)
};
}
```
In the inventory we then assign machines to a type, e.g. by using tags:
```nix title="flake.nix"
instances.machine-type = {
module.input = "self";
module.name = "@pinpox/machine-type";
roles.desktop.tags.desktop = { };
roles.server.tags.server = { };
};
```
The benefit of this approach is that downstream users can override the value of `myClan` by using `mkForce` or other priority modifiers.
---
## Further Reading
## Further
- [Reference Documentation for Service Authors](../../reference/clanServices/clan-service-author-interface.md)
- [Migration Guide from ClanModules to ClanServices](../../guides/migrations/migrate-inventory-services.md)

View File

@@ -0,0 +1,84 @@
# How to Set `targetHost` for a Machine
The `targetHost` defines where the machine can be reached for operations like SSH or deployment. You can set it in two ways, depending on your use case.
---
## ✅ Option 1: Use the Inventory (Recommended for Static Hosts)
If the hostname is **static**, like `server.example.com`, set it in the **inventory**:
```{.nix title="flake.nix" hl_lines="8"}
{
# elided
outputs =
{ self, clan-core, ... }:
let
# Sometimes this attribute set is defined in clan.nix
clan = clan-core.lib.clan {
inventory.machines.jon = {
deploy.targetHost = "root@server.example.com";
};
};
in
{
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
# elided
};
}
```
This is fast, simple, and explicit, and doesn't require evaluating the NixOS config. It can also be displayed in the clan-cli or clan-app.
---
## ✅ Option 2: Use NixOS (Only for Dynamic Hosts)
If your target host depends on a **dynamic expression** (like using the machine's evaluated FQDN), set it inside the NixOS module:
```{.nix title="flake.nix" hl_lines="8"}
{
# elided
outputs =
{ self, clan-core, ... }:
let
# Sometimes this attribute set is defined in clan.nix
clan = clan-core.lib.clan {
machines.jon = {config, ...}: {
clan.core.networking.targetHost = "jon@${config.networking.fqdn}";
};
};
in
{
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
# elided
};
}
```
Use this **only if the value cannot be made static**, because it's slower and won't be displayed in the clan-cli or clan-app yet.
---
## 📝 TL;DR
| Use Case | Use Inventory? | Example |
| ------------------------- | -------------- | -------------------------------- |
| Static hostname | ✅ Yes | `root@server.example.com` |
| Dynamic config expression | ❌ No | `jon@${config.networking.fqdn}` |
---
## 🚀 Coming Soon: Unified Networking Module
We're working on a new networking module that will automatically do all of this for you.
- Easier to use
- Sane defaults: You'll always be able to reach the machine — no need to worry about hostnames.
- ✨ Migration from **either method** will be supported and simple.
## Summary
- Ask: *Does this hostname dynamically change based on NixOS config?*
- If **no**, use the inventory.
- If **yes**, then use NixOS config.

6
docs/site/options.md Normal file
View File

@@ -0,0 +1,6 @@
---
template: options.html
---
<iframe src="/options-page/" height="1000" width="100%"></iframe>

View File

@@ -4,7 +4,7 @@ This section of the site provides an overview of available options and commands
---
- [Clan Configuration Option](/options) - for defining a Clan
- [Clan Configuration Option](../options.md) - for defining a Clan
- Learn how to use the [Clan CLI](./cli/index.md)
- Explore available [services](./clanServices/index.md)
- [NixOS Configuration Options](./clan.core/index.md) - Additional options available on a NixOS machine.

40
flake.lock generated
View File

@@ -13,11 +13,11 @@
]
},
"locked": {
"lastModified": 1756695982,
"narHash": "sha256-dyLhOSDzxZtRgi5aj/OuaZJUsuvo+8sZ9CU/qieZ15c=",
"rev": "cc8f26e7e6c2dc985526ba59b286ae5a83168cdb",
"lastModified": 1753067306,
"narHash": "sha256-jyoEbaXa8/MwVQ+PajUdT63y3gYhgD9o7snO/SLaikw=",
"rev": "18dfd42bdb2cfff510b8c74206005f733e38d8b9",
"type": "tarball",
"url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/cc8f26e7e6c2dc985526ba59b286ae5a83168cdb.tar.gz"
"url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/18dfd42bdb2cfff510b8c74206005f733e38d8b9.tar.gz"
},
"original": {
"type": "tarball",
@@ -31,11 +31,11 @@
]
},
"locked": {
"lastModified": 1756115622,
"narHash": "sha256-iv8xVtmLMNLWFcDM/HcAPLRGONyTRpzL9NS09RnryRM=",
"lastModified": 1755519972,
"narHash": "sha256-bU4nqi3IpsUZJeyS8Jk85ytlX61i4b0KCxXX9YcOgVc=",
"owner": "nix-community",
"repo": "disko",
"rev": "bafad29f89e83b2d861b493aa23034ea16595560",
"rev": "4073ff2f481f9ef3501678ff479ed81402caae6d",
"type": "github"
},
"original": {
@@ -71,11 +71,11 @@
]
},
"locked": {
"lastModified": 1755825449,
"narHash": "sha256-XkiN4NM9Xdy59h69Pc+Vg4PxkSm9EWl6u7k6D5FZ5cM=",
"lastModified": 1755275010,
"narHash": "sha256-lEApCoWUEWh0Ifc3k1JdVjpMtFFXeL2gG1qvBnoRc2I=",
"owner": "nix-darwin",
"repo": "nix-darwin",
"rev": "8df64f819698c1fee0c2969696f54a843b2231e8",
"rev": "7220b01d679e93ede8d7b25d6f392855b81dd475",
"type": "github"
},
"original": {
@@ -86,11 +86,11 @@
},
"nix-select": {
"locked": {
"lastModified": 1755887746,
"narHash": "sha256-lzWbpHKX0WAn/jJDoCijIDss3rqYIPawe46GDaE6U3g=",
"rev": "92c2574c5e113281591be01e89bb9ddb31d19156",
"lastModified": 1745005516,
"narHash": "sha256-IVaoOGDIvAa/8I0sdiiZuKptDldrkDWUNf/+ezIRhyc=",
"rev": "69d8bf596194c5c35a4e90dd02c52aa530caddf8",
"type": "tarball",
"url": "https://git.clan.lol/api/v1/repos/clan/nix-select/archive/92c2574c5e113281591be01e89bb9ddb31d19156.tar.gz"
"url": "https://git.clan.lol/api/v1/repos/clan/nix-select/archive/69d8bf596194c5c35a4e90dd02c52aa530caddf8.tar.gz"
},
"original": {
"type": "tarball",
@@ -99,11 +99,11 @@
},
"nixos-facter-modules": {
"locked": {
"lastModified": 1756491981,
"narHash": "sha256-lXyDAWPw/UngVtQfgQ8/nrubs2r+waGEYIba5UX62+k=",
"lastModified": 1755504238,
"narHash": "sha256-mw7q5DPdmz/1au8mY0u1DztRgVyJToGJfJszxjKSNes=",
"owner": "nix-community",
"repo": "nixos-facter-modules",
"rev": "c1b29520945d3e148cd96618c8a0d1f850965d8c",
"rev": "354ed498c9628f32383c3bf5b6668a17cdd72a28",
"type": "github"
},
"original": {
@@ -181,11 +181,11 @@
]
},
"locked": {
"lastModified": 1756662192,
"narHash": "sha256-F1oFfV51AE259I85av+MAia221XwMHCOtZCMcZLK2Jk=",
"lastModified": 1754847726,
"narHash": "sha256-2vX8QjO5lRsDbNYvN9hVHXLU6oMl+V/PsmIiJREG4rE=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "1aabc6c05ccbcbf4a635fb7a90400e44282f61c4",
"rev": "7d81f6fb2e19bf84f1c65135d1060d829fae2408",
"type": "github"
},
"original": {

View File

@@ -87,8 +87,6 @@ in
relativeDir = removePrefix "${self}/" (toString config.clan.directory);
update-vars = hostPkgs.writeShellScriptBin "update-vars" ''
set -x
export PRJ_ROOT=$(git rev-parse --show-toplevel)
${update-vars-script} $PRJ_ROOT/${relativeDir} ${testName}
'';

View File

@@ -328,7 +328,7 @@ rec {
# To get the type of a Deferred modules we need to know the interface of the place where it is evaluated.
# i.e. in case of a clan.service this is the interface of the service which dynamically changes depending on the service
# We assign "type" = []
# This means any value is valid — or like TypeScript's unknown.
# This means any value is valid — or like TypeScript's unknown.
# We can assign the type later, when we know the exact interface.
# tsType = "unknown" is a type that we preload for json2ts, such that it gets the correct type in typescript
(option.type.name == "deferredModule")

View File

@@ -245,8 +245,6 @@ in
in
{ config, ... }:
{
staticModules = clan-core.clan.modules;
distributedServices = clanLib.inventory.mapInstances {
inherit (clanConfig) inventory exportsModule;
inherit flakeInputs directory;

View File

@@ -639,7 +639,7 @@ in
Exports are used to share and expose information between instances.
Define exports in the [`perInstance`](#roles.perInstance) or [`perMachine`](#perMachine) scope.
Define exports in the [`perInstance`](#perInstance) or [`perMachine`](#perMachine) scope.
Accessing the exports:

View File

@@ -21,14 +21,14 @@ let
"secrets"
"templates"
];
clanSchemaNix = jsonLib.parseOptions (lib.filterAttrs (n: _v: lib.elem n include) clanOpts) { };
clanSchema = jsonLib.parseOptions (lib.filterAttrs (n: _v: lib.elem n include) clanOpts) { };
clanSchemaJson = pkgs.stdenv.mkDerivation {
clan-schema-abstract = pkgs.stdenv.mkDerivation {
name = "clan-schema-files";
buildInputs = [ pkgs.cue ];
src = ./.;
buildPhase = ''
export SCHEMA=${builtins.toFile "clan-schema.json" (builtins.toJSON clanSchemaNix)}
export SCHEMA=${builtins.toFile "clan-schema.json" (builtins.toJSON clanSchema)}
cp $SCHEMA schema.json
# Also generate a CUE schema version that is derived from the JSON schema
cue import -f -p compose -l '#Root:' schema.json
@@ -41,7 +41,7 @@ in
{
inherit
flakeOptions
clanSchemaNix
clanSchemaJson
clanSchema
clan-schema-abstract
;
}

View File

@@ -27,9 +27,7 @@ in
default = { };
};
tags = lib.mkOption {
type = types.coercedTo (types.listOf types.str) (t: lib.genAttrs t (_: { })) (
types.attrsOf (types.submodule { })
);
type = types.attrsOf (types.submodule { });
default = { };
};
settings =
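
With the `coercedTo` variant shown above, both spellings would be accepted and normalized to an attribute set of empty submodules (a sketch of the intended usage, not taken from the docs):

```nix
# list form, coerced via lib.genAttrs to { all = { }; }
roles.client.tags = [ "all" ];

# canonical attribute-set form, equivalent after coercion
roles.client.tags."all" = { };
```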

View File

@@ -23,12 +23,6 @@ let
};
in
{
options.staticModules = lib.mkOption {
readOnly = true;
type = lib.types.raw;
apply = moduleSet: lib.mapAttrs (inspectModule "<clan-core>") moduleSet;
};
options.modulesPerSource = lib.mkOption {
# { sourceName :: { moduleName :: {} }}
readOnly = true;

View File

@@ -1,5 +1,3 @@
"""Test driver for container-based NixOS testing."""
import argparse
import ctypes
import os
@@ -13,7 +11,7 @@ import uuid
from collections.abc import Callable
from contextlib import _GeneratorContextManager
from dataclasses import dataclass
from functools import cache, cached_property
from functools import cached_property
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import Any
@@ -22,21 +20,23 @@ from colorama import Fore, Style
from .logger import AbstractLogger, CompositeLogger, TerminalLogger
# Global flag to track if test environment has been initialized
_test_env_initialized = False
@cache
def init_test_environment() -> None:
"""Set up the test environment (network bridge, /etc/passwd) once."""
global _test_env_initialized
if _test_env_initialized:
return
# Set up network bridge
subprocess.run(
["ip", "link", "add", "br0", "type", "bridge"],
check=True,
text=True,
["ip", "link", "add", "br0", "type", "bridge"], check=True, text=True
)
subprocess.run(["ip", "link", "set", "br0", "up"], check=True, text=True)
subprocess.run(
["ip", "addr", "add", "192.168.1.254/24", "dev", "br0"],
check=True,
text=True,
["ip", "addr", "add", "192.168.1.254/24", "dev", "br0"], check=True, text=True
)
# Set up minimal passwd file for unprivileged operations
@@ -44,7 +44,7 @@ def init_test_environment() -> None:
passwd_content = """root:x:0:0:Root:/root:/bin/sh
nixbld:x:1000:100:Nix build user:/tmp:/bin/sh
nobody:x:65534:65534:Nobody:/:/bin/sh
""" # noqa: S105 - This is not a password, it's a Unix passwd file format for testing
"""
with NamedTemporaryFile(mode="w", delete=False, prefix="test-passwd-") as f:
f.write(passwd_content)
@@ -84,6 +84,8 @@ nogroup:x:65534:
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno), "Failed to mount group")
_test_env_initialized = True
# Load the C library
libc = ctypes.CDLL("libc.so.6", use_errno=True)
@@ -109,7 +111,8 @@ def mount(
mountflags: int = 0,
data: str | None = None,
) -> None:
"""A Python wrapper for the mount system call.
"""
A Python wrapper for the mount system call.
:param source: The source of the file system (e.g., device name, remote filesystem).
:param target: The mount point (an existing directory).
@@ -126,11 +129,7 @@ def mount(
# Call the mount system call
result = libc.mount(
source_c,
target_c,
fstype_c,
ctypes.c_ulong(mountflags),
data_c,
source_c, target_c, fstype_c, ctypes.c_ulong(mountflags), data_c
)
if result != 0:
@@ -142,11 +141,11 @@ class Error(Exception):
pass
def prepare_machine_root(root: Path) -> None:
def prepare_machine_root(machinename: str, root: Path) -> None:
root.mkdir(parents=True, exist_ok=True)
root.joinpath("etc").mkdir(parents=True, exist_ok=True)
root.joinpath(".env").write_text(
"\n".join(f"{k}={v}" for k, v in os.environ.items()),
"\n".join(f"{k}={v}" for k, v in os.environ.items())
)
@@ -158,6 +157,7 @@ def retry(fn: Callable, timeout: int = 900) -> None:
"""Call the given function repeatedly, with 1 second intervals,
until it returns True or a timeout is reached.
"""
for _ in range(timeout):
if fn(False):
return
@@ -189,7 +189,7 @@ class Machine:
return self.get_systemd_process()
def start(self) -> None:
prepare_machine_root(self.rootdir)
prepare_machine_root(self.name, self.rootdir)
init_test_environment()
cmd = [
"systemd-nspawn",
@@ -212,12 +212,8 @@ class Machine:
self.process = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True, env=env)
def get_systemd_process(self) -> int:
if self.process is None:
msg = "Machine not started"
raise RuntimeError(msg)
if self.process.stdout is None:
msg = "Machine has no stdout"
raise RuntimeError(msg)
assert self.process is not None, "Machine not started"
assert self.process.stdout is not None, "Machine has no stdout"
for line in self.process.stdout:
print(line, end="")
@@ -234,9 +230,9 @@ class Machine:
.read_text()
.split()
)
if len(childs) != 1:
msg = f"Expected exactly one child process for systemd-nspawn, got {childs}"
raise RuntimeError(msg)
assert len(childs) == 1, (
f"Expected exactly one child process for systemd-nspawn, got {childs}"
)
try:
return int(childs[0])
except ValueError as e:
@@ -256,9 +252,7 @@ class Machine:
def tuple_from_line(line: str) -> tuple[str, str]:
match = line_pattern.match(line)
if match is None:
msg = f"Failed to parse line: {line}"
raise RuntimeError(msg)
assert match is not None
return match[1], match[2]
return dict(
@@ -268,14 +262,8 @@ class Machine:
)
def nsenter_command(self, command: str) -> list[str]:
nsenter = shutil.which("nsenter")
if not nsenter:
msg = "nsenter command not found"
raise RuntimeError(msg)
return [
nsenter,
"nsenter",
"--target",
str(self.container_pid),
"--mount",
@@ -292,11 +280,12 @@ class Machine:
def execute(
self,
command: str,
check_return: bool = True, # noqa: ARG002
check_output: bool = True, # noqa: ARG002
check_return: bool = True,
check_output: bool = True,
timeout: int | None = 900,
) -> subprocess.CompletedProcess:
"""Execute a shell command, returning a list `(status, stdout)`.
"""
Execute a shell command, returning a list `(status, stdout)`.
Commands are run with `set -euo pipefail` set:
@@ -327,22 +316,21 @@ class Machine:
`timeout` parameter, e.g., `execute(cmd, timeout=10)` or
`execute(cmd, timeout=None)`. The default is 900 seconds.
"""
# Always run command with shell opts
command = f"set -eo pipefail; source /etc/profile; set -xu; {command}"
return subprocess.run(
proc = subprocess.run(
self.nsenter_command(command),
env={},
timeout=timeout,
check=False,
stdout=subprocess.PIPE,
text=True,
)
return proc
def nested(
self,
msg: str,
attrs: dict[str, str] | None = None,
self, msg: str, attrs: dict[str, str] | None = None
) -> _GeneratorContextManager:
if attrs is None:
attrs = {}
@@ -351,7 +339,8 @@ class Machine:
return self.logger.nested(msg, my_attrs)
def systemctl(self, q: str) -> subprocess.CompletedProcess:
"""Runs `systemctl` commands with optional support for
"""
Runs `systemctl` commands with optional support for
`systemctl --user`
```py
@@ -366,7 +355,8 @@ class Machine:
return self.execute(f"systemctl {q}")
def wait_until_succeeds(self, command: str, timeout: int = 900) -> str:
"""Repeat a shell command with 1-second intervals until it succeeds.
"""
Repeat a shell command with 1-second intervals until it succeeds.
Has a default timeout of 900 seconds which can be modified, e.g.
`wait_until_succeeds(cmd, timeout=10)`. See `execute` for details on
command execution.
@@ -384,17 +374,18 @@ class Machine:
return output
def wait_for_open_port(
self,
port: int,
addr: str = "localhost",
timeout: int = 900,
self, port: int, addr: str = "localhost", timeout: int = 900
) -> None:
"""Wait for a port to be open on the given address."""
"""
Wait for a port to be open on the given address.
"""
command = f"nc -z {shlex.quote(addr)} {port}"
self.wait_until_succeeds(command, timeout=timeout)
def wait_for_file(self, filename: str, timeout: int = 30) -> None:
"""Waits until the file exists in the machine's file system."""
"""
Waits until the file exists in the machine's file system.
"""
def check_file(_last_try: bool) -> bool:
result = self.execute(f"test -e {filename}")
@@ -404,7 +395,8 @@ class Machine:
retry(check_file, timeout)
def wait_for_unit(self, unit: str, timeout: int = 900) -> None:
"""Wait for a systemd unit to get into "active" state.
"""
Wait for a systemd unit to get into "active" state.
Throws exceptions on "failed" and "inactive" states as well as after
timing out.
"""
@@ -449,7 +441,9 @@ class Machine:
return res.stdout
def shutdown(self) -> None:
"""Shut down the machine, waiting for the VM to exit."""
"""
Shut down the machine, waiting for the VM to exit.
"""
if self.process:
self.process.terminate()
self.process.wait()
@@ -563,7 +557,7 @@ class Driver:
rootdir=tempdir_path / container.name,
out_dir=self.out_dir,
logger=self.logger,
),
)
)
def start_all(self) -> None:
@@ -581,15 +575,13 @@ class Driver:
# We lauch a sleep here, so we can pgrep the process cmdline for
# the uuid
sleep = shutil.which("sleep")
if sleep is None:
msg = "sleep command not found"
raise RuntimeError(msg)
assert sleep is not None, "sleep command not found"
machine.execute(
f"systemd-run /bin/sh -c '{sleep} 999999999 && echo {nspawn_uuid}'",
)
print(
f"To attach to container {machine.name} run on the same machine that runs the test:",
f"To attach to container {machine.name} run on the same machine that runs the test:"
)
print(
" ".join(
@@ -611,8 +603,8 @@ class Driver:
"-c",
"bash",
Style.RESET_ALL,
],
),
]
)
)
def test_symbols(self) -> dict[str, Any]:
@@ -631,13 +623,13 @@ class Driver:
"additionally exposed symbols:\n "
+ ", ".join(m.name for m in self.machines)
+ ",\n "
+ ", ".join(list(general_symbols.keys())),
+ ", ".join(list(general_symbols.keys()))
)
return {**general_symbols, **machine_symbols}
def test_script(self) -> None:
"""Run the test script"""
exec(self.testscript, self.test_symbols(), None) # noqa: S102
exec(self.testscript, self.test_symbols(), None)
def run_tests(self) -> None:
"""Run the test script (for non-interactive test runs)"""

View File

@@ -25,31 +25,27 @@ class AbstractLogger(ABC):
@abstractmethod
@contextmanager
def subtest(
self,
name: str,
attributes: dict[str, str] | None = None,
self, name: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
pass
@abstractmethod
@contextmanager
def nested(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
pass
@abstractmethod
def info(self, *args: Any, **kwargs: Any) -> None:
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore
pass
@abstractmethod
def warning(self, *args: Any, **kwargs: Any) -> None:
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore
pass
@abstractmethod
def error(self, *args: Any, **kwargs: Any) -> None:
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore
pass
@abstractmethod
@@ -63,8 +59,6 @@ class AbstractLogger(ABC):
class JunitXMLLogger(AbstractLogger):
class TestCaseState:
"""State tracking for individual test cases in JUnit XML reports."""
def __init__(self) -> None:
self.stdout = ""
self.stderr = ""
@@ -72,7 +66,7 @@ class JunitXMLLogger(AbstractLogger):
def __init__(self, outfile: Path) -> None:
self.tests: dict[str, JunitXMLLogger.TestCaseState] = {
"main": self.TestCaseState(),
"main": self.TestCaseState()
}
self.currentSubtest = "main"
self.outfile: Path = outfile
@@ -80,16 +74,12 @@ class JunitXMLLogger(AbstractLogger):
atexit.register(self.close)
def log(self, message: str, attributes: dict[str, str] | None = None) -> None:
del attributes # Unused but kept for API compatibility
self.tests[self.currentSubtest].stdout += message + os.linesep
@contextmanager
def subtest(
self,
name: str,
attributes: dict[str, str] | None = None,
self, name: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
del attributes # Unused but kept for API compatibility
old_test = self.currentSubtest
self.tests.setdefault(name, self.TestCaseState())
self.currentSubtest = name
@@ -100,24 +90,18 @@ class JunitXMLLogger(AbstractLogger):
@contextmanager
def nested(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
del attributes # Unused but kept for API compatibility
self.log(message)
yield
def info(self, *args: Any, **kwargs: Any) -> None:
del kwargs # Unused but kept for API compatibility
self.tests[self.currentSubtest].stdout += args[0] + os.linesep
def warning(self, *args: Any, **kwargs: Any) -> None:
del kwargs # Unused but kept for API compatibility
self.tests[self.currentSubtest].stdout += args[0] + os.linesep
def error(self, *args: Any, **kwargs: Any) -> None:
del kwargs # Unused but kept for API compatibility
self.tests[self.currentSubtest].stderr += args[0] + os.linesep
self.tests[self.currentSubtest].failure = True
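
The bookkeeping above boils down to one dict of per-test states keyed by subtest name plus a `currentSubtest` cursor that is swapped in for the duration of each `subtest` block and restored afterwards; `setdefault` lets a subtest be re-entered without clobbering earlier output. A stripped-down sketch of the same pattern (names mirror the class but are illustrative):

```python
from collections.abc import Iterator
from contextlib import contextmanager


class CaseState:
    def __init__(self) -> None:
        self.stdout = ""
        self.stderr = ""
        self.failure = False


class Recorder:
    def __init__(self) -> None:
        # "main" collects everything logged outside an explicit subtest.
        self.tests: dict[str, CaseState] = {"main": CaseState()}
        self.current = "main"

    @contextmanager
    def subtest(self, name: str) -> Iterator[None]:
        old = self.current
        self.tests.setdefault(name, CaseState())  # keep output on re-entry
        self.current = name
        try:
            yield
        finally:
            self.current = old  # restore even if the subtest body raises

    def error(self, message: str) -> None:
        self.tests[self.current].stderr += message + "\n"
        self.tests[self.current].failure = True
```
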
@@ -160,9 +144,7 @@ class CompositeLogger(AbstractLogger):
@contextmanager
def subtest(
self,
name: str,
attributes: dict[str, str] | None = None,
self, name: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
with ExitStack() as stack:
for logger in self.logger_list:
@@ -171,24 +153,22 @@ class CompositeLogger(AbstractLogger):
@contextmanager
def nested(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
with ExitStack() as stack:
for logger in self.logger_list:
stack.enter_context(logger.nested(message, attributes))
yield
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore
for logger in self.logger_list:
logger.info(*args, **kwargs)
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore
for logger in self.logger_list:
logger.warning(*args, **kwargs)
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore
for logger in self.logger_list:
logger.error(*args, **kwargs)
sys.exit(1)
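
`CompositeLogger` fans one context-managed call out to all child loggers by entering each child's `nested()` (or `subtest()`) on a single `ExitStack`, which unwinds them in reverse order when the block ends, even on exceptions. The same shape in isolation (the `loggers` argument is hypothetical):

```python
from collections.abc import Iterator
from contextlib import ExitStack, contextmanager


@contextmanager
def nested_all(loggers: list, message: str) -> Iterator[None]:
    with ExitStack() as stack:
        for logger in loggers:
            # Each enter_context registers the child's __exit__ on the
            # stack; all of them run (in reverse) when this block ends.
            stack.enter_context(logger.nested(message))
        yield
```
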
@@ -220,24 +200,19 @@ class TerminalLogger(AbstractLogger):
@contextmanager
def subtest(
self,
name: str,
attributes: dict[str, str] | None = None,
self, name: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
with self.nested("subtest: " + name, attributes):
yield
@contextmanager
def nested(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
self._eprint(
self.maybe_prefix(
Style.BRIGHT + Fore.GREEN + message + Style.RESET_ALL,
attributes,
),
Style.BRIGHT + Fore.GREEN + message + Style.RESET_ALL, attributes
)
)
tic = time.time()
@@ -245,13 +220,13 @@ class TerminalLogger(AbstractLogger):
toc = time.time()
self.log(f"(finished: {message}, in {toc - tic:.2f} seconds)")
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore
self.log(*args, **kwargs)
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore
self.log(*args, **kwargs)
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore
self.log(*args, **kwargs)
def print_serial_logs(self, enable: bool) -> None:
@@ -284,9 +259,7 @@ class XMLLogger(AbstractLogger):
return "".join(ch for ch in message if unicodedata.category(ch)[0] != "C")
def maybe_prefix(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> str:
if attributes and "machine" in attributes:
return f"{attributes['machine']}: {message}"
@@ -297,13 +270,13 @@ class XMLLogger(AbstractLogger):
self.xml.characters(message)
self.xml.endElement("line")
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def info(self, *args: Any, **kwargs: Any) -> None: # type: ignore
self.log(*args, **kwargs)
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def warning(self, *args: Any, **kwargs: Any) -> None: # type: ignore
self.log(*args, **kwargs)
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
def error(self, *args: Any, **kwargs: Any) -> None: # type: ignore
self.log(*args, **kwargs)
def log(self, message: str, attributes: dict[str, str] | None = None) -> None:
@@ -336,18 +309,14 @@ class XMLLogger(AbstractLogger):
@contextmanager
def subtest(
self,
name: str,
attributes: dict[str, str] | None = None,
self, name: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
with self.nested("subtest: " + name, attributes):
yield
@contextmanager
def nested(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
if attributes is None:
attributes = {}

View File

@@ -8,10 +8,6 @@
{
imports = lib.optional (_class == "nixos") (
lib.mkIf config.clan.core.enableRecommendedDefaults {
# Enable automatic state-version generation.
clan.core.settings.state-version.enable = lib.mkDefault true;
# Use systemd during boot as well except:
# - systems with raids as this currently require manual configuration: https://github.com/NixOS/nixpkgs/issues/210210
# - for containers we currently rely on the `stage-2` init script that sets up our /etc
@@ -41,7 +37,6 @@
};
config = lib.mkIf config.clan.core.enableRecommendedDefaults {
# This disables the HTML manual and `nixos-help` command but leaves
# `man configuration.nix`
documentation.doc.enable = lib.mkDefault false;

View File

@@ -1,17 +1,40 @@
{ ... }:
{
perSystem.clan.nixosTests.machine-id = {
perSystem =
{ ... }:
{
clan.nixosTests.machine-id = {
name = "service-machine-id";
name = "service-machine-id";
clan = {
directory = ./.;
machines.server = {
clan.core.settings.machine-id.enable = true;
clan = {
directory = ./.;
# Workaround until we can use nodes.server = { };
modules."@clan/importer" = ../../../../clanServices/importer;
inventory = {
machines.server = { };
instances.importer = {
module.name = "@clan/importer";
module.input = "self";
roles.default.tags.all = { };
roles.default.extraModules = [
{
# Test machine ID generation
clan.core.settings.machine-id.enable = true;
}
];
};
};
};
# TODO: Broken. Use this instead of the importer once fixed.
# nodes.server = { };
# This is not an actual VM test; it is a workaround to
# generate the needed vars for the eval test.
testScript = "";
};
};
# This is not an actual VM test; it is a workaround to
# generate the needed vars for the eval test.
testScript = "";
};
}

View File

@@ -10,14 +10,30 @@
clan = {
directory = ./.;
machines.machine = {
clan.core.postgresql.enable = true;
clan.core.postgresql.users.test = { };
clan.core.postgresql.databases.test.create.options.OWNER = "test";
clan.core.settings.directory = ./.;
# Workaround until we can use nodes.machine = { };
modules."@clan/importer" = ../../../../clanServices/importer;
inventory = {
machines.machine = { };
instances.importer = {
module.name = "@clan/importer";
module.input = "self";
roles.default.tags.all = { };
roles.default.extraModules = [
{
clan.core.postgresql.enable = true;
clan.core.postgresql.users.test = { };
clan.core.postgresql.databases.test.create.options.OWNER = "test";
clan.core.settings.directory = ./.;
}
];
};
};
};
# TODO: Broken. Use this instead of the importer once fixed.
# nodes.machine = { };
testScript =
let
runpg = "runuser -u postgres -- /run/current-system/sw/bin/psql";

View File

@@ -9,11 +9,28 @@
clan = {
directory = ./.;
machines.server = {
clan.core.settings.state-version.enable = true;
# Workaround until we can use nodes.server = { };
modules."@clan/importer" = ../../../../clanServices/importer;
inventory = {
machines.server = { };
instances.importer = {
module.name = "@clan/importer";
module.input = "self";
roles.default.tags.all = { };
roles.default.extraModules = [
{
clan.core.settings.state-version.enable = true;
}
];
};
};
};
# TODO: Broken. Use this instead of the importer once fixed.
# nodes.server = { };
# This is not an actual VM test; it is a workaround to
# generate the needed vars for the eval test.
testScript = "";

View File

@@ -304,15 +304,6 @@ in
description = "The unix file mode of the file. Must be a 4-digit octal number.";
default = "0400";
};
exists = mkOption {
description = ''
Returns true if the file exists. This is used to guard against reading an unset value during evaluation.
This currently only works for non-secret files.
'';
type = bool;
default = if file.config.secret then throw "Cannot determine existence of secret file" else false;
defaultText = "Throws an error because the existence of a secret file cannot be determined";
};
value =
mkOption {
description = ''

View File

@@ -25,7 +25,7 @@ in
);
value = mkIf (file.config.secret == false) (
# dynamically adjust priority to allow overriding with mkDefault in case the file is not found
if file.config.exists then
if (pathExists file.config.flakePath) then
# if the file is found it should have normal priority
readFile file.config.flakePath
else
@@ -34,7 +34,6 @@ in
throw "Please run `clan vars generate ${config.clan.core.settings.machine.name}` as file was not found: ${file.config.path}"
)
);
exists = mkIf (file.config.secret == false) (pathExists file.config.flakePath);
};
};
}

Some files were not shown because too many files have changed in this diff.