Compare commits


1 Commit

Author: lassulus · SHA1: 1b3f087079 · Message: WIP: zerotier refactor · Date: 2025-09-23 12:12:22 +02:00
291 changed files with 2315 additions and 23298 deletions

CONTRIBUTING.md (new file)

@@ -0,0 +1,4 @@
# Contributing to Clan
<!-- Local file: docs/CONTRIBUTING.md -->
Go to the Contributing guide at https://docs.clan.lol/guides/contributing/CONTRIBUTING


@@ -13,6 +13,8 @@
fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";
# We need to use `mkForce` because we inherit from `test-install-machine`
# which currently hardcodes `nixpkgs.hostPlatform`
nixpkgs.hostPlatform = lib.mkForce system;
imports = [ self.nixosModules.test-flash-machine ];
@@ -26,9 +28,6 @@
{
imports = [ self.nixosModules.test-install-machine-without-system ];
# We don't want our system to define any `vars` generators as these can't
# be generated as the flake is inside `/nix/store`.
clan.core.settings.state-version.enable = false;
clan.core.vars.generators.test = lib.mkForce { };
disko.devices.disk.main.preCreateHook = lib.mkForce "";
@@ -60,11 +59,11 @@
pkgs.kbd.out
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.ConfigIniFiles
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.FileSlurp
pkgs.bubblewrap
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript.drvPath
(import ../installation/facter-report.nix pkgs.hostPlatform.system)
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
@@ -88,7 +87,7 @@
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = "";
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
experimental-features = [
"nix-command"
"flakes"


@@ -0,0 +1,10 @@
system:
builtins.fetchurl {
url = "https://git.clan.lol/clan/test-fixtures/raw/commit/4a2bc56d886578124b05060d3fb7eddc38c019f8/nixos-vm-facter-json/${system}.json";
sha256 =
{
aarch64-linux = "sha256:1rlfymk03rmfkm2qgrc8l5kj5i20srx79n1y1h4nzlpwaz0j7hh2";
x86_64-linux = "sha256:16myh0ll2gdwsiwkjw5ba4dl23ppwbsanxx214863j7nvzx42pws";
}
.${system};
}
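For orientation, a minimal sketch of how this helper is consumed elsewhere in this diff (the same `import ./facter-report.nix pkgs.hostPlatform.system` call appears in the hunks below); the surrounding module is illustrative:

```nix
{ pkgs, ... }:
{
  # Hand the pre-generated facter hardware report for the current system
  # to nixos-facter instead of probing real hardware during the test.
  facter.reportPath = import ./facter-report.nix pkgs.hostPlatform.system;
}
```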


@@ -1,8 +1,8 @@
{
config,
self,
lib,
privateInputs,
...
}:
{
@@ -14,37 +14,26 @@
# you can get a new one by adding
# client.fail("cat test-flake/machines/test-install-machine/facter.json >&2")
# to the installation test.
clan.machines = {
test-install-machine-without-system = {
clan.machines.test-install-machine-without-system = {
fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";
imports = [
self.nixosModules.test-install-machine-without-system
];
};
clan.machines.test-install-machine-with-system =
{ pkgs, ... }:
{
# https://git.clan.lol/clan/test-fixtures
facter.reportPath = import ./facter-report.nix pkgs.hostPlatform.system;
fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";
imports = [
self.nixosModules.test-install-machine-without-system
];
imports = [ self.nixosModules.test-install-machine-without-system ];
};
}
// (lib.listToAttrs (
lib.map (
system:
lib.nameValuePair "test-install-machine-${system}" {
imports = [
self.nixosModules.test-install-machine-without-system
(
if privateInputs ? test-fixtures then
{
facter.reportPath = privateInputs.test-fixtures + /nixos-vm-facter-json/${system}.json;
}
else
{ nixpkgs.hostPlatform = system; }
)
];
fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";
}
) (lib.filter (lib.hasSuffix "linux") config.systems)
));
flake.nixosModules = {
test-install-machine-without-system =
@@ -160,12 +149,13 @@
closureInfo = pkgs.closureInfo {
rootPaths = [
privateInputs.clan-core-for-checks
self.nixosConfigurations."test-install-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
self.nixosConfigurations."test-install-machine-${pkgs.hostPlatform.system}".config.system.build.initialRamdisk
self.nixosConfigurations."test-install-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.initialRamdisk
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.buildPackages.xorg.lndir
(import ./facter-report.nix pkgs.hostPlatform.system)
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
};
@@ -215,7 +205,7 @@
# Prepare test flake and Nix store
flake_dir = prepare_test_flake(
temp_dir,
"${self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks}",
"${self.checks.x86_64-linux.clan-core-for-checks}",
"${closureInfo}"
)
@@ -226,22 +216,6 @@
"${../assets/ssh/privkey}"
)
# Run clan install from host using port forwarding
clan_cmd = [
"${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
"machines",
"init-hardware-config",
"--debug",
"--flake", str(flake_dir),
"--yes", "test-install-machine-without-system",
"--host-key-check", "none",
"--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
"-i", ssh_conn.ssh_key,
"--option", "store", os.environ['CLAN_TEST_STORE']
]
subprocess.run(clan_cmd, check=True)
# Run clan install from host using port forwarding
clan_cmd = [
"${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
@@ -296,7 +270,7 @@
# Prepare test flake and Nix store
flake_dir = prepare_test_flake(
temp_dir,
"${self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks}",
"${self.checks.x86_64-linux.clan-core-for-checks}",
"${closureInfo}"
)


@@ -147,11 +147,28 @@ let
];
doCheck = false;
};
# Common closure info
closureInfo = pkgs.closureInfo {
rootPaths = [
self.checks.x86_64-linux.clan-core-for-checks
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.initialRamdisk
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.clan.deployment.file
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.buildPackages.xorg.lndir
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
};
in
{
inherit
target
baseTestMachine
nixosTestLib
closureInfo
;
}


@@ -35,6 +35,7 @@
pkgs.stdenv.drvPath
pkgs.stdenvNoCC
self.nixosConfigurations.test-morph-machine.config.system.build.toplevel
(import ../installation/facter-report.nix pkgs.hostPlatform.system)
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };


@@ -29,34 +29,32 @@ nixosLib.runTest (
{ nodes, ... }:
''
import subprocess
import tempfile
from nixos_test_lib.nix_setup import setup_nix_in_nix
from nixos_test_lib.nix_setup import setup_nix_in_nix # type: ignore[import-untyped]
with tempfile.TemporaryDirectory() as temp_dir:
setup_nix_in_nix(temp_dir, None) # No closure info for this test
setup_nix_in_nix(None) # No closure info for this test
start_all()
admin1.wait_for_unit("multi-user.target")
peer1.wait_for_unit("multi-user.target")
start_all()
admin1.wait_for_unit("multi-user.target")
peer1.wait_for_unit("multi-user.target")
# peer1 should have the 'hello' file
peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")
# peer1 should have the 'hello' file
peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")
ls_out = peer1.succeed("ls -la ${nodes.peer1.clan.core.vars.generators.new-service.files.a-secret.path}")
# Check that the file is owned by 'nobody'
assert "nobody" in ls_out, f"File is not owned by 'nobody': {ls_out}"
# Check that the file is in the 'users' group
assert "users" in ls_out, f"File is not in the 'users' group: {ls_out}"
# Check that the file is in the '0644' mode
assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}"
ls_out = peer1.succeed("ls -la ${nodes.peer1.clan.core.vars.generators.new-service.files.a-secret.path}")
# Check that the file is owned by 'nobody'
assert "nobody" in ls_out, f"File is not owned by 'nobody': {ls_out}"
# Check that the file is in the 'users' group
assert "users" in ls_out, f"File is not in the 'users' group: {ls_out}"
# Check that the file is in the '0644' mode
assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}"
# Run clan command
result = subprocess.run(
["${
clan-core.packages.${hostPkgs.system}.clan-cli
}/bin/clan", "machines", "list", "--flake", "${config.clan.test.flakeForSandbox}"],
check=True
)
# Run clan command
result = subprocess.run(
["${
clan-core.packages.${hostPkgs.system}.clan-cli
}/bin/clan", "machines", "list", "--flake", "${config.clan.test.flakeForSandbox}"],
check=True
)
'';
}
)


@@ -27,9 +27,7 @@
modules.new-service = {
_class = "clan.service";
manifest.name = "new-service";
roles.peer = {
description = "A peer that uses the new-service to generate some files.";
};
roles.peer = { };
perMachine = {
nixosModule = {
# This should be generated by:


@@ -34,9 +34,7 @@ nixosLib.runTest (
modules.new-service = {
_class = "clan.service";
manifest.name = "new-service";
roles.peer = {
description = "A peer that uses the new-service to generate some files.";
};
roles.peer = { };
perMachine = {
nixosModule = {
# This should be generated by:


@@ -67,15 +67,6 @@
];
};
nix.settings = {
flake-registry = "";
# required for setting the `flake-registry`
experimental-features = [
"nix-command"
"flakes"
];
};
# Define the mounts that exist in the container to prevent them from being stopped
fileSystems = {
"/" = {
@@ -115,13 +106,13 @@
let
closureInfo = pkgs.closureInfo {
rootPaths = [
self.packages.${pkgs.hostPlatform.system}.clan-cli
self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks
self.packages.${pkgs.system}.clan-cli
self.checks.${pkgs.system}.clan-core-for-checks
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-update-machine.config.system.build.toplevel
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.buildPackages.xorg.lndir
pkgs.bubblewrap
(import ../installation/facter-report.nix pkgs.hostPlatform.system)
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
};
@@ -132,7 +123,7 @@
imports = [ self.nixosModules.test-update-machine ];
};
extraPythonPackages = _p: [
self.legacyPackages.${pkgs.hostPlatform.system}.nixosTestLib
self.legacyPackages.${pkgs.system}.nixosTestLib
];
testScript = ''
@@ -154,7 +145,7 @@
# Prepare test flake and Nix store
flake_dir = prepare_test_flake(
temp_dir,
"${self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks}",
"${self.checks.x86_64-linux.clan-core-for-checks}",
"${closureInfo}"
)
(flake_dir / ".clan-flake").write_text("") # Ensure .clan-flake exists
@@ -221,13 +212,12 @@
[
"${pkgs.nix}/bin/nix",
"copy",
"--from",
f"{temp_dir}/store",
"--to",
"ssh://root@192.168.1.1",
"--no-check-sigs",
f"${self.packages.${pkgs.hostPlatform.system}.clan-cli}",
f"${self.packages.${pkgs.system}.clan-cli}",
"--extra-experimental-features", "nix-command flakes",
"--from", f"{os.environ["TMPDIR"]}/store"
],
check=True,
env={
@@ -242,7 +232,7 @@
"-o", "UserKnownHostsFile=/dev/null",
"-o", "StrictHostKeyChecking=no",
f"root@192.168.1.1",
"${self.packages.${pkgs.hostPlatform.system}.clan-cli}/bin/clan",
"${self.packages.${pkgs.system}.clan-cli}/bin/clan",
"machines",
"update",
"--debug",
@@ -270,7 +260,7 @@
# Run clan update command
subprocess.run([
"${self.packages.${pkgs.hostPlatform.system}.clan-cli-full}/bin/clan",
"${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
"machines",
"update",
"--debug",
@@ -297,7 +287,7 @@
# Run clan update command with --build-host
subprocess.run([
"${self.packages.${pkgs.hostPlatform.system}.clan-cli-full}/bin/clan",
"${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
"machines",
"update",
"--debug",

View File

@@ -1,14 +1,14 @@
{
_class = "clan.service";
manifest.name = "clan-core/admin";
manifest.description = "Adds a root user with ssh access";
manifest.description = "Convenient Administration for the Clan App";
manifest.categories = [ "Utility" ];
roles.default = {
description = "Placeholder role to apply the admin service";
interface =
{ lib, ... }:
{
options = {
allowedKeys = lib.mkOption {
default = { };


@@ -9,7 +9,7 @@ inventory.instances = {
};
roles.client.machines."jon".settings = {
destinations."storagebox" = {
repo = "username@hostname:/./borgbackup";
repo = "username@$hostname:/./borgbackup";
rsh = ''ssh -oPort=23 -i /run/secrets/vars/borgbackup/borgbackup.ssh'';
};
};
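For completeness, a hedged sketch of the server side of the same instance (the machine name `backup-host` is hypothetical; the `server` role itself is defined in the service hunk that follows):

```nix
inventory.instances = {
  borgbackup = {
    module = { name = "borgbackup"; input = "clan-core"; };
    # The server stores the backups pushed by all client machines
    roles.server.machines."backup-host" = { };
  };
};
```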


@@ -9,7 +9,7 @@
# TODO: a client can only be in one instance, add constraint
roles.server = {
description = "A borgbackup server that stores the backups of clients.";
interface =
{ lib, ... }:
{
@@ -54,7 +54,7 @@
authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machineName)) ];
# };
# }) machinesWithKey;
}) (roles.client.machines or { });
}) roles.client.machines;
in
hosts;
};
@@ -62,7 +62,6 @@
};
roles.client = {
description = "A borgbackup client that backs up to all borgbackup server roles.";
interface =
{
lib,
@@ -188,7 +187,7 @@
config.clan.core.vars.generators.borgbackup.files."borgbackup.ssh".path
} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
};
}) (builtins.attrNames (roles.server.machines or { }));
}) (builtins.attrNames roles.server.machines);
in
(builtins.listToAttrs destinations);


@@ -2,12 +2,12 @@
{
_class = "clan.service";
manifest.name = "certificates";
manifest.description = "Sets up a PKI certificate chain using step-ca";
manifest.description = "Sets up a certificates internal to your Clan";
manifest.categories = [ "Network" ];
manifest.readme = builtins.readFile ./README.md;
roles.ca = {
description = "A certificate authority that issues and signs certificates for other machines.";
interface =
{ lib, ... }:
{
@@ -184,7 +184,6 @@
# Empty role, so we can add non-CA machines to the instance to trust the CA
roles.default = {
description = "A machine that trusts the CA and can get certificates issued by it.";
interface =
{ lib, ... }:
{


@@ -45,15 +45,13 @@ inventory = {
# Add the default role to all machines, including `client`
roles.default.tags.all = { };
# DNS server queries to http://<name>.foo are resolved here
# DNS server
roles.server.machines."dnsserver".settings = {
ip = "192.168.1.2";
tld = "foo";
};
# First service
# Registers http://one.foo, which will resolve to 192.168.1.3
# underlying service runs on server01
roles.default.machines."server01".settings = {
ip = "192.168.1.3";
services = [ "one" ];


@@ -8,7 +8,7 @@
manifest.readme = builtins.readFile ./README.md;
roles.server = {
description = "A DNS server that resolves services in the clan network.";
interface =
{ lib, ... }:
{
@@ -103,7 +103,6 @@
};
roles.default = {
description = "A machine that registers the 'server' role as resolver and registers services under the configured TLD in the resolver.";
interface =
{ lib, ... }:
{


@@ -101,7 +101,6 @@ in
manifest.readme = builtins.readFile ./README.md;
roles.admin = {
description = "A data-mesher admin node that bootstraps the network and can sign new nodes into the network.";
interface =
{ lib, ... }:
{
@@ -178,7 +177,6 @@ in
};
roles.signer = {
description = "A data-mesher signer node that can sign new nodes into the network.";
interface = sharedInterface;
perInstance =
{
@@ -210,7 +208,6 @@ in
};
roles.peer = {
description = "A data-mesher peer node that connects to the network.";
interface = sharedInterface;
perInstance =
{


@@ -2,12 +2,11 @@
{
_class = "clan.service";
manifest.name = "clan-core/dyndns";
manifest.description = "A dynamic DNS service to auto update domain IPs";
manifest.description = "A dynamic DNS service to update domain IPs";
manifest.categories = [ "Network" ];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
description = "Placeholder role to apply the dyndns service";
interface =
{ lib, ... }:
{


@@ -2,34 +2,31 @@
{
_class = "clan.service";
manifest.name = "clan-core/emergency-access";
manifest.description = "Set recovery password for emergency access to machine to debug boot issues";
manifest.description = "Set recovery password for emergency access to machine";
manifest.categories = [ "System" ];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
description = "Placeholder role to apply the emergency-access service";
perInstance = {
nixosModule =
{ config, pkgs, ... }:
{
boot.initrd.systemd.emergencyAccess =
config.clan.core.vars.generators.emergency-access.files.password-hash.value;
roles.default.perInstance = {
nixosModule =
{ config, pkgs, ... }:
{
boot.initrd.systemd.emergencyAccess =
config.clan.core.vars.generators.emergency-access.files.password-hash.value;
clan.core.vars.generators.emergency-access = {
runtimeInputs = [
pkgs.coreutils
pkgs.mkpasswd
pkgs.xkcdpass
];
files.password.deploy = false;
files.password-hash.secret = false;
clan.core.vars.generators.emergency-access = {
runtimeInputs = [
pkgs.coreutils
pkgs.mkpasswd
pkgs.xkcdpass
];
files.password.deploy = false;
files.password-hash.secret = false;
script = ''
xkcdpass --numwords 4 --delimiter - --count 1 | tr -d "\n" > $out/password
mkpasswd -s -m sha-512 < $out/password | tr -d "\n" > $out/password-hash
'';
};
script = ''
xkcdpass --numwords 4 --delimiter - --count 1 | tr -d "\n" > $out/password
mkpasswd -s -m sha-512 < $out/password | tr -d "\n" > $out/password-hash
'';
};
};
};
};
}


@@ -6,7 +6,7 @@
manifest.categories = [ "System" ];
roles.default = {
description = "Placeholder role to apply the garage service";
perInstance.nixosModule =
{
config,


@@ -14,7 +14,6 @@
# defined in this file directly (e.g. the "morning" role) or split up into a
# separate file (e.g. the "evening" role)
roles.morning = {
description = "A morning greeting machine";
interface =
{ lib, ... }:
{
@@ -68,7 +67,6 @@
# the interface here, so we can see all settings of the service in one place,
# but you can also move it to the respective file
roles.evening = {
description = "An evening greeting machine";
interface =
{ lib, ... }:
{


@@ -6,7 +6,5 @@
manifest.categories = [ "Utility" ];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
description = "Placeholder role to apply the importer service";
};
roles.default = { };
}


@@ -2,13 +2,12 @@
{
_class = "clan.service";
manifest.name = "clan-core/internet";
manifest.description = "Part of the clan networking abstraction to define how to reach machines from outside the clan network over the internet, if defined has the highest priority";
manifest.description = "direct access (or via ssh jumphost) to machines";
manifest.categories = [
"System"
"Network"
];
roles.default = {
description = "Placeholder role to apply the internet service";
interface =
{ lib, ... }:
{


@@ -2,12 +2,11 @@
{
_class = "clan.service";
manifest.name = "localbackup";
manifest.description = "Automatically backups current machine to local directory or a mounted drive.";
manifest.description = "Automatically backups current machine to local directory.";
manifest.categories = [ "System" ];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
description = "Placeholder role to apply the localbackup service";
interface =
{ lib, ... }:
{


@@ -6,7 +6,6 @@
manifest.categories = [ "Social" ];
roles.default = {
description = "Placeholder role to apply the matrix-synapse service";
interface =
{ lib, ... }:
{


@@ -6,7 +6,6 @@
manifest.readme = builtins.readFile ./README.md;
roles.telegraf = {
description = "Placeholder role to apply the telegraf monitoring agent";
interface =
{ lib, ... }:
{


@@ -2,14 +2,13 @@
{
_class = "clan.service";
manifest.name = "clan-core/mycelium";
manifest.description = "End-2-end encrypted P2P IPv6 overlay network";
manifest.description = "End-2-end encrypted IPv6 overlay network";
manifest.categories = [
"System"
"Network"
];
roles.peer = {
description = "A peer in the mycelium network";
interface =
{ lib, ... }:
{


@@ -8,7 +8,6 @@
];
roles.default = {
description = "Placeholder role to apply the packages service";
interface =
{ lib, ... }:
{


@@ -1,91 +1,36 @@
# Clan service: sshd
What it does
- Generates and persists SSH host keys via `vars`.
- Optionally issues CA-signed host certificates for servers.
- Installs the `server` CA public key into clients' `known_hosts` for TOFU-less verification.
The `sshd` Clan service manages SSH to make it easy to securely access your machines over the internet. The service uses `vars` to store the SSH host keys for each machine to ensure they remain stable across deployments.
`sshd` also generates SSH certificates for both servers and clients, allowing for certificate-based authentication over SSH.
When to use it
- Zero-TOFU SSH for dynamic fleets: admins/CI can connect to frequently rebuilt hosts (e.g., server-1.example.com) without prompts or per-host `known_hosts` churn.
The service also disables password-based authentication over SSH; to access your machines you'll need to use public key authentication or certificate-based authentication.
Roles
- Server: runs sshd, presents a CA-signed host certificate for `<machine>.<domain>`.
- Client: trusts the CA for the given domains to verify servers' certificates.
Tip: assign both roles to a machine if it should both present a cert and verify others.
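A minimal sketch of that tip, assuming a hypothetical machine named `gateway` (the options are the same ones used in the examples below):

```nix
{
  inventory.instances = {
    sshd-gateway = {
      module = { name = "sshd"; input = "clan-core"; };
      # gateway presents a CA-signed host certificate to clients...
      roles.server.machines."gateway".settings = {
        certificate.searchDomains = [ "example.com" ];
      };
      # ...and also trusts the CA when it connects to other servers
      roles.client.machines."gateway".settings = {
        certificate.searchDomains = [ "example.com" ];
      };
    };
  };
}
```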
## Usage
Quick start (with host certificates)
Useful if you never want to get a prompt about trusting the ssh fingerprint.
```nix
{
inventory.instances = {
sshd-with-certs = {
module = { name = "sshd"; input = "clan-core"; };
# Servers present certificates for <machine>.example.com
roles.server.tags.all = { };
roles.server.settings = {
certificate.searchDomains = [ "example.com" ];
# Optional: also add RSA host keys
# hostKeys.rsa.enable = true;
};
# Clients trust the CA for *.example.com
roles.client.tags.all = { };
roles.client.settings = {
certificate.searchDomains = [ "example.com" ];
};
};
};
}
```
Basic: only add persistent host keys (ed25519), no certificates
Useful if you want to get an ssh "trust this server" prompt once and then never again.
```nix
{
inventory.instances = {
# By default this service only generates ed25519 host keys
sshd-basic = {
module = {
name = "sshd";
input = "clan-core";
};
roles.server.tags.all = { };
roles.client.tags.all = { };
};
};
}
```
Example: selective trust per environment
Admins should trust only production; CI should trust prod and staging. Servers are reachable under both domains.
```nix
{
inventory.instances = {
sshd-env-scoped = {
module = { name = "sshd"; input = "clan-core"; };
# Servers present certs for both prod and staging FQDNs
# Also generate RSA host keys for all servers
sshd-with-rsa = {
module = {
name = "sshd";
input = "clan-core";
};
roles.server.tags.all = { };
roles.server.settings = {
certificate.searchDomains = [ "prod.example.com" "staging.example.com" ];
};
# Admin laptop: trust prod only
roles.client.machines."admin-laptop".settings = {
certificate.searchDomains = [ "prod.example.com" ];
};
# CI runner: trust prod and staging
roles.client.machines."ci-runner-1".settings = {
certificate.searchDomains = [ "prod.example.com" "staging.example.com" ];
hostKeys.rsa.enable = true;
};
roles.client.tags.all = { };
};
};
}
```
- Admin -> server1.prod.example.com: zero-TOFU (verified via cert).
- Admin -> server1.staging.example.com: falls back to TOFU (or is blocked by policy).
- CI -> either prod or staging: zero-TOFU for both.
Note: server and client searchDomains don't have to be identical; they only need to overlap for the hostnames you actually use.
Notes
- Connect using a name that matches a cert principal (e.g., `server1.example.com`); wildcards are not allowed inside the certificate.
- CA private key stays in `vars` (not deployed); only the CA public key is distributed.
- Logins still require your user SSH keys on the server (passwords are disabled).


@@ -10,7 +10,6 @@
manifest.readme = builtins.readFile ./README.md;
roles.client = {
description = "Installs the SSH CA public key into known_hosts for the configured domains, so this machine can verify servers host certificates without TOFU prompts.";
interface =
{ lib, ... }:
{
@@ -39,6 +38,7 @@
...
}:
{
clan.core.vars.generators.openssh-ca = lib.mkIf (settings.certificate.searchDomains != [ ]) {
share = true;
files.id_ed25519.deploy = false;
@@ -64,12 +64,11 @@
};
roles.server = {
description = "Runs sshd with persistent host keys and (if certificate.searchDomains is set) a CAsigned host certificate for <machine>.<domain>, enabling TOFUless verification by clients that trust the CA.";
interface =
{ lib, ... }:
{
options = {
hostKeys.rsa.enable = lib.mkEnableOption "generating a RSA host key";
hostKeys.rsa.enable = lib.mkEnableOption "Generate RSA host key";
certificate = {
searchDomains = lib.mkOption {
@@ -97,7 +96,9 @@
...
}:
{
clan.core.vars.generators = {
openssh-ca = lib.mkIf (settings.certificate.searchDomains != [ ]) {
share = true;
files.id_ed25519.deploy = false;


@@ -13,7 +13,7 @@
}
```
Now the folder `~/syncthing/documents` will be shared and kept in sync with all your machines.
Now the folder `~/syncthing/documents` will be shared with all your machines.
## Documentation


@@ -11,7 +11,6 @@
manifest.readme = builtins.readFile ./README.md;
roles.peer = {
description = "A peer in the syncthing cluster that syncs files with other peers.";
interface =
{ lib, ... }:
{


@@ -2,17 +2,13 @@
{
_class = "clan.service";
manifest.name = "clan-core/tor";
manifest.description = "Part of the clan networking abstraction to define how to reach machines through the Tor network, if used has the lowest priority";
manifest.description = "Onion routing, use Hidden services to connect your machines";
manifest.categories = [
"System"
"Network"
];
roles.client = {
description = ''
Enables a continuously running Tor proxy on the machine, allowing access to other machines via the Tor network.
If not enabled, a Tor proxy will be started automatically when required.
'';
perInstance =
{
...
@@ -35,7 +31,6 @@
};
roles.server = {
description = "Sets up a Tor onion service for the machine, thus making it reachable over Tor.";
# interface =
# { lib, ... }:
# {


@@ -7,7 +7,7 @@
manifest.readme = builtins.readFile ./README.md;
roles.default = {
description = "Placeholder role to apply the trusted-nix-caches service";
perInstance =
{ ... }:
{


@@ -10,7 +10,6 @@
manifest.readme = builtins.readFile ./README.md;
roles.default = {
description = "Placeholder role to apply the user service";
interface =
{ lib, ... }:
{


@@ -1,21 +0,0 @@
This module allows you to pre-configure WiFi networks for automatic connection.
Each attribute in `settings.networks` serves as an internal identifier, not the actual SSID.
After defining your networks, you will be prompted for the SSID and password for each one.
This module leverages NetworkManager for managing connections.
```nix
instances = {
wifi = {
module.name = "wifi";
module.input = "clan-core";
roles.default = {
machines."jon" = {
settings.networks.home = { };
settings.networks.work = { keyMgmt = "wpa-eap"; };
};
};
};
};
```


@@ -9,11 +9,8 @@ in
{
_class = "clan.service";
manifest.name = "wifi";
manifest.description = "Pre configure wifi networks to connect to";
manifest.readme = builtins.readFile ./README.md;
roles.default = {
description = "Placeholder role to apply the wifi service";
interface = {
options.networks = lib.mkOption {
type = lib.types.attrsOf (
@@ -45,18 +42,7 @@ in
)
);
default = { };
example = {
home = { };
guest = {
autoConnect = false;
keyMgmt = "wpa-eap";
};
};
description = ''
List of wifi networks to configure for connection.
Each attribute name is an internal identifier (not the SSID).
For each network, you will be prompted to enter the SSID and password as secrets.
'';
description = "Wifi networks to predefine";
};
};


@@ -146,7 +146,6 @@ in
# Peer options and configuration
roles.peer = {
description = "A peer that connects to one or more controllers.";
interface =
{ lib, ... }:
{
@@ -262,7 +261,6 @@ in
# Controller options and configuration
roles.controller = {
description = "A controller that routes peer traffic. Must be publicly reachable.";
interface =
{ lib, ... }:
{


@@ -5,7 +5,6 @@
manifest.description = "Yggdrasil encrypted IPv6 routing overlay network";
roles.default = {
description = "Placeholder role to apply the yggdrasil service";
interface =
{ lib, ... }:
{
@@ -89,7 +88,7 @@
enable = true;
openMulticastPort = true;
# We don't need this option, because we persist our keys with
# vars by ourselves. This option creates an unnecessary additional
# systemd service to save/load the keys and should be removed
# from the NixOS module entirely, as it can be replaced by the
# (at the time of writing undocumented) PrivateKeyPath= setting.


@@ -2,100 +2,78 @@
{
_class = "clan.service";
manifest.name = "clan-core/zerotier";
manifest.description = "Zerotier Mesh VPN Service for secure P2P networking between machines";
manifest.description = "Configuration of the secure and efficient Zerotier VPN";
manifest.categories = [ "Utility" ];
manifest.readme = builtins.readFile ./README.md;
roles.peer = {
description = "A peer that connects to your private Zerotier network.";
perInstance =
{
instanceName,
roles,
lib,
...
}:
{
exports.networking = {
priority = lib.mkDefault 900;
# TODO add user space network support to clan-cli
module = "clan_lib.network.zerotier";
peers = lib.mapAttrs (name: _machine: {
host.var = {
machine = name;
generator = "zerotier";
file = "zerotier-ip";
};
}) roles.peer.machines;
};
nixosModule =
{
config,
lib,
pkgs,
...
}:
{
imports = [
(import ./shared.nix {
inherit
instanceName
roles
config
lib
pkgs
;
})
];
};
};
};
roles.moon = {
description = "A moon acts as a relay node to connect other nodes in the zerotier network that are not publicly reachable. Each moon must be publicly reachable.";
roles.default = {
interface =
{ lib, ... }:
{
options.stableEndpoints = lib.mkOption {
type = lib.types.listOf lib.types.str;
options.networkId = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = ''
Make this machine a moon.
Other machines can join this moon by adding this moon in their config.
It will be reachable under the given stable endpoints.
'';
example = ''
[ "1.2.3.4" "10.0.0.3/9993" "2001:abcd:abcd::3/9993" ]
The zerotier network id to use. If not set, a network will be generated for you.
If you administer your zerotier network via my.zerotier.com, you should set this.
'';
example = "8056c2e21c000001";
};
};
perInstance =
{
instanceName,
settings,
roles,
...
}:
{ instanceName, settings, ... }:
{
nixosModule =
{
config,
lib,
pkgs,
lib,
...
}:
{
config.clan.core.networking.zerotier.moon.stableEndpoints = settings.stableEndpoints;
config = lib.mkMerge [
# code to start/configure zerotier
({ config, ... }: {
services.zerotierone = {
enable = true;
joinNetworks = [ config.clan."zerotier_${instanceName}".networkId ];
};
systemd.network.networks."09-zerotier" = {
matchConfig.Name = "zt*";
networkConfig = {
LLDP = true;
MulticastDNS = true;
KeepConfiguration = "static";
};
};
imports = [
(import ./shared.nix {
inherit
instanceName
roles
config
lib
pkgs
;
systemd.services.zerotierone.serviceConfig.ExecStartPre = [
"+${pkgs.writeShellScript "init-zerotier" ''
# compare hashes of the current identity secret and the one in the config
hash1=$(sha256sum /var/lib/zerotier-one/identity.secret | cut -d ' ' -f 1)
hash2=$(sha256sum ${config.clan.core.vars.generators.zerotier.files.zerotier-identity-secret.path} | cut -d ' ' -f 1)
if [[ "$hash1" != "$hash2" ]]; then
echo "Identity secret has changed, backing up old identity to /var/lib/zerotier-one/identity.secret.bac"
cp /var/lib/zerotier-one/identity.secret /var/lib/zerotier-one/identity.secret.bac
cp /var/lib/zerotier-one/identity.public /var/lib/zerotier-one/identity.public.bac
cp ${config.clan.core.vars.generators.zerotier.files.zerotier-identity-secret.path} /var/lib/zerotier-one/identity.secret
zerotier-idtool getpublic /var/lib/zerotier-one/identity.secret > /var/lib/zerotier-one/identity.public
fi
# cleanup old networks
if [[ -d /var/lib/zerotier-one/networks.d ]]; then
find /var/lib/zerotier-one/networks.d \
-type f \
-name "*.conf" \
-not \( ${
lib.concatMapStringsSep " -o " (
netId: ''-name "${netId}.conf"''
) config.services.zerotierone.joinNetworks
} \) \
-delete
fi
''}"
];
})
];
};
@@ -103,20 +81,25 @@
};
roles.controller = {
description = "Manages network membership and is responsible for admitting new peers to your Zerotier network.";
interface =
{ lib, ... }:
{
options.allowedIps = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = ''
Extra machines by their zerotier ip that the zerotier controller
should accept. These could be external machines.
'';
example = ''
[ "fd5d:bbe3:cbc5:fe6b:f699:935d:bbe3:cbc5" ]
'';
options = {
allowedIps = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = ''
Extra machines by their zerotier ip that the zerotier controller
should accept. These could be external machines.
'';
example = ''
[ "fd5d:bbe3:cbc5:fe6b:f699:935d:bbe3:cbc5" ]
'';
};
settings = lib.mkOption {
description = "override the network config in /var/lib/zerotier/bla/$network.json";
type = lib.types.json;
};
};
};
@@ -139,54 +122,135 @@
uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list);
in
{
imports = [
(import ./shared.nix {
inherit
instanceName
roles
config
lib
pkgs
;
})
];
config = {
systemd.services.zerotier-inventory-autoaccept =
let
machines = uniqueStrings (
(lib.optionals (roles ? moon) (lib.attrNames roles.moon.machines))
++ (lib.optionals (roles ? controller) (lib.attrNames roles.controller.machines))
++ (lib.optionals (roles ? peer) (lib.attrNames roles.peer.machines))
);
networkIps = builtins.foldl' (
ips: name:
if
builtins.pathExists "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value"
then
ips
++ [
(builtins.readFile "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value")
]
else
ips
) [ ] machines;
allHostIPs = settings.allowedIps ++ networkIps;
in
{
wantedBy = [ "multi-user.target" ];
after = [ "zerotierone.service" ];
path = [ config.clan.core.clanPkgs.zerotierone ];
serviceConfig.ExecStart = pkgs.writeShellScript "zerotier-inventory-autoaccept" ''
${lib.concatMapStringsSep "\n" (host: ''
${config.clan.core.clanPkgs.zerotier-members}/bin/zerotier-members allow --member-ip ${host}
'') allHostIPs}
'';
};
systemd.services.zerotierone.serviceConfig.ExecStartPre = [
"+${pkgs.writeShellScript "init-zerotier-${instanceName}" ''
mkdir -p /var/lib/zerotier-one/controller.d/${instanceName}
ln -sfT ${pkgs.writeText "net.json" (builtins.toJSON settings.settings)} /var/lib/zerotier-one/controller.d/network/${config.clan."zerotier_${instanceName}".networkId}.json
clan.core.networking.zerotier.controller.enable = lib.mkDefault true;
};
''}"
];
systemd.services.zerotierone.serviceConfig.ExecStartPost = [
"+${pkgs.writeShellScript "whitelist-controller" ''
${config.clan.core.clanPkgs.zerotier-members}/bin/zerotier-members allow ${
builtins.substring 0 10 config.clan."zerotier_${instanceName}".networkId
}
''}"
];
systemd.services.zerotier-inventory-autoaccept =
let
machines = uniqueStrings (
(lib.optionals (roles ? moon) (lib.attrNames roles.moon.machines))
++ (lib.optionals (roles ? controller) (lib.attrNames roles.controller.machines))
++ (lib.optionals (roles ? peer) (lib.attrNames roles.peer.machines))
);
networkIps = builtins.foldl' (
ips: name:
if
builtins.pathExists "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value"
then
ips
++ [
(builtins.readFile "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value")
]
else
ips
) [ ] machines;
allHostIPs = settings.allowedIps ++ networkIps;
in
{
wantedBy = [ "multi-user.target" ];
after = [ "zerotierone.service" ];
path = [ config.clan.core.clanPkgs.zerotierone ];
serviceConfig.ExecStart = pkgs.writeShellScript "zerotier-inventory-autoaccept" ''
${lib.concatMapStringsSep "\n" (host: ''
${config.clan.core.clanPkgs.zerotier-members}/bin/zerotier-members allow --member-ip ${host}
'') allHostIPs}
'';
};
};
};
};
# there are a bunch of different scenarios that can happen which we need to take care of
## controller is not a peer (the controller doesn't have an ipv4 and is not part of the network)
## controller has multiple networks (currently the code only creates a single network per controller, so we need to figure out the API call to get another network created)
## controller has multiple networks but is peer in only some
## I guess we need to make an attrset of controllers to networks and then create the networks with the controller key
# every controller key will be shared, so all network specific ids can be shared
perMachine = { instances, machine, lib, ... }: let
# an attrset of { controller1 = { instance1 = {}; instance2 = {}; }; controller2 = { instance3 = {}; }; }
controllerNetworks = lib.foldlAttrs (acc: instanceName: instance: lib.recursiveUpdate acc { ${lib.head (lib.attrNames instance.roles.controller.machines)} = { ${instanceName} = {}; }; }) {} instances;
getInstanceController = instance: lib.head (lib.attrNames instance.roles.controller.machines);
in {
nixosModule = { pkgs, config, ... }: {
config = lib.mkMerge [
{ # every controller gets a shared network key, from which we can derive multiple network ids
# we have this var shared, so we can create it when evaluating any machine
clan.core.vars.generators = lib.mapAttrs' (controllerName: _: lib.nameValuePair "zerotier_controller_${controllerName}" {
shared = true;
files.zerotier-identity-secret.deploy = false;
runtimeInputs = [
config.services.zerotierone.package
pkgs.python3
];
script = ''
python3 ${./generate.py} --mode network \
--ip "$out/zerotier-ip" \
--identity-secret "$out/zerotier-identity-secret"
'';
}) controllerNetworks;
}
{ # every instance in a controller gets a network id, which is derived from the controller's shared network key
clan.core.vars.generators = lib.mapAttrs' (instanceName: instance: lib.nameValuePair "zerotier_network_${instanceName}" {
shared = true;
files.zerotier-network-id.secret = false;
dependencies = [
config.clan.core.vars.generators."zerotier_controller_${getInstanceController instance}"
];
runtimeInputs = [
config.services.zerotierone.package
pkgs.python3
];
script = ''
python3 ${./generate.py} --mode network-id \
--network-id "$out/zerotier-network-id"
# TODO we need to pass in the controller key
'';
}) instances;
}
{ # define nixos options, which we need to propagate certain information to other roles (like controller)
options.clan = lib.mapAttrs' (instanceName: _: lib.nameValuePair "zerotier_${instanceName}" {
networkId = lib.mkOption {
type = lib.types.str;
description = "The zerotier network id assigned to this machine";
default = if instances.${instanceName}.settings.networkId != null then instances.${instanceName}.settings.networkId else "generated-per-machine";
};
});
}
# if we have set null as networkId, we assume that we have a controller and generate a shared key for it
# (lib.mkIf (settings.networkId == null) {
# clan.core.vars.generators."zerotier_${instanceName}" = {
# files.zerotier-network-id.secret = false;
# files.zerotier-identity-secret.deploy = false;
# shared = true;
# runtimeInputs = [
# config.services.zerotierone.package
# pkgs.python3
# ];
# script = ''
# source ${(pkgs.callPackage ../../../pkgs/minifakeroot { })}/share/minifakeroot/rc
# python3 ${./generate.py} --mode network \
# --ip "$out/zerotier-ip" \
# --identity-secret "$out/zerotier-identity-secret" \
# --network-id "$out/zerotier-network-id"
# '';
# };
# clan."zerotier_${instanceName}".networkId =
# config.clan.core.vars.generators."zerotier_${instanceName}".files.zerotier-network-id.value;
# })
];
};
};
};
}
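To illustrate the new `networkId` option introduced above, a hedged inventory sketch (instance and machine names are hypothetical; leaving `networkId` at `null` generates a network for you, as the option description says):

```nix
{
  inventory.instances = {
    zerotier = {
      module = { name = "zerotier"; input = "clan-core"; };
      roles.controller.machines."server1" = { };
      roles.peer.tags.all = { };
      # Reuse a network administered via my.zerotier.com
      # instead of letting the controller generate one
      roles.default.settings.networkId = "8056c2e21c000001";
    };
  };
}
```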

View File

@@ -28,38 +28,5 @@ in
config = {
clan.core.networking.zerotier.networkId = networkId;
clan.core.networking.zerotier.name = instanceName;
systemd.services.zerotierone.serviceConfig.ExecStartPost = lib.mkIf (moonIps != [ ]) (
lib.mkAfter [
"+${pkgs.writeScript "orbit-moons-by-ip" ''
#!${pkgs.python3.interpreter}
import json
import ipaddress
import subprocess
def compute_member_id(ipv6_addr: str) -> str:
addr = ipaddress.IPv6Address(ipv6_addr)
addr_bytes = bytearray(addr.packed)
# Extract the bytes corresponding to the member_id (node_id)
node_id_bytes = addr_bytes[10:16]
node_id = int.from_bytes(node_id_bytes, byteorder="big")
member_id = format(node_id, "x").zfill(10)[-10:]
return member_id
def main() -> None:
ips = json.loads(${builtins.toJSON (builtins.toJSON moonIps)})
for ip in ips:
member_id = compute_member_id(ip)
res = subprocess.run(["zerotier-cli", "orbit", member_id, member_id])
if res.returncode != 0:
print(f"Failed to add {member_id} to orbit")
if __name__ == "__main__":
main()
''}"
]
);
};
}

devFlake/flake.lock (generated)

@@ -3,10 +3,10 @@
"clan-core-for-checks": {
"flake": false,
"locked": {
"lastModified": 1759915474,
"narHash": "sha256-ef7awwmx2onWuA83FNE29B3tTZ+tQxEWLD926ckMiF8=",
"lastModified": 1756166884,
"narHash": "sha256-skg4rwpbCjhpLlrv/Pndd43FoEgrJz98WARtGLhCSzo=",
"ref": "main",
"rev": "81e15cab34f9ae00b6f2df5f2e53ee07cd3a0af3",
"rev": "f7414d7e6e58709af27b6fe16eb530278e81eaaf",
"shallow": true,
"type": "git",
"url": "https://git.clan.lol/clan/clan-core"
@@ -18,27 +18,6 @@
"url": "https://git.clan.lol/clan/clan-core"
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": [
"test-fixtures",
"nixpkgs"
]
},
"locked": {
"lastModified": 1741352980,
"narHash": "sha256-+u2UunDA4Cl5Fci3m7S643HzKmIDAe+fiXrLqYsR2fs=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "f4330d22f1c5d2ba72d3d22df5597d123fdb60a9",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": [
@@ -105,11 +84,11 @@
},
"nixpkgs-dev": {
"locked": {
"lastModified": 1759860509,
"narHash": "sha256-c7eJvqAlWLhwNc9raHkQ7mvoFbHLUO/cLMrww1ds4Zg=",
"lastModified": 1758573205,
"narHash": "sha256-0ybDco+HjG5h46wx7ww4JIyg3y/mBDgkMCVX/Ua0e/Q=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "b574dcadf3fb578dee8d104b565bd745a5a9edc0",
"rev": "803b1683f562edc00665874bf98c1aad0b111482",
"type": "github"
},
"original": {
@@ -128,11 +107,11 @@
]
},
"locked": {
"lastModified": 1758662783,
"narHash": "sha256-igrxT+/MnmcftPOHEb+XDwAMq3Xg1Xy7kVYQaHhPlAg=",
"lastModified": 1758272005,
"narHash": "sha256-1u3xTH+3kaHhztPmWtLAD8LF5pTYLR2CpsPFWTFnVtQ=",
"owner": "NuschtOS",
"repo": "search",
"rev": "7d4c0fc4ffe3bd64e5630417162e9e04e64b27a4",
"rev": "aa975a3757f28ce862812466c5848787b868e116",
"type": "github"
},
"original": {
@@ -148,7 +127,6 @@
"nixpkgs-dev": "nixpkgs-dev",
"nuschtos": "nuschtos",
"systems": "systems_2",
"test-fixtures": "test-fixtures",
"treefmt-nix": "treefmt-nix"
}
},
@@ -182,37 +160,16 @@
"type": "github"
}
},
"test-fixtures": {
"inputs": {
"flake-parts": "flake-parts",
"nixpkgs": [
"nixpkgs-dev"
]
},
"locked": {
"lastModified": 1742806412,
"narHash": "sha256-ZoAN0/sHEHY+ymJnkdkBAuZ/6sc66RYR4xMHpLf7++E=",
"ref": "refs/heads/main",
"rev": "4a2bc56d886578124b05060d3fb7eddc38c019f8",
"revCount": 2,
"type": "git",
"url": "https://git.clan.lol/clan/test-fixtures"
},
"original": {
"type": "git",
"url": "https://git.clan.lol/clan/test-fixtures"
}
},
"treefmt-nix": {
"inputs": {
"nixpkgs": []
},
"locked": {
"lastModified": 1758728421,
"narHash": "sha256-ySNJ008muQAds2JemiyrWYbwbG+V7S5wg3ZVKGHSFu8=",
"lastModified": 1758206697,
"narHash": "sha256-/DbPkh6PZOgfueCbs3uzlk4ASU2nPPsiVWhpMCNkAd0=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "5eda4ee8121f97b218f7cc73f5172098d458f1d1",
"rev": "128222dc911b8e2e18939537bed1762b7f3a04aa",
"type": "github"
},
"original": {


@@ -18,8 +18,5 @@
inputs.clan-core-for-checks.url = "git+https://git.clan.lol/clan/clan-core?ref=main&shallow=1";
inputs.clan-core-for-checks.flake = false;
inputs.test-fixtures.url = "git+https://git.clan.lol/clan/test-fixtures";
inputs.test-fixtures.inputs.nixpkgs.follows = "nixpkgs-dev";
outputs = inputs: inputs;
}

docs/.gitignore (vendored)

@@ -1,5 +1,4 @@
/site/reference
/site/services/official
/site/static
/site/options
/site/openapi.json


@@ -1,5 +1,6 @@
# Contributing to Clan
**Continuous Integration (CI)**: Each pull request gets automatically tested by gitea. If any errors are detected, it will block pull requests until they're resolved.
**Dependency Management**: We use the [Nix package manager](https://nixos.org/) to manage dependencies and ensure reproducibility, making your development process more robust.
@@ -9,27 +10,25 @@
- Linux
- macOS
## Getting Started with the Development Environment
# Getting Started with the Development Environment
Let's get your development environment up and running:
1. **Install Nix Package Manager**:
- You can install the Nix package manager by either [downloading the Nix installer](https://github.com/DeterminateSystems/nix-installer/releases) or running this command:
```bash
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
```bash
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
2. **Install direnv**:
1. **Install direnv**:
- To automatically set up a devshell on entering the directory
```bash
nix profile install nixpkgs#nix-direnv-flakes nixpkgs#direnv
```
```bash
nix profile install nixpkgs#nix-direnv-flakes nixpkgs#direnv
```
3. **Add direnv to your shell**:
1. **Add direnv to your shell**:
- Direnv needs to [hook into your shell](https://direnv.net/docs/hook.html) to work.
You can do this by executing the following command. The example below will set up direnv for `zsh` and `bash`
@@ -38,43 +37,34 @@ Let's get your development environment up and running:
echo 'eval "$(direnv hook zsh)"' >> ~/.zshrc && echo 'eval "$(direnv hook bash)"' >> ~/.bashrc && eval "$SHELL"
```
4. **Allow the devshell**
1. **Allow the devshell**
- Go to `clan-core/pkgs/clan-cli` and do a `direnv allow` to set up the necessary development environment to execute the `clan` command
5. **Create a Gitea Account**
1. **Create a Gitea Account**:
- Register an account on https://git.clan.lol
- Fork the [clan-core](https://git.clan.lol/clan/clan-core) repository
- Clone the repository and navigate to it
- Add a new remote called upstream
```bash
git remote add upstream gitea@git.clan.lol:clan/clan-core.git
```
7. **Allow .envrc**
- Add a new remote called upstream:
```bash
git remote add upstream gitea@git.clan.lol:clan/clan-core.git
```
1. **Allow .envrc**:
- When you enter the directory, you'll receive an error message like this:
```bash
direnv: error .envrc is blocked. Run `direnv allow` to approve its content
```
```bash
direnv: error .envrc is blocked. Run `direnv allow` to approve its content
```
- Execute `direnv allow` to automatically execute the shell script `.envrc` when entering the directory.
8. **(Optional) Install Git Hooks**
1. **(Optional) Install Git Hooks**:
- To syntax check your code you can run:
```bash
nix fmt
```
```bash
nix fmt
```
- To make this automatic, install the git hooks:
```bash
./scripts/pre-commit
```
```bash
./scripts/pre-commit
```
## Related Projects
@@ -83,7 +73,7 @@ Let's get your development environment up and running:
- **Nixos Anywhere**: [nixos-anywhere](https://github.com/nix-community/nixos-anywhere)
- **Disko**: [disko](https://github.com/nix-community/disko)
### Override related projects for local development
## Fixing Bugs or Adding Features in Clan-CLI
If you have a bug fix or feature that involves a related project, clone the relevant repository and replace its invocation in your local setup.
@@ -112,10 +102,10 @@ run(
```
The `<path_to_local_src>` doesn't need to be a local path, it can be any valid [flakeref](https://nix.dev/manual/nix/2.26/command-ref/new-cli/nix3-flake.html#flake-references).
The <path_to_local_src> doesn't need to be a local path, it can be any valid [flakeref](https://nix.dev/manual/nix/2.26/command-ref/new-cli/nix3-flake.html#flake-references).
And thus can point to test already opened PRs for example.
## Standards
# Standards
- Every new module name should be in kebab-case.
- Every fact definition, where possible should be in kebab-case.


@@ -1,5 +1,5 @@
# Serve documentation locally
```
nix develop .#docs -c mkdocs serve
$ nix develop .#docs -c mkdocs serve
```

docs/main.py (new file)

@@ -0,0 +1,41 @@
from typing import Any
def define_env(env: Any) -> None:
static_dir = "/static/"
video_dir = "https://clan.lol/" + "videos/"
asciinema_dir = static_dir + "asciinema-player/"
@env.macro
def video(name: str) -> str:
return f"""<video loop muted autoplay id="{name}">
<source src={video_dir + name} type="video/webm">
Your browser does not support the video tag.
</video>"""
@env.macro
def asciinema(name: str) -> str:
return f"""<div id="{name}">
<script>
// Function to load the script and then create the Asciinema player
function loadAsciinemaPlayer() {{
var script = document.createElement('script');
script.src = "{asciinema_dir}/asciinema-player.min.js";
script.onload = function() {{
AsciinemaPlayer.create('{video_dir + name}', document.getElementById("{name}"), {{
loop: true,
autoPlay: true,
controls: false,
speed: 1.5,
theme: "solarized-light"
}});
}};
document.head.appendChild(script);
}}
// Load the Asciinema player script
loadAsciinemaPlayer();
</script>
<link rel="stylesheet" type="text/css" href="{asciinema_dir}/asciinema-player.css" />
</div>"""


@@ -47,26 +47,26 @@ exclude_docs: |
nav:
- Getting Started:
- Overview: index.md
- getting-started/creating-your-first-clan.md
- getting-started/add-machines.md
- getting-started/add-users.md
- getting-started/add-services.md
- Creating Your First Clan: guides/getting-started/index.md
- Add Machines: guides/getting-started/add-machines.md
- Add User: guides/getting-started/add-user.md
- Add Services: guides/getting-started/add-services.md
- Deploy to Physical Machine:
- getting-started/deploy-to-physical-machine/flash-installer.md
- getting-started/deploy-to-physical-machine/install-machine.md
- getting-started/deploy-to-virtual-machine.md
- getting-started/configure-disk.md
- getting-started/update-machines.md
- getting-started/continuous-integration.md
- Convert existing NixOS configurations: getting-started/convert-existing-NixOS-configuration.md
- Create USB Installer: guides/getting-started/create-installer.md
- Deploy Physical Machine: guides/getting-started/hardware-report-physical.md
- Deploy to Virtual Machine: guides/getting-started/hardware-report-virtual.md
- Configure Disk Config: guides/getting-started/choose-disk.md
- Update Machine: guides/getting-started/update.md
- Continuous Integration: guides/getting-started/flake-check.md
- Convert Existing NixOS Config: guides/getting-started/convert-flake.md
- Guides:
- Inventory:
- Introduction to Inventory: guides/inventory/inventory.md
- File Autoincludes: guides/inventory/autoincludes.md
- Services:
- Introduction to Services: guides/services/introduction-to-services.md
- Clan Services:
- Inventory Guide: guides/inventory/clanServices.md
- Author Your Own Service: guides/services/community.md
- Internal Services with SSL: guides/internal-ssl-services.md
- Vars:
- Introduction to Vars: guides/vars/vars-overview.md
- Minimal Example: guides/vars/vars-backend.md
@@ -76,40 +76,37 @@ nav:
- Sops Backend:
- Yubikeys & Age Plugins: guides/vars/sops/age-plugins.md
- Managing Users (OLD): guides/secrets.md
- Backups:
- Introduction to Backups: guides/backups/backup-intro.md
- Minimal Example: guides/backups/minimal-example.md
- Digging Deeper: guides/backups/digging-deeper.md
- Advanced Example: guides/backups/advanced-example.md
- Networking:
- Introduction to Networking: guides/networking/networking.md
- Zerotier VPN: guides/networking/mesh-vpn.md
- Nixpkgs Flake Input: guides/nixpkgs-flake-input/index.md
- Disko Templates:
- Community Disko Templates: guides/disko-templates/community.md
- Backups:
- Introduction to Backups: guides/backups.md
- Flake-parts: guides/flake-parts.md
- NixOS Rebuild: guides/nixos-rebuild.md
- macOS: guides/macos.md
- macOS:
- Managing macOS Machines: guides/macos.md
# Should be part of the respective sections above
# machines, disko, clan
- Templates:
- Introduction to Templates: concepts/templates.md
- Community Disko Templates: guides/disko-templates/community.md
- Templates: concepts/templates.md
- Migrations:
- clan modules to clan services: guides/migrations/migrate-inventory-services.md
- Facts to Vars: guides/migrations/migration-facts-vars.md
- clan modules --> clan services: guides/migrations/migrate-inventory-services.md
- Facts --> Vars: guides/migrations/migration-facts-vars.md
- Disk id: guides/migrations/disk-id.md
- Disk Encryption: guides/disk-encryption.md
- Disable Secure Boot: guides/secure-boot.md
- Contributing:
- guides/contributing/CONTRIBUTING.md
- guides/contributing/debugging.md
- guides/contributing/testing.md
- Hacking: guides/contributing/CONTRIBUTING.md
- Advanced Debugging: guides/contributing/debugging.md
- Testing: guides/contributing/testing.md
- Reference:
- Overview: reference/index.md
- Options:
- reference/options/clan.md
- reference/options/clan_inventory.md
- reference/options/clan_service.md
- Clan Options: reference/options/clan.md
- Clan Inventory Options: reference/options/clan_inventory.md
- Clan Service API: reference/clanServices/clan-service-author-interface.md
- clan.core (Machine Options):
- Overview: reference/clan.core/index.md
@@ -143,42 +140,41 @@ nav:
- HTTP API: api.md
- Decisions:
- decisions/Architecture-decisions.md
- decisions/01-Clan-Modules.md
- decisions/02-clan-as-library.md
- decisions/03-adr-numbering-process.md
- decisions/04-fetching-nix-from-python.md
- decisions/05-deployment-parameters.md
- decisions/template.md
- Architecture Decisions: decisions/README.md
- 01-clanModules: decisions/01-ClanModules.md
- 02-clan-api: decisions/02-clan-api.md
- 03-adr-numbering-process: decisions/03-adr-numbering-process.md
- 04-fetching-nix-from-python: decisions/04-fetching-nix-from-python.md
- 05-deployment-parameters: decisions/05-deployment-parameters.md
- Template: decisions/_template.md
- Glossary: reference/glossary.md
- Services:
- services/definition.md
# Generated list from the list of official services
- Introduction to ClanServices: reference/clanServices/index.md
- Official:
- services/official/admin.md
- services/official/borgbackup.md
- services/official/certificates.md
- services/official/coredns.md
- services/official/data-mesher.md
- services/official/dyndns.md
- services/official/emergency-access.md
- services/official/garage.md
- services/official/hello-world.md
- services/official/importer.md
- services/official/localbackup.md
- services/official/matrix-synapse.md
- services/official/mycelium.md
- services/official/monitoring.md
- services/official/packages.md
- services/official/sshd.md
- services/official/syncthing.md
- services/official/trusted-nix-caches.md
- services/official/users.md
- services/official/wifi.md
- services/official/wireguard.md
- services/official/yggdrasil.md
- services/official/zerotier.md
- services/community.md
- reference/clanServices/admin.md
- reference/clanServices/borgbackup.md
- reference/clanServices/certificates.md
- reference/clanServices/coredns.md
- reference/clanServices/data-mesher.md
- reference/clanServices/dyndns.md
- reference/clanServices/emergency-access.md
- reference/clanServices/garage.md
- reference/clanServices/hello-world.md
- reference/clanServices/importer.md
- reference/clanServices/localbackup.md
- reference/clanServices/matrix-synapse.md
- reference/clanServices/mycelium.md
- reference/clanServices/monitoring.md
- reference/clanServices/packages.md
- reference/clanServices/sshd.md
- reference/clanServices/syncthing.md
- reference/clanServices/trusted-nix-caches.md
- reference/clanServices/users.md
- reference/clanServices/wifi.md
- reference/clanServices/wireguard.md
- reference/clanServices/yggdrasil.md
- reference/clanServices/zerotier.md
- Community: community/services/index.md
- Search Clan Options: "/options"
@@ -234,7 +230,7 @@ extra:
- icon: fontawesome/brands/github
link: https://github.com/clan-lol/clan-core
- icon: fontawesome/solid/rss
link: https://clan.lol/feed.xml
link: /feed_rss_created.xml
plugins:
- search

View File

@@ -3,6 +3,8 @@
module-docs,
clan-cli-docs,
clan-lib-openapi,
asciinema-player-js,
asciinema-player-css,
roboto,
fira-code,
docs-options,
@@ -42,19 +44,23 @@ pkgs.stdenv.mkDerivation {
pushd docs
mkdir -p ./site/reference/cli
cp -af ${module-docs}/services/* ./site/services/
cp -af ${module-docs}/reference/* ./site/reference/
cp -af ${module-docs}/* ./site/reference/
cp -af ${clan-cli-docs}/* ./site/reference/cli/
mkdir -p ./site/reference/internal
cp -af ${clan-lib-openapi} ./site/openapi.json
chmod -R +w ./site
chmod -R +w ./site/reference
echo "Generated API documentation in './site/reference/' "
rm -rf ./site/options
cp -r ${docs-options} ./site/options
chmod -R +w ./site/options
mkdir -p ./site/static/asciinema-player
ln -snf ${asciinema-player-js} ./site/static/asciinema-player/asciinema-player.min.js
ln -snf ${asciinema-player-css} ./site/static/asciinema-player/asciinema-player.css
# Link to fonts
ln -snf ${roboto}/share/fonts/truetype/Roboto-Regular.ttf ./site/static/
ln -snf ${fira-code}/share/fonts/truetype/FiraCode-VF.ttf ./site/static/

View File

@@ -43,6 +43,15 @@
mypy --strict $out/bin/render-options
'';
asciinema-player-js = pkgs.fetchurl {
url = "https://github.com/asciinema/asciinema-player/releases/download/v3.7.0/asciinema-player.min.js";
sha256 = "sha256-Ymco/+FinDr5YOrV72ehclpp4amrczjo5EU3jfr/zxs=";
};
asciinema-player-css = pkgs.fetchurl {
url = "https://github.com/asciinema/asciinema-player/releases/download/v3.7.0/asciinema-player.css";
sha256 = "sha256-GZMeZFFGvP5GMqqh516mjJKfQaiJ6bL38bSYOXkaohc=";
};
module-docs =
pkgs.runCommand "rendered"
{
@@ -79,12 +88,8 @@
;
};
devShells.docs = self'.packages.docs.overrideAttrs (_old: {
nativeBuildInputs = [
# Run: htmlproofer --disable-external
pkgs.html-proofer
]
++ self'.devShells.default.nativeBuildInputs
++ self'.packages.docs.nativeBuildInputs;
nativeBuildInputs =
self'.devShells.default.nativeBuildInputs ++ self'.packages.docs.nativeBuildInputs;
shellHook = ''
${self'.devShells.default.shellHook}
git_root=$(git rev-parse --show-toplevel)
@@ -102,22 +107,11 @@
;
inherit (inputs) nixpkgs;
inherit module-docs;
inherit asciinema-player-js;
inherit asciinema-player-css;
};
deploy-docs = pkgs.callPackage ./deploy-docs.nix { inherit (config.packages) docs; };
inherit module-docs;
};
checks.docs-integrity =
pkgs.runCommand "docs-integrity"
{
nativeBuildInputs = [ pkgs.html-proofer ];
LANG = "C.UTF-8";
}
''
# External links should be avoided in the docs, because they often break
# and we cannot statically control them. Thus we disable checking them
htmlproofer --disable-external ${self'.packages.docs}
touch $out
'';
};
}

View File

@@ -105,7 +105,7 @@ def render_option(
read_only = option.get("readOnly")
res = f"""
{"#" * level} {sanitize(name) if short_head is None else sanitize(short_head)}
{"#" * level} {sanitize(name) if short_head is None else sanitize(short_head)} {"{: #" + sanitize_anchor(name) + "}" if level > 1 else ""}
"""
@@ -235,7 +235,7 @@ def produce_clan_core_docs() -> None:
if module_type is not None and "submodule" not in module_type:
continue
core_outputs[indexfile] += (
f" - [{submodule_name}](../../reference/clan.core/{submodule_name}.md)\n"
f" - [{submodule_name}](./{submodule_name}.md)\n"
)
core_outputs[indexfile] += options_head
@@ -273,10 +273,8 @@ def produce_clan_core_docs() -> None:
core_outputs[outfile] += output
for outfile, output in core_outputs.items():
(Path(OUT) / "reference" / outfile).parent.mkdir(
parents=True, exist_ok=True
)
with (Path(OUT) / "reference" / outfile).open("w") as of:
(Path(OUT) / outfile).parent.mkdir(parents=True, exist_ok=True)
with (Path(OUT) / outfile).open("w") as of:
of.write(output)
@@ -309,6 +307,32 @@ def produce_clan_service_docs() -> None:
msg = f"Environment variables are not set correctly: $out={OUT}"
raise ClanError(msg)
indexfile = Path(OUT) / "clanServices/index.md"
indexfile.parent.mkdir(
parents=True,
exist_ok=True,
)
index = "# Clan Services\n\n"
index += """
**`clanServices`** are modular building blocks that simplify the configuration and orchestration of multi-host services.
Each `clanService`:
* Is a module of class **`clan.service`**
* Can define **roles** (e.g., `client`, `server`)
* Uses **`inventory.instances`** to configure where and how it is deployed
!!! Note
`clanServices` are part of Clan's next-generation service model and are intended to replace `clanModules`.
See [Migration Guide](../../guides/migrations/migrate-inventory-services.md) for help on migrating.
Learn how to use `clanServices` in practice in the [Using clanServices guide](../../guides/inventory/clanServices.md).
"""
with indexfile.open("w") as of:
of.write(index)
with Path(CLAN_MODULES_VIA_SERVICE).open() as f3:
service_links: dict[str, dict[str, dict[str, Any]]] = json.load(f3)
@@ -344,7 +368,7 @@ def produce_clan_service_docs() -> None:
replace_prefix=f"clan.{module_name}",
)
outfile = Path(OUT) / "services/official" / f"{module_name}.md"
outfile = Path(OUT) / f"clanServices/{module_name}.md"
outfile.parent.mkdir(
parents=True,
exist_ok=True,
@@ -413,7 +437,7 @@ Typically needed by module authors to define roles, behavior and metadata for di
# for option in options_tree.suboptions:
output += options_docs_from_tree(options_tree, init_level=2)
outfile = Path(OUT) / "reference/options" / "clan_service.md"
outfile = Path(OUT) / "clanServices/clan-service-author-interface.md"
outfile.parent.mkdir(parents=True, exist_ok=True)
with Path.open(outfile, "w") as of:
of.write(output)
@@ -428,10 +452,10 @@ def produce_inventory_docs() -> None:
msg = f"Environment variables are not set correctly: $out={OUT}"
raise ClanError(msg)
output = """# Inventory Submodule
output = """# Inventory
This provides an overview of the available options of the `inventory` model.
It can be set via the `inventory` attribute of the [`clan`](../../reference/options/clan.md) function, or via the [`clan.inventory`](../../reference/options/clan_inventory.md) attribute of flake-parts.
It can be set via the `inventory` attribute of the [`clan`](./clan.md#inventory) function, or via the [`clan.inventory`](./clan.md#inventory) attribute of flake-parts.
"""
# Inventory options are already included under the clan attribute
@@ -455,7 +479,7 @@ It can be set via the `inventory` attribute of the [`clan`](../../reference/opti
for option in inventory_opt.suboptions:
output += options_docs_from_tree(option, init_level=2)
outfile = Path(OUT) / "reference/options" / "clan_inventory.md"
outfile = Path(OUT) / "options/clan_inventory.md"
outfile.parent.mkdir(parents=True, exist_ok=True)
with Path.open(outfile, "w") as of:
of.write(output)
@@ -473,8 +497,8 @@ def produce_clan_options_docs() -> None:
output = """# Clan Options
This provides an overview of the available options
Those can be set via [`clan-core.lib.clan`](../../reference/options/clan.md) function,
or via the [`clan`](../../reference/options/clan.md) attribute of flake-parts.
Those can be set via [`clan-core.lib.clan`](./clan.md#inventory) function,
or via the [`clan`](./clan.md) attribute of flake-parts.
"""
# Inventory options are already included under the clan attribute
@@ -488,16 +512,10 @@ or via the [`clan`](../../reference/options/clan.md) attribute of flake-parts.
# Exclude inventory options
for option in clan_root_option.suboptions:
if "inventory" in option.name:
output += """## Inventory
Attribute: `inventory`
See: [Inventory Submodule](../../reference/options/clan_inventory.md)
"""
continue
output += options_docs_from_tree(option, init_level=2)
outfile = Path(OUT) / "reference/options" / "clan.md"
outfile = Path(OUT) / "options/clan.md"
outfile.parent.mkdir(parents=True, exist_ok=True)
with Path.open(outfile, "w") as of:
of.write(output)

View File

@@ -1,3 +1,5 @@
# Clan service modules
## Status
Accepted

View File

@@ -1,3 +1,5 @@
# Clan as library
## Status
Accepted

View File

@@ -1,112 +0,0 @@
## Status
Accepted
## Context
In the long term we envision the clan application will consist of the following user-facing tools.
- `CLI`
- `TUI`
- `Desktop Application`
- `REST-API`
- `Mobile Application`
We are not sure whether all of those will exist, but the architecture should be generic enough that they are possible without major changes to the underlying system.
## Decision
This leads to the conclusion that we should do `library`-centric development,
with the current `clan` Python code becoming a library that can be imported to create various tools on top of it.
All **CLI** or **UI** related parts should be moved out of the main library.
Imagine roughly the following architecture:
``` mermaid
graph TD
%% Define styles
classDef frontend fill:#f9f,stroke:#333,stroke-width:2px;
classDef backend fill:#bbf,stroke:#333,stroke-width:2px;
classDef storage fill:#ff9,stroke:#333,stroke-width:2px;
classDef testing fill:#cfc,stroke:#333,stroke-width:2px;
%% Define nodes
user(["User"]) -->|Interacts with| Frontends
subgraph "Frontends"
CLI["CLI"]:::frontend
APP["Desktop App"]:::frontend
TUI["TUI"]:::frontend
REST["REST API"]:::frontend
end
subgraph "Python"
API["Library <br>for interacting with clan"]:::backend
BusinessLogic["Business Logic<br>Implements actions like 'machine create'"]:::backend
STORAGE[("Persistence")]:::storage
NIX["Nix Eval & Build"]:::backend
end
subgraph "CI/CD & Tests"
TEST["Feature Testing"]:::testing
end
%% Define connections
CLI --> API
APP --> API
TUI --> API
REST --> API
TEST --> API
API --> BusinessLogic
BusinessLogic --> STORAGE
BusinessLogic --> NIX
```
This very simple design ensures that all the basic features remain stable across all frontends.
In the end it is straightforward to create Python library function calls in a testing framework to ensure that kind of stability.
Integration tests and smaller unit-tests should both be utilized to ensure the stability of the library.
Note: Library functions don't have to be JSON-serializable in general.
Persistence includes but is not limited to: creating git commits, writing to inventory.json, reading and writing vars, and interacting with persisted data in general.
## Benefits / Drawbacks
- (+) Less tight coupling of frontend- / backend-teams
- (+) Consistency and inherent behavior
- (+) Performance & Scalability
- (+) Different frontends for different user groups
- (+) Documentation per library function makes it convenient to interact with the clan resources.
- (+) Testing the library ensures the stability of the underlying layers for everything built above.
- (-) Complexity overhead
- (-) library needs to be designed / documented
- (+) library can be well documented since it is a finite set of functions.
- (-) Error handling might be harder.
- (+) Common error reporting
- (-) different frontends need different features. The library must include them all.
- (+) All those core features must be implemented anyways.
- (+) VPN benchmarking already uses the existing library and works relatively well.
## Implementation considerations
Not all details that will need to change over time can be pointed out ahead of time.
The goal of this document is to create a common understanding of how we would like our project to be structured.
Any future commits should contribute to this goal.
Some ideas what might be needed to change:
- Having separate locations or packages for the library and the CLI.
- Rename the `clan_cli` package to `clan` and move the `cli` frontend into a subfolder or a separate package.
- Python Argparse or other cli related code should not exist in the `clan` python library.
- `__init__.py` should be very minimal. Only init the business logic models and resources. Note that all `__init__.py` files all the way up in the module tree are always executed as part of the Python module import logic and thus should be as small as possible.
i.e. `from clan_cli.vars.generators import ...` executes both `clan_cli/__init__.py` and `clan_cli/vars/__init__.py` if any of those exist.
- An `api` folder doesn't make sense since the Python library `clan` is the API.
- Logic needed for the web UI that performs JSON serialization and deserialization will live in some `json-adapter` folder or package.
- Code for serializing dataclasses and typed dictionaries is needed for the persistence layer. (i.e. for read-write of inventory.json)
- The inventory JSON is an internal backend resource. Its logic includes merging, unmerging, and partial updates that take Nix values and their priorities into account. Nobody should read or write it directly.
Instead there will be library methods, e.g. to add a `service` or to update/read/delete some information from it.
- Library functions should be carefully designed with suitable conventions for writing good APIs in mind (e.g. https://swagger.io/resources/articles/best-practices-in-api-design/).

View File

@@ -1,3 +1,5 @@
# ADR Numbering process
## Status
Proposed after some conversation between @lassulus, @Mic92, & @lopter.

View File

@@ -1,3 +1,5 @@
# deployment parameters: evalHost, buildHost, targetHost
## Status
accepted

View File

@@ -1,14 +0,0 @@
This section contains the architecture decisions that have been reviewed and generally agreed upon
## What is an ADR?
> An architecture decision record (ADR) is a document that captures an important architecture decision made along with its context and consequences.
!!! Note
For further reading about ADRs we recommend [architecture-decision-record](https://github.com/joelparkerhenderson/architecture-decision-record)
## Crafting a new ADR
1. Use the [template](../decisions/template.md)
2. Create the Pull request and gather feedback
3. Retrieve your ADR number (see: [numbering](../decisions/03-adr-numbering-process.md))

View File

@@ -1,3 +1,5 @@
# Architecture Decision Records
This section contains the architecture decisions that have been reviewed and generally agreed upon
## What is an ADR?
@@ -9,6 +11,6 @@ This section contains the architecture decisions that have been reviewed and gen
## Crafting a new ADR
1. Use the [template](../decisions/template.md)
1. Use the [template](./_template.md)
2. Create the Pull request and gather feedback
3. Retrieve your ADR number (see: [numbering](../decisions/03-adr-numbering-process.md))
3. Retrieve your ADR number (see: [numbering](./03-adr-numbering-process.md))

View File

@@ -1,6 +1,6 @@
## Decision record template by Michael Nygard
# Decision record template by Michael Nygard
This is the template in [Documenting architecture decisions - Michael Nygard](https://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions).
This is the template in [Documenting architecture decisions - Michael Nygard](http://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions).
You can use [adr-tools](https://github.com/npryce/adr-tools) for managing the ADR files.
In each ADR file, write these sections:

View File

@@ -1,24 +0,0 @@
## Decision record template by Michael Nygard
This is the template in [Documenting architecture decisions - Michael Nygard](https://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions).
You can use [adr-tools](https://github.com/npryce/adr-tools) for managing the ADR files.
In each ADR file, write these sections:
# Title
## Status
What is the status, such as proposed, accepted, rejected, deprecated, superseded, etc.?
## Context
What is the issue that we're seeing that is motivating this decision or change?
## Decision
What is the change that we're proposing and/or doing?
## Consequences
What becomes easier or more difficult to do because of this change?

View File

@@ -1,75 +0,0 @@
A service in clan is a self-contained, reusable unit of system configuration that provides a specific piece of functionality across one or more machines.
Think of it as a recipe for running a tool — like automatic backups, VPN networking, monitoring, etc.
In Clan, services are multi-host & role-based:
- Roles map machines to logical service responsibilities, enabling structured, clean deployments.
- You can use tags instead of explicit machine names.
To learn more: [Guide about clanService](../guides/services/introduction-to-services.md)
!!! Important
It is recommended to add at least one networking service such as `zerotier`, which allows you to reach all your clan machines from your setup computer across the globe.
## Configure a Zerotier Network (recommended)
```{.nix title="clan.nix" hl_lines="8-16"}
{
inventory.machines = {
jon = { };
sara = { };
};
inventory.instances = {
zerotier = { # (1)
# Replace with the name (string) of your machine that you will use as zerotier-controller
# See: https://docs.zerotier.com/controller/
# Deploy this machine first to create the network secrets
roles.controller.machines."jon" = { }; # (2)
# Peers of the network
# this line means 'all' clan machines will be 'peers'
roles.peer.tags.all = { }; # (3)
};
};
# ...
# elided
}
```
1. See [services/official](../services/definition.md) for all available services and how to configure them,
or read [guides/services](../guides/services/community.md) if you want to bring your own.
2. Replace `jon` with the *name* of your machine.
3. This line will add all machines of your clan as `peer` to zerotier
## Adding more recommended defaults
Adding the following services is recommended for most users:
```{.nix title="clan.nix" hl_lines="7-14"}
{
inventory.machines = {
jon = { };
sara = { };
};
inventory.instances = {
admin = { # (1)
roles.default.tags.all = { };
roles.default.settings = {
allowedKeys = {
"my-user" = "ssh-ed25519 AAAAC3N..."; # (2)
};
};
};
# ...
# elided
};
}
```
1. The `admin` service will generate a **root password** and **add your SSH key**, allowing for convenient administration.
2. Equivalent to directly setting `authorizedKeys` like in [configuring a machine](../getting-started/add-machines.md#configuring-a-machine)
3. Adds `user = jon` as a user on all machines. Will create a `home` directory, and prompt for a password before deployment.

View File

@@ -1,125 +0,0 @@
!!! Note "Under construction"
The users concept of clan is not done yet. This guide outlines some solutions from our community.
Defining users can be done in many different ways. We want to highlight two approaches:
- Using clan's [users](../services/official/users.md) service.
- Using a custom approach.
## Adding Users using the [users](../services/official/users.md) service
To add a first *user*, this guide leverages two things:
- [services](../services/definition.md): Allows binding arbitrary logic to something we call an `instance`.
- [services/users](../services/official/users.md): Implements the logic for adding a single user per instance.
The example shows how to add a user called `jon`:
```{.nix title="clan.nix" hl_lines="7-21"}
{
inventory.machines = {
jon = { };
sara = { };
};
inventory.instances = {
jon-user = { # (1)
module.name = "users";
roles.default.tags.all = { }; # (2)
roles.default.settings = {
user = "jon"; # (3)
groups = [
"wheel" # Allow using 'sudo'
"networkmanager" # Allows to manage network connections.
"video" # Allows to access video devices.
"input" # Allows to access input devices.
];
};
};
# ...
# elided
};
}
```
1. Add `user = jon` as a user on all machines. Will create a `home` directory, and prompt for a password before deployment.
2. Add this user to `all` machines
3. Define the `name` of the user to be `jon`
The `users` service creates a `/home/jon` directory, allows `jon` to sign in and will take care of the user's password.
For more information see [services/users](../services/official/users.md)
## Using a custom approach
Some people like to define a `users` folder in their repository root.
This binds all user-specific logic to a single place (`default.nix`),
which can be imported into individual machines to make the user available there, as sketched below.
```bash
.
├── machines
│   ├── jon
# ......
├── users
│   ├── jon
│ │ └── default.nix # <- a NixOS module; sets some options
# ... ... ...
```
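A minimal sketch of what such a `users/jon/default.nix` might contain (the specific options shown here are illustrative, not prescribed):
```nix
# users/jon/default.nix -- an illustrative NixOS module for the user 'jon'
{ pkgs, ... }:
{
  users.users.jon = {
    isNormalUser = true;
    extraGroups = [
      "wheel" # Allow using 'sudo'
      "networkmanager"
    ];
    packages = [ pkgs.git ];
  };
}
```
A machine can then import it, e.g. via `imports = [ ../../users/jon ];` in `machines/jon/configuration.nix`.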
## using [home-manager](https://github.com/nix-community/home-manager)
When using clan's `users` service it is possible to define `extraModules`.
In fact, this is always possible when using clan services.
We can use this property to bind a NixOS module to the user which configures home-manager.
```{.nix title="clan.nix" hl_lines="22"}
{
inventory.machines = {
jon = { };
sara = { };
};
inventory.instances = {
jon-user = {
module.name = "users";
roles.default.tags.all = { };
roles.default.settings = {
user = "jon",
groups = [
"wheel"
"networkmanager"
"video"
"input"
];
};
roles.default.extraModules = [ ./users/jon/home.nix ]; # (1)
};
# ...
# elided
};
}
```
1. Type `path` or `string`: must point to a separate file. Inlining a module is not possible.
!!! Note "This is inspiration"
Our community might come up with better solutions soon.
We are seeking contributions to improve this pattern if you have a nicer solution in mind.
```nix title="users/jon/home.nix"
# NixOS module to import home-manager and the home-manager configuration of 'jon'
{ self, ...}:
{
imports = [ self.inputs.home-manager.nixosModules.default ];
home-manager.users.jon = {
imports = [
./home-configuration.nix
];
};
}
```

View File

@@ -1,74 +0,0 @@
By default clan uses [disko](https://github.com/nix-community/disko) which allows for declarative disk partitioning.
To see what disk templates are available run:
```{.shellSession hl_lines="10" .no-copy}
$ clan templates list
Available 'clan' template
├── <builtin>
│ ├── default: Initialize a new clan flake
│ ├── flake-parts: Flake-parts
│ └── minimal: for clans managed via (G)UI
Available 'disko' templates
├── <builtin>
│ └── single-disk: A simple ext4 disk with a single partition
Available 'machine' templates
├── <builtin>
│ ├── demo-template: Demo machine for the CLAN project
│ ├── flash-installer: Initialize a new flash-installer machine
│ ├── new-machine: Initialize a new machine
│ └── test-morph-template: Morph a machine
```
For this guide we will select the `single-disk` template, which provides a simple ext4 disk with a single partition.
!!! tip
For advanced partitioning, see [Disko templates](https://github.com/nix-community/disko-templates) or [Disko examples](https://github.com/nix-community/disko/tree/master/example).
You can also [contribute a disk template to clan core](https://docs.clan.lol/guides/disko-templates/community/)
To set up a disk schema for a machine, run:
```bash
clan templates apply disk single-disk jon --set mainDisk ""
```
This should fail and list the valid options for your specific hardware:
```shellSession
Invalid value for placeholder mainDisk - Valid options:
/dev/disk/by-id/nvme-WD_PC_SN740_SDDQNQD-512G-1201_232557804368
```
Re-run the command with the correct disk:
```bash
clan templates apply disk single-disk jon --set mainDisk "/dev/disk/by-id/nvme-WD_PC_SN740_SDDQNQD-512G-1201_232557804368"
```
The command should now succeed:
```shellSession
Applied disk template 'single-disk' to machine 'jon'
```
A `disko.nix` file should be created in `machines/jon`.
You can have a look and customize it if needed; an illustrative sketch follows.
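The sketch below is illustrative only; the exact content generated by the template may differ:
```nix
# machines/jon/disko.nix (illustrative)
{
  disko.devices.disk.main = {
    type = "disk";
    device = "/dev/disk/by-id/nvme-WD_PC_SN740_SDDQNQD-512G-1201_232557804368";
    content = {
      type = "gpt";
      partitions = {
        ESP = {
          size = "500M";
          type = "EF00"; # EFI system partition
          content = {
            type = "filesystem";
            format = "vfat";
            mountpoint = "/boot";
          };
        };
        root = {
          size = "100%"; # use the remaining space
          content = {
            type = "filesystem";
            format = "ext4";
            mountpoint = "/";
          };
        };
      };
    };
  };
}
```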
!!! Danger
Don't change the `disko.nix` after the machine is installed for the first time, unless you really know what you are doing.
Changing disko configuration requires wiping and reinstalling the machine.
## Deploy the machine
**Finally deployment time!**
This command is destructive and will format your disk and install NixOS on it! It is equivalent to appending `--phases kexec,disko,install,reboot`.
```bash
clan machines install [MACHINE] --target-host root@<IP>
```

View File

@@ -1,178 +0,0 @@
This guide will help you convert your existing NixOS configurations into a Clan.
!!! Warning
Migrating instead of starting new can be trickier and might lead to bugs or
unexpected issues. We recommend reading the [Getting Started](../getting-started/creating-your-first-clan.md) guide first.
Once you have a working setup and understand the concepts, transferring your NixOS configurations over is easy.
## Back up your existing configuration
Before you start, it is strongly recommended to back up your existing
configuration in any form you see fit. If you use version control to manage
your configuration changes, it is also a good idea to follow the migration
guide in a separate branch until everything works as expected.
## Starting Point
We assume you are already using NixOS flakes to manage your configuration. If
not, migrate to a flake-based setup following the official [NixOS
documentation](https://nix.dev/manual/nix/2.25/command-ref/new-cli/nix3-flake.html).
The snippet below shows a common Nix flake. For this example we will assume you
have two hosts: **berlin** and **cologne**.
```nix
{
inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
outputs = { self, nixpkgs, ... }: {
nixosConfigurations = {
berlin = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [ ./machines/berlin/configuration.nix ];
};
cologne = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [ ./machines/cologne/configuration.nix ];
};
};
};
}
```
## 1. Add `clan-core` to `inputs`
Add `clan-core` to your flake as input.
```nix
inputs.clan-core = {
url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
# Don't do this if your machines are on nixpkgs stable.
inputs.nixpkgs.follows = "nixpkgs";
};
```
## 2. Update Outputs
To be able to access our newly added dependency, it has to be added to the
output parameters.
```diff
- outputs = { self, nixpkgs, ... }:
+ outputs = { self, nixpkgs, clan-core }:
```
The existing `nixosConfigurations` output of your flake will be created by
clan. In addition, a new `clanInternals` output will be added. Since both of
these are provided by the output of `clan-core.lib.clan`, a common syntax is to use a
`let...in` statement to create your clan and access its parameters in the flake
outputs.
For the flake example provided above, your flake should now look like this:
```nix
{
inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
inputs.clan-core = {
url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
inputs.nixpkgs.follows = "nixpkgs";
};
outputs = { self, nixpkgs, clan-core, ... }:
let
clan = clan-core.lib.clan {
self = self; # this needs to point at the repository root
specialArgs = {};
meta.name = throw "Change me to something unique";
machines = {
berlin = {
nixpkgs.hostPlatform = "x86_64-linux";
imports = [ ./machines/berlin/configuration.nix ];
};
cologne = {
nixpkgs.hostPlatform = "x86_64-linux";
imports = [ ./machines/cologne/configuration.nix ];
};
};
};
in
{
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
clan = clan.config;
};
}
```
✅ Et voilà! Your existing hosts are now part of a clan.
Existing Nix tooling
should still work as normal. To check that you didn't make any errors, run `nix
flake show` and verify both hosts are still recognized as if nothing had
changed. You should also see the new `clan` output.
```
nix flake show
git+file:///my-nixos-config
├───clan: unknown
└───nixosConfigurations
├───berlin: NixOS configuration
└───cologne: NixOS configuration
```
Of course you can also rebuild your configuration using `nixos-rebuild` and
verify everything still works.
## 3. Add `clan-cli` to your `devShells`
At this point Clan is set up, but you can't use the CLI yet. To do so, it is
recommended to expose it via a `devShell` in your flake. It is also possible to
install it any other way you would install a package in Nix, but using a
developtment shell ensures the CLI's version will always be in sync with your
configuration.
A minimal example is provided below, add it to your flake outputs.
```nix
devShells."x86_64-linux".default = nixpkgs.legacyPackages."x86_64-linux".mkShell {
packages = [ clan-core.packages."x86_64-linux".clan-cli ];
};
```
To use the CLI, execute `nix develop` in the directory of your flake. The
resulting shell provides you with the `clan` CLI tool. Since you will be using
it every time you interact with Clan, it is recommended to set up
[direnv](https://direnv.net/).
Verify everything works as expected by running `clan machines list`.
```
nix develop
[user@host:~/my-nixos-config]$ clan machines list
berlin
cologne
```
## Specify Targets
Clan needs to know where it can reach your hosts. For testing purposes, set
`clan.core.networking.targetHost` to the machine's address or hostname.
```nix
# machines/berlin/configuration.nix
{
clan.core.networking.targetHost = "123.4.56.78";
}
```
See our guide on properly [configuring machine networking](../guides/networking/networking.md)
## Next Steps
You are now fully set up. Use the CLI to manage your hosts or proceed to
configure further services. At this point you should be able to run commands
like `clan machines update berlin` to deploy a host.

View File

@@ -1,135 +0,0 @@
Ready to manage your fleet of machines?
We will create a declarative infrastructure using **clan**, **git**, and **nix flakes**.
You'll finish with a centrally managed fleet, ready to import your existing NixOS configuration.
## Prerequisites
Make sure you have the following:
* 💻 **Administration Machine**: Run the setup commands from this machine.
* 🛠️ **Nix**: The Nix package manager, installed on your administration machine.
??? info "**How to install Nix (Linux / MacOS / NixOS)**"
**On Linux or macOS:**
1. Run the recommended installer:
```shellSession
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
2. After installation, ensure flakes are enabled by adding this line to `~/.config/nix/nix.conf`:
```
experimental-features = nix-command flakes
```
**On NixOS:**
Nix is already installed. You only need to enable flakes for your user in your `configuration.nix`:
```nix
{
nix.settings.experimental-features = [ "nix-command" "flakes" ];
}
```
Then, run `nixos-rebuild switch` to apply the changes.
* 🎯 **Target Machine(s)**: A remote machine with SSH, or your local machine (if NixOS).
## Create a New Clan
1. Navigate to your desired directory:
```shellSession
cd <your-directory>
```
2. Create a new clan flake:
**Note:** This creates a new directory in your current location
```shellSession
nix run https://git.clan.lol/clan/clan-core/archive/main.tar.gz#clan-cli --refresh -- flakes create
```
3. Enter a **name** in the prompt:
```terminalSession
Enter a name for the new clan: my-clan
```
## Project Structure
Your new directory, `my-clan`, should contain the following structure:
```
my-clan/
├── clan.nix
├── flake.lock
├── flake.nix
├── modules/
└── sops/
```
!!! note "Templates"
This is the structure for the `default` template.
Use `clan templates list` and `clan templates --help` for available templates & more. Keep in mind that the exact files may change as templates evolve.
## Activate the Environment
To get started, `cd` into your new project directory.
```shellSession
cd my-clan
```
Now, activate the environment using one of the following methods.
=== "Automatic (direnv, recommended)"
**Prerequisite**: You must have [nix-direnv](https://github.com/nix-community/nix-direnv) installed.
Run `direnv allow` to automatically load the environment whenever you enter this directory.
```shellSession
direnv allow
```
=== "Manual (nix develop)"
Run `nix develop` to load the environment for your current shell session.
```shellSession
nix develop
```
## Verify the Setup
Once your environment is active, verify that the clan command is available by running:
```shellSession
clan show
```
You should see the default metadata for your new clan:
```shellSession
Name: __CHANGE_ME__
Description: None
```
This confirms your setup is working correctly.
You can now change the default name by editing the `meta.name` field in your `clan.nix` file.
```{.nix title="clan.nix" hl_lines="3"}
{
# Ensure this is unique among all clans you want to use.
meta.name = "__CHANGE_ME__";
# ...
# elided
}
```

View File

@@ -1,201 +0,0 @@
To install Clan on physical machines, you need to use our custom installer image. This is necessary for proper installation and operation.
!!! note "Deploying to a Virtual Machine?"
If you're deploying to a virtual machine (VM), you can skip this section and go directly to the [Deploy Virtual Machine](../../getting-started/deploy-to-virtual-machine.md) step. In this scenario, we automatically use [nixos-anywhere](https://github.com/nix-community/nixos-anywhere) to replace the kernel during runtime.
??? info "Why nixos-anywhere Doesn't Work on Physical Hardware?"
nixos-anywhere relies on [kexec](https://wiki.archlinux.org/title/Kexec) to replace the running kernel with our custom one. This method often has compatibility issues with real hardware, especially systems with dedicated graphics cards like laptops and servers, leading to crashes and black screens.
??? info "Reasons for a Custom Install Image"
Our custom install images are built to include essential tools like [nixos-facter](https://github.com/nix-community/nixos-facter) and support for [ZFS](https://wiki.archlinux.org/title/ZFS). They're also optimized to run on systems with as little as 1 GB of RAM, ensuring efficient performance even on lower-end hardware.
## Prerequisites
- [x] A free USB Drive with at least 1.5GB (All data on it will be lost)
- [x] Linux/NixOS Machine with Internet
## Identify the USB Flash Drive
1. Insert your USB flash drive into your computer.
2. Identify your flash drive with `lsblk`:
```shellSession
lsblk
```
```{.shellSession hl_lines="2" .no-copy}
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sdb 8:0 1 117,2G 0 disk
└─sdb1 8:1 1 117,2G 0 part /run/media/qubasa/INTENSO
nvme0n1 259:0 0 1,8T 0 disk
├─nvme0n1p1 259:1 0 512M 0 part /boot
└─nvme0n1p2 259:2 0 1,8T 0 part
└─luks-f7600028-9d83-4967-84bc-dd2f498bc486 254:0 0 1,8T 0 crypt /nix/store
```
!!! Info "In this case the USB device is `sdb`"
3. Ensure all partitions on the drive are unmounted. Replace `sdb1` in the command below with your device identifier (like `sdc1`, etc.):
```shellSession
sudo umount /dev/sdb1
```
## Installer
=== "**Linux OS**"
**Create a Custom Installer**
We recommend building your own installer for the following reasons:
- It includes your SSH public keys in the image, allowing passwordless SSH connections later on.
- It sets your preferred language and keymap.
```bash
clan flash write --flake https://git.clan.lol/clan/clan-core/archive/main.tar.gz \
--ssh-pubkey $HOME/.ssh/id_ed25519.pub \
--keymap us \
--language en_US.UTF-8 \
--disk main /dev/sd<X> \
flash-installer
```
!!! Note
Replace `$HOME/.ssh/id_ed25519.pub` with a path to your SSH public key.
Replace `/dev/sd<X>` with the drive path you want to flash
!!! Danger "Specifying the wrong device can lead to unrecoverable data loss."
The `clan flash` utility will erase the disk. Make sure to specify the correct device
- **SSH-Pubkey Option**
To add an SSH public key to the installer image, append the option:
```
--ssh-pubkey <pubkey_path>
```
If you do not have an SSH key yet, you can generate one with the `ssh-keygen -t ed25519` command.
This SSH key will be installed for the root user.
- **Connect to the installer**
On boot, the installer will display on-screen the IP address it received from the network.
If you need to configure Wi-Fi first, refer to the next section.
If Multicast-DNS (Avahi) is enabled on your own machine, you can also access the installer using the `flash-installer.local` address.
- **List Keymaps**
You can get a list of all keymaps with the following command:
```
clan flash list keymaps
```
- **List Languages**
You can get a list of all languages with the following command:
```
clan flash list languages
```
=== "**Other OS**"
**Download Generic Installer**
For x86_64:
```shellSession
wget https://github.com/nix-community/nixos-images/releases/download/nixos-unstable/nixos-installer-x86_64-linux.iso
```
For generic arm64 / aarch64 (probably does not work on Raspberry Pi...)
```shellSession
wget https://github.com/nix-community/nixos-images/releases/download/nixos-unstable/nixos-installer-aarch64-linux.iso
```
!!! Note
If you don't have `wget` installed, you can use `curl --progress-bar -OL <url>` instead.
## Flash the Installer to the USB Drive
!!! Danger "Specifying the wrong device can lead to unrecoverable data loss."
The `dd` utility will erase the disk. Make sure to specify the correct device (`of=...`)
For example if the USB device is `sdb` use `of=/dev/sdb` (on macOS it will look more like /dev/disk1)
On Linux, you can use the `lsblk` utility to identify the correct disk:
```
lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
```
On macOS use `diskutil`:
```
diskutil list
```
Use the `dd` utility to write the NixOS installer image to your USB drive.
Replace `/dev/sd<X>` with your external drive from above.
```shellSession
sudo dd bs=4M conv=fsync status=progress if=./nixos-installer-x86_64-linux.iso of=/dev/sd<X>
```
- **Connect to the installer**
On boot, the installer will display on-screen the IP address it received from the network.
If you need to configure Wi-Fi first, refer to the next section.
If Multicast-DNS (Avahi) is enabled on your own machine, you can also access the installer using the `nixos-installer.local` address.
## Boot From USB Stick
- To use, boot from the Clan USB drive with **secure boot turned off**. For step by step instructions go to [Disabling Secure Boot](../../guides/secure-boot.md)
## (Optional) Connect to Wifi Manually
If you don't have access via LAN, the installer offers support for connecting via Wi-Fi.
```shellSession
iwctl
```
This will enter `iwd`
```{.console, .no-copy}
[iwd]#
```
Now run the following command to connect to your Wifi:
```{.shellSession .no-copy}
# Identify your network device.
device list
# Replace 'wlan0' with your wireless device name
# Find your Wifi SSID.
station wlan0 scan
station wlan0 get-networks
# Replace your_ssid with the Wifi SSID
# Connect to your network.
station wlan0 connect your_ssid
# Verify you are connected
station wlan0 show
```
If the connection was successful you should see something like this:
```{.console, .no-copy}
State connected
Connected network FRITZ!Box (Your router device)
IPv4 address 192.168.188.50 (Your new local ip)
```
Press ++ctrl+d++ to exit `IWD`.
!!! Important
Press ++ctrl+d++ **again** to update the displayed QR code and connection information.
You're all set up!

View File

@@ -1,26 +0,0 @@
## Prerequisites
- [x] RAM > 2GB
- [x] **Two Computers**: You need one computer that you're getting ready (we'll call this the Target Computer) and another one to set it up from (we'll call this the Setup Computer). Make sure both can talk to each other over the network using SSH.
- [x] **Machine configuration**: See our basic [adding and configuring machine guide](../getting-started/add-machines.md)
Clan supports any cloud machine if it is reachable via SSH and supports `kexec`.
??? tip "NixOS can cause strange issues when booting in certain cloud environments."
If on Linode: Make sure that the system uses "Direct Disk boot kernel" (found in the configuration panel)
The following command will generate a hardware report with [nixos-facter](https://github.com/nix-community/nixos-facter) and write it back into your machine folder. The `--phases kexec` flag ensures nothing is formatted yet; instead, if the target system is not a NixOS machine, it will use [kexec](https://wiki.archlinux.org/title/Kexec) to switch to a NixOS kernel.
```terminal
clan machines install [MACHINE] \
--update-hardware-config nixos-facter \
--phases kexec \
--target-host myuser@<IP>
```
!!! Warning
After running the above command, be aware that the SSH login user changes from `myuser` to `root`. For subsequent SSH connections to the target machine, use `root` as the login user. This change occurs because the system switches to the NixOS kernel using `kexec`.

View File

@@ -1,129 +0,0 @@
# Update Machines
The Clan command line interface enables you to update machines remotely over SSH.
In this guide we will teach you how to set a `targetHost` in Nix,
and how to define a remote builder for your machine closures.
## Setting `targetHost`
Set the machine's `targetHost` to the reachable IP address of the new machine.
This eliminates the need to specify `--target-host` in CLI commands.
```{.nix title="clan.nix" hl_lines="9"}
{
# Ensure this is unique among all clans you want to use.
meta.name = "my-clan";
inventory.machines = {
# Define machines here.
# The machine name will be used as the hostname.
jon = {
deploy.targetHost = "root@192.168.192.4"; # (1)
};
};
# [...]
}
```
The use of `root@` in the target address implies SSH access as the `root` user.
Ensure that the root login is secured and only used when necessary.
## Multiple Target Hosts
You can now experiment with a new interface that allows you to define multiple `targetHost` addresses for different VPNs. Learn more and try it out in our [networking guide](../guides/networking/networking.md).
## Updating Machine Configurations
Execute the following command to update the specified machine:
```bash
clan machines update jon
```
All machines can be updated simultaneously by omitting the machine name:
```bash
clan machines update
```
---
## Advanced Usage
The following options are only needed for special cases, such as limited resources, mixed environments, or private flakes.
### Setting `buildHost`
If the machine does not have enough resources to run the NixOS **evaluation** or **build** itself,
it is also possible to specify a `buildHost` instead.
During an update, clan will ssh into the `buildHost` and run `nixos-rebuild` from there.
!!! Note
The `buildHost` option should be set directly within your machine's Nix configuration, **not** under `inventory.machines`.
```{.nix hl_lines="5" .no-copy}
clan {
# ...
machines = {
"jon" = {
clan.core.networking.buildHost = "root@<host_or_ip>";
};
};
};
```
### Overriding configuration with CLI flags
`buildHost` / `targetHost`, and other network settings can be temporarily overridden for a single command:
For the full list of flags refer to the [Clan CLI](../reference/cli/index.md)
```bash
# Build on a remote host
clan machines update jon --build-host root@192.168.1.10
# Build locally (useful for testing or when the target has limited resources)
clan machines update jon --build-host local
```
!!! Note
Make sure the CPU architecture of the `buildHost` matches that of the `targetHost`
For example, if deploying to a macOS machine with an ARM64-Darwin architecture, you need a second macOS machine with the same architecture to build it.
### Excluding a machine from `clan machine update`
To exclude machines from being updated when running `clan machines update` without any machines specified,
one can set the `clan.deployment.requireExplicitUpdate` option to true:
```{.nix hl_lines="5" .no-copy}
clan {
# ...
machines = {
"jon" = {
clan.deployment.requireExplicitUpdate = true;
};
};
};
```
This is useful for machines that are not always online or are not part of the regular update cycle.
### Uploading Flake Inputs
When updating remote machines, flake inputs are usually fetched by the build host.
However, if flake inputs require authentication (e.g., private repositories),
use the `--upload-inputs` flag to upload all inputs from your local machine:
```bash
clan machines update jon --upload-inputs
```
This is particularly useful when:
- The flake references private Git repositories
- Authentication credentials are only available on the local machine
- The build host doesn't have access to certain network resources

195
docs/site/guides/backups.md Normal file
View File

@@ -0,0 +1,195 @@
This guide explains how to set up and manage
[BorgBackup](https://borgbackup.readthedocs.io/) for secure, efficient backups
in a clan network. BorgBackup provides:
- Space efficient storage of backups with deduplication
- Secure, authenticated encryption
- Compression: lz4, zstd, zlib, lzma or none
- Mountable backups with FUSE
- Easy installation on multiple platforms: Linux, macOS, BSD, …
- Free software (BSD license).
- Backed by a large and active open-source community.
## Borgbackup Example
```nix
inventory.instances = {
borgbackup = {
module = {
name = "borgbackup";
input = "clan-core";
};
roles.client.machines."jon".settings = {
destinations."storagebox" = {
repo = "username@$hostname:/./borgbackup";
rsh = ''ssh -oPort=23 -i /run/secrets/vars/borgbackup/borgbackup.ssh'';
};
};
roles.server.machines = { };
};
};
```
The `input` should be named according to your flake input (see the sketch below). `jon` is
configured as a client machine with a destination pointing to a Hetzner Storage Box.
To see a list of all possible options go to [borgbackup clan service](../reference/clanServices/borgbackup.md)
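For example, the attribute name under which clan-core is declared in your flake is what `module.input = "clan-core"` refers to; a minimal sketch:
```nix
# flake.nix (excerpt): the attribute name 'clan-core' is what
# `module.input = "clan-core"` points at
inputs.clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
```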
## Roles
A Clan Service can have multiple roles; each role applies different Nix config to the machine.
### 1. Client
Clients are machines that create and send backups to various destinations. Each
client can have multiple backup destinations configured.
### 2. Server
Servers act as backup repositories, receiving and storing backups from client
machines. They can be dedicated backup servers within your clan network.
## Backup destinations
This service allows you to perform backups to multiple `destinations` (see the sketch after this list).
Destinations can be:
- **Local**: Local disk storage
- **Server**: Your own borgbackup server (using the `server` role)
- **Third-party services**: Such as Hetzner's Storage Box
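As a sketch of what multiple destinations can look like for a single client (machine and destination names, as well as the repo paths, are hypothetical):
```nix
roles.client.machines."jon".settings = {
  # A repository on a clan machine running the 'server' role
  destinations."alice" = {
    repo = "borg@alice:/var/lib/borgbackup/jon";
  };
  # A third-party destination such as a Hetzner Storage Box
  destinations."storagebox" = {
    repo = "user-sub1@user-sub1.your-storagebox.de:/./borgbackup";
    rsh = "ssh -p 23 -i /run/secrets/vars/borgbackup/borgbackup.ssh";
  };
};
```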
## State management
Backups are based on [states](../reference/clan.core/state.md). A state
defines which files should be backed up and how these files are obtained through
pre/post backup and restore scripts.
Here's an example for a user application `linkding`:
In this example:
- `/data/podman/linkding` is the application's data directory
- `/var/backup/linkding` is the staging directory where data is copied for
backup
```nix
clan.core.state.linkding = {
folders = [ "/var/backup/linkding" ];
preBackupScript = ''
export PATH=${
lib.makeBinPath [
config.systemd.package
pkgs.coreutils
pkgs.rsync
]
}
service_status=$(systemctl is-active podman-linkding)
if [ "$service_status" = "active" ]; then
systemctl stop podman-linkding
rsync -avH --delete --numeric-ids "/data/podman/linkding/" /var/backup/linkding/
systemctl start podman-linkding
fi
'';
postRestoreScript = ''
export PATH=${
lib.makeBinPath [
config.systemd.package
pkgs.coreutils
pkgs.rsync
]
}
service_status="$(systemctl is-active podman-linkding)"
if [ "$service_status" = "active" ]; then
systemctl stop podman-linkding
# Backup locally current linkding data
cp -rp "/data/podman/linkding" "/data/podman/linkding.bak"
# Restore from borgbackup
rsync -avH --delete --numeric-ids /var/backup/linkding/ "/data/podman/linkding/"
systemctl start podman-linkding
fi
'';
};
```
## Managing backups
In this section we go over how to manage your collection of backups with the clan command.
### Listing states
To see which files (`states`) will be backed up on a specific machine, use:
```bash
clan state list jon
```
This will show all configured states for the machine `jon`, for example:
```text
· service: linkding
folders:
- /var/backup/linkding
preBackupCommand: pre-backup-linkding
postRestoreCommand: post-restore-linkding
· service: zerotier
folders:
- /var/lib/zerotier-one
```
### Creating backups
To create a backup of a machine (e.g., `jon`), run:
```bash
clan backups create jon
```
This will back up all configured states (`zerotier` and `linkding` in this
example) from the machine `jon`.
### Listing available backups
To see all available backups, use:
```bash
clan backups list
```
This will display all backups with their timestamps:
```text
storagebox::username@username.your-storagebox.de:/./borgbackup::jon-jon-2025-07-22T19:40:10
storagebox::username@username.your-storagebox.de:/./borgbackup::jon-jon-2025-07-23T01:00:00
storagebox::username@username.your-storagebox.de:/./borgbackup::jon-storagebox-2025-07-24T01:00:00
storagebox::username@username.your-storagebox.de:/./borgbackup::jon-storagebox-2025-07-24T06:02:35
```
### Restoring backups
For restoring a backup you have two options.
#### Full restoration
To restore all services from a backup:
```bash
clan backups restore jon borgbackup storagebox::u444061@u444061.your-storagebox.de:/./borgbackup::jon-storagebox-2025-07-24T06:02:35
```
#### Partial restoration
To restore only a specific service (e.g., `linkding`):
```bash
clan backups restore --service linkding jon borgbackup storagebox::u444061@u444061.your-storagebox.de:/./borgbackup::jon-storagebox-2025-07-24T06:02:35
```

View File

@@ -1,71 +0,0 @@
This guide explains how to set up a [Hetzner Storage Box](https://docs.hetzner.com/storage/storage-box/general) as a backup destination instead of using an internal Clan backup server. Follow the steps below to configure and verify the setup.
### Step 1: Create a Hetzner Storage Box
Begin by [creating a Hetzner Storage Box account](https://docs.hetzner.com/storage/storage-box/getting-started/creating-a-storage-box).
### Step 2: Create a Sub-Account
Set up a sub-account for your `jon` machine. Save the SSH password for this account in your password manager for future reference.
### Step 3: Configure BorgBackup in `clan.nix`
Add the BorgBackup service to your `clan.nix` configuration. In this example, the `jon` machine will back up to `user-sub1@user-sub1.your-storagebox.de` in the `borgbackup` folder:
```nix hl_lines="9"
inventory.instances = {
borgbackup = {
module = {
name = "borgbackup";
input = "clan-core";
};
roles.client.machines."jon".settings = {
destinations."storagebox" = {
repo = "user-sub1@user-sub1.your-storagebox.de:/./borgbackup";
rsh = ''ssh -p 23 -oStrictHostKeyChecking=accept-new -i /run/secrets/vars/borgbackup/borgbackup.ssh'';
};
};
};
};
```
### Step 4: Generate SSH Keys
Run the following command to generate the SSH private keys:
```bash
clan vars generate
```
### Step 5: Add the Public Key to the Sub-Account
Add the generated SSH public key to the `user-sub1` account by running:
```bash
clan vars get jon borgbackup/borgbackup.ssh.pub | ssh -p23 user-sub1@user-sub1.your-storagebox.de install-ssh-key
```
### Step 6: Deploy the Configuration
Apply the changes to your Clan setup by executing:
```bash
clan machines update
```
### Step 7: Verify the Setup
Check if the configuration works by starting the BorgBackup service on the `jon` machine:
```bash
systemctl start borgbackup-job-storagebox.service &
```
Then, inspect the service logs to ensure everything is functioning correctly:
```bash
journalctl -u borgbackup-job-storagebox.service
```

View File

@@ -1,89 +0,0 @@
# Introduction to Clan Backups
This guide explains how to use the Clan backup and state management interface to configure, manage, and restore backups for your services and machines. By the end of this guide, you will understand how to define backup states, manage backups, and restore data.
## State Management
Clan backups are based on the concept of [states](../../reference/clan.core/state.md). A state is a Nix attribute set, defined as `clan.core.state.<name> = {};`, which specifies the files or directories to back up.
For example, if you have a clan service called `linkding`, you can define the folders to back up as follows:
```nix hl_lines="2"
clan.core.state.linkding = {
folders = [ "/var/backup/linkding" ];
};
```
In this example:
- `/var/backup/linkding` is the staging directory where data is prepared for backup.
This simple configuration ensures that all critical data for the `linkding` service is included in the backup process.
## Custom Pre and Post Backup Hooks
The state interface allows you to run custom scripts before creating a backup and after restoring one. These scripts are defined using the `preBackupScript` and `postRestoreScript` options. This can be useful for tasks like stopping services, syncing data, or performing cleanup operations.
### Example: Pre and Post Backup Scripts for the `linkding` Service
In the following example, we configure the `linkding` service to:
1. Stop the service before backing up its data.
2. Sync the data to a staging directory.
3. Restore the data and restart the service after restoration.
```nix hl_lines="5 26"
clan.core.state.linkding = {
folders = [ "/var/backup/linkding" ];
# Script to run before creating a backup
preBackupScript = ''
export PATH=${
lib.makeBinPath [
config.systemd.package
pkgs.coreutils
pkgs.rsync
]
}
# Check if the service is running
service_status=$(systemctl is-active podman-linkding)
if [ "$service_status" = "active" ]; then
# Stop the service and sync data to the backup directory
systemctl stop podman-linkding
rsync -avH --delete --numeric-ids "/data/podman/linkding/" /var/backup/linkding/
systemctl start podman-linkding
fi
'';
# Script to run after restoring a backup
postRestoreScript = ''
export PATH=${
lib.makeBinPath [
config.systemd.package
pkgs.coreutils
pkgs.rsync
]
}
# Check if the service is running
service_status="$(systemctl is-active podman-linkding)"
if [ "$service_status" = "active" ]; then
# Stop the service
systemctl stop podman-linkding
# Backup current data locally
cp -rp "/data/podman/linkding" "/data/podman/linkding.bak"
# Restore data from the backup directory
rsync -avH --delete --numeric-ids /var/backup/linkding/ "/data/podman/linkding/"
# Restart the service
systemctl start podman-linkding
fi
'';
};
```

View File

@@ -1,75 +0,0 @@
In this section we go over how to manage your collection of backups with the clan command.
### Listing states
To see which files (`states`) will be backed up on a specific machine, use:
```bash
clan state list jon
```
This will show all configured states for the machine `jon`, for example:
```text
· service: linkding
folders:
- /var/backup/linkding
preBackupCommand: pre-backup-linkding
postRestoreCommand: post-restore-linkding
· service: zerotier
folders:
- /var/lib/zerotier-one
```
### Creating backups
To create a backup of a machine (e.g., `jon`), run:
```bash
clan backups create jon
```
This will back up all configured states (`zerotier` and `linkding` in this
example) from the machine `jon`.
### Listing available backups
To see all available backups, use:
```bash
clan backups list
```
This will display all backups with their timestamps:
```text
storagebox::username@username.your-storagebox.de:/./borgbackup::jon-jon-2025-07-22T19:40:10
storagebox::username@username.your-storagebox.de:/./borgbackup::jon-jon-2025-07-23T01:00:00
storagebox::username@username.your-storagebox.de:/./borgbackup::jon-storagebox-2025-07-24T01:00:00
storagebox::username@username.your-storagebox.de:/./borgbackup::jon-storagebox-2025-07-24T06:02:35
```
### Restoring backups
For restoring a backup you have two options.
#### Full restoration
To restore all services from a backup:
```bash
clan backups restore jon borgbackup storagebox::u444061@u444061.your-storagebox.de:/./borgbackup::jon-storagebox-2025-07-24T06:02:35
```
#### Partial restoration
To restore only a specific service (e.g., `linkding`):
```bash
clan backups restore --service linkding jon borgbackup storagebox::u444061@u444061.your-storagebox.de:/./borgbackup::jon-storagebox-2025-07-24T06:02:35
```

View File

@@ -1,63 +0,0 @@
In this guide we explain how to set up a simple peer-to-peer backup system through the inventory, such that machines back up their state to other machines in the clan, ensuring redundancy and data safety.
### What is BorgBackup?
BorgBackup is a powerful and efficient backup solution designed for secure and space-efficient backups. It offers features such as:
- **Deduplication**: Saves storage space by avoiding duplicate data.
- **Encryption**: Ensures backups are secure and authenticated.
- **Compression**: Supports multiple compression algorithms like lz4, zstd, zlib, and more.
- **FUSE Mounting**: Allows backups to be mounted as a file system.
- **Cross-Platform**: Works on Linux, macOS, BSD, and more.
- **Open Source**: Licensed under BSD and supported by an active community.
While this guide uses BorgBackup, you can also use other backup services supported by Clan, depending on your requirements.
### Example Setup
In this example, we configure a backup system with three machines: `bob`, `jon`, and `alice`. The `bob` and `jon` machines will periodically back up their state folders to `alice`. The backups are encrypted for security.
```nix
inventory.instances = {
borgbackup = {
module = {
name = "borgbackup";
input = "clan-core";
};
roles.client.machines = {
"bob" = { };
"jon" = { };
};
roles.server.machines = {
"alice" = { };
};
};
};
```
## Roles
In a Clan Service, roles define how machines participate in the backup system. Each role applies specific Nix configurations to the machine, enabling flexibility and scalability in your backup setup.
- **Client**: These machines create backups and send them to designated destinations. Clients can be configured to back up to multiple destinations, ensuring redundancy and reliability.
- **Server**: These machines act as repositories, receiving and securely storing backups from client machines. Servers can be dedicated backup nodes within your clan network, providing centralized storage for all backups.
## Backup destinations
This service allows you to perform backups to multiple `destinations`.
Destinations can be:
- **Local**: Local disk storage
- **Server**: Your own borgbackup server (using the `server` role)
- **Third-party services**: Such as Hetzner's Storage Box
However, if BorgBackup does not meet your needs, you can implement your own backup clan service.
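As a sketch of how a third-party destination could be wired up, assuming the client role exposes a `destinations` setting (check the borgbackup service reference for the exact schema); the destination name chosen here (`storagebox`) is what later appears in `clan backups list` output:
```nix
inventory.instances = {
  borgbackup = {
    module = {
      name = "borgbackup";
      input = "clan-core";
    };
    roles.server.machines."alice" = { };
    roles.client.machines."jon".settings = {
      # Hypothetical third-party destination (e.g. a Hetzner Storage Box);
      # its name is reused in backup listings and restore commands.
      destinations.storagebox.repo =
        "username@username.your-storagebox.de:/./borgbackup";
    };
  };
};
```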

View File

@@ -26,7 +26,7 @@ pkgs.mkShell {
## Debugging nixos-anywhere
If you encounter a bug in a complex shell script such as `nixos-anywhere`, start by replacing the `nixos-anywhere` command with a local checkout of the project; see the [contribution](../../guides/contributing/CONTRIBUTING.md) section for an example.
If you encounter a bug in a complex shell script such as `nixos-anywhere`, start by replacing the `nixos-anywhere` command with a local checkout of the project; see the [contribution](./CONTRIBUTING.md) section for an example.
## The Debug Flag

View File

@@ -67,59 +67,6 @@ nix build .#checks.x86_64-linux.{test-attr-name}
```
(replace `{test-attr-name}` with the name of the test)
### Testing services with vars
Services that define their own vars (using `clan.core.vars.generators`) require generating test vars before running the tests.
#### Understanding the `clan.directory` setting
The `clan.directory` option is critical for vars generation and loading in tests. This setting determines:
1. **Where vars are generated**: When you run `update-vars`, it creates `vars/` and `sops/` directories inside the path specified by `clan.directory`
2. **Where vars are loaded from**: During test execution, machines look for their vars and secrets relative to `clan.directory`
#### Generating test vars
For services that define vars, you must first run:
```shellSession
nix run .#checks.x86_64-linux.{test-attr-name}.update-vars
```
This generates the necessary var files in the directory specified by `clan.directory`. After running this command, you can run the test normally:
```shellSession
nix run .#checks.x86_64-linux.{test-attr-name}
```
#### Example: service-dummy-test
The `service-dummy-test` is a good example of a test that uses vars. To run it:
```shellSession
# First, generate the test vars
nix run .#checks.x86_64-linux.service-dummy-test.update-vars
# Then run the test
nix run .#checks.x86_64-linux.service-dummy-test
```
#### Common issues
If `update-vars` fails, you may need to ensure that:
- **`clan.directory` is set correctly**: It should point to the directory where you want vars to be generated (typically `clan.directory = ./.;` in your test definition)
- **Your test defines machines**: Machines must be defined in `clan.inventory.machines` or through the inventory system
- **Machine definitions are complete**: Each machine should have the necessary service configuration that defines the vars generators
**If vars are not found during test execution:**
- Verify that `clan.directory` points to the same location where you ran `update-vars`
- Check that the `vars/` and `sops/` directories exist in that location
- Ensure the generated files match the machines and generators defined in your test
You can reference `/checks/service-dummy-test/` to see a complete working example of a test with vars, including the correct directory structure.
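As a rough orientation, a test definition satisfying the checklist above might look like this minimal sketch (attribute names follow the points above, not necessarily the exact framework API):
```nix
{
  name = "service-dummy-test";
  clan = {
    # `update-vars` generates vars/ and sops/ here,
    # and the test loads them from the same place
    directory = ./.;
    # machines must be defined so their vars generators are known
    inventory.machines.server = { };
  };
  # ... service instances and the test script go here ...
}
```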
### Debugging VM tests
The following techniques can be used to debug a VM test:

View File

@@ -5,7 +5,7 @@ This guide provides an example setup for a single-disk ZFS system with native en
This configuration only applies to `systemd-boot` enabled systems and **requires** UEFI booting.
!!! Info "Secure Boot"
This guide is compatible with systems that have [secure boot disabled](../guides/secure-boot.md). If you encounter boot issues, check if secure boot needs to be disabled in your UEFI settings.
This guide is compatible with systems that have [secure boot disabled](./secure-boot.md). If you encounter boot issues, check if secure boot needs to be disabled in your UEFI settings.
Replace the highlighted lines with your own disk-id.
You can find out your disk-id by executing:

View File

@@ -1,29 +1,33 @@
Clan supports integration with [flake-parts](https://flake.parts/), a framework for constructing your `flake.nix` using modules. Follow these steps to integrate Clan with flake-parts:
## Step 1: Update Your Flake Inputs
Clan supports integration with [flake-parts](https://flake.parts/), a framework for constructing your `flake.nix` using modules.
Add `flake-parts` as a dependency in your `flake.nix` file alongside existing dependencies like `clan-core` and `nixpkgs`. Here's an example:
To construct your Clan using flake-parts, follow these steps:
## Update Your Flake Inputs
To begin, you'll need to add `flake-parts` as a new dependency in your flake's inputs. This is alongside the already existing dependencies, such as `clan-core` and `nixpkgs`. Here's how you can update your `flake.nix` file:
```nix
# flake.nix
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
# Add flake-parts
# New flake-parts input
flake-parts.url = "github:hercules-ci/flake-parts";
flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs";
clan-core = {
url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
inputs.nixpkgs.follows = "nixpkgs"; # Avoid this if using nixpkgs stable.
inputs.flake-parts.follows = "flake-parts"; # New
inputs.nixpkgs.follows = "nixpkgs"; # Don't do this if your machines are on nixpkgs stable.
# New
inputs.flake-parts.follows = "flake-parts";
};
};
}
```
## Step 2: Import the Clan flake-parts Module
## Import the Clan flake-parts Module
Next, import the Clan flake-parts module to make the [Clan options](../reference/options/clan.md) available within `mkFlake`:
After updating your flake inputs, the next step is to import the Clan flake-parts module. This will make the [Clan options](/options) available within `mkFlake`.
```nix
{
@@ -39,9 +43,9 @@ Next, import the Clan flake-parts module to make the [Clan options](../reference
}
```
## Step 3: Configure Clan Settings and Define Machines
## Configure Clan Settings and Define Machines
Configure Clan-wide settings and define machines. Here's an example `flake.nix`:
Next you'll need to configure Clan wide settings and define machines, here's an example of how `flake.nix` should look:
```nix
{
@@ -58,22 +62,24 @@ Configure Clan-wide settings and define machines. Here's an example `flake.nix`:
];
# Define your Clan
# See: https://docs.clan.lol/reference/nix-api/clan/
clan = {
meta.name = ""; # Required and must be unique
# Clan wide settings
meta.name = ""; # This is required and must be unique
machines = {
jon = {
imports = [
./modules/firefox.nix
# Add more modules as needed
# ... more modules
];
nixpkgs.hostPlatform = "x86_64-linux";
# Enable remote Clan commands over SSH
# Set this for Clan commands to work remotely over SSH like `clan machines update`
clan.core.networking.targetHost = "root@jon";
# Disk configuration
# remote> lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
disko.devices.disk.main = {
device = "/dev/disk/by-id/nvme-eui.e8238fa6bf530001001b448b4aec2929";
};
@@ -84,4 +90,7 @@ Configure Clan-wide settings and define machines. Here's an example `flake.nix`:
}
```
For more details on configuring `flake-parts` and available Clan options, refer to the [Clan module documentation](https://git.clan.lol/clan/clan-core/src/branch/main/flakeModules/clan.nix).
For detailed information about configuring `flake-parts` and the available options within Clan,
refer to the [Clan module](https://git.clan.lol/clan/clan-core/src/branch/main/flakeModules/clan.nix) documentation.
---

View File

@@ -1,10 +1,12 @@
# How to add machines
Machines can be added using the following methods:
- Create a file `machines/{machine_name}/configuration.nix` (See: [File Autoincludes](../guides/inventory/autoincludes.md))
- Create a file `machines/{machine_name}/configuration.nix` (See: [File Autoincludes](../inventory/autoincludes.md))
- Imperative via cli command: `clan machines create`
- Editing Nix expressions in `flake.nix`. See [`clan-core.lib.clan`](../reference/options/clan.md)
- Editing Nix expressions in `flake.nix`. See [`clan-core.lib.clan`](/options/?scope=Flake Options (clan.nix file))
See the complete [list](../guides/inventory/autoincludes.md) of auto-loaded files.
See the complete [list](../inventory/autoincludes.md) of auto-loaded files.
## Create a machine
@@ -18,6 +20,8 @@ See the complete [list](../guides/inventory/autoincludes.md) of auto-loaded file
};
# Additional NixOS configuration can be added here.
# machines/jon/configuration.nix will be automatically imported.
# See: https://docs.clan.lol/guides/more-machines/#automatic-registration
machines = {
# jon = { config, ... }: {
# environment.systemPackages = [ pkgs.asciinema ];
@@ -38,8 +42,8 @@ See the complete [list](../guides/inventory/autoincludes.md) of auto-loaded file
### Configuring a machine
!!! Note
The option `inventory.machines.<name>` is used to define metadata about the machine.
That includes, for example, `deploy.targethost`, `machineClass` or `tags`.
The option: `machines.<name>` is used to add extra *nixosConfiguration* to a machine
@@ -71,7 +75,7 @@ This example demonstrates what is needed based on a machine called `jon`:
```
1. Tags can be used to automatically add this machine to services later on. You don't need to set this now.
2. Add your *ssh key* here. That will ensure you can always log in to your machine via *ssh* in case something goes wrong.
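For illustration, such metadata could look like the following sketch (the option casing is an assumption; check the machines reference):
```nix
inventory.machines.jon = {
  # Tags let services pick this machine up automatically later
  tags = [ "backup" ];
  # Where remote commands such as `clan machines update` connect
  deploy.targetHost = "root@jon";
};
```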
### (Optional) Create a `configuration.nix`
@@ -99,8 +103,8 @@ git mv ./machines/jon ./machines/<your-machine-name>
Since your Clan configuration lives inside a Git repository, remember:
- Only files tracked by Git (`git add`) are recognized.
- Whenever you add, rename, or remove files, run:
```bash
git add ./machines/<your-machine-name>

View File

@@ -1,3 +1,5 @@
# How to add services
A service in clan is a self-contained, reusable unit of system configuration that provides a specific piece of functionality across one or more machines.
Think of it as a recipe for running a tool — like automatic backups, VPN networking, monitoring, etc.
@@ -8,7 +10,7 @@ In Clan Services are multi-Host & role-based:
- You can use tags instead of explicit machine names.
To learn more: [Guide about clanService](../guides/services/introduction-to-services.md)
To learn more: [Guide about clanService](../inventory/clanServices.md)
!!! Important
It is recommended to add at least one networking service such as `zerotier` that allows you to reach all your clan machines from your setup computer across the globe.
@@ -38,8 +40,8 @@ To learn more: [Guide about clanService](../guides/services/introduction-to-serv
}
```
1. See [services/official](../services/definition.md) for all available services and how to configure them.
Or read [guides/services](../guides/services/community.md) if you want to bring your own
1. See [reference/clanServices](../../reference/clanServices/index.md) for all available services and how to configure them.
Or read [authoring/clanServices](../services/community.md) if you want to bring your own
2. Replace `__YOUR_CONTROLLER_` with the *name* of your machine.
@@ -71,5 +73,5 @@ Adding the following services is recommended for most users:
```
1. The `admin` service will generate a **root-password** and **add your ssh-key** that allows for convenient administration.
2. Equivalent to directly setting `authorizedKeys` like in [configuring a machine](../getting-started/add-machines.md#configuring-a-machine)
2. Equivalent to directly setting `authorizedKeys` like in [configuring a machine](./add-machines.md#configuring-a-machine)
3. Adds `user = jon` as a user on all machines. Will create a `home` directory, and prompt for a password before deployment.
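For orientation, the numbered callouts above annotate instance snippets along these lines; here is a hedged sketch of a `zerotier` instance (role names are taken from the zerotier service and may differ in your version):
```nix
inventory.instances = {
  zerotier = {
    module = {
      name = "zerotier";
      input = "clan-core";
    };
    # One machine acts as the controller ...
    roles.controller.machines."__YOUR_CONTROLLER_" = { };
    # ... and every machine joins the network as a peer
    roles.peer.tags = [ "all" ];
  };
};
```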

View File

@@ -1,17 +1,19 @@
# How to add users
!!! Note "Under construction"
Clan's users concept is not finished yet. This guide outlines some solutions from our community.
Defining users can be done in many different ways. We want to highlight two approaches:
- Using clan's [users](../services/official/users.md) service.
- Using clan's [users](../../reference/clanServices/users.md) service.
- Using a custom approach.
## Adding Users using the [users](../services/official/users.md) service
## Adding Users using the [users](../../reference/clanServices/users.md) service
To add a first *user*, this guide leverages two things:
- [services](../services/definition.md): Allows binding arbitrary logic to something we call an `instance`.
- [services/users](../services/official/users.md): Implements logic for adding a single user perInstance.
- [clanServices](../../reference/clanServices/index.md): Allows binding arbitrary logic to something we call an `instance`.
- [clanServices/users](../../reference/clanServices/users.md): Implements logic for adding a single user perInstance.
The example shows how to add a user called `jon`:
@@ -49,7 +51,7 @@ The example shows how to add a user called `jon`:
The `users` service creates a `/home/jon` directory, allows `jon` to sign in and will take care of the user's password.
For more information see [services/users](../services/official/users.md)
For more information see [clanService/users](../../reference/clanServices/users.md)
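A minimal instance could look like this sketch (the `user` setting name is an assumption; see the users service reference for the full set of settings):
```nix
inventory.instances = {
  jon-user = {
    module = {
      name = "users";
      input = "clan-core";
    };
    # Create the user on every machine ...
    roles.default.tags = [ "all" ];
    # ... with this per-instance setting
    roles.default.settings.user = "jon";
  };
};
```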
## Using a custom approach

View File

@@ -1,3 +1,5 @@
# Configure Disk Config
By default clan uses [disko](https://github.com/nix-community/disko) which allows for declarative disk partitioning.
To see what disk templates are available run:
@@ -62,9 +64,9 @@ You can have a look and customize it if needed.
## Deploy the machine
**Finally deployment time!**
This command is destructive and will format your disk and install NixOS on it! It is equivalent to appending `--phases kexec,disko,install,reboot`.
```bash

View File

@@ -1,8 +1,10 @@
# Convert existing NixOS configurations
This guide will help you convert your existing NixOS configurations into a Clan.
!!! Warning
Migrating instead of starting fresh can be trickier and might lead to bugs or
unexpected issues. We recommend reading the [Getting Started](../getting-started/creating-your-first-clan.md) guide first.
unexpected issues. We recommend reading the [Getting Started](./index.md) guide first.
Once you have a working setup and understand the concepts, transferring your NixOS configurations over is easy.
@@ -169,7 +171,7 @@ Clan needs to know where it can reach your hosts. For testing purpose set
}
```
See our guide on properly [configuring machine networking](../guides/networking/networking.md)
See our guide on properly [configuring machine networking](../networking/networking.md)
## Next Steps

View File

@@ -1,7 +1,9 @@
# USB Installer Image for Physical Machines
To install Clan on physical machines, you need to use our custom installer image. This is necessary for proper installation and operation.
!!! note "Deploying to a Virtual Machine?"
If you're deploying to a virtual machine (VM), you can skip this section and go directly to the [Deploy Virtual Machine](../../getting-started/deploy-to-virtual-machine.md) step. In this scenario, we automatically use [nixos-anywhere](https://github.com/nix-community/nixos-anywhere) to replace the kernel during runtime.
If you're deploying to a virtual machine (VM), you can skip this section and go directly to the [Deploy Virtual Machine](./hardware-report-virtual.md) step. In this scenario, we automatically use [nixos-anywhere](https://github.com/nix-community/nixos-anywhere) to replace the kernel during runtime.
??? info "Why nixos-anywhere Doesn't Work on Physical Hardware?"
nixos-anywhere relies on [kexec](https://wiki.archlinux.org/title/Kexec) to replace the running kernel with our custom one. This method often has compatibility issues with real hardware, especially systems with dedicated graphics cards like laptops and servers, leading to crashes and black screens.
@@ -150,7 +152,7 @@ sudo umount /dev/sdb1
## Boot From USB Stick
- To use, boot from the Clan USB drive with **secure boot turned off**. For step by step instructions go to [Disabling Secure Boot](../../guides/secure-boot.md)
- To use, boot from the Clan USB drive with **secure boot turned off**. For step by step instructions go to [Disabling Secure Boot](../secure-boot.md)
## (Optional) Connect to Wifi Manually

View File

@@ -1,16 +1,18 @@
# Installing a Physical Machine
Now that you have created a machine, added some services, and set up secrets, this guide will walk you through how to deploy it.
### Prerequisites
- [x] RAM > 2GB
- [x] **Two Computers**: You need one computer that you're getting ready (we'll call this the Target Computer) and another one to set it up from (we'll call this the Setup Computer). Make sure both can talk to each other over the network using SSH.
- [x] **Machine configuration**: See our basic [adding and configuring machine guide](../../getting-started/add-machines.md)
- [x] **Initialized secrets**: See [secrets](../../guides/secrets.md) for how to initialize your secrets.
- [x] **USB Flash Drive**: See [Clan Installer](../../getting-started/deploy-to-physical-machine/flash-installer.md)
- [x] **Machine configuration**: See our basic [adding and configuring machine guide](./add-machines.md)
- [x] **Initialized secrets**: See [secrets](../secrets.md) for how to initialize your secrets.
- [x] **USB Flash Drive**: See [Clan Installer](./create-installer.md)
### Image Installer
This method makes use of the [image installers](../../getting-started/deploy-to-physical-machine/flash-installer.md).
This method makes use of the [image installers](./create-installer.md).
The installer will randomly generate a password and local addresses on boot, then run an SSH server with these preconfigured.
The installer shows its deployment-relevant information in two formats: as text and as a QR code.
@@ -66,7 +68,7 @@ This is an example of the booted installer.
```
2. The root password for the installer medium.
This password is autogenerated and meant to be easily typeable.
3. See how to connect the installer medium to wlan [here](../../getting-started/deploy-to-physical-machine/flash-installer.md).
3. See how to connect the installer medium to wlan [here](./create-installer.md).
!!!tip
For easy sharing of deployment information via QR code, we highly recommend using [KDE Connect](https://apps.kde.org/de/kdeconnect/).
@@ -111,4 +113,4 @@ The following command will generate a hardware report with [nixos-facter](https:
If you are using our template, `[MACHINE]` would be `jon`
[Next Step (Choose Disk Format)](../../getting-started/configure-disk.md){ .md-button .md-button--primary }
[Next Step (Choose Disk Format)](./choose-disk.md){ .md-button .md-button--primary }

View File

@@ -1,8 +1,12 @@
# Generate a VM Hardware Report
Now that you have created a machine, added some services, and set up secrets, this guide will walk you through how to deploy it.
## Prerequisites
- [x] RAM > 2GB
- [x] **Two Computers**: You need one computer that you're getting ready (we'll call this the Target Computer) and another one to set it up from (we'll call this the Setup Computer). Make sure both can talk to each other over the network using SSH.
- [x] **Machine configuration**: See our basic [adding and configuring machine guide](../getting-started/add-machines.md)
- [x] **Machine configuration**: See our basic [adding and configuring machine guide](./add-machines.md)
Clan supports any cloud machine if it is reachable via SSH and supports `kexec`.
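Before generating the report, the VM must be reachable over SSH, for example by pointing `targetHost` at it (the address is a placeholder):
```nix
machines.jon = {
  # Replace with the VM's SSH-reachable address
  clan.core.networking.targetHost = "root@<vm-ip>";
};
```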

View File

@@ -1,4 +1,6 @@
# :material-clock-fast: Getting Started
Ready to manage your fleet of machines?
We will create a declarative infrastructure using **clan**, **git**, and **nix flakes**.
@@ -41,7 +43,7 @@ Make sure you have the following:
## Create a New Clan
1. Navigate to your desired directory:
```shellSession
cd <your-directory>
```
@@ -74,7 +76,7 @@ my-clan/
```
!!! note "Templates"
This is the structure for the `default` template.
Use `clan templates list` and `clan templates --help` for available templates & more. Keep in mind that the exact files may change as templates evolve.
@@ -119,7 +121,7 @@ Name: __CHANGE_ME__
Description: None
```
This confirms your setup is working correctly.
You can now change the default name by editing the `meta.name` field in your `clan.nix` file.
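For example, the relevant excerpt of `clan.nix` would be:
```nix
# clan.nix (excerpt)
{
  meta.name = "my-clan"; # replace the placeholder with a unique name
  # ...
}
```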

View File

@@ -1,3 +1,4 @@
# Update Machines
The Clan command line interface enables you to update machines remotely over SSH.
@@ -31,7 +32,7 @@ Ensure that the root login is secured and only used when necessary.
## Multiple Target Hosts
You can now experiment with a new interface that allows you to define multiple `targetHost` addresses for different VPNs. Learn more and try it out in our [networking guide](../guides/networking/networking.md).
You can now experiment with a new interface that allows you to define multiple `targetHost` addresses for different VPNs. Learn more and try it out in our [networking guide](../networking/networking.md).
## Updating Machine Configurations
@@ -78,7 +79,7 @@ clan {
`buildHost` / `targetHost`, and other network settings can be temporarily overridden for a single command:
For the full list of flags refer to the [Clan CLI](../reference/cli/index.md)
For the full list of flags refer to the [Clan CLI](../../reference/cli/index.md)
```bash
# Build on a remote host

View File

@@ -1,213 +0,0 @@
A common use case you might have is to host services and applications which are
only reachable within your clan.
This guide explains how to set up such secure, clan-internal web services using
a custom top-level domain (TLD) with SSL certificates.
Your services will be accessible only within your clan network and secured with
proper SSL certificates that all clan machines trust.
## Overview
By combining the `coredns` and `certificates` clan services, you can:
- Create a custom TLD for your clan (e.g. `.c`)
- Host internal web services accessible via HTTPS (e.g. `https://api.c`, `https://dashboard.c`)
- Automatically provision and trust SSL certificates across all clan machines
- Keep internal services secure and isolated from the public internet
The setup uses two clan services working together:
- **coredns service**: Provides DNS resolution for your custom TLD within the clan
- **certificates service**: Creates a certificate authority (CA) and issues SSL certificates for your TLD
### DNS Resolution Flow
1. A clan machine tries to access `https://service.c`
2. The machine queries its local DNS resolver (unbound)
3. For `.c` domains, the query is forwarded to your clan's CoreDNS server. All
other domains will be resolved as usual.
4. CoreDNS returns the IP address of the machine hosting the service
5. The machine connects directly to the service over HTTPS
6. The SSL certificate is trusted because all machines trust your clan's CA
## Step-by-Step Setup
The following setup assumes you have a VPN (e.g. ZeroTier) already running. The
IPs configured in the options below will most likely be the ZeroTier IPs of the
respective machines.
### Configure the CoreDNS Service
The CoreDNS service has two roles:
- `server`: Runs the DNS server for your custom TLD
- `default`: Makes machines use the DNS server for TLD resolution and allows exposing services
Add this to your inventory:
```nix
inventory = {
machines = {
dns-server = { }; # Machine that will run the DNS server
web-server = { }; # Machine that will host web services
client = { }; # Any other machines in your clan
};
instances = {
coredns = {
# Add the default role to all machines
roles.default.tags = [ "all" ];
# DNS server for the .c TLD
roles.server.machines.dns-server.settings = {
ip = "192.168.1.10"; # IP of your DNS server machine
tld = "c";
};
# Machine hosting services (example: ca.c and admin.c)
roles.default.machines.web-server.settings = {
ip = "192.168.1.20"; # IP of your web server
services = [ "ca" "admin" ];
};
};
};
};
```
### Configure the Certificates Service
The certificates service also has two roles:
- `ca`: Sets up the certificate authority on a server
- `default`: Makes machines trust the CA and allows them to request certificates
Add this to your inventory:
```nix
inventory = {
instances = {
# ... coredns configuration from above ...
certificates = {
# Set up CA for .c domain
roles.ca.machines.dns-server.settings = {
tlds = [ "c" ];
acmeEmail = "admin@example.com"; # Optional: your email
};
# Add default role to all machines to trust the CA
roles.default.tags = [ "all" ];
};
};
};
```
### Complete Example Configuration
Here's a complete working example:
```nix
inventory = {
machines = {
caserver = { }; # DNS server + CA + web services
webserver = { }; # Additional web services
client = { }; # Client machine
};
instances = {
coredns = {
# Add the default role to all machines
roles.default.tags = [ "all" ];
# DNS server for the .c TLD
roles.server.machines.caserver.settings = {
ip = "192.168.8.5";
tld = "c";
};
# machine hosting https://ca.c (our CA for SSL)
roles.default.machines.caserver.settings = {
ip = "192.168.8.5";
services = [ "ca" ];
};
# machine hosting https://blub.c (some internal web-service)
roles.default.machines.webserver.settings = {
ip = "192.168.8.6";
services = [ "blub" ];
};
};
# Provide https for the .c top-level domain
certificates = {
roles.ca.machines.caserver.settings = {
tlds = [ "c" ];
acmeEmail = "admin@example.com";
};
roles.default.tags = [ "all" ];
};
};
};
```
## Testing Your Configuration
DNS resolution can be tested with:
```bash
# On any clan machine, test DNS resolution
nslookup ca.c
nslookup blub.c
```
You should also now be able to visit `https://ca.c` to access the certificate authority or visit `https://blub.c` to access your web service.
## Troubleshooting
### DNS Resolution Issues
1. **Check if DNS server is running**:
```bash
# On the DNS server machine
systemctl status coredns
```
2. **Verify DNS configuration**:
```bash
# Check if the right nameservers are configured
cat /etc/resolv.conf
systemctl status systemd-resolved
```
3. **Test DNS directly**:
```bash
# Query the DNS server directly
dig @192.168.8.5 ca.c
```
### Certificate Issues
1. **Check CA status**:
```bash
# On the CA machine
systemctl status step-ca
systemctl status nginx
```
2. **Verify certificate trust**:
```bash
# Test certificate trust
curl -v https://ca.c
openssl s_client -connect ca.c:443 -verify_return_error
```
3. **Check ACME configuration**:
```bash
# View ACME certificates
ls /var/lib/acme/
journalctl -u acme-ca.c.service
```

View File

@@ -1,11 +1,11 @@
Clan's services are a modular way to define and deploy services across
Clan's inventory system is a composable way to define and deploy services across
machines.
This guide shows how to **instantiate** a **service**, explains how service
definitions are structured and how to pick or create services
This guide shows how to **instantiate** a `clanService`, explains how service
definitions are structured in your inventory, and how to pick or create services
from modules exposed by flakes.
A similar term: **Multi-host-modules** was introduced previously in the [nixus
The term **Multi-host-modules** was introduced previously in the [nixus
repository](https://github.com/infinisil/nixus) and represents a similar
concept.
@@ -68,7 +68,7 @@ inventory.instances = {
## Module Settings
Each role might expose configurable options. See clan's [clanServices
reference](../../services/definition.md) for all available options.
reference](../../reference/clanServices/index.md) for all available options.
Settings can be set per-machine or per-role. The latter is applied to all
machines that are assigned to that role.
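Schematically, with the settings bodies left as placeholders:
```nix
inventory.instances.borgbackup = {
  # Applied to every machine that has the client role
  roles.client.settings = { };
  # Applied only to `jon`, on top of the role-level settings
  roles.client.machines."jon".settings = { };
};
```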
@@ -155,13 +155,13 @@ inventory.instances = {
You can use services exposed by Clan's core module library, `clan-core`.
🔗 See: [List of Available Services in clan-core](../../services/definition.md)
🔗 See: [List of Available Services in clan-core](../../reference/clanServices/index.md)
## Defining Your Own Service
You can also author your own `clanService` modules.
🔗 Learn how to write your own service: [Authoring a service](../../guides/services/community.md)
🔗 Learn how to write your own service: [Authoring a service](../services/community.md)
You might expose your service module from your flake — this makes it easy for other people to also use your module in their clan.
@@ -177,5 +177,7 @@ ______________________________________________________________________
## What's Next?
- [Author your own clanService →](../../guides/services/community.md)
- [Migrate from clanModules →](../../guides/migrations/migrate-inventory-services.md)
- [Author your own clanService →](../services/community.md)
- [Migrate from clanModules →](../migrations/migrate-inventory-services.md)
<!-- TODO: * [Understand the architecture →](../explanation/clan-architecture.md) -->

View File

@@ -17,13 +17,13 @@ The following tutorial will walk through setting up a Backup service where the t
## Prerequisites
- [x] [Add some machines](../../getting-started/add-machines.md) to your Clan.
- [x] [Add some machines](../getting-started/add-machines.md) to your Clan.
## Services
The inventory defines `instances` of clan services. Membership of `machines` is defined via `roles` exclusively.
See each [modules documentation](../../services/definition.md) for its available roles.
See each [modules documentation](../../reference/clanServices/index.md) for its available roles.
### Adding services to machines

View File

@@ -7,7 +7,7 @@ This guide explains how to manage macOS machines using Clan.
Currently, Clan supports the following features for macOS:
- `clan machines update` for existing [nix-darwin](https://github.com/nix-darwin/nix-darwin) installations
- Support for [vars](../guides/vars/vars-overview.md)
- Support for [vars](./vars/vars-overview.md)
## Add Your Machine to Your Clan Flake

View File

@@ -1,7 +1,7 @@
# Migrating from using `clanModules` to `clanServices`
**Audience**: This is a guide for **people using `clanModules`**.
If you are a **module author** and need to migrate your modules please consult our **new** [clanServices authoring guide](../../guides/services/community.md)
If you are a **module author** and need to migrate your modules please consult our **new** [clanServices authoring guide](../services/community.md)
## What's Changing?
@@ -157,7 +157,7 @@ instances = {
### Move `services` entries to `instances`
Check if a service that you use has been migrated [In our reference](../../services/definition.md)
Check if a service that you use has been migrated [In our reference](../../reference/clanServices/index.md)
In your inventory, move it from:
@@ -247,45 +247,45 @@ The following table shows the migration status of each deprecated clanModule:
| clanModule | Migration Status | Notes |
|--------------------------|-------------------------------------------------------------------|------------------------------------------------------------------|
| `admin` | ✅ [Migrated](../../services/official/admin.md) | |
| `admin` | ✅ [Migrated](../../reference/clanServices/admin.md) | |
| `auto-upgrade` | ❌ Removed | |
| `borgbackup-static` | ❌ Removed | |
| `borgbackup` | ✅ [Migrated](../../services/official/borgbackup.md) | |
| `data-mesher` | ✅ [Migrated](../../services/official/data-mesher.md) | |
| `borgbackup` | ✅ [Migrated](../../reference/clanServices/borgbackup.md) | |
| `data-mesher` | ✅ [Migrated](../../reference/clanServices/data-mesher.md) | |
| `deltachat` | ❌ Removed | |
| `disk-id` | ❌ Removed | |
| `dyndns` | ✅ [Migrated](../../services/official/dyndns.md) | |
| `dyndns` | ✅ [Migrated](../../reference/clanServices/dyndns.md) | |
| `ergochat` | ❌ Removed | |
| `garage` | ✅ [Migrated](../../services/official/garage.md) | |
| `garage` | ✅ [Migrated](../../reference/clanServices/garage.md) | |
| `golem-provider` | ❌ Removed | |
| `heisenbridge` | ❌ Removed | |
| `importer` | ✅ [Migrated](../../services/official/importer.md) | |
| `iwd` | ❌ Removed | Use [wifi service](../../services/official/wifi.md) instead |
| `localbackup` | ✅ [Migrated](../../services/official/localbackup.md) | |
| `importer` | ✅ [Migrated](../../reference/clanServices/importer.md) | |
| `iwd` | ❌ Removed | Use [wifi service](../../reference/clanServices/wifi.md) instead |
| `localbackup` | ✅ [Migrated](../../reference/clanServices/localbackup.md) | |
| `localsend` | ❌ Removed | |
| `machine-id` | ✅ [Migrated](../../reference/clan.core/settings.md) | Now an [option](../../reference/clan.core/settings.md) |
| `matrix-synapse` | ✅ [Migrated](../../services/official/matrix-synapse.md) | |
| `matrix-synapse` | ✅ [Migrated](../../reference/clanServices/matrix-synapse.md) | |
| `moonlight` | ❌ Removed | |
| `mumble` | ❌ Removed | |
| `mycelium` | ✅ [Migrated](../../services/official/mycelium.md) | |
| `mycelium` | ✅ [Migrated](../../reference/clanServices/mycelium.md) | |
| `nginx` | ❌ Removed | |
| `packages` | ✅ [Migrated](../../services/official/packages.md) | |
| `packages` | ✅ [Migrated](../../reference/clanServices/packages.md) | |
| `postgresql` | ✅ [Migrated](../../reference/clan.core/settings.md) | Now an [option](../../reference/clan.core/settings.md) |
| `root-password` | ✅ [Migrated](../../services/official/users.md) | See [migration guide](../../services/official/users.md#migration-from-root-password-module) |
| `root-password` | ✅ [Migrated](../../reference/clanServices/users.md) | See [migration guide](../../reference/clanServices/users.md#migration-from-root-password-module) |
| `single-disk` | ❌ Removed | |
| `sshd` | ✅ [Migrated](../../services/official/sshd.md) | |
| `sshd` | ✅ [Migrated](../../reference/clanServices/sshd.md) | |
| `state-version` | ✅ [Migrated](../../reference/clan.core/settings.md) | Now an [option](../../reference/clan.core/settings.md) |
| `static-hosts` | ❌ Removed | |
| `sunshine` | ❌ Removed | |
| `syncthing-static-peers` | ❌ Removed | |
| `syncthing` | ✅ [Migrated](../../services/official/syncthing.md) | |
| `syncthing` | ✅ [Migrated](../../reference/clanServices/syncthing.md) | |
| `thelounge` | ❌ Removed | |
| `trusted-nix-caches` | ✅ [Migrated](../../services/official/trusted-nix-caches.md) | |
| `user-password` | ✅ [Migrated](../../services/official/users.md) | |
| `trusted-nix-caches` | ✅ [Migrated](../../reference/clanServices/trusted-nix-caches.md) | |
| `user-password` | ✅ [Migrated](../../reference/clanServices/users.md) | |
| `vaultwarden` | ❌ Removed | |
| `xfce` | ❌ Removed | |
| `zerotier-static-peers` | ❌ Removed | |
| `zerotier` | ✅ [Migrated](../../services/official/zerotier.md) | |
| `zerotier` | ✅ [Migrated](../../reference/clanServices/zerotier.md) | |
| `zt-tcp-relay` | ❌ Removed | |
---
@@ -378,6 +378,6 @@ instances = {
## Further reference
* [Inventory Concept](../../guides/inventory/inventory.md)
* [Authoring a 'clan.service' module](../../guides/services/community.md)
* [ClanServices](../../guides/services/introduction-to-services.md)
* [Inventory Concept](../inventory/inventory.md)
* [Authoring a 'clan.service' module](../services/community.md)
* [ClanServices](../inventory/clanServices.md)

View File

@@ -1,9 +1,9 @@
# Migrate modules from `facts` to `vars`
For a high level overview about `vars` see our [blog post](https://clan.lol/blog/vars/).
This guide will help you migrate your modules that still use our [`facts`](../../guides/migrations/migration-facts-vars.md) backend
to the [`vars`](../../guides/vars/vars-overview.md) backend.
This guide will help you migrate your modules that still use our [`facts`](../secrets.md) backend
to the [`vars`](../vars/vars-overview.md) backend.
The `vars` [module](../../reference/clan.core/vars.md) and the clan [command](../../reference/cli/vars.md) work in tandem, they should ideally be kept in sync.
@@ -33,6 +33,7 @@ vars.generators.vaultwarden = {
And this would read as follows: The vaultwarden `vars` module generates the admin file.
## Prompts
Because prompts can be a necessity for certain systems, `vars` has a shorthand for defining them.
@@ -45,9 +46,7 @@ facts.services.forgejo-api = {
generator.script = "cp $prompt_value > $secret/token";
};
```
To have analogous functionality in `vars`:
```nix
vars.generators.forgejo-api = {
prompts.token = {
@@ -56,7 +55,6 @@ vars.generators.forgejo-api = {
};
};
```
This not only simplifies prompting, it also allows us to define multiple prompts in one generator.
A more analogous way to the `fact` method is available, in case the module author needs more flexibility with the prompt input:
@@ -94,8 +92,8 @@ facts.services.syncthing = {
};
```
This would be the corresponding `vars` module, which will also migrate existing facts.
```nix
vars.generators.syncthing = {
migrateFact = "syncthing";
@@ -118,11 +116,11 @@ vars.generators.syncthing = {
'';
};
```
Most of the usage patterns stay the same, but `vars` has a more ergonomic interface.
There are no longer two different ways to define files (public/secret).
Now files are defined under the `files` attribute and are secret by default.
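For example, a generator emitting both a secret and a public file could look like this sketch (`runtimeInputs` and the `$out` layout follow the vars conventions shown above):
```nix
vars.generators.sshkey = {
  files."id_ed25519" = { };              # files are secret by default
  files."id_ed25519.pub".secret = false; # explicitly public
  runtimeInputs = [ pkgs.openssh ];
  script = ''
    # writes $out/id_ed25519 and $out/id_ed25519.pub
    ssh-keygen -t ed25519 -N "" -f "$out/id_ed25519"
  '';
};
```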
## Happy Migration
We hope this gives you a clear path to start and finish your migration from `facts` to `vars`.

View File

@@ -1,3 +1,4 @@
This guide provides detailed instructions for configuring
[ZeroTier VPN](https://zerotier.com) within Clan. Follow the
outlined steps to set up a machine as a VPN controller (`<CONTROLLER>`) and to
@@ -97,12 +98,11 @@ The status should be "ONLINE":
```
## Further
Currently **ZeroTier** is the only mesh VPN that is fully integrated into clan.
In the future we plan to add additional network technologies like Tinc and Headscale/Tailscale.
Currently we support Yggdrasil and Mycelium through usage of the inventory,
though they are not yet integrated into the networking module.
We chose ZeroTier because in our tests it was a straightforward solution to bootstrap.
Currently you can only use **ZeroTier** as networking technology because this is the first network stack we aim to support.
In the future we plan to add additional network technologies like Tinc, Headscale/Tailscale, Yggdrasil and Mycelium.
We chose ZeroTier because in our tests it was a straightforward solution to bootstrap.
It allows you to self-host a controller, and the controller doesn't need to be globally reachable,
which made it a good fit for starting the project.
@@ -132,7 +132,7 @@ $ sudo zerotier-cli info
#### Manually Authorize a Machine on the Controller
=== "with ZeroTierIP"
=== "with ZerotierIP"
```bash
$ sudo zerotier-members allow --member-ip <IP>
@@ -140,10 +140,10 @@ $ sudo zerotier-cli info
Substitute `<IP>` with the ZeroTier IP obtained previously.
=== "with ZeroTierID"
=== "with ZerotierID"
```bash
$ sudo zerotier-members allow <ID>
```
Substitute `<ID>` with the ZeroTier ID obtained previously.

View File

@@ -64,5 +64,5 @@ nixos-rebuild switch --flake .#my-machine --target-host root@target-ip --build-h
## Related Documentation
- [Update Your Machines](../getting-started/update-machines.md) - Using clan's update command
- [Variables (Vars)](../guides/vars/vars-overview.md) - Understanding the vars system
- [Update Your Machines](getting-started/update.md) - Using clan's update command
- [Variables (Vars)](vars/vars-overview.md) - Understanding the vars system

View File

@@ -1,99 +0,0 @@
**Q**: How should I choose the `nixpkgs` input for my flake when using `clan-core`?
**A**: Pin your flake to a recent `nixpkgs` version. Here are two common approaches, each with its trade-offs:
## Option 1: Follow `clan-core`
- **Pros**:
- Recommended for most users.
- Verified by our CI and widely used by others.
- **Cons**:
- Coupled to version bumps in `clan-core`.
- Upstream features and packages may take longer to land.
Example:
```nix
inputs = {
clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
# Use the `nixpkgs` version locked in `clan-core`
nixpkgs.follows = "clan-core/nixpkgs";
};
```
## Option 2: Use Your Own `nixpkgs` Version
- **Pros**:
- Faster access to new upstream features and packages.
- **Cons**:
- Recommended for advanced users.
- Not covered by our CI; you're on the frontier.
Example:
```nix
inputs = {
# Specify your own `nixpkgs` version
nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
# Ensure `clan-core` uses your `nixpkgs` version
clan-core.inputs.nixpkgs.follows = "nixpkgs";
};
```
## Recommended: Avoid Duplicate `nixpkgs` Entries
To prevent ambiguity or compatibility issues, check your `flake.lock` for duplicate `nixpkgs` entries. Duplicate entries indicate a missing `follows` directive in one of your flake inputs.
Example of duplicate entries in `flake.lock`:
```json
"nixpkgs": {
"locked": {
"lastModified": 315532800,
"narHash": "sha256-1tUpklZsKzMGI3gjo/dWD+hS8cf+5Jji8TF5Cfz7i3I=",
"rev": "08b8f92ac6354983f5382124fef6006cade4a1c1",
"type": "tarball",
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre862603.08b8f92ac635/nixexprs.tar.xz"
},
"original": {
"type": "tarball",
"url": "https://nixos.org/channels/nixpkgs-unstable/nixexprs.tar.xz"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1758346548,
"narHash": "sha256-afXE7AJ7MY6wY1pg/Y6UPHNYPy5GtUKeBkrZZ/gC71E=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "b2a3852bd078e68dd2b3dfa8c00c67af1f0a7d20",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-25.05",
"repo": "nixpkgs",
"type": "github"
}
}
```
To locate the source of duplicate entries, grep your `flake.lock` file. For example, if `home-manager` is referencing `nixpkgs_2` instead of the main `nixpkgs`:
```json
"home-manager": {
"inputs": {
"nixpkgs": "nixpkgs_2"
}
}
```
Fix this by adding the following line to your `flake.nix` inputs:
```nix
home-manager.inputs.nixpkgs.follows = "nixpkgs";
```
Repeat this process until all duplicate `nixpkgs` entries are resolved. This ensures all inputs use the same `nixpkgs` source, preventing cross-version conflicts.

View File

@@ -1,5 +1,5 @@
This article provides an overview of the underlying secrets system which is used by [Vars](../guides/vars/vars-overview.md).
Under most circumstances you should use [Vars](../guides/vars/vars-overview.md) directly instead.
This article provides an overview of the underlying secrets system which is used by [Vars](./vars/vars-overview.md).
Under most circumstances you should use [Vars](./vars/vars-overview.md) directly instead.
Consider using `clan secrets` only for managing admin users and groups, and as a debugging tool.

View File

@@ -3,9 +3,9 @@
## Service Module Specification
This section explains how to author a clan service module.
We discussed the initial architecture in [01-clan-service-modules](../../decisions/01-Clan-Modules.md) and decided to rework the format.
We discussed the initial architecture in [01-clan-service-modules](../../decisions/01-ClanModules.md) and decided to rework the format.
For the full specification and current state see: **[Service Author Reference](../../reference/options/clan_service.md)**
For the full specification and current state see: **[Service Author Reference](../../reference/clanServices/clan-service-author-interface.md)**
### A Minimal module
@@ -47,7 +47,7 @@ The imported module file must fulfill at least the following requirements:
}
```
For more attributes see: **[Service Author Reference](../../reference/options/clan_service.md)**
For more attributes see: **[Service Author Reference](../../reference/clanServices/clan-service-author-interface.md)**
### Adding functionality to the module
instances.machine-type = {
## Further Reading
- [Reference Documentation for Service Authors](../../reference/options/clan_service.md)
- [Migration Guide from ClanModules to ClanServices](../../guides/migrations/migrate-inventory-services.md)
- [Decision that led to ClanServices](../../decisions/01-Clan-Modules.md)
- [Testing Guide for Services with Vars](../contributing/testing.md#testing-services-with-vars)
- [Reference Documentation for Service Authors](../../reference/clanServices/clan-service-author-interface.md)
- [Migration Guide from ClanModules to ClanServices](../migrations/migrate-inventory-services.md)
- [Decision that led to ClanServices](../../decisions/01-ClanModules.md)

Some files were not shown because too many files have changed in this diff Show More