Compare commits

..

2 Commits

Author SHA1 Message Date
Jörg Thalheim
41635dd350 enable warningsAreErrors in doc 2025-01-07 12:17:46 +01:00
Jörg Thalheim
16d16faa9c fix typo 2025-01-07 11:06:36 +01:00
194 changed files with 1858 additions and 5072 deletions

2
.gitignore vendored
View File

@@ -14,7 +14,7 @@ example_clan
nixos.qcow2
**/*.glade~
/docs/out
**/.local.env
# dream2nix
.dream2nix

View File

@@ -1,3 +1,23 @@
# Contributing to Clan
Go to the Contributing guide at https://docs.clan.lol/manual/contribute/
## Live-reloading documentation
Enter the `docs` directory:
```shell-session
cd docs
```
Enter the development shell or enable `direnv`:
```shell-session
direnv allow
```
Run a local server:
```shell-session
mkdocs serve
```
Open http://localhost:8000/ in your browser.

View File

@@ -27,6 +27,7 @@
];
clan.core.networking.targetHost = "machine";
networking.hostName = "machine";
services.openssh.settings.UseDns = false;
nixpkgs.hostPlatform = "x86_64-linux";
programs.ssh.knownHosts = {
@@ -36,8 +37,6 @@
services.openssh = {
enable = true;
settings.UsePAM = false;
settings.UseDns = false;
hostKeys = [
{
path = "/root/.ssh/id_ed25519";
@@ -48,10 +47,6 @@
users.users.root.openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
# This is needed to unlock the user for sshd
# Because we use sshd without setuid binaries
users.users.borg.initialPassword = "hello";
systemd.tmpfiles.settings."vmsecrets" = {
"/root/.ssh/id_ed25519" = {
C.argument = "${../lib/ssh/privkey}";
@@ -67,14 +62,14 @@
user = "root";
};
};
"/etc/secrets/borgbackup/borgbackup.ssh" = {
"/etc/secrets/borgbackup.ssh" = {
C.argument = "${../lib/ssh/privkey}";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/borgbackup/borgbackup.repokey" = {
"/etc/secrets/borgbackup.repokey" = {
C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
z = {
mode = "0400";
@@ -83,7 +78,8 @@
};
};
clan.core.facts.secretStore = "vm";
clan.core.vars.settings.secretStore = "vm";
# TODO: set this backend as well, once we have implemented it.
#clan.core.vars.settings.secretStore = "vm";
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
environment.etc.install-closure.source = "${closureInfo}/store-paths";
@@ -138,47 +134,20 @@
};
perSystem =
{ pkgs, ... }:
let
clanCore = self.filter {
include = [
"checks/backups"
"checks/flake-module.nix"
"clanModules/borgbackup"
"clanModules/flake-module.nix"
"clanModules/localbackup"
"clanModules/packages"
"clanModules/single-disk"
"clanModules/zerotier"
"flake.lock"
"flakeModules"
"inventory.json"
"lib/build-clan"
"lib/default.nix"
"lib/flake-module.nix"
"lib/frontmatter"
"lib/inventory"
"nixosModules"
];
};
in
{
# Needs investigation on aarch64-linux
# vm-test-run-test-backups> qemu-kvm: No machine specified, and there is no default
# vm-test-run-test-backups> Use -machine help to list supported machines
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux") {
test-backups = (import ../lib/container-test.nix) {
test-backups = (import ../lib/test-base.nix) {
name = "test-backups";
nodes.machine = {
imports = [
self.nixosModules.clanCore
self.nixosModules.test-backup
];
virtualisation.emptyDiskImages = [ 256 ];
clan.core.settings.directory = ./.;
environment.systemPackages = [
(pkgs.writeShellScriptBin "foo" ''
echo ${clanCore}
'')
];
};
testScript = ''
@@ -190,14 +159,14 @@
machine.succeed("echo testing > /var/test-backups/somefile")
# create
machine.succeed("clan backups create --debug --flake ${clanCore} test-backup")
machine.succeed("clan backups create --debug --flake ${self} test-backup")
machine.wait_until_succeeds("! systemctl is-active borgbackup-job-test-backup >&2")
machine.succeed("test -f /run/mount-external-disk")
machine.succeed("test -f /run/unmount-external-disk")
# list
backup_id = json.loads(machine.succeed("borg-job-test-backup list --json"))["archives"][0]["archive"]
out = machine.succeed("clan backups list --debug --flake ${clanCore} test-backup").strip()
out = machine.succeed("clan backups list --debug --flake ${self} test-backup").strip()
print(out)
assert backup_id in out, f"backup {backup_id} not found in {out}"
localbackup_id = "hdd::/mnt/external-disk/snapshot.0"
@@ -205,7 +174,7 @@
## borgbackup restore
machine.succeed("rm -f /var/test-backups/somefile")
machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup borgbackup 'test-backup::borg@machine:.::{backup_id}' >&2")
machine.succeed(f"clan backups restore --debug --flake ${self} test-backup borgbackup 'test-backup::borg@machine:.::{backup_id}' >&2")
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
machine.succeed("test -f /var/test-service/pre-restore-command")
machine.succeed("test -f /var/test-service/post-restore-command")
@@ -213,7 +182,7 @@
## localbackup restore
machine.succeed("rm -rf /var/test-backups/somefile /var/test-service/ && mkdir -p /var/test-service")
machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup localbackup '{localbackup_id}' >&2")
machine.succeed(f"clan backups restore --debug --flake ${self} test-backup localbackup '{localbackup_id}' >&2")
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
machine.succeed("test -f /var/test-service/pre-restore-command")
machine.succeed("test -f /var/test-service/post-restore-command")

View File

@@ -21,14 +21,14 @@
clan.core.state.testState.folders = [ "/etc/state" ];
environment.etc.state.text = "hello world";
systemd.tmpfiles.settings."vmsecrets" = {
"/etc/secrets/borgbackup/borgbackup.ssh" = {
"/etc/secrets/borgbackup.ssh" = {
C.argument = "${../lib/ssh/privkey}";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/borgbackup/borgbackup.repokey" = {
"/etc/secrets/borgbackup.repokey" = {
C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
z = {
mode = "0400";
@@ -36,8 +36,7 @@
};
};
};
# clan.core.facts.secretStore = "vm";
clan.core.vars.settings.secretStore = "vm";
clan.core.facts.secretStore = "vm";
clan.borgbackup.destinations.test.repo = "borg@localhost:.";
}

View File

@@ -1,12 +1,6 @@
{ self, lib, ... }:
let
inherit (lib)
filter
pathExists
;
in
{ self, ... }:
{
imports = filter pathExists [
imports = [
./backups/flake-module.nix
./devshell/flake-module.nix
./flash/flake-module.nix

View File

@@ -1,25 +1,5 @@
{ self, lib, ... }:
{ self, ... }:
{
clan.machines.test-flash-machine = {
clan.core.networking.targetHost = "test-flash-machine";
fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";
imports = [ self.nixosModules.test-flash-machine ];
};
flake.nixosModules = {
test-flash-machine =
{ lib, ... }:
{
imports = [ self.nixosModules.test-install-machine ];
clan.core.vars.generators.test = lib.mkForce { };
disko.devices.disk.main.preCreateHook = lib.mkForce "";
};
};
perSystem =
{
nodes,
@@ -30,18 +10,15 @@
let
dependencies = [
pkgs.disko
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.pkgs.perlPackages.ConfigIniFiles
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.pkgs.perlPackages.FileSlurp
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.build.toplevel
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.build.diskoScript
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.build.diskoScript.drvPath
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.clan.deployment.file
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.toplevel
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.diskoScript
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.diskoScript.drvPath
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.clan.deployment.file
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
# Currently disabled...
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
flash = (import ../lib/test-base.nix) {
name = "flash";
@@ -65,7 +42,7 @@
testScript = ''
start_all()
machine.succeed("clan flash write --debug --flake ${../..} --yes --disk main /dev/vdb test-flash-machine")
machine.succeed("clan flash write --debug --flake ${../..} --yes --disk main /dev/vdb test-install-machine")
'';
} { inherit pkgs self; };
};

View File

@@ -1,5 +1,6 @@
{
self,
inputs,
lib,
...
}:
@@ -16,68 +17,18 @@
{ lib, modulesPath, ... }:
{
imports = [
self.clanModules.single-disk
(modulesPath + "/testing/test-instrumentation.nix") # we need these 2 modules always to be able to run the tests
(modulesPath + "/profiles/qemu-guest.nix")
../lib/minify.nix
];
clan.single-disk.device = "/dev/vda";
environment.etc."install-successful".text = "ok";
nixpkgs.hostPlatform = "x86_64-linux";
boot.consoleLogLevel = lib.mkForce 100;
boot.kernelParams = [ "boot.shell_on_fail" ];
# disko config
boot.loader.grub.efiSupport = lib.mkDefault true;
boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
clan.core.vars.settings.secretStore = "vm";
clan.core.vars.generators.test = {
files.test.neededFor = "partitioning";
script = ''
echo "notok" > $out/test
'';
};
disko.devices = {
disk = {
main = {
type = "disk";
device = "/dev/vda";
preCreateHook = ''
test -e /run/partitioning-secrets/test/test
'';
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
priority = 1;
};
ESP = {
size = "512M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
mountOptions = [ "umask=0077" ];
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
};
};
};
perSystem =
@@ -94,7 +45,6 @@
self.nixosConfigurations.test-install-machine.config.system.clan.deployment.file
pkgs.stdenv.drvPath
pkgs.nixos-anywhere
pkgs.bubblewrap
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
@@ -176,9 +126,9 @@
client.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
client.fail("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
client.succeed("clan machines update-hardware-config --flake test-flake test-install-machine root@installer >&2")
client.succeed("test -f test-flake/machines/test-install-machine/facter.json")
client.succeed("clan machines update-hardware-config --backend nixos-generate-config --flake test-flake test-install-machine root@installer>&2")
client.succeed("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
client.succeed("clan machines update-hardware-config --backend nixos-facter --flake test-flake test-install-machine root@installer>&2")
client.succeed("test -f test-flake/machines/test-install-machine/facter.json")
client.succeed("clan machines install --debug --flake ${../..} --yes test-install-machine --target-host root@installer >&2")
try:
installer.shutdown()

View File

@@ -7,19 +7,9 @@
let
testDriver = hostPkgs.python3.pkgs.callPackage ./package.nix {
inherit (config) extraPythonPackages;
inherit (hostPkgs.pkgs) util-linux systemd nix;
inherit (hostPkgs.pkgs) util-linux systemd;
};
containers =
testScript:
map (m: [
m.system.build.toplevel
(hostPkgs.closureInfo {
rootPaths = [
m.system.build.toplevel
(hostPkgs.writeText "testScript" testScript)
];
})
]) (lib.attrValues config.nodes);
containers = map (m: m.system.build.toplevel) (lib.attrValues config.nodes);
pythonizeName =
name:
let
@@ -54,6 +44,8 @@ in
''
mkdir -p $out/bin
containers=(${toString containers})
${lib.optionalString (!config.skipTypeCheck) ''
# prepend type hints so the test script can be type checked with mypy
cat "${./test-script-prepend.py}" >> testScriptWithTypes
@@ -74,13 +66,7 @@ in
ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-test-driver
wrapProgram $out/bin/nixos-test-driver \
${
lib.concatStringsSep " " (
map (container: "--add-flags '--container ${builtins.toString container}'") (
containers config.testScriptString
)
)
} \
${lib.concatStringsSep " " (map (name: "--add-flags '--container ${name}'") containers)} \
--add-flags "--test-script '$out/test-script'"
''
);

View File

@@ -5,7 +5,6 @@
setuptools,
util-linux,
systemd,
nix,
colorama,
junit-xml,
}:
@@ -17,7 +16,6 @@ buildPythonApplication {
systemd
colorama
junit-xml
nix
] ++ extraPythonPackages python3Packages;
nativeBuildInputs = [ setuptools ];
format = "pyproject";

View File

@@ -1,5 +1,4 @@
import argparse
import ctypes
import os
import re
import subprocess
@@ -13,55 +12,6 @@ from typing import Any
from .logger import AbstractLogger, CompositeLogger, TerminalLogger
# Load the C library
libc = ctypes.CDLL("libc.so.6", use_errno=True)
# Define the mount function
libc.mount.argtypes = [
ctypes.c_char_p, # source
ctypes.c_char_p, # target
ctypes.c_char_p, # filesystemtype
ctypes.c_ulong, # mountflags
ctypes.c_void_p, # data
]
libc.mount.restype = ctypes.c_int
MS_BIND = 0x1000
MS_REC = 0x4000
def mount(
source: Path,
target: Path,
filesystemtype: str,
mountflags: int = 0,
data: str | None = None,
) -> None:
"""
A Python wrapper for the mount system call.
:param source: The source of the file system (e.g., device name, remote filesystem).
:param target: The mount point (an existing directory).
:param filesystemtype: The filesystem type (e.g., "ext4", "nfs").
:param mountflags: Mount options flags.
:param data: File system-specific data (e.g., options like "rw").
:raises OSError: If the mount system call fails.
"""
# Convert Python strings to C-compatible strings
source_c = ctypes.c_char_p(str(source).encode("utf-8"))
target_c = ctypes.c_char_p(str(target).encode("utf-8"))
fstype_c = ctypes.c_char_p(filesystemtype.encode("utf-8"))
data_c = ctypes.c_char_p(data.encode("utf-8")) if data else None
# Call the mount system call
result = libc.mount(
source_c, target_c, fstype_c, ctypes.c_ulong(mountflags), data_c
)
if result != 0:
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno))
class Error(Exception):
pass
@@ -121,7 +71,7 @@ class Machine:
self.rootdir,
"--register=no",
"--resolv-conf=off",
"--bind=/nix",
"--bind-ro=/nix/store",
"--bind",
self.out_dir,
"--bind=/proc:/run/host/proc",
@@ -152,9 +102,9 @@ class Machine:
.read_text()
.split()
)
assert len(childs) == 1, (
f"Expected exactly one child process for systemd-nspawn, got {childs}"
)
assert (
len(childs) == 1
), f"Expected exactly one child process for systemd-nspawn, got {childs}"
try:
return int(childs[0])
except ValueError as e:
@@ -303,9 +253,7 @@ class Machine:
info = self.get_unit_info(unit)
state = info["ActiveState"]
if state == "failed":
proc = self.systemctl(f"--lines 0 status {unit}")
journal = self.execute(f"journalctl -u {unit} --no-pager")
msg = f'unit "{unit}" reached state "{state}":\n{proc.stdout}\n{journal.stdout}'
msg = f'unit "{unit}" reached state "{state}"'
raise Error(msg)
if state == "inactive":
@@ -323,9 +271,7 @@ class Machine:
def succeed(self, command: str, timeout: int | None = None) -> str:
res = self.execute(command, timeout=timeout)
if res.returncode != 0:
msg = f"Failed to run command {command}\n"
msg += f"Exit code: {res.returncode}\n"
msg += f"Stdout: {res.stdout}"
msg = f"Failed to run command {command}"
raise RuntimeError(msg)
return res.stdout
@@ -342,12 +288,6 @@ class Machine:
self.shutdown()
NIX_DIR = Path("/nix")
NIX_STORE = Path("/nix/store/")
NEW_NIX_DIR = Path("/.nix-rw")
NEW_NIX_STORE_DIR = NEW_NIX_DIR / "store"
def setup_filesystems() -> None:
# We don't care about cleaning up the mount points, since we're running in a nix sandbox.
Path("/run").mkdir(parents=True, exist_ok=True)
@@ -356,32 +296,6 @@ def setup_filesystems() -> None:
Path("/etc").chmod(0o755)
Path("/etc/os-release").touch()
Path("/etc/machine-id").write_text("a5ea3f98dedc0278b6f3cc8c37eeaeac")
NEW_NIX_STORE_DIR.mkdir(parents=True)
# Read /proc/mounts and replicate every bind mount
with Path("/proc/self/mounts").open() as f:
for line in f:
columns = line.split(" ")
source = Path(columns[1])
if source.parent != NIX_STORE:
continue
target = NEW_NIX_STORE_DIR / source.name
if source.is_dir():
target.mkdir()
else:
target.touch()
try:
mount(source, target, "none", MS_BIND)
except OSError as e:
msg = f"mount({source}, {target}) failed"
raise Error(msg) from e
out = Path(os.environ["out"])
(NEW_NIX_STORE_DIR / out.name).mkdir()
mount(NEW_NIX_DIR, NIX_DIR, "none", MS_BIND | MS_REC)
def load_nix_db(closure_info: Path) -> None:
with (closure_info / "registration").open() as f:
subprocess.run(["nix-store", "--load-db"], stdin=f, check=True, text=True)
class Driver:
@@ -389,7 +303,7 @@ class Driver:
def __init__(
self,
containers: list[tuple[Path, Path]],
containers: list[Path],
logger: AbstractLogger,
testscript: str,
out_dir: str,
@@ -399,24 +313,21 @@ class Driver:
self.out_dir = out_dir
self.logger = logger
setup_filesystems()
# TODO: this won't work for multiple containers
assert len(containers) == 1, "Only one container is supported at the moment"
load_nix_db(containers[0][1])
self.tempdir = TemporaryDirectory()
tempdir_path = Path(self.tempdir.name)
self.machines = []
for container in containers:
name_match = re.match(r".*-nixos-system-(.+)-(.+)", container[0].name)
name_match = re.match(r".*-nixos-system-(.+)-(.+)", container.name)
if not name_match:
msg = f"Unable to extract hostname from {container[0].name}"
msg = f"Unable to extract hostname from {container.name}"
raise Error(msg)
name = name_match.group(1)
self.machines.append(
Machine(
name=name,
toplevel=container[0],
toplevel=container,
rootdir=tempdir_path / name,
out_dir=self.out_dir,
logger=self.logger,
@@ -488,11 +399,9 @@ def main() -> None:
arg_parser = argparse.ArgumentParser(prog="nixos-test-driver")
arg_parser.add_argument(
"--containers",
nargs=2,
action="append",
nargs="+",
type=Path,
metavar=("TOPLEVEL_STORE_DIR", "CLOSURE_INFO"),
help="container system toplevel store dir and closure info",
help="container system toplevel paths",
)
arg_parser.add_argument(
"--test-script",

View File

@@ -25,9 +25,6 @@ in
networking.interfaces = lib.mkForce { };
#networking.primaryIPAddress = lib.mkForce null;
systemd.services.backdoor.enable = false;
# we don't have permission to set cpu scheduler in our container
systemd.services.nix-daemon.serviceConfig.CPUSchedulingPolicy = lib.mkForce "";
};
# to accept external dependencies such as disko
node.specialArgs.self = self;

View File

@@ -31,8 +31,6 @@
clan.matrix-synapse.users.someuser = { };
clan.core.facts.secretStore = "vm";
clan.core.vars.settings.secretStore = "vm";
clan.core.vars.settings.publicStore = "in_repo";
# because we use systemd-tmpfiles to copy the secrets, we need to a separate systemd-tmpfiles call to provision them.
boot.postBootCommands = "${config.systemd.package}/bin/systemd-tmpfiles --create /etc/tmpfiles.d/00-vmsecrets.conf";
@@ -43,21 +41,21 @@
d.mode = "0700";
z.mode = "0700";
};
"/etc/secrets/matrix-synapse/synapse-registration_shared_secret" = {
"/etc/secrets/synapse-registration_shared_secret" = {
f.argument = "supersecret";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/matrix-password-admin/matrix-password-admin" = {
"/etc/secrets/matrix-password-admin" = {
f.argument = "matrix-password1";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/matrix-password-someuser/matrix-password-someuser" = {
"/etc/secrets/matrix-password-someuser" = {
f.argument = "matrix-password2";
z = {
mode = "0400";

View File

@@ -63,9 +63,9 @@ in
rsh = lib.mkOption {
type = lib.types.str;
default = "ssh -i ${
config.clan.core.vars.generators.borgbackup.files."borgbackup.ssh".path
config.clan.core.facts.services.borgbackup.secret."borgbackup.ssh".path
} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
defaultText = "ssh -i \${config.clan.core.vars.generators.borgbackup.files.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
defaultText = "ssh -i \${config.clan.core.facts.services.borgbackup.secret.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
description = "the rsh to use for the backup";
};
};
@@ -126,7 +126,7 @@ in
encryption = {
mode = "repokey";
passCommand = "cat ${config.clan.core.vars.generators.borgbackup.files."borgbackup.repokey".path}";
passCommand = "cat ${config.clan.core.facts.services.borgbackup.secret."borgbackup.repokey".path}";
};
prune.keep = {
@@ -177,21 +177,20 @@ in
})
];
clan.core.vars.generators.borgbackup = {
files."borgbackup.ssh.pub".secret = false;
files."borgbackup.ssh" = { };
files."borgbackup.repokey" = { };
migrateFact = "borgbackup";
runtimeInputs = [
pkgs.coreutils
# Facts generation. So the client can authenticate to the server
clan.core.facts.services.borgbackup = {
public."borgbackup.ssh.pub" = { };
secret."borgbackup.ssh" = { };
secret."borgbackup.repokey" = { };
generator.path = [
pkgs.openssh
pkgs.coreutils
pkgs.xkcdpass
];
script = ''
ssh-keygen -t ed25519 -N "" -f $out/borgbackup.ssh
xkcdpass -n 4 -d - > $out/borgbackup.repokey
generator.script = ''
ssh-keygen -t ed25519 -N "" -f "$secrets"/borgbackup.ssh
mv "$secrets"/borgbackup.ssh.pub "$facts"/borgbackup.ssh.pub
xkcdpass -n 4 -d - > "$secrets"/borgbackup.repokey
'';
};

View File

@@ -1,7 +1,7 @@
{ config, lib, ... }:
let
dir = config.clan.core.settings.directory;
machineDir = dir + "/vars/per-machine/";
machineDir = dir + "/machines/";
machineName = config.clan.core.settings.machine.name;
# Instances might be empty, if the module is not used via the inventory
@@ -33,8 +33,7 @@ in
};
config.services.borgbackup.repos =
let
borgbackupIpMachinePath = machine: machineDir + machine + "/borgbackup/borgbackup.ssh.pub/value";
borgbackupIpMachinePath = machines: machineDir + machines + "/facts/borgbackup.ssh.pub";
machinesMaybeKey = builtins.map (
machine:
let
@@ -45,7 +44,7 @@ in
else
lib.warn ''
Machine ${machine} does not have a borgbackup key at ${fullPath},
run `clan var generate ${machine}` to generate it.
run `clan facts generate ${machine}` to generate it.
'' null
) allClients;

View File

@@ -52,7 +52,7 @@ let
migrateFact = "${secret_id opt}";
prompts.${secret_id opt} = {
type = "hidden";
persist = true;
createFile = true;
};
};
};

View File

@@ -1,13 +1,6 @@
{ lib, ... }:
let
inherit (lib)
filterAttrs
pathExists
;
in
{ ... }:
{
# only import available files, as this allows to filter the files for tests.
flake.clanModules = filterAttrs (_name: pathExists) {
flake.clanModules = {
admin = ./admin;
borgbackup = ./borgbackup;
borgbackup-static = ./borgbackup-static;
@@ -26,7 +19,6 @@ in
matrix-synapse = ./matrix-synapse;
moonlight = ./moonlight;
mumble = ./mumble;
mycelium = ./mycelium;
nginx = ./nginx;
packages = ./packages;
postgresql = ./postgresql;

View File

@@ -106,6 +106,17 @@ in
};
};
systemd.tmpfiles.settings."01-matrix" = {
"/run/synapse-registration-shared-secret" = {
C.argument =
config.clan.core.facts.services.matrix-synapse.secret.synapse-registration_shared_secret.path;
z = {
mode = "0400";
user = "matrix-synapse";
};
};
};
clan.postgresql.users.matrix-synapse = { };
clan.postgresql.databases.matrix-synapse.create.options = {
TEMPLATE = "template0";
@@ -116,28 +127,26 @@ in
};
clan.postgresql.databases.matrix-synapse.restore.stopOnRestore = [ "matrix-synapse" ];
clan.core.vars.generators =
clan.core.facts.services =
{
"matrix-synapse" = {
files."synapse-registration_shared_secret" = { };
runtimeInputs = with pkgs; [
secret."synapse-registration_shared_secret" = { };
generator.path = with pkgs; [
coreutils
pwgen
];
migrateFact = "matrix-synapse";
script = ''
echo -n "$(pwgen -s 32 1)" > "$out"/synapse-registration_shared_secret
generator.script = ''
echo -n "$(pwgen -s 32 1)" > "$secrets"/synapse-registration_shared_secret
'';
};
}
// lib.mapAttrs' (
name: user:
lib.nameValuePair "matrix-password-${user.name}" {
files."matrix-password-${user.name}" = { };
migrateFact = "matrix-password-${user.name}";
runtimeInputs = with pkgs; [ xkcdpass ];
script = ''
xkcdpass -n 4 -d - > "$out"/${lib.escapeShellArg "matrix-password-${user.name}"}
secret."matrix-password-${user.name}" = { };
generator.path = with pkgs; [ xkcdpass ];
generator.script = ''
xkcdpass -n 4 -d - > "$secrets"/${lib.escapeShellArg "matrix-password-${user.name}"}
'';
}
) cfg.users;
@@ -154,20 +163,14 @@ in
+ lib.concatMapStringsSep "\n" (user: ''
# only create user if it doesn't exist
/run/current-system/sw/bin/matrix-synapse-register_new_matrix_user --exists-ok --password-file ${
config.clan.core.vars.generators."matrix-password-${user.name}".files."matrix-password-${user.name}".path
config.clan.core.facts.services."matrix-password-${user.name}".secret."matrix-password-${user.name}".path
} --user "${user.name}" ${if user.admin then "--admin" else "--no-admin"}
'') (lib.attrValues cfg.users);
in
{
path = [ pkgs.curl ];
serviceConfig.ExecStartPre = lib.mkBefore [
"+${pkgs.coreutils}/bin/install -o matrix-synapse -g matrix-synapse ${
lib.escapeShellArg
config.clan.core.vars.generators.matrix-synapse.files."synapse-registration_shared_secret".path
} /run/synapse-registration-shared-secret"
];
serviceConfig.ExecStartPost = [
''+${pkgs.writeShellScript "matrix-synapse-create-users" usersScript}''
(''+${pkgs.writeShellScript "matrix-synapse-create-users" usersScript}'')
];
};

View File

@@ -1,37 +0,0 @@
---
description = "End-2-end encrypted IPv6 overlay network"
categories = ["System", "Network"]
features = [ "inventory" ]
---
Mycelium is an IPv6 overlay network written in Rust. Each node that joins the overlay network will receive an overlay network IP in the 400::/7 range.
Features:
- Mycelium, is locality aware, it will look for the shortest path between nodes
- All traffic between the nodes is end-2-end encrypted
- Traffic can be routed over nodes of friends, location aware
- If a physical link goes down Mycelium will automatically reroute your traffic
- The IP address is IPV6 and linked to private key
- A simple reliable messagebus is implemented on top of Mycelium
- Mycelium has multiple ways how to communicate quic, tcp, ... and we are working on holepunching for Quick which means P2P traffic without middlemen for NATted networks e.g. most homes
- Scalability is very important for us, we tried many overlay networks before and got stuck on all of them, we are trying to design a network which scales to a planetary level
- You can run mycelium without TUN and only use it as reliable message bus.
An example configuration might look like this in the inventory:
```nix
mycelium.default = {
roles.peer.machines = [
"berlin"
"munich"
];
config = {
topLevelDomain = "m";
openFirewall = true;
addHostedPublicNodes = true;
};
};
```
This will add the machines named `berlin` and `munich` to the `mycelium` vpn.
And will also set the toplevel domain of the mycelium vpn to `m`, meaning the
machines are now reachable via `berlin.m` and `munich.m`.

View File

@@ -1,91 +0,0 @@
{
pkgs,
config,
lib,
...
}:
let
flake = config.clan.core.settings.directory;
machineName = config.clan.core.settings.machine.name;
# Instances might be empty, if the module is not used via the inventory
#
# Type: { ${instanceName} :: { roles :: Roles } }
# Roles :: { ${role_name} :: { machines :: [string] } }
instances = config.clan.inventory.services.mycelium or { };
allPeers = lib.foldlAttrs (
acc: _instanceName: instanceConfig:
acc
++ (
if (builtins.elem machineName instanceConfig.roles.peer.machines) then
instanceConfig.roles.peer.machines
else
[ ]
)
) [ ] instances;
allPeerConfigurations = lib.filterAttrs (n: _: builtins.elem n allPeers) flake.nixosConfigurations;
allPeersWithIp =
builtins.mapAttrs
(_: x: lib.removeSuffix "\n" x.config.clan.core.vars.generators.mycelium.files.ip.value)
(
lib.filterAttrs (
_: x: (builtins.tryEval x.config.clan.core.vars.generators.mycelium.files.ip.value).success
) allPeerConfigurations
);
ips = lib.attrValues allPeersWithIp;
peers = lib.concatMap (ip: [
"tcp://[${ip}]:9651"
"quic://[${ip}]:9651"
]) ips;
in
{
options = {
clan.mycelium.topLevelDomain = lib.mkOption {
type = lib.types.str;
default = "";
description = "Top level domain to reach hosts";
};
clan.mycelium.openFirewall = lib.mkEnableOption "Open the firewall for mycelium";
clan.mycelium.addHostedPublicNodes = lib.mkEnableOption "Add hosted Public nodes";
clan.mycelium.addHosts = lib.mkOption {
default = true;
description = "Add mycelium ip's to the host file";
};
};
config.services.mycelium = {
enable = true;
addHostedPublicNodes = lib.mkDefault config.clan.mycelium.addHostedPublicNodes;
openFirewall = lib.mkDefault config.clan.mycelium.openFirewall;
keyFile = config.clan.core.vars.generators.mycelium.files.key.path;
inherit peers;
};
config.networking.hosts = lib.mkIf (config.clan.mycelium.addHosts) (
lib.mapAttrs' (
host: ip:
lib.nameValuePair ip (
if (config.clan.mycelium.topLevelDomain == "") then [ host ] else [ "${host}.m" ]
)
) allPeersWithIp
);
config.clan.core.vars.generators.mycelium = {
files."key" = { };
files."ip".secret = false;
files."pubkey".secret = false;
runtimeInputs = [
pkgs.mycelium
pkgs.coreutils
pkgs.jq
];
script = ''
timeout 5 mycelium --key-file "$out"/key || :
mycelium inspect --key-file "$out"/key --json | jq -r .publicKey > "$out"/pubkey
mycelium inspect --key-file "$out"/key --json | jq -r .address > "$out"/ip
'';
};
}

View File

@@ -1,7 +1,5 @@
---
description = "Automatically generates and configures a password for the root user."
categories = ["System"]
features = [ "inventory" ]
---
After the system was installed/deployed the following command can be used to display the root-password:

View File

@@ -1,6 +1,29 @@
# Dont import this file
# It is only here for backwards compatibility.
# Dont author new modules with this file.
{
imports = [ ./roles/default.nix ];
pkgs,
config,
lib,
...
}:
{
users.mutableUsers = false;
users.users.root.hashedPasswordFile =
config.clan.core.facts.services.root-password.secret.password-hash.path;
sops.secrets = lib.mkIf (config.clan.core.facts.secretStore == "sops") {
"${config.clan.core.settings.machine.name}-password-hash".neededForUsers = true;
};
clan.core.facts.services.root-password = {
secret.password = { };
secret.password-hash = { };
generator.path = with pkgs; [
coreutils
xkcdpass
mkpasswd
];
generator.script = ''
xkcdpass --numwords 3 --delimiter - --count 1 | tr -d "\n" > $secrets/password
cat $secrets/password | mkpasswd -s -m sha-512 | tr -d "\n" > $secrets/password-hash
'';
};
}

View File

@@ -1,35 +0,0 @@
{
pkgs,
config,
...
}:
{
users.mutableUsers = false;
users.users.root.hashedPasswordFile =
config.clan.core.vars.generators.root-password.files.password-hash.path;
clan.core.vars.generators.root-password = {
files.password-hash = {
neededFor = "users";
};
migrateFact = "root-password";
runtimeInputs = [
pkgs.coreutils
pkgs.mkpasswd
pkgs.xkcdpass
];
prompts.password.type = "hidden";
prompts.password.persist = true;
prompts.password.description = "You can autogenerate a password, if you leave this prompt blank.";
script = ''
prompt_value=$(cat $prompts/password)
if [[ -n ''${prompt_value-} ]]; then
echo $prompt_value | tr -d "\n" > $out/password
else
xkcdpass --numwords 3 --delimiter - --count 1 | tr -d "\n" > $out/password
fi
mkpasswd -s -m sha-512 < $out/password | tr -d "\n" > $out/password-hash
'';
};
}

View File

@@ -1,7 +1,5 @@
---
description = "Automatically generates and configures a password for the specified user account."
categories = ["System"]
features = ["inventory"]
---
If setting the option prompt to true, the user will be prompted to type in their desired password.

View File

@@ -1,6 +1,58 @@
# Don't import this file
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
imports = [ ./roles/default.nix ];
pkgs,
config,
lib,
...
}:
let
cfg = config.clan.user-password;
in
{
options.clan.user-password = {
user = lib.mkOption {
type = lib.types.str;
example = "alice";
description = "The user the password should be generated for.";
};
prompt = lib.mkOption {
type = lib.types.bool;
default = true;
example = false;
description = "Whether the user should be prompted.";
};
};
config = {
users.mutableUsers = false;
users.users.${cfg.user} = {
hashedPasswordFile = config.clan.core.facts.services.user-password.secret.user-password-hash.path;
isNormalUser = lib.mkDefault true;
};
sops.secrets = lib.mkIf (config.clan.core.facts.secretStore == "sops") {
"${config.clan.core.settings.machine.name}-user-password-hash".neededForUsers = true;
};
clan.core.facts.services.user-password = {
secret.user-password = { };
secret.user-password-hash = { };
generator.prompt = (
lib.mkIf config.clan.user-password.prompt "Set the password for your user '${config.clan.user-password.user}'.
You can autogenerate a password, if you leave this prompt blank."
);
generator.path = with pkgs; [
coreutils
xkcdpass
mkpasswd
];
generator.script = ''
if [[ -n ''${prompt_value-} ]]; then
echo $prompt_value | tr -d "\n" > $secrets/user-password
else
xkcdpass --numwords 3 --delimiter - --count 1 | tr -d "\n" > $secrets/user-password
fi
cat $secrets/user-password | mkpasswd -s -m sha-512 | tr -d "\n" > $secrets/user-password-hash
'';
};
};
}

View File

@@ -1,58 +0,0 @@
{
pkgs,
config,
lib,
...
}:
let
cfg = config.clan.user-password;
in
{
options.clan.user-password = {
user = lib.mkOption {
type = lib.types.str;
example = "alice";
description = "The user the password should be generated for.";
};
prompt = lib.mkOption {
type = lib.types.bool;
default = true;
example = false;
description = "Whether the user should be prompted.";
};
};
config = {
users.mutableUsers = false;
users.users.${cfg.user} = {
hashedPasswordFile = config.clan.core.facts.services.user-password.secret.user-password-hash.path;
isNormalUser = lib.mkDefault true;
};
sops.secrets = lib.mkIf (config.clan.core.facts.secretStore == "sops") {
"${config.clan.core.settings.machine.name}-user-password-hash".neededForUsers = true;
};
clan.core.facts.services.user-password = {
secret.user-password = { };
secret.user-password-hash = { };
generator.prompt = (
lib.mkIf config.clan.user-password.prompt "Set the password for your user '${config.clan.user-password.user}'.
You can autogenerate a password, if you leave this prompt blank."
);
generator.path = with pkgs; [
coreutils
xkcdpass
mkpasswd
];
generator.script = ''
if [[ -n ''${prompt_value-} ]]; then
echo $prompt_value | tr -d "\n" > $secrets/user-password
else
xkcdpass --numwords 3 --delimiter - --count 1 | tr -d "\n" > $secrets/user-password
fi
cat $secrets/user-password | mkpasswd -s -m sha-512 | tr -d "\n" > $secrets/user-password-hash
'';
};
};
}

View File

@@ -14,9 +14,9 @@ let
name = "iwd.${name}";
value = {
prompts.ssid.type = "line";
prompts.ssid.persist = true;
prompts.ssid.createFile = true;
prompts.password.type = "hidden";
prompts.password.persist = true;
prompts.password.createFile = true;
share = true;
};
};

View File

@@ -1 +0,0 @@
see [architecture-decision-record](https://github.com/joelparkerhenderson/architecture-decision-record)

View File

@@ -1,24 +0,0 @@
# Decision record template by Michael Nygard
This is the template in [Documenting architecture decisions - Michael Nygard](http://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions).
You can use [adr-tools](https://github.com/npryce/adr-tools) for managing the ADR files.
In each ADR file, write these sections:
# Title
## Status
What is the status, such as proposed, accepted, rejected, deprecated, superseded, etc.?
## Context
What is the issue that we're seeing that is motivating this decision or change?
## Decision
What is the change that we're proposing and/or doing?
## Consequences
What becomes easier or more difficult to do because of this change?

4
docs/.gitignore vendored
View File

@@ -1,3 +1,3 @@
/site/reference
/site/static
!/site/static/extra.css
/site/static/Roboto-Regular.ttf
/site/static/FiraCode-VF.ttf

View File

@@ -1,5 +1,4 @@
# Contributing to Clan
# Contributing
**Continuous Integration (CI)**: Each pull request gets automatically tested by gitea. If any errors are detected, it will block pull requests until they're resolved.
@@ -25,7 +24,7 @@ Let's get your development environment up and running:
- To automatically setup a devshell on entering the directory
```bash
nix profile install nixpkgs#nix-direnv-flakes nixpkgs#direnv
nix profile install nixpkgs#nix-direnv-flakes
```
3. **Add direnv to your shell**:
@@ -37,9 +36,6 @@ Let's get your development environment up and running:
echo 'eval "$(direnv hook zsh)"' >> ~/.zshrc && echo 'eval "$(direnv hook bash)"' >> ~/.bashrc && eval "$SHELL"
```
3. **Allow the devshell**
- Go to `clan-core/pkgs/clan-cli` and do a `direnv allow` to setup the necessary development environment to execute the `clan` command
4. **Create a Gitea Account**:
- Register an account on https://git.clan.lol
- Fork the [clan-core](https://git.clan.lol/clan/clan-core) repository
@@ -95,40 +91,110 @@ Let's get your development environment up and running:
merge-after-ci --reviewers Mic92 Lassulus Qubasa
```
## Related Projects
# Debugging
- **Data Mesher**: [dm](https://git.clan.lol/clan/dm)
- **Nixos Facter**: [nixos-facter](https://github.com/nix-community/nixos-facter)
- **Nixos Anywhere**: [nixos-anywhere](https://github.com/nix-community/nixos-anywhere)
- **Disko**: [disko](https://github.com/nix-community/disko)
Here are some methods for debugging and testing the clan-cli:
## Fixing Bugs or Adding Features in Clan-CLI
## See all possible packages and tests
If you have a bug fix or feature that involves a related project, clone the relevant repository and replace its invocation in your local setup.
To quickly show all possible packages and tests execute:
For instance, if you need to update `nixos-anywhere` in clan-cli, find its usage:
```python
run(
nix_shell(
["nixpkgs#nixos-anywhere"],
cmd,
),
RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
)
```bash
nix flake show --system no-eval
```
You can replace `"nixpkgs#nixos-anywhere"` with your local path:
Under `checks` you will find all tests that are executed in our CI. Under `packages` you find all our projects.
```python
run(
nix_shell(
["<path_to_local_src>#nixos-anywhere"],
cmd,
),
RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
)
```
git+file:///home/lhebendanz/Projects/clan-core
├───apps
│ └───x86_64-linux
│ ├───install-vm: app
│ └───install-vm-nogui: app
├───checks
│ └───x86_64-linux
│ ├───borgbackup omitted (use '--all-systems' to show)
│ ├───check-for-breakpoints omitted (use '--all-systems' to show)
│ ├───clan-dep-age omitted (use '--all-systems' to show)
│ ├───clan-dep-bash omitted (use '--all-systems' to show)
│ ├───clan-dep-e2fsprogs omitted (use '--all-systems' to show)
│ ├───clan-dep-fakeroot omitted (use '--all-systems' to show)
│ ├───clan-dep-git omitted (use '--all-systems' to show)
│ ├───clan-dep-nix omitted (use '--all-systems' to show)
│ ├───clan-dep-openssh omitted (use '--all-systems' to show)
│ ├───"clan-dep-python3.11-mypy" omitted (use '--all-systems' to show)
├───packages
│ └───x86_64-linux
│ ├───clan-cli omitted (use '--all-systems' to show)
│ ├───clan-cli-docs omitted (use '--all-systems' to show)
│ ├───clan-ts-api omitted (use '--all-systems' to show)
│ ├───clan-app omitted (use '--all-systems' to show)
│ ├───default omitted (use '--all-systems' to show)
│ ├───deploy-docs omitted (use '--all-systems' to show)
│ ├───docs omitted (use '--all-systems' to show)
│ ├───editor omitted (use '--all-systems' to show)
└───templates
├───default: template: Initialize a new clan flake
└───new-clan: template: Initialize a new clan flake
```
You can execute every test separately by following the tree path `nix build .#checks.x86_64-linux.clan-pytest` for example.
## Test Locally in Devshell with Breakpoints
To test the cli locally in a development environment and set breakpoints for debugging, follow these steps:
1. Run the following command to execute your tests and allow for debugging with breakpoints:
```bash
cd ./pkgs/clan-cli
pytest -n0 -s --maxfail=1 ./tests/test_nameofthetest.py
```
You can place `breakpoint()` in your Python code where you want to trigger a breakpoint for debugging.
## Test Locally in a Nix Sandbox
To run tests in a Nix sandbox, you have two options depending on whether your test functions have been marked as impure or not:
### Running Tests Marked as Impure
If your test functions need to execute `nix build` and have been marked as impure because you can't execute `nix build` inside a Nix sandbox, use the following command:
```bash
nix run .#impure-checks
```
This command will run the impure test functions.
### Running Pure Tests
For test functions that have not been marked as impure and don't require executing `nix build`, you can use the following command:
```bash
nix build .#checks.x86_64-linux.clan-pytest --rebuild
```
This command will run all pure test functions.
### Inspecting the Nix Sandbox
If you need to inspect the Nix sandbox while running tests, follow these steps:
1. Insert an endless sleep into your test code where you want to pause the execution. For example:
```python
import time
time.sleep(3600) # Sleep for one hour
```
2. Use `cntr` and `psgrep` to attach to the Nix sandbox. This allows you to interactively debug your code while it's paused. For example:
```bash
psgrep -a -x your_python_process_name
cntr attach <container id, container name or process id>
```
Or you can also use the [nix breakpoint hook](https://nixos.org/manual/nixpkgs/stable/#breakpointhook)
# Standards

View File

@@ -43,28 +43,25 @@ exclude_docs: |
nav:
- Home: index.md
- Getting Started:
- Setup Clan: getting-started/index.md
- Create Installer: getting-started/installer.md
- Add Machines: getting-started/configure.md
- Getting Started: getting-started/index.md
- Installer: getting-started/installer.md
- Configure: getting-started/configure.md
- Secrets & Facts: getting-started/secrets.md
- Deploy Machine: getting-started/deploy.md
- Guides:
- Overview: manual/index.md
- Disk Encryption: getting-started/disk-encryption.md
- Mesh VPN: getting-started/mesh-vpn.md
- Backup & Restore: getting-started/backups.md
- Vars Backend: manual/vars-backend.md
- Facts Backend: manual/secrets.md
- Autoincludes: manual/adding-machines.md
- Adding Machines: manual/adding-machines.md
- Inventory: manual/inventory.md
- Secrets: manual/secrets.md
- Secure Boot: manual/secure-boot.md
- Flake-parts: manual/flake-parts.md
- Authoring:
- Modules: clanmodules/index.md
- Disk Templates: manual/disk-templates.md
- Contribute: manual/contribute.md
- Debugging: manual/debugging.md
- Repo Layout: manual/repo-layout.md
- Migrate existing Flakes: manual/migration-guide.md
# - Concepts:
# - Overview: concepts/index.md
- Reference:
@@ -92,7 +89,6 @@ nav:
- reference/clanModules/matrix-synapse.md
- reference/clanModules/moonlight.md
- reference/clanModules/mumble.md
- reference/clanModules/mycelium.md
- reference/clanModules/nginx.md
- reference/clanModules/packages.md
- reference/clanModules/postgresql.md
@@ -121,7 +117,6 @@ nav:
- reference/cli/flash.md
- reference/cli/history.md
- reference/cli/machines.md
- reference/cli/select.md
- reference/cli/secrets.md
- reference/cli/show.md
- reference/cli/ssh.md

View File

@@ -20,7 +20,10 @@
# Frontmatter for clanModules
clanModulesFrontmatter =
let
docs = pkgs.nixosOptionsDoc { options = self.lib.modules.frontmatterOptions; };
docs = pkgs.nixosOptionsDoc {
options = self.lib.modules.frontmatterOptions;
warningsAreErrors = true;
};
in
docs.optionsJSON;
@@ -69,13 +72,7 @@
];
}
''
export CLAN_CORE_PATH=${
self.filter {
include = [
"clanModules"
];
}
}
export CLAN_CORE_PATH=${self}
export CLAN_CORE_DOCS=${jsonDocs.clanCore}/share/doc/nixos/options.json
# A file that contains the links to all clanModule docs
export CLAN_MODULES_VIA_ROLES=${clanModulesViaRoles}

View File

@@ -103,7 +103,7 @@ def render_option(
read_only = option.get("readOnly")
res = f"""
{"#" * level} {sanitize(name) if short_head is None else sanitize(short_head)} {"{: #" + sanitize_anchor(name) + "}" if level > 1 else ""}
{"#" * level} {sanitize(name) if short_head is None else sanitize(short_head)} {"{: #"+sanitize_anchor(name)+"}" if level > 1 else ""}
"""
@@ -125,7 +125,7 @@ def render_option(
**Default**:
```nix
{option.get("default", {}).get("text") if option.get("default") else "No default set."}
{option.get("default",{}).get("text") if option.get("default") else "No default set."}
```
"""
example = option.get("example", {}).get("text")

View File

@@ -48,12 +48,12 @@ clanModules/borgbackup
=== "User module"
If the module should be ad-hoc loaded.
It can be made available in any project via the [`clan.inventory.modules`](../reference/nix-api/inventory.md#inventory.modules) attribute.
It can be made available in any project via the [`clan.inventory.modules`](../reference/nix-api/inventory.md#inventory.modules) attribute.
```nix title="flake.nix"
# ...
buildClan {
# 1. Add the module to the available clanModules with inventory support
# 1. Add the module to the available inventory modules
inventory.modules = {
custom-module = ./modules/my_module;
};
@@ -111,7 +111,7 @@ Adds the roles: `client` and `server`
Sometimes a `ClanModule` should be usable via both clan's `inventory` concept but also natively as a NixOS module.
> In the long term, we want most modules to implement support for the inventory,
> but we are also aware that there are certain low-level modules that always serve as a backend for other higher-level `clanModules` with inventory support.
> but we are also aware that there are certain low-level modules that always serve as a backend for other higher-level inventory modules.
> These modules may not want to implement inventory interfaces as they are always used directly by other modules.
This can be achieved by placing an additional `default.nix` into the root of the ClanModules directory as shown:

View File

@@ -1,4 +1,6 @@
# Introduction to Backups
# Backups
## Introduction to Backups
When you're managing your own services, creating regular backups is crucial to ensure your data's safety.
This guide introduces you to Clan's built-in backup functionalities.
@@ -7,6 +9,8 @@ We might add more options in the future, but for now, let's dive into how you ca
## Backing Up Locally with Localbackup
### What is Localbackup?
Localbackup lets you backup your data onto physical storage devices connected to your computer,
such as USB hard drives or network-attached storage. It uses a tool called rsnapshot for this purpose.

View File

@@ -1,3 +1,4 @@
# Configuration - How to configure clan with your own machines
Managing machine configurations can be done in the following ways:
@@ -162,8 +163,7 @@ replace `[MACHINE_NAME]` with the name of the machine i.e. `jon` and `[HOSTNAME]
clan machines update-hardware-config jon
```
This command connects to the ip configured in the previous step, runs [nixos-facter](https://github.com/nix-community/nixos-facter)
to detect hardware configurations (excluding filesystems), and writes them to `machines/jon/facter.json`.
This command connects to the ip configured in the previous step, runs `nixos-generate-config` to detect hardware configurations (excluding filesystems), and writes them to `machines/jon/hardware-configuration.nix`.
### Step 3: Custom Disk Formatting
@@ -207,3 +207,11 @@ If you only want to setup a single machine at this point, you can delete `sara`
```
git rm ./machines/sara
```
---
## What's next?
- [Secrets & Facts](secrets.md): Setting up secrets with sops-nix
---

View File

@@ -1,3 +1,4 @@
# Deploy Machine
Integrating a new machine into your Clan environment is an easy yet flexible process, allowing for a straight forward management of multiple NixOS configurations.
@@ -214,4 +215,12 @@ buildClan {
This is useful for machines that are not always online or are not part of the regular update cycle.
---
## What's next ?
- [**Disk Encryption**](./disk-encryption.md): Configure disk encryption with remote decryption
- [**Mesh VPN**](./mesh-vpn.md): Configuring a secure mesh network.
---

View File

@@ -14,7 +14,7 @@ lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
=== "**Single Disk**"
Below is the configuration for `disko.nix`
```nix hl_lines="13 53"
```nix hl_lines="17 48"
--8<-- "docs/code-examples/disko-single-disk.nix"
```
@@ -22,7 +22,7 @@ lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
=== "**Raid 1**"
Below is the configuration for `disko.nix`
```nix hl_lines="13 53 54"
```nix hl_lines="17 48 49"
--8<-- "docs/code-examples/disko-raid.nix"
```
@@ -172,7 +172,7 @@ ssh -p 7172 root@192.168.178.141
2. Run the `systemd-tty-ask-password-agent` utility to query a password:
```bash
systemd-tty-ask-password-agent
systemd-tty-ask-password-agent --query
```
After completing these steps, your NixOS should be successfully installed and ready for use.

View File

@@ -1,9 +1,6 @@
# :material-clock-fast: Getting Started
Ready to create your own clan and manage a fleet of machines? Follow these simple steps to get started.
By the end of this guide, you'll have a fresh NixOS configuration ready to push to one or more machines. You'll create a new git repository and a flake, and all you need is at least one machine to push to. This is the easiest way to begin, and we recommend you to copy your existing configuration into this new setup!
Create your own clan with these initial steps and manage a fleet of machines with one single testable git repository!
### Prerequisites
@@ -54,8 +51,6 @@ clan --help
### Step 2: Initialize Your Project
If you want to migrate an existing project, follow this [guide](https://docs.clan.lol/manual/migration-guide/).
Set the foundation of your Clan project by initializing it as follows:
```bash
@@ -105,3 +100,10 @@ sara
You just successfully bootstrapped your first clan directory.
---
### What's Next?
- [**Installer**](./installer.md): Setting up new computers remotely is easy with an USB stick.
---

View File

@@ -1,4 +1,4 @@
# Create an Installer Image
# Installer
Our installer image simplifies the process of performing remote installations.
@@ -61,8 +61,7 @@ sudo umount /dev/sdb1
The `clan flash` utility will erase the disk. Make sure to specify the correct device
- **SSH-Pubkey Option**
- **SSH-Pubkey Option**:
To add an ssh public key into the installer image append the option:
```
--ssh-pubkey <pubkey_path>
@@ -70,21 +69,19 @@ sudo umount /dev/sdb1
If you do not have an ssh key yet, you can generate one with `ssh-keygen -t ed25519` command.
This ssh key will be installed into the root user.
- **Connect to the installer**
- **Connect to the installer
On boot, the installer will display on-screen the IP address it received from the network.
If you need to configure Wi-Fi first, refer to the next section.
If Multicast-DNS (Avahi) is enabled on your own machine, you can also access the installer using the `flash-installer.local` address.
- **List Keymaps**
- **List Keymaps**:
You can get a list of all keymaps with the following command:
```
clan flash list keymaps
```
- **List Languages**
- **List Languages**:
You can get a list of all languages with the following command:
```
clan flash list languages
@@ -197,3 +194,10 @@ Press ++ctrl+d++ to exit `IWD`.
You're all set up
---
## What's next?
- [Configure Machines](configure.md): Customize machine configuration
---

View File

@@ -1,3 +1,4 @@
# Mesh VPN
This guide provides detailed instructions for configuring
[ZeroTier VPN](https://zerotier.com) within Clan. Follow the

View File

@@ -1,3 +1,4 @@
# Secrets / Facts
Clan enables encryption of secrets (such as passwords & keys) ensuring security and ease-of-use among users.
@@ -83,3 +84,8 @@ This command helps ensure that your system configuration is correct and free fro
You can integrate this step into your [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) workflow to ensure that only valid Nix configurations are merged into your codebase.
## What's next?
- [Deployment](deploy.md): How to remotely deploy your machine
- Full [Secrets](../manual/secrets.md) guide If you want to know more about how to save and share passwords in your clan

View File

@@ -4,13 +4,13 @@ hide:
- toc
---
# :material-home: Welcome to **Clan**'s documentation
# :material-home: Welcome to **Clan**'s awesome documentation
[Getting Started](./getting-started/index.md){ .md-button }
## Tutorials
## What's inside
**Learning-oriented adventures with a hands-on experience.**
This documentation is structured into the following sections
<div class="grid cards" markdown>
@@ -19,69 +19,25 @@ hide:
---
Create your own clan and get everything
running in minutes
running in a couple of minutes.
[:octicons-arrow-right-24: Getting started](./getting-started/index.md)
- :fontawesome-solid-user-group:{ .lg .middle } __Authoring Modules__
- :material-sign-direction:{ .lg .middle } __Guides__
---
Create clanModules that can be reused by the community.
Instructions and explanations for practical implementations, ordered by topic.
[:octicons-arrow-right-24: Authoring clanModules](./clanmodules/index.md)
[:octicons-arrow-right-24: Guides](./manual/index.md)
- :material-api:{ .lg .middle } __Reference__
---
Detailed Specification of Functions and APIs.
[:octicons-arrow-right-24: Reference](./reference/index.md)
</div>
## :material-book: Guides
**How-to Guides for achieving a certain goal or solving a specific issue.**
<div class="grid cards" markdown>
- [Autoincludes](./manual/adding-machines.md)
---
Learn how Clan automatically includes machines and Nix files.
- [Vars Backend](./manual/vars-backend.md)
---
Learn how to manage secrets with facts.
- [Inventory](./manual/inventory.md)
---
Clan's declaration format for running **services** on one or multiple **machines**.
- [Flake-parts](./manual/flake-parts.md)
---
Use clan with [https://flake-parts.dev]()
- [Contribute](./manual/contribute.md)
---
Discover how to set up a development environment to contribute to Clan!
</div>
## API Reference
**Auto generated API Documentation**
<div class="grid cards" markdown>
- [Reference Overview](./reference/index.md)
---
Learn how to interface with Clan programmatically
</div>

View File

@@ -1,3 +1,4 @@
# Adding Machines
Clan has two general methods of adding machines:
@@ -17,8 +18,6 @@ Every folder `machines/{machineName}` will be registered automatically as a Clan
- [x] `machines/{machineName}/facter.json` Automatically configured, for further information see [nixos-facter](https://clan.lol/blog/nixos-facter/)
- [x] `machines/{machineName}/disko.nix` Automatically loaded, for further information see the [disko docs](https://github.com/nix-community/disko/blob/master/docs/quickstart.md).
## Manual declaration
Machines can also be added manually under `buildClan`, `clan.*` in flake-parts or via [`inventory`](../manual/inventory.md).

View File

@@ -1,153 +0,0 @@
Here are some methods for debugging and testing the clan-cli:
## Using a Development Branch
To streamline your development process, I suggest not installing `clan-cli`. Instead, clone the `clan-core` repository and add `clan-core/pkgs/clan-cli/bin` to your PATH to use the checked-out version directly.
!!! Note
After cloning, navigate to `clan-core/pkgs/clan-cli` and execute `direnv allow` to activate the devshell. This will set up a symlink to nixpkgs at a specific location; without it, `clan-cli` won't function correctly.
With this setup, you can easily use [breakpoint()](https://docs.python.org/3/library/pdb.html) to inspect the application's internal state as needed.
This approach is feasible because `clan-cli` only requires a Python interpreter and has no other dependencies.
```nix
pkgs.mkShell {
packages = [
pkgs.python3
];
shellHook = ''
export GIT_ROOT="$(git rev-parse --show-toplevel)"
export PATH=$PATH:~/Projects/clan-core/pkgs/clan-cli/bin
'';
}
```
## The Debug Flag
You can enhance your debugging process with the `--debug` flag in the `clan` command. When you add this flag to any command, it displays all subprocess commands initiated by `clan` in a readable format, along with the source code position that triggered them. This feature makes it easier to understand and trace what's happening under the hood.
```bash
$ clan machines list --debug 1
Debug log activated
nix \
--extra-experimental-features 'nix-command flakes' \
eval \
--show-trace --json \
--print-build-logs '/home/qubasa/Projects/qubasas-clan#clanInternals.machines.x86_64-linux' \
--apply builtins.attrNames \
--json
Caller: ~/Projects/clan-core/pkgs/clan-cli/clan_cli/machines/list.py:96::list_nixos_machines
warning: Git tree '/home/qubasa/Projects/qubasas-clan' is dirty
demo
gchq-local
wintux
```
## VSCode
If you're using VSCode, it has a handy feature that makes paths to source code files clickable in the integrated terminal. Combined with the previously mentioned techniques, this allows you to open a Clan in VSCode, execute a command like `clan machines list --debug`, and receive a printed path to the code that initiates the subprocess. With the `Ctrl` key (or `Cmd` on macOS) and a mouse click, you can jump directly to the corresponding line in the code file and add a `breakpoint()` function to it, to inspect the internal state.
## See all possible packages and tests
To quickly show all possible packages and tests execute:
```bash
nix flake show
```
Under `checks` you will find all tests that are executed in our CI. Under `packages` you find all our projects.
```
git+file:///home/lhebendanz/Projects/clan-core
├───apps
│ └───x86_64-linux
│ ├───install-vm: app
│ └───install-vm-nogui: app
├───checks
│ └───x86_64-linux
│ ├───borgbackup omitted (use '--all-systems' to show)
│ ├───check-for-breakpoints omitted (use '--all-systems' to show)
│ ├───clan-dep-age omitted (use '--all-systems' to show)
│ ├───clan-dep-bash omitted (use '--all-systems' to show)
│ ├───clan-dep-e2fsprogs omitted (use '--all-systems' to show)
│ ├───clan-dep-fakeroot omitted (use '--all-systems' to show)
│ ├───clan-dep-git omitted (use '--all-systems' to show)
│ ├───clan-dep-nix omitted (use '--all-systems' to show)
│ ├───clan-dep-openssh omitted (use '--all-systems' to show)
│ ├───"clan-dep-python3.11-mypy" omitted (use '--all-systems' to show)
├───packages
│ └───x86_64-linux
│ ├───clan-cli omitted (use '--all-systems' to show)
│ ├───clan-cli-docs omitted (use '--all-systems' to show)
│ ├───clan-ts-api omitted (use '--all-systems' to show)
│ ├───clan-app omitted (use '--all-systems' to show)
│ ├───default omitted (use '--all-systems' to show)
│ ├───deploy-docs omitted (use '--all-systems' to show)
│ ├───docs omitted (use '--all-systems' to show)
│ ├───editor omitted (use '--all-systems' to show)
└───templates
├───default: template: Initialize a new clan flake
└───new-clan: template: Initialize a new clan flake
```
You can execute every test separately by following the tree path `nix run .#checks.x86_64-linux.clan-pytest -L` for example.
## Test Locally in Devshell with Breakpoints
To test the cli locally in a development environment and set breakpoints for debugging, follow these steps:
1. Run the following command to execute your tests and allow for debugging with breakpoints:
```bash
cd ./pkgs/clan-cli
pytest -n0 -s --maxfail=1 ./tests/test_nameofthetest.py
```
You can place `breakpoint()` in your Python code where you want to trigger a breakpoint for debugging.
## Test Locally in a Nix Sandbox
To run tests in a Nix sandbox, you have two options depending on whether your test functions have been marked as impure or not:
### Running Tests Marked as Impure
If your test functions need to execute `nix build` and have been marked as impure because you can't execute `nix build` inside a Nix sandbox, use the following command:
```bash
nix run .#impure-checks -L
```
This command will run the impure test functions.
### Running Pure Tests
For test functions that have not been marked as impure and don't require executing `nix build`, you can use the following command:
```bash
nix build .#checks.x86_64-linux.clan-pytest --rebuild
```
This command will run all pure test functions.
### Inspecting the Nix Sandbox
If you need to inspect the Nix sandbox while running tests, follow these steps:
1. Insert an endless sleep into your test code where you want to pause the execution. For example:
```python
import time
time.sleep(3600) # Sleep for one hour
```
2. Use `cntr` and `psgrep` to attach to the Nix sandbox. This allows you to interactively debug your code while it's paused. For example:
```bash
psgrep <your_python_process_name>
cntr attach <container id, container name or process id>
```
Or you can also use the [nix breakpoint hook](https://nixos.org/manual/nixpkgs/stable/#breakpointhook)

View File

@@ -1,3 +1,5 @@
# Disk Templates
!!! Danger ":fontawesome-solid-road-barrier: Under Construction :fontawesome-solid-road-barrier:"
Currently under construction use with caution

View File

@@ -1,3 +1,4 @@
# Clan with `flake-parts`
Clan supports integration with [flake.parts](https://flake.parts/) a tool which allows composing nixos modules in a modular way.

66
docs/site/manual/index.md Normal file
View File

@@ -0,0 +1,66 @@
# :material-book: Guides
Instructions and explanations for practical implementations, ordered by topic.
## Tutorials
**Learning-oriented adventures with a hands-on experience.**
<div class="grid cards" markdown>
- :material-clock-fast:{ .lg .middle } __Set up in 15 minutes__
---
Create your own clan and get everything
running in minutes
[:octicons-arrow-right-24: Getting started](../getting-started/index.md)
- :fontawesome-solid-user-group:{ .lg .middle } __Authoring Modules__
---
Create clanModules that can be reused by the community.
[:octicons-arrow-right-24: Authoring clanModules](../clanmodules/index.md)
</div>
## Guides
**How-to Guides for achieving a certain goal or solving a specific issue.**
<div class="grid cards" markdown>
- [Machines](./adding-machines.md)
---
Learn how Clan automatically includes machines and Nix files.
- [Secrets](./secrets.md)
---
Learn how to manage secrets.
- [Inventory](./inventory.md)
---
Clan's declaration format for running **services** on one or multiple **machines**.
- [Flake-parts](./flake-parts.md)
---
Use clan with [https://flake-parts.dev]()
- [Contribute](./contribute.md)
---
Discover how to set up a development environment to contribute to Clan!
</div>

View File

@@ -1,15 +1,10 @@
# Inventory
`Inventory` is an abstract service layer for consistently configuring distributed services across machine boundaries.
## Concept
See [Inventory API Documentation](../reference/nix-api/inventory.md)
Its concept is slightly different to what NixOS veterans might be used to. The inventory is a service definition on a higher level, not a machine configuration. This allows you to define a consistent and coherent service.
The inventory logic will automatically derive the modules and configurations to enable on each machine in your `clan` based on its `role`. This makes it super easy to setup distributed `services` such as Backups, Networking, traditional cloud services, or peer-to-peer based applications.
The following tutorial will walk through setting up a Backup service where the terms `Service` and `Role` will become more clear.
See also: [Inventory API Documentation](../reference/nix-api/inventory.md)
This guide will walk you through setting up a backup service, where the inventory becomes useful.
!!! example "Experimental status"
The inventory implementation is not considered stable yet.
@@ -23,13 +18,17 @@ See also: [Inventory API Documentation](../reference/nix-api/inventory.md)
## Services
The inventory defines `services`. Membership of `machines` is defined via `roles` exclusively.
The inventory defines `services`. Membership of `machines` is defined via roles exclusively.
See each [modules documentation](../reference/clanModules/index.md) for its available roles.
See the each [module documentation](../reference/clanModules/index.md) for available roles.
!!! Note
It is possible to use any [clanModule](../reference/clanModules/index.md) in the inventory and add machines via
`roles.default.*`
### Adding services to machines
A service can be added to one or multiple machines via `Roles`. clan's `Role` interface provide sane defaults for a module this allows the module author to reduce the configuration overhead to a minimum.
A module can be added to one or multiple machines via `Roles`. clan's `Role` interface provide sane defaults for a module this allows the module author to reduce the configuration overhead to a minimum.
Each service can still be customized and configured according to the modules options.

View File

@@ -1,171 +0,0 @@
# Migrate existing NixOS configurations
This guide will help you migrate your existing Nix configurations into Clan.
!!! Warning
Migrating instead of starting new can be trickier and might lead to bugs or
unexpected issues. We recommend following the [Getting Started](../getting-started/index.md) guide first. Once you have a working setup, you can easily transfer your Nix configurations over.
## Back up your existing configuration!
Before you start, it is strongly recommended to back up your existing
configuration in any form you see fit. If you use version control to manage
your configuration changes, it is also a good idea to follow the migration
guide in a separate branch until everything works as expected.
## Starting Point
We assume you are already using NixOS flakes to manage your configuration. If
not, migrate to a flake-based setup following the official [NixOS
documentation](https://nix.dev/manual/nix/2.25/command-ref/new-cli/nix3-flake.html).
The snippet below shows a common Nix flake. For this example we will assume you
have two hosts: **berlin** and **cologne**.
```nix
{
inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
outputs = { self, nixpkgs, ... }: {
nixosConfigurations = {
berlin = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [./machines/berlin/configuration.nix];
};
cologne = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [./machines/cologne/configuration.nix];
};
};
};
}
```
## Add clan-core Input
Add `clan-core` to your flake as input. It will provide everything we need to
manage your configurations with clan.
```nix
inputs.clan-core = {
url = "git+https://git.clan.lol/clan/clan-core";
# Don't do this if your machines are on nixpkgs stable.
inputs.nixpkgs.follows = "nixpkgs";
};
```
## Update Outputs
To be able to access our newly added dependency, it has to be added to the
output parameters.
```diff
- outputs = { self, nixpkgs, ... }:
+ outputs = { self, nixpkgs, clan-core }:
```
The existing `nixosConfigurations` output of your flake will be created by
clan. In addition, a new `clanInternals` output will be added. Since both of
these are provided by the output of `lib.buildClan`, a common syntax is to use a
`let...in` statement to create your clan and access its parameters in the flake
outputs.
For the provided flake example, your flake should now look like this:
```nix
{
inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
outputs = { self, nixpkgs, ... }:
let
clan = clan-core.lib.buildClan {
directory = self; # this needs to point at the repository root
specialArgs = {};
inventory.meta.name = "NEEDS_TO_BE_UNIQUE"; # TODO: Changeme
machines = {
berlin = {
nixpkgs.hostPlatform = "x86_64-linux";
imports = [ ./machines/berlin/configuration.nix ];
};
cologne = {
nixpkgs.hostPlatform = "x86_64-linux";
imports = [ ./machines/cologne/configuration.nix ];
};
};
};
in
{
nixosConfigurations = clan.nixosConfigurations;
inherit (clan) clanInternals;
};
}
```
Et voilà! Your existing hosts are now part of a clan. Existing Nix tooling
should still work as normal. To check that you didn't make any errors, run `nix
flake show` and verify both hosts are still recognized as if nothing had
changed. You should also see the new `clanInternals` output.
```
nix flake show
git+file:///my-nixos-config
├───clanInternals: unknown
└───nixosConfigurations
├───berlin: NixOS configuration
└───cologne: NixOS configuration
```
Of course you can also rebuild your configuration using `nixos-rebuild` and
verify everything still works.
## Add Clan CLI devShell
At this point Clan is set up, but you can't use the CLI yet. To do so, it is
recommended to expose it via a `devShell` in your flake. It is also possible to
install it any other way you would install a package in Nix, but using a
development shell ensures the CLI's version will always be in sync with your
configuration.
A minimal example is provided below, add it to your flake outputs.
```nix
devShells."x86_64-linux".default = nixpkgs.legacyPackages."x86_64-linux".mkShell {
packages = [ clan-core.packages."x86_64-linux".clan-cli ];
};
```
To use the CLI, execute `nix develop` in the directory of your flake. The
resulting shell, provides you with the `clan` CLI tool. Since you will be using
it every time you interact with Clan, it is recommended to set up
[direnv](https://direnv.net/).
Verify everything works as expected by running `clan machines list`.
```
nix develop
[user@host:~/my-nixos-config]$ clan machines list
berlin
cologne
```
## Specify Targets
Clan needs to know where it can reach your hosts. For each of your hosts, set
`clan.core.networking.targetHost` to its address or hostname.
```nix
# machines/berlin/configuration.nix
{
clan.core.networking.targetHost = "123.4.56.78";
}
```
## Next Steps
You are now fully set up. Use the CLI to manage your hosts or proceed to
configure further services. At this point you should be able to run commands
like `clan machines update berlin` to deploy a host.

View File

@@ -1,25 +0,0 @@
This guide will help you navigate the codebase and locate key files:
```bash
$ tree -L 1
.
├── checks # Contains NixOS and VM tests
├── clanModules # Clan modules available for end-user import
├── docs # Source files for docs.clan.lol, generated with MkDocs
├── flakeModules
├── lib # User-exposed Clan Nix functions like buildClan and inventory
├── machines
├── nixosModules # Internal Clan Nix functions, e.g., clanCore
├── pkgs # Clan applications and packaged dependencies
├── formatter.nix # Configuration for nix-treefmt, manages `nix fmt`
├── scripts
├── sops
├── templates # Template files for creating a new Clan
└── vars
```
## Getting Started with Infrastructure
To dive into infrastructure, check out our clan infra repo: [clan-infra](https://git.clan.lol/clan/clan-infra). Please provide us with your public SOPS key so we can add you as an admin.

View File

@@ -1,151 +0,0 @@
!!! Note
Vars is the new secret backend that will soon replace the Facts backend
Defining a linux user's password via the nixos configuration previously required running `mkpasswd ...` and then copying the hash back into the nix configuration.
In this example, we will guide you through automating that interaction using clan `vars`.
For a more general explanation of what clan vars are and how they work, see the intro of the [Reference Documentation for vars](https://docs.clan.lol/reference/clan-core/vars/)
This guide assumes
- clan is set up already (see [Getting Started](../getting-started/index.md))
- a machine has been added to the clan (see [Adding Machines](./adding-machines.md))
This section will walk you through the following steps:
1. declare a `generator` in the machine's nixos configuration
2. inspect the status via the clan cli
3. generate the vars
4. observe the changes
5. update the machine
6. share the root password between machines
7. change the password
## Declare the generator
In this example, a `vars` `generator` is used to:
- prompt the user for the password
- run the required `mkpasswd` command to generate the hash
- store the hash in a file
- expose the file path to the nixos configuration
Create a new nix file `root-password.nix` with the following content and import it into your `configuration.nix`
```nix
{config, pkgs, ...}: {
clan.core.vars.generators.root-password = {
# prompt the user for a password
# (`password-input` being an arbitrary name)
prompts.password-input.description = "the root user's password";
prompts.password-input.type = "hidden";
# don't store the prompted password itself
prompts.password-input.persist = false;
# define an output file for storing the hash
files.password-hash.secret = false;
# define the logic for generating the hash
script = ''
cat $prompts/password-input | mkpasswd -m sha-512 > $out/password-hash
'';
# the tools required by the script
runtimeInputs = [ pkgs.mkpasswd ];
};
# ensure users are immutable (otherwise the following config might be ignored)
users.mutableUsers = false;
# set the root password to the file containing the hash
users.users.root.hashedPasswordFile =
# clan will make sure, this path exists
config.clan.core.vars.generators.root-password.files.password-hash.path;
}
```
## Inspect the status
Executing `clan vars list`, you should see the following:
```shellSession
$ clan vars list my_machine
root-password/password-hash: <not set>
```
...indicating that the value `password-hash` for the generator `root-password` is not set yet.
## Generate the values
This step is not strictly necessary, as deploying the machine via `clan machines update` would trigger the generator as well.
To run the generator, execute `clan vars generate` for your machine
```shellSession
$ clan vars generate my_machine
Enter the value for root-password/password-input (hidden):
```
After entering the value, the updated status is reported:
```shellSession
Updated var root-password/password-hash
old: <not set>
new: $6$RMats/YMeypFtcYX$DUi...
```
## Observe the changes
With the last step, a new file was created in your repository:
`vars/per-machine/my-machine/root-password/password-hash/value`
If the repository is a git repository, a commit was created automatically:
```shellSession
$ git log -n1
commit ... (HEAD -> master)
Author: ...
Date: ...
Update vars via generator root-password for machine grmpf-nix
```
## Update the machine
```shell
clan machines update my_machine
```
## Share root password between machines
If we just imported the `root-password.nix` from above into more machines, clan would ask for a new password for each additional machine.
If the root password instead should only be entered once and shared across all machines, the generator defined above needs to be declared as `shared`, by adding `share = true` to it:
```nix
{config, pkgs, ...}: {
clan.vars.generators.root-password = {
share = true;
# ...
}
}
```
Importing that shared generator into each machine will ensure that the password is only asked once, when the first machine gets updated, and then re-used for all subsequent machines.
## Change the root password
Changing the password can be done via this command.
Replace `my-machine` with your machine.
If the password is shared, just pick any machine that has the generator declared.
```shellSession
$ clan vars generate my-machine --generator root-password --regenerate
...
Enter the value for root-password/password-input (hidden):
Input received. Processing...
...
Updated var root-password/password-hash
old: $6$tb27m6EOdff.X9TM$19N...
new: $6$OyoQtDVzeemgh8EQ$zRK...
```
## Further Reading
- [Reference Documentation for `clan.core.vars` nixos options](../reference/clan-core/vars.md)
- [Reference Documentation for the `clan vars` cli command](../reference/cli/vars.md)

View File

@@ -0,0 +1 @@
/nix/store/8y5h98wk5p94mv1wyb2c4gkrr7bswd19-asciinema-player.css

View File

@@ -0,0 +1 @@
/nix/store/w0i3f9qzn9n6jmfnfgiw5wnab2f9ssdw-asciinema-player.min.js

View File

@@ -15,8 +15,3 @@
.md-header img {
filter: invert(100%) brightness(100%);
}
.md-nav__title,
.md-nav__item.md-nav__item--section > label > span {
color: var(--md-typeset-a-color);
}

41
flake.lock generated
View File

@@ -7,11 +7,11 @@
]
},
"locked": {
"lastModified": 1738148035,
"narHash": "sha256-KYOATYEwaKysL3HdHdS5kbQMXvzS4iPJzJrML+3TKAo=",
"lastModified": 1735468753,
"narHash": "sha256-2dt1nOe9zf9pDkf5Kn7FUFyPRo581s0n90jxYXJ94l0=",
"owner": "nix-community",
"repo": "disko",
"rev": "18d0a984cc2bc82cf61df19523a34ad463aa7f54",
"rev": "84a5b93637cc16cbfcc61b6e1684d626df61eb21",
"type": "github"
},
"original": {
@@ -27,11 +27,11 @@
]
},
"locked": {
"lastModified": 1738453229,
"narHash": "sha256-7H9XgNiGLKN1G1CgRh0vUL4AheZSYzPm+zmZ7vxbJdo=",
"lastModified": 1735774679,
"narHash": "sha256-soePLBazJk0qQdDVhdbM98vYdssfs3WFedcq+raipRI=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "32ea77a06711b758da0ad9bd6a844c5740a87abd",
"rev": "f2f7418ce0ab4a5309a4596161d154cfc877af66",
"type": "github"
},
"original": {
@@ -42,11 +42,11 @@
},
"nixos-facter-modules": {
"locked": {
"lastModified": 1736931726,
"narHash": "sha256-aY55yiifyo1XPPpbpH0kWlV1g2dNGBlx6622b7OK8ks=",
"lastModified": 1734596637,
"narHash": "sha256-MRqwVAe3gsb88u4ME1UidmZFVCx+FEnoob0zkpO9DMY=",
"owner": "numtide",
"repo": "nixos-facter-modules",
"rev": "fa11d87b61b2163efbb9aed7b7a5ae0299e5ab9c",
"rev": "536472754982bf03079b4b4e0261838a760587c0",
"type": "github"
},
"original": {
@@ -57,11 +57,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1738422722,
"narHash": "sha256-Q4vhtbLYWBUnjWD4iQb003Lt+N5PuURDad1BngGKdUs=",
"lastModified": 1734435836,
"narHash": "sha256-kMBQ5PRiFLagltK0sH+08aiNt3zGERC2297iB6vrvlU=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "102a39bfee444533e6b4e8611d7e92aa39b7bec1",
"rev": "4989a246d7a390a859852baddb1013f825435cee",
"type": "github"
},
"original": {
@@ -89,16 +89,15 @@
]
},
"locked": {
"lastModified": 1736953253,
"narHash": "sha256-shJxzy7qypjq9hpETQ3gJsBZXO5E3KR0INca/xwiVp4=",
"owner": "pinpox",
"lastModified": 1736064798,
"narHash": "sha256-xJRN0FmX9QJ6+w8eIIIxzBU1AyQcLKJ1M/Gp6lnSD20=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "a7c6e64401b6dde13c0de90230cb64087c9d9693",
"rev": "5dc08f9cc77f03b43aacffdfbc8316807773c930",
"type": "github"
},
"original": {
"owner": "pinpox",
"ref": "lazy-assertions",
"owner": "Mic92",
"repo": "sops-nix",
"type": "github"
}
@@ -125,11 +124,11 @@
]
},
"locked": {
"lastModified": 1738070913,
"narHash": "sha256-j6jC12vCFsTGDmY2u1H12lMr62fnclNjuCtAdF1a4Nk=",
"lastModified": 1736115332,
"narHash": "sha256-FBG9d7e0BTFfxVdw4b5EmNll2Mv7hfRc54hbB4LrKko=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "bebf27d00f7d10ba75332a0541ac43676985dea3",
"rev": "1788ca5acd4b542b923d4757d4cfe4183cc6a92d",
"type": "github"
},
"original": {

View File

@@ -12,7 +12,7 @@
nixos-facter-modules.url = "github:numtide/nixos-facter-modules";
sops-nix.url = "github:pinpox/sops-nix/lazy-assertions";
sops-nix.url = "github:Mic92/sops-nix";
sops-nix.inputs.nixpkgs.follows = "nixpkgs";
systems.url = "github:nix-systems/default";
@@ -24,53 +24,36 @@
outputs =
inputs@{
flake-parts,
nixpkgs,
self,
systems,
...
}:
let
inherit (nixpkgs.lib)
filter
optional
pathExists
;
in
flake-parts.lib.mkFlake { inherit inputs; } (
{ ... }:
{
clan = {
meta.name = "clan-core";
directory = self;
};
flake = {
clan.templates = import ./templates { };
};
systems = import systems;
imports =
# only importing existing paths allows to minimize the flake for test
# by removing files
filter pathExists [
./checks/flake-module.nix
./clanModules/flake-module.nix
./devShell.nix
./docs/nix/flake-module.nix
./flakeModules/flake-module.nix
./lib/filter-clan-core/flake-module.nix
./lib/flake-module.nix
./nixosModules/clanCore/vars/flake-module.nix
./nixosModules/flake-module.nix
./pkgs/flake-module.nix
./templates/flake-module.nix
]
++ [
(if pathExists ./flakeModules/clan.nix then import ./flakeModules/clan.nix inputs.self else { })
]
imports = [
./checks/flake-module.nix
./clanModules/flake-module.nix
./flakeModules/flake-module.nix
(import ./flakeModules/clan.nix inputs.self)
./devShell.nix
# TODO: migrate this @davHau
# ./docs/flake-module
./docs/nix/flake-module.nix
./lib/flake-module.nix
./nixosModules/flake-module.nix
./nixosModules/clanCore/vars/flake-module.nix
./pkgs/flake-module.nix
./templates/flake-module.nix
# Make treefmt-nix optional
# This only works if you set inputs.clan-core.inputs.treefmt-nix.follows
# to a non-empty input that doesn't export a flakeModule
++ optional (pathExists ./formatter.nix && inputs.treefmt-nix ? flakeModule) ./formatter.nix;
] ++ inputs.nixpkgs.lib.optional (inputs.treefmt-nix ? flakeModule) ./formatter.nix;
}
);
}

View File

@@ -92,12 +92,13 @@
treefmt.programs.mypy.directories =
{
"clan-cli" = {
extraPythonPackages = self'.packages.clan-cli.testDependencies;
directory = "pkgs/clan-cli";
extraPythonPackages = (self'.packages.clan-cli.devshellPyDeps pkgs.python3Packages);
};
"clan-app" = {
directory = "pkgs/clan-app";
extraPythonPackages = (self'.packages.clan-app.devshellPyDeps pkgs.python3Packages);
extraPythonPackages =
(self'.packages.clan-app.externalTestDeps or [ ]) ++ self'.packages.clan-cli.testDependencies;
extraPythonPaths = [ "../clan-cli" ];
};
}
@@ -106,9 +107,8 @@
{
"clan-vm-manager" = {
directory = "pkgs/clan-vm-manager";
extraPythonPackages = self'.packages.clan-vm-manager.externalTestDeps ++ [
(pkgs.python3.withPackages (ps: self'.packages.clan-cli.devshellPyDeps ps))
];
extraPythonPackages =
self'.packages.clan-vm-manager.externalTestDeps ++ self'.packages.clan-cli.testDependencies;
extraPythonPaths = [ "../clan-cli" ];
};
}

View File

@@ -1,23 +0,0 @@
{
lib,
self,
...
}:
let
# Returns an attrset with inputs that have the attribute `clanModules`
inputsWithClanModules = lib.filterAttrs (
_name: value: builtins.hasAttr "clanModules" value
) self.inputs;
flattenedClanModules = lib.foldl' (
acc: input:
lib.mkMerge [
acc
input.clanModules
]
) { } (lib.attrValues inputsWithClanModules);
in
{
inventory.modules = flattenedClanModules;
}

View File

@@ -8,8 +8,7 @@
}:
{
## Inputs
self ? lib.warn "Argument: 'self' must be set when using 'buildClan'." null, # Reference to the current flake
# allows to include machine-specific modules i.e. machines.${name} = { ... }
directory, # The directory containing the machines subdirectory # allows to include machine-specific modules i.e. machines.${name} = { ... }
# A map from arch to pkgs, if specified this nixpkgs will be only imported once for each system.
# This improves performance, but all nipxkgs.* options will be ignored.
# deadnix: skip
@@ -24,12 +23,11 @@ let
inherit
lib
nixpkgs
specialArgs
clan-core
self
;
inherit specialArgs;
self = directory;
};
rest = builtins.removeAttrs attrs [ "specialArgs" ];
in
eval {

View File

@@ -7,7 +7,7 @@ let
};
evalDocs = pkgs.nixosOptionsDoc {
options = eval.options;
warningsAreErrors = false;
warningsAreErrors = true;
};
in
{

View File

@@ -2,8 +2,8 @@
lib,
nixpkgs,
clan-core,
self,
specialArgs ? { },
self,
}:
# Returns a function that takes self, which should point to the directory of the flake
module:
@@ -14,8 +14,6 @@ module:
modules = [
./interface.nix
module
{
inherit specialArgs;
}
{ inherit specialArgs; }
];
}).config

View File

@@ -20,11 +20,12 @@ in
jsonDocs = import ./eval-docs.nix {
inherit pkgs lib;
};
in
{
legacyPackages.clan-internals-docs = jsonDocs.optionsJSON;
# Run: nix-unit --extra-experimental-features flakes --flake .#legacyPackages.x86_64-linux.evalTests-build-clan
# Run: nix-unit --extra-experimental-features flakes --flake .#legacyPackages.x86_64-linux.evalTests
legacyPackages.evalTests-build-clan = import ./tests.nix {
inherit lib;
inherit (inputs) nixpkgs;
@@ -38,20 +39,7 @@ in
nix-unit --eval-store "$HOME" \
--extra-experimental-features flakes \
${inputOverrides} \
--flake ${
self.filter {
include = [
"flakeModules"
"inventory.json"
"lib/build-clan"
"lib/default.nix"
"lib/flake-module.nix"
"lib/inventory"
"machines"
"nixosModules"
];
}
}#legacyPackages.${system}.evalTests-build-clan
--flake ${self}#legacyPackages.${system}.evalTests-build-clan
touch $out
'';

View File

@@ -8,26 +8,10 @@ let
in
{
options = {
self = lib.mkOption {
type = types.raw;
default = self;
defaultText = "Reference to the current flake";
description = ''
This is used to import external clan modules.
'';
};
# Required options
directory = lib.mkOption {
type = types.coercedTo lib.types.raw (
v:
if lib.isAttrs v then
lib.warn "It appears you set 'clan.directory = self'. Instead set 'clan.self = self'. 'clan.directory' expects a path" v
else if v == null then
throw "Please set either clan.self or clan.directory"
else
builtins.toString v
) lib.types.path;
default = builtins.toString self;
type = types.path;
default = self;
defaultText = "Root directory of the flake";
description = ''
The directory containing the clan.
@@ -124,10 +108,6 @@ in
# We don't specify the type here, for better performance.
inventory = lib.mkOption { type = lib.types.raw; };
inventoryValuesPrios = lib.mkOption { type = lib.types.raw; };
# all exported clan templates from this clan
templates = lib.mkOption { type = lib.types.raw; };
# all exported clan modules from this clan
modules = lib.mkOption { type = lib.types.raw; };
# all inventory module schemas
moduleSchemas = lib.mkOption { type = lib.types.raw; };
inventoryFile = lib.mkOption { type = lib.types.raw; };
@@ -137,7 +117,6 @@ in
clanModules = lib.mkOption { type = lib.types.raw; };
source = lib.mkOption { type = lib.types.raw; };
meta = lib.mkOption { type = lib.types.raw; };
lib = lib.mkOption { type = lib.types.raw; };
all-machines-json = lib.mkOption { type = lib.types.raw; };
machines = lib.mkOption { type = lib.types.raw; };
machinesFunc = lib.mkOption { type = lib.types.raw; };

View File

@@ -1,4 +1,3 @@
# NixOS module
{
config,
clan-core,
@@ -44,7 +43,8 @@ let
# { ${machineName} :: Config }
serviceConfigs = (
buildInventory {
inherit inventory directory;
inherit inventory;
inherit directory;
}
);
@@ -76,7 +76,7 @@ let
(machines.${name} or { })
# Inherit the inventory assertions ?
# { inherit (mergedInventory) assertions; }
{ imports = serviceConfigs.machines.${name}.machineImports or [ ]; }
{ imports = serviceConfigs.${name} or [ ]; }
(
{
# Settings
@@ -167,11 +167,9 @@ let
(builtins.fromJSON (builtins.readFile inventoryFile))
else
{ };
in
{
imports = [
./auto-imports.nix
# Merge the inventory file
{
inventory = _: {
@@ -183,7 +181,7 @@ in
{
inventory.machines = lib.optionalAttrs (builtins.pathExists "${directory}/machines") (
builtins.mapAttrs (_n: _v: { }) (
lib.filterAttrs (_: t: t == "directory") (builtins.readDir "${directory}/machines")
(lib.filterAttrs (_: t: t == "directory") (builtins.readDir "${directory}/machines"))
)
);
}
@@ -191,9 +189,7 @@ in
inventory.machines = lib.mapAttrs (_n: _: { }) config.machines;
}
# Merge the meta attributes from the buildClan function
{
inventory.modules = clan-core.clanModules;
}
{ inventory.modules = clan-core.clanModules; }
# config.inventory.meta <- config.meta
{ inventory.meta = config.meta; }
# Set default for computed tags
@@ -212,14 +208,8 @@ in
builtins.removeAttrs (clan-core.lib.values.getPrios { options = inventory.options; })
# tags are freeformType which is not supported yet.
[ "tags" ];
modules = config.modules;
templates = config.templates;
inventory = config.inventory;
meta = config.inventory.meta;
lib = {
inherit (clan-core.lib) select;
};
source = "${clan-core}";

View File

@@ -10,52 +10,24 @@ let
inherit lib nixpkgs clan-core;
self = ./.;
};
# Shallowly force all attribute values to be evaluated.
shallowForceAllAttributes = lib.foldlAttrs (
_acc: _name: value:
lib.seq value true
) true;
in
#######
{
test_missing_self =
let
config = buildClan {
meta.name = "test";
imports = [ ./module.nix ];
};
in
{
expr = shallowForceAllAttributes config;
expectedError = {
type = "ThrownError";
msg = "A definition for option `directory' is not of type `path*";
};
};
test_only_required =
let
config = evalClan {
self = {
inputs = { };
outPath = ./.;
};
meta.name = "test";
imports = [ ./module.nix ];
};
in
{
expr = shallowForceAllAttributes config;
expr = config.inventory ? meta;
expected = true;
};
test_all_simple =
let
config = evalClan {
self = {
inputs = { };
};
directory = ./.;
machines = { };
inventory = {
@@ -71,10 +43,6 @@ in
test_outputs_clanInternals =
let
config = evalClan {
self = {
inputs = { };
};
directory = ./.;
imports = [
# What the user needs to specify
{
@@ -100,9 +68,6 @@ in
test_fn_simple =
let
result = buildClan {
self = {
inputs = { };
};
directory = ./.;
meta.name = "test";
};
@@ -119,9 +84,6 @@ in
test_fn_extensiv_meta =
let
result = buildClan {
self = {
inputs = { };
};
directory = ./.;
meta.name = "test";
meta.description = "test";
@@ -142,9 +104,6 @@ in
test_fn_clan_core =
let
result = buildClan {
self = {
inputs = { };
};
directory = ../../.;
meta.name = "test-clan-core";
};
@@ -160,9 +119,6 @@ in
test_buildClan_all_machines =
let
result = buildClan {
self = {
inputs = { };
};
directory = ./.;
meta.name = "test";
inventory.machines.machine1.meta.name = "machine1";
@@ -182,9 +138,6 @@ in
test_buildClan_specialArgs =
let
result = buildClan {
self = {
inputs = { };
};
directory = ./.;
meta.name = "test";
specialArgs.foo = "dream2nix";

View File

@@ -21,5 +21,4 @@ in
inherit lib;
self = clan-core;
};
select = import ./select.nix;
}

View File

@@ -1,18 +0,0 @@
{ self, ... }:
let
nixFilter = import ./nix-filter.nix;
in
{
flake.filter =
{
include ? [ ],
exclude ? [ ],
}:
nixFilter.filter {
inherit exclude;
include = include ++ [
"flake.nix"
];
root = self;
};
}

View File

@@ -1,193 +0,0 @@
/*
MIT License
Copyright (c) 2021 Numtide
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
# This is a pure and self-contained library
rec {
# Default to filter when calling this lib.
__functor = _self: filter;
# A proper source filter
filter =
{
# Base path to include
root,
# Derivation name
name ? "source",
# Only include the following path matches.
#
# Allows all files by default.
include ? [
(
_: _: _:
true
)
],
# Ignore the following matches
exclude ? [ ],
}:
assert _pathIsDirectory root;
let
callMatcher = args: _toMatcher ({ inherit root; } // args);
include_ = map (callMatcher { matchParents = true; }) include;
exclude_ = map (callMatcher { matchParents = false; }) exclude;
in
builtins.path {
inherit name;
path = root;
filter =
path: type: (builtins.any (f: f path type) include_) && (!builtins.any (f: f path type) exclude_);
};
# Match a directory and any path inside of it
inDirectory =
directory: args:
let
# Convert `directory` to a path to clean user input.
directory_ = _toCleanPath args.root directory;
in
path: _type:
directory_ == path
# Add / to the end to make sure we match a full directory prefix
|| _hasPrefix (directory_ + "/") path;
# Match any directory
isDirectory =
_: _: type:
type == "directory";
# Combines matchers
and =
a: b: args:
let
toMatcher = _toMatcher args;
in
path: type: (toMatcher a path type) && (toMatcher b path type);
# Combines matchers
or_ =
a: b: args:
let
toMatcher = _toMatcher args;
in
path: type: (toMatcher a path type) || (toMatcher b path type);
# Or is actually a keyword, but can also be used as a key in an attrset.
or = or_;
# Match paths with the given extension
matchExt =
ext: _args: path: _type:
_hasSuffix ".${ext}" path;
# Filter out files or folders with this exact name
matchName =
name: _root: path: _type:
builtins.baseNameOf path == name;
# Wrap a matcher with this to debug its results
debugMatch =
label: fn: args: path: type:
let
ret = fn args path type;
retStr = if ret then "true" else "false";
in
builtins.trace "label=${label} path=${path} type=${type} ret=${retStr}" ret;
# Add this at the end of the include or exclude, to trace all the unmatched paths
traceUnmatched =
_args: path: type:
builtins.trace "unmatched path=${path} type=${type}" false;
# Lib stuff
# If an argument to include or exclude is a path, transform it to a matcher.
#
# This probably needs more work, I don't think that it works on
# sub-folders.
_toMatcher =
args: f:
let
path_ = _toCleanPath args.root f;
pathIsDirectory = _pathIsDirectory path_;
in
if builtins.isFunction f then
f args
else
path: type:
(if pathIsDirectory then inDirectory path_ args path type else path_ == path)
|| args.matchParents && type == "directory" && _hasPrefix "${path}/" path_;
# Makes sure a path is:
# * absolute
# * doesn't contain superfluous slashes or ..
#
# Returns a string so there is no risk of adding it to the store by mistake.
_toCleanPath =
absPath: path:
assert _pathIsDirectory absPath;
if builtins.isPath path then
toString path
else if builtins.isString path then
if builtins.substring 0 1 path == "/" then path else toString (absPath + ("/" + path))
else
throw "unsupported type ${builtins.typeOf path}, expected string or path";
_hasSuffix =
# Suffix to check for
suffix:
# Input string
content:
let
lenContent = builtins.stringLength content;
lenSuffix = builtins.stringLength suffix;
in
lenContent >= lenSuffix && builtins.substring (lenContent - lenSuffix) lenContent content == suffix;
_hasPrefix =
# Prefix to check for
prefix:
# Input string
content:
let
lenPrefix = builtins.stringLength prefix;
in
prefix == builtins.substring 0 lenPrefix content;
# Returns true if the path exists and is a directory and false otherwise
_pathIsDirectory =
p:
let
parent = builtins.dirOf p;
base = builtins.unsafeDiscardStringContext (builtins.baseNameOf p);
inNixStore = builtins.storeDir == toString parent;
in
# If the parent folder is /nix/store, we assume p is a directory. Because
# reading /nix/store is very slow, and not allowed in every environments.
inNixStore
|| (
builtins.pathExists p
&& (builtins.readDir parent).${builtins.unsafeDiscardStringContext base} == "directory"
);
}

View File

@@ -4,14 +4,8 @@
self,
...
}:
let
inherit (lib)
filter
pathExists
;
in
{
imports = filter pathExists [
imports = [
./jsonschema/flake-module.nix
./inventory/flake-module.nix
./build-clan/flake-module.nix

View File

@@ -68,7 +68,7 @@ let
if (builtins.pathExists readme) then
(builtins.readFile readme)
else
throw "No README.md found for module ${modulename} (expected at ${readme})";
throw "No README.md found for module ${modulename}";
in
readmeContents;

View File

@@ -63,7 +63,7 @@ in
constraints = mkOption {
default = { };
description = ''
Constraints for the module
Contraints for the module
The following example requires exactly one `server`
and supports up to `7` clients

View File

@@ -28,10 +28,10 @@ let
);
in
if tagMembers == [ ] then
lib.warn ''
throw ''
inventory.services.${serviceName}.${instanceName}: - ${roleName} tags: no machine with tag '${tag}' found.
Available tags: ${builtins.toJSON (lib.unique availableTags)}
'' [ ]
''
else
acc ++ tagMembers
) [ ] members.tags or [ ]);
@@ -42,208 +42,18 @@ let
builtins.elem "inventory"
(clan-core.lib.modules.getFrontmatter modulepath serviceName).features or [ ];
compileMachine =
{ machineConfig }:
{
machineImports = [
(lib.optionalAttrs (machineConfig.deploy.targetHost or null != null) {
config.clan.core.networking.targetHost = machineConfig.deploy.targetHost;
})
];
assertions = { };
};
compileServicesForMachine =
# Returns a NixOS configuration for the machine 'machineName'.
# Return Format: { imports = [ ... ]; config = { ... }; options = { ... } }
{
machineName,
inventory,
directory,
}:
let
compileServiceModules =
serviceName: serviceConfigs:
let
supportedRoles = clan-core.lib.modules.getRoles inventory.modules serviceName;
firstRole = import (getRoleFile (builtins.head supportedRoles));
loadModuleForClassCheck =
m:
if lib.isFunction m then
let
args = lib.functionArgs m;
in
m args
else
m;
isClanModule =
let
module = loadModuleForClassCheck firstRole;
in
if module ? _class then module._class == "clan" else false;
getRoleFile = role: builtins.seq role inventory.modules.${serviceName} + "/roles/${role}.nix";
resolvedRolesPerInstance = lib.mapAttrs (
instanceName: instanceConfig:
let
resolvedRoles = lib.genAttrs supportedRoles (
roleName:
resolveTags {
members = instanceConfig.roles.${roleName} or { };
inherit
instanceName
serviceName
roleName
inventory
;
}
);
usedRoles = builtins.attrNames instanceConfig.roles;
unmatchedRoles = builtins.filter (role: !builtins.elem role supportedRoles) usedRoles;
in
if unmatchedRoles != [ ] then
throw ''
Service: '${serviceName}' Instance: '${instanceName}'
The following roles do not exist: ${builtins.toJSON unmatchedRoles}
Please use one of available roles: ${builtins.toJSON supportedRoles}
''
else
resolvedRoles
) serviceConfigs;
machinesRoles = builtins.zipAttrsWith (
_n: vs:
let
flat = builtins.foldl' (acc: s: acc ++ s.machines) [ ] vs;
in
lib.unique flat
) (builtins.attrValues resolvedRolesPerInstance);
matchedRoles = builtins.attrNames (
lib.filterAttrs (_: ms: builtins.elem machineName ms) machinesRoles
);
in
# roleImports = lib.mapAttrsToList (
# roleName: _: inventory.modules.${serviceName} + "/roles/${roleName}.nix"
# ) (lib.filterAttrs (_: ms: builtins.elem machineName ms) machinesRoles);
# CompiledService :: { machineImports :: []; machineRoles :: [ String ] }
{
inherit
machinesRoles
matchedRoles
resolvedRolesPerInstance
firstRole
isClanModule
supportedRoles
;
# TODO: Add other attributes
machineImports =
if isClanModule then
throw "Clan modules are not supported yet."
else
(lib.foldlAttrs (
# [ Modules ], String, ServiceConfig
acc2: instanceName: serviceConfig:
let
resolvedRoles = lib.genAttrs supportedRoles (
roleName:
resolveTags {
members = serviceConfig.roles.${roleName} or { };
inherit
serviceName
instanceName
roleName
inventory
;
}
);
isInService = builtins.any (members: builtins.elem machineName members.machines) (
builtins.attrValues resolvedRoles
);
# all roles where the machine is present
machineRoles = builtins.attrNames (
lib.filterAttrs (_role: roleConfig: builtins.elem machineName roleConfig.machines) resolvedRoles
);
machineServiceConfig = (serviceConfig.machines.${machineName} or { }).config or { };
globalConfig = serviceConfig.config or { };
globalExtraModules = serviceConfig.extraModules or [ ];
machineExtraModules = serviceConfig.machines.${machineName}.extraModules or [ ];
roleServiceExtraModules = builtins.foldl' (
acc: role: acc ++ serviceConfig.roles.${role}.extraModules or [ ]
) [ ] machineRoles;
# TODO: maybe optimize this dont lookup the role in inverse roles. Imports are not lazy
roleModules = builtins.map (
role:
if builtins.elem role supportedRoles && inventory.modules ? ${serviceName} then
getRoleFile role
else
throw "Module ${serviceName} doesn't have role: '${role}'. Role: ${
inventory.modules.${serviceName}
}/roles/${role}.nix not found."
) machineRoles;
roleServiceConfigs = builtins.filter (m: m != { }) (
builtins.map (role: serviceConfig.roles.${role}.config or { }) machineRoles
);
extraModules = map (s: if builtins.typeOf s == "string" then "${directory}/${s}" else s) (
globalExtraModules ++ machineExtraModules ++ roleServiceExtraModules
);
nonExistingRoles = builtins.filter (role: !(builtins.elem role supportedRoles)) (
builtins.attrNames (serviceConfig.roles or { })
);
constraintAssertions = clan-core.lib.modules.checkConstraints {
moduleName = serviceName;
allModules = inventory.modules;
inherit resolvedRoles instanceName;
};
in
if (nonExistingRoles != [ ]) then
throw "Roles ${builtins.toString nonExistingRoles} are not defined in the service ${serviceName}."
else if !(serviceConfig.enabled or true) then
acc2
else if isInService then
acc2
++ [
{
imports = roleModules ++ extraModules;
clan.inventory.assertions = constraintAssertions;
clan.inventory.services.${serviceName}.${instanceName} = {
roles = resolvedRoles;
# TODO: Add inverseRoles to the service config if needed
# inherit inverseRoles;
};
}
(lib.optionalAttrs (globalConfig != { } || machineServiceConfig != { } || roleServiceConfigs != [ ])
{
clan.${serviceName} = lib.mkMerge (
[
globalConfig
machineServiceConfig
]
++ roleServiceConfigs
);
}
)
]
else
acc2
) [ ] (serviceConfigs));
assertions = lib.mapAttrs' (name: value: {
name = "checkservice.${serviceName}.${name}";
value = {
extendMachine =
{ machineConfig, inventory }:
[
(lib.optionalAttrs (machineConfig.deploy.targetHost or null != null) {
config.clan.core.networking.targetHost = machineConfig.deploy.targetHost;
})
{
assertions = lib.foldlAttrs (
acc: serviceName: _serviceConfigs:
acc
++ [
{
assertion = checkService inventory.modules.${serviceName} serviceName;
message = ''
Service ${serviceName} cannot be used in inventory. It does not declare the 'inventory' feature.
@@ -260,75 +70,147 @@ let
Also make sure to test the module with the 'inventory' feature enabled.
'';
};
}) inventory.services;
};
}
]
) [ ] inventory.services;
}
];
in
lib.mapAttrs compileServiceModules inventory.services;
mapMachineConfigToNixOSConfig =
# Returns a NixOS configuration for the machine 'machineName'.
# Return Format: { imports = [ ... ]; config = { ... }; options = { ... } }
{
machineName,
machineConfig,
inventory,
directory,
}:
lib.foldlAttrs (
# [ Modules ], String, { ${instance_name} :: ServiceConfig }
initialServiceModules: serviceName: serviceConfigs:
initialServiceModules
# Collect service config
++ (lib.foldlAttrs (
# [ Modules ], String, ServiceConfig
acc2: instanceName: serviceConfig:
let
roles = clan-core.lib.modules.getRoles inventory.modules serviceName;
resolvedRoles = lib.genAttrs roles (
roleName:
resolveTags {
members = serviceConfig.roles.${roleName} or { };
inherit
serviceName
instanceName
roleName
inventory
;
}
);
isInService = builtins.any (members: builtins.elem machineName members.machines) (
builtins.attrValues resolvedRoles
);
# all roles where the machine is present
machineRoles = builtins.attrNames (
lib.filterAttrs (_role: roleConfig: builtins.elem machineName roleConfig.machines) resolvedRoles
);
machineServiceConfig = (serviceConfig.machines.${machineName} or { }).config or { };
globalConfig = serviceConfig.config or { };
globalExtraModules = serviceConfig.extraModules or [ ];
machineExtraModules = serviceConfig.machines.${machineName}.extraModules or [ ];
roleServiceExtraModules = builtins.foldl' (
acc: role: acc ++ serviceConfig.roles.${role}.extraModules or [ ]
) [ ] machineRoles;
# TODO: maybe optimize this dont lookup the role in inverse roles. Imports are not lazy
roleModules = builtins.map (
role:
if builtins.elem role roles && inventory.modules ? ${serviceName} then
inventory.modules.${serviceName} + "/roles/${role}.nix"
else
throw "Module ${serviceName} doesn't have role: '${role}'. Role: ${
inventory.modules.${serviceName}
}/roles/${role}.nix not found."
) machineRoles;
roleServiceConfigs = builtins.filter (m: m != { }) (
builtins.map (role: serviceConfig.roles.${role}.config or { }) machineRoles
);
extraModules = map (s: if builtins.typeOf s == "string" then "${directory}/${s}" else s) (
globalExtraModules ++ machineExtraModules ++ roleServiceExtraModules
);
nonExistingRoles = builtins.filter (role: !(builtins.elem role roles)) (
builtins.attrNames (serviceConfig.roles or { })
);
constraintAssertions = clan-core.lib.modules.checkConstraints {
moduleName = serviceName;
allModules = inventory.modules;
inherit resolvedRoles instanceName;
};
in
if (nonExistingRoles != [ ]) then
throw "Roles ${builtins.toString nonExistingRoles} are not defined in the service ${serviceName}."
else if !(serviceConfig.enabled or true) then
acc2
else if isInService then
acc2
++ [
{
imports = roleModules ++ extraModules;
clan.inventory.assertions = constraintAssertions;
clan.inventory.services.${serviceName}.${instanceName} = {
roles = resolvedRoles;
# TODO: Add inverseRoles to the service config if needed
# inherit inverseRoles;
};
}
(lib.optionalAttrs (globalConfig != { } || machineServiceConfig != { } || roleServiceConfigs != [ ])
{
clan.${serviceName} = lib.mkMerge (
[
globalConfig
machineServiceConfig
]
++ roleServiceConfigs
);
}
)
]
else
acc2
) [ ] (serviceConfigs))
) [ ] inventory.services
# Global extension for each machine
++ (extendMachine { inherit machineConfig inventory; });
/*
Returns a set with NixOS configuration for every machine in the inventory.
Returns a NixOS configuration for every machine in the inventory.
machinesFromInventory :: Inventory -> { ${machine_name} :: NixOSConfiguration }
*/
buildInventory =
{ inventory, directory }:
(lib.evalModules {
specialArgs = {
inherit directory inventory;
};
modules = [
./internal.nix
(
{ ... }:
{
machines = builtins.mapAttrs (
machineName: machineConfig:
let
compiledServices = compileServicesForMachine {
inherit
machineName
inventory
directory
;
};
compiledMachine = compileMachine {
inherit
machineConfig
;
};
machineImports =
compiledMachine.machineImports
++ builtins.foldl' (
acc: service:
let
failedAssertions = (lib.filterAttrs (_: v: !v.assertion) service.assertions);
failedAssertionsImports =
if failedAssertions != { } then
[
{
clan.inventory.assertions = failedAssertions;
}
]
else
[ ];
in
acc
++ service.machineImports
# Import failed assertions
++ failedAssertionsImports
) [ ] (builtins.attrValues compiledServices);
in
{
inherit machineImports compiledServices compiledMachine;
}
) (inventory.machines or { });
}
)
];
}).config;
# For every machine in the inventory, build a NixOS configuration
# For each machine generate config, forEach service, if the machine is used.
builtins.mapAttrs (
machineName: machineConfig:
mapMachineConfigToNixOSConfig {
inherit
machineName
machineConfig
inventory
directory
;
}
) (inventory.machines or { });
in
{
inherit buildInventory;

View File

@@ -92,6 +92,7 @@ let
};
in
{
imports = [
./assertions.nix
];

View File

@@ -1,24 +0,0 @@
{ lib, ... }:
let
inherit (lib) types mkOption;
submodule = m: types.submoduleWith { modules = [ m ]; };
in
{
options = {
machines = mkOption {
type = types.attrsOf (submodule {
options = {
compiledMachine = mkOption {
type = types.raw;
};
compiledServices = mkOption {
type = types.raw;
};
machineImports = mkOption {
type = types.raw;
};
};
});
};
};
}

View File

@@ -42,23 +42,11 @@ in
checks = {
lib-inventory-eval = pkgs.runCommand "tests" { nativeBuildInputs = [ pkgs.nix-unit ]; } ''
export HOME="$(realpath .)"
export NIX_ABORT_ON_WARN=1
nix-unit --eval-store "$HOME" \
--extra-experimental-features flakes \
${inputOverrides} \
--flake ${
self.filter {
include = [
"flakeModules"
"lib/default.nix"
"lib/flake-module.nix"
"lib/inventory"
"lib/frontmatter"
"clanModules/flake-module.nix"
"clanModules/borgbackup"
];
}
}#legacyPackages.${system}.evalTests-inventory
--flake ${self}#legacyPackages.${system}.evalTests-inventory
touch $out
'';

View File

@@ -1,4 +0,0 @@
---
features = [ "inventory" ]
---
Description

View File

@@ -1,6 +0,0 @@
{ ... }:
{
_class = "clan";
perInstance = { };
perService = { };
}

View File

@@ -9,59 +9,17 @@ let
inherit (inventory) buildInventory;
in
{
test_inventory_a =
let
compiled = buildInventory {
inventory = {
machines = {
A = { };
};
services = {
clanModule = { };
legacyModule = { };
};
modules = {
clanModule = ./clanModule;
legacyModule = ./legacyModule;
};
};
directory = ./.;
};
in
{
expr = {
clanModule = lib.filterAttrs (
name: _: name == "isClanModule"
) compiled.machines.A.compiledServices.clanModule;
legacyModule = lib.filterAttrs (
name: _: name == "isClanModule"
) compiled.machines.A.compiledServices.legacyModule;
};
expected = {
clanModule = {
isClanModule = true;
};
legacyModule = {
isClanModule = false;
};
};
test_inventory_empty = {
# Empty inventory should return an empty module
expr = buildInventory {
inventory = { };
directory = ./.;
};
test_inventory_empty =
expected = { };
};
test_inventory_role_imports =
let
compiled = buildInventory {
inventory = { };
directory = ./.;
};
in
{
# Empty inventory should return an empty module
expr = compiled.machines;
expected = { };
};
test_inventory_role_resolve =
let
compiled = buildInventory {
configs = buildInventory {
directory = ./.;
inventory = {
modules = clan-core.clanModules;
@@ -84,37 +42,21 @@ in
in
{
expr = {
m1 = (compiled.machines."backup_server").compiledServices.borgbackup.matchedRoles;
m2 = (compiled.machines."client_1_machine").compiledServices.borgbackup.matchedRoles;
m3 = (compiled.machines."client_2_machine").compiledServices.borgbackup.matchedRoles;
inherit ((compiled.machines."client_2_machine").compiledServices.borgbackup)
resolvedRolesPerInstance
;
server_imports = (builtins.head configs."backup_server").imports;
client_1_imports = (builtins.head configs."client_1_machine").imports;
client_2_imports = (builtins.head configs."client_2_machine").imports;
};
expected = {
m1 = [
"server"
server_imports = [
(clan-core.clanModules.borgbackup + "/roles/server.nix")
];
m2 = [
"client"
client_1_imports = [
(clan-core.clanModules.borgbackup + "/roles/client.nix")
];
m3 = [
"client"
client_2_imports = [
(clan-core.clanModules.borgbackup + "/roles/client.nix")
];
resolvedRolesPerInstance = {
instance_1 = {
client = {
machines = [
"client_1_machine"
"client_2_machine"
];
};
server = {
machines = [ "backup_server" ];
};
};
};
};
};
test_inventory_tag_resolve =
@@ -141,19 +83,19 @@ in
};
in
{
expr = configs.machines.client_1_machine.compiledServices.borgbackup.resolvedRolesPerInstance;
expr = {
# A machine that includes the backup service should have 3 imports
# - one for some service agnostic properties of the machine itself
# - One for the service itself (default.nix)
# - one for the role (roles/client.nix)
client_1_machine = builtins.length configs.client_1_machine;
client_2_machine = builtins.length configs.client_2_machine;
not_used_machine = builtins.length configs.not_used_machine;
};
expected = {
instance_1 = {
client = {
machines = [
"client_1_machine"
"client_2_machine"
];
};
server = {
machines = [ ];
};
};
client_1_machine = 4;
client_2_machine = 4;
not_used_machine = 2;
};
};
@@ -176,11 +118,15 @@ in
};
in
{
expr = configs.machines.machine_1.compiledServices.borgbackup.matchedRoles;
expected = [
"client"
"server"
];
expr = {
machine_1_imports = (builtins.head configs."machine_1").imports;
};
expected = {
machine_1_imports = [
(clan-core.clanModules.borgbackup + "/roles/client.nix")
(clan-core.clanModules.borgbackup + "/roles/server.nix")
];
};
};
test_inventory_module_doesnt_exist =
@@ -201,8 +147,7 @@ in
};
in
{
inherit configs;
expr = configs.machines.machine_1.machineImports;
expr = configs;
expectedError = {
type = "ThrownError";
msg = "ClanModule not found*";
@@ -227,15 +172,12 @@ in
};
in
{
inherit configs;
expr = configs.machines.machine_1.machineImports;
expr = configs;
expectedError = {
type = "ThrownError";
msg = "Roles roleXYZ are not defined in the service borgbackup.";
};
};
# Needs NIX_ABORT_ON_WARN=1
# So the lib.warn is turned into abort
test_inventory_tag_doesnt_exist =
let
configs = buildInventory {
@@ -257,11 +199,10 @@ in
};
in
{
expr = configs.machines.machine_1.machineImports;
expr = configs;
expectedError = {
type = "Error";
# TODO: Add warning matching in nix-unit
msg = ".*";
type = "ThrownError";
msg = "no machine with tag '\\w+' found";
};
};
test_inventory_disabled_service =
@@ -285,8 +226,13 @@ in
};
in
{
inherit configs;
expr = builtins.filter (v: v != { }) configs.machines.machine_1.machineImports;
expected = [ ];
expr = {
machine_1_config = (builtins.head configs."machine_1");
};
expected = {
# Empty config
machine_1_config = { };
};
};
}

View File

@@ -1,4 +0,0 @@
---
features = [ "inventory" ]
---
Description

View File

@@ -1,10 +0,0 @@
{
lib,
config,
clan-core,
...
}:
{
# Just some random stuff
config.user.user = lib.mapAttrs clan-core.users.root;
}

View File

@@ -1,68 +0,0 @@
# A jq/JSONPath-like selector for Nix values. The file evaluates to a
# function `selector: target: ...` where `selector` is a dot-separated
# string such as `machines.*.config`, `list.3.name`, `a.{x,y}.z`, or
# `a."quoted.key".b`, and `target` is the list/attrset to drill into.
let
  # Walk `target`, applying one component of the parsed selector list per
  # recursion step. `selectorIndex` is the position currently being applied.
  recursiveSelect =
    selectorIndex: selectorList: target:
    let
      selector = builtins.elemAt selectorList selectorIndex;
    in
    # selector is empty, we are done
    if selectorIndex + 1 > builtins.length selectorList then
      target
    else if builtins.isList target then
      # support bla.* for lists and recurse into all elements
      if selector == "*" then
        builtins.map (v: recursiveSelect (selectorIndex + 1) selectorList v) target
      # support bla.3 for lists and recurse into the 4th element
      # NOTE(review): `match "[[:digit:]]*"` also accepts the empty string,
      # which would make `fromJSON` below fail — presumably parseSelector
      # never yields empty components; confirm.
      else if (builtins.match "[[:digit:]]*" selector) == [ ] then
        recursiveSelect (selectorIndex + 1) selectorList (
          builtins.elemAt target (builtins.fromJSON selector)
        )
      else
        throw "only * or a number is allowed in list selector"
    else if builtins.isAttrs target then
      # handle the case bla.x.*.z where x is an attrset and we recurse into all elements
      if selector == "*" then
        builtins.mapAttrs (_: v: recursiveSelect (selectorIndex + 1) selectorList v) target
      # support bla.{x,y,z}.world where we get world from each of x, y and z
      else if (builtins.match ''^\{([^}]*)}$'' selector) != null then
        let
          # Names listed inside the braces, e.g. "{x,y,z}" -> [ "x" "y" "z" ].
          attrsAsList = (
            builtins.filter (x: !builtins.isList x) (
              builtins.split "," (builtins.head (builtins.match ''^\{([^}]*)}$'' selector))
            )
          );
          # Attrset with the requested names as keys; used only as the first
          # argument to `intersectAttrs` to pick those keys out of `target`.
          dummyAttrSet = builtins.listToAttrs (
            map (x: {
              name = x;
              value = null;
            }) attrsAsList
          );
          filteredAttrs = builtins.intersectAttrs dummyAttrSet target;
        in
        builtins.mapAttrs (_: v: recursiveSelect (selectorIndex + 1) selectorList v) filteredAttrs
      else
        # Plain attribute name: descend one level.
        recursiveSelect (selectorIndex + 1) selectorList (builtins.getAttr selector target)
    else
      throw "Expected a list or an attrset";
  # Split a selector string into components, honoring double quotes so that
  # `a."b.c".d` yields [ "a" "b.c" "d" ] instead of splitting on the inner dot.
  parseSelector =
    selector:
    let
      splitByQuote = x: builtins.filter (x: !builtins.isList x) (builtins.split ''"'' x);
      # Split an unquoted fragment on dots, dropping empty components.
      splitByDot =
        x:
        builtins.filter (x: x != "") (
          map (builtins.replaceStrings [ "." ] [ "" ]) (
            builtins.filter (x: !builtins.isList x) (builtins.split ''\.'' x)
          )
        );
      # After splitting on quotes the fragments alternate unquoted/quoted:
      # quoted fragments are kept verbatim, unquoted ones are split on dots.
      handleQuoted =
        x: if x == [ ] then [ ] else [ (builtins.head x) ] ++ handleUnquoted (builtins.tail x);
      handleUnquoted =
        x: if x == [ ] then [ ] else splitByDot (builtins.head x) ++ handleQuoted (builtins.tail x);
    in
    handleUnquoted (splitByQuote selector);
in
selector: target: recursiveSelect 0 (parseSelector selector) target

View File

@@ -6,17 +6,8 @@ let
in
{
perSystem =
{ pkgs, system, ... }:
{
pkgs,
system,
lib,
...
}:
let
tests = import ./test.nix { inherit lib; };
in
{
legacyPackages.evalTests-values = tests;
checks = {
lib-values-eval = pkgs.runCommand "tests" { nativeBuildInputs = [ pkgs.nix-unit ]; } ''
export HOME="$(realpath .)"
@@ -24,16 +15,7 @@ in
nix-unit --eval-store "$HOME" \
--extra-experimental-features flakes \
${inputOverrides} \
--flake ${
self.filter {
include = [
"flakeModules"
"lib/default.nix"
"lib/flake-module.nix"
"lib/values"
];
}
}#legacyPackages.${system}.evalTests-values
--flake ${self}#legacyPackages.${system}.evalTests-inventory
touch $out
'';

View File

@@ -16,9 +16,10 @@ in
{
imports = [
./public/in_repo.nix
# ./public/vm.nix
./secret/password-store.nix
./secret/sops
./secret/vm.nix
# ./secret/vm.nix
];
options.clan.core.vars = lib.mkOption {
description = ''
@@ -42,10 +43,9 @@ in
dependencies
validationHash
migrateFact
share
prompts
share
;
files = lib.flip lib.mapAttrs generator.files (
_name: file: {
inherit (file)

View File

@@ -24,14 +24,7 @@ in
nix-unit --eval-store "$HOME" \
--extra-experimental-features flakes \
${inputOverrides} \
--flake ${
self.filter {
include = [
"flakeModules"
"nixosModules"
];
}
}#legacyPackages.${system}.evalTests-module-clan-vars
--flake ${self}#legacyPackages.${system}.evalTests-module-clan-vars
touch $out
'';

View File

@@ -21,7 +21,7 @@ let
promptsToFilesScript = concatMapStrings promptToFile;
filePromptNames = attrNames (filterAttrs (_name: prompt: prompt.persist) config.prompts);
filePromptNames = attrNames (filterAttrs (_name: prompt: prompt.createFile) config.prompts);
in
{
finalScript = mkOptionDefault (

View File

@@ -60,10 +60,7 @@ in
description = ''
A list of other generators that this generator depends on.
The output values of these generators will be available to the generator script as files.
For example:
**A file `file1` of a generator named `dep1` will be available via `$in/dep1/file1`**
For example, the file 'file1' of a dependency named 'dep1' will be available via $in/dep1/file1.
'';
type = listOf str;
default = [ ];
@@ -207,14 +204,11 @@ in
description = ''
This option determines when the secret will be decrypted and deployed to the target machine.
By setting this to `partitioning`, the secret will be deployed prior to running `disko` allowing
you to manage filesystem encryption keys. These will only be deployed when installing the system.
By setting this to `activation`, the secret will be deployed prior to running `nixos-rebuild` or `nixos-install`.
By setting this to `user`, the secret will be deployed prior to users and groups are created, allowing
users' passwords to be managed by vars. The secret will be stored in `/run/secrets-for-users` and `owner` and `group` must be `root`.
'';
type = lib.types.enum [
"partitioning"
"activation"
"users"
"services"
@@ -269,7 +263,7 @@ in
default = prompt.config._module.args.name;
defaultText = "Name of the prompt";
};
persist = lib.mkOption {
createFile = lib.mkOption {
description = ''
Whether the prompted value should be stored in a file with the same name as the prompt.
@@ -282,7 +276,7 @@ in
```
'';
type = bool;
default = false;
default = true;
};
description = lib.mkOption {
description = ''
@@ -324,11 +318,9 @@ in
description = ''
The script to run to generate the files.
The script will be run with the following environment variables:
- $in: The directory containing the output values of all declared dependencies
- $out: The output directory to put the generated files
- $prompts: The directory containing the prompted values as files
- $in: The directory containing the output values of all declared dependencies
- $out: The output directory to put the generated files
- $prompts: The directory containing the prompted values as files
The script should produce the files specified in the 'files' attribute under $out.
'';
type = either str path;

View File

@@ -30,7 +30,7 @@ in
readFile file.config.path
else
# if the file is not found, we want to downgrade the priority, to allow overriding via mkDefault
mkOptionDefault (throw "File not found: ${file.config.path}")
mkOptionDefault (readFile file.config.path)
);
};
};

View File

@@ -15,7 +15,6 @@ let
];
text = ''
set -efu -o pipefail
set -x
src=$1
target=$2
@@ -29,7 +28,7 @@ let
mount --bind --make-private "$target".tmp "$target".tmp
mount --bind --make-private "$target" "$target"
tar -xf "$src" -C "$target".tmp
move-mount --beneath --move "$target".tmp "$target"
move-mount --beneath --move "$target".tmp "$target" 2>/dev/null
umount -R "$target".tmp
rmdir "$target".tmp
umount --lazy "$target"
@@ -42,7 +41,6 @@ let
useSystemdActivation =
(options.systemd ? sysusers && config.systemd.sysusers.enable)
|| (options.services ? userborn && config.services.userborn.enable);
normalSecrets = lib.any (
gen: lib.any (file: file.neededFor == "services") (lib.attrValues gen.files)
) (lib.attrValues config.clan.core.vars.generators);
@@ -76,9 +74,7 @@ in
else if file.config.neededFor == "services" then
"/run/secrets/${file.config.generatorName}/${file.config.name}"
else if file.config.neededFor == "activation" then
"${config.clan.password-store.secretLocation}/activation/${file.config.generatorName}/${file.config.name}"
else if file.config.neededFor == "partitioning" then
"/run/partitioning-secrets/${file.config.generatorName}/${file.config.name}"
"${config.clan.password-store.secretLocation}/${file.config.generatorName}/${file.config.name}"
else
throw "unknown neededFor ${file.config.neededFor}";

View File

@@ -6,6 +6,8 @@
}:
let
inherit (lib) flip;
inherit (import ./funcs.nix { inherit lib; }) collectFiles;
machineName = config.clan.core.settings.machine.name;
@@ -25,10 +27,8 @@ in
# Before we generate a secret we cannot know the path yet, so we need to set it to an empty string
fileModule = file: {
path = lib.mkIf file.config.secret (
if file.config.neededFor == "partitioning" then
"/run/partitioning-secrets/${file.config.generatorName}/${file.config.name}"
else if file.config.neededFor == "activation" then
"/var/lib/sops-nix/activation/${file.config.generatorName}/${file.config.name}"
if file.config.neededFor == "activation" then
"/var/lib/sops-nix/${file.config.generatorName}/${file.config.name}"
else
config.sops.secrets.${"vars/${file.config.generatorName}/${file.config.name}"}.path
or "/no-such-path"
@@ -38,18 +38,16 @@ in
};
config.sops = lib.mkIf (config.clan.core.vars.settings.secretStore == "sops") {
secrets = lib.listToAttrs (
map (secret: {
flip map vars (secret: {
name = "vars/${secret.generator}/${secret.name}";
value = {
inherit (secret) owner group neededForUsers;
sopsFile = secretPath secret;
format = "binary";
};
}) (builtins.filter (x: builtins.pathExists (secretPath x)) vars)
})
);
# To get proper error messages about missing secrets we need a dummy secret file that is always present
defaultSopsFile = lib.mkIf config.sops.validateSopsFiles (
lib.mkDefault (builtins.toString (pkgs.writeText "dummy.yaml" ""))

View File

@@ -7,6 +7,7 @@ let
inherit (lib)
filterAttrs
flatten
flip
mapAttrsToList
;
in
@@ -17,20 +18,20 @@ in
let
relevantFiles =
generator:
filterAttrs (
_name: f: f.secret && f.deploy && (f.neededFor == "users" || f.neededFor == "services")
) generator.files;
flip filterAttrs generator.files (_name: f: f.secret && f.deploy && (f.neededFor != "activation"));
allFiles = flatten (
mapAttrsToList (
flip mapAttrsToList vars.generators (
gen_name: generator:
mapAttrsToList (fname: file: {
name = fname;
generator = gen_name;
neededForUsers = file.neededFor == "users";
inherit (generator) share;
inherit (file) owner group;
}) (relevantFiles generator)
) vars.generators
flip mapAttrsToList (relevantFiles generator) (
fname: file: {
name = fname;
generator = gen_name;
neededForUsers = file.neededFor == "users";
inherit (generator) share;
inherit (file) owner group;
}
)
)
);
in
allFiles;

View File

@@ -1,17 +0,0 @@
{
config,
lib,
...
}:
{
config.clan.core.vars.settings = lib.mkIf (config.clan.core.vars.settings.secretStore == "vm") {
fileModule = file: {
path =
if file.config.neededFor == "partitioning" then
"/run/partitioning-secrets/${file.config.generatorName}/${file.config.name}"
else
"/etc/secrets/${file.config.generatorName}/${file.config.name}";
};
secretModule = "clan_cli.vars.secret_modules.vm";
};
}

View File

@@ -55,9 +55,9 @@ class Identity:
def node_id(self) -> str:
nid = self.public.split(":")[0]
assert len(nid) == 10, (
f"node_id must be 10 characters long, got {len(nid)}: {nid}"
)
assert (
len(nid) == 10
), f"node_id must be 10 characters long, got {len(nid)}: {nid}"
return nid
@@ -172,9 +172,9 @@ def create_identity() -> Identity:
def compute_zerotier_ip(network_id: str, identity: Identity) -> ipaddress.IPv6Address:
assert len(network_id) == 16, (
f"network_id must be 16 characters long, got '{network_id}'"
)
assert (
len(network_id) == 16
), "network_id must be 16 characters long, got {network_id}"
nwid = int(network_id, 16)
node_id = int(identity.node_id(), 16)
addr_parts = bytearray(

View File

@@ -55,8 +55,6 @@ in
network-status
];
nix.settings.extra-substituters = [ "/" ];
########################################################################################################
# #
# Copied from: #

View File

@@ -0,0 +1,96 @@
# ruff: noqa: N801
import logging
from tkinter import Tk, filedialog
from clan_cli.api import ApiError, ErrorDataClass, SuccessDataClass
from clan_cli.api.directory import FileFilter, FileRequest
log = logging.getLogger(__name__)
def _apply_filters(filters: FileFilter | None) -> list[tuple[str, str]]:
    """Translate a ``FileFilter`` into the ``(label, patterns)`` tuples tkinter expects.

    Returns an empty list when no filter is given, otherwise a single-entry
    list whose label is the filter title (or "Custom Files") and whose
    patterns are all collected entries joined by spaces.
    """
    if not filters:
        return []

    patterns: list[str] = []
    if filters.mime_types:
        # Tkinter does not directly support MIME types, so this section can be adjusted
        # if you wish to handle them differently
        patterns += filters.mime_types
    if filters.patterns:
        patterns += filters.patterns
    if filters.suffixes:
        patterns += [f"*.{suffix}" for suffix in filters.suffixes]

    title = filters.title or "Custom Files"
    return [(title, " ".join(patterns))]
def open_file(
    file_request: FileRequest, *, op_key: str
) -> SuccessDataClass[list[str] | None] | ErrorDataClass:
    """Show a native tkinter file dialog and return the selected path(s).

    Dispatches on ``file_request.mode`` (open_file / select_folder / save /
    open_multiple_files). On success returns a SuccessDataClass wrapping the
    list of selected paths; any failure (including the user cancelling, which
    leaves no selection) is reported as an ErrorDataClass.
    """
    # Initialize before the try block: if Tk() itself raises, `root` would
    # otherwise be unbound and the `finally` clause would mask the real
    # error with a NameError.
    root = None
    try:
        root = Tk()
        root.withdraw()  # Hide the main window
        root.attributes("-topmost", True)  # Bring the dialogs to the front

        file_path: str = ""
        multiple_files: list[str] = []
        if file_request.mode == "open_file":
            file_path = filedialog.askopenfilename(
                title=file_request.title,
                initialdir=file_request.initial_folder,
                initialfile=file_request.initial_file,
                filetypes=_apply_filters(file_request.filters),
            )
        elif file_request.mode == "select_folder":
            file_path = filedialog.askdirectory(
                title=file_request.title, initialdir=file_request.initial_folder
            )
        elif file_request.mode == "save":
            file_path = filedialog.asksaveasfilename(
                title=file_request.title,
                initialdir=file_request.initial_folder,
                initialfile=file_request.initial_file,
                filetypes=_apply_filters(file_request.filters),
            )
        elif file_request.mode == "open_multiple_files":
            tresult = filedialog.askopenfilenames(
                title=file_request.title,
                initialdir=file_request.initial_folder,
                filetypes=_apply_filters(file_request.filters),
            )
            multiple_files = list(tresult)

        # A cancelled dialog yields an empty selection; treat it as an error.
        if len(file_path) == 0 and len(multiple_files) == 0:
            msg = "No file selected"
            raise ValueError(msg)  # noqa: TRY301

        multiple_files = [file_path] if len(multiple_files) == 0 else multiple_files
        return SuccessDataClass(op_key, status="success", data=multiple_files)
    except Exception as e:
        log.exception("Error opening file")
        return ErrorDataClass(
            op_key=op_key,
            status="error",
            errors=[
                ApiError(
                    message=e.__class__.__name__,
                    description=str(e),
                    location=["open_file"],
                )
            ],
        )
    finally:
        # Only tear down the hidden root window if it was actually created.
        if root is not None:
            root.destroy()

View File

@@ -1,200 +0,0 @@
# ruff: noqa: N801
import gi
gi.require_version("Gtk", "4.0")
import logging
import time
from pathlib import Path
from typing import Any
from clan_cli.api import ApiError, ErrorDataClass, SuccessDataClass
from clan_cli.api.directory import FileRequest
from gi.repository import Gio, GLib, Gtk
log = logging.getLogger(__name__)
def remove_none(_list: list) -> list:
    """Return a new list containing every element of ``_list`` that is not None."""
    result = []
    for item in _list:
        if item is not None:
            result.append(item)
    return result
RESULT: dict[str, SuccessDataClass[list[str] | None] | ErrorDataClass] = {}
def open_file(
    file_request: FileRequest, *, op_key: str
) -> SuccessDataClass[list[str] | None] | ErrorDataClass:
    """Schedule a GTK file dialog on the main loop and block until it reports back.

    The dialog callbacks inside ``gtk_open_file`` deposit their outcome into
    the module-level ``RESULT`` dict under ``op_key``; this function polls
    that slot, then removes and returns it.
    """
    GLib.idle_add(gtk_open_file, file_request, op_key)

    # Busy-wait until the GTK callback has stored a result for our key.
    while op_key not in RESULT or RESULT[op_key] is None:
        time.sleep(0.2)
    return RESULT.pop(op_key)
def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
    """Run one GTK file dialog for *file_request* and publish the outcome.

    Must run on the GTK main loop (it is scheduled via ``GLib.idle_add``).
    The selected path(s) — or an error — are stored in the module-level
    ``RESULT`` mailbox under *op_key*, where the thread blocked in
    ``open_file`` picks them up.

    Returns ``GLib.SOURCE_REMOVE`` so the idle source fires only once.
    """

    def returns(data: SuccessDataClass | ErrorDataClass) -> None:
        # Hand the outcome back to the thread polling in `open_file`.
        RESULT[op_key] = data

    def on_error(e: Exception) -> None:
        # Shared error path for every dialog callback: log and publish an
        # ErrorDataClass so the waiting caller unblocks.
        log.exception("Error opening file")
        returns(
            ErrorDataClass(
                op_key=op_key,
                status="error",
                errors=[
                    ApiError(
                        message=e.__class__.__name__,
                        description=str(e),
                        location=["open_file"],
                    )
                ],
            )
        )

    def on_file_select(file_dialog: Gtk.FileDialog, task: Gio.Task) -> None:
        try:
            gfile = file_dialog.open_finish(task)
            if gfile:
                returns(
                    SuccessDataClass(
                        op_key=op_key,
                        data=remove_none([gfile.get_path()]),
                        status="success",
                    )
                )
            else:
                # BUGFIX: the original had no else-branch here (unlike the
                # three sibling callbacks), so a falsy result published
                # nothing and `open_file` busy-waited forever.
                returns(SuccessDataClass(op_key=op_key, data=None, status="success"))
        except Exception as e:
            on_error(e)

    def on_file_select_multiple(file_dialog: Gtk.FileDialog, task: Gio.Task) -> None:
        try:
            gfiles: Any = file_dialog.open_multiple_finish(task)
            if gfiles:
                returns(
                    SuccessDataClass(
                        op_key=op_key,
                        data=remove_none([gfile.get_path() for gfile in gfiles]),
                        status="success",
                    )
                )
            else:
                returns(SuccessDataClass(op_key=op_key, data=None, status="success"))
        except Exception as e:
            on_error(e)

    def on_folder_select(file_dialog: Gtk.FileDialog, task: Gio.Task) -> None:
        try:
            gfile = file_dialog.select_folder_finish(task)
            if gfile:
                returns(
                    SuccessDataClass(
                        op_key=op_key,
                        data=remove_none([gfile.get_path()]),
                        status="success",
                    )
                )
            else:
                returns(SuccessDataClass(op_key=op_key, data=None, status="success"))
        except Exception as e:
            on_error(e)

    def on_save_finish(file_dialog: Gtk.FileDialog, task: Gio.Task) -> None:
        try:
            gfile = file_dialog.save_finish(task)
            if gfile:
                returns(
                    SuccessDataClass(
                        op_key=op_key,
                        data=remove_none([gfile.get_path()]),
                        status="success",
                    )
                )
            else:
                returns(SuccessDataClass(op_key=op_key, data=None, status="success"))
        except Exception as e:
            on_error(e)

    dialog = Gtk.FileDialog()
    if file_request.title:
        dialog.set_title(file_request.title)

    if file_request.filters:
        filters = Gio.ListStore.new(Gtk.FileFilter)
        file_filters = Gtk.FileFilter()
        if file_request.filters.title:
            file_filters.set_name(file_request.filters.title)
        if file_request.filters.mime_types:
            for mime in file_request.filters.mime_types:
                file_filters.add_mime_type(mime)
        if file_request.filters.patterns:
            for pattern in file_request.filters.patterns:
                file_filters.add_pattern(pattern)
        if file_request.filters.suffixes:
            for suffix in file_request.filters.suffixes:
                file_filters.add_suffix(suffix)
        # BUGFIX: append the filter exactly once. The original appended it a
        # second time when MIME types were set, duplicating the entry in the
        # dialog's filter list.
        filters.append(file_filters)
        dialog.set_filters(filters)

    if file_request.initial_file:
        p = Path(file_request.initial_file).expanduser()
        dialog.set_initial_file(Gio.File.new_for_path(str(p)))
    if file_request.initial_folder:
        p = Path(file_request.initial_folder).expanduser()
        dialog.set_initial_folder(Gio.File.new_for_path(str(p)))

    # Dispatch on the requested mode; the modes are mutually exclusive, so a
    # single if/elif chain (the original mixed a bare `if` into the chain).
    if file_request.mode == "select_folder":
        dialog.select_folder(callback=on_folder_select)
    elif file_request.mode == "open_multiple_files":
        dialog.open_multiple(callback=on_file_select_multiple)
    elif file_request.mode == "open_file":
        dialog.open(callback=on_file_select)
    elif file_request.mode == "save":
        dialog.save(callback=on_save_finish)

    return GLib.SOURCE_REMOVE

View File

@@ -12,7 +12,7 @@ from pathlib import Path
from clan_cli.api import API
from clan_cli.custom_logger import setup_logging
from clan_app.api.file_gtk import open_file
from clan_app.api.file import open_file
from clan_app.deps.webview.webview import Size, SizeHint, Webview

Binary file not shown.

After

Width:  |  Height:  |  Size: 86 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 163 KiB

View File

@@ -0,0 +1,63 @@
/* Insert custom styles here */

/* Base spacing around the main navigation container. */
navigation-view {
  padding: 5px;
  /* padding-left: 5px;
  padding-right: 5px;
  padding-bottom: 5px; */
}

/* Breathing room around user/machine avatars. */
avatar {
  margin: 2px;
}

/* Vertical padding for the trust-prompt section. */
.trust {
  padding-top: 25px;
  padding-bottom: 25px;
}

/* Tight margins for the join-request list. */
.join-list {
  margin-top: 1px;
  margin-left: 2px;
  margin-right: 2px;
}

/* Keep progress bars readable: fixed minimum width, gap on the right. */
.progress-bar {
  margin-right: 25px;
  min-width: 200px;
}

/* Group list blends into its parent background... */
.group-list {
  background-color: inherit;
}

/* ...and suppresses the default hover highlight on activatable rows. */
.group-list > .activatable:hover {
  background-color: unset;
}

/* Space group rows apart and drop the default separator line. */
.group-list > row {
  margin-top: 12px;
  border-bottom: unset;
}

/* Vertical spacing around the VM list. */
.vm-list {
  margin-top: 25px;
  margin-bottom: 25px;
}

/* Utility class to remove any box shadow. */
.no-shadow {
  box-shadow: none;
}

/* Gap below the inline search entry. */
.search-entry {
  margin-bottom: 12px;
}

/* Gap below the search bar widget. */
searchbar {
  margin-bottom: 25px;
}

/* Monospace log output pane. */
.log-view {
  margin-top: 12px;
  font-family: monospace;
  padding: 8px;
}

Some files were not shown because too many files have changed in this diff Show More