Compare commits

..

2 Commits

Author         SHA1        Message                          Date
Jörg Thalheim  41635dd350  enable warningsAreErrors in doc  2025-01-07 12:17:46 +01:00
Jörg Thalheim  16d16faa9c  fix typo                         2025-01-07 11:06:36 +01:00
320 changed files with 6610 additions and 12356 deletions

View File

@@ -8,5 +8,5 @@ jobs:
checks-impure:
runs-on: nix
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- run: nix run .#impure-checks

View File

@@ -7,7 +7,7 @@ jobs:
deploy-docs:
runs-on: nix
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- run: nix run .#deploy-docs
env:
SSH_HOMEPAGE_KEY: ${{ secrets.SSH_HOMEPAGE_KEY }}

View File

@@ -1,6 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"

View File

@@ -3,8 +3,10 @@ on:
schedule:
- cron: "39 * * * *"
workflow_dispatch:
permissions:
contents: write
jobs:
repo-sync:
if: github.repository_owner == 'clan-lol'
@@ -13,15 +15,10 @@ jobs:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: actions/create-github-app-token@v1
id: app-token
with:
app-id: ${{ vars.CI_APP_ID }}
private-key: ${{ secrets.CI_PRIVATE_KEY }}
- name: repo-sync
uses: repo-sync/github-sync@v2
env:
GITHUB_TOKEN: ${{ steps.app-token.outputs.token }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
source_repo: "https://git.clan.lol/clan/clan-core.git"
source_branch: "main"

.gitignore
View File

@@ -14,7 +14,7 @@ example_clan
nixos.qcow2
**/*.glade~
/docs/out
**/.local.env
# dream2nix
.dream2nix

View File

@@ -1,4 +1,23 @@
# Contributing to Clan
<!-- Local file: docs/CONTRIBUTING.md -->
Go to the Contributing guide at https://docs.clan.lol/manual/contribute/
## Live-reloading documentation
Enter the `docs` directory:
```shell-session
cd docs
```
Enter the development shell or enable `direnv`:
```shell-session
direnv allow
```
Run a local server:
```shell-session
mkdocs serve
```
Open http://localhost:8000/ in your browser.

View File

@@ -5,12 +5,6 @@
fileSystems."/".device = "/dev/null";
boot.loader.grub.device = "/dev/null";
};
clan.inventory.services = {
borgbackup.test-backup = {
roles.client.machines = [ "test-backup" ];
roles.server.machines = [ "test-backup" ];
};
};
flake.nixosModules = {
test-backup =
{
@@ -28,20 +22,13 @@
in
{
imports = [
# Do not import inventory modules. They should be configured via 'clan.inventory'
#
# TODO: Configure localbackup via inventory
self.clanModules.borgbackup
self.clanModules.localbackup
];
# Borgbackup overrides
services.borgbackup.repos.test-backups = {
path = "/var/lib/borgbackup/test-backups";
authorizedKeys = [ (builtins.readFile ../lib/ssh/pubkey) ];
};
clan.borgbackup.destinations.test-backup.repo = lib.mkForce "borg@machine:.";
clan.core.networking.targetHost = "machine";
networking.hostName = "machine";
services.openssh.settings.UseDns = false;
nixpkgs.hostPlatform = "x86_64-linux";
programs.ssh.knownHosts = {
machine.hostNames = [ "machine" ];
@@ -50,8 +37,6 @@
services.openssh = {
enable = true;
settings.UsePAM = false;
settings.UseDns = false;
hostKeys = [
{
path = "/root/.ssh/id_ed25519";
@@ -62,10 +47,6 @@
users.users.root.openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
# This is needed to unlock the user for sshd
# Because we use sshd without setuid binaries
users.users.borg.initialPassword = "hello";
systemd.tmpfiles.settings."vmsecrets" = {
"/root/.ssh/id_ed25519" = {
C.argument = "${../lib/ssh/privkey}";
@@ -81,14 +62,14 @@
user = "root";
};
};
"/etc/secrets/borgbackup/borgbackup.ssh" = {
"/etc/secrets/borgbackup.ssh" = {
C.argument = "${../lib/ssh/privkey}";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/borgbackup/borgbackup.repokey" = {
"/etc/secrets/borgbackup.repokey" = {
C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
z = {
mode = "0400";
@@ -97,7 +78,8 @@
};
};
clan.core.facts.secretStore = "vm";
clan.core.vars.settings.secretStore = "vm";
# TODO: set this backend as well, once we have implemented it.
#clan.core.vars.settings.secretStore = "vm";
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
environment.etc.install-closure.source = "${closureInfo}/store-paths";
@@ -122,6 +104,7 @@
'';
folders = [ "/var/test-service" ];
};
clan.borgbackup.destinations.test-backup.repo = "borg@machine:.";
fileSystems."/mnt/external-disk" = {
device = "/dev/vdb"; # created in tests with virtualisation.emptyDisks
@@ -142,55 +125,29 @@
touch /run/unmount-external-disk
'';
};
services.borgbackup.repos.test-backups = {
path = "/var/lib/borgbackup/test-backups";
authorizedKeys = [ (builtins.readFile ../lib/ssh/pubkey) ];
};
};
};
perSystem =
{ pkgs, ... }:
let
clanCore = self.filter {
include = [
"checks/backups"
"checks/flake-module.nix"
"clanModules/borgbackup"
"clanModules/flake-module.nix"
"clanModules/localbackup"
"clanModules/packages"
"clanModules/single-disk"
"clanModules/zerotier"
"flake.lock"
"flakeModules"
"inventory.json"
"lib/build-clan"
"lib/default.nix"
"lib/select.nix"
"lib/flake-module.nix"
"lib/frontmatter"
"lib/inventory"
"lib/constraints"
"nixosModules"
];
};
in
{
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
test-backups = (import ../lib/container-test.nix) {
# Needs investigation on aarch64-linux
# vm-test-run-test-backups> qemu-kvm: No machine specified, and there is no default
# vm-test-run-test-backups> Use -machine help to list supported machines
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux") {
test-backups = (import ../lib/test-base.nix) {
name = "test-backups";
nodes.machine = {
imports =
[
imports = [
self.nixosModules.clanCore
# Some custom overrides for the backup tests
self.nixosModules.test-backup
]
++
# import the inventory generated nixosModules
self.clanInternals.inventoryClass.machines.test-backup.machineImports;
clan.core.settings.directory = ./.;
environment.systemPackages = [
(pkgs.writeShellScriptBin "foo" ''
echo ${clanCore}
'')
];
virtualisation.emptyDiskImages = [ 256 ];
clan.core.settings.directory = ./.;
};
testScript = ''
@@ -202,14 +159,14 @@
machine.succeed("echo testing > /var/test-backups/somefile")
# create
machine.succeed("clan backups create --debug --flake ${clanCore} test-backup")
machine.succeed("clan backups create --debug --flake ${self} test-backup")
machine.wait_until_succeeds("! systemctl is-active borgbackup-job-test-backup >&2")
machine.succeed("test -f /run/mount-external-disk")
machine.succeed("test -f /run/unmount-external-disk")
# list
backup_id = json.loads(machine.succeed("borg-job-test-backup list --json"))["archives"][0]["archive"]
out = machine.succeed("clan backups list --debug --flake ${clanCore} test-backup").strip()
out = machine.succeed("clan backups list --debug --flake ${self} test-backup").strip()
print(out)
assert backup_id in out, f"backup {backup_id} not found in {out}"
localbackup_id = "hdd::/mnt/external-disk/snapshot.0"
@@ -217,7 +174,7 @@
## borgbackup restore
machine.succeed("rm -f /var/test-backups/somefile")
machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup borgbackup 'test-backup::borg@machine:.::{backup_id}' >&2")
machine.succeed(f"clan backups restore --debug --flake ${self} test-backup borgbackup 'test-backup::borg@machine:.::{backup_id}' >&2")
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
machine.succeed("test -f /var/test-service/pre-restore-command")
machine.succeed("test -f /var/test-service/post-restore-command")
@@ -225,7 +182,7 @@
## localbackup restore
machine.succeed("rm -rf /var/test-backups/somefile /var/test-service/ && mkdir -p /var/test-service")
machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup localbackup '{localbackup_id}' >&2")
machine.succeed(f"clan backups restore --debug --flake ${self} test-backup localbackup '{localbackup_id}' >&2")
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
machine.succeed("test -f /var/test-service/pre-restore-command")
machine.succeed("test -f /var/test-service/post-restore-command")

View File

@@ -21,14 +21,14 @@
clan.core.state.testState.folders = [ "/etc/state" ];
environment.etc.state.text = "hello world";
systemd.tmpfiles.settings."vmsecrets" = {
"/etc/secrets/borgbackup/borgbackup.ssh" = {
"/etc/secrets/borgbackup.ssh" = {
C.argument = "${../lib/ssh/privkey}";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/borgbackup/borgbackup.repokey" = {
"/etc/secrets/borgbackup.repokey" = {
C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
z = {
mode = "0400";
@@ -36,8 +36,7 @@
};
};
};
# clan.core.facts.secretStore = "vm";
clan.core.vars.settings.secretStore = "vm";
clan.core.facts.secretStore = "vm";
clan.borgbackup.destinations.test.repo = "borg@localhost:.";
}

View File

@@ -1,7 +1,7 @@
(import ../lib/container-test.nix) (
{ ... }:
{
name = "container";
name = "secrets";
nodes.machine =
{ ... }:

View File

@@ -1,19 +1,11 @@
{ self, lib, ... }:
let
inherit (lib)
filter
pathExists
;
in
{ self, ... }:
{
imports = filter pathExists [
imports = [
./backups/flake-module.nix
./devshell/flake-module.nix
./flash/flake-module.nix
./impure/flake-module.nix
./installation/flake-module.nix
./installation-without-system/flake-module.nix
./morph/flake-module.nix
./nixos-documentation/flake-module.nix
];
perSystem =
@@ -50,7 +42,7 @@ in
flakeOutputs =
lib.mapAttrs' (
name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
) (lib.filterAttrs (n: _: !lib.hasPrefix "test-" n) self.nixosConfigurations)
) self.nixosConfigurations
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages
// lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
// lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (
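
The flakeOutputs hunk drops the `test-` filter: one side builds every `nixosConfigurations` entry, the other first excludes machines whose name starts with `test-`. The rename-and-filter idiom, as a standalone expression (a sketch; assumes `<nixpkgs>` on the NIX_PATH and illustrative attribute values):

```nix
# nix-instantiate --eval --strict example.nix
let
  lib = (import <nixpkgs> { }).lib;
  configs = {
    web = "toplevel-web";
    test-backup = "toplevel-test-backup";
  };
in
lib.mapAttrs' (name: value: lib.nameValuePair "nixos-${name}" value) (
  lib.filterAttrs (name: _: !lib.hasPrefix "test-" name) configs
)
# => { nixos-web = "toplevel-web"; }
```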

View File

@@ -1,39 +1,5 @@
{ self, ... }:
{
config,
self,
lib,
...
}:
{
clan.machines = lib.listToAttrs (
lib.map (
system:
lib.nameValuePair "test-flash-machine-${system}" {
clan.core.networking.targetHost = "test-flash-machine";
fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";
# We need to use `mkForce` because we inherit from `test-install-machine`
# which currently hardcodes `nixpkgs.hostPlatform`
nixpkgs.hostPlatform = lib.mkForce system;
imports = [ self.nixosModules.test-flash-machine ];
}
) (lib.filter (lib.hasSuffix "linux") config.systems)
);
flake.nixosModules = {
test-flash-machine =
{ lib, ... }:
{
imports = [ self.nixosModules.test-install-machine ];
clan.core.vars.generators.test = lib.mkForce { };
disko.devices.disk.main.preCreateHook = lib.mkForce "";
};
};
perSystem =
{
nodes,
@@ -44,20 +10,17 @@
let
dependencies = [
pkgs.disko
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.ConfigIniFiles
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.FileSlurp
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript.drvPath
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.clan.deployment.file
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.toplevel
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.diskoScript
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.diskoScript.drvPath
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.clan.deployment.file
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
test-flash = (import ../lib/test-base.nix) {
# Currently disabled...
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
flash = (import ../lib/test-base.nix) {
name = "flash";
nodes.target = {
virtualisation.emptyDiskImages = [ 4096 ];
@@ -79,7 +42,7 @@
testScript = ''
start_all()
machine.succeed("clan flash write --debug --flake ${../..} --yes --disk main /dev/vdb test-flash-machine-${pkgs.hostPlatform.system}")
machine.succeed("clan flash write --debug --flake ${../..} --yes --disk main /dev/vdb test-install-machine")
'';
} { inherit pkgs self; };
};
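
The removed block generated one `test-flash-machine-<system>` entry per Linux system in `config.systems` via `lib.listToAttrs`. The same idiom in isolation (a sketch; assumes `<nixpkgs>`, and the system list is illustrative):

```nix
let
  lib = (import <nixpkgs> { }).lib;
  systems = [ "x86_64-linux" "aarch64-linux" "x86_64-darwin" ];
in
lib.listToAttrs (
  map (system: lib.nameValuePair "test-flash-machine-${system}" { }) (
    lib.filter (lib.hasSuffix "linux") systems
  )
)
# => { test-flash-machine-aarch64-linux = { }; test-flash-machine-x86_64-linux = { }; }
```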

View File

@@ -30,7 +30,6 @@
# this disables dynamic dependency loading in clan-cli
export CLAN_NO_DYNAMIC_DEPS=1
export IN_PYTEST=1
nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -m impure ./tests $@"
'';
};

View File

@@ -1,241 +0,0 @@
{
self,
lib,
...
}:
{
# The purpose of this test is to ensure `clan machines install` works
# for machines that don't have a hardware config yet.
# If this test starts failing it could be due to the `facter.json` being out of date
# you can get a new one by adding
# client.fail("cat test-flake/machines/test-install-machine/facter.json >&2")
# to the installation test.
clan.machines.test-install-machine-without-system = {
fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";
imports = [ self.nixosModules.test-install-machine-without-system ];
};
clan.machines.test-install-machine-with-system =
{ pkgs, ... }:
{
# https://git.clan.lol/clan/test-fixtures
facter.reportPath = builtins.fetchurl {
url = "https://git.clan.lol/clan/test-fixtures/raw/commit/4a2bc56d886578124b05060d3fb7eddc38c019f8/nixos-vm-facter-json/${pkgs.hostPlatform.system}.json";
sha256 =
{
aarch64-linux = "sha256:1rlfymk03rmfkm2qgrc8l5kj5i20srx79n1y1h4nzlpwaz0j7hh2";
x86_64-linux = "sha256:16myh0ll2gdwsiwkjw5ba4dl23ppwbsanxx214863j7nvzx42pws";
}
.${pkgs.hostPlatform.system};
};
fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";
imports = [ self.nixosModules.test-install-machine-without-system ];
};
flake.nixosModules = {
test-install-machine-without-system =
{ lib, modulesPath, ... }:
{
imports = [
(modulesPath + "/testing/test-instrumentation.nix") # we need these 2 modules always to be able to run the tests
(modulesPath + "/profiles/qemu-guest.nix")
../lib/minify.nix
];
networking.hostName = "test-install-machine";
environment.etc."install-successful".text = "ok";
boot.consoleLogLevel = lib.mkForce 100;
boot.kernelParams = [ "boot.shell_on_fail" ];
# disko config
boot.loader.grub.efiSupport = lib.mkDefault true;
boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
clan.core.vars.settings.secretStore = "vm";
clan.core.vars.generators.test = {
files.test.neededFor = "partitioning";
script = ''
echo "notok" > $out/test
'';
};
disko.devices = {
disk = {
main = {
type = "disk";
device = "/dev/vda";
preCreateHook = ''
test -e /run/partitioning-secrets/test/test
'';
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
priority = 1;
};
ESP = {
size = "512M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
mountOptions = [ "umask=0077" ];
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
};
};
};
perSystem =
{
pkgs,
lib,
...
}:
let
dependencies = [
self
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.clan.deployment.file
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.nixos-anywhere
pkgs.bubblewrap
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
# with Nix 2.24 we get:
# vm-test-run-test-installation> client # error: sized: unexpected end-of-file
# vm-test-run-test-installation> client # error: unexpected end-of-file
# This seems to be fixed with Nix 2.26
# Remove this line once `pkgs.nix` is 2.26+
nixPackage =
assert
lib.versionOlder pkgs.nix.version "2.26"
&& lib.versionAtLeast pkgs.nixVersions.latest.version "2.26";
pkgs.nixVersions.latest;
in
{
# On aarch64-linux, hangs on reboot with after installation:
# vm-test-run-test-installation-without-system> installer # [ 288.002871] reboot: Restarting system
# vm-test-run-test-installation-without-system> client # [test-install-machine] ### Done! ###
# vm-test-run-test-installation-without-system> client # [test-install-machine] + step 'Done!'
# vm-test-run-test-installation-without-system> client # [test-install-machine] + echo '### Done! ###'
# vm-test-run-test-installation-without-system> client # [test-install-machine] + rm -rf /tmp/tmp.qb16EAq7hJ
# vm-test-run-test-installation-without-system> (finished: must succeed: clan machines install --debug --flake test-flake --yes test-install-machine-without-system --target-host root@installer --update-hardware-config nixos-facter >&2, in 154.62 seconds)
# vm-test-run-test-installation-without-system> target: starting vm
# vm-test-run-test-installation-without-system> target: QEMU running (pid 144)
# vm-test-run-test-installation-without-system> target: waiting for unit multi-user.target
# vm-test-run-test-installation-without-system> target: waiting for the VM to finish booting
# vm-test-run-test-installation-without-system> target: Guest root shell did not produce any data yet...
# vm-test-run-test-installation-without-system> target: To debug, enter the VM and run 'systemctl status backdoor.service'.
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
test-installation-without-system = (import ../lib/test-base.nix) {
name = "test-installation-without-system";
nodes.target = {
services.openssh.enable = true;
virtualisation.diskImage = "./target.qcow2";
virtualisation.useBootLoader = true;
nix.package = nixPackage;
};
nodes.installer =
{ modulesPath, ... }:
{
imports = [
(modulesPath + "/../tests/common/auto-format-root-device.nix")
];
services.openssh.enable = true;
system.nixos.variant_id = "installer";
environment.systemPackages = [ pkgs.nixos-facter ];
virtualisation.emptyDiskImages = [ 512 ];
virtualisation.diskSize = 8 * 1024;
virtualisation.rootDevice = "/dev/vdb";
# both installer and target need to use the same diskImage
virtualisation.diskImage = "./target.qcow2";
nix.package = nixPackage;
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
experimental-features = [
"nix-command"
"flakes"
];
};
users.users.nonrootuser = {
isNormalUser = true;
openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
extraGroups = [ "wheel" ];
};
security.sudo.wheelNeedsPassword = false;
system.extraDependencies = dependencies;
};
nodes.client = {
environment.systemPackages = [
self.packages.${pkgs.system}.clan-cli
] ++ self.packages.${pkgs.system}.clan-cli.runtimeDependencies;
environment.etc."install-closure".source = "${closureInfo}/store-paths";
virtualisation.memorySize = 3048;
nix.package = nixPackage;
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
experimental-features = [
"nix-command"
"flakes"
];
};
system.extraDependencies = dependencies;
};
testScript = ''
client.start()
installer.start()
client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
client.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v nonrootuser@installer hostname")
client.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
client.fail("test -f test-flake/machines/test-install-machine-without-system/hardware-configuration.nix")
client.fail("test -f test-flake/machines/test-install-machine-without-system/facter.json")
client.succeed("clan machines update-hardware-config --flake test-flake test-install-machine-without-system nonrootuser@installer >&2")
client.succeed("test -f test-flake/machines/test-install-machine-without-system/facter.json")
client.succeed("rm test-flake/machines/test-install-machine-without-system/facter.json")
client.succeed("clan machines install --debug --flake test-flake --yes test-install-machine-without-system --target-host nonrootuser@installer --update-hardware-config nixos-facter >&2")
try:
installer.shutdown()
except BrokenPipeError:
# qemu has already exited
pass
target.state_dir = installer.state_dir
target.start()
target.wait_for_unit("multi-user.target")
assert(target.succeed("cat /etc/install-successful").strip() == "ok")
'';
} { inherit pkgs self; };
};
};
}
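
The `nixPackage` binding above is a version-gated workaround: it pins `pkgs.nixVersions.latest` for the VMs, and the `assert` makes the pin fail the build once the default `pkgs.nix` reaches 2.26, so the workaround cannot silently outlive its reason. As a minimal module sketch:

```nix
{ pkgs, lib, ... }:
{
  nix.package =
    assert
      # still needed: the default nix is affected by the end-of-file bug
      lib.versionOlder pkgs.nix.version "2.26"
      # and the replacement actually carries the fix
      && lib.versionAtLeast pkgs.nixVersions.latest.version "2.26";
    pkgs.nixVersions.latest;
}
```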

View File

@@ -1,5 +1,6 @@
{
self,
inputs,
lib,
...
}:
@@ -16,68 +17,18 @@
{ lib, modulesPath, ... }:
{
imports = [
self.clanModules.single-disk
(modulesPath + "/testing/test-instrumentation.nix") # we need these 2 modules always to be able to run the tests
(modulesPath + "/profiles/qemu-guest.nix")
../lib/minify.nix
];
clan.single-disk.device = "/dev/vda";
environment.etc."install-successful".text = "ok";
nixpkgs.hostPlatform = "x86_64-linux";
boot.consoleLogLevel = lib.mkForce 100;
boot.kernelParams = [ "boot.shell_on_fail" ];
# disko config
boot.loader.grub.efiSupport = lib.mkDefault true;
boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
clan.core.vars.settings.secretStore = "vm";
clan.core.vars.generators.test = {
files.test.neededFor = "partitioning";
script = ''
echo "notok" > $out/test
'';
};
disko.devices = {
disk = {
main = {
type = "disk";
device = "/dev/vda";
preCreateHook = ''
test -e /run/partitioning-secrets/test/test
'';
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
priority = 1;
};
ESP = {
size = "512M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
mountOptions = [ "umask=0077" ];
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
};
};
};
perSystem =
@@ -92,23 +43,10 @@
self.nixosConfigurations.test-install-machine.config.system.build.toplevel
self.nixosConfigurations.test-install-machine.config.system.build.diskoScript
self.nixosConfigurations.test-install-machine.config.system.clan.deployment.file
pkgs.bash.drvPath
pkgs.stdenv.drvPath
pkgs.nixos-anywhere
pkgs.bubblewrap
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
# with Nix 2.24 we get:
# vm-test-run-test-installation> client # error: sized: unexpected end-of-file
# vm-test-run-test-installation> client # error: unexpected end-of-file
# This seems to be fixed with Nix 2.26
# Remove this line once `pkgs.nix` is 2.26+
nixPackage =
assert
lib.versionOlder pkgs.nix.version "2.26"
&& lib.versionAtLeast pkgs.nixVersions.latest.version "2.26";
pkgs.nixVersions.latest;
in
{
# On aarch64-linux, hangs on reboot with after installation:
@@ -120,14 +58,13 @@
# vm-test-run-test-installation> new_machine: QEMU running (pid 80)
# vm-test-run-test-installation> new_machine: Guest root shell did not produce any data yet...
# vm-test-run-test-installation> new_machine: To debug, enter the VM and run 'systemctl status backdoor.service'.
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux") {
test-installation = (import ../lib/test-base.nix) {
name = "test-installation";
nodes.target = {
services.openssh.enable = true;
virtualisation.diskImage = "./target.qcow2";
virtualisation.useBootLoader = true;
nix.package = nixPackage;
# virtualisation.fileSystems."/" = {
# device = "/dev/disk/by-label/this-is-not-real-and-will-never-be-used";
@@ -149,7 +86,6 @@
virtualisation.rootDevice = "/dev/vdb";
# both installer and target need to use the same diskImage
virtualisation.diskImage = "./target.qcow2";
nix.package = nixPackage;
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
@@ -167,8 +103,7 @@
self.packages.${pkgs.system}.clan-cli
] ++ self.packages.${pkgs.system}.clan-cli.runtimeDependencies;
environment.etc."install-closure".source = "${closureInfo}/store-paths";
virtualisation.memorySize = 3048;
nix.package = nixPackage;
virtualisation.memorySize = 2048;
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
@@ -189,19 +124,12 @@
client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
client.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v root@installer hostname")
client.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
# test that we can generate hardware configurations
client.fail("test -f test-flake/machines/test-install-machine/facter.json")
client.fail("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
client.succeed("clan machines update-hardware-config --flake test-flake test-install-machine root@installer >&2")
client.succeed("test -f test-flake/machines/test-install-machine/facter.json")
client.succeed("clan machines update-hardware-config --backend nixos-generate-config --flake test-flake test-install-machine root@installer>&2")
client.succeed("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
# but we don't use them because they're not cached
client.succeed("rm test-flake/machines/test-install-machine/hardware-configuration.nix test-flake/machines/test-install-machine/facter.json")
client.succeed("clan machines install --debug --flake test-flake --yes test-install-machine --target-host root@installer >&2")
client.succeed("clan machines update-hardware-config --backend nixos-facter --flake test-flake test-install-machine root@installer>&2")
client.succeed("test -f test-flake/machines/test-install-machine/facter.json")
client.succeed("clan machines install --debug --flake ${../..} --yes test-install-machine --target-host root@installer >&2")
try:
installer.shutdown()
except BrokenPipeError:
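
Both installation tests run without network access, so every store path the clan CLI will need must be baked into the VMs up front: `pkgs.closureInfo` computes the full closure plus a Nix database registration for the listed roots, and `system.extraDependencies` forces those paths into the VM's store. A reduced sketch of the pattern (here `pkgs.hello` stands in for the real dependency list):

```nix
{ pkgs, ... }:
let
  dependencies = [ pkgs.hello ];
  closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
  # list of all store paths in the closure, consumed by the test script
  environment.etc."install-closure".source = "${closureInfo}/store-paths";
  # make sure those paths actually exist in the VM's store
  system.extraDependencies = dependencies;
}
```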

View File

@@ -7,19 +7,9 @@
let
testDriver = hostPkgs.python3.pkgs.callPackage ./package.nix {
inherit (config) extraPythonPackages;
inherit (hostPkgs.pkgs) util-linux systemd nix;
inherit (hostPkgs.pkgs) util-linux systemd;
};
containers =
testScript:
map (m: [
m.system.build.toplevel
(hostPkgs.closureInfo {
rootPaths = [
m.system.build.toplevel
(hostPkgs.writeText "testScript" testScript)
];
})
]) (lib.attrValues config.nodes);
containers = map (m: m.system.build.toplevel) (lib.attrValues config.nodes);
pythonizeName =
name:
let
@@ -54,6 +44,8 @@ in
''
mkdir -p $out/bin
containers=(${toString containers})
${lib.optionalString (!config.skipTypeCheck) ''
# prepend type hints so the test script can be type checked with mypy
cat "${./test-script-prepend.py}" >> testScriptWithTypes
@@ -74,13 +66,7 @@ in
ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-test-driver
wrapProgram $out/bin/nixos-test-driver \
${
lib.concatStringsSep " " (
map (container: "--add-flags '--container ${builtins.toString container}'") (
containers config.testScriptString
)
)
} \
${lib.concatStringsSep " " (map (name: "--add-flags '--container ${name}'") containers)} \
--add-flags "--test-script '$out/test-script'"
''
);

View File

@@ -5,7 +5,6 @@
setuptools,
util-linux,
systemd,
nix,
colorama,
junit-xml,
}:
@@ -17,7 +16,6 @@ buildPythonApplication {
systemd
colorama
junit-xml
nix
] ++ extraPythonPackages python3Packages;
nativeBuildInputs = [ setuptools ];
format = "pyproject";

View File

@@ -1,5 +1,4 @@
import argparse
import ctypes
import os
import re
import subprocess
@@ -13,55 +12,6 @@ from typing import Any
from .logger import AbstractLogger, CompositeLogger, TerminalLogger
# Load the C library
libc = ctypes.CDLL("libc.so.6", use_errno=True)
# Define the mount function
libc.mount.argtypes = [
ctypes.c_char_p, # source
ctypes.c_char_p, # target
ctypes.c_char_p, # filesystemtype
ctypes.c_ulong, # mountflags
ctypes.c_void_p, # data
]
libc.mount.restype = ctypes.c_int
MS_BIND = 0x1000
MS_REC = 0x4000
def mount(
source: Path,
target: Path,
filesystemtype: str,
mountflags: int = 0,
data: str | None = None,
) -> None:
"""
A Python wrapper for the mount system call.
:param source: The source of the file system (e.g., device name, remote filesystem).
:param target: The mount point (an existing directory).
:param filesystemtype: The filesystem type (e.g., "ext4", "nfs").
:param mountflags: Mount options flags.
:param data: File system-specific data (e.g., options like "rw").
:raises OSError: If the mount system call fails.
"""
# Convert Python strings to C-compatible strings
source_c = ctypes.c_char_p(str(source).encode("utf-8"))
target_c = ctypes.c_char_p(str(target).encode("utf-8"))
fstype_c = ctypes.c_char_p(filesystemtype.encode("utf-8"))
data_c = ctypes.c_char_p(data.encode("utf-8")) if data else None
# Call the mount system call
result = libc.mount(
source_c, target_c, fstype_c, ctypes.c_ulong(mountflags), data_c
)
if result != 0:
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno))
class Error(Exception):
pass
@@ -121,7 +71,7 @@ class Machine:
self.rootdir,
"--register=no",
"--resolv-conf=off",
"--bind=/nix",
"--bind-ro=/nix/store",
"--bind",
self.out_dir,
"--bind=/proc:/run/host/proc",
@@ -152,9 +102,9 @@ class Machine:
.read_text()
.split()
)
assert len(childs) == 1, (
f"Expected exactly one child process for systemd-nspawn, got {childs}"
)
assert (
len(childs) == 1
), f"Expected exactly one child process for systemd-nspawn, got {childs}"
try:
return int(childs[0])
except ValueError as e:
@@ -303,9 +253,7 @@ class Machine:
info = self.get_unit_info(unit)
state = info["ActiveState"]
if state == "failed":
proc = self.systemctl(f"--lines 0 status {unit}")
journal = self.execute(f"journalctl -u {unit} --no-pager")
msg = f'unit "{unit}" reached state "{state}":\n{proc.stdout}\n{journal.stdout}'
msg = f'unit "{unit}" reached state "{state}"'
raise Error(msg)
if state == "inactive":
@@ -323,9 +271,7 @@ class Machine:
def succeed(self, command: str, timeout: int | None = None) -> str:
res = self.execute(command, timeout=timeout)
if res.returncode != 0:
msg = f"Failed to run command {command}\n"
msg += f"Exit code: {res.returncode}\n"
msg += f"Stdout: {res.stdout}"
msg = f"Failed to run command {command}"
raise RuntimeError(msg)
return res.stdout
@@ -342,12 +288,6 @@ class Machine:
self.shutdown()
NIX_DIR = Path("/nix")
NIX_STORE = Path("/nix/store/")
NEW_NIX_DIR = Path("/.nix-rw")
NEW_NIX_STORE_DIR = NEW_NIX_DIR / "store"
def setup_filesystems() -> None:
# We don't care about cleaning up the mount points, since we're running in a nix sandbox.
Path("/run").mkdir(parents=True, exist_ok=True)
@@ -356,32 +296,6 @@ def setup_filesystems() -> None:
Path("/etc").chmod(0o755)
Path("/etc/os-release").touch()
Path("/etc/machine-id").write_text("a5ea3f98dedc0278b6f3cc8c37eeaeac")
NEW_NIX_STORE_DIR.mkdir(parents=True)
# Read /proc/mounts and replicate every bind mount
with Path("/proc/self/mounts").open() as f:
for line in f:
columns = line.split(" ")
source = Path(columns[1])
if source.parent != NIX_STORE:
continue
target = NEW_NIX_STORE_DIR / source.name
if source.is_dir():
target.mkdir()
else:
target.touch()
try:
mount(source, target, "none", MS_BIND)
except OSError as e:
msg = f"mount({source}, {target}) failed"
raise Error(msg) from e
out = Path(os.environ["out"])
(NEW_NIX_STORE_DIR / out.name).mkdir()
mount(NEW_NIX_DIR, NIX_DIR, "none", MS_BIND | MS_REC)
def load_nix_db(closure_info: Path) -> None:
with (closure_info / "registration").open() as f:
subprocess.run(["nix-store", "--load-db"], stdin=f, check=True, text=True)
class Driver:
@@ -389,7 +303,7 @@ class Driver:
def __init__(
self,
containers: list[tuple[Path, Path]],
containers: list[Path],
logger: AbstractLogger,
testscript: str,
out_dir: str,
@@ -399,24 +313,21 @@ class Driver:
self.out_dir = out_dir
self.logger = logger
setup_filesystems()
# TODO: this won't work for multiple containers
assert len(containers) == 1, "Only one container is supported at the moment"
load_nix_db(containers[0][1])
self.tempdir = TemporaryDirectory()
tempdir_path = Path(self.tempdir.name)
self.machines = []
for container in containers:
name_match = re.match(r".*-nixos-system-(.+)-(.+)", container[0].name)
name_match = re.match(r".*-nixos-system-(.+)-(.+)", container.name)
if not name_match:
msg = f"Unable to extract hostname from {container[0].name}"
msg = f"Unable to extract hostname from {container.name}"
raise Error(msg)
name = name_match.group(1)
self.machines.append(
Machine(
name=name,
toplevel=container[0],
toplevel=container,
rootdir=tempdir_path / name,
out_dir=self.out_dir,
logger=self.logger,
@@ -488,11 +399,9 @@ def main() -> None:
arg_parser = argparse.ArgumentParser(prog="nixos-test-driver")
arg_parser.add_argument(
"--containers",
nargs=2,
action="append",
nargs="+",
type=Path,
metavar=("TOPLEVEL_STORE_DIR", "CLOSURE_INFO"),
help="container system toplevel store dir and closure info",
help="container system toplevel paths",
)
arg_parser.add_argument(
"--test-script",

View File

@@ -16,9 +16,6 @@ in
documentation.enable = lib.mkDefault false;
boot.isContainer = true;
# needed since nixpkgs 7fb2f407c01b017737eafc26b065d7f56434a992 removed the getty unit by default
console.enable = true;
# undo qemu stuff
system.build.initialRamdisk = "";
virtualisation.sharedDirectories = lib.mkForce { };
@@ -28,13 +25,9 @@ in
networking.interfaces = lib.mkForce { };
#networking.primaryIPAddress = lib.mkForce null;
systemd.services.backdoor.enable = false;
# we don't have permission to set cpu scheduler in our container
systemd.services.nix-daemon.serviceConfig.CPUSchedulingPolicy = lib.mkForce "";
};
# to accept external dependencies such as disko
node.specialArgs.self = self;
_module.args = { inherit self; };
imports = [
test
./container-driver/module.nix

View File

@@ -1,8 +1,7 @@
{ lib, ... }:
{
nixpkgs.flake.setFlakeRegistry = false;
nixpkgs.flake.setNixPath = false;
nix.registry = lib.mkForce { };
nix.registry.nixpkgs.to = { };
documentation.doc.enable = false;
documentation.man.enable = false;
}

View File

@@ -7,19 +7,15 @@ in
(nixos-lib.runTest {
hostPkgs = pkgs;
# speed-up evaluation
defaults = (
{ config, ... }:
{
defaults = {
imports = [
./minify.nix
];
documentation.enable = lib.mkDefault false;
nix.settings.min-free = 0;
system.stateVersion = config.system.nixos.release;
}
);
system.stateVersion = lib.version;
};
_module.args = { inherit self; };
# to accept external dependencies such as disko
node.specialArgs.self = self;
imports = [ test ];
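
`test-base.nix` is a thin wrapper around the upstream NixOS test framework: it forwards a test module to `runTest`, injects `self` into the nodes via `node.specialArgs`, and trims evaluation-time defaults. Its overall shape, reduced to the essentials (a sketch, omitting `./minify.nix` and the stateVersion default):

```nix
test:
{ pkgs, self }:
let
  nixos-lib = import (pkgs.path + "/nixos/lib") { };
in
nixos-lib.runTest {
  hostPkgs = pkgs;
  # keep evaluation cheap
  defaults.documentation.enable = pkgs.lib.mkDefault false;
  # let test nodes reference the flake, e.g. to import disko
  node.specialArgs.self = self;
  imports = [ test ];
}
```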

View File

@@ -31,8 +31,6 @@
clan.matrix-synapse.users.someuser = { };
clan.core.facts.secretStore = "vm";
clan.core.vars.settings.secretStore = "vm";
clan.core.vars.settings.publicStore = "in_repo";
# because we use systemd-tmpfiles to copy the secrets, we need to a separate systemd-tmpfiles call to provision them.
boot.postBootCommands = "${config.systemd.package}/bin/systemd-tmpfiles --create /etc/tmpfiles.d/00-vmsecrets.conf";
@@ -43,21 +41,21 @@
d.mode = "0700";
z.mode = "0700";
};
"/etc/secrets/matrix-synapse/synapse-registration_shared_secret" = {
"/etc/secrets/synapse-registration_shared_secret" = {
f.argument = "supersecret";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/matrix-password-admin/matrix-password-admin" = {
"/etc/secrets/matrix-password-admin" = {
f.argument = "matrix-password1";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/matrix-password-someuser/matrix-password-someuser" = {
"/etc/secrets/matrix-password-someuser" = {
f.argument = "matrix-password2";
z = {
mode = "0400";

View File

@@ -1,62 +0,0 @@
{
self,
...
}:
{
clan.machines.test-morph-machine = {
imports = [
./template/configuration.nix
self.nixosModules.clanCore
];
nixpkgs.hostPlatform = "x86_64-linux";
environment.etc."testfile".text = "morphed";
};
clan.templates.machine.test-morph-template = {
description = "Morph a machine";
path = ./template;
};
perSystem =
{
pkgs,
...
}:
{
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
test-morph = (import ../lib/test-base.nix) {
name = "morph";
nodes = {
actual =
{ pkgs, ... }:
let
dependencies = [
self
pkgs.nixos-anywhere
pkgs.stdenv.drvPath
pkgs.stdenvNoCC
self.nixosConfigurations.test-morph-machine.config.system.build.toplevel
self.nixosConfigurations.test-morph-machine.config.system.clan.deployment.file
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
environment.etc."install-closure".source = "${closureInfo}/store-paths";
system.extraDependencies = dependencies;
virtualisation.memorySize = 2048;
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
};
};
testScript = ''
start_all()
actual.fail("cat /etc/testfile")
actual.succeed("env CLAN_DIR=${self} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
assert actual.succeed("cat /etc/testfile") == "morphed"
'';
} { inherit pkgs self; };
};
};
}

View File

@@ -1,12 +0,0 @@
{ modulesPath, ... }:
{
imports = [
# we need these 2 modules always to be able to run the tests
(modulesPath + "/testing/test-instrumentation.nix")
(modulesPath + "/virtualisation/qemu-vm.nix")
(modulesPath + "/profiles/minimal.nix")
];
clan.core.enableRecommendedDefaults = false;
}

View File

@@ -1,8 +0,0 @@
---
description = "Set up automatic upgrades"
categories = ["System"]
features = [ "inventory" ]
---
Whether to periodically upgrade NixOS to the latest version. If enabled, a
systemd timer will run `nixos-rebuild switch --upgrade` once a day.

View File

@@ -1,24 +0,0 @@
{
config,
lib,
...
}:
let
cfg = config.clan.autoUpgrade;
in
{
options.clan.autoUpgrade = {
flake = lib.mkOption {
type = lib.types.str;
description = "Flake reference";
};
};
config = {
system.autoUpgrade = {
inherit (cfg.clan.autoUpgrade) flake;
enable = true;
dates = "02:00";
randomizedDelaySec = "45min";
};
};
}

View File

@@ -63,9 +63,9 @@ in
rsh = lib.mkOption {
type = lib.types.str;
default = "ssh -i ${
config.clan.core.vars.generators.borgbackup.files."borgbackup.ssh".path
config.clan.core.facts.services.borgbackup.secret."borgbackup.ssh".path
} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
defaultText = "ssh -i \${config.clan.core.vars.generators.borgbackup.files.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
defaultText = "ssh -i \${config.clan.core.facts.services.borgbackup.secret.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
description = "the rsh to use for the backup";
};
};
@@ -126,7 +126,7 @@ in
encryption = {
mode = "repokey";
passCommand = "cat ${config.clan.core.vars.generators.borgbackup.files."borgbackup.repokey".path}";
passCommand = "cat ${config.clan.core.facts.services.borgbackup.secret."borgbackup.repokey".path}";
};
prune.keep = {
@@ -177,21 +177,20 @@ in
})
];
clan.core.vars.generators.borgbackup = {
files."borgbackup.ssh.pub".secret = false;
files."borgbackup.ssh" = { };
files."borgbackup.repokey" = { };
migrateFact = "borgbackup";
runtimeInputs = [
pkgs.coreutils
# Facts generation. So the client can authenticate to the server
clan.core.facts.services.borgbackup = {
public."borgbackup.ssh.pub" = { };
secret."borgbackup.ssh" = { };
secret."borgbackup.repokey" = { };
generator.path = [
pkgs.openssh
pkgs.coreutils
pkgs.xkcdpass
];
script = ''
ssh-keygen -t ed25519 -N "" -f $out/borgbackup.ssh
xkcdpass -n 4 -d - > $out/borgbackup.repokey
generator.script = ''
ssh-keygen -t ed25519 -N "" -f "$secrets"/borgbackup.ssh
mv "$secrets"/borgbackup.ssh.pub "$facts"/borgbackup.ssh.pub
xkcdpass -n 4 -d - > "$secrets"/borgbackup.repokey
'';
};
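
This hunk is the heart of the compare: the same borgbackup key material expressed against the two secret backends. With `clan.core.vars.generators` there is a single `files` set (public entries are marked `secret = false`), the script writes everything to `$out`, and `migrateFact` imports existing facts; with `clan.core.facts.services`, `secret` and `public` are separate sets and the script writes to `$secrets` and `$facts`. Side by side, trimmed to the key generation (a sketch, not the full module):

```nix
{ pkgs, ... }:
{
  # vars style
  clan.core.vars.generators.borgbackup = {
    files."borgbackup.ssh" = { };
    files."borgbackup.ssh.pub".secret = false;
    migrateFact = "borgbackup";
    runtimeInputs = [ pkgs.openssh ];
    script = ''
      ssh-keygen -t ed25519 -N "" -f $out/borgbackup.ssh
    '';
  };

  # facts style
  clan.core.facts.services.borgbackup = {
    secret."borgbackup.ssh" = { };
    public."borgbackup.ssh.pub" = { };
    generator.path = [ pkgs.openssh ];
    generator.script = ''
      ssh-keygen -t ed25519 -N "" -f "$secrets"/borgbackup.ssh
      mv "$secrets"/borgbackup.ssh.pub "$facts"/borgbackup.ssh.pub
    '';
  };
}
```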

View File

@@ -1,7 +1,7 @@
{ config, lib, ... }:
let
dir = config.clan.core.settings.directory;
machineDir = dir + "/vars/per-machine/";
machineDir = dir + "/machines/";
machineName = config.clan.core.settings.machine.name;
# Instances might be empty, if the module is not used via the inventory
@@ -33,8 +33,7 @@ in
};
config.services.borgbackup.repos =
let
borgbackupIpMachinePath = machine: machineDir + machine + "/borgbackup/borgbackup.ssh.pub/value";
borgbackupIpMachinePath = machines: machineDir + machines + "/facts/borgbackup.ssh.pub";
machinesMaybeKey = builtins.map (
machine:
let
@@ -45,7 +44,7 @@ in
else
lib.warn ''
Machine ${machine} does not have a borgbackup key at ${fullPath},
run `clan var generate ${machine}` to generate it.
run `clan facts generate ${machine}` to generate it.
'' null
) allClients;
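
The two `borgbackupIpMachinePath` variants reflect where each backend persists public values in the repository: vars under `vars/per-machine/<machine>/<generator>/<file>/value`, facts under `machines/<machine>/facts/<file>`. A standalone sketch of the guarded lookup (assumes evaluation from a clan repository root):

```nix
let
  dir = ./.;
  varsKey = machine: dir + "/vars/per-machine/${machine}/borgbackup/borgbackup.ssh.pub/value";
  factsKey = machine: dir + "/machines/${machine}/facts/borgbackup.ssh.pub";
  # return null for machines whose key has not been generated yet
  readKey = path: if builtins.pathExists path then builtins.readFile path else null;
in
{
  fromVars = readKey (varsKey "machine");
  fromFacts = readKey (factsKey "machine");
}
```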

View File

@@ -52,7 +52,7 @@ let
migrateFact = "${secret_id opt}";
prompts.${secret_id opt} = {
type = "hidden";
persist = true;
createFile = true;
};
};
};

View File

@@ -1,15 +1,7 @@
{ lib, ... }:
let
inherit (lib)
filterAttrs
pathExists
;
in
{ ... }:
{
# only import available files, as this allows to filter the files for tests.
flake.clanModules = filterAttrs (_name: pathExists) {
flake.clanModules = {
admin = ./admin;
auto-upgrade = ./auto-upgrade;
borgbackup = ./borgbackup;
borgbackup-static = ./borgbackup-static;
deltachat = ./deltachat;
@@ -27,7 +19,6 @@ in
matrix-synapse = ./matrix-synapse;
moonlight = ./moonlight;
mumble = ./mumble;
mycelium = ./mycelium;
nginx = ./nginx;
packages = ./packages;
postgresql = ./postgresql;
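
The removed `filterAttrs (_name: pathExists)` guard meant only module directories present on disk were exported, which let tests evaluate a filtered source tree without breaking `flake.clanModules`. In isolation (a sketch; assumes `<nixpkgs>`):

```nix
let
  lib = (import <nixpkgs> { }).lib;
  modules = {
    borgbackup = ./borgbackup;
    filtered-out = ./does-not-exist;
  };
in
lib.filterAttrs (_name: lib.pathExists) modules
# => { borgbackup = ./borgbackup; } when only ./borgbackup exists
```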

View File

@@ -3,7 +3,8 @@ description = "S3-compatible object store for small self-hosted geo-distributed
---
This module generates garage specific keys automatically.
Also shares the `rpc_secret` between instances.
When using garage in a distributed deployment the `rpc_key` between connected instances must be shared.
This is currently still a manual process.
Options: [NixosModuleOptions](https://search.nixos.org/options?channel=unstable&size=50&sort=relevance&type=packages&query=garage)
Documentation: https://garagehq.deuxfleurs.fr/

View File

@@ -2,9 +2,9 @@
{
systemd.services.garage.serviceConfig = {
LoadCredential = [
"rpc_secret_path:${config.clan.core.vars.generators.garage-shared.files.rpc_secret.path}"
"admin_token_path:${config.clan.core.vars.generators.garage.files.admin_token.path}"
"metrics_token_path:${config.clan.core.vars.generators.garage.files.metrics_token.path}"
"rpc_secret_path:${config.clan.core.facts.services.garage.secret.garage_rpc_secret.path}"
"admin_token_path:${config.clan.core.facts.services.garage.secret.garage_admin_token.path}"
"metrics_token_path:${config.clan.core.facts.services.garage.secret.garage_metrics_token.path}"
];
Environment = [
"GARAGE_ALLOW_WORLD_READABLE_SECRETS=true"
@@ -14,30 +14,37 @@
];
};
clan.core.vars.generators.garage = {
files.admin_token = { };
files.metrics_token = { };
runtimeInputs = [
clan.core.facts.services.garage = {
secret.garage_rpc_secret = { };
secret.garage_admin_token = { };
secret.garage_metrics_token = { };
generator.path = [
pkgs.coreutils
pkgs.openssl
];
script = ''
openssl rand -base64 -out $out/admin_token 32
openssl rand -base64 -out $out/metrics_token 32
generator.script = ''
openssl rand -hex -out $secrets/garage_rpc_secret 32
openssl rand -base64 -out $secrets/garage_admin_token 32
openssl rand -base64 -out $secrets/garage_metrics_token 32
'';
};
clan.core.vars.generators.garage-shared = {
share = true;
files.rpc_secret = { };
runtimeInputs = [
pkgs.coreutils
pkgs.openssl
];
script = ''
openssl rand -hex -out $out/rpc_secret 32
'';
};
# TODO: Vars is not in a useable state currently
# Move back, once it is implemented.
# clan.core.vars.generators.garage = {
# files.rpc_secret = { };
# files.admin_token = { };
# files.metrics_token = { };
# runtimeInputs = [
# pkgs.coreutils
# pkgs.openssl
# ];
# script = ''
# openssl rand -hex -out $out/rpc_secret 32
# openssl rand -base64 -out $out/admin_token 32
# openssl rand -base64 -out $out/metrics_token 32
# '';
# };
clan.core.state.garage.folders = [ config.services.garage.settings.metadata_dir ];
}
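
The vars side splits garage's credentials into two generators because they have different scopes: `share = true` produces one value for the whole clan (every instance must agree on `rpc_secret`), while the default per-machine scope fits the admin and metrics tokens. The shared generator on its own:

```nix
{ pkgs, ... }:
{
  clan.core.vars.generators.garage-shared = {
    # generated once and reused by every machine in the clan
    share = true;
    files.rpc_secret = { };
    runtimeInputs = [
      pkgs.coreutils
      pkgs.openssl
    ];
    script = ''
      openssl rand -hex -out $out/rpc_secret 32
    '';
  };
}
```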

View File

@@ -6,4 +6,4 @@ categories = [ "Network" ]
!!! Warning
If you've been using network manager + wpa_supplicant and now are switching to IWD read this migration guide:
https://archive.kernel.org/oldwiki/iwd.wiki.kernel.org/networkmanager.html#converting_network_profiles
https://iwd.wiki.kernel.org/networkmanager#converting_network_profiles

View File

@@ -1,9 +1,4 @@
{
lib,
config,
pkgs,
...
}:
{ lib, config, ... }:
let
cfg = config.clan.iwd;
@@ -17,13 +12,12 @@ let
{
secret.${secret_name} = { };
generator.prompt = "Wifi password for '${value.ssid}'";
# ref. man iwd.network
generator.script = ''
config="
[Settings]
AutoConnect=${if value.AutoConnect then "true" else "false"}
[Security]
Passphrase=$(echo -e "$prompt_value" | ${lib.getExe pkgs.gnused} "s=\\\=\\\\\\\=g;s=\t=\\\t=g;s=\r=\\\r=g;s=^ =\\\s=")
Passphrase=\"$prompt_value\"
"
echo "$config" > "$secrets/${secret_name}"
'';

View File

@@ -10,18 +10,18 @@ let
in
{
config = lib.mkMerge [
(lib.mkIf ((var.value or null) != null) {
(lib.mkIf ((var.machineId.value or null) != null) {
assertions = [
{
assertion = lib.stringLength var.value == 32;
assertion = lib.stringLength var.machineId.value == 32;
message = "machineId must be exactly 32 characters long.";
}
];
boot.kernelParams = [
''systemd.machine_id=${var.value}''
''systemd.machine_id=${var.machineId.value}''
];
environment.etc."machine-id" = {
text = var.value;
text = var.machineId.value;
};
})
{

View File

@@ -106,6 +106,17 @@ in
};
};
systemd.tmpfiles.settings."01-matrix" = {
"/run/synapse-registration-shared-secret" = {
C.argument =
config.clan.core.facts.services.matrix-synapse.secret.synapse-registration_shared_secret.path;
z = {
mode = "0400";
user = "matrix-synapse";
};
};
};
clan.postgresql.users.matrix-synapse = { };
clan.postgresql.databases.matrix-synapse.create.options = {
TEMPLATE = "template0";
@@ -116,28 +127,26 @@ in
};
clan.postgresql.databases.matrix-synapse.restore.stopOnRestore = [ "matrix-synapse" ];
clan.core.vars.generators =
clan.core.facts.services =
{
"matrix-synapse" = {
files."synapse-registration_shared_secret" = { };
runtimeInputs = with pkgs; [
secret."synapse-registration_shared_secret" = { };
generator.path = with pkgs; [
coreutils
pwgen
];
migrateFact = "matrix-synapse";
script = ''
echo -n "$(pwgen -s 32 1)" > "$out"/synapse-registration_shared_secret
generator.script = ''
echo -n "$(pwgen -s 32 1)" > "$secrets"/synapse-registration_shared_secret
'';
};
}
// lib.mapAttrs' (
name: user:
lib.nameValuePair "matrix-password-${user.name}" {
files."matrix-password-${user.name}" = { };
migrateFact = "matrix-password-${user.name}";
runtimeInputs = with pkgs; [ xkcdpass ];
script = ''
xkcdpass -n 4 -d - > "$out"/${lib.escapeShellArg "matrix-password-${user.name}"}
secret."matrix-password-${user.name}" = { };
generator.path = with pkgs; [ xkcdpass ];
generator.script = ''
xkcdpass -n 4 -d - > "$secrets"/${lib.escapeShellArg "matrix-password-${user.name}"}
'';
}
) cfg.users;
@@ -154,20 +163,14 @@ in
+ lib.concatMapStringsSep "\n" (user: ''
# only create user if it doesn't exist
/run/current-system/sw/bin/matrix-synapse-register_new_matrix_user --exists-ok --password-file ${
config.clan.core.vars.generators."matrix-password-${user.name}".files."matrix-password-${user.name}".path
config.clan.core.facts.services."matrix-password-${user.name}".secret."matrix-password-${user.name}".path
} --user "${user.name}" ${if user.admin then "--admin" else "--no-admin"}
'') (lib.attrValues cfg.users);
in
{
path = [ pkgs.curl ];
serviceConfig.ExecStartPre = lib.mkBefore [
"+${pkgs.coreutils}/bin/install -o matrix-synapse -g matrix-synapse ${
lib.escapeShellArg
config.clan.core.vars.generators.matrix-synapse.files."synapse-registration_shared_secret".path
} /run/synapse-registration-shared-secret"
];
serviceConfig.ExecStartPost = [
''+${pkgs.writeShellScript "matrix-synapse-create-users" usersScript}''
(''+${pkgs.writeShellScript "matrix-synapse-create-users" usersScript}'')
];
};

View File

@@ -1,30 +0,0 @@
---
description = "End-2-end encrypted IPv6 overlay network"
categories = ["System", "Network"]
features = [ "inventory" ]
---
Mycelium is an IPv6 overlay network written in Rust. Each node that joins the overlay network will receive an overlay network IP in the 400::/7 range.
Features:
- Mycelium, is locality aware, it will look for the shortest path between nodes
- All traffic between the nodes is end-2-end encrypted
- Traffic can be routed over nodes of friends, location aware
- If a physical link goes down Mycelium will automatically reroute your traffic
- The IP address is IPV6 and linked to private key
- A simple reliable messagebus is implemented on top of Mycelium
- Mycelium has multiple ways how to communicate quic, tcp, ... and we are working on holepunching for Quick which means P2P traffic without middlemen for NATted networks e.g. most homes
- Scalability is very important for us, we tried many overlay networks before and got stuck on all of them, we are trying to design a network which scales to a planetary level
- You can run mycelium without TUN and only use it as reliable message bus.
An example configuration might look like this in the inventory:
```nix
mycelium.default = {
roles.peer.machines = [
"berlin"
"munich"
];
};
```
This will add the machines named `berlin` and `munich` to the `mycelium` vpn.

View File

@@ -1,45 +0,0 @@
{
pkgs,
config,
lib,
...
}:
{
options = {
clan.mycelium.openFirewall = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Open the firewall for mycelium";
};
clan.mycelium.addHostedPublicNodes = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Add hosted Public nodes";
};
};
config.services.mycelium = {
enable = true;
addHostedPublicNodes = lib.mkDefault config.clan.mycelium.addHostedPublicNodes;
openFirewall = lib.mkDefault config.clan.mycelium.openFirewall;
keyFile = config.clan.core.vars.generators.mycelium.files.key.path;
};
config.clan.core.vars.generators.mycelium = {
files."key" = { };
files."ip".secret = false;
files."pubkey".secret = false;
runtimeInputs = [
pkgs.mycelium
pkgs.coreutils
pkgs.jq
];
script = ''
timeout 5 mycelium --key-file "$out"/key || :
mycelium inspect --key-file "$out"/key --json | jq -r .publicKey > "$out"/pubkey
mycelium inspect --key-file "$out"/key --json | jq -r .address > "$out"/ip
'';
};
}

View File

@@ -1,7 +1,5 @@
---
description = "Automatically generates and configures a password for the root user."
categories = ["System"]
features = [ "inventory" ]
---
After the system was installed/deployed the following command can be used to display the root-password:

View File

@@ -1,6 +1,29 @@
# Dont import this file
# It is only here for backwards compatibility.
# Dont author new modules with this file.
{
imports = [ ./roles/default.nix ];
pkgs,
config,
lib,
...
}:
{
users.mutableUsers = false;
users.users.root.hashedPasswordFile =
config.clan.core.facts.services.root-password.secret.password-hash.path;
sops.secrets = lib.mkIf (config.clan.core.facts.secretStore == "sops") {
"${config.clan.core.settings.machine.name}-password-hash".neededForUsers = true;
};
clan.core.facts.services.root-password = {
secret.password = { };
secret.password-hash = { };
generator.path = with pkgs; [
coreutils
xkcdpass
mkpasswd
];
generator.script = ''
xkcdpass --numwords 3 --delimiter - --count 1 | tr -d "\n" > $secrets/password
cat $secrets/password | mkpasswd -s -m sha-512 | tr -d "\n" > $secrets/password-hash
'';
};
}

View File

@@ -1,38 +0,0 @@
{
pkgs,
config,
...
}:
{
users.mutableUsers = false;
users.users.root.hashedPasswordFile =
config.clan.core.vars.generators.root-password.files.password-hash.path;
clan.core.vars.generators.root-password = {
files.password-hash = {
neededFor = "users";
};
files.password = {
deploy = false;
};
migrateFact = "root-password";
runtimeInputs = [
pkgs.coreutils
pkgs.mkpasswd
pkgs.xkcdpass
];
prompts.password.type = "hidden";
prompts.password.persist = true;
prompts.password.description = "You can autogenerate a password, if you leave this prompt blank.";
script = ''
prompt_value=$(cat $prompts/password)
if [[ -n ''${prompt_value-} ]]; then
echo $prompt_value | tr -d "\n" > $out/password
else
xkcdpass --numwords 3 --delimiter - --count 1 | tr -d "\n" > $out/password
fi
mkpasswd -s -m sha-512 < $out/password | tr -d "\n" > $out/password-hash
'';
};
}

View File

@@ -37,7 +37,6 @@ in
type = "rsa";
};
};
clan.core.vars.generators.openssh = {
files."ssh.id_ed25519" = { };
files."ssh.id_ed25519.pub".secret = false;
@@ -51,14 +50,6 @@ in
'';
};
programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
hostNames = [
"localhost"
config.networking.hostName
] ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
};
clan.core.vars.generators.openssh-rsa = lib.mkIf config.clan.sshd.hostKeys.rsa.enable {
files."ssh.id_rsa" = { };
files."ssh.id_rsa.pub".secret = false;

View File

@@ -3,7 +3,7 @@ let
var = config.clan.core.vars.generators.state-version.files.version or { };
in
{
system.stateVersion = lib.mkDefault (lib.removeSuffix "\n" var.value);
system.stateVersion = lib.mkDefault var.value;
clan.core.vars.generators.state-version = {
files.version = {
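
The `lib.removeSuffix "\n"` in this hunk matters because the stored var file usually ends with a newline, while `system.stateVersion` must be the bare version string (e.g. `"24.05"`) for release comparisons to work. In isolation (assumes `<nixpkgs>`):

```nix
# nix-instantiate --eval --strict example.nix
let
  lib = (import <nixpkgs> { }).lib;
in
lib.removeSuffix "\n" "24.05\n"
# => "24.05"; without the strip, the trailing newline would leak
# into the state-version comparison.
```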

View File

@@ -7,8 +7,7 @@
let
dir = config.clan.core.settings.directory;
machineDir = dir + "/machines/";
machineVarDir = dir + "/vars/per-machine/";
syncthingPublicKeyPath = machines: machineVarDir + machines + "/syncthing/id/value";
syncthingPublicKeyPath = machines: machineDir + machines + "/facts/syncthing.pub";
machinesFileSet = builtins.readDir machineDir;
machines = lib.mapAttrsToList (name: _: name) machinesFileSet;
syncthingPublicKeysUnchecked = builtins.map (
@@ -84,26 +83,24 @@ in
configDir = "/var/lib/syncthing";
group = "syncthing";
key = lib.mkDefault config.clan.core.vars.generators.syncthing.files.key.path or null;
cert = lib.mkDefault config.clan.core.vars.generators.syncthing.files.cert.path or null;
key = lib.mkDefault config.clan.core.facts.services.syncthing.secret."syncthing.key".path or null;
cert = lib.mkDefault config.clan.core.facts.services.syncthing.secret."syncthing.cert".path or null;
};
clan.core.vars.generators.syncthing = {
files.key = { };
files.cert = { };
files.api = { };
files.id.secret = false;
runtimeInputs = [
clan.core.facts.services.syncthing = {
secret."syncthing.key" = { };
secret."syncthing.cert" = { };
public."syncthing.pub" = { };
generator.path = [
pkgs.coreutils
pkgs.gnugrep
pkgs.syncthing
];
script = ''
syncthing generate --config $out
mv $out/key.pem $out/key
mv $out/cert.pem $out/cert
cat $out/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > $out/id
cat $out/config.xml | grep -oP '<apikey>\K[^<]+' | uniq > $out/api
generator.script = ''
syncthing generate --config "$secrets"
mv "$secrets"/key.pem "$secrets"/syncthing.key
mv "$secrets"/cert.pem "$secrets"/syncthing.cert
cat "$secrets"/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > "$facts"/syncthing.pub
'';
};
}

View File

@@ -1,7 +1,5 @@
---
description = "Automatically generates and configures a password for the specified user account."
categories = ["System"]
features = ["inventory"]
---
If setting the option prompt to true, the user will be prompted to type in their desired password.

View File

@@ -1,6 +1,58 @@
# Dont import this file
# It is only here for backwards compatibility.
# Dont author new modules with this file.
{
imports = [ ./roles/default.nix ];
pkgs,
config,
lib,
...
}:
let
cfg = config.clan.user-password;
in
{
options.clan.user-password = {
user = lib.mkOption {
type = lib.types.str;
example = "alice";
description = "The user the password should be generated for.";
};
prompt = lib.mkOption {
type = lib.types.bool;
default = true;
example = false;
description = "Whether the user should be prompted.";
};
};
config = {
users.mutableUsers = false;
users.users.${cfg.user} = {
hashedPasswordFile = config.clan.core.facts.services.user-password.secret.user-password-hash.path;
isNormalUser = lib.mkDefault true;
};
sops.secrets = lib.mkIf (config.clan.core.facts.secretStore == "sops") {
"${config.clan.core.settings.machine.name}-user-password-hash".neededForUsers = true;
};
clan.core.facts.services.user-password = {
secret.user-password = { };
secret.user-password-hash = { };
generator.prompt = (
lib.mkIf config.clan.user-password.prompt "Set the password for your user '${config.clan.user-password.user}'.
You can autogenerate a password, if you leave this prompt blank."
);
generator.path = with pkgs; [
coreutils
xkcdpass
mkpasswd
];
generator.script = ''
if [[ -n ''${prompt_value-} ]]; then
echo $prompt_value | tr -d "\n" > $secrets/user-password
else
xkcdpass --numwords 3 --delimiter - --count 1 | tr -d "\n" > $secrets/user-password
fi
cat $secrets/user-password | mkpasswd -s -m sha-512 | tr -d "\n" > $secrets/user-password-hash
'';
};
};
}

View File

@@ -1,58 +0,0 @@
{
pkgs,
config,
lib,
...
}:
let
cfg = config.clan.user-password;
in
{
options.clan.user-password = {
user = lib.mkOption {
type = lib.types.str;
example = "alice";
description = "The user the password should be generated for.";
};
prompt = lib.mkOption {
type = lib.types.bool;
default = true;
example = false;
description = "Whether the user should be prompted.";
};
};
config = {
users.mutableUsers = false;
users.users.${cfg.user} = {
hashedPasswordFile = config.clan.core.facts.services.user-password.secret.user-password-hash.path;
isNormalUser = lib.mkDefault true;
};
sops.secrets = lib.mkIf (config.clan.core.facts.secretStore == "sops") {
"${config.clan.core.settings.machine.name}-user-password-hash".neededForUsers = true;
};
clan.core.facts.services.user-password = {
secret.user-password = { };
secret.user-password-hash = { };
generator.prompt = (
lib.mkIf config.clan.user-password.prompt "Set the password for your user '${config.clan.user-password.user}'.
You can autogenerate a password, if you leave this prompt blank."
);
generator.path = with pkgs; [
coreutils
xkcdpass
mkpasswd
];
generator.script = ''
if [[ -n ''${prompt_value-} ]]; then
echo $prompt_value | tr -d "\n" > $secrets/user-password
else
xkcdpass --numwords 3 --delimiter - --count 1 | tr -d "\n" > $secrets/user-password
fi
cat $secrets/user-password | mkpasswd -s -m sha-512 | tr -d "\n" > $secrets/user-password-hash
'';
};
};
}

View File

@@ -14,9 +14,9 @@ let
name = "iwd.${name}";
value = {
prompts.ssid.type = "line";
prompts.ssid.persist = true;
prompts.ssid.createFile = true;
prompts.password.type = "hidden";
prompts.password.persist = true;
prompts.password.createFile = true;
share = true;
};
};

View File

@@ -1,5 +1,5 @@
---
description = "Configures [Zerotier VPN](https://zerotier.com) secure and efficient networking within a Clan."
description = "Configures [Zerotier VPN](https://zerotier.com) secure and efficient networking within a Clan.."
features = [ "inventory" ]
categories = [ "Network", "System" ]

View File

@@ -1,547 +0,0 @@
# Clan service modules
Status: Accepted
## Context
To define a service in Clan, you need to define two things:
- `clanModule` - defined by module authors
- `inventory` - defined by users
The `clanModule` is currently a plain NixOS module. It is conditionally imported into each machine depending on the `service` and `role`.
A `role` is a function of a machine within a service. For example in the `backup` service there are `client` and `server` roles.
The `inventory` contains the settings for the user/consumer of the module. It describes what `services` run on each machine and with which `roles`.
Additionally any `service` can be instantiated multiple times.
This ADR proposes that we change how to write a `clanModule`. The `inventory` should get a new attribute called `instances` that allows for configuration of these modules.
### Status Quo
In this example the user configures 2 instances of the `networking` service:
The *user* defines
```nix
{
inventory.services = {
# anything inside an instance is instance specific
networking."instance1" = {
roles.client.tags = [ "all" ];
machines.foo.config = { ... /* machine specific settings */ };
# this will not apply to `clients` outside of `instance1`
roles.client.config = { ... /* client specific settings */ };
};
networking."instance2" = {
roles.server.tags = [ "all" ];
config = { ... /* applies to every machine that runs this instance */ };
};
};
}
```
The *module author* defines:
```nix
# networking/roles/client.nix
{ config, ... }:
let
instances = config.clan.inventory.services.networking or { };
serviceConfig = config.clan.networking;
in {
## Set some nixos options
}
```
### Problems
Problems with the current way of writing clanModules:
1. No way to retrieve the config of a single service instance, together with its name.
2. Directly exporting a single, anonymous nixosModule without any intermediary attribute layers doesn't leave room for exporting other inventory resources such as potentially `vars` or `homeManagerConfig`.
3. Can't access multiple config instances individually.
Example:
```nix
inventory = {
services = {
network.c-base = {
instanceConfig.ips = {
mors = "172.139.0.2";
};
};
network.gg23 = {
instanceConfig.ips = {
mors = "10.23.0.2";
};
};
};
};
```
This doesn't work because all instance configs are applied to the same namespace, which currently results in a conflict.
Resolving this problem means that new inventory modules cannot be plain nixos modules anymore. If they are configured via `instances` / `instanceConfig` they cannot be configured without using the inventory. (There might be ways to inject instanceConfig but that requires knowledge of inventory internals)
4. Writing modules for multiple instances is cumbersome. Currently the clanModule author has to write one or multiple `fold` operations for potentially every nixos option to define how multiple service instances merge into every single option. The new idea behind this ADR is to pull the common fold function into the outer context and provide it as a common helper; a sketch of such a hand-written fold follows after this list. (See the example below. `perInstance` is analogous to the well-known `perSystem` of flake-parts.)
5. Each role has a different interface. We need to render that interface into json-schema which includes creating an unnecessary test machine currently. Defining the interface at a higher level (outside of any machine context) allows faster evaluation and an isolation by design from any machine.
This allows rendering the UI (options tree) of a service by just knowing the service and the corresponding roles without creating a dummy machine.
6. The interface for defining config is wrong. It is possible to define config that applies to multiple machines at once. It is possible to define config that applies to
a machine as a whole. But this is wrong behavior, because the options exist at the role level, so config must also always exist at the role level.
Currently we merge options and config together but that may produce conflicts. Those module system conflicts are very hard to foresee since they depend on what roles exist at runtime.
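For a sense of the boilerplate behind problem 4, here is a hedged, self-contained sketch of the kind of per-option merge a module author currently has to write by hand; the instance data is hypothetical and shaped like the inventory example above:

```nix
let
  lib = (import <nixpkgs> { }).lib;
  # hypothetical instance configs, as they might appear in the inventory
  instances = {
    instance1.config.ips.mors = "172.139.0.2";
    instance2.config.ips.mors = "10.23.0.2";
  };
  # the hand-written fold that must be repeated for potentially every option
  allIps = lib.foldl' (acc: inst: acc // (inst.config.ips or { })) { } (
    lib.attrValues instances
  );
in
allIps # => { mors = "10.23.0.2"; }; later instances silently win
```

The proposed `perInstance` moves exactly this kind of fold into the framework.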
## Proposed Change
We will create a new module class which is defined by `_class = "clan.service"` ([documented here](https://nixos.org/manual/nixpkgs/stable/#module-system-lib-evalModules-param-class)).
Existing clan modules will still work by continuing to be plain NixOS modules. All new modules can set `_class = "clan.service";` to use the proposed features.
In short, the change introduces a new module class that turns the currently necessary folding of a `clan.service`'s `instances` and `roles` into a common operation. The module author only defines the inner function of these fold operations, which is called a `clan.service` module.
There are the following attributes of such a module:
### `roles.<roleName>.interface`
Each role can have a different interface for how to be configured.
I.e.: A `client` role might have different options than a `server` role.
This attribute should be used to define `options`. (Not `config` !)
The end-user defines the corresponding `config`.
This submodule will be evaluated for each `instance role` combination and passed as an argument into `perInstance`.
This submodule's `options` will be evaluated to build the UI for that module dynamically.
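For illustration, a minimal interface might look like this; the `client` role and its `port` option are hypothetical examples:

```nix
{
  roles.client.interface =
    { lib, ... }:
    {
      # options only; the end-user supplies the matching config via the inventory
      options.port = lib.mkOption {
        type = lib.types.port;
        default = 8080;
        description = "Port the client connects to.";
      };
    };
}
```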
### **Result attributes**
Some common result attributes are produced by modules of this proposal, those will be referenced later in this document but are commonly defined as:
- `nixosModule` A single nixos module. (`{config, ...}:{ environment.systemPackages = []; }`)
- `services.<serviceName>` An attribute set of modules with `_class = clan.service`, which contain the same structure as this whole ADR proposes.
- `vars` To be defined. Reserved for now.
### `roles.<roleName>.perInstance`
This acts like a function that maps over all `service instances` of a given `role`.
It produces the previously defined **result attributes**.
I.e. this allows producing multiple `nixosModule`s, one for every instance of the service,
hence making multiple `service instances` convenient by leveraging the module-system merge behavior.
### `perMachine`
This acts like a function that maps over all `machines` of a given `service`.
It produces the previously defined **result attributes**.
I.e. this allows producing exactly one `nixosModule` per `service`,
making it easy to set NixOS options only once if they have a one-to-one relation to a service being enabled.
Note: `lib.mkIf` can be used on e.g. the machine's roles to make the scope more specific, as sketched below.
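A hedged sketch of that note; the `server` role and the openssh option are placeholder examples, and the `machine` argument is shaped as described in this ADR:

```nix
{
  perMachine =
    { machine, ... }:
    {
      # exactly one nixosModule per service and machine
      nixosModule =
        { lib, ... }:
        {
          # scope the config to machines that carry the 'server' role
          config = lib.mkIf (builtins.elem "server" machine.roles) {
            services.openssh.enable = true;
          };
        };
    };
}
```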
### `services.<serviceName>`
This allows to define nested services.
i.e. the *service* `backup` might define a nested *service* `ssh` which sets up an SSH connection.
This can be defined in `perMachine` and `perInstance`
- For Every `instance` a given `service` may add multiple nested `services`.
- A given `service` may add a static set of nested `services`; Even if there are multiple instances of the same given service.
Q: Why is this not a top-level attribute?
A: Because nested service definitions may also depend on a `role` which must be resolved depending on `machine` and `instance`. The top-level module doesn't know anything about machines. Keeping the service layer machine agnostic allows us to build the UI for a module without adding any machines. (One of the problems with the current system)
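A hedged sketch of such nesting; the service names are hypothetical:

```nix
{
  roles.client.perInstance =
    { instanceName, ... }:
    {
      # every instance may request its own nested service
      services.ssh = {
        _class = "clan.service";
        # ...a full service module of the kind this ADR describes
      };
    };
}
```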
```
zerotier/default.nix
```
```nix
# Some example module
{
_class = "clan.service";
# Analog to flake-parts 'perSystem' only that it takes instance
# The exact arguments will be specified and documented along with the actual implementation.
roles.client.perInstance = {
# attrs : settings of that instance
settings,
# string : name of the instance
instanceName,
# { name :: string , roles :: listOf string; }
machine,
# { {roleName} :: { machines :: listOf string; } }
roles,
...
}:
{
# Return a nixos module for every instance.
# The module author must be aware that this may return multiple modules (one for every instance) which are merged natively
nixosModule = {
config.debug."${instanceName}-client" = settings;
};
};
# Function that is called once for every machine with the role "client"
# Receives at least the following parameters:
#
# machine :: { name :: String, roles :: listOf string; }
# Name of the machine
#
# instances :: { instanceName :: { roleName :: { machines :: [ string ]; }}}
# Resolved roles
# Same type as currently in `clan.inventory.services.<ServiceName>.<InstanceName>.roles`
#
# The exact arguments will be specified and documented along with the actual implementation.
perMachine = {machine, instances, ... }: {
nixosModule =
{ lib, ... }:
{
# Some shared code should be put into a shared file
# Which is then imported into all/some roles
imports = [
../shared.nix
] ++
(lib.optional (builtins.elem "client" machine.roles)
{
options.debug = lib.mkOption {
type = lib.types.attrsOf lib.types.raw;
};
});
};
};
}
```
## Inventory.instances
This document also proposes to add a new attribute to the inventory that allow for exclusive configuration of the new modules.
This allows a better separation of the new and the old way of writing and configuring modules, keeping the new implementation more focused and keeping existing technical debt out from the beginning.
The following thoughts went into this:
- Getting rid of `<serviceName>`: Using only the attribute name (plain string) is not sufficient for defining the source of the service module. Encoding meta information into it would also require some extensible format specification and parser.
- removing instanceConfig and machineConfig: There is no such config. Service configuration must always be role specific, because the options are defined on the role.
- renaming `config` to `settings` or similar. Since `config` is a module system internal name.
- Tags and machines should be attribute sets to allow setting `settings` at that level instead.
```nix
{
inventory.instances = {
"instance1" = {
# Allows to define where the module should be imported from.
module = {
input = "clan-core";
name = "borgbackup";
};
# settings that apply to all client machines
roles.client.settings = {};
# settings that apply to the client service of machine with name <machineName>
# There might be a server service that takes different settings on the same machine!
roles.client.machines.<machineName>.settings = {};
# settings that apply to all client-instances with tag <tagName>
roles.client.tags.<tagName>.settings = {};
};
"instance2" = {
# ...
};
};
}
```
## Iteration note
We want to implement the system as described. Once we have sufficient data on real world use-cases and modules we might revisit this document along with the updated implementation.
## Real world example
The following module demonstrates the idea in the example of *borgbackup*.
```nix
{
_class = "clan.service";
# Define the 'options' of 'settings' see argument of perInstance
roles.server.interface =
{ lib, ... }:
{
options.directory = lib.mkOption {
type = lib.types.str;
default = "/var/lib/borgbackup";
description = ''
The directory where the borgbackup repositories are stored.
'';
};
};
roles.server.perInstance =
{
instanceName,
settings,
roles,
...
}:
{
nixosModule =
{ config, lib, ... }:
let
dir = config.clan.core.settings.directory;
machineDir = dir + "/vars/per-machine/";
allClients = roles.client.machines;
in
{
# services.borgbackup is a native nixos option
config.services.borgbackup.repos =
let
borgbackupIpMachinePath = machine: machineDir + machine + "/borgbackup/borgbackup.ssh.pub/value";
machinesMaybeKey = builtins.map (
machine:
let
fullPath = borgbackupIpMachinePath machine;
in
if builtins.pathExists fullPath then
machine
else
lib.warn ''
Machine ${machine} does not have a borgbackup key at ${fullPath},
run `clan var generate ${machine}` to generate it.
'' null
) allClients;
machinesWithKey = lib.filter (x: x != null) machinesMaybeKey;
hosts = builtins.map (machine: {
name = instanceName + machine;
value = {
path = "${settings.directory}/${machine}";
authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machine)) ];
};
}) machinesWithKey;
in
if hosts != [ ] then builtins.listToAttrs hosts else { };
};
};
roles.client.interface =
{ lib, ... }:
{
# There might be a better interface now. This is just how clan borgbackup was configured in the 'old' way
options.destinations = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
default = name;
description = "the name of the backup job";
};
repo = lib.mkOption {
type = lib.types.str;
description = "the borgbackup repository to backup to";
};
rsh = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
defaultText = "ssh -i \${config.clan.core.vars.generators.borgbackup.files.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
description = "the rsh to use for the backup";
};
};
}
)
);
default = { };
description = ''
destinations where the machine should be backed up to
'';
};
options.exclude = lib.mkOption {
type = lib.types.listOf lib.types.str;
example = [ "*.pyc" ];
default = [ ];
description = ''
Directories/Files to exclude from the backup.
Use * as a wildcard.
'';
};
};
roles.client.perInstance =
{
instanceName,
roles,
machine,
settings,
...
}:
{
nixosModule =
{
config,
lib,
pkgs,
...
}:
let
allServers = roles.server.machines;
# machineName = config.clan.core.settings.machine.name;
# cfg = config.clan.borgbackup;
preBackupScript = ''
declare -A preCommandErrors
${lib.concatMapStringsSep "\n" (
state:
lib.optionalString (state.preBackupCommand != null) ''
echo "Running pre-backup command for ${state.name}"
if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
preCommandErrors["${state.name}"]=1
fi
''
) (lib.attrValues config.clan.core.state)}
if [[ ''${#preCommandErrors[@]} -gt 0 ]]; then
echo "pre-backup commands failed for the following services:"
for state in "''${!preCommandErrors[@]}"; do
echo " $state"
done
exit 1
fi
'';
destinations =
let
destList = builtins.map (serverName: {
name = "${instanceName}-${serverName}";
value = {
repo = "borg@${serverName}:/var/lib/borgbackup/${machine.name}";
rsh = "ssh -i ${
config.clan.core.vars.generators."borgbackup-${instanceName}".files."borgbackup.ssh".path
} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
} // settings.destinations.${serverName};
}) allServers;
in
(builtins.listToAttrs destList);
in
{
config = {
# Derived from the destinations
systemd.services = lib.mapAttrs' (
_: dest:
lib.nameValuePair "borgbackup-job-${instanceName}-${dest.name}" {
# since borgbackup mounts the system read-only, we need to run in a ExecStartPre script, so we can generate additional files.
serviceConfig.ExecStartPre = [
''+${pkgs.writeShellScript "borgbackup-job-${dest.name}-pre-backup-commands" preBackupScript}''
];
}
) destinations;
services.borgbackup.jobs = lib.mapAttrs (_destinationName: dest: {
paths = lib.unique (
lib.flatten (map (state: state.folders) (lib.attrValues config.clan.core.state))
);
exclude = settings.exclude;
repo = dest.repo;
environment.BORG_RSH = dest.rsh;
compression = "auto,zstd";
startAt = "*-*-* 01:00:00";
persistentTimer = true;
encryption = {
mode = "repokey";
passCommand = "cat ${config.clan.core.vars.generators."borgbackup-${instanceName}".files."borgbackup.repokey".path}";
};
prune.keep = {
within = "1d"; # Keep all archives from the last day
daily = 7;
weekly = 4;
monthly = 0;
};
}) destinations;
environment.systemPackages = [
(pkgs.writeShellApplication {
name = "borgbackup-create";
runtimeInputs = [ config.systemd.package ];
text = ''
${lib.concatMapStringsSep "\n" (dest: ''
systemctl start borgbackup-job-${dest.name}
'') (lib.attrValues destinations)}
'';
})
(pkgs.writeShellApplication {
name = "borgbackup-list";
runtimeInputs = [ pkgs.jq ];
text = ''
(${
lib.concatMapStringsSep "\n" (
dest:
# we need yes here to skip the changed url verification
''echo y | /run/current-system/sw/bin/borg-job-${dest.name} list --json | jq '[.archives[] | {"name": ("${dest.name}::${dest.repo}::" + .name)}]' ''
) (lib.attrValues destinations)
}) | jq -s 'add // []'
'';
})
(pkgs.writeShellApplication {
name = "borgbackup-restore";
runtimeInputs = [ pkgs.gawk ];
text = ''
cd /
IFS=':' read -ra FOLDER <<< "''${FOLDERS-}"
job_name=$(echo "$NAME" | awk -F'::' '{print $1}')
backup_name=''${NAME#"$job_name"::}
if [[ ! -x /run/current-system/sw/bin/borg-job-"$job_name" ]]; then
echo "borg-job-$job_name not found: Backup name is invalid" >&2
exit 1
fi
echo y | /run/current-system/sw/bin/borg-job-"$job_name" extract "$backup_name" "''${FOLDER[@]}"
'';
})
];
# every borgbackup instance adds its own vars
clan.core.vars.generators."borgbackup-${instanceName}" = {
files."borgbackup.ssh.pub".secret = false;
files."borgbackup.ssh" = { };
files."borgbackup.repokey" = { };
migrateFact = "borgbackup";
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
pkgs.xkcdpass
];
script = ''
ssh-keygen -t ed25519 -N "" -f $out/borgbackup.ssh
xkcdpass -n 4 -d - > $out/borgbackup.repokey
'';
};
};
};
};
perMachine = {
nixosModule =
{ ... }:
{
clan.core.backups.providers.borgbackup = {
list = "borgbackup-list";
create = "borgbackup-create";
restore = "borgbackup-restore";
};
};
};
}
```
## Prior-art
- https://github.com/NixOS/nixops
- https://github.com/infinisil/nixus

View File

@@ -1,116 +0,0 @@
# Clan as library
## Status
Accepted
## Context
In the long term we envision that the clan application will consist of the following user-facing tools:
- `CLI`
- `TUI`
- `Desktop Application`
- `REST-API`
- `Mobile Application`
We are not sure whether all of those will exist, but the architecture should be generic enough that they are possible without major changes to the underlying system.
## Decision
This leads to the conclusion that we should do `library`-centric development,
with the current `clan` Python code being a library that can be imported to create various tools on top of it.
All **CLI** or **UI** related parts should be moved out of the main library.
*Note: The next person who wants to implement any new frontend should do this first. Currently it looks like the TUI is the next one.*
Imagine roughly the following architecture:
```mermaid
graph TD
%% Define styles
classDef frontend fill:#f9f,stroke:#333,stroke-width:2px;
classDef backend fill:#bbf,stroke:#333,stroke-width:2px;
classDef storage fill:#ff9,stroke:#333,stroke-width:2px;
classDef testing fill:#cfc,stroke:#333,stroke-width:2px;
%% Define nodes
user(["User"]) -->|Interacts with| Frontends
subgraph "Frontends"
CLI["CLI"]:::frontend
APP["Desktop App"]:::frontend
TUI["TUI"]:::frontend
REST["REST API"]:::frontend
end
subgraph "Python"
API["Library <br>for interacting with clan"]:::backend
BusinessLogic["Business Logic<br>Implements actions like 'machine create'"]:::backend
STORAGE[("Persistence")]:::storage
NIX["Nix Eval & Build"]:::backend
end
subgraph "CI/CD & Tests"
TEST["Feature Testing"]:::testing
end
%% Define connections
CLI --> API
APP --> API
TUI --> API
REST --> API
TEST --> API
API --> BusinessLogic
BusinessLogic --> STORAGE
BusinessLogic --> NIX
```
With this very simple design it is ensured that all the basic features remain stable across all frontends.
In the end it is straightforward to create Python library function calls in a testing framework to ensure that kind of stability.
Integration tests and smaller unit-tests should both be utilized to ensure the stability of the library.
Note: Library functions don't have to be json-serializable in general.
Persistence includes but is not limited to: creating git commits, writing to inventory.json, reading and writing vars and to/from disk in general.
## Benefits / Drawbacks
- (+) Less tight coupling of frontend- / backend-teams
- (+) Consistency and inherent behavior
- (+) Performance & Scalability
- (+) Different frontends for different user groups
- (+) Documentation per library function makes it convenient to interact with the clan resources.
- (+) Testing the library ensures stability of the underlyings for all layers above.
- (-) Complexity overhead
- (-) library needs to be designed / documented
- (+) library can be well documented since it is a finite set of functions.
- (-) Error handling might be harder.
- (+) Common error reporting
- (-) different frontends need different features. The library must include them all.
- (+) All those core features must be implemented anyways.
- (+) VPN benchmarking already uses the existing libraries and works relatively well.
## Implementation considerations
Not all required details that need to change over time are possible to be pointed out ahead of time.
The goal of this document is to create a common understanding for how we like our project to be structured.
Any future commits should contribute to this goal.
Some ideas what might be needed to change:
- Having separate locations or packages for the library and the CLI.
- Rename the `clan_cli` package to `clan` and move the `cli` frontend into a subfolder or a separate package.
- Python Argparse or other cli related code should not exist in the `clan` python library.
- `__init__.py` should be very minimal. Only init the business logic models and resources. Note that all `__init__.py` files all the way up in the module tree are always executed as part of the python module import logic and thus should be as small as possible.
i.e. `from clan_cli.vars.generators import ...` executes both `clan_cli/__init__.py` and `clan_cli/vars/__init__.py` if any of those exist.
- `api` folder doesn't make sense since the python library `clan` is the api.
- Logic needed for the webui that performs json serialization and deserialization will live in some `json-adapter` folder or package.
- Code for serializing dataclasses and typed dictionaries is needed for the persistence layer. (i.e. for read-write of inventory.json)
- The inventory-json is a backend resource that is internal. Its logic includes merging, unmerging and partial updates that take nix values and their priorities into account. Nobody should try to read or write to it directly.
Instead there will be library methods i.e. to add a `service` or to update/read/delete some information from it.
- Library functions should be carefully designed with suitable conventions for writing good APIs in mind. (e.g. https://swagger.io/resources/articles/best-practices-in-api-design/)

View File

@@ -1 +0,0 @@
see [architecture-decision-record](https://github.com/joelparkerhenderson/architecture-decision-record)

View File

@@ -1,24 +0,0 @@
# Decision record template by Michael Nygard
This is the template in [Documenting architecture decisions - Michael Nygard](http://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions).
You can use [adr-tools](https://github.com/npryce/adr-tools) for managing the ADR files.
In each ADR file, write these sections:
# Title
## Status
What is the status, such as proposed, accepted, rejected, deprecated, superseded, etc.?
## Context
What is the issue that we're seeing that is motivating this decision or change?
## Decision
What is the change that we're proposing and/or doing?
## Consequences
What becomes easier or more difficult to do because of this change?

4
docs/.gitignore vendored
View File

@@ -1,3 +1,3 @@
/site/reference
/site/static
!/site/static/extra.css
/site/static/Roboto-Regular.ttf
/site/static/FiraCode-VF.ttf

View File

@@ -1,5 +1,4 @@
# Contributing to Clan
# Contributing
**Continuous Integration (CI)**: Each pull request gets automatically tested by gitea. If any errors are detected, it will block pull requests until they're resolved.
@@ -21,14 +20,14 @@ Let's get your development environment up and running:
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
1. **Install direnv**:
2. **Install direnv**:
- To automatically setup a devshell on entering the directory
```bash
nix profile install nixpkgs#nix-direnv-flakes nixpkgs#direnv
nix profile install nixpkgs#nix-direnv-flakes
```
1. **Add direnv to your shell**:
3. **Add direnv to your shell**:
- Direnv needs to [hook into your shell](https://direnv.net/docs/hook.html) to work.
You can do this by executing following command. The example below will setup direnv for `zsh` and `bash`
@@ -37,10 +36,7 @@ Let's get your development environment up and running:
echo 'eval "$(direnv hook zsh)"' >> ~/.zshrc && echo 'eval "$(direnv hook bash)"' >> ~/.bashrc && eval "$SHELL"
```
1. **Allow the devshell**
- Go to `clan-core/pkgs/clan-cli` and do a `direnv allow` to setup the necessary development environment to execute the `clan` command
1. **Create a Gitea Account**:
4. **Create a Gitea Account**:
- Register an account on https://git.clan.lol
- Fork the [clan-core](https://git.clan.lol/clan/clan-core) repository
- Clone the repository and navigate to it
@@ -48,7 +44,30 @@ Let's get your development environment up and running:
```bash
git remote add upstream gitea@git.clan.lol:clan/clan-core.git
```
1. **Allow .envrc**:
5. **Create an access token**:
- Log in to Gitea.
- Go to your account settings.
- Navigate to the Applications section.
- Click Generate New Token.
- Name your token and select all available scopes.
- Generate the token and copy it for later use.
- Your access token is now ready to use with all permissions.
5. **Register Your Gitea Account Locally**:
- Execute the following command to add your Gitea account locally:
```bash
tea login add
```
- Fill out the prompt as follows:
- URL of Gitea instance: `https://git.clan.lol`
- Name of new Login [git.clan.lol]:
- Do you have an access token? Yes
- Token: <yourtoken>
- Set Optional settings: No
6. **Allow .envrc**:
- When you enter the directory, you'll receive an error message like this:
```bash
@@ -56,7 +75,7 @@ Let's get your development environment up and running:
```
- Execute `direnv allow` to automatically execute the shell script `.envrc` when entering the directory.
1. **(Optional) Install Git Hooks**:
7. **(Optional) Install Git Hooks**:
- To syntax check your code you can run:
```bash
nix fmt
@@ -66,44 +85,116 @@ Let's get your development environment up and running:
./scripts/pre-commit
```
## Related Projects
8. **Open a Pull Request**:
- To automatically open up a pull request you can use our tool called:
```
merge-after-ci --reviewers Mic92 Lassulus Qubasa
```
- **Data Mesher**: [data-mesher](https://git.clan.lol/clan/data-mesher)
- **Nixos Facter**: [nixos-facter](https://github.com/nix-community/nixos-facter)
- **Nixos Anywhere**: [nixos-anywhere](https://github.com/nix-community/nixos-anywhere)
- **Disko**: [disko](https://github.com/nix-community/disko)
# Debugging
## Fixing Bugs or Adding Features in Clan-CLI
Here are some methods for debugging and testing the clan-cli:
If you have a bug fix or feature that involves a related project, clone the relevant repository and replace its invocation in your local setup.
## See all possible packages and tests
For instance, if you need to update `nixos-anywhere` in clan-cli, find its usage:
To quickly show all possible packages and tests execute:
```python
run(
nix_shell(
["nixpkgs#nixos-anywhere"],
cmd,
),
RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
)
```bash
nix flake show --system no-eval
```
You can replace `"nixpkgs#nixos-anywhere"` with your local path:
```python
run(
nix_shell(
["<path_to_local_src>#nixos-anywhere"],
cmd,
),
RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
)
Under `checks` you will find all tests that are executed in our CI. Under `packages` you find all our projects.
```
git+file:///home/lhebendanz/Projects/clan-core
├───apps
│ └───x86_64-linux
│ ├───install-vm: app
│ └───install-vm-nogui: app
├───checks
│ └───x86_64-linux
│ ├───borgbackup omitted (use '--all-systems' to show)
│ ├───check-for-breakpoints omitted (use '--all-systems' to show)
│ ├───clan-dep-age omitted (use '--all-systems' to show)
│ ├───clan-dep-bash omitted (use '--all-systems' to show)
│ ├───clan-dep-e2fsprogs omitted (use '--all-systems' to show)
│ ├───clan-dep-fakeroot omitted (use '--all-systems' to show)
│ ├───clan-dep-git omitted (use '--all-systems' to show)
│ ├───clan-dep-nix omitted (use '--all-systems' to show)
│ ├───clan-dep-openssh omitted (use '--all-systems' to show)
│ ├───"clan-dep-python3.11-mypy" omitted (use '--all-systems' to show)
├───packages
│ └───x86_64-linux
│ ├───clan-cli omitted (use '--all-systems' to show)
│ ├───clan-cli-docs omitted (use '--all-systems' to show)
│ ├───clan-ts-api omitted (use '--all-systems' to show)
│ ├───clan-app omitted (use '--all-systems' to show)
│ ├───default omitted (use '--all-systems' to show)
│ ├───deploy-docs omitted (use '--all-systems' to show)
│ ├───docs omitted (use '--all-systems' to show)
│ ├───editor omitted (use '--all-systems' to show)
└───templates
├───default: template: Initialize a new clan flake
└───new-clan: template: Initialize a new clan flake
```
You can execute every test separately by following the tree path `nix build .#checks.x86_64-linux.clan-pytest` for example.
## Test Locally in Devshell with Breakpoints
To test the cli locally in a development environment and set breakpoints for debugging, follow these steps:
1. Run the following command to execute your tests and allow for debugging with breakpoints:
```bash
cd ./pkgs/clan-cli
pytest -n0 -s --maxfail=1 ./tests/test_nameofthetest.py
```
You can place `breakpoint()` in your Python code where you want to trigger a breakpoint for debugging.
## Test Locally in a Nix Sandbox
To run tests in a Nix sandbox, you have two options depending on whether your test functions have been marked as impure or not:
### Running Tests Marked as Impure
If your test functions need to execute `nix build` and have been marked as impure because you can't execute `nix build` inside a Nix sandbox, use the following command:
```bash
nix run .#impure-checks
```
This command will run the impure test functions.
### Running Pure Tests
For test functions that have not been marked as impure and don't require executing `nix build`, you can use the following command:
```bash
nix build .#checks.x86_64-linux.clan-pytest --rebuild
```
This command will run all pure test functions.
### Inspecting the Nix Sandbox
If you need to inspect the Nix sandbox while running tests, follow these steps:
1. Insert an endless sleep into your test code where you want to pause the execution. For example:
```python
import time
time.sleep(3600) # Sleep for one hour
```
2. Use `cntr` and `psgrep` to attach to the Nix sandbox. This allows you to interactively debug your code while it's paused. For example:
```bash
psgrep -a -x your_python_process_name
cntr attach <container id, container name or process id>
```
Or you can also use the [nix breakpoint hook](https://nixos.org/manual/nixpkgs/stable/#breakpointhook)
The <path_to_local_src> doesn't need to be a local path; it can be any valid [flakeref](https://nix.dev/manual/nix/2.26/command-ref/new-cli/nix3-flake.html#flake-references)
and can thus point to already-opened PRs for testing, for example.
# Standards

View File

@@ -43,31 +43,25 @@ exclude_docs: |
nav:
- Home: index.md
- Getting Started:
- Setup Clan: getting-started/index.md
- Create Installer: getting-started/installer.md
- Add Machines: getting-started/configure.md
- Getting Started: getting-started/index.md
- Installer: getting-started/installer.md
- Configure: getting-started/configure.md
- Secrets & Facts: getting-started/secrets.md
- Deploy Machine: getting-started/deploy.md
- Continuous Integration: getting-started/check.md
- Guides:
- Overview: manual/index.md
- Disk Encryption: getting-started/disk-encryption.md
- Mesh VPN: getting-started/mesh-vpn.md
- Backup & Restore: getting-started/backups.md
- Vars Backend: manual/vars-backend.md
- Facts Backend: manual/secrets.md
- Autoincludes: manual/adding-machines.md
- Adding Machines: manual/adding-machines.md
- Inventory: manual/inventory.md
- Secrets: manual/secrets.md
- Secure Boot: manual/secure-boot.md
- Flake-parts: manual/flake-parts.md
- Authoring:
- Modules: clanmodules/index.md
- Disk Templates: manual/disk-templates.md
- Contributing:
- Contribute: contributing/contribute.md
- Debugging: contributing/debugging.md
- Testing: contributing/testing.md
- Repo Layout: manual/repo-layout.md
- Migrate existing Flakes: manual/migration-guide.md
- Contribute: manual/contribute.md
# - Concepts:
# - Overview: concepts/index.md
- Reference:
@@ -95,7 +89,6 @@ nav:
- reference/clanModules/matrix-synapse.md
- reference/clanModules/moonlight.md
- reference/clanModules/mumble.md
- reference/clanModules/mycelium.md
- reference/clanModules/nginx.md
- reference/clanModules/packages.md
- reference/clanModules/postgresql.md
@@ -110,7 +103,6 @@ nav:
- reference/clanModules/thelounge.md
- reference/clanModules/trusted-nix-caches.md
- reference/clanModules/user-password.md
- reference/clanModules/auto-upgrade.md
- reference/clanModules/vaultwarden.md
- reference/clanModules/xfce.md
- reference/clanModules/zerotier-static-peers.md
@@ -125,7 +117,6 @@ nav:
- reference/cli/flash.md
- reference/cli/history.md
- reference/cli/machines.md
- reference/cli/select.md
- reference/cli/secrets.md
- reference/cli/show.md
- reference/cli/ssh.md

View File

@@ -20,7 +20,10 @@
# Frontmatter for clanModules
clanModulesFrontmatter =
let
docs = pkgs.nixosOptionsDoc { options = self.lib.modules.frontmatterOptions; };
docs = pkgs.nixosOptionsDoc {
options = self.lib.modules.frontmatterOptions;
warningsAreErrors = true;
};
in
docs.optionsJSON;
@@ -69,13 +72,7 @@
];
}
''
export CLAN_CORE_PATH=${
self.filter {
include = [
"clanModules"
];
}
}
export CLAN_CORE_PATH=${self}
export CLAN_CORE_DOCS=${jsonDocs.clanCore}/share/doc/nixos/options.json
# A file that contains the links to all clanModule docs
export CLAN_MODULES_VIA_ROLES=${clanModulesViaRoles}

View File

@@ -103,7 +103,7 @@ def render_option(
read_only = option.get("readOnly")
res = f"""
{"#" * level} {sanitize(name) if short_head is None else sanitize(short_head)} {"{: #" + sanitize_anchor(name) + "}" if level > 1 else ""}
{"#" * level} {sanitize(name) if short_head is None else sanitize(short_head)} {"{: #"+sanitize_anchor(name)+"}" if level > 1 else ""}
"""
@@ -125,7 +125,7 @@ def render_option(
**Default**:
```nix
{option.get("default", {}).get("text") if option.get("default") else "No default set."}
{option.get("default",{}).get("text") if option.get("default") else "No default set."}
```
"""
example = option.get("example", {}).get("text")
@@ -585,7 +585,7 @@ Each attribute is documented below
```nix
buildClan {
self = self;
directory = self;
machines = {
jon = { };
sara = { };

View File

@@ -48,12 +48,12 @@ clanModules/borgbackup
=== "User module"
If the module should be ad-hoc loaded.
It can be made available in any project via the [`clan.inventory.modules`](../reference/nix-api/inventory.md#inventory.modules) attribute.
It can be made avilable in any project via the [`clan.inventory.modules`](../reference/nix-api/inventory.md#inventory.modules) attribute.
```nix title="flake.nix"
# ...
buildClan {
# 1. Add the module to the available clanModules with inventory support
# 1. Add the module to the avilable inventory modules
inventory.modules = {
custom-module = ./modules/my_module;
};
@@ -111,7 +111,7 @@ Adds the roles: `client` and `server`
Sometimes a `ClanModule` should be usable via both clan's `inventory` concept but also natively as a NixOS module.
> In the long term, we want most modules to implement support for the inventory,
> but we are also aware that there are certain low-level modules that always serve as a backend for other higher-level `clanModules` with inventory support.
> but we are also aware that there are certain low-level modules that always serve as a backend for other higher-level inventory modules.
> These modules may not want to implement inventory interfaces as they are always used directly by other modules.
This can be achieved by placing an additional `default.nix` into the root of the ClanModules directory as shown:

View File

@@ -1,167 +0,0 @@
Here are some methods for debugging and testing the clan-cli.
## Using a Development Branch
To streamline your development process, I suggest not installing `clan-cli`. Instead, clone the `clan-core` repository and add `clan-core/pkgs/clan-cli/bin` to your PATH to use the checked-out version directly.
!!! Note
After cloning, navigate to `clan-core/pkgs/clan-cli` and execute `direnv allow` to activate the devshell. This will set up a symlink to nixpkgs at a specific location; without it, `clan-cli` won't function correctly.
With this setup, you can easily use [breakpoint()](https://docs.python.org/3/library/pdb.html) to inspect the application's internal state as needed.
This approach is feasible because `clan-cli` only requires a Python interpreter and has no other dependencies.
```nix
pkgs.mkShell {
packages = [
pkgs.python3
];
shellHook = ''
export GIT_ROOT="$(git rev-parse --show-toplevel)"
export PATH=$PATH:~/Projects/clan-core/pkgs/clan-cli/bin
'';
}
```
## The Debug Flag
You can enhance your debugging process with the `--debug` flag in the `clan` command. When you add this flag to any command, it displays all subprocess commands initiated by `clan` in a readable format, along with the source code position that triggered them. This feature makes it easier to understand and trace what's happening under the hood.
```bash
$ clan machines list --debug 1
Debug log activated
nix \
--extra-experimental-features 'nix-command flakes' \
eval \
--show-trace --json \
--print-build-logs '/home/qubasa/Projects/qubasas-clan#clanInternals.machines.x86_64-linux' \
--apply builtins.attrNames \
--json
Caller: ~/Projects/clan-core/pkgs/clan-cli/clan_cli/machines/list.py:96::list_nixos_machines
warning: Git tree '/home/qubasa/Projects/qubasas-clan' is dirty
demo
gchq-local
wintux
```
## VSCode
If you're using VSCode, it has a handy feature that makes paths to source code files clickable in the integrated terminal. Combined with the previously mentioned techniques, this allows you to open a Clan in VSCode, execute a command like `clan machines list --debug`, and receive a printed path to the code that initiates the subprocess. With the `Ctrl` key (or `Cmd` on macOS) and a mouse click, you can jump directly to the corresponding line in the code file and add a `breakpoint()` function to it, to inspect the internal state.
## Finding Print Messages
To identify where a specific print message comes from, you can enable a helpful feature. Simply set the environment variable `export TRACE_PRINT=1`. When you run commands with `--debug` mode, each print message will include information about its source location.
If you need more details, you can expand the stack trace information that appears with each print by setting the environment variable `export TRACE_DEPTH=3`.
## Analyzing Performance
To understand what's causing slow performance, set the environment variable `export CLAN_CLI_PERF=1`. When you complete a clan command, you'll see a summary of various performance metrics, helping you identify what's taking up time.
## See all possible packages and tests
To quickly show all possible packages and tests execute:
```bash
nix flake show
```
Under `checks` you will find all tests that are executed in our CI. Under `packages` you find all our projects.
```
git+file:///home/lhebendanz/Projects/clan-core
├───apps
│ └───x86_64-linux
│ ├───install-vm: app
│ └───install-vm-nogui: app
├───checks
│ └───x86_64-linux
│ ├───borgbackup omitted (use '--all-systems' to show)
│ ├───check-for-breakpoints omitted (use '--all-systems' to show)
│ ├───clan-dep-age omitted (use '--all-systems' to show)
│ ├───clan-dep-bash omitted (use '--all-systems' to show)
│ ├───clan-dep-e2fsprogs omitted (use '--all-systems' to show)
│ ├───clan-dep-fakeroot omitted (use '--all-systems' to show)
│ ├───clan-dep-git omitted (use '--all-systems' to show)
│ ├───clan-dep-nix omitted (use '--all-systems' to show)
│ ├───clan-dep-openssh omitted (use '--all-systems' to show)
│ ├───"clan-dep-python3.11-mypy" omitted (use '--all-systems' to show)
├───packages
│ └───x86_64-linux
│ ├───clan-cli omitted (use '--all-systems' to show)
│ ├───clan-cli-docs omitted (use '--all-systems' to show)
│ ├───clan-ts-api omitted (use '--all-systems' to show)
│ ├───clan-app omitted (use '--all-systems' to show)
│ ├───default omitted (use '--all-systems' to show)
│ ├───deploy-docs omitted (use '--all-systems' to show)
│ ├───docs omitted (use '--all-systems' to show)
│ ├───editor omitted (use '--all-systems' to show)
└───templates
├───default: template: Initialize a new clan flake
└───new-clan: template: Initialize a new clan flake
```
You can execute every test separately by following the tree path `nix run .#checks.x86_64-linux.clan-pytest -L` for example.
## Test Locally in Devshell with Breakpoints
To test the cli locally in a development environment and set breakpoints for debugging, follow these steps:
1. Run the following command to execute your tests and allow for debugging with breakpoints:
```bash
cd ./pkgs/clan-cli
pytest -n0 -s --maxfail=1 ./tests/test_nameofthetest.py
```
You can place `breakpoint()` in your Python code where you want to trigger a breakpoint for debugging.
## Test Locally in a Nix Sandbox
To run tests in a Nix sandbox, you have two options depending on whether your test functions have been marked as impure or not:
### Running Tests Marked as Impure
If your test functions need to execute `nix build` and have been marked as impure because you can't execute `nix build` inside a Nix sandbox, use the following command:
```bash
nix run .#impure-checks -L
```
This command will run the impure test functions.
### Running Pure Tests
For test functions that have not been marked as impure and don't require executing `nix build`, you can use the following command:
```bash
nix build .#checks.x86_64-linux.clan-pytest --rebuild
```
This command will run all pure test functions.
### Inspecting the Nix Sandbox
If you need to inspect the Nix sandbox while running tests, follow these steps:
1. Insert an endless sleep into your test code where you want to pause the execution. For example:
```python
import time
time.sleep(3600) # Sleep for one hour
```
2. Use `cntr` and `psgrep` to attach to the Nix sandbox. This allows you to interactively debug your code while it's paused. For example:
```bash
psgrep <your_python_process_name>
cntr attach <container id, container name or process id>
```
Or you can also use the [nix breakpoint hook](https://nixos.org/manual/nixpkgs/stable/#breakpointhook)

View File

@@ -1,316 +0,0 @@
# Testing your contributions
Each feature added to clan should be tested extensively via automated tests.
This document covers different methods of automated testing, including creating, running and debugging such tests.
In order to test the behavior of clan, different testing frameworks are used depending on the concern:
- NixOS VM tests: for high level integration
- NixOS container tests: for high level integration
- Python tests via pytest: for unit tests and integration tests
- Nix eval tests: for nix functions, libraries, modules, etc.
## NixOS VM Tests
The [NixOS VM Testing Framework](https://nixos.org/manual/nixos/stable/index.html#sec-nixos-tests) is used to create high level integration tests, by running one or more VMs generated from a specified config. Commands can be executed on the booted machine(s) to verify a deployment of a service works as expected. All machines within a test are connected by a virtual network. Internet access is not available.
### When to use VM tests
- testing that a service defined through a clan module works as expected after deployment
- testing clan-cli subcommands which require accessing a remote machine
### When not to use VM tests
NixOS VM Tests are slow and expensive. They should only be used for testing high level integration of components.
VM tests should be avoided wherever it is possible to implement a cheaper unit test instead.
- testing detailed behavior of a certain clan-cli command -> use unit testing via pytest instead
- regression testing -> add a unit test
### Finding examples for VM tests
Existing nixos vm tests in clan-core can be found by using ripgrep:
```shellSession
rg "import.*/lib/test-base.nix"
```
### Locating definitions of failing VM tests
All nixos vm tests in clan are exported as individual flake outputs under `checks.x86_64-linux.{test-attr-name}`.
If a test fails in CI:
- look for the job name of the test near the top of the CI Job page, for example `gitea:clan/clan-core#checks.x86_64-linux.borgbackup/1242`
- in this case `checks.x86_64-linux.borgbackup` is the attribute path
- note the last element of that attribute path, in this case `borgbackup`
- search for the attribute name inside the `/checks` directory via ripgrep
example: locating the vm test named `borgbackup`:
```shellSession
$ rg "borgbackup =" ./checks
./checks/flake-module.nix
41: borgbackup = import ./borgbackup nixosTestArgs;
```
-> the location of that test is `/checks/flake-module.nix` line `41`.
### Adding vm tests
Create a nixos test module under `/checks/{name}/default.nix` and import it in `/checks/flake-module.nix`.
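A minimal sketch of such a test module is shown below; the test name and machine config are hypothetical, and the exact arguments passed in follow whatever `/checks/flake-module.nix` provides (see `nixosTestArgs` in the ripgrep output above):

```nix
# /checks/myservice/default.nix (sketch; 'myservice' is a made-up name)
{ ... }:
{
  name = "myservice";
  nodes.machine =
    { ... }:
    {
      services.openssh.enable = true;
    };
  testScript = ''
    start_all()
    machine.wait_for_unit("sshd.service")
  '';
}
```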
### Running VM tests
```shellSession
nix build .#checks.x86_64-linux.{test-attr-name}
```
(replace `{test-attr-name}` with the name of the test)
### Debugging VM tests
The following techniques can be used to debug a VM test:
#### Print Statements
Locate the definition (see above) and add print statements, like, for example `print(client.succeed("systemctl --failed"))`, then re-run the test via `nix build` (see above)
#### Interactive Shell
- Execute the vm test outside the nix Sandbox via the following command:
`nix run .#checks.x86_64-linux.{test-attr-name}.driver -- --interactive`
- Then run the commands in the machines manually, like for example:
```python
start_all()
machine1.succeed("echo hello")
```
#### Breakpoints
To get an interactive shell at a specific line in the VM test script, add a `breakpoint()` call before the line to debug, then run the test outside of the sandbox via:
`nix run .#checks.x86_64-linux.{test-attr-name}.driver`
## NixOS Container Tests
These are very similar to NixOS VM tests in that they also run virtualized NixOS machines, but they use containers instead of VMs, which are much cheaper to launch.
As of now the container test driver is a downstream development in clan-core.
Basically everything stated under the NixOS VM tests section applies here, with some limitations.
Limitations:
- does not yet support networking
- supports only one machine as of now
### Where to find examples for NixOS container tests
Existing nixos container tests in clan-core can be found by using ripgrep:
```shellSession
rg "import.*/lib/container-test.nix"
```
## Python tests via pytest
Since the clan CLI is written in Python, the `pytest` framework is used to define unit tests and integration tests in Python.
Due to their superior efficiency, these tests should be preferred over VM tests wherever possible.
### When to use python tests
- writing unit tests for python functions and modules, or bugfixes of such
- all integration tests that do not require building or running a nixos machine
- impure integration tests that require internet access (very rare, try to avoid)
### When not to use python tests
- integration tests that require building or running a nixos machine (use NixOS VM or container tests instead)
- testing behavior of a nix function or library (use nix eval tests instead)
### Finding examples of python tests
Existing python tests in clan-core can be found by using ripgrep:
```shellSession
rg "import pytest"
```
### Locating definitions of failing python tests
If any python test fails in the CI pipeline, an error message like this can be found at the end of the log:
```
...
FAILED tests/test_machines_cli.py::test_machine_delete - clan_cli.errors.ClanError: Template 'new-machine' not in 'inputs.clan-core
...
```
In this case the test is defined in the file `/tests/test_machines_cli.py` via the test function `test_machine_delete`.
### Adding python tests
If a specific python module is tested, the test should be located near the tested module in a subdirectory called `./tests`.
If the test is not clearly related to a specific module, put it in the top-level `./tests` directory of the tested python package. For `clan-cli` this would be `/pkgs/clan-cli/clan_cli/tests`.
All filenames must be prefixed with `test_` and test functions prefixed with `test_` for pytest to discover them.
### Running python tests
#### Running all python tests
To run all python tests which are executed in the CI pipeline locally, use this `nix build` command
```shellSession
nix build .#checks.x86_64-linux.clan-pytest-{with,without}-core
```
#### Running a specific python test
To run a specific python test outside the nix sandbox
1. Enter the development environment of the python package, by either:
- Having direnv enabled and entering the directory of the package (e.g. `/pkgs/clan-cli`)
- Or using the command `select-shell {package}` in the top-level dev shell of clan-core (e.g. `select-shell clan-cli`)
2. Execute the test via pytest by issuing
`pytest ./path/to/test_file.py::test_function_name -s -n0`
The flags `-sn0` are useful to forward all stdout/stderr output to the terminal and to allow interactive debugging via `breakpoint()`.
### Debugging python tests
To debug a specific python test, find its definition (see above) and make sure to enter the correct dev environment for that python package.
Modify the test and add `breakpoint()` statements to it.
Execute the test using the flags `-sn0` in order to get an interactive shell at the breakpoint:
```shellSession
pytest ./path/to/test_file.py::test_function_name -sn0
```
## Nix Eval Tests
### When to use nix eval tests
Nix eval tests are good for testing any nix logic, including
- nix functions
- nix libraries
- modules for the nixos module system
When not to use
- tests that require building nix derivations (except some very cheap ones)
- tests that require running programs written in other languages
- tests that require building or running nixos machines
### Finding examples of nix eval tests
Existing nix eval tests can be found via this ripgrep command:
```shellSession
rg "nix-unit --eval-store"
```
### Locating definitions of failing nix eval tests
Failing nix eval tests look like this:
```shellSession
> ✅ test_attrsOf_attrsOf_submodule
> ✅ test_attrsOf_submodule
> ❌ test_default
> /build/nix-8-2/expected.nix --- Nix
> 1 { foo = { bar = { __prio = 1500; }; } 1 { foo = { bar = { __prio = 1501; }; }
> . ; } . ; }
>
>
> ✅ test_no_default
> ✅ test_submodule
> ✅ test_submoduleWith
> ✅ test_submodule_with_merging
>
> 😢 6/7 successful
> error: Tests failed
```
To locate the definition, find the flake attribute name of the failing test near the top of the CI Job page, like for example `gitea:clan/clan-core#checks.x86_64-linux.lib-values-eval/1242`.
In this case `lib-values-eval` is the attribute we are looking for.
Find the attribute via ripgrep:
```shellSession
$ rg "lib-values-eval ="
lib/values/flake-module.nix
21: lib-values-eval = pkgs.runCommand "tests" { nativeBuildInputs = [ pkgs.nix-unit ]; } ''
grmpf@grmpf-nix ~/p/c/clan-core (test-docs)>
```
In this case the test is defined in the file `lib/values/flake-module.nix` line 21
### Adding nix eval tests
In clan core, the following pattern is usually followed:
- tests are put in a `test.nix` file
- a CI Job is exposed via a `flake-module.nix`
- that `flake-module.nix` is imported via the `flake.nix` at the root of the project
For example see `/lib/values/{test.nix,flake-module.nix}`.
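A hedged sketch of what such a `test.nix` typically looks like (the test names and expressions are made up); each test pairs an `expr` with an `expected` value, as described under debugging below:

```nix
{ ... }:
{
  test_addition = {
    expr = 1 + 1;
    expected = 2;
  };
  test_attrset_merge = {
    expr = { a = 1; } // { b = 2; };
    expected = {
      a = 1;
      b = 2;
    };
  };
}
```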
### Running nix eval tests
Since all nix eval tests are exposed via the flake outputs, they can be run via `nix build`:
```shellSession
nix build .#checks.x86_64-linux.{test-attr-name}
```
For quicker iteration times, instead of `nix build` use the `nix-unit` command available in the dev environment.
Example:
```shellSession
nix-unit --flake .#legacyPackages.x86_64-linux.{test-attr-name}
```
### Debugging nix eval tests
Follow the instructions above to find the definition of the test, then use one of the following techniques:
#### Print debugging
Add `lib.trace` or `lib.traceVal` statements in order to print some variables during evaluation, for example:
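A small self-contained sketch (the expression is made up): `lib.traceVal` returns the value it prints, so a suspicious expression can be wrapped without changing the result.

```nix
let
  lib = (import <nixpkgs> { }).lib;
  # prints 'trace: 8080' during evaluation and still evaluates to 8080
  port = lib.traceVal (8000 + 80);
in
port
```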
#### Nix repl
Use `nix repl` to inspect the test.
Each test consists of an `expr` (expression) and an `expected` field. `nix-unit` simply checks whether `expr == expected` and prints the diff if that's not the case.
`nix repl` can be used to inspect `expr` manually, or any other variables that you choose to expose.
Example:
```shellSession
$ nix repl
Nix 2.25.5
Type :? for help.
nix-repl> tests = import ./lib/values/test.nix {}
nix-repl> tests
{
test_attrsOf_attrsOf_submodule = { ... };
test_attrsOf_submodule = { ... };
test_default = { ... };
test_no_default = { ... };
test_submodule = { ... };
test_submoduleWith = { ... };
test_submodule_with_merging = { ... };
}
nix-repl> tests.test_default.expr
{
foo = { ... };
}
```

View File

@@ -1,4 +1,6 @@
# Introduction to Backups
# Backups
## Introduction to Backups
When you're managing your own services, creating regular backups is crucial to ensure your data's safety.
This guide introduces you to Clan's built-in backup functionalities.
@@ -7,6 +9,8 @@ We might add more options in the future, but for now, let's dive into how you ca
## Backing Up Locally with Localbackup
### What is Localbackup?
Localbackup lets you backup your data onto physical storage devices connected to your computer,
such as USB hard drives or network-attached storage. It uses a tool called rsnapshot for this purpose.
@@ -143,25 +147,3 @@ Ensure the path to the public key is correct.
```bash
clan backups create mymachine
```
- **Restoring Backups:** To restore a backup that has been listed by the list command (NAME):
```bash
clan backups restore [MACHINE] [PROVIDER] [NAME]
```
Example (Restoring a machine called `client` with the backup provider `borgbackup`):
```bash
clan backups restore client borgbackup [NAME]
```
The `backups` command is service aware and allows optional specification of the `--service` flag.
To only restore the service called `zerotier` on a machine called `controller` through the backup provider `borgbackup` use the following command:
```bash
clan backups restore client borgbackup [NAME] --service zerotier
```

View File

@@ -1,28 +0,0 @@
### Generate Facts and Vars
Typically, this step is handled automatically when a machine is deployed. However, to enable the use of `nix flake check` with your configuration, it must be completed manually beforehand.
Currently, generating all the necessary facts requires two separate commands. This is due to the coexistence of two parallel secret management solutions:
the newer, recommended version (`clan vars`) and the older version (`clan facts`) that we are slowly phasing out.
To generate both facts and vars, execute the following commands:
```sh
clan facts generate && clan vars generate
```
### Check Configuration
Validate your configuration by running:
```bash
nix flake check
```
This command helps ensure that your system configuration is correct and free from errors.
!!! Tip
You can integrate this step into your [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) workflow to ensure that only valid Nix configurations are merged into your codebase.

View File

@@ -1,8 +1,10 @@
# Configuration - How to configure clan with your own machines
Managing machine configurations can be done in the following ways:
- writing `nix` expressions in a `flake.nix` file,
- placing `autoincluded` files into your machine directory,
- configuring everything in a simple UI (upcoming).
Clan currently offers the following methods to configure machines:
@@ -78,14 +80,9 @@ Adding or configuring a new machine requires two simple steps:
└─nvme0n1p3 nvme-eui.e8238fa6bf530001001b448b4aec2929-part3 swap 16.8G
```
!!! Warning
Make sure to copy the `ID-LINK` from a top-level disk device like `nvme0n1` or `sda` instead of `nvme0n1p1` or `sda1`
1. Edit the following fields inside the `./machines/jon/configuration.nix` and/or `./machines/sara/configuration.nix`
2. Edit the following fields inside the `./machines/jon/configuration.nix` and/or `./machines/sara/configuration.nix`
<!-- Note: Use "jon" instead of "<machine>" as "<" is not supported in title tag -->
```nix title="./machines/jon/configuration.nix" hl_lines="13 18 22 26"
```nix title="./machines/<machine>/configuration.nix" hl_lines="13 18 23 27"
{
imports = [
./hardware-configuration.nix
@@ -98,15 +95,16 @@ Adding or configuring a new machine requires two simple steps:
];
# Put your username here for login
users.users.user.name = "__YOUR_USERNAME__";
users.users.user.username = "__YOUR_USERNAME__";
# Set this for clan commands that use ssh
# Set this for clan commands use ssh i.e. `clan machines update`
# If you change the hostname, you need to update this line to root@<new-hostname>
# This only works however if you have avahi running on your admin machine else use IP
clan.core.networking.targetHost = "root@__IP__";
# Replace this __CHANGE_ME__ with the result of the lsblk command from step 1.
# You can get your disk id by running the following command on the installer:
# Replace <IP> with the IP of the installer printed on the screen or by running the `ip addr` command.
# ssh root@<IP> lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
disko.devices.disk.main.device = "/dev/disk/by-id/__CHANGE_ME__";
# IMPORTANT! Add your SSH key here
@@ -117,32 +115,79 @@ Adding or configuring a new machine requires two simple steps:
}
```
You can also create additional machines using the `clan machines create` command:
```
$ clan machines create --help
usage: clan [-h] [SUBCOMMAND] machines create [-h] [--tags TAGS [TAGS ...]] [--template-name TEMPLATE_NAME]
[--target-host TARGET_HOST] [--debug] [--option name value] [--flake PATH]
machine_name
positional arguments:
machine_name The name of the machine to create
options:
-h, --help show this help message and exit
--tags TAGS [TAGS ...]
Tags to associate with the machine. Can be used to assign multiple machines to services.
--template-name TEMPLATE_NAME
The name of the template machine to import
--target-host TARGET_HOST
Address of the machine to install and update, in the format of user@host:1234
--debug Enable debug logging
--option name value Nix option to set
--flake PATH path to the flake where the clan resides in, can be a remote flake or local, can be set through
the [CLAN_DIR] environment variable
```
!!! Info "Replace `__YOUR_USERNAME__` with the ip of your machine, if you use avahi you can also use your hostname"
!!! Info "Replace `__IP__` with the ip of your machine, if you use avahi you can also use your hostname"
!!! Info "Replace `__CHANGE_ME__` with the appropriate `ID-LINK` identifier, such as `nvme-eui.e8238fa6bf530001001b448b4aec2929`"
!!! Info "Replace `__CHANGE_ME__` with the appropriate identifier, such as `nvme-eui.e8238fa6bf530001001b448b4aec2929`"
!!! Info "Replace `__YOUR_SSH_KEY__` with your personal key, like `ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILoMI0NC5eT9pHlQExrvR5ASV3iW9+BXwhfchq0smXUJ jon@jon-desktop`"
These steps will allow you to update your machine later.
You can also create additional machines using the cli:
### Step 2: Detect Drivers
```
$ clan machines create <machinename>
Generate the `hardware-configuration.nix` file for your machine by executing the following command:
```bash
clan machines update-hardware-config [MACHINE_NAME] [HOSTNAME]
```
Replace `[MACHINE_NAME]` with the name of the machine, e.g. `jon`, and `[HOSTNAME]` with the IP address or hostname of the machine within the network, e.g. `<IP>`.
!!! Example
```bash
clan machines update-hardware-config jon
```
### Step 2: Custom Disk Formatting
This command connects to the IP configured in the previous step, runs `nixos-generate-config` to detect hardware configurations (excluding filesystems), and writes them to `machines/jon/hardware-configuration.nix`.
### Step 3: Custom Disk Formatting
In `./modules/disko.nix`, a simple `ext4` disk partitioning scheme is defined for the Disko module. For more complex disk partitioning setups,
refer to the [Disko templates](https://github.com/nix-community/disko-templates) or [Disko examples](https://github.com/nix-community/disko/tree/master/example).
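For orientation, a single-disk `ext4` layout in Disko follows roughly this shape (a sketch; the device path and partition sizes are placeholders you must adapt):
```nix
{
  disko.devices.disk.main = {
    type = "disk";
    # Replace with the ID-LINK of your disk (see the configuration guide)
    device = "/dev/disk/by-id/__CHANGE_ME__";
    content = {
      type = "gpt";
      partitions = {
        ESP = {
          size = "500M";
          type = "EF00"; # EFI system partition
          content = {
            type = "filesystem";
            format = "vfat";
            mountpoint = "/boot";
          };
        };
        root = {
          size = "100%";
          content = {
            type = "filesystem";
            format = "ext4";
            mountpoint = "/";
          };
        };
      };
    };
  };
}
```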
### (Optional): Renaming Machine
### Step 4: Custom Configuration
For renaming jon to your own machine name, you can use the following command:
Modify `./machines/jon/configuration.nix` to personalize the system settings according to your requirements.
If you wish to give your machine a different name, follow these steps:
```
git mv ./machines/jon ./machines/newname
mv ./machines/jon/configuration.nix ./machines/newname/configuration.nix
```
Then rename `jon` to your preferred name in `machines` in `flake.nix` as well as in the import line:
```diff
- imports = [ ./machines/jon/configuration.nix ];
+ imports = [ ./machines/__NEW_NAME__/configuration.nix ];
```
!!! Info "Replace `__NEW_NAME__` with the name of the machine"
Note that our clan lives inside a git repository.
Only files that have been added with `git add` are recognized by `nix`.
So for every file that you add or rename you also need to run:
@@ -151,11 +196,22 @@ So for every file that you add or rename you also need to run:
git add ./path/to/my/file
```
For renaming jon to your own machine name, you can use the following command:
### (Optional): Removing a Machine
```
git mv ./machines/jon ./machines/newname
```
If you only want to set up a single machine at this point, you can delete `sara` from `flake.nix` as well as from the machines directory:
```
git rm -rf ./machines/sara
git rm ./machines/sara
```
---
## What's next?
- [Secrets & Facts](secrets.md): Setting up secrets with sops-nix
---

View File

@@ -1,7 +1,14 @@
# Deploy your Clan
# Deploy Machine
Now that you have created a new machine, we will walk through how to install it.
Integrating a new machine into your Clan environment is an easy yet flexible process, allowing for straightforward management of multiple NixOS configurations.
We'll walk you through adding a new computer to your Clan.
## Installing a New Machine
Clan CLI, in conjunction with [nixos-anywhere](https://github.com/nix-community/nixos-anywhere), provides a seamless method for installing NixOS on various machines.
This process involves preparing a suitable hardware and disk partitioning configuration and ensuring the target machine is accessible via SSH.
### Step 0. Prerequisites
@@ -18,7 +25,7 @@ Now that you have created a new machine, we will walk through how to install it.
2. Boot the target machine and connect it to a network that makes it reachable from your setup computer.
=== "**Cloud VMs**"
=== "**Remote Machines**"
- [x] **Two Computers**: You need one computer that you're getting ready (we'll call this the Target Computer) and another one to set it up from (we'll call this the Setup Computer). Make sure both can talk to each other over the network using SSH.
- [x] **Machine configuration**: See our basic [configuration guide](./configure.md)
@@ -101,27 +108,32 @@ Now that you have created a new machine, we will walk through how to install it.
For easy sharing of deployment information via QR code, we highly recommend using [KDE Connect](https://apps.kde.org/de/kdeconnect/).
There are two ways to deploy your machine:
=== "**Password Auth**"
Run the following command to login over SSH with password authentication
1. **SSH with Password Authentication**
Run the following command to install using SSH:
```bash
clan machines install [MACHINE] --target-host <IP> --update-hardware-config nixos-facter
```
=== "**QR Code Auth**"
Using the JSON contents of the QR Code:
```terminal
clan machines install [MACHINE] --json "[JSON]" --update-hardware-config nixos-facter
```
OR using a picture containing the QR code
```terminal
clan machines install [MACHINE] --png [PATH] --update-hardware-config nixos-facter
clan machines install [MACHINE] --target-host <IP>
```
=== "**Cloud VM**"
2. **Scanning a QR Code for Installation Details**
You can input the information by following one of these methods:
- **Using a JSON String or File Path:**
Provide the path to a JSON string or input the string directly:
```terminal
clan machines install [MACHINE] --json [JSON]
```
- **Using an Image Containing the QR Code:**
Provide the path to an image file containing the relevant QR code:
```terminal
clan machines install [MACHINE] --png [PATH]
```
=== "**SSH access**"
Replace `<target_host>` with the **target computer's IP address**:
```bash
clan machines install [MACHINE] --target-host <target_host> --update-hardware-config nixos-facter
clan machines install [MACHINE] --target-host <target_host>
```
@@ -203,4 +215,12 @@ buildClan {
This is useful for machines that are not always online or are not part of the regular update cycle.
---
## What's next ?
- [**Disk Encryption**](./disk-encryption.md): Configure disk encryption with remote decryption
- [**Mesh VPN**](./mesh-vpn.md): Configuring a secure mesh network.
---

View File

@@ -14,7 +14,7 @@ lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
=== "**Single Disk**"
Below is the configuration for `disko.nix`
```nix hl_lines="13 53"
```nix hl_lines="17 48"
--8<-- "docs/code-examples/disko-single-disk.nix"
```
@@ -22,7 +22,7 @@ lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
=== "**Raid 1**"
Below is the configuration for `disko.nix`
```nix hl_lines="13 53 54"
```nix hl_lines="17 48 49"
--8<-- "docs/code-examples/disko-raid.nix"
```
@@ -49,8 +49,7 @@ Replace `kernelModules` with the ethernet module loaded one on your target machi
port = 7172;
authorizedKeys = [ "<yourkey>" ];
hostKeys = [
"/var/lib/initrd_host_ed25519_key"
"/var/lib/initrd_host_rsa_key"
"/var/lib/initrd-ssh-key"
];
};
};
@@ -74,7 +73,7 @@ Before starting the installation process, ensure that the SSH public key is copi
ssh-copy-id -o PreferredAuthentications=password -o PubkeyAuthentication=no root@nixos-installer.local
```
### Step 1.5: Prepare Secret Key and Partition Disks
### Step 1.5: Prepare Secret Key and Clear Disk Data
1. Access the installer using SSH:
@@ -91,13 +90,13 @@ nano /tmp/secret.key
3. Discard the old disk partition data:
```bash
blkdiscard /dev/disk/by-id/<installdisk>
blkdiscard /dev/disk/by-id/nvme-eui.002538b931b59865
```
4. Run `clan` machines install, only running kexec and disko, with the following command:
4. Run the `clan` machine installation with the following command:
```bash
clan machines install gchq-local --target-host root@nixos-installer --phases kexec,disko
clan machines install gchq-local --target-host root@nixos-installer --yes --no-reboot
```
### Step 2: ZFS Pool Import and System Installation
@@ -108,10 +107,14 @@ clan machines install gchq-local --target-host root@nixos-installer --phases kex
ssh root@nixos-installer.local
```
2. Run the following command on the remote installation environment:
2. Perform the following commands on the remote installation environment:
```bash
zpool import zroot
zfs set keylocation=prompt zroot/root
zfs load-key zroot/root
zfs set mountpoint=/mnt zroot/root/nixos
mount /dev/nvme0n1p2 /mnt/boot
```
3. Disconnect from the SSH session:
@@ -120,36 +123,43 @@ zfs set keylocation=prompt zroot/root
CTRL+D
```
4. Locally generate ssh host keys. You only need to generate ones for the algorithms you're using in `authorizedKeys`.
4. Securely copy your local `initrd_rsa_key` to the installer's `/mnt` directory:
```bash
ssh-keygen -q -N "" -t ed25519 -f ./initrd_host_ed25519_key
ssh-keygen -q -N "" -t rsa -b 4096 -f ./initrd_host_rsa_key
scp ~/.ssh/initrd_rsa_key root@nixos-installer.local:/mnt/var/lib/initrd-ssh-key
```
5. Securely copy your local initrd ssh host keys to the installer's `/mnt` directory:
5. SSH back into the installer:
```bash
scp ./initrd_host* root@nixos-installer.local:/mnt/var/lib/
ssh root@nixos-installer.local
```
6. Install nixos to the mounted partitions
6. Navigate to the `/mnt` directory, enter the `nixos-enter` environment, and then exit:
```bash
clan machines install gchq-local --target-host root@nixos-installer --phases install
cd /mnt
nixos-enter
realpath /run/current-system
exit
```
7. After the installation process, unmount `/mnt/boot`, change the ZFS mountpoints and unmount all the ZFS volumes by exporting the zpool:
7. Run the `nixos-install` command with the appropriate system path `<SYS_PATH>`:
```bash
nixos-install --no-root-passwd --no-channel-copy --root /mnt --system <SYS_PATH>
```
8. After the installation process, unmount `/mnt/boot`, change the ZFS mountpoint, and reboot the system:
```bash
umount /mnt/boot
cd /
zfs set -u mountpoint=/ zroot/root/nixos
zfs set -u mountpoint=/tmp zroot/root/tmp
zfs set -u mountpoint=/home zroot/root/home
zpool export zroot
zfs set mountpoint=/ zroot/root/nixos
reboot
```
8. Perform a reboot of the machine and remove the USB installer.
9. Perform a hard reboot of the machine and remove the USB stick.
### Step 3: Accessing the Initial Ramdisk (initrd) Environment
@@ -162,7 +172,7 @@ ssh -p 7172 root@192.168.178.141
2. Run the `systemd-tty-ask-password-agent` utility to query a password:
```bash
systemd-tty-ask-password-agent
systemd-tty-ask-password-agent --query
```
After completing these steps, your NixOS should be successfully installed and ready for use.

View File

@@ -1,9 +1,6 @@
# :material-clock-fast: Getting Started
Ready to create your own clan and manage a fleet of machines? Follow these simple steps to get started.
By the end of this guide, you'll have a fresh NixOS configuration ready to push to one or more machines. You'll create a new git repository and a flake, and all you need is at least one machine to push to. This is the easiest way to begin, and we recommend copying your existing configuration into this new setup!
Create your own clan with these initial steps and manage a fleet of machines with a single testable git repository!
### Prerequisites
@@ -42,7 +39,7 @@ By the end of this guide, you'll have a fresh NixOS configuration ready to push
Add the Clan CLI into your development workflow:
```bash
nix shell git+https://git.clan.lol/clan/clan-core#clan-cli --refresh
nix shell git+https://git.clan.lol/clan/clan-core#clan-cli
```
You can find reference documentation for the `clan` cli program [here](../reference/cli/index.md).
@@ -54,8 +51,6 @@ clan --help
### Step 2: Initialize Your Project
If you want to migrate an existing project, follow this [guide](https://docs.clan.lol/manual/migration-guide/).
Set the foundation of your Clan project by initializing it as follows:
```bash
@@ -92,21 +87,6 @@ This should yield the following:
5 directories, 9 files
```
??? info "Recommended way of sourcing the `clan` cli tool"
The default template also adds the `clan` cli tool to the development shell.
This means you can get the exact version you need directly from the folder
you are in right now.
In the `my-clan` directory run the following command:
```
nix develop
```
That way you will have the tool available in the shell environment.
We also recommend setting up [direnv](https://direnv.net/) for your shell, for a more convenient
experience.
```bash
clan machines list
```
@@ -120,3 +100,10 @@ sara
You just successfully bootstrapped your first clan directory.
---
### What's Next?
- [**Installer**](./installer.md): Setting up new computers remotely is easy with an USB stick.
---

View File

@@ -1,16 +1,11 @@
# Clan Installer Image for Physical Machines
# Installer
To install Clan on physical machines, you need to use our custom installer image. This is necessary for proper installation and operation.
Our installer image simplifies the process of performing remote installations.
!!! note "Using a Cloud VM?"
If you're using a cloud provider's virtual machine (VM), you can skip this section and go directly to the [Configure Machines](configure.md) step. In this scenario, we automatically use [nixos-anywhere](https://github.com/nix-community/nixos-anywhere) to replace the kernel during runtime.
??? info "Why nixos-anywhere Doesn't Work on Physical Hardware?"
nixos-anywhere relies on [kexec](https://wiki.archlinux.org/title/Kexec) to replace the running kernel with our custom one. This method often has compatibility issues with real hardware, especially systems with dedicated graphics cards like laptops and servers, leading to crashes and black screens.
??? info "Reasons for a Custom Install Image"
Our custom install images are built to include essential tools like [nixos-facter](https://github.com/nix-community/nixos-facter) and support for [ZFS](https://wiki.archlinux.org/title/ZFS). They're also optimized to run on systems with as little as 1 GB of RAM, ensuring efficient performance even on lower-end hardware.
Follow our step-by-step guide to create and transfer this image onto a bootable USB drive.
!!! info
If you already have a NixOS machine you can ssh into (in the cloud for example) you can skip this chapter and go directly to [Configure Machines](configure.md).
### Step 0. Prerequisites
@@ -45,9 +40,9 @@ To install Clan on physical machines, you need to use our custom installer image
sudo umount /dev/sdb1
```
=== "**Linux OS**"
### Step 2. Create a Custom Installer
### Step 2. Flash Custom Installer
Using clan flash enables the inclusion of ssh public keys into the image.
Using clan flash enables the inclusion of ssh public keys and wifi access points.
It also allows setting the language and keymap in the installer image.
```bash
@@ -66,8 +61,7 @@ sudo umount /dev/sdb1
The `clan flash` utility will erase the disk. Make sure to specify the correct device
- **SSH-Pubkey Option**
- **SSH-Pubkey Option**:
To add an ssh public key into the installer image append the option:
```
--ssh-pubkey <pubkey_path>
@@ -75,21 +69,19 @@ sudo umount /dev/sdb1
If you do not have an ssh key yet, you can generate one with the `ssh-keygen -t ed25519` command.
This ssh key will be installed into the root user.
- **Connect to the installer**
- **Connect to the installer**
On boot, the installer will display on-screen the IP address it received from the network.
If you need to configure Wi-Fi first, refer to the next section.
If Multicast-DNS (Avahi) is enabled on your own machine, you can also access the installer using the `flash-installer.local` address.
- **List Keymaps**
- **List Keymaps**:
You can get a list of all keymaps with the following command:
```
clan flash list keymaps
```
- **List Languages**
- **List Languages**:
You can get a list of all languages with the following command:
```
clan flash list languages
@@ -202,3 +194,10 @@ Press ++ctrl+d++ to exit `IWD`.
You're all set up
---
## What's next?
- [Configure Machines](configure.md): Customize machine configuration
---

View File

@@ -1,3 +1,4 @@
# Mesh VPN
This guide provides detailed instructions for configuring
[ZeroTier VPN](https://zerotier.com) within Clan. Follow the
@@ -18,53 +19,14 @@ Clan
If you select multiple network technologies at the same time, e.g. ZeroTier and Yggdrasil,
you must choose one of them as the primary network; the machines are always connected via the primary network.
This guide shows you how to configure `zerotier` either through NixOS options directly or through Clan's `Inventory` system.
## 1. Set-Up the VPN Controller
The VPN controller is initially essential for providing configuration to new
peers. Once addresses are allocated, the controller's continuous operation is not essential.
=== "**Inventory**"
## 1. Choose the Controller
The controller is the initial entrypoint for new machines into the VPN.
It will sign the IDs of new machines.
Once IDs are signed, the controller's continuous operation is not essential.
A good controller choice is nevertheless a machine that can always be reached for updates, so that new peers can be added to the network.
For the purpose of this guide we have two machines:
- The `controller` machine, which will be the zerotier controller.
- The `new_machine` machine, which is the machine we want to add to the vpn network.
## 2. Configure the Inventory
```nix
clan.inventory = {
services.zerotier.default = {
roles.controller.machines = [
"controller"
];
roles.peer.machines = [
"new_machine"
];
};
};
```
## 3. Apply the Configuration
Update the `controller` machine:
```bash
clan machines update controller
```
=== "**NixOS Options**"
## 1. Set-Up the VPN Controller
The VPN controller is initially essential for providing configuration to new
peers. Once addresses are allocated, the controller's continuous operation is not essential.
1. **Designate a Machine**: Label a machine as the VPN controller in the clan,
1. **Designate a Machine**: Label a machine as the VPN controller in the clan,
referred to as `<CONTROLLER>` henceforth in this guide.
2. **Add Configuration**: Input the following configuration to the NixOS
2. **Add Configuration**: Input the following configuration to the NixOS
configuration of the controller machine:
```nix
clan.core.networking.zerotier.controller = {
@@ -72,24 +34,24 @@ This guide shows you how to configure `zerotier` either through `NixOS Options`
public = true;
};
```
3. **Update the Controller Machine**: Execute the following:
3. **Update the Controller Machine**: Execute the following:
```bash
clan machines update <CONTROLLER>
```
Your machine is now operational as the VPN controller.
## 2. Add Machines to the VPN
## 2. Add Machines to the VPN
To introduce a new machine to the VPN, adhere to the following steps:
To introduce a new machine to the VPN, adhere to the following steps:
1. **Update Configuration**: On the new machine, incorporate the following to its
1. **Update Configuration**: On the new machine, incorporate the following to its
configuration, substituting `<CONTROLLER>` with the controller machine name:
```nix
{ config, ... }: {
clan.core.networking.zerotier.networkId = builtins.readFile (config.clan.core.settings.directory + "/machines/<CONTROLLER>/facts/zerotier-network-id");
}
```
1. **Update the New Machine**: Execute:
1. **Update the New Machine**: Execute:
```bash
$ clan machines update <NEW_MACHINE>
```
@@ -132,7 +94,7 @@ This guide shows you how to configure `zerotier` either through `NixOS Options`
```
Substitute `<ID>` with the ZeroTier ID obtained previously.
2. **Verify Connection**: On the `new_machine`, re-execute:
2. **Verify Connection**: On the `new_machine`, re-execute:
```bash
$ sudo zerotier-cli info
```

View File

@@ -1,8 +1,8 @@
# Secrets / Facts
Clan enables encryption of secrets (such as passwords & keys) ensuring security and ease-of-use among users.
By default Clan utilizes the [sops](https://github.com/getsops/sops) format and integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
Clan can also be configured to be used with other secret store [backends](https://docs.clan.lol/reference/clan-core/vars/#clan.core.vars.settings.secretStore).
Clan utilizes the [sops](https://github.com/getsops/sops) format and integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
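Switching the backend is a single option; the sketch below uses the unencrypted `fs` store that clan-core's own demo configuration uses (`sops` is the default):
```nix
{
  # Select the secret store backend used by vars.
  # "sops" is the default; "fs" stores values unencrypted on disk
  # and is mainly useful for testing.
  clan.core.vars.settings.secretStore = "fs";
}
```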
This guide will walk you through:
@@ -40,7 +40,7 @@ Also add your age public key to the repository with 'clan secrets users add YOUR
### Add Your Public Key
```bash
clan secrets users add $USER --age-key <your_public_key>
clan secrets users add $USER <your_public_key>
```
It's best to choose the same username as on your Setup/Admin Machine that you use to control the deployment with.
@@ -54,3 +54,38 @@ sops/
└── key.json
```
If you followed the quickstart tutorial, all necessary secrets are initialized at this point.
### Generate Facts and Vars
Typically, this step is handled automatically when a machine is deployed. However, to enable the use of `nix flake check` with your configuration, it must be completed manually beforehand.
Currently, generating all the necessary facts requires two separate commands. This is due to the coexistence of two parallel secret management solutions: the older, stable version (`clan secrets` and `clan facts`) and the newer, experimental version (`clan vars`).
To generate both facts and vars, execute the following commands:
```sh
clan facts generate && clan vars generate
```
### Check Configuration
Validate your configuration by running:
```bash
nix flake check
```
This command helps ensure that your system configuration is correct and free from errors.
!!! Tip
You can integrate this step into your [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) workflow to ensure that only valid Nix configurations are merged into your codebase.
## What's next?
- [Deployment](deploy.md): How to remotely deploy your machine
- The full [Secrets](../manual/secrets.md) guide, if you want to know more about how to save and share passwords in your clan

View File

@@ -4,13 +4,13 @@ hide:
- toc
---
# :material-home: Welcome to **Clan**'s documentation
# :material-home: Welcome to **Clan**'s awesome documentation
[Getting Started](./getting-started/index.md){ .md-button }
## Tutorials
## What's inside
**Learning-oriented adventures with a hands-on experience.**
This documentation is structured into the following sections
<div class="grid cards" markdown>
@@ -19,69 +19,25 @@ hide:
---
Create your own clan and get everything
running in minutes
running in a couple of minutes.
[:octicons-arrow-right-24: Getting started](./getting-started/index.md)
- :fontawesome-solid-user-group:{ .lg .middle } __Authoring Modules__
- :material-sign-direction:{ .lg .middle } __Guides__
---
Create clanModules that can be reused by the community.
[:octicons-arrow-right-24: Authoring clanModules](./clanmodules/index.md)
</div>
## :material-book: Guides
**How-to Guides for achieving a certain goal or solving a specific issue.**
<div class="grid cards" markdown>
- [Autoincludes](./manual/adding-machines.md)
---
Learn how Clan automatically includes machines and Nix files.
- [Vars Backend](./manual/vars-backend.md)
---
Learn how to manage secrets with vars.
- [Inventory](./manual/inventory.md)
---
Clan's declaration format for running **services** on one or multiple **machines**.
- [Flake-parts](./manual/flake-parts.md)
---
Use clan with [flake.parts](https://flake.parts/)
- [Contribute](./contributing/contribute.md)
---
Discover how to set up a development environment to contribute to Clan!
</div>
## API Reference
**Reference API Documentation**
<div class="grid cards" markdown>
- [Reference Overview](./reference/index.md)
---
Learn how to interface with Clan programmatically
Instructions and explanations for practical implementations, ordered by topic.
[:octicons-arrow-right-24: Guides](./manual/index.md)
- :material-api:{ .lg .middle } __Reference__
---
Detailed Specification of Functions and APIs.
[:octicons-arrow-right-24: Reference](./reference/index.md)
</div>

View File

@@ -1,3 +1,4 @@
# Adding Machines
Clan has two general methods of adding machines:
@@ -17,8 +18,6 @@ Every folder `machines/{machineName}` will be registered automatically as a Clan
- [x] `machines/{machineName}/facter.json` Automatically configured, for further information see [nixos-facter](https://clan.lol/blog/nixos-facter/)
- [x] `machines/{machineName}/disko.nix` Automatically loaded, for further information see the [disko docs](https://github.com/nix-community/disko/blob/master/docs/quickstart.md).
## Manual declaration
Machines can also be added manually under `buildClan`, `clan.*` in flake-parts, or via the [`inventory`](../manual/inventory.md).
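As a sketch, a manual declaration via `buildClan` looks like this (the machine name and module path are examples):
```nix
clan-core.lib.buildClan {
  # 'self' needs to point at the repository root
  self = self;
  machines = {
    jon = {
      nixpkgs.hostPlatform = "x86_64-linux";
      imports = [ ./machines/jon/configuration.nix ];
    };
  };
}
```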

View File

@@ -1,3 +1,5 @@
# Disk Templates
!!! Danger ":fontawesome-solid-road-barrier: Under Construction :fontawesome-solid-road-barrier:"
Currently under construction; use with caution.

View File

@@ -1,3 +1,4 @@
# Clan with `flake-parts`
Clan supports integration with [flake.parts](https://flake.parts/), a framework which allows composing NixOS modules in a modular way.
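A minimal flake wiring the two together might look like the following sketch (it assumes clan-core exposes its flake-parts module as `flakeModules.default`; the clan name and machine are illustrative):
```nix
{
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
    flake-parts.url = "github:hercules-ci/flake-parts";
    flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs";
    clan-core.url = "git+https://git.clan.lol/clan/clan-core";
    clan-core.inputs.nixpkgs.follows = "nixpkgs";
  };

  outputs =
    inputs@{ flake-parts, ... }:
    flake-parts.lib.mkFlake { inherit inputs; } {
      systems = [ "x86_64-linux" ];
      # Pull in the clan flake-parts module
      imports = [ inputs.clan-core.flakeModules.default ];
      clan = {
        meta.name = "my-clan"; # must be unique
        machines.jon.imports = [ ./machines/jon/configuration.nix ];
      };
    };
}
```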

66
docs/site/manual/index.md Normal file
View File

@@ -0,0 +1,66 @@
# :material-book: Guides
Instructions and explanations for practical implementations, ordered by topic.
## Tutorials
**Learning-oriented adventures with a hands-on experience.**
<div class="grid cards" markdown>
- :material-clock-fast:{ .lg .middle } __Set up in 15 minutes__
---
Create your own clan and get everything
running in minutes
[:octicons-arrow-right-24: Getting started](../getting-started/index.md)
- :fontawesome-solid-user-group:{ .lg .middle } __Authoring Modules__
---
Create clanModules that can be reused by the community.
[:octicons-arrow-right-24: Authoring clanModules](../clanmodules/index.md)
</div>
## Guides
**How-to Guides for achieving a certain goal or solving a specific issue.**
<div class="grid cards" markdown>
- [Machines](./adding-machines.md)
---
Learn how Clan automatically includes machines and Nix files.
- [Secrets](./secrets.md)
---
Learn how to manage secrets.
- [Inventory](./inventory.md)
---
Clan's declaration format for running **services** on one or multiple **machines**.
- [Flake-parts](./flake-parts.md)
---
Use clan with [flake-parts](https://flake-parts.dev)
- [Contribute](./contribute.md)
---
Discover how to set up a development environment to contribute to Clan!
</div>

View File

@@ -1,15 +1,10 @@
# Inventory
`Inventory` is an abstract service layer for consistently configuring distributed services across machine boundaries.
## Concept
See [Inventory API Documentation](../reference/nix-api/inventory.md)
Its concept is slightly different from what NixOS veterans might be used to. The inventory is a service definition on a higher level, not a machine configuration. This allows you to define a consistent and coherent service.
The inventory logic will automatically derive the modules and configurations to enable on each machine in your `clan` based on its `role`. This makes it super easy to set up distributed `services` such as Backups, Networking, traditional cloud services, or peer-to-peer based applications.
The following tutorial will walk you through setting up a backup service, where the terms `Service` and `Role` will become clearer.
See also: [Inventory API Documentation](../reference/nix-api/inventory.md)
This guide will walk you through setting up a backup service, where the inventory becomes useful.
!!! example "Experimental status"
The inventory implementation is not considered stable yet.
@@ -23,13 +18,17 @@ See also: [Inventory API Documentation](../reference/nix-api/inventory.md)
## Services
The inventory defines `services`. Membership of `machines` is defined via `roles` exclusively.
The inventory defines `services`. Membership of `machines` is defined via roles exclusively.
See each [modules documentation](../reference/clanModules/index.md) for its available roles.
See each [module documentation](../reference/clanModules/index.md) for available roles.
!!! Note
It is possible to use any [clanModule](../reference/clanModules/index.md) in the inventory and add machines via
`roles.default.*`
### Adding services to machines
A service can be added to one or multiple machines via `Roles`. Clan's `Role` interface provides sane defaults for a module, which allows the module author to reduce the configuration overhead to a minimum.
A module can be added to one or multiple machines via `Roles`. Clan's `Role` interface provides sane defaults for a module, which allows the module author to reduce the configuration overhead to a minimum.
Each service can still be customized and configured according to the module's options.
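As a sketch of such a customization, a service instance can combine role membership with per-machine settings (the `borgbackup` module is used as an example here; the machine names and option values are illustrative):
```nix
{
  inventory.services.borgbackup.instance_1 = {
    # Membership is defined via roles
    roles.client.machines = [ "laptop" ];
    roles.server.machines = [ "backup-host" ];
    # Per-machine customization according to the module's options
    machines.laptop.config = {
      destinations.backup-host.repo = "borg@backup-host:.";
    };
  };
}
```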

View File

@@ -1,171 +0,0 @@
# Migrate existing NixOS configurations
This guide will help you migrate your existing Nix configurations into Clan.
!!! Warning
Migrating instead of starting new can be trickier and might lead to bugs or
unexpected issues. We recommend following the [Getting Started](../getting-started/index.md) guide first. Once you have a working setup, you can easily transfer your Nix configurations over.
## Back up your existing configuration!
Before you start, it is strongly recommended to back up your existing
configuration in any form you see fit. If you use version control to manage
your configuration changes, it is also a good idea to follow the migration
guide in a separate branch until everything works as expected.
## Starting Point
We assume you are already using NixOS flakes to manage your configuration. If
not, migrate to a flake-based setup following the official [NixOS
documentation](https://nix.dev/manual/nix/2.25/command-ref/new-cli/nix3-flake.html).
The snippet below shows a common Nix flake. For this example we will assume you
have two hosts: **berlin** and **cologne**.
```nix
{
inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
outputs = { self, nixpkgs, ... }: {
nixosConfigurations = {
berlin = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [./machines/berlin/configuration.nix];
};
cologne = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [./machines/cologne/configuration.nix];
};
};
};
}
```
## Add clan-core Input
Add `clan-core` to your flake as input. It will provide everything we need to
manage your configurations with clan.
```nix
inputs.clan-core = {
url = "git+https://git.clan.lol/clan/clan-core";
# Don't do this if your machines are on nixpkgs stable.
inputs.nixpkgs.follows = "nixpkgs";
};
```
## Update Outputs
To be able to access our newly added dependency, it has to be added to the
output parameters.
```diff
- outputs = { self, nixpkgs, ... }:
+ outputs = { self, nixpkgs, clan-core }:
```
The existing `nixosConfigurations` output of your flake will be created by
clan. In addition, a new `clanInternals` output will be added. Since both of
these are provided by the output of `lib.buildClan`, a common syntax is to use a
`let...in` statement to create your clan and access its parameters in the flake
outputs.
For the provided flake example, your flake should now look like this:
```nix
{
inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
outputs = { self, nixpkgs, clan-core, ... }:
let
clan = clan-core.lib.buildClan {
self = self; # this needs to point at the repository root
specialArgs = {};
inventory.meta.name = "NEEDS_TO_BE_UNIQUE"; # TODO: Changeme
machines = {
berlin = {
nixpkgs.hostPlatform = "x86_64-linux";
imports = [ ./machines/berlin/configuration.nix ];
};
cologne = {
nixpkgs.hostPlatform = "x86_64-linux";
imports = [ ./machines/cologne/configuration.nix ];
};
};
};
in
{
nixosConfigurations = clan.nixosConfigurations;
inherit (clan) clanInternals;
};
}
```
Et voilà! Your existing hosts are now part of a clan. Existing Nix tooling
should still work as normal. To check that you didn't make any errors, run `nix
flake show` and verify both hosts are still recognized as if nothing had
changed. You should also see the new `clanInternals` output.
```
nix flake show
git+file:///my-nixos-config
├───clanInternals: unknown
└───nixosConfigurations
├───berlin: NixOS configuration
└───cologne: NixOS configuration
```
Of course you can also rebuild your configuration using `nixos-rebuild` and
verify everything still works.
## Add Clan CLI devShell
At this point Clan is set up, but you can't use the CLI yet. To do so, it is
recommended to expose it via a `devShell` in your flake. It is also possible to
install it in any other way you would install a package in Nix, but using a
development shell ensures the CLI's version will always be in sync with your
configuration.
A minimal example is provided below, add it to your flake outputs.
```nix
devShells."x86_64-linux".default = nixpkgs.legacyPackages."x86_64-linux".mkShell {
packages = [ clan-core.packages."x86_64-linux".clan-cli ];
};
```
To use the CLI, execute `nix develop` in the directory of your flake. The
resulting shell provides you with the `clan` CLI tool. Since you will be using
it every time you interact with Clan, it is recommended to set up
[direnv](https://direnv.net/).
Verify everything works as expected by running `clan machines list`.
```
nix develop
[user@host:~/my-nixos-config]$ clan machines list
berlin
cologne
```
## Specify Targets
Clan needs to know where it can reach your hosts. For each of your hosts, set
`clan.core.networking.targetHost` to its address or hostname.
```nix
# machines/berlin/configuration.nix
{
clan.core.networking.targetHost = "123.4.56.78";
}
```
## Next Steps
You are now fully set up. Use the CLI to manage your hosts or proceed to
configure further services. At this point you should be able to run commands
like `clan machines update berlin` to deploy a host.

View File

@@ -1,20 +0,0 @@
This guide will help you navigate the codebase and locate key files:
```bash
$ tree -L 1
.
├── checks # Contains NixOS and VM tests
├── clanModules # Clan modules available for end-user import
├── docs # Source files for docs.clan.lol, generated with MkDocs
├── flakeModules
├── lib # User-exposed Clan Nix functions like buildClan and inventory
├── machines
├── nixosModules # Internal Clan Nix functions, e.g., clanCore
├── pkgs # Clan applications and packaged dependencies
├── formatter.nix # Configuration for nix-treefmt, manages `nix fmt`
├── scripts
├── sops
├── templates # Template files for creating a new Clan
└── vars
```

View File

@@ -1,151 +0,0 @@
!!! Note
Vars is the new secret backend that will soon replace the Facts backend
Defining a Linux user's password via the NixOS configuration previously required running `mkpasswd ...` and then copying the hash back into the Nix configuration.
In this example, we will guide you through automating that interaction using clan `vars`.
For a more general explanation of what clan vars are and how they work, see the intro of the [Reference Documentation for vars](https://docs.clan.lol/reference/clan-core/vars/)
This guide assumes
- clan is set up already (see [Getting Started](../getting-started/index.md))
- a machine has been added to the clan (see [Adding Machines](./adding-machines.md))
This section will walk you through the following steps:
1. declare a `generator` in the machine's nixos configuration
2. inspect the status via the clan cli
3. generate the vars
4. observe the changes
5. update the machine
6. share the root password between machines
7. change the password
## Declare the generator
In this example, a `vars` `generator` is used to:
- prompt the user for the password
- run the required `mkpasswd` command to generate the hash
- store the hash in a file
- expose the file path to the nixos configuration
Create a new nix file `root-password.nix` with the following content and import it into your `configuration.nix`
```nix
{config, pkgs, ...}: {
clan.core.vars.generators.root-password = {
# prompt the user for a password
# (`password-input` being an arbitrary name)
prompts.password-input.description = "the root user's password";
prompts.password-input.type = "hidden";
# don't store the prompted password itself
prompts.password-input.persist = false;
# define an output file for storing the hash
files.password-hash.secret = false;
# define the logic for generating the hash
script = ''
cat $prompts/password-input | mkpasswd -m sha-512 > $out/password-hash
'';
# the tools required by the script
runtimeInputs = [ pkgs.mkpasswd ];
};
# ensure users are immutable (otherwise the following config might be ignored)
users.mutableUsers = false;
# set the root password to the file containing the hash
users.users.root.hashedPasswordFile =
# clan will make sure, this path exists
config.clan.core.vars.generators.root-password.files.password-hash.path;
}
```
## Inspect the status
Executing `clan vars list`, you should see the following:
```shellSession
$ clan vars list my_machine
root-password/password-hash: <not set>
```
...indicating that the value `password-hash` for the generator `root-password` is not set yet.
## Generate the values
This step is not strictly necessary, as deploying the machine via `clan machines update` would trigger the generator as well.
To run the generator, execute `clan vars generate` for your machine
```shellSession
$ clan vars generate my_machine
Enter the value for root-password/password-input (hidden):
```
After entering the value, the updated status is reported:
```shellSession
Updated var root-password/password-hash
old: <not set>
new: $6$RMats/YMeypFtcYX$DUi...
```
## Observe the changes
With the last step, a new file was created in your repository:
`vars/per-machine/my-machine/root-password/password-hash/value`
If the repository is a git repository, a commit was created automatically:
```shellSession
$ git log -n1
commit ... (HEAD -> master)
Author: ...
Date: ...
Update vars via generator root-password for machine grmpf-nix
```
## Update the machine
```shell
clan machines update my_machine
```
## Share root password between machines
If we just imported the `root-password.nix` from above into more machines, clan would ask for a new password for each additional machine.
If the root password instead should only be entered once and shared across all machines, the generator defined above needs to be declared as `shared`, by adding `share = true` to it:
```nix
{config, pkgs, ...}: {
clan.core.vars.generators.root-password = {
share = true;
# ...
}
}
```
Importing that shared generator into each machine will ensure that the password is only asked for once, when the first machine gets updated, and is then re-used for all subsequent machines.
## Change the root password
Changing the password can be done via this command.
Replace `my-machine` with your machine.
If the password is shared, just pick any machine that has the generator declared.
```shellSession
$ clan vars generate my-machine --generator root-password --regenerate
...
Enter the value for root-password/password-input (hidden):
Input received. Processing...
...
Updated var root-password/password-hash
old: $6$tb27m6EOdff.X9TM$19N...
new: $6$OyoQtDVzeemgh8EQ$zRK...
```
## Further Reading
- [Reference Documentation for `clan.core.vars` nixos options](../reference/clan-core/vars.md)
- [Reference Documentation for the `clan vars` cli command](../reference/cli/vars.md)

View File

@@ -0,0 +1 @@
/nix/store/8y5h98wk5p94mv1wyb2c4gkrr7bswd19-asciinema-player.css

View File

@@ -0,0 +1 @@
/nix/store/w0i3f9qzn9n6jmfnfgiw5wnab2f9ssdw-asciinema-player.min.js

View File

@@ -15,8 +15,3 @@
.md-header img {
filter: invert(100%) brightness(100%);
}
.md-nav__title,
.md-nav__item.md-nav__item--section > label > span {
color: var(--md-typeset-a-color);
}

47
flake.lock generated
View File

@@ -7,11 +7,11 @@
]
},
"locked": {
"lastModified": 1741786315,
"narHash": "sha256-VT65AE2syHVj6v/DGB496bqBnu1PXrrzwlw07/Zpllc=",
"lastModified": 1735468753,
"narHash": "sha256-2dt1nOe9zf9pDkf5Kn7FUFyPRo581s0n90jxYXJ94l0=",
"owner": "nix-community",
"repo": "disko",
"rev": "0d8c6ad4a43906d14abd5c60e0ffe7b587b213de",
"rev": "84a5b93637cc16cbfcc61b6e1684d626df61eb21",
"type": "github"
},
"original": {
@@ -27,11 +27,11 @@
]
},
"locked": {
"lastModified": 1741352980,
"narHash": "sha256-+u2UunDA4Cl5Fci3m7S643HzKmIDAe+fiXrLqYsR2fs=",
"lastModified": 1735774679,
"narHash": "sha256-soePLBazJk0qQdDVhdbM98vYdssfs3WFedcq+raipRI=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "f4330d22f1c5d2ba72d3d22df5597d123fdb60a9",
"rev": "f2f7418ce0ab4a5309a4596161d154cfc877af66",
"type": "github"
},
"original": {
@@ -42,11 +42,11 @@
},
"nixos-facter-modules": {
"locked": {
"lastModified": 1738752252,
"narHash": "sha256-/nA3tDdp/2g0FBy8966ppC2WDoyXtUWaHkZWL+N3ZKc=",
"lastModified": 1734596637,
"narHash": "sha256-MRqwVAe3gsb88u4ME1UidmZFVCx+FEnoob0zkpO9DMY=",
"owner": "numtide",
"repo": "nixos-facter-modules",
"rev": "60f8b8f3f99667de6a493a44375e5506bf0c48b1",
"rev": "536472754982bf03079b4b4e0261838a760587c0",
"type": "github"
},
"original": {
@@ -57,15 +57,18 @@
},
"nixpkgs": {
"locked": {
"lastModified": 315532800,
"narHash": "sha256-+bxPXRQiQ0SsjR8syBcc8X+S8WGllNM+Qreu5Td7gnI=",
"rev": "1750f3c1c89488e2ffdd47cab9d05454dddfb734",
"type": "tarball",
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre773343.1750f3c1c894/nixexprs.tar.xz"
"lastModified": 1734435836,
"narHash": "sha256-kMBQ5PRiFLagltK0sH+08aiNt3zGERC2297iB6vrvlU=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "4989a246d7a390a859852baddb1013f825435cee",
"type": "github"
},
"original": {
"type": "tarball",
"url": "https://nixos.org/channels/nixpkgs-unstable/nixexprs.tar.xz"
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
@@ -86,11 +89,11 @@
]
},
"locked": {
"lastModified": 1742700801,
"narHash": "sha256-ZGlpUDsuBdeZeTNgoMv+aw0ByXT2J3wkYw9kJwkAS4M=",
"lastModified": 1736064798,
"narHash": "sha256-xJRN0FmX9QJ6+w8eIIIxzBU1AyQcLKJ1M/Gp6lnSD20=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "67566fe68a8bed2a7b1175fdfb0697ed22ae8852",
"rev": "5dc08f9cc77f03b43aacffdfbc8316807773c930",
"type": "github"
},
"original": {
@@ -121,11 +124,11 @@
]
},
"locked": {
"lastModified": 1742982148,
"narHash": "sha256-aRA6LSxjlbMI6MmMzi/M5WH/ynd8pK+vACD9za3MKLQ=",
"lastModified": 1736115332,
"narHash": "sha256-FBG9d7e0BTFfxVdw4b5EmNll2Mv7hfRc54hbB4LrKko=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "61c88349bf6dff49fa52d7dfc39b21026c2a8881",
"rev": "1788ca5acd4b542b923d4757d4cfe4183cc6a92d",
"type": "github"
},
"original": {

View File

@@ -2,7 +2,7 @@
description = "clan.lol base operating system";
inputs = {
nixpkgs.url = "https://nixos.org/channels/nixpkgs-unstable/nixexprs.tar.xz";
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
flake-parts.url = "github:hercules-ci/flake-parts";
flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs";
@@ -24,50 +24,36 @@
outputs =
inputs@{
flake-parts,
nixpkgs,
self,
systems,
...
}:
let
inherit (nixpkgs.lib)
filter
optional
pathExists
;
in
flake-parts.lib.mkFlake { inherit inputs; } (
{ ... }:
{
clan = {
meta.name = "clan-core";
directory = self;
};
systems = import systems;
imports =
# only importing existing paths allows to minimize the flake for test
# by removing files
filter pathExists [
imports = [
./checks/flake-module.nix
./clanModules/flake-module.nix
./devShell.nix
./docs/nix/flake-module.nix
./flakeModules/flake-module.nix
./flakeModules/demo_iso.nix
./lib/filter-clan-core/flake-module.nix
(import ./flakeModules/clan.nix inputs.self)
./devShell.nix
# TODO: migrate this @davHau
# ./docs/flake-module
./docs/nix/flake-module.nix
./lib/flake-module.nix
./nixosModules/clanCore/vars/flake-module.nix
./nixosModules/flake-module.nix
./nixosModules/clanCore/vars/flake-module.nix
./pkgs/flake-module.nix
./templates/flake-module.nix
]
++ [
(if pathExists ./flakeModules/clan.nix then import ./flakeModules/clan.nix inputs.self else { })
]
# Make treefmt-nix optional
# This only works if you set inputs.clan-core.inputs.treefmt-nix.follows
# to a non-empty input that doesn't export a flakeModule
++ optional (pathExists ./formatter.nix && inputs.treefmt-nix ? flakeModule) ./formatter.nix;
] ++ inputs.nixpkgs.lib.optional (inputs.treefmt-nix ? flakeModule) ./formatter.nix;
}
);
}

View File

@@ -27,13 +27,9 @@ in
};
options.flake = flake-parts-lib.mkSubmoduleOptions {
clan = lib.mkOption { type = types.raw; };
clanInternals = lib.mkOption { type = types.raw; };
};
config = {
flake.clan = {
inherit (config.clan.clanInternals) templates;
};
flake.clanInternals = config.clan.clanInternals;
flake.nixosConfigurations = config.clan.nixosConfigurations;
};

View File

@@ -1,101 +0,0 @@
{ self, ... }:
let
pkgs = self.inputs.nixpkgs.legacyPackages.x86_64-linux;
demoModule = {
imports = [
"${self.clanModules.mycelium}/roles/peer.nix"
# TODO do we need this? maybe not
(
{ modulesPath, ... }:
{
imports = [ "${modulesPath}/installer/cd-dvd/iso-image.nix" ];
}
)
];
};
clan_welcome = pkgs.writeShellApplication {
name = "clan_welcome";
runtimeInputs = [
pkgs.gum
pkgs.gitMinimal
pkgs.retry
self.packages.${pkgs.system}.clan-cli
];
text = ''
set -efu
gum confirm '
Welcome to Clan, a NixOS-based operating system for the CLAN project.
This installer can be used to try out clan on your machine, for that reason we setup a cooperative environment to play and hack together :)
' || exit 1
until retry -t 5 ping -c 1 -W 1 git.clan.lol &> /dev/null; do
# TODO make this nicer
nmtui
done
if ! test -e ~/clan-core; then
# git clone https://git.clan.lol/clan/clan-core.git ~/clan-core
cp -rv ${self} clan-core
fi
cd clan-core
clan machines morph demo-template --i-will-be-fired-for-using-this
exit
'';
};
morphModule = {
imports = [
(
{ modulesPath, ... }:
{
imports = [ "${modulesPath}/image/images.nix" ];
}
)
];
image.modules.iso.isoImage.squashfsCompression = "zstd -Xcompression-level 1";
networking.networkmanager.enable = true;
services.getty.autologinUser = "root";
programs.bash.interactiveShellInit = ''
if [[ "$(tty)" =~ /dev/(tty1|hvc0|ttyS0)$ ]]; then
# workaround for https://github.com/NixOS/nixpkgs/issues/219239
systemctl restart systemd-vconsole-setup.service
reset
${clan_welcome}/bin/clan_welcome
fi
'';
};
in
{
clan.templates.machine.demo-template = {
description = "Demo machine for the CLAN project";
# path = pkgs.runCommand "demo-template" {} ''
# mkdir -p $out
# echo '{ self, ... }: { imports = [ self.nixosModules.demoModule ]; }' > $out/configuration.nix
# '';
path = ./demo_template;
};
flake.nixosModules = { inherit morphModule demoModule; };
perSystem =
{ system, lib, ... }:
{
packages =
lib.mkIf
(lib.any (x: x == system) [
"x86_64-linux"
"aarch64-linux"
])
{
demo-iso =
(self.inputs.nixpkgs.lib.nixosSystem {
modules = [
{ nixpkgs.hostPlatform = system; }
morphModule
];
}).config.system.build.images.iso;
};
};
}

View File

@@ -1,38 +0,0 @@
{ pkgs, config, ... }:
{
fileSystems."/".device = "nodev";
boot.loader.grub.device = "nodev";
clan.core.vars.settings.secretStore = "fs";
clan.core.vars.generators.mycelium = {
files."key" = { };
files."ip".secret = false;
files."pubkey".secret = false;
runtimeInputs = [
pkgs.mycelium
pkgs.coreutils
pkgs.jq
];
script = ''
timeout 5 mycelium --key-file "$out"/key || :
mycelium inspect --key-file "$out"/key --json | jq -r .publicKey > "$out"/pubkey
mycelium inspect --key-file "$out"/key --json | jq -r .address > "$out"/ip
'';
};
services.mycelium = {
enable = true;
addHostedPublicNodes = true;
openFirewall = true;
keyFile = config.clan.core.vars.generators.mycelium.files.key.path;
};
services.getty.autologinUser = "root";
programs.bash.interactiveShellInit = ''
if [[ "$(tty)" =~ /dev/(tty1|hvc0|ttyS0)$ ]]; then
# workaround for https://github.com/NixOS/nixpkgs/issues/219239
systemctl restart systemd-vconsole-setup.service
reset
your mycelium IP is: $(cat /var/lib/mycelium/ip)
fi
'';
}

View File

@@ -92,12 +92,13 @@
treefmt.programs.mypy.directories =
{
"clan-cli" = {
extraPythonPackages = self'.packages.clan-cli.testDependencies;
directory = "pkgs/clan-cli";
extraPythonPackages = (self'.packages.clan-cli.devshellPyDeps pkgs.python3Packages);
};
"clan-app" = {
directory = "pkgs/clan-app";
extraPythonPackages = (self'.packages.clan-app.devshellPyDeps pkgs.python3Packages);
extraPythonPackages =
(self'.packages.clan-app.externalTestDeps or [ ]) ++ self'.packages.clan-cli.testDependencies;
extraPythonPaths = [ "../clan-cli" ];
};
}
@@ -106,9 +107,8 @@
{
"clan-vm-manager" = {
directory = "pkgs/clan-vm-manager";
extraPythonPackages = self'.packages.clan-vm-manager.externalTestDeps ++ [
(pkgs.python3.withPackages (ps: self'.packages.clan-cli.devshellPyDeps ps))
];
extraPythonPackages =
self'.packages.clan-vm-manager.externalTestDeps ++ self'.packages.clan-cli.testDependencies;
extraPythonPaths = [ "../clan-cli" ];
};
}

View File

@@ -56,7 +56,7 @@
"machines": {
"test-inventory-machine": {
"config": {
"packages": ["hello"]
"packages": ["zed-editor"]
},
"extraModules": []
}

View File

@@ -1,23 +0,0 @@
{
lib,
self,
...
}:
let
# Returns an attrset with inputs that have the attribute `clanModules`
inputsWithClanModules = lib.filterAttrs (
_name: value: builtins.hasAttr "clanModules" value
) self.inputs;
flattenedClanModules = lib.foldl' (
acc: input:
lib.mkMerge [
acc
input.clanModules
]
) { } (lib.attrValues inputsWithClanModules);
in
{
inventory.modules = flattenedClanModules;
}

View File

@@ -8,8 +8,7 @@
}:
{
## Inputs
self ? lib.warn "Argument: 'self' must be set when using 'buildClan'." null, # Reference to the current flake
# allows to include machine-specific modules i.e. machines.${name} = { ... }
directory, # The directory containing the machines subdirectory # allows to include machine-specific modules i.e. machines.${name} = { ... }
# A map from arch to pkgs, if specified this nixpkgs will be only imported once for each system.
# This improves performance, but all nipxkgs.* options will be ignored.
# deadnix: skip
@@ -24,12 +23,11 @@ let
inherit
lib
nixpkgs
specialArgs
clan-core
self
;
inherit specialArgs;
self = directory;
};
rest = builtins.removeAttrs attrs [ "specialArgs" ];
in
eval {

View File

@@ -7,7 +7,7 @@ let
};
evalDocs = pkgs.nixosOptionsDoc {
options = eval.options;
warningsAreErrors = false;
warningsAreErrors = true;
};
in
{

View File

@@ -2,8 +2,8 @@
lib,
nixpkgs,
clan-core,
self,
specialArgs ? { },
self,
}:
# Returns a function that takes self, which should point to the directory of the flake
module:
@@ -14,8 +14,6 @@ module:
modules = [
./interface.nix
module
{
inherit specialArgs;
}
{ inherit specialArgs; }
];
}).config

View File

@@ -20,11 +20,12 @@ in
jsonDocs = import ./eval-docs.nix {
inherit pkgs lib;
};
in
{
legacyPackages.clan-internals-docs = jsonDocs.optionsJSON;
# Run: nix-unit --extra-experimental-features flakes --flake .#legacyPackages.x86_64-linux.evalTests-build-clan
# Run: nix-unit --extra-experimental-features flakes --flake .#legacyPackages.x86_64-linux.evalTests
legacyPackages.evalTests-build-clan = import ./tests.nix {
inherit lib;
inherit (inputs) nixpkgs;
@@ -38,20 +39,7 @@ in
nix-unit --eval-store "$HOME" \
--extra-experimental-features flakes \
${inputOverrides} \
--flake ${
self.filter {
include = [
"flakeModules"
"inventory.json"
"lib/build-clan"
"lib/default.nix"
"lib/flake-module.nix"
"lib/inventory"
"machines"
"nixosModules"
];
}
}#legacyPackages.${system}.evalTests-build-clan
--flake ${self}#legacyPackages.${system}.evalTests-build-clan
touch $out
'';

View File

@@ -8,26 +8,10 @@ let
in
{
options = {
self = lib.mkOption {
type = types.raw;
default = self;
defaultText = "Reference to the current flake";
description = ''
This is used to import external clan modules.
'';
};
# Required options
directory = lib.mkOption {
type = types.coercedTo lib.types.raw (
v:
if lib.isAttrs v then
lib.warn "It appears you set 'clan.directory = self'. Instead set 'clan.self = self'. 'clan.directory' expects a path" v
else if v == null then
throw "Please set either clan.self or clan.directory"
else
builtins.toString v
) lib.types.path;
default = builtins.toString self;
type = types.path;
default = self;
defaultText = "Root directory of the flake";
description = ''
The directory containing the clan.
@@ -69,15 +53,6 @@ in
```
'';
};
templates = lib.mkOption {
type = types.submodule { imports = [ ./templates/interface.nix ]; };
default = { };
description = ''
Define Clan templates.
'';
};
inventory = lib.mkOption {
type = types.submodule { imports = [ ../inventory/build-inventory/interface.nix ]; };
description = ''
@@ -121,11 +96,11 @@ in
type = types.lazyAttrsOf types.raw;
default = { };
};
# flake.clanInternals
clanInternals = lib.mkOption {
# Hide from documentation. Exposes internals to the cli.
visible = false;
# type = types.raw;
# ClanInternals
type = types.submodule {
options = {
@@ -133,20 +108,15 @@ in
# We don't specify the type here, for better performance.
inventory = lib.mkOption { type = lib.types.raw; };
inventoryValuesPrios = lib.mkOption { type = lib.types.raw; };
# all exported clan templates from this clan
templates = lib.mkOption { type = lib.types.raw; };
# all exported clan modules from this clan
modules = lib.mkOption { type = lib.types.raw; };
# all inventory module schemas
moduleSchemas = lib.mkOption { type = lib.types.raw; };
inventoryFile = lib.mkOption { type = lib.types.raw; };
# The machine 'imports' generated by the inventory per machine
inventoryClass = lib.mkOption { type = lib.types.raw; };
serviceConfigs = lib.mkOption { type = lib.types.raw; };
# clan-core's modules
clanModules = lib.mkOption { type = lib.types.raw; };
source = lib.mkOption { type = lib.types.raw; };
meta = lib.mkOption { type = lib.types.raw; };
lib = lib.mkOption { type = lib.types.raw; };
all-machines-json = lib.mkOption { type = lib.types.raw; };
machines = lib.mkOption { type = lib.types.raw; };
machinesFunc = lib.mkOption { type = lib.types.raw; };

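The `coercedTo` type above sorts callers into three cases. A sketch of each, assuming this interface is reached through the `clan.*` flake-parts options as elsewhere in clan-core:

```nix
{ self, ... }:
{
  # Preferred: pass the flake itself; `directory` then defaults
  # to builtins.toString self.
  clan.self = self;

  # Accepted but warns:
  # "It appears you set 'clan.directory = self'. Instead set 'clan.self = self'. ..."
  # clan.directory = self;

  # A plain path is coerced to a string, as before.
  # clan.directory = ./.;
}
```
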
View File

@@ -1,4 +1,3 @@
# NixOS module
{
config,
clan-core,
@@ -42,9 +41,10 @@ let
# map from machine name to service configuration
# { ${machineName} :: Config }
inventoryClass = (
serviceConfigs = (
buildInventory {
inherit inventory directory;
inherit inventory;
inherit directory;
}
);
@@ -76,7 +76,7 @@ let
(machines.${name} or { })
# Inherit the inventory assertions ?
# { inherit (mergedInventory) assertions; }
{ imports = inventoryClass.machines.${name}.machineImports or [ ]; }
{ imports = serviceConfigs.${name} or [ ]; }
(
{
# Settings
@@ -96,6 +96,12 @@ let
networking.hostName = lib.mkDefault name;
# speeds up nix commands by using the nixpkgs from the host system (especially useful in VMs)
nix.registry.nixpkgs.to = lib.mkDefault {
type = "path";
path = lib.mkDefault nixpkgs;
};
# For vars we need to override the system so we run vars
# generators on the machine that runs `clan vars generate`. If a
# user is using `pkgsForSystem`, we don't set
@@ -161,11 +167,9 @@ let
(builtins.fromJSON (builtins.readFile inventoryFile))
else
{ };
in
{
imports = [
./auto-imports.nix
# Merge the inventory file
{
inventory = _: {
@@ -177,7 +181,7 @@ in
{
inventory.machines = lib.optionalAttrs (builtins.pathExists "${directory}/machines") (
builtins.mapAttrs (_n: _v: { }) (
lib.filterAttrs (_: t: t == "directory") (builtins.readDir "${directory}/machines")
(lib.filterAttrs (_: t: t == "directory") (builtins.readDir "${directory}/machines"))
)
);
}
@@ -185,9 +189,7 @@ in
inventory.machines = lib.mapAttrs (_n: _: { }) config.machines;
}
# Merge the meta attributes from the buildClan function
{
inventory.modules = clan-core.clanModules;
}
{ inventory.modules = clan-core.clanModules; }
# config.inventory.meta <- config.meta
{ inventory.meta = config.meta; }
# Set default for computed tags
@@ -198,7 +200,7 @@ in
clanInternals = {
moduleSchemas = clan-core.lib.modules.getModulesSchema config.inventory.modules;
inherit inventoryClass;
inherit serviceConfigs;
inherit (clan-core) clanModules;
inherit inventoryFile;
inventoryValuesPrios =
@@ -206,14 +208,8 @@ in
builtins.removeAttrs (clan-core.lib.values.getPrios { options = inventory.options; })
# tags are freeformType which is not supported yet.
[ "tags" ];
modules = config.modules;
templates = config.templates;
inventory = config.inventory;
meta = config.inventory.meta;
lib = {
inherit (clan-core.lib) select;
};
source = "${clan-core}";

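The registry pin added in this hunk, shown standalone: it lets `nix run nixpkgs#...` on the machine resolve against the nixpkgs already in the store instead of fetching. The `let` binding stands in for the `nixpkgs` flake input the real module closes over:

```nix
{ lib, ... }:
let
  # Stand-in for the `nixpkgs` flake input in the real module
  nixpkgs = <nixpkgs>;
in
{
  # Speeds up nix commands by reusing the host's nixpkgs
  # (especially useful in VMs).
  nix.registry.nixpkgs.to = lib.mkDefault {
    type = "path";
    path = lib.mkDefault (builtins.toString nixpkgs);
  };
}
```
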
View File

@@ -1,57 +0,0 @@
{
lib,
...
}:
let
inherit (lib) types;
templateType = types.submodule (
{ name, ... }:
{
options.description = lib.mkOption {
type = types.str;
default = name;
description = ''
The name of the template.
'';
};
options.path = lib.mkOption {
type = types.path;
description = ''
Holds the path to the clan template.
'';
};
}
);
in
{
options = {
# clan.templates.clan
clan = lib.mkOption {
type = types.attrsOf templateType;
default = { };
description = ''
Holds the different clan templates.
'';
};
# clan.templates.disko
disko = lib.mkOption {
type = types.attrsOf templateType;
default = { };
description = ''
Holds different disko templates.
'';
};
# clan.templates.machine
machine = lib.mkOption {
type = types.attrsOf templateType;
default = { };
description = ''
Holds the different machine templates.
'';
};
};
}

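Although this file is removed in the diff, its shape is easy to reconstruct in use: each category (`clan`, `disko`, `machine`) maps template names to a `description` plus a `path`. A sketch with illustrative names and paths:

```nix
{
  clan.templates = {
    # clan.templates.clan.<name>
    clan.minimal = {
      description = "A minimal clan starter";
      path = ./templates/clan/minimal;
    };
    # clan.templates.disko.<name>
    disko.single-disk = {
      description = "Single-disk layout";
      path = ./templates/disko/single-disk;
    };
    # clan.templates.machine.<name>
    machine.installer = {
      description = "Bootable installer machine";
      path = ./templates/machine/installer;
    };
  };
}
```
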
View File

@@ -10,52 +10,24 @@ let
inherit lib nixpkgs clan-core;
self = ./.;
};
# Shallowly force all attribute values to be evaluated.
shallowForceAllAttributes = lib.foldlAttrs (
_acc: _name: value:
lib.seq value true
) true;
in
#######
{
test_missing_self =
let
config = buildClan {
meta.name = "test";
imports = [ ./module.nix ];
};
in
{
expr = shallowForceAllAttributes config;
expectedError = {
type = "ThrownError";
msg = "A definition for option `directory' is not of type `absolute path*";
};
};
test_only_required =
let
config = evalClan {
self = {
inputs = { };
outPath = ./.;
};
meta.name = "test";
imports = [ ./module.nix ];
};
in
{
expr = shallowForceAllAttributes config;
expr = config.inventory ? meta;
expected = true;
};
test_all_simple =
let
config = evalClan {
self = {
inputs = { };
};
directory = ./.;
machines = { };
inventory = {
@@ -71,10 +43,6 @@ in
test_outputs_clanInternals =
let
config = evalClan {
self = {
inputs = { };
};
directory = ./.;
imports = [
# What the user needs to specify
{
@@ -100,9 +68,6 @@ in
test_fn_simple =
let
result = buildClan {
self = {
inputs = { };
};
directory = ./.;
meta.name = "test";
};
@@ -119,9 +84,6 @@ in
test_fn_extensiv_meta =
let
result = buildClan {
self = {
inputs = { };
};
directory = ./.;
meta.name = "test";
meta.description = "test";
@@ -142,9 +104,6 @@ in
test_fn_clan_core =
let
result = buildClan {
self = {
inputs = { };
};
directory = ../../.;
meta.name = "test-clan-core";
};
@@ -160,9 +119,6 @@ in
test_buildClan_all_machines =
let
result = buildClan {
self = {
inputs = { };
};
directory = ./.;
meta.name = "test";
inventory.machines.machine1.meta.name = "machine1";
@@ -182,9 +138,6 @@ in
test_buildClan_specialArgs =
let
result = buildClan {
self = {
inputs = { };
};
directory = ./.;
meta.name = "test";
specialArgs.foo = "dream2nix";

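`shallowForceAllAttributes`, introduced above, is just a left fold with `lib.seq`: it forces each top-level attribute to weak head normal form so evaluation errors surface in a test's `expr`, without descending any deeper. A standalone sketch:

```nix
let
  lib = (import <nixpkgs> { }).lib;
  shallowForceAllAttributes = lib.foldlAttrs (
    _acc: _name: value:
    lib.seq value true
  ) true;
in
shallowForceAllAttributes {
  ok = 1;
  nested = { deep = throw "never forced"; }; # shallow: the throw stays lazy
}
# => true
```
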
View File

@@ -21,5 +21,4 @@ in
inherit lib;
self = clan-core;
};
select = import ./select.nix;
}

View File

@@ -6,14 +6,11 @@
let
baseModule = {
imports = (import (pkgs.path + "/nixos/modules/module-list.nix")) ++ [
(
{ config, ... }:
{
nixpkgs.pkgs = pkgs;
clan.core.name = "dummy";
system.stateVersion = config.system.nixos.release;
system.stateVersion = lib.version;
}
)
];
};

Some files were not shown because too many files have changed in this diff.