clan tests: convert makeTestClan to a module

Let's not put yet another wrapper around runTest; instead, expose our logic as a module that can be imported into any nixos-vm-test.
This commit is contained in:
DavHau
2025-06-03 18:59:34 +07:00
parent 9cb6382cec
commit 1add6a6314
16 changed files with 736 additions and 719 deletions

View File

@@ -1,61 +1,62 @@
{ {
pkgs, pkgs,
self, nixosLib,
clanLib, clan-core,
... ...
}: }:
let let
public-key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII6zj7ubTg6z/aDwRNwvM/WlQdUocMprQ8E92NWxl6t+ test@test"; public-key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII6zj7ubTg6z/aDwRNwvM/WlQdUocMprQ8E92NWxl6t+ test@test";
in in
nixosLib.runTest (
{ ... }:
{
imports = [
clan-core.modules.nixosVmTest.clanTest
];
clanLib.test.makeTestClan { hostPkgs = pkgs;
inherit pkgs self;
nixosTest = (
{ ... }:
{ name = "admin";
name = "admin";
clan = { clan = {
directory = ./.; directory = ./.;
modules."@clan/admin" = ../../clanServices/admin/default.nix; modules."@clan/admin" = ../../clanServices/admin/default.nix;
inventory = { inventory = {
machines.client = { }; machines.client = { };
machines.server = { }; machines.server = { };
instances = { instances = {
ssh-test-one = { ssh-test-one = {
module.name = "@clan/admin"; module.name = "@clan/admin";
roles.default.machines."server".settings = { roles.default.machines."server".settings = {
allowedKeys.testkey = public-key; allowedKeys.testkey = public-key;
};
}; };
}; };
}; };
}; };
};
nodes = { nodes = {
client.environment.etc.private-test-key.source = ./private-test-key; client.environment.etc.private-test-key.source = ./private-test-key;
server = { server = {
services.openssh.enable = true; services.openssh.enable = true;
};
}; };
};
testScript = '' testScript = ''
start_all() start_all()
machines = [client, server] machines = [client, server]
for m in machines: for m in machines:
m.systemctl("start network-online.target") m.systemctl("start network-online.target")
for m in machines: for m in machines:
m.wait_for_unit("network-online.target") m.wait_for_unit("network-online.target")
client.succeed(f"ssh -F /dev/null -i /etc/private-test-key -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes root@server true &>/dev/null") client.succeed(f"ssh -F /dev/null -i /etc/private-test-key -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes root@server true &>/dev/null")
''; '';
} }
); )
}

View File

@@ -1,118 +1,118 @@
{ {
pkgs, pkgs,
self, nixosLib,
clanLib, clan-core,
... ...
}: }:
nixosLib.runTest (
{ ... }:
{
imports = [
clan-core.modules.nixosVmTest.clanTest
];
clanLib.test.makeTestClan { hostPkgs = pkgs;
inherit pkgs self;
useContainers = true;
nixosTest = ( name = "borgbackup";
{ ... }:
{ clan = {
name = "borgbackup"; directory = ./.;
test.useContainers = true;
modules."@clan/borgbackup" = ../../clanServices/borgbackup/default.nix;
inventory = {
clan = { machines.clientone = { };
directory = ./.; machines.serverone = { };
modules."@clan/borgbackup" = ../../clanServices/borgbackup/default.nix;
inventory = {
machines.clientone = { }; instances = {
machines.serverone = { }; borgone = {
instances = { module.name = "@clan/borgbackup";
borgone = {
module.name = "@clan/borgbackup"; roles.client.machines."clientone" = { };
roles.server.machines."serverone".settings.directory = "/tmp/borg-test";
roles.client.machines."clientone" = { };
roles.server.machines."serverone".settings.directory = "/tmp/borg-test";
};
}; };
}; };
}; };
};
nodes = { nodes = {
serverone = {
services.openssh.enable = true;
# Needed so PAM doesn't see the user as locked
users.users.borg.password = "borg";
};
clientone =
{ config, pkgs, ... }:
let
dependencies = [
clan-core
pkgs.stdenv.drvPath
] ++ builtins.map (i: i.outPath) (builtins.attrValues clan-core.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
serverone = {
services.openssh.enable = true; services.openssh.enable = true;
# Needed so PAM doesn't see the user as locked
users.users.borg.password = "borg"; users.users.root.openssh.authorizedKeys.keyFiles = [ ../assets/ssh/pubkey ];
clan.core.networking.targetHost = config.networking.hostName;
environment.systemPackages = [ clan-core.packages.${pkgs.system}.clan-cli ];
environment.etc.install-closure.source = "${closureInfo}/store-paths";
nix.settings = {
substituters = pkgs.lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = pkgs.lib.mkForce 3;
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
};
system.extraDependencies = dependencies;
clan.core.state.test-backups.folders = [ "/var/test-backups" ];
}; };
clientone = };
{ config, pkgs, ... }:
let
dependencies = [
self
pkgs.stdenv.drvPath
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in testScript = ''
{ import json
start_all()
services.openssh.enable = true; machines = [clientone, serverone]
users.users.root.openssh.authorizedKeys.keyFiles = [ ../assets/ssh/pubkey ]; for m in machines:
m.systemctl("start network-online.target")
clan.core.networking.targetHost = config.networking.hostName; for m in machines:
m.wait_for_unit("network-online.target")
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ]; # dummy data
clientone.succeed("mkdir -p /var/test-backups /var/test-service")
clientone.succeed("echo testing > /var/test-backups/somefile")
environment.etc.install-closure.source = "${closureInfo}/store-paths"; clientone.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../assets/ssh/privkey} /root/.ssh/id_ed25519")
nix.settings = { clientone.succeed("${pkgs.coreutils}/bin/touch /root/.ssh/known_hosts")
substituters = pkgs.lib.mkForce [ ]; clientone.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new localhost hostname")
hashed-mirrors = null; clientone.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new $(hostname) hostname")
connect-timeout = pkgs.lib.mkForce 3;
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
};
system.extraDependencies = dependencies;
clan.core.state.test-backups.folders = [ "/var/test-backups" ]; # create
}; clientone.succeed("borgbackup-create >&2")
clientone.wait_until_succeeds("! systemctl is-active borgbackup-job-serverone >&2")
}; # list
backup_id = json.loads(clientone.succeed("borg-job-serverone list --json"))["archives"][0]["archive"]
out = clientone.succeed("borgbackup-list").strip()
print(out)
assert backup_id in out, f"backup {backup_id} not found in {out}"
testScript = '' # borgbackup restore
import json clientone.succeed("rm -f /var/test-backups/somefile")
start_all() clientone.succeed(f"NAME='serverone::borg@serverone:.::{backup_id}' borgbackup-restore >&2")
assert clientone.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
machines = [clientone, serverone] '';
}
for m in machines: )
m.systemctl("start network-online.target")
for m in machines:
m.wait_for_unit("network-online.target")
# dummy data
clientone.succeed("mkdir -p /var/test-backups /var/test-service")
clientone.succeed("echo testing > /var/test-backups/somefile")
clientone.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../assets/ssh/privkey} /root/.ssh/id_ed25519")
clientone.succeed("${pkgs.coreutils}/bin/touch /root/.ssh/known_hosts")
clientone.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new localhost hostname")
clientone.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new $(hostname) hostname")
# create
clientone.succeed("borgbackup-create >&2")
clientone.wait_until_succeeds("! systemctl is-active borgbackup-job-serverone >&2")
# list
backup_id = json.loads(clientone.succeed("borg-job-serverone list --json"))["archives"][0]["archive"]
out = clientone.succeed("borgbackup-list").strip()
print(out)
assert backup_id in out, f"backup {backup_id} not found in {out}"
# borgbackup restore
clientone.succeed("rm -f /var/test-backups/somefile")
clientone.succeed(f"NAME='serverone::borg@serverone:.::{backup_id}' borgbackup-restore >&2")
assert clientone.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
'';
}
);
}

View File

@@ -1,86 +1,89 @@
{ {
pkgs, pkgs,
self, nixosLib,
clanLib, clan-core,
lib,
... ...
}: }:
clanLib.test.makeTestClan { let
inherit pkgs self; machines = [
nixosTest = ( "admin"
{ lib, ... }: "peer"
let "signer"
machines = [ ];
"admin" in
"peer" nixosLib.runTest (
"signer" { ... }:
]; {
in imports = [
{ clan-core.modules.nixosVmTest.clanTest
name = "data-mesher"; ];
clan = { hostPkgs = pkgs;
directory = ./.; name = "data-mesher";
inventory = {
machines = lib.genAttrs machines (_: { }); clan = {
services = { directory = ./.;
data-mesher.default = { inventory = {
roles.peer.machines = [ "peer" ]; machines = lib.genAttrs machines (_: { });
roles.admin.machines = [ "admin" ]; services = {
roles.signer.machines = [ "signer" ]; data-mesher.default = {
}; roles.peer.machines = [ "peer" ];
roles.admin.machines = [ "admin" ];
roles.signer.machines = [ "signer" ];
}; };
}; };
}; };
};
defaults = defaults =
{ config, ... }: { config, ... }:
{ {
environment.systemPackages = [ environment.systemPackages = [
config.services.data-mesher.package config.services.data-mesher.package
]; ];
clan.data-mesher.network.interface = "eth1"; clan.data-mesher.network.interface = "eth1";
clan.data-mesher.bootstrapNodes = [ clan.data-mesher.bootstrapNodes = [
"[2001:db8:1::1]:7946" # peer1 "[2001:db8:1::1]:7946" # peer1
"[2001:db8:1::2]:7946" # peer2 "[2001:db8:1::2]:7946" # peer2
]; ];
# speed up for testing # speed up for testing
services.data-mesher.settings = { services.data-mesher.settings = {
cluster.join_interval = lib.mkForce "2s"; cluster.join_interval = lib.mkForce "2s";
cluster.push_pull_interval = lib.mkForce "5s"; cluster.push_pull_interval = lib.mkForce "5s";
};
}; };
nodes = {
admin.clan.data-mesher.network.tld = "foo";
}; };
# TODO Add better test script. nodes = {
testScript = '' admin.clan.data-mesher.network.tld = "foo";
};
def resolve(node, success = {}, fail = [], timeout = 60): # TODO Add better test script.
for hostname, ips in success.items(): testScript = ''
for ip in ips:
node.wait_until_succeeds(f"getent ahosts {hostname} | grep {ip}", timeout)
for hostname in fail: def resolve(node, success = {}, fail = [], timeout = 60):
node.wait_until_fails(f"getent ahosts {hostname}") for hostname, ips in success.items():
for ip in ips:
node.wait_until_succeeds(f"getent ahosts {hostname} | grep {ip}", timeout)
start_all() for hostname in fail:
node.wait_until_fails(f"getent ahosts {hostname}")
admin.wait_for_unit("data-mesher") start_all()
signer.wait_for_unit("data-mesher")
peer.wait_for_unit("data-mesher")
# check dns resolution admin.wait_for_unit("data-mesher")
for node in [admin, signer, peer]: signer.wait_for_unit("data-mesher")
resolve(node, { peer.wait_for_unit("data-mesher")
"admin.foo": ["2001:db8:1::1", "192.168.1.1"],
"peer.foo": ["2001:db8:1::2", "192.168.1.2"], # check dns resolution
"signer.foo": ["2001:db8:1::3", "192.168.1.3"] for node in [admin, signer, peer]:
}) resolve(node, {
''; "admin.foo": ["2001:db8:1::1", "192.168.1.1"],
} "peer.foo": ["2001:db8:1::2", "192.168.1.2"],
); "signer.foo": ["2001:db8:1::3", "192.168.1.3"]
} })
'';
}
)

View File

@@ -1,93 +1,96 @@
{ {
pkgs, pkgs,
self, nixosLib,
clanLib, clan-core,
... ...
}: }:
clanLib.test.makeTestClan { nixosLib.runTest (
inherit pkgs self; { ... }:
nixosTest = ( {
{ ... }: imports = [
{ clan-core.modules.nixosVmTest.clanTest
# This tests the compatibility of the inventory ];
# With the test framework
# - legacy-modules
# - clan.service modules
name = "dummy-inventory-test";
clan = { hostPkgs = pkgs;
directory = ./.;
inventory = {
machines.peer1 = { };
machines.admin1 = { };
services = {
legacy-module.default = {
roles.peer.machines = [ "peer1" ];
roles.admin.machines = [ "admin1" ];
};
};
instances."test" = { # This tests the compatibility of the inventory
module.name = "new-service"; # With the test framework
roles.peer.machines.peer1 = { }; # - legacy-modules
}; # - clan.service modules
name = "dummy-inventory-test";
modules = { clan = {
legacy-module = ./legacy-module; directory = ./.;
inventory = {
machines.peer1 = { };
machines.admin1 = { };
services = {
legacy-module.default = {
roles.peer.machines = [ "peer1" ];
roles.admin.machines = [ "admin1" ];
}; };
}; };
modules.new-service = {
_class = "clan.service"; instances."test" = {
manifest.name = "new-service"; module.name = "new-service";
roles.peer = { }; roles.peer.machines.peer1 = { };
perMachine = { };
nixosModule = {
# This should be generated by: modules = {
# nix run .#generate-test-vars -- checks/dummy-inventory-test dummy-inventory-test legacy-module = ./legacy-module;
clan.core.vars.generators.new-service = { };
files.not-a-secret = { };
secret = false; modules.new-service = {
deploy = true; _class = "clan.service";
}; manifest.name = "new-service";
files.a-secret = { roles.peer = { };
secret = true; perMachine = {
deploy = true; nixosModule = {
owner = "nobody"; # This should be generated by:
group = "users"; # nix run .#generate-test-vars -- checks/dummy-inventory-test dummy-inventory-test
mode = "0644"; clan.core.vars.generators.new-service = {
}; files.not-a-secret = {
script = '' secret = false;
# This is a dummy script that does nothing deploy = true;
echo -n "not-a-secret" > $out/not-a-secret
echo -n "a-secret" > $out/a-secret
'';
}; };
files.a-secret = {
secret = true;
deploy = true;
owner = "nobody";
group = "users";
mode = "0644";
};
script = ''
# This is a dummy script that does nothing
echo -n "not-a-secret" > $out/not-a-secret
echo -n "a-secret" > $out/a-secret
'';
}; };
}; };
}; };
}; };
};
testScript = testScript =
{ nodes, ... }: { nodes, ... }:
'' ''
start_all() start_all()
admin1.wait_for_unit("multi-user.target") admin1.wait_for_unit("multi-user.target")
peer1.wait_for_unit("multi-user.target") peer1.wait_for_unit("multi-user.target")
# Provided by the legacy module # Provided by the legacy module
print(admin1.succeed("systemctl status dummy-service")) print(admin1.succeed("systemctl status dummy-service"))
print(peer1.succeed("systemctl status dummy-service")) print(peer1.succeed("systemctl status dummy-service"))
# peer1 should have the 'hello' file # peer1 should have the 'hello' file
peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}") peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")
ls_out = peer1.succeed("ls -la ${nodes.peer1.clan.core.vars.generators.new-service.files.a-secret.path}") ls_out = peer1.succeed("ls -la ${nodes.peer1.clan.core.vars.generators.new-service.files.a-secret.path}")
# Check that the file is owned by 'nobody' # Check that the file is owned by 'nobody'
assert "nobody" in ls_out, f"File is not owned by 'nobody': {ls_out}" assert "nobody" in ls_out, f"File is not owned by 'nobody': {ls_out}"
# Check that the file is in the 'users' group # Check that the file is in the 'users' group
assert "users" in ls_out, f"File is not in the 'users' group: {ls_out}" assert "users" in ls_out, f"File is not in the 'users' group: {ls_out}"
# Check that the file is in the '0644' mode # Check that the file is in the '0644' mode
assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}" assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}"
''; '';
} }
); )
}

View File

@@ -4,6 +4,7 @@ let
filter filter
pathExists pathExists
; ;
nixosLib = import (self.inputs.nixpkgs + "/nixos/lib") { };
in in
{ {
imports = filter pathExists [ imports = filter pathExists [
@@ -29,10 +30,11 @@ in
let let
nixosTestArgs = { nixosTestArgs = {
# reference to nixpkgs for the current system # reference to nixpkgs for the current system
inherit pkgs lib; inherit pkgs lib nixosLib;
# this gives us a reference to our flake but also all flake inputs # this gives us a reference to our flake but also all flake inputs
inherit self; inherit self;
inherit (self) clanLib; inherit (self) clanLib;
clan-core = self;
}; };
nixosTests = nixosTests =
lib.optionalAttrs (pkgs.stdenv.isLinux) { lib.optionalAttrs (pkgs.stdenv.isLinux) {

View File

@@ -1,130 +1,132 @@
{ {
pkgs, pkgs,
self, nixosLib,
clanLib, clan-core,
lib,
... ...
}: }:
clanLib.test.makeTestClan { nixosLib.runTest (
inherit pkgs self; { ... }:
nixosTest = ( let
{ lib, ... }: machines = [
let "peer1"
common = "peer2"
{ pkgs, modulesPath, ... }: ];
{ in
imports = [ {
(modulesPath + "/../tests/common/x11.nix") imports = [
]; clan-core.modules.nixosVmTest.clanTest
];
clan.services.mumble.user = "alice"; hostPkgs = pkgs;
environment.systemPackages = [ pkgs.killall ];
};
machines = [
"peer1"
"peer2"
];
in
{
name = "mumble";
clan = { name = "mumble";
directory = ./.;
# TODO: container driver does not support: sleep, wait_for_window, send_chars, wait_for_text defaults =
test.useContainers = false; { pkgs, modulesPath, ... }:
inventory = { {
machines = lib.genAttrs machines (_: { }); imports = [
services = { (modulesPath + "/../tests/common/x11.nix")
mumble.default = { ];
roles.server.machines = machines;
}; clan.services.mumble.user = "alice";
environment.systemPackages = [ pkgs.killall ];
};
clan = {
directory = ./.;
# TODO: container driver does not support: sleep, wait_for_window, send_chars, wait_for_text
test.useContainers = false;
inventory = {
machines = lib.genAttrs machines (_: { });
services = {
mumble.default = {
roles.server.machines = machines;
}; };
}; };
}; };
};
enableOCR = true; enableOCR = true;
nodes.peer1 = common; testScript = ''
nodes.peer2 = common; import time
import re
testScript = ''
import time
import re
def machine_has_text(machine: Machine, regex: str) -> bool: def machine_has_text(machine: Machine, regex: str) -> bool:
variants = machine.get_screen_text_variants() variants = machine.get_screen_text_variants()
# for debugging # for debugging
# machine.screenshot(f"/tmp/{machine.name}.png") # machine.screenshot(f"/tmp/{machine.name}.png")
for text in variants: for text in variants:
print(f"Expecting '{regex}' in '{text}'") print(f"Expecting '{regex}' in '{text}'")
if re.search(regex, text) is not None: if re.search(regex, text) is not None:
return True return True
return False return False
start_all() start_all()
with subtest("Waiting for x"): with subtest("Waiting for x"):
peer1.wait_for_x() peer1.wait_for_x()
peer2.wait_for_x() peer2.wait_for_x()
with subtest("Waiting for murmur"): with subtest("Waiting for murmur"):
peer1.wait_for_unit("murmur.service") peer1.wait_for_unit("murmur.service")
peer2.wait_for_unit("murmur.service") peer2.wait_for_unit("murmur.service")
with subtest("Starting Mumble"): with subtest("Starting Mumble"):
# starting mumble is blocking # starting mumble is blocking
peer1.execute("mumble >&2 &") peer1.execute("mumble >&2 &")
peer2.execute("mumble >&2 &") peer2.execute("mumble >&2 &")
with subtest("Wait for Mumble"): with subtest("Wait for Mumble"):
peer1.wait_for_window(r"Mumble") peer1.wait_for_window(r"Mumble")
peer2.wait_for_window(r"Mumble") peer2.wait_for_window(r"Mumble")
with subtest("Wait for certificate creation"): with subtest("Wait for certificate creation"):
peer1.wait_for_window(r"Mumble") peer1.wait_for_window(r"Mumble")
peer2.wait_for_window(r"Mumble") peer2.wait_for_window(r"Mumble")
for i in range(20): for i in range(20):
time.sleep(1) time.sleep(1)
peer1.send_chars("\n") peer1.send_chars("\n")
peer1.send_chars("\n") peer1.send_chars("\n")
peer2.send_chars("\n") peer2.send_chars("\n")
peer2.send_chars("\n") peer2.send_chars("\n")
if machine_has_text(peer1, r"Mumble Server Connect") and \ if machine_has_text(peer1, r"Mumble Server Connect") and \
machine_has_text(peer2, r"Mumble Server Connect"): machine_has_text(peer2, r"Mumble Server Connect"):
break break
else: else:
raise Exception("Timeout waiting for certificate creation") raise Exception("Timeout waiting for certificate creation")
with subtest("Check validity of server certificates"): with subtest("Check validity of server certificates"):
peer1.execute("killall .mumble-wrapped") peer1.execute("killall .mumble-wrapped")
peer1.sleep(1) peer1.sleep(1)
peer1.execute("mumble mumble://peer2 >&2 &") peer1.execute("mumble mumble://peer2 >&2 &")
peer1.wait_for_window(r"Mumble") peer1.wait_for_window(r"Mumble")
for i in range(20): for i in range(20):
time.sleep(1) time.sleep(1)
peer1.send_chars("\n") peer1.send_chars("\n")
peer1.send_chars("\n") peer1.send_chars("\n")
if machine_has_text(peer1, "Connected."): if machine_has_text(peer1, "Connected."):
break break
else: else:
raise Exception("Timeout waiting for certificate creation") raise Exception("Timeout waiting for certificate creation")
peer2.execute("killall .mumble-wrapped") peer2.execute("killall .mumble-wrapped")
peer2.sleep(1) peer2.sleep(1)
peer2.execute("mumble mumble://peer1 >&2 &") peer2.execute("mumble mumble://peer1 >&2 &")
peer2.wait_for_window(r"Mumble") peer2.wait_for_window(r"Mumble")
for i in range(20): for i in range(20):
time.sleep(1) time.sleep(1)
peer2.send_chars("\n") peer2.send_chars("\n")
peer2.send_chars("\n") peer2.send_chars("\n")
if machine_has_text(peer2, "Connected."): if machine_has_text(peer2, "Connected."):
break break
else: else:
raise Exception("Timeout waiting for certificate creation") raise Exception("Timeout waiting for certificate creation")
''; '';
} }
); )
}

View File

@@ -1,83 +1,87 @@
{ {
pkgs, pkgs,
self, nixosLib,
clanLib, clan-core,
lib,
... ...
}: }:
clanLib.test.makeTestClan { nixosLib.runTest (
inherit pkgs self; { ... }:
nixosTest = ( {
{ lib, ... }: imports = [
{ clan-core.modules.nixosVmTest.clanTest
name = "syncthing"; ];
clan = { hostPkgs = pkgs;
directory = ./.;
# TODO: container driver does not support wait_for_file() yet name = "syncthing";
test.useContainers = false;
inventory = { clan = {
machines = lib.genAttrs [ directory = ./.;
"introducer" # TODO: container driver does not support wait_for_file() yet
"peer1" test.useContainers = false;
"peer2" inventory = {
] (_: { }); machines = lib.genAttrs [
services = { "introducer"
syncthing.default = { "peer1"
roles.peer.machines = [ "peer2"
"peer1" ] (_: { });
"peer2" services = {
]; syncthing.default = {
roles.introducer.machines = [ "introducer" ]; roles.peer.machines = [
"peer1"
"peer2"
];
roles.introducer.machines = [ "introducer" ];
};
};
};
};
nodes.introducer = {
# Doesn't test zerotier!
services.syncthing.openDefaultPorts = true;
services.syncthing.settings.folders = {
"Shared" = {
enable = true;
path = "~/Shared";
versioning = {
type = "trashcan";
params = {
cleanoutDays = "30";
}; };
}; };
}; };
}; };
clan.syncthing.autoAcceptDevices = true;
clan.syncthing.autoShares = [ "Shared" ];
# For faster Tests
systemd.timers.syncthing-auto-accept.timerConfig = {
OnActiveSec = 1;
OnUnitActiveSec = 1;
};
};
nodes.peer1 = {
services.syncthing.openDefaultPorts = true;
};
nodes.peer2 = {
services.syncthing.openDefaultPorts = true;
};
nodes.introducer = { testScript = ''
# Doesn't test zerotier! start_all()
services.syncthing.openDefaultPorts = true; introducer.wait_for_unit("syncthing")
services.syncthing.settings.folders = { peer1.wait_for_unit("syncthing")
"Shared" = { peer2.wait_for_unit("syncthing")
enable = true; peer1.execute("ls -la /var/lib/syncthing")
path = "~/Shared"; peer2.execute("ls -la /var/lib/syncthing")
versioning = { peer1.wait_for_file("/var/lib/syncthing/Shared")
type = "trashcan"; peer2.wait_for_file("/var/lib/syncthing/Shared")
params = { introducer.shutdown()
cleanoutDays = "30"; peer1.execute("echo hello > /var/lib/syncthing/Shared/hello")
}; peer2.wait_for_file("/var/lib/syncthing/Shared/hello")
}; out = peer2.succeed("cat /var/lib/syncthing/Shared/hello")
}; assert "hello" in out
}; '';
clan.syncthing.autoAcceptDevices = true; }
clan.syncthing.autoShares = [ "Shared" ]; )
# For faster Tests
systemd.timers.syncthing-auto-accept.timerConfig = {
OnActiveSec = 1;
OnUnitActiveSec = 1;
};
};
nodes.peer1 = {
services.syncthing.openDefaultPorts = true;
};
nodes.peer2 = {
services.syncthing.openDefaultPorts = true;
};
testScript = ''
start_all()
introducer.wait_for_unit("syncthing")
peer1.wait_for_unit("syncthing")
peer2.wait_for_unit("syncthing")
peer1.execute("ls -la /var/lib/syncthing")
peer2.execute("ls -la /var/lib/syncthing")
peer1.wait_for_file("/var/lib/syncthing/Shared")
peer2.wait_for_file("/var/lib/syncthing/Shared")
introducer.shutdown()
peer1.execute("echo hello > /var/lib/syncthing/Shared/hello")
peer2.wait_for_file("/var/lib/syncthing/Shared/hello")
out = peer2.succeed("cat /var/lib/syncthing/Shared/hello")
assert "hello" in out
'';
}
);
}

View File

@@ -47,8 +47,8 @@ in
hello-service = import ./tests/vm/default.nix { hello-service = import ./tests/vm/default.nix {
inherit module; inherit module;
inherit self inputs pkgs; inherit self inputs pkgs;
# clanLib is exposed from inputs.clan-core nixosLib = import (self.inputs.nixpkgs + "/nixos/lib") { };
clanLib = self.clanLib; clan-core = self;
}; };
}; };
}; };

View File

@@ -1,41 +1,44 @@
{ {
pkgs, pkgs,
self, nixosLib,
clanLib, clan-core,
module, module,
... ...
}: }:
clanLib.test.makeTestClan { nixosLib.runTest (
inherit pkgs self; { ... }:
nixosTest = ( {
{ ... }: imports = [
{ clan-core.modules.nixosVmTest.clanTest
name = "hello-service"; ];
clan = { hostPkgs = pkgs;
directory = ./.;
modules = {
hello-service = module;
};
inventory = {
machines.peer1 = { };
instances."test" = { name = "hello-service";
module.name = "hello-service";
roles.peer.machines.peer1 = { }; clan = {
}; directory = ./.;
modules = {
hello-service = module;
};
inventory = {
machines.peer1 = { };
instances."test" = {
module.name = "hello-service";
roles.peer.machines.peer1 = { };
}; };
}; };
};
testScript = testScript =
{ nodes, ... }: { nodes, ... }:
'' ''
start_all() start_all()
# peer1 should have the 'hello' file # peer1 should have the 'hello' file
value = peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.hello.files.hello.path}") value = peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.hello.files.hello.path}")
assert value.strip() == "Hello world from peer1", value assert value.strip() == "Hello world from peer1", value
''; '';
} }
); )
}

View File

@@ -28,8 +28,9 @@ in
lib.optionalAttrs (pkgs.stdenv.isLinux) { lib.optionalAttrs (pkgs.stdenv.isLinux) {
wifi-service = import ./tests/vm/default.nix { wifi-service = import ./tests/vm/default.nix {
inherit module; inherit module;
inherit self inputs pkgs; inherit inputs pkgs;
clanLib = self.clanLib; clan-core = self;
nixosLib = import (self.inputs.nixpkgs + "/nixos/lib") { };
}; };
}; };
}; };

View File

@@ -1,43 +1,46 @@
{ {
pkgs, pkgs,
self, nixosLib,
clanLib, clan-core,
module, module,
... ...
}: }:
clanLib.test.makeTestClan { nixosLib.runTest (
inherit pkgs self; { ... }:
nixosTest = ( {
{ ... }: imports = [
{ clan-core.modules.nixosVmTest.clanTest
name = "wifi-service"; ];
clan = { hostPkgs = pkgs;
directory = ./.;
test.useContainers = false;
modules."@clan/wifi" = module;
inventory = {
machines.test = { }; name = "wifi-service";
instances = { clan = {
wg-test-one = { directory = ./.;
module.name = "@clan/wifi"; test.useContainers = false;
modules."@clan/wifi" = module;
inventory = {
roles.default.machines = { machines.test = { };
test.settings.networks.one = { };
}; instances = {
wg-test-one = {
module.name = "@clan/wifi";
roles.default.machines = {
test.settings.networks.one = { };
}; };
}; };
}; };
}; };
};
testScript = '' testScript = ''
start_all() start_all()
test.wait_for_unit("NetworkManager.service") test.wait_for_unit("NetworkManager.service")
psk = test.succeed("cat /run/NetworkManager/system-connections/one.nmconnection") psk = test.succeed("cat /run/NetworkManager/system-connections/one.nmconnection")
assert "password-eins" in psk, "Password is incorrect" assert "password-eins" in psk, "Password is incorrect"
''; '';
} }
); )
}

View File

@@ -0,0 +1,197 @@
# Flake-parts module that exposes `flake.modules.nixosVmTest.clanTest`:
# a module meant to be imported into a nixosLib.runTest invocation.  It wires
# a clan configuration (machines, inventory, vars generators) into a NixOS
# VM/container test and adds a check that the committed test vars are fresh.
{
  lib,
  self,
  config,
  ...
}:
let
  inherit (lib)
    mkOption
    removePrefix
    types
    mapAttrsToList
    flip
    unique
    flatten
    ;
  # clanLib as exposed on this flake; provides buildClanModule and the
  # test helper modules imported in `defaults` below.
  clanLib = config.flake.clanLib;
in
{
  # A function that returns an extension to runTest
  # TODO: remove this from clanLib, add to legacyPackages, simplify signature
  flake.modules.nixosVmTest.clanTest =
    # NOTE: `config` here is the runTest configuration, shadowing the outer
    # flake-parts `config` above; `hostPkgs` is runTest's host package set.
    { config, hostPkgs, ... }:
    let
      # The evaluated clan of this test (defined via the `clan` option below).
      clanFlakeResult = config.clan;
      # runTest's `name`; passed to generate-test-vars to select the test.
      testName = config.name;
      # Executable path of the generate-test-vars tool for the host platform.
      update-vars-script = "${
        self.packages.${hostPkgs.system}.generate-test-vars
      }/bin/generate-test-vars";
      # Test directory expressed relative to the flake root, e.g. "checks/foo".
      relativeDir = removePrefix ("${self}/") (toString config.clan.directory);
      # Developer convenience script: regenerate this test's vars in the
      # working tree.  Relies on $PRJ_ROOT being set in the environment
      # (presumably by the dev shell -- TODO confirm).
      update-vars = hostPkgs.writeShellScriptBin "update-vars" ''
        ${update-vars-script} $PRJ_ROOT/${relativeDir} ${testName}
      '';
      # Cleaned copy of the test directory used as input to vars-check.
      testSrc = lib.cleanSource config.clan.directory;
      # All runtimeInputs declared by the vars generators of one machine.
      inputsForMachine =
        machine:
        flip mapAttrsToList machine.clan.core.vars.generators (_name: generator: generator.runtimeInputs);
      # Deduplicated generator runtime inputs across every node of the test;
      # these must be present in the sandbox when generators are re-run.
      generatorRuntimeInputs = unique (
        flatten (flip mapAttrsToList config.nodes (_machineName: machine: inputsForMachine machine))
      );
      # Derivation that re-runs generate-test-vars against a writable copy of
      # the test sources and fails when the committed sops/vars files differ,
      # i.e. when someone forgot to regenerate the test vars.
      vars-check =
        hostPkgs.runCommand "update-vars-check"
          {
            nativeBuildInputs = generatorRuntimeInputs ++ [
              hostPkgs.nix
              hostPkgs.git
              hostPkgs.age
              hostPkgs.sops
              hostPkgs.bubblewrap
            ];
            # Store paths that must be reachable from inside the nested nix
            # store prepared by setupNixInNix in the builder below.
            closureInfo = hostPkgs.closureInfo {
              rootPaths = generatorRuntimeInputs ++ [
                hostPkgs.bash
                hostPkgs.coreutils
                hostPkgs.jq.dev
                hostPkgs.stdenv
                hostPkgs.stdenvNoCC
                hostPkgs.shellcheck-minimal
                hostPkgs.age
                hostPkgs.sops
              ];
            };
          }
          ''
            ${self.legacyPackages.${hostPkgs.system}.setupNixInNix}
            cp -r ${testSrc} ./src
            chmod +w -R ./src
            # Snapshot the sops/vars file listing, regenerate, snapshot again;
            # any difference means the committed vars are stale.
            find ./src/sops ./src/vars | sort > filesBefore
            ${update-vars-script} ./src ${testName} \
              --repo-root ${self.packages.${hostPkgs.system}.clan-core-flake} \
              --clean
            find ./src/sops ./src/vars | sort > filesAfter
            if ! diff -q filesBefore filesAfter; then
              echo "The update-vars script changed the files in ${testSrc}."
              echo "Diff:"
              diff filesBefore filesAfter || true
              exit 1
            fi
            touch $out
          '';
    in
    {
      imports = [
        # Driver module that allows running the test in containers
        # instead of full VMs.
        ../test/container-test-driver/driver-module.nix
      ];
      options = {
        # Pinned inputs for evaluating the clan submodule.  Overridable so a
        # downstream flake can substitute its own clan-core/nixpkgs pins.
        clanSettings = mkOption {
          default = { };
          type = types.submodule {
            options = {
              clan-core = mkOption { default = self; };
              nixpkgs = mkOption { default = self.inputs.nixpkgs; };
              nix-darwin = mkOption { default = self.inputs.nix-darwin; };
            };
          };
        };
        # The clan under test, evaluated with the same flake-parts module a
        # real clan flake would use, so tests stay close to production usage.
        clan = mkOption {
          default = { };
          type = types.submoduleWith {
            specialArgs = {
              inherit (config.clanSettings)
                clan-core
                nixpkgs
                nix-darwin
                ;
            };
            modules = [
              clanLib.buildClanModule.flakePartsModule
              {
                # Prefix reported in error messages so failures point at the
                # right attribute path of the check -- TODO confirm usage.
                _prefix = [
                  "checks"
                  "<system>"
                  config.name
                  "config"
                  "clan"
                ];
              }
              {
                options = {
                  test.useContainers = mkOption {
                    default = true;
                    type = types.bool;
                    description = "Whether to use containers for the test.";
                  };
                };
              }
            ];
          };
        };
      };
      config = {
        # Inherit all nodes from the clan
        # i.e. nodes.jon <- clan.machines.jon
        # clanInternals.nixosModules contains nixosModules per node
        nodes = clanFlakeResult.clanInternals.nixosModules;
        # !WARNING: Write a detailed comment if adding new options here
        # We should be very careful about adding new options here because it affects all tests
        # Keep in mind:
        # - tests should be close to the real world as possible
        # - ensure stability: in clan-core and downstream
        # - ensure that the tests are fast and reliable
        defaults = (
          { config, ... }:
          {
            imports = [
              # Speed up evaluation
              clanLib.test.minifyModule
              # Setup for sops during tests
              # configures a static age-key to skip the age-key generation
              clanLib.test.sopsModule
            ];
            # Disable documentation
            # This is nice to speed up the evaluation
            # And also suppresses any warnings or errors about the documentation
            documentation.enable = lib.mkDefault false;
            # Disable garbage collection during the test
            # https://nix.dev/manual/nix/2.28/command-ref/conf-file.html?highlight=min-free#available-settings
            nix.settings.min-free = 0;
            # This is typically set once via vars generate for a machine
            # Since we have ephemeral machines, we set it here for the test
            system.stateVersion = config.system.nixos.release;
            # Currently this is the default in NixOS, but we set it explicitly to avoid surprises
            # Disable the initrd systemd service which has the following effect
            #
            # With the below on 'false' initrd runs a 'minimal shell script', called the stage-1 init.
            # Benefits:
            #   Simple and fast.
            #   Easier to debug for very minimal or custom setups.
            # Drawbacks:
            #   Limited flexibility.
            #   Harder to handle advanced setups (like TPM, LUKS, or LVM-on-LUKS) but not needed since we are in a test
            #   No systemd journal logs from initrd.
            boot.initrd.systemd.enable = false;
            # make the test depend on its vars-check derivation
            environment.variables.CLAN_VARS_CHECK = "${vars-check}";
          }
        );
        # Expose the regenerate helper and the freshness check so callers
        # (e.g. flake checks or dev shells) can reuse them.
        result = { inherit update-vars vars-check; };
      };
    };
}

View File

@@ -8,6 +8,7 @@ rec {
# TODO: automatically generate this from the directory conventions # TODO: automatically generate this from the directory conventions
imports = [ imports = [
./build-clan/flake-module.nix ./build-clan/flake-module.nix
./clanTest/flake-module.nix
./introspection/flake-module.nix ./introspection/flake-module.nix
./inventory/flake-module.nix ./inventory/flake-module.nix
./jsonschema/flake-module.nix ./jsonschema/flake-module.nix

View File

@@ -8,7 +8,6 @@ let
evalModules evalModules
; ;
# TODO: Use makeTestClan
evalInventory = evalInventory =
m: m:
(evalModules { (evalModules {

View File

@@ -1,19 +1,7 @@
{ {
lib,
clanLib, clanLib,
...
}: }:
let
inherit (lib)
mkOption
removePrefix
types
mapAttrsToList
flip
unique
flatten
;
in
{ {
containerTest = import ./container-test.nix; containerTest = import ./container-test.nix;
baseTest = import ./test-base.nix; baseTest = import ./test-base.nix;
@@ -22,194 +10,4 @@ in
minifyModule = ./minify.nix; minifyModule = ./minify.nix;
sopsModule = ./sops.nix; sopsModule = ./sops.nix;
# A function that returns an extension to runTest
# TODO: remove this from clanLib, add to legacyPackages, simplify signature
makeTestClan =
{
nixosTest,
pkgs,
self,
...
}:
let
nixos-lib = import (pkgs.path + "/nixos/lib") { };
test = (
nixos-lib.runTest (
{ config, ... }:
let
clanFlakeResult = config.clan;
testName = config.name;
update-vars-script = "${self.packages.${pkgs.system}.generate-test-vars}/bin/generate-test-vars";
relativeDir = removePrefix ("${self}/") (toString config.clan.directory);
update-vars = pkgs.writeShellScriptBin "update-vars" ''
${update-vars-script} $PRJ_ROOT/${relativeDir} ${testName}
'';
testSrc = lib.cleanSource config.clan.directory;
inputsForMachine =
machine:
flip mapAttrsToList machine.clan.core.vars.generators (_name: generator: generator.runtimeInputs);
generatorRuntimeInputs = unique (
flatten (flip mapAttrsToList config.nodes (_machineName: machine: inputsForMachine machine))
);
vars-check =
pkgs.runCommand "update-vars-check"
{
nativeBuildInputs = generatorRuntimeInputs ++ [
pkgs.nix
pkgs.git
pkgs.age
pkgs.sops
pkgs.bubblewrap
];
closureInfo = pkgs.closureInfo {
rootPaths = generatorRuntimeInputs ++ [
pkgs.bash
pkgs.coreutils
pkgs.jq.dev
pkgs.stdenv
pkgs.stdenvNoCC
pkgs.shellcheck-minimal
pkgs.age
pkgs.sops
];
};
}
''
${self.legacyPackages.${pkgs.system}.setupNixInNix}
cp -r ${testSrc} ./src
chmod +w -R ./src
find ./src/sops ./src/vars | sort > filesBefore
${update-vars-script} ./src ${testName} \
--repo-root ${self.packages.${pkgs.system}.clan-core-flake} \
--clean
find ./src/sops ./src/vars | sort > filesAfter
if ! diff -q filesBefore filesAfter; then
echo "The update-vars script changed the files in ${testSrc}."
echo "Diff:"
diff filesBefore filesAfter || true
exit 1
fi
touch $out
'';
in
{
imports = [
nixosTest
./container-test-driver/driver-module.nix
];
options = {
clanSettings = mkOption {
default = { };
type = types.submodule {
options = {
clan-core = mkOption { default = self; };
nixpkgs = mkOption { default = self.inputs.nixpkgs; };
nix-darwin = mkOption { default = self.inputs.nix-darwin; };
};
};
};
clan = mkOption {
default = { };
type = types.submoduleWith {
specialArgs = {
inherit (config.clanSettings)
clan-core
nixpkgs
nix-darwin
;
};
modules = [
clanLib.buildClanModule.flakePartsModule
{
_prefix = [
"checks"
"<system>"
config.name
"config"
"clan"
];
options = {
test.useContainers = mkOption {
default = true;
type = types.bool;
description = "Whether to use containers for the test.";
};
};
}
];
};
};
};
config = {
# Inherit all nodes from the clan
# i.e. nodes.jon <- clan.machines.jon
# clanInternals.nixosModules contains nixosModules per node
nodes = clanFlakeResult.clanInternals.nixosModules;
hostPkgs = pkgs;
# !WARNING: Write a detailed comment if adding new options here
# We should be very careful about adding new options here because it affects all tests
# Keep in mind:
# - tests should be close to the real world as possible
# - ensure stability: in clan-core and downstream
# - ensure that the tests are fast and reliable
defaults = (
{ config, ... }:
{
imports = [
# Speed up evaluation
clanLib.test.minifyModule
# Setup for sops during tests
# configures a static age-key to skip the age-key generation
clanLib.test.sopsModule
];
# Disable documentation
# This is nice to speed up the evaluation
# And also suppresses any warnings or errors about the documentation
documentation.enable = lib.mkDefault false;
# Disable garbage collection during the test
# https://nix.dev/manual/nix/2.28/command-ref/conf-file.html?highlight=min-free#available-settings
nix.settings.min-free = 0;
# This is typically set once via vars generate for a machine
# Since we have ephemeral machines, we set it here for the test
system.stateVersion = config.system.nixos.release;
# Currently this is the default in NixOS, but we set it explicitly to avoid surprises
# Disable the initrd systemd service which has the following effect
#
# With the below on 'false' initrd runs a 'minimal shell script', called the stage-1 init.
# Benefits:
# Simple and fast.
# Easier to debug for very minimal or custom setups.
# Drawbacks:
# Limited flexibility.
# Harder to handle advanced setups (like TPM, LUKS, or LVM-on-LUKS) but not needed since we are in a test
# No systemd journal logs from initrd.
boot.initrd.systemd.enable = false;
# make the test depend on its vars-check derivation
environment.variables.CLAN_VARS_CHECK = "${vars-check}";
}
);
result = { inherit update-vars vars-check; };
};
}
)
);
in
test;
} }

View File

@@ -93,8 +93,8 @@ class Options:
def parse_args() -> Options: def parse_args() -> Options:
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description=""" description="""
Update the vars of a 'makeTestClan' integration test. Update the vars of a 'clanTest' integration test.
See 'clanLib.test.makeTestClan' for more information on how to create such a test. See 'clanLib.test.clanTest' for more information on how to create such a test.
""", """,
) )
parser.add_argument( parser.add_argument(