clan tests: convert makeTestClan to a module
Let's not put yet another wrapper around runTest; instead, expose our logic as a module that can be imported into any nixos-vm-test.
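
The new usage pattern looks roughly like this (a minimal sketch distilled from the diffs below; the test name "my-test" and the one-line testScript are illustrative):

    { pkgs, nixosLib, clan-core, ... }:
    nixosLib.runTest (
      { ... }:
      {
        # Import the clan test logic as a regular module instead of
        # wrapping runTest in clanLib.test.makeTestClan.
        imports = [ clan-core.modules.nixosVmTest.clanTest ];
        hostPkgs = pkgs;
        name = "my-test";
        clan.directory = ./.;
        testScript = "start_all()";
      }
    )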
@@ -1,61 +1,62 @@
 {
   pkgs,
-  self,
-  clanLib,
+  nixosLib,
+  clan-core,
   ...
 }:

 let
   public-key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII6zj7ubTg6z/aDwRNwvM/WlQdUocMprQ8E92NWxl6t+ test@test";
 in
-clanLib.test.makeTestClan {
-  inherit pkgs self;
-  nixosTest = (
-    { ... }:
-    {
+nixosLib.runTest (
+  { ... }:
+  {
+    imports = [
+      clan-core.modules.nixosVmTest.clanTest
+    ];
+
+    hostPkgs = pkgs;
+
     name = "admin";

     clan = {
       directory = ./.;
       modules."@clan/admin" = ../../clanServices/admin/default.nix;
       inventory = {
         machines.client = { };
         machines.server = { };

         instances = {
           ssh-test-one = {
             module.name = "@clan/admin";
             roles.default.machines."server".settings = {
               allowedKeys.testkey = public-key;
             };
           };
         };
       };
     };

     nodes = {
       client.environment.etc.private-test-key.source = ./private-test-key;

       server = {
         services.openssh.enable = true;
       };
     };

     testScript = ''
       start_all()

       machines = [client, server]
       for m in machines:
           m.systemctl("start network-online.target")

       for m in machines:
           m.wait_for_unit("network-online.target")

       client.succeed(f"ssh -F /dev/null -i /etc/private-test-key -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes root@server true &>/dev/null")
     '';
   }
-);
-}
+)
@@ -1,118 +1,118 @@
 {
   pkgs,
-  self,
-  clanLib,
+  nixosLib,
+  clan-core,
   ...
 }:
-clanLib.test.makeTestClan {
-  inherit pkgs self;
-  useContainers = true;
-  nixosTest = (
-    { ... }:
-    {
+nixosLib.runTest (
+  { ... }:
+  {
+    imports = [
+      clan-core.modules.nixosVmTest.clanTest
+    ];
+
+    hostPkgs = pkgs;
+
     name = "borgbackup";

     clan = {
       directory = ./.;
+      test.useContainers = true;
       modules."@clan/borgbackup" = ../../clanServices/borgbackup/default.nix;
       inventory = {
         machines.clientone = { };
         machines.serverone = { };

         instances = {
           borgone = {
             module.name = "@clan/borgbackup";

             roles.client.machines."clientone" = { };
             roles.server.machines."serverone".settings.directory = "/tmp/borg-test";
           };
         };
       };
     };

     nodes = {
       serverone = {
         services.openssh.enable = true;
         # Needed so PAM doesn't see the user as locked
         users.users.borg.password = "borg";
       };

       clientone =
         { config, pkgs, ... }:
         let
           dependencies = [
-            self
+            clan-core
             pkgs.stdenv.drvPath
-          ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+          ] ++ builtins.map (i: i.outPath) (builtins.attrValues clan-core.inputs);
           closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
         in
         {
           services.openssh.enable = true;

           users.users.root.openssh.authorizedKeys.keyFiles = [ ../assets/ssh/pubkey ];

           clan.core.networking.targetHost = config.networking.hostName;

-          environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
+          environment.systemPackages = [ clan-core.packages.${pkgs.system}.clan-cli ];

           environment.etc.install-closure.source = "${closureInfo}/store-paths";
           nix.settings = {
             substituters = pkgs.lib.mkForce [ ];
             hashed-mirrors = null;
             connect-timeout = pkgs.lib.mkForce 3;
             flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
           };
           system.extraDependencies = dependencies;

           clan.core.state.test-backups.folders = [ "/var/test-backups" ];
         };
     };

     testScript = ''
       import json
       start_all()

       machines = [clientone, serverone]

       for m in machines:
           m.systemctl("start network-online.target")

       for m in machines:
           m.wait_for_unit("network-online.target")

       # dummy data
       clientone.succeed("mkdir -p /var/test-backups /var/test-service")
       clientone.succeed("echo testing > /var/test-backups/somefile")

       clientone.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../assets/ssh/privkey} /root/.ssh/id_ed25519")
       clientone.succeed("${pkgs.coreutils}/bin/touch /root/.ssh/known_hosts")
       clientone.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new localhost hostname")
       clientone.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new $(hostname) hostname")

       # create
       clientone.succeed("borgbackup-create >&2")
       clientone.wait_until_succeeds("! systemctl is-active borgbackup-job-serverone >&2")

       # list
       backup_id = json.loads(clientone.succeed("borg-job-serverone list --json"))["archives"][0]["archive"]
       out = clientone.succeed("borgbackup-list").strip()
       print(out)
       assert backup_id in out, f"backup {backup_id} not found in {out}"

       # borgbackup restore
       clientone.succeed("rm -f /var/test-backups/somefile")
       clientone.succeed(f"NAME='serverone::borg@serverone:.::{backup_id}' borgbackup-restore >&2")
       assert clientone.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
     '';
   }
-);
-}
+)
@@ -1,86 +1,89 @@
 {
   pkgs,
-  self,
-  clanLib,
+  nixosLib,
+  clan-core,
   lib,
   ...
 }:
-clanLib.test.makeTestClan {
-  inherit pkgs self;
-  nixosTest = (
-    { lib, ... }:
-    let
-      machines = [
-        "admin"
-        "peer"
-        "signer"
-      ];
-    in
-    {
-      name = "data-mesher";
+let
+  machines = [
+    "admin"
+    "peer"
+    "signer"
+  ];
+in
+nixosLib.runTest (
+  { ... }:
+  {
+    imports = [
+      clan-core.modules.nixosVmTest.clanTest
+    ];
+
+    hostPkgs = pkgs;
+    name = "data-mesher";

     clan = {
       directory = ./.;
       inventory = {
         machines = lib.genAttrs machines (_: { });
         services = {
           data-mesher.default = {
             roles.peer.machines = [ "peer" ];
             roles.admin.machines = [ "admin" ];
             roles.signer.machines = [ "signer" ];
           };
         };
       };
     };

     defaults =
       { config, ... }:
       {
         environment.systemPackages = [
           config.services.data-mesher.package
         ];

         clan.data-mesher.network.interface = "eth1";
         clan.data-mesher.bootstrapNodes = [
           "[2001:db8:1::1]:7946" # peer1
           "[2001:db8:1::2]:7946" # peer2
         ];

         # speed up for testing
         services.data-mesher.settings = {
           cluster.join_interval = lib.mkForce "2s";
           cluster.push_pull_interval = lib.mkForce "5s";
         };
       };

     nodes = {
       admin.clan.data-mesher.network.tld = "foo";
     };

     # TODO Add better test script.
     testScript = ''

       def resolve(node, success = {}, fail = [], timeout = 60):
           for hostname, ips in success.items():
               for ip in ips:
                   node.wait_until_succeeds(f"getent ahosts {hostname} | grep {ip}", timeout)

           for hostname in fail:
               node.wait_until_fails(f"getent ahosts {hostname}")

       start_all()

       admin.wait_for_unit("data-mesher")
       signer.wait_for_unit("data-mesher")
       peer.wait_for_unit("data-mesher")

       # check dns resolution
       for node in [admin, signer, peer]:
           resolve(node, {
               "admin.foo": ["2001:db8:1::1", "192.168.1.1"],
               "peer.foo": ["2001:db8:1::2", "192.168.1.2"],
               "signer.foo": ["2001:db8:1::3", "192.168.1.3"]
           })
     '';
   }
-);
-}
+)
@@ -1,93 +1,96 @@
 {
   pkgs,
-  self,
-  clanLib,
+  nixosLib,
+  clan-core,
   ...
 }:
-clanLib.test.makeTestClan {
-  inherit pkgs self;
-  nixosTest = (
-    { ... }:
-    {
+nixosLib.runTest (
+  { ... }:
+  {
+    imports = [
+      clan-core.modules.nixosVmTest.clanTest
+    ];
+
+    hostPkgs = pkgs;
+
     # This tests the compatibility of the inventory
     # With the test framework
     # - legacy-modules
     # - clan.service modules
     name = "dummy-inventory-test";

     clan = {
       directory = ./.;
       inventory = {
         machines.peer1 = { };
         machines.admin1 = { };
         services = {
           legacy-module.default = {
             roles.peer.machines = [ "peer1" ];
             roles.admin.machines = [ "admin1" ];
           };
         };

         instances."test" = {
           module.name = "new-service";
           roles.peer.machines.peer1 = { };
         };

         modules = {
           legacy-module = ./legacy-module;
         };
       };
       modules.new-service = {
         _class = "clan.service";
         manifest.name = "new-service";
         roles.peer = { };
         perMachine = {
           nixosModule = {
             # This should be generated by:
             # nix run .#generate-test-vars -- checks/dummy-inventory-test dummy-inventory-test
             clan.core.vars.generators.new-service = {
               files.not-a-secret = {
                 secret = false;
                 deploy = true;
               };
               files.a-secret = {
                 secret = true;
                 deploy = true;
                 owner = "nobody";
                 group = "users";
                 mode = "0644";
               };
               script = ''
                 # This is a dummy script that does nothing
                 echo -n "not-a-secret" > $out/not-a-secret
                 echo -n "a-secret" > $out/a-secret
               '';
             };
           };
         };
       };
     };

     testScript =
       { nodes, ... }:
       ''
         start_all()
         admin1.wait_for_unit("multi-user.target")
         peer1.wait_for_unit("multi-user.target")
         # Provided by the legacy module
         print(admin1.succeed("systemctl status dummy-service"))
         print(peer1.succeed("systemctl status dummy-service"))

         # peer1 should have the 'hello' file
         peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")

         ls_out = peer1.succeed("ls -la ${nodes.peer1.clan.core.vars.generators.new-service.files.a-secret.path}")
         # Check that the file is owned by 'nobody'
         assert "nobody" in ls_out, f"File is not owned by 'nobody': {ls_out}"
         # Check that the file is in the 'users' group
         assert "users" in ls_out, f"File is not in the 'users' group: {ls_out}"
         # Check that the file is in the '0644' mode
         assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}"
       '';
   }
-);
-}
+)
@@ -4,6 +4,7 @@ let
     filter
     pathExists
     ;
+  nixosLib = import (self.inputs.nixpkgs + "/nixos/lib") { };
 in
 {
   imports = filter pathExists [

@@ -29,10 +30,11 @@ in
       let
         nixosTestArgs = {
           # reference to nixpkgs for the current system
-          inherit pkgs lib;
+          inherit pkgs lib nixosLib;
           # this gives us a reference to our flake but also all flake inputs
           inherit self;
           inherit (self) clanLib;
+          clan-core = self;
         };
         nixosTests =
           lib.optionalAttrs (pkgs.stdenv.isLinux) {
@@ -1,130 +1,132 @@
 {
   pkgs,
-  self,
-  clanLib,
+  nixosLib,
+  clan-core,
   lib,
   ...
 }:
-clanLib.test.makeTestClan {
-  inherit pkgs self;
-  nixosTest = (
-    { lib, ... }:
-    let
-      common =
-        { pkgs, modulesPath, ... }:
-        {
-          imports = [
-            (modulesPath + "/../tests/common/x11.nix")
-          ];
-
-          clan.services.mumble.user = "alice";
-          environment.systemPackages = [ pkgs.killall ];
-        };
-      machines = [
-        "peer1"
-        "peer2"
-      ];
-    in
-    {
-      name = "mumble";
+nixosLib.runTest (
+  { ... }:
+  let
+    machines = [
+      "peer1"
+      "peer2"
+    ];
+  in
+  {
+    imports = [
+      clan-core.modules.nixosVmTest.clanTest
+    ];
+
+    hostPkgs = pkgs;
+
+    name = "mumble";
+
+    defaults =
+      { pkgs, modulesPath, ... }:
+      {
+        imports = [
+          (modulesPath + "/../tests/common/x11.nix")
+        ];
+
+        clan.services.mumble.user = "alice";
+        environment.systemPackages = [ pkgs.killall ];
+      };

     clan = {
       directory = ./.;
       # TODO: container driver does not support: sleep, wait_for_window, send_chars, wait_for_text
       test.useContainers = false;
       inventory = {
         machines = lib.genAttrs machines (_: { });
         services = {
           mumble.default = {
             roles.server.machines = machines;
           };
         };
       };
     };

     enableOCR = true;

-      nodes.peer1 = common;
-      nodes.peer2 = common;
-
     testScript = ''
       import time
       import re


       def machine_has_text(machine: Machine, regex: str) -> bool:
           variants = machine.get_screen_text_variants()
           # for debugging
           # machine.screenshot(f"/tmp/{machine.name}.png")
           for text in variants:
               print(f"Expecting '{regex}' in '{text}'")
               if re.search(regex, text) is not None:
                   return True
           return False

       start_all()

       with subtest("Waiting for x"):
           peer1.wait_for_x()
           peer2.wait_for_x()

       with subtest("Waiting for murmur"):
           peer1.wait_for_unit("murmur.service")
           peer2.wait_for_unit("murmur.service")

       with subtest("Starting Mumble"):
           # starting mumble is blocking
           peer1.execute("mumble >&2 &")
           peer2.execute("mumble >&2 &")

       with subtest("Wait for Mumble"):
           peer1.wait_for_window(r"Mumble")
           peer2.wait_for_window(r"Mumble")

       with subtest("Wait for certificate creation"):
           peer1.wait_for_window(r"Mumble")
           peer2.wait_for_window(r"Mumble")

           for i in range(20):
               time.sleep(1)
               peer1.send_chars("\n")
               peer1.send_chars("\n")
               peer2.send_chars("\n")
               peer2.send_chars("\n")
               if machine_has_text(peer1, r"Mumble Server Connect") and \
                  machine_has_text(peer2, r"Mumble Server Connect"):
                   break
           else:
               raise Exception("Timeout waiting for certificate creation")

       with subtest("Check validity of server certificates"):
           peer1.execute("killall .mumble-wrapped")
           peer1.sleep(1)
           peer1.execute("mumble mumble://peer2 >&2 &")
           peer1.wait_for_window(r"Mumble")

           for i in range(20):
               time.sleep(1)
               peer1.send_chars("\n")
               peer1.send_chars("\n")
               if machine_has_text(peer1, "Connected."):
                   break
           else:
               raise Exception("Timeout waiting for certificate creation")

           peer2.execute("killall .mumble-wrapped")
           peer2.sleep(1)
           peer2.execute("mumble mumble://peer1 >&2 &")
           peer2.wait_for_window(r"Mumble")

           for i in range(20):
               time.sleep(1)
               peer2.send_chars("\n")
               peer2.send_chars("\n")
               if machine_has_text(peer2, "Connected."):
                   break
           else:
               raise Exception("Timeout waiting for certificate creation")
     '';
   }
-);
-}
+)
@@ -1,83 +1,87 @@
 {
   pkgs,
-  self,
-  clanLib,
+  nixosLib,
+  clan-core,
   lib,
   ...
 }:
-clanLib.test.makeTestClan {
-  inherit pkgs self;
-  nixosTest = (
-    { lib, ... }:
-    {
-      name = "syncthing";
+nixosLib.runTest (
+  { ... }:
+  {
+    imports = [
+      clan-core.modules.nixosVmTest.clanTest
+    ];
+
+    hostPkgs = pkgs;
+
+    name = "syncthing";

     clan = {
       directory = ./.;
       # TODO: container driver does not support wait_for_file() yet
       test.useContainers = false;
       inventory = {
         machines = lib.genAttrs [
           "introducer"
           "peer1"
           "peer2"
         ] (_: { });
         services = {
           syncthing.default = {
             roles.peer.machines = [
               "peer1"
               "peer2"
             ];
             roles.introducer.machines = [ "introducer" ];
           };
         };
       };
     };

     nodes.introducer = {
       # Doesn't test zerotier!
       services.syncthing.openDefaultPorts = true;
       services.syncthing.settings.folders = {
         "Shared" = {
           enable = true;
           path = "~/Shared";
           versioning = {
             type = "trashcan";
             params = {
               cleanoutDays = "30";
             };
           };
         };
       };
       clan.syncthing.autoAcceptDevices = true;
       clan.syncthing.autoShares = [ "Shared" ];
       # For faster Tests
       systemd.timers.syncthing-auto-accept.timerConfig = {
         OnActiveSec = 1;
         OnUnitActiveSec = 1;
       };
     };
     nodes.peer1 = {
       services.syncthing.openDefaultPorts = true;
     };
     nodes.peer2 = {
       services.syncthing.openDefaultPorts = true;
     };

     testScript = ''
       start_all()
       introducer.wait_for_unit("syncthing")
       peer1.wait_for_unit("syncthing")
       peer2.wait_for_unit("syncthing")
       peer1.execute("ls -la /var/lib/syncthing")
       peer2.execute("ls -la /var/lib/syncthing")
       peer1.wait_for_file("/var/lib/syncthing/Shared")
       peer2.wait_for_file("/var/lib/syncthing/Shared")
       introducer.shutdown()
       peer1.execute("echo hello > /var/lib/syncthing/Shared/hello")
       peer2.wait_for_file("/var/lib/syncthing/Shared/hello")
       out = peer2.succeed("cat /var/lib/syncthing/Shared/hello")
       assert "hello" in out
     '';
   }
-);
-}
+)