Merge pull request 'clanCore: Init machine.id and idShort' (#2045) from Qubasa/clan-core:Qubasa-main into main

This commit is contained in:
clan-bot
2024-09-06 17:00:34 +00:00
32 changed files with 302 additions and 78 deletions

View File

@@ -134,6 +134,10 @@
];
virtualisation.emptyDiskImages = [ 256 ];
clan.core.clanDir = ./.;
clan.core.machine = {
id = "a73f5245cdba4576ab6cfef3145ac9ec";
diskId = "c4c47b";
};
};
testScript = ''

View File

@@ -18,6 +18,10 @@
{
clan.core.machineName = "machine";
clan.core.clanDir = ./.;
clan.core.machine = {
id = "a73f5245cdba4576ab6cfef3145ac9ec";
diskId = "c4c47b";
};
clan.core.state.testState.folders = [ "/etc/state" ];
environment.etc.state.text = "hello world";
systemd.tmpfiles.settings."vmsecrets" = {

View File

@@ -9,6 +9,7 @@
networking.hostName = "machine";
services.openssh.enable = true;
services.openssh.startWhenNeeded = false;
};
testScript = ''
start_all()

View File

@@ -12,6 +12,10 @@
{
clan.core.machineName = "machine";
clan.core.clanDir = ./.;
clan.core.machine = {
id = "a73f5245cdba4576ab6cfef3145ac9ec";
diskId = "c4c47b";
};
}
];
};

View File

@@ -10,6 +10,7 @@
let
dependencies = [
pkgs.disko
pkgs.age
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.toplevel
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.diskoScript
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.diskoScript.drvPath
@@ -25,9 +26,11 @@
nodes.target = {
virtualisation.emptyDiskImages = [ 4096 ];
virtualisation.memorySize = 3000;
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
environment.systemPackages = [
self.packages.${pkgs.system}.clan-cli
] ++ self.packages.${pkgs.system}.clan-cli.runtimeDependencies;
environment.variables."SOPS_AGE_KEY" = builtins.readFile ../lib/age/privkey;
environment.etc."install-closure".source = "${closureInfo}/store-paths";
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
@@ -38,11 +41,15 @@
"flakes"
];
};
system.extraDependencies = dependencies;
};
testScript = ''
start_all()
machine.succeed("clan flash --debug --flake ${../..} --yes --disk main /dev/vdb test-install-machine")
machine.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
machine.succeed("clan secrets key generate")
machine.succeed("clan secrets users add --debug --flake test-flake testuser '${builtins.readFile ../lib/age/pubkey}'")
machine.succeed("clan flash --debug --flake test-flake --yes --disk main /dev/vdb test-install-machine")
'';
} { inherit pkgs self; };
};

View File

@@ -1,7 +1,12 @@
{ self, lib, ... }:
{
clan.machines.test-install-machine = {
clan.core.networking.targetHost = "test-install-machine";
clan.core.machine = {
id = "a73f5245cdba4576ab6cfef3145ac9ec";
diskId = "c4c47b";
};
fileSystems."/".device = lib.mkDefault "/dev/vdb";
boot.loader.grub.device = lib.mkDefault "/dev/vdb";
@@ -17,7 +22,10 @@
(modulesPath + "/profiles/qemu-guest.nix")
];
clan.single-disk.device = "/dev/vdb";
clan.core.machine = {
id = "a73f5245cdba4576ab6cfef3145ac9ec";
diskId = "c4c47b";
};
environment.etc."install-successful".text = "ok";
boot.consoleLogLevel = lib.mkForce 100;
@@ -34,8 +42,10 @@
let
dependencies = [
self
pkgs.age
self.nixosConfigurations.test-install-machine.config.system.build.toplevel
self.nixosConfigurations.test-install-machine.config.system.build.diskoScript
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.diskoScript.drvPath
self.nixosConfigurations.test-install-machine.config.system.clan.deployment.file
pkgs.stdenv.drvPath
pkgs.nixos-anywhere
@@ -50,6 +60,7 @@
services.openssh.enable = true;
users.users.root.openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
system.nixos.variant_id = "installer";
virtualisation.emptyDiskImages = [ 4096 ];
nix.settings = {
substituters = lib.mkForce [ ];
@@ -67,6 +78,7 @@
self.packages.${pkgs.system}.clan-cli
] ++ self.packages.${pkgs.system}.clan-cli.runtimeDependencies;
environment.etc."install-closure".source = "${closureInfo}/store-paths";
environment.variables."SOPS_AGE_KEY" = builtins.readFile ../lib/age/privkey;
virtualisation.memorySize = 2048;
nix.settings = {
substituters = lib.mkForce [ ];
@@ -99,9 +111,11 @@
client.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v root@target hostname")
client.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
client.fail("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
client.succeed("clan machines hw-generate --flake test-flake test-install-machine root@target>&2")
client.succeed("clan secrets key generate")
client.succeed("clan secrets users add --debug --flake test-flake testuser '${builtins.readFile ../lib/age/pubkey}'")
client.succeed("clan machines hw-generate --debug --flake test-flake test-install-machine root@target>&2")
client.succeed("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
client.succeed("clan machines install --debug --flake ${../..} --yes test-install-machine root@target >&2")
client.succeed("clan machines install --debug --flake test-flake --yes test-install-machine root@target >&2")
try:
target.shutdown()
except BrokenPipeError:

1
checks/lib/age/privkey Normal file
View File

@@ -0,0 +1 @@
AGE-SECRET-KEY-1KF8E3SR3TTGL6M476SKF7EEMR4H9NF7ZWYSLJUAK8JX276JC7KUSSURKFK

1
checks/lib/age/pubkey Normal file
View File

@@ -0,0 +1 @@
age1dhwqzkah943xzc34tc3dlmfayyevcmdmxzjezdgdy33euxwf59vsp3vk3c

View File

@@ -17,7 +17,10 @@
{
clan.core.machineName = "machine";
clan.core.clanDir = ./.;
clan.core.machine = {
id = "a73f5245cdba4576ab6cfef3145ac9ec";
diskId = "c4c47b";
};
services.nginx.virtualHosts."matrix.clan.test" = {
enableACME = lib.mkForce false;
forceSSL = lib.mkForce false;

View File

@@ -32,6 +32,10 @@
common
{
clan.core.machineName = "peer1";
clan.core.machine = {
id = "df97124f09da48e3a22d77ce30ee8da6";
diskId = "c9c52c";
};
environment.etc = {
"mumble-key".source = ./peer_1/peer_1_test_key;
"mumble-cert".source = ./peer_1/peer_1_test_cert;
@@ -65,6 +69,10 @@
imports = [
common
{
clan.core.machine = {
id = "a73f5245cdba4576ab6cfef3145ac9ec";
diskId = "c4c47b";
};
clan.core.machineName = "peer2";
environment.etc = {
"mumble-key".source = ./peer_2/peer_2_test_key;

View File

@@ -8,7 +8,13 @@ let
self.nixosModules.clanCore
# This is the only option that is not part of the
# module because it is usually set by flake-parts
{ clan.core.clanDir = ./.; }
{
clan.core.clanDir = ./.;
clan.core.machine = {
id = "df97124f09da48e3a22d77ce30ee8da6";
diskId = "c9c52c";
};
}
];
};
in

View File

@@ -15,6 +15,11 @@
clan.localbackup.targets.hdd.directory = "/mnt/external-disk";
clan.core.clanDir = ./.;
clan.core.machine = {
id = "df97124f09da48e3a22d77ce30ee8da6";
diskId = "c9c52c";
};
systemd.services.sample-service = {
wantedBy = [ "multi-user.target" ];
script = ''

View File

@@ -12,6 +12,10 @@
clan.core.clanDir = "${./.}";
clan.core.machineName = "machine";
clan.core.machine = {
id = "df97124f09da48e3a22d77ce30ee8da6";
diskId = "c9c52c";
};
networking.hostName = "machine";
};

View File

@@ -14,6 +14,10 @@
{
clan.core.machineName = "introducer";
clan.core.clanDir = ./.;
clan.core.machine = {
id = "df97124f09da48e3a22d77ce30ee8da6";
diskId = "c9c52c";
};
environment.etc = {
"syncthing.pam".source = ./introducer/introducer_test_cert;
"syncthing.key".source = ./introducer/introducer_test_key;
@@ -55,6 +59,10 @@
{
clan.core.machineName = "peer1";
clan.core.clanDir = ./.;
clan.core.machine = {
id = "645a43ad1d6f456aa2d623464efed096";
diskId = "9404bf2fb28343cba82e64d1a9131ea4";
};
clan.syncthing.introducer = lib.strings.removeSuffix "\n" (
builtins.readFile ./introducer/introducer_device_id
);
@@ -77,6 +85,10 @@
{
clan.core.machineName = "peer2";
clan.core.clanDir = ./.;
clan.core.machine = {
id = "dd0927b2113b4fa58a94a4be15b0408e";
diskId = "05d6d08214d14261b001782b417ca2a3";
};
clan.syncthing.introducer = lib.strings.removeSuffix "\n" (
builtins.readFile ./introducer/introducer_device_id
);

View File

@@ -16,6 +16,10 @@ import ../lib/test-base.nix (
{
clan.core.machineName = "machine";
clan.core.clanDir = ./.;
clan.core.machine = {
id = "df97124f09da48e3a22d77ce30ee8da6";
diskId = "c9c52c";
};
}
];
services.wayland-proxy-virtwl.enable = true;

View File

@@ -12,6 +12,10 @@
{
clan.core.machineName = "machine";
clan.core.clanDir = ./.;
clan.core.machine = {
id = "df97124f09da48e3a22d77ce30ee8da6";
diskId = "c9c52c";
};
}
];
};

View File

@@ -1,4 +1,7 @@
{ lib, config, ... }:
let
cfg = config.clan.single-disk;
in
{
options.clan.single-disk = {
device = lib.mkOption {
@@ -8,26 +11,38 @@
# Question: should we set a default here?
# default = "/dev/null";
};
suffix = lib.mkOption {
default = config.clan.core.machine.diskId;
defaultText = "abcdef";
type = lib.types.nullOr lib.types.str;
description = "The suffix to use for the disk";
};
};
config = {
assertions = [
{
assertion = cfg.suffix != null;
message = "clan.core.machine.diskId must be set, please run `clan facts generate`";
}
];
boot.loader.grub.efiSupport = lib.mkDefault true;
boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
disko.devices = {
disko.devices = lib.mkIf (cfg.suffix != null) {
disk = {
main = {
type = "disk";
# This is set through the UI
device = config.clan.single-disk.device;
device = cfg.device;
content = {
type = "gpt";
partitions = {
"${config.networking.hostName}-boot" = {
"boot-${cfg.suffix}" = {
size = "1M";
type = "EF02"; # for grub MBR
priority = 1;
};
"${config.networking.hostName}-ESP" = {
"ESP-${cfg.suffix}" = {
size = "512M";
type = "EF00";
content = {
@@ -36,7 +51,7 @@
mountpoint = "/boot";
};
};
"${config.networking.hostName}-root" = {
"root-${cfg.suffix}" = {
size = "100%";
content = {
type = "filesystem";

View File

@@ -108,6 +108,7 @@ nav:
- reference/clan-core/sops.md
- reference/clan-core/state.md
- reference/clan-core/deployment.md
- reference/clan-core/machine.md
- reference/clan-core/networking.md
- Nix API:
- reference/nix-api/index.md

View File

@@ -17,18 +17,19 @@ lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
```nix hl_lines="14 40"
{ config, lib, ... }:
let
suffix = config.clan.core.machine.diskId;
mirrorBoot = idx: {
type = "disk";
device = "/dev/disk/by-id/${idx}";
content = {
type = "gpt";
partitions = {
"${config.networking.hostName}-boot" = {
"boot-${suffix}" = {
size = "1M";
type = "EF02"; # for grub MBR
priority = 1;
};
"${config.networking.hostName}-ESP" = lib.mkIf (idx == "nvme-eui.002538b931b59865") {
"ESP-${suffix}" = lib.mkIf (idx == "nvme-eui.002538b931b59865") {
size = "1G";
type = "EF00";
content = {
@@ -38,7 +39,7 @@ lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
mountOptions = [ "nofail" ];
};
};
"${config.networking.hostName}-root" = {
"root-${suffix}" = {
size = "100%";
content = {
type = "zfs";
@@ -108,18 +109,19 @@ lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
```nix hl_lines="14 40 41"
{ config, lib, ... }:
let
suffix = config.clan.core.machine.diskId;
mirrorBoot = idx: {
type = "disk";
device = "/dev/disk/by-id/${idx}";
content = {
type = "gpt";
partitions = {
boot = {
"boot-${suffix}" = {
size = "1M";
type = "EF02"; # for grub MBR
priority = 1;
};
ESP = lib.mkIf (idx == "nvme-eui.002538b931b59865") {
"ESP-${suffix}" = lib.mkIf (idx == "nvme-eui.002538b931b59865") {
size = "1G";
type = "EF00";
content = {
@@ -129,7 +131,7 @@ lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
mountOptions = [ "nofail" ];
};
};
zfs = {
"root-${suffix}" = {
size = "100%";
content = {
type = "zfs";

View File

@@ -88,7 +88,8 @@
"machines": {
"test-inventory-machine": {
"config": {
"device": "/dev/null"
"device": "/dev/null",
"suffix": "foobar"
},
"imports": []
}

View File

@@ -25,6 +25,9 @@ let
evaled = lib.evalModules {
modules = [
baseModule
({
clan.core.clanDir = ./.;
})
clan-core.nixosModules.clanCore
] ++ (map (name: clanModules.${name}) modulenames);
};

View File

@@ -18,5 +18,6 @@
./vm.nix
./wayland-proxy-virtwl.nix
./zerotier
./machine_id.nix
];
}

View File

@@ -0,0 +1,58 @@
# NixOS module: stable per-machine identifiers (machine id, short id, disk id).
# The ids are generated once via the clan "facts" mechanism and then reused,
# so reinstalls keep the same hostId / machine_id / disk partition labels.
{
config,
pkgs,
lib,
...
}:
let
# Shorthand for this module's own option set.
cfg = config.clan.core.machine;
in
{
options.clan.core.machine = {
# 32-char hex uuid identifying the machine (feeds systemd.machine_id).
id = lib.mkOption {
type = lib.types.nullOr lib.types.str;
description = "The machine id";
};
# Derived, read-only: first 8 hex chars of `id` (shape matches networking.hostId).
idShort = lib.mkOption {
readOnly = true;
type = lib.types.nullOr lib.types.str;
description = "The short machine id";
};
# Short hex id used to suffix disk partition labels (see single-disk module).
diskId = lib.mkOption {
type = lib.types.nullOr lib.types.str;
description = "The disk id";
};
};
config = {
# Default both ids from the generated "machine_id" facts; mkDefault lets
# users (and tests) pin explicit values that override the facts.
clan.core.machine.id =
lib.mkDefault
config.clan.core.facts.services."machine_id".public."machine_id".value;
# readOnly option: always computed from `id`, never set directly.
clan.core.machine.idShort = if (cfg.id != null) then (lib.substring 0 8 cfg.id) else null;
clan.core.machine.diskId =
lib.mkDefault
config.clan.core.facts.services."machine_id".public."diskId".value;
# Fact generator producing the two public values above.
# machine_id: 16 random bytes -> 32 hex chars; diskId: 3 bytes -> 6 hex chars.
clan.core.facts.services."machine_id" = {
public."machine_id" = { };
public."diskId" = { };
generator.path = [
pkgs.coreutils
];
generator.script = ''
machine_uuid=$(dd if=/dev/urandom bs=1 count=16 2>/dev/null | od -An -tx1 | tr -d ' \n')
disk_uuid=$(dd if=/dev/urandom bs=1 count=3 2>/dev/null | od -An -tx1 | tr -d ' \n')
echo -n "$machine_uuid" > "$facts"/machine_id
echo -n "$disk_uuid" > "$facts"/diskId
'';
};
# Wire the ids into NixOS: hostId (8 hex chars) and a stable systemd machine id.
# Both are guarded so an unset id does not break evaluation.
networking.hostId = lib.mkIf (cfg.id != null) (lib.mkDefault cfg.idShort);
boot.kernelParams = lib.mkIf (cfg.id != null) [
''systemd.machine_id=${cfg.id}''
];
};
}

View File

@@ -11,6 +11,7 @@ from typing import NamedTuple
import pytest
from clan_cli.dirs import nixpkgs_source
from fixture_error import FixtureError
from helpers import cli
from root import CLAN_CORE
log = logging.getLogger(__name__)
@@ -50,9 +51,14 @@ class FlakeForTest(NamedTuple):
path: Path
from age_keys import KEYS, KeyPair
def generate_flake(
temporary_home: Path,
flake_template: Path,
monkeypatch: pytest.MonkeyPatch,
sops_key: KeyPair = KEYS[0],
substitutions: dict[str, str] | None = None,
# define the machines directly including their config
machine_configs: dict[str, dict] | None = None,
@@ -75,7 +81,6 @@ def generate_flake(
)
)
"""
# copy the template to a new temporary location
if inventory is None:
inventory = {}
@@ -133,6 +138,19 @@ def generate_flake(
sp.run(["git", "config", "user.name", "clan-tool"], cwd=flake, check=True)
sp.run(["git", "config", "user.email", "clan@example.com"], cwd=flake, check=True)
sp.run(["git", "commit", "-a", "-m", "Initial commit"], cwd=flake, check=True)
monkeypatch.setenv("SOPS_AGE_KEY", sops_key.privkey)
cli.run(
[
"secrets",
"users",
"add",
"user1",
sops_key.pubkey,
"--flake",
str(flake),
"--debug",
]
)
return FlakeForTest(flake)

View File

@@ -8,6 +8,7 @@ from pathlib import Path
from time import sleep
from clan_cli.dirs import vm_state_dir
from clan_cli.errors import ClanError
from clan_cli.qemu.qga import QgaSession
from clan_cli.qemu.qmp import QEMUMonitorProtocol
@@ -21,33 +22,46 @@ def find_free_port() -> int:
return sock.getsockname()[1]
def run_vm_in_thread(machine_name: str, ssh_port: int | None = None) -> int:
class VmThread(threading.Thread):
def __init__(self, machine_name: str, ssh_port: int | None = None) -> None:
super().__init__()
self.machine_name = machine_name
self.ssh_port = ssh_port
self.exception: Exception | None = None
self.daemon = True
def run(self) -> None:
try:
cli.run(
["vms", "run", self.machine_name, "--publish", f"{self.ssh_port}:22"]
)
except Exception as ex:
# print exception details
print(traceback.format_exc(), file=sys.stderr)
print(sys.exc_info()[2], file=sys.stderr)
self.exception = ex
def run_vm_in_thread(machine_name: str, ssh_port: int | None = None) -> VmThread:
# runs machine and prints exceptions
if ssh_port is None:
ssh_port = find_free_port()
def run() -> None:
try:
cli.run(["vms", "run", machine_name, "--publish", f"{ssh_port}:22"])
except Exception:
# print exception details
print(traceback.format_exc(), file=sys.stderr)
print(sys.exc_info()[2], file=sys.stderr)
# run the machine in a separate thread
t = threading.Thread(target=run, name="run")
t.daemon = True
t.start()
return ssh_port
vm_thread = VmThread(machine_name, ssh_port)
vm_thread.start()
return vm_thread
# wait for qmp socket to exist
def wait_vm_up(machine_name: str, flake_url: str | None = None) -> None:
def wait_vm_up(machine_name: str, vm: VmThread, flake_url: str | None = None) -> None:
if flake_url is None:
flake_url = str(Path.cwd())
socket_file = vm_state_dir(flake_url, machine_name) / "qmp.sock"
timeout: float = 600
while True:
if vm.exception:
msg = "VM failed to start"
raise ClanError(msg) from vm.exception
if timeout <= 0:
msg = f"qmp socket {socket_file} not found. Is the VM running?"
raise TimeoutError(msg)
@@ -58,12 +72,15 @@ def wait_vm_up(machine_name: str, flake_url: str | None = None) -> None:
# wait for vm to be down by checking if qmp socket is down
def wait_vm_down(machine_name: str, flake_url: str | None = None) -> None:
def wait_vm_down(machine_name: str, vm: VmThread, flake_url: str | None = None) -> None:
if flake_url is None:
flake_url = str(Path.cwd())
socket_file = vm_state_dir(flake_url, machine_name) / "qmp.sock"
timeout: float = 300
while socket_file.exists():
if vm.exception:
msg = "VM failed to start"
raise ClanError(msg) from vm.exception
if timeout <= 0:
msg = f"qmp socket {socket_file} still exists. Is the VM down?"
raise TimeoutError(msg)
@@ -72,11 +89,13 @@ def wait_vm_down(machine_name: str, flake_url: str | None = None) -> None:
# wait for vm to be up then connect and return qmp instance
def qmp_connect(machine_name: str, flake_url: str | None = None) -> QEMUMonitorProtocol:
def qmp_connect(
machine_name: str, vm: VmThread, flake_url: str | None = None
) -> QEMUMonitorProtocol:
if flake_url is None:
flake_url = str(Path.cwd())
state_dir = vm_state_dir(flake_url, machine_name)
wait_vm_up(machine_name, flake_url)
wait_vm_up(machine_name, vm, flake_url)
qmp = QEMUMonitorProtocol(
address=str(os.path.realpath(state_dir / "qmp.sock")),
)
@@ -85,9 +104,11 @@ def qmp_connect(machine_name: str, flake_url: str | None = None) -> QEMUMonitorP
# wait for vm to be up then connect and return qga instance
def qga_connect(machine_name: str, flake_url: str | None = None) -> QgaSession:
def qga_connect(
machine_name: str, vm: VmThread, flake_url: str | None = None
) -> QgaSession:
if flake_url is None:
flake_url = str(Path.cwd())
state_dir = vm_state_dir(flake_url, machine_name)
wait_vm_up(machine_name, flake_url)
wait_vm_up(machine_name, vm, flake_url)
return QgaSession(os.path.realpath(state_dir / "qga.sock"))

View File

@@ -76,6 +76,7 @@ def test_generate_public_var(
temporary_home,
flake_template=CLAN_CORE / "templates" / "minimal",
machine_configs={"my_machine": config},
monkeypatch=monkeypatch,
)
monkeypatch.chdir(flake.path)
machine = Machine(name="my_machine", flake=FlakeId(str(flake.path)))
@@ -105,6 +106,7 @@ def test_generate_secret_var_sops(
temporary_home,
flake_template=CLAN_CORE / "templates" / "minimal",
machine_configs={"my_machine": config},
monkeypatch=monkeypatch,
)
monkeypatch.chdir(flake.path)
sops_setup.init()
@@ -140,6 +142,7 @@ def test_generate_secret_var_sops_with_default_group(
temporary_home,
flake_template=CLAN_CORE / "templates" / "minimal",
machine_configs={"my_machine": config},
monkeypatch=monkeypatch,
)
monkeypatch.chdir(flake.path)
sops_setup.init()
@@ -170,6 +173,7 @@ def test_generate_secret_var_password_store(
temporary_home,
flake_template=CLAN_CORE / "templates" / "minimal",
machine_configs={"my_machine": config},
monkeypatch=monkeypatch,
)
monkeypatch.chdir(flake.path)
gnupghome = temporary_home / "gpg"
@@ -237,6 +241,7 @@ def test_generate_secret_for_multiple_machines(
temporary_home,
flake_template=CLAN_CORE / "templates" / "minimal",
machine_configs={"machine1": machine1_config, "machine2": machine2_config},
monkeypatch=monkeypatch,
)
monkeypatch.chdir(flake.path)
sops_setup.init()
@@ -282,6 +287,7 @@ def test_dependant_generators(
temporary_home,
flake_template=CLAN_CORE / "templates" / "minimal",
machine_configs={"my_machine": config},
monkeypatch=monkeypatch,
)
monkeypatch.chdir(flake.path)
cli.run(["vars", "generate", "--flake", str(flake.path), "my_machine"])
@@ -321,6 +327,7 @@ def test_prompt(
temporary_home,
flake_template=CLAN_CORE / "templates" / "minimal",
machine_configs={"my_machine": config},
monkeypatch=monkeypatch,
)
monkeypatch.chdir(flake.path)
monkeypatch.setattr("sys.stdin", StringIO(input_value))
@@ -359,6 +366,7 @@ def test_share_flag(
temporary_home,
flake_template=CLAN_CORE / "templates" / "minimal",
machine_configs={"my_machine": config},
monkeypatch=monkeypatch,
)
monkeypatch.chdir(flake.path)
sops_setup.init()
@@ -398,6 +406,7 @@ def test_prompt_create_file(
temporary_home,
flake_template=CLAN_CORE / "templates" / "minimal",
machine_configs={"my_machine": config},
monkeypatch=monkeypatch,
)
monkeypatch.chdir(flake.path)
sops_setup.init()
@@ -426,6 +435,7 @@ def test_api_get_prompts(
temporary_home,
flake_template=CLAN_CORE / "templates" / "minimal",
machine_configs={"my_machine": config},
monkeypatch=monkeypatch,
)
monkeypatch.chdir(flake.path)
monkeypatch.setattr("sys.stdin", StringIO("input1"))
@@ -454,6 +464,7 @@ def test_api_set_prompts(
temporary_home,
flake_template=CLAN_CORE / "templates" / "minimal",
machine_configs={"my_machine": config},
monkeypatch=monkeypatch,
)
monkeypatch.chdir(flake.path)
machine = Machine(name="my_machine", flake=FlakeId(str(flake.path)))

View File

@@ -42,6 +42,7 @@ def test_vm_deployment(
temporary_home,
flake_template=CLAN_CORE / "templates" / "minimal",
machine_configs={"my_machine": config},
monkeypatch=monkeypatch,
)
monkeypatch.chdir(flake.path)
sops_setup.init()
@@ -65,8 +66,8 @@ def test_vm_deployment(
)
).stdout.strip()
assert "no-such-path" not in my_secret_path
run_vm_in_thread("my_machine")
qga = qga_connect("my_machine")
vm = run_vm_in_thread("my_machine")
qga = qga_connect("my_machine", vm)
# check my_secret is deployed
_, out, _ = qga.run("cat /run/secrets/vars/my_generator/my_secret", check=True)
assert out == "hello\n"
@@ -81,4 +82,4 @@ def test_vm_deployment(
)
assert returncode != 0
qga.exec_cmd("poweroff")
wait_vm_down("my_machine")
wait_vm_down("my_machine", vm)

View File

@@ -73,16 +73,17 @@ def test_vm_qmp(
"services": {"getty": {"autologinUser": "root"}},
}
},
monkeypatch=monkeypatch,
)
# 'clan vms run' must be executed from within the flake
monkeypatch.chdir(flake.path)
# start the VM
run_vm_in_thread("my_machine")
vm = run_vm_in_thread("my_machine")
# connect with qmp
qmp = qmp_connect("my_machine")
qmp = qmp_connect("my_machine", vm)
# verify that issuing a command works
# result = qmp.cmd_obj({"execute": "query-status"})
@@ -121,14 +122,15 @@ def test_vm_persistence(
temporary_home,
flake_template=CLAN_CORE / "templates" / "minimal",
machine_configs=config,
monkeypatch=monkeypatch,
)
monkeypatch.chdir(flake.path)
run_vm_in_thread("my_machine")
vm = run_vm_in_thread("my_machine")
# wait for the VM to start and connect qga
qga = qga_connect("my_machine")
qga = qga_connect("my_machine", vm)
# create state via qmp command instead of systemd service
qga.run("echo 'dream2nix' > /var/my-state/root", check=True)
@@ -139,13 +141,13 @@ def test_vm_persistence(
qga.exec_cmd("poweroff")
# wait for socket to be down (systemd service 'poweroff' rebooting machine)
wait_vm_down("my_machine")
wait_vm_down("my_machine", vm)
# start vm again
run_vm_in_thread("my_machine")
vm = run_vm_in_thread("my_machine")
# connect second time
qga = qga_connect("my_machine")
qga = qga_connect("my_machine", vm)
# check state exists
qga.run("cat /var/my-state/test", check=True)
# ensure root file is owned by root
@@ -171,5 +173,5 @@ def test_vm_persistence(
assert exitcode == 0, out
# use qmp to shutdown the machine (prevent zombie qemu processes)
qmp = qmp_connect("my_machine")
qmp = qmp_connect("my_machine", vm)
qmp.command("system_powerdown")

View File

@@ -112,15 +112,15 @@ const InstallMachine = (props: InstallMachineProps) => {
e.preventDefault();
const curr_uri = activeURI();
const disk = getValue(formStore, "disk");
const disk_id = props.disks.find((d) => d.name === disk)?.id_link;
if (!curr_uri || !disk_id || !props.name) {
const diskId = props.disks.find((d) => d.name === disk)?.id_link;
if (!curr_uri || !diskId || !props.name) {
return;
}
const r = await callApi("set_single_disk_uuid", {
base_path: curr_uri,
machine_name: props.name,
disk_uuid: disk_id,
disk_uuid: diskId,
});
if (r.status === "error") {
toast.error("Failed to set disk");

View File

@@ -1,4 +1,8 @@
# Fix: `config` is used in the `let` below but was not bound — `{ lib, ... }:`
# only tolerates extra arguments, it does not bring `config` into scope, so
# evaluation fails with "undefined variable 'config'". Bind it explicitly.
{ config, lib, ... }:
let
# Per-machine disk id; suffixes partition labels so they are unique per host.
suffix = config.clan.core.machine.diskId;
in
{
boot.loader.grub.efiSupport = lib.mkDefault true;
boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
@@ -11,12 +15,12 @@
content = {
type = "gpt";
partitions = {
"boot" = {
"boot-${suffix}" = {
size = "1M";
type = "EF02"; # for grub MBR
priority = 1;
};
"ESP" = {
"ESP-${suffix}" = {
size = "512M";
type = "EF00";
content = {
@@ -25,7 +29,7 @@
mountpoint = "/boot";
};
};
"root" = {
"root-${suffix}" = {
size = "100%";
content = {
type = "filesystem";

View File

@@ -1,4 +1,8 @@
# Fix: `config` is used in the `let` below but was not bound — `{ lib, ... }:`
# only tolerates extra arguments, it does not bring `config` into scope, so
# evaluation fails with "undefined variable 'config'". Bind it explicitly.
{ config, lib, ... }:
let
# Per-machine disk id; suffixes partition labels so they are unique per host.
suffix = config.clan.core.machine.diskId;
in
{
# DO NOT EDIT THIS FILE AFTER INSTALLATION of a machine
# Otherwise your system might not boot because of missing partitions / filesystems
@@ -13,12 +17,12 @@
content = {
type = "gpt";
partitions = {
"boot" = {
"boot-${suffix}" = {
size = "1M";
type = "EF02"; # for grub MBR
priority = 1;
};
"ESP" = {
"ESP-${suffix}" = {
size = "512M";
type = "EF00";
content = {
@@ -28,7 +32,7 @@
mountOptions = [ "nofail" ];
};
};
"root" = {
"root-${suffix}" = {
size = "100%";
content = {
type = "filesystem";