Merge pull request 'clanCore: Init machine.id and idShort' (#2045) from Qubasa/clan-core:Qubasa-main into main
@@ -134,6 +134,10 @@
 ];
 virtualisation.emptyDiskImages = [ 256 ];
 clan.core.clanDir = ./.;
+clan.core.machine = {
+  id = "a73f5245cdba4576ab6cfef3145ac9ec";
+  diskId = "c4c47b";
+};
 };
 
 testScript = ''
@@ -18,6 +18,10 @@
 {
 clan.core.machineName = "machine";
 clan.core.clanDir = ./.;
+clan.core.machine = {
+  id = "a73f5245cdba4576ab6cfef3145ac9ec";
+  diskId = "c4c47b";
+};
 clan.core.state.testState.folders = [ "/etc/state" ];
 environment.etc.state.text = "hello world";
 systemd.tmpfiles.settings."vmsecrets" = {
@@ -9,6 +9,7 @@
 networking.hostName = "machine";
 services.openssh.enable = true;
 services.openssh.startWhenNeeded = false;
 
 };
 testScript = ''
 start_all()
@@ -12,6 +12,10 @@
 {
 clan.core.machineName = "machine";
 clan.core.clanDir = ./.;
+clan.core.machine = {
+  id = "a73f5245cdba4576ab6cfef3145ac9ec";
+  diskId = "c4c47b";
+};
 }
 ];
 };
@@ -10,6 +10,7 @@
 let
 dependencies = [
 pkgs.disko
+pkgs.age
 self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.toplevel
 self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.diskoScript
 self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.diskoScript.drvPath
@@ -25,9 +26,11 @@
 nodes.target = {
 virtualisation.emptyDiskImages = [ 4096 ];
 virtualisation.memorySize = 3000;
-environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
+environment.systemPackages = [
+  self.packages.${pkgs.system}.clan-cli
+] ++ self.packages.${pkgs.system}.clan-cli.runtimeDependencies;
+environment.variables."SOPS_AGE_KEY" = builtins.readFile ../lib/age/privkey;
 environment.etc."install-closure".source = "${closureInfo}/store-paths";
 
 nix.settings = {
 substituters = lib.mkForce [ ];
 hashed-mirrors = null;
@@ -38,11 +41,15 @@
 "flakes"
 ];
 };
+system.extraDependencies = dependencies;
 };
 testScript = ''
 start_all()
 
-machine.succeed("clan flash --debug --flake ${../..} --yes --disk main /dev/vdb test-install-machine")
+machine.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
+machine.succeed("clan secrets key generate")
+machine.succeed("clan secrets users add --debug --flake test-flake testuser '${builtins.readFile ../lib/age/pubkey}'")
+machine.succeed("clan flash --debug --flake test-flake --yes --disk main /dev/vdb test-install-machine")
 '';
 } { inherit pkgs self; };
 };
@@ -1,7 +1,12 @@
 { self, lib, ... }:
 
 {
 clan.machines.test-install-machine = {
 clan.core.networking.targetHost = "test-install-machine";
+clan.core.machine = {
+  id = "a73f5245cdba4576ab6cfef3145ac9ec";
+  diskId = "c4c47b";
+};
 fileSystems."/".device = lib.mkDefault "/dev/vdb";
 boot.loader.grub.device = lib.mkDefault "/dev/vdb";
 
@@ -17,7 +22,10 @@
 (modulesPath + "/profiles/qemu-guest.nix")
 ];
 clan.single-disk.device = "/dev/vdb";
+clan.core.machine = {
+  id = "a73f5245cdba4576ab6cfef3145ac9ec";
+  diskId = "c4c47b";
+};
 environment.etc."install-successful".text = "ok";
 
 boot.consoleLogLevel = lib.mkForce 100;
@@ -34,8 +42,10 @@
 let
 dependencies = [
 self
+pkgs.age
 self.nixosConfigurations.test-install-machine.config.system.build.toplevel
 self.nixosConfigurations.test-install-machine.config.system.build.diskoScript
+self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.diskoScript.drvPath
 self.nixosConfigurations.test-install-machine.config.system.clan.deployment.file
 pkgs.stdenv.drvPath
 pkgs.nixos-anywhere
@@ -50,6 +60,7 @@
 services.openssh.enable = true;
 users.users.root.openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
 system.nixos.variant_id = "installer";
 
 virtualisation.emptyDiskImages = [ 4096 ];
 nix.settings = {
 substituters = lib.mkForce [ ];
@@ -67,6 +78,7 @@
 self.packages.${pkgs.system}.clan-cli
 ] ++ self.packages.${pkgs.system}.clan-cli.runtimeDependencies;
 environment.etc."install-closure".source = "${closureInfo}/store-paths";
+environment.variables."SOPS_AGE_KEY" = builtins.readFile ../lib/age/privkey;
 virtualisation.memorySize = 2048;
 nix.settings = {
 substituters = lib.mkForce [ ];
@@ -99,9 +111,11 @@
 client.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v root@target hostname")
 client.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
 client.fail("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
-client.succeed("clan machines hw-generate --flake test-flake test-install-machine root@target>&2")
+client.succeed("clan secrets key generate")
+client.succeed("clan secrets users add --debug --flake test-flake testuser '${builtins.readFile ../lib/age/pubkey}'")
+client.succeed("clan machines hw-generate --debug --flake test-flake test-install-machine root@target>&2")
 client.succeed("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
-client.succeed("clan machines install --debug --flake ${../..} --yes test-install-machine root@target >&2")
+client.succeed("clan machines install --debug --flake test-flake --yes test-install-machine root@target >&2")
 try:
 target.shutdown()
 except BrokenPipeError:
checks/lib/age/privkey (new file, 1 line)
@@ -0,0 +1 @@
+AGE-SECRET-KEY-1KF8E3SR3TTGL6M476SKF7EEMR4H9NF7ZWYSLJUAK8JX276JC7KUSSURKFK

checks/lib/age/pubkey (new file, 1 line)
@@ -0,0 +1 @@
+age1dhwqzkah943xzc34tc3dlmfayyevcmdmxzjezdgdy33euxwf59vsp3vk3c
@@ -17,7 +17,10 @@
 {
 clan.core.machineName = "machine";
 clan.core.clanDir = ./.;
+clan.core.machine = {
+  id = "a73f5245cdba4576ab6cfef3145ac9ec";
+  diskId = "c4c47b";
+};
 services.nginx.virtualHosts."matrix.clan.test" = {
 enableACME = lib.mkForce false;
 forceSSL = lib.mkForce false;
@@ -32,6 +32,10 @@
 common
 {
 clan.core.machineName = "peer1";
+clan.core.machine = {
+  id = "df97124f09da48e3a22d77ce30ee8da6";
+  diskId = "c9c52c";
+};
 environment.etc = {
 "mumble-key".source = ./peer_1/peer_1_test_key;
 "mumble-cert".source = ./peer_1/peer_1_test_cert;
@@ -65,6 +69,10 @@
 imports = [
 common
 {
+clan.core.machine = {
+  id = "a73f5245cdba4576ab6cfef3145ac9ec";
+  diskId = "c4c47b";
+};
 clan.core.machineName = "peer2";
 environment.etc = {
 "mumble-key".source = ./peer_2/peer_2_test_key;
@@ -8,7 +8,13 @@ let
 self.nixosModules.clanCore
 # This is the only option that is not part of the
 # module because it is usually set by flake-parts
-{ clan.core.clanDir = ./.; }
+{
+  clan.core.clanDir = ./.;
+  clan.core.machine = {
+    id = "df97124f09da48e3a22d77ce30ee8da6";
+    diskId = "c9c52c";
+  };
+}
 ];
 };
 in
@@ -15,6 +15,11 @@
 clan.localbackup.targets.hdd.directory = "/mnt/external-disk";
 clan.core.clanDir = ./.;
 
+clan.core.machine = {
+  id = "df97124f09da48e3a22d77ce30ee8da6";
+  diskId = "c9c52c";
+};
+
 systemd.services.sample-service = {
 wantedBy = [ "multi-user.target" ];
 script = ''
@@ -12,6 +12,10 @@
 
 clan.core.clanDir = "${./.}";
 clan.core.machineName = "machine";
+clan.core.machine = {
+  id = "df97124f09da48e3a22d77ce30ee8da6";
+  diskId = "c9c52c";
+};
 
 networking.hostName = "machine";
 };
@@ -14,6 +14,10 @@
 {
 clan.core.machineName = "introducer";
 clan.core.clanDir = ./.;
+clan.core.machine = {
+  id = "df97124f09da48e3a22d77ce30ee8da6";
+  diskId = "c9c52c";
+};
 environment.etc = {
 "syncthing.pam".source = ./introducer/introducer_test_cert;
 "syncthing.key".source = ./introducer/introducer_test_key;
@@ -55,6 +59,10 @@
 {
 clan.core.machineName = "peer1";
 clan.core.clanDir = ./.;
+clan.core.machine = {
+  id = "645a43ad1d6f456aa2d623464efed096";
+  diskId = "9404bf2fb28343cba82e64d1a9131ea4";
+};
 clan.syncthing.introducer = lib.strings.removeSuffix "\n" (
 builtins.readFile ./introducer/introducer_device_id
 );
@@ -77,6 +85,10 @@
 {
 clan.core.machineName = "peer2";
 clan.core.clanDir = ./.;
+clan.core.machine = {
+  id = "dd0927b2113b4fa58a94a4be15b0408e";
+  diskId = "05d6d08214d14261b001782b417ca2a3";
+};
 clan.syncthing.introducer = lib.strings.removeSuffix "\n" (
 builtins.readFile ./introducer/introducer_device_id
 );
@@ -16,6 +16,10 @@ import ../lib/test-base.nix (
 {
 clan.core.machineName = "machine";
 clan.core.clanDir = ./.;
+clan.core.machine = {
+  id = "df97124f09da48e3a22d77ce30ee8da6";
+  diskId = "c9c52c";
+};
 }
 ];
 services.wayland-proxy-virtwl.enable = true;
@@ -12,6 +12,10 @@
 {
 clan.core.machineName = "machine";
 clan.core.clanDir = ./.;
+clan.core.machine = {
+  id = "df97124f09da48e3a22d77ce30ee8da6";
+  diskId = "c9c52c";
+};
 }
 ];
 };
@@ -1,4 +1,7 @@
 { lib, config, ... }:
+let
+  cfg = config.clan.single-disk;
+in
 {
 options.clan.single-disk = {
 device = lib.mkOption {
@@ -8,26 +11,38 @@
 # Question: should we set a default here?
 # default = "/dev/null";
 };
+suffix = lib.mkOption {
+  default = config.clan.core.machine.diskId;
+  defaultText = "abcdef";
+  type = lib.types.nullOr lib.types.str;
+  description = "The suffix to use for the disk";
+};
 };
 config = {
+assertions = [
+  {
+    assertion = cfg.suffix != null;
+    message = "clan.core.machine.diskId must be set, please run `clan facts generate`";
+  }
+];
 boot.loader.grub.efiSupport = lib.mkDefault true;
 boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
-disko.devices = {
+disko.devices = lib.mkIf (cfg.suffix != null) {
 disk = {
 main = {
 type = "disk";
 # This is set through the UI
-device = config.clan.single-disk.device;
+device = cfg.device;
 
 content = {
 type = "gpt";
 partitions = {
-"${config.networking.hostName}-boot" = {
+"boot-${cfg.suffix}" = {
 size = "1M";
 type = "EF02"; # for grub MBR
 priority = 1;
 };
-"${config.networking.hostName}-ESP" = {
+"ESP-${cfg.suffix}" = {
 size = "512M";
 type = "EF00";
 content = {
@@ -36,7 +51,7 @@
 mountpoint = "/boot";
 };
 };
-"${config.networking.hostName}-root" = {
+"root-${cfg.suffix}" = {
 size = "100%";
 content = {
 type = "filesystem";
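A note on the renaming above: because the `suffix` option defaults to `clan.core.machine.diskId`, partition names no longer depend on `networking.hostName`. As a minimal sketch (the `diskId` value is the one used in this PR's tests, not a required value), a machine configured like this:

```nix
{
  # diskId normally comes from the generated machine_id facts; it can also be set by hand
  clan.core.machine.diskId = "c4c47b";
  clan.single-disk.device = "/dev/vdb";
}
```

should end up with the GPT partitions named `boot-c4c47b`, `ESP-c4c47b`, and `root-c4c47b`, keeping the labels unique per machine even when several machines share a hostname.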
@@ -108,6 +108,7 @@ nav:
 - reference/clan-core/sops.md
 - reference/clan-core/state.md
 - reference/clan-core/deployment.md
+- reference/clan-core/machine.md
 - reference/clan-core/networking.md
 - Nix API:
 - reference/nix-api/index.md
@@ -17,18 +17,19 @@ lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
 ```nix hl_lines="14 40"
 { lib, ... }:
 let
+suffix = config.clan.core.machine.diskId;
 mirrorBoot = idx: {
 type = "disk";
 device = "/dev/disk/by-id/${idx}";
 content = {
 type = "gpt";
 partitions = {
-"${config.networking.hostName}-boot" = {
+"boot-${suffix}" = {
 size = "1M";
 type = "EF02"; # for grub MBR
 priority = 1;
 };
-"${config.networking.hostName}-ESP" = lib.mkIf (idx == "nvme-eui.002538b931b59865") {
+"ESP-${suffix}" = lib.mkIf (idx == "nvme-eui.002538b931b59865") {
 size = "1G";
 type = "EF00";
 content = {
@@ -38,7 +39,7 @@ lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
 mountOptions = [ "nofail" ];
 };
 };
-"${config.networking.hostName}-root" = {
+"root-${suffix}" = {
 size = "100%";
 content = {
 type = "zfs";
@@ -108,18 +109,19 @@ lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
 ```nix hl_lines="14 40 41"
 { lib, ... }:
 let
+suffix = config.clan.core.machine.diskId;
 mirrorBoot = idx: {
 type = "disk";
 device = "/dev/disk/by-id/${idx}";
 content = {
 type = "gpt";
 partitions = {
-boot = {
+"boot-${suffix}" = {
 size = "1M";
 type = "EF02"; # for grub MBR
 priority = 1;
 };
-ESP = lib.mkIf (idx == "nvme-eui.002538b931b59865") {
+"ESP-${suffix}" = lib.mkIf (idx == "nvme-eui.002538b931b59865") {
 size = "1G";
 type = "EF00";
 content = {
@@ -129,7 +131,7 @@ lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
 mountOptions = [ "nofail" ];
 };
 };
-zfs = {
+"root-${suffix}" = {
 size = "100%";
 content = {
 type = "zfs";
@@ -88,7 +88,8 @@
 "machines": {
 "test-inventory-machine": {
 "config": {
-"device": "/dev/null"
+"device": "/dev/null",
+"suffix": "foobar"
 },
 "imports": []
 }
@@ -25,6 +25,9 @@ let
 evaled = lib.evalModules {
 modules = [
 baseModule
+({
+  clan.core.clanDir = ./.;
+})
 clan-core.nixosModules.clanCore
 ] ++ (map (name: clanModules.${name}) modulenames);
 };
@@ -18,5 +18,6 @@
 ./vm.nix
 ./wayland-proxy-virtwl.nix
 ./zerotier
+./machine_id.nix
 ];
 }
nixosModules/clanCore/machine_id.nix (new file, 58 lines)
@@ -0,0 +1,58 @@
+{
+  config,
+  pkgs,
+  lib,
+  ...
+}:
+
+let
+  cfg = config.clan.core.machine;
+in
+{
+  options.clan.core.machine = {
+    id = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      description = "The machine id";
+    };
+    idShort = lib.mkOption {
+      readOnly = true;
+      type = lib.types.nullOr lib.types.str;
+      description = "The short machine id";
+    };
+    diskId = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      description = "The disk id";
+    };
+  };
+
+  config = {
+    clan.core.machine.id =
+      lib.mkDefault
+        config.clan.core.facts.services."machine_id".public."machine_id".value;
+    clan.core.machine.idShort = if (cfg.id != null) then (lib.substring 0 8 cfg.id) else null;
+
+    clan.core.machine.diskId =
+      lib.mkDefault
+        config.clan.core.facts.services."machine_id".public."diskId".value;
+
+    clan.core.facts.services."machine_id" = {
+      public."machine_id" = { };
+      public."diskId" = { };
+      generator.path = [
+        pkgs.coreutils
+      ];
+      generator.script = ''
+        machine_uuid=$(dd if=/dev/urandom bs=1 count=16 2>/dev/null | od -An -tx1 | tr -d ' \n')
+        disk_uuid=$(dd if=/dev/urandom bs=1 count=3 2>/dev/null | od -An -tx1 | tr -d ' \n')
+        echo -n "$machine_uuid" > "$facts"/machine_id
+        echo -n "$disk_uuid" > "$facts"/diskId
+      '';
+    };
+
+    networking.hostId = lib.mkIf (cfg.id != null) (lib.mkDefault cfg.idShort);
+
+    boot.kernelParams = lib.mkIf (cfg.id != null) [
+      ''systemd.machine_id=${cfg.id}''
+    ];
+  };
+}
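The new module wires `clan.core.machine.id` and `diskId` to generated facts by default, derives `idShort` as the first 8 characters of the id for `networking.hostId` (which NixOS requires to be exactly 8 hexadecimal characters), and passes the full id to the kernel via `systemd.machine_id`. A minimal sketch of overriding the generated values in a machine configuration, using the values this PR's tests use (any 32-hex-character id would do):

```nix
{
  clan.core.machine = {
    # 32 hex chars; idShort becomes "a73f5245" and ends up as networking.hostId
    id = "a73f5245cdba4576ab6cfef3145ac9ec";
    # short hex value consumed as the disko partition-label suffix
    diskId = "c4c47b";
  };
}
```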
@@ -11,6 +11,7 @@ from typing import NamedTuple
 import pytest
 from clan_cli.dirs import nixpkgs_source
 from fixture_error import FixtureError
+from helpers import cli
 from root import CLAN_CORE
 
 log = logging.getLogger(__name__)
@@ -50,9 +51,14 @@ class FlakeForTest(NamedTuple):
 path: Path
 
 
+from age_keys import KEYS, KeyPair
+
+
 def generate_flake(
 temporary_home: Path,
 flake_template: Path,
+monkeypatch: pytest.MonkeyPatch,
+sops_key: KeyPair = KEYS[0],
 substitutions: dict[str, str] | None = None,
 # define the machines directly including their config
 machine_configs: dict[str, dict] | None = None,
@@ -75,7 +81,6 @@ def generate_flake(
 )
 )
 """
-
 # copy the template to a new temporary location
 if inventory is None:
 inventory = {}
@@ -133,6 +138,19 @@
 sp.run(["git", "config", "user.name", "clan-tool"], cwd=flake, check=True)
 sp.run(["git", "config", "user.email", "clan@example.com"], cwd=flake, check=True)
 sp.run(["git", "commit", "-a", "-m", "Initial commit"], cwd=flake, check=True)
+monkeypatch.setenv("SOPS_AGE_KEY", sops_key.privkey)
+cli.run(
+    [
+        "secrets",
+        "users",
+        "add",
+        "user1",
+        sops_key.pubkey,
+        "--flake",
+        str(flake),
+        "--debug",
+    ]
+)
 
 return FlakeForTest(flake)
 
@@ -8,6 +8,7 @@ from pathlib import Path
 from time import sleep
 
 from clan_cli.dirs import vm_state_dir
+from clan_cli.errors import ClanError
 from clan_cli.qemu.qga import QgaSession
 from clan_cli.qemu.qmp import QEMUMonitorProtocol
 
@@ -21,33 +22,46 @@ def find_free_port() -> int:
 return sock.getsockname()[1]
 
 
-def run_vm_in_thread(machine_name: str, ssh_port: int | None = None) -> int:
+class VmThread(threading.Thread):
+    def __init__(self, machine_name: str, ssh_port: int | None = None) -> None:
+        super().__init__()
+        self.machine_name = machine_name
+        self.ssh_port = ssh_port
+        self.exception: Exception | None = None
+        self.daemon = True
+
+    def run(self) -> None:
+        try:
+            cli.run(
+                ["vms", "run", self.machine_name, "--publish", f"{self.ssh_port}:22"]
+            )
+        except Exception as ex:
+            # print exception details
+            print(traceback.format_exc(), file=sys.stderr)
+            print(sys.exc_info()[2], file=sys.stderr)
+            self.exception = ex
+
+
+def run_vm_in_thread(machine_name: str, ssh_port: int | None = None) -> VmThread:
 # runs machine and prints exceptions
 if ssh_port is None:
 ssh_port = find_free_port()
 
-def run() -> None:
-try:
-cli.run(["vms", "run", machine_name, "--publish", f"{ssh_port}:22"])
-except Exception:
-# print exception details
-print(traceback.format_exc(), file=sys.stderr)
-print(sys.exc_info()[2], file=sys.stderr)
-
-# run the machine in a separate thread
-t = threading.Thread(target=run, name="run")
-t.daemon = True
-t.start()
-return ssh_port
+vm_thread = VmThread(machine_name, ssh_port)
+vm_thread.start()
+return vm_thread
 
 
 # wait for qmp socket to exist
-def wait_vm_up(machine_name: str, flake_url: str | None = None) -> None:
+def wait_vm_up(machine_name: str, vm: VmThread, flake_url: str | None = None) -> None:
 if flake_url is None:
 flake_url = str(Path.cwd())
 socket_file = vm_state_dir(flake_url, machine_name) / "qmp.sock"
 timeout: float = 600
 while True:
+if vm.exception:
+    msg = "VM failed to start"
+    raise ClanError(msg) from vm.exception
 if timeout <= 0:
 msg = f"qmp socket {socket_file} not found. Is the VM running?"
 raise TimeoutError(msg)
@@ -58,12 +72,15 @@ def wait_vm_up(machine_name: str, flake_url: str | None = None) -> None:
 
 
 # wait for vm to be down by checking if qmp socket is down
-def wait_vm_down(machine_name: str, flake_url: str | None = None) -> None:
+def wait_vm_down(machine_name: str, vm: VmThread, flake_url: str | None = None) -> None:
 if flake_url is None:
 flake_url = str(Path.cwd())
 socket_file = vm_state_dir(flake_url, machine_name) / "qmp.sock"
 timeout: float = 300
 while socket_file.exists():
+if vm.exception:
+    msg = "VM failed to start"
+    raise ClanError(msg) from vm.exception
 if timeout <= 0:
 msg = f"qmp socket {socket_file} still exists. Is the VM down?"
 raise TimeoutError(msg)
@@ -72,11 +89,13 @@ def wait_vm_down(machine_name: str, flake_url: str | None = None) -> None:
 
 
 # wait for vm to be up then connect and return qmp instance
-def qmp_connect(machine_name: str, flake_url: str | None = None) -> QEMUMonitorProtocol:
+def qmp_connect(
+    machine_name: str, vm: VmThread, flake_url: str | None = None
+) -> QEMUMonitorProtocol:
 if flake_url is None:
 flake_url = str(Path.cwd())
 state_dir = vm_state_dir(flake_url, machine_name)
-wait_vm_up(machine_name, flake_url)
+wait_vm_up(machine_name, vm, flake_url)
 qmp = QEMUMonitorProtocol(
 address=str(os.path.realpath(state_dir / "qmp.sock")),
 )
@@ -85,9 +104,11 @@ def qmp_connect(machine_name: str, flake_url: str | None = None) -> QEMUMonitorP
 
 
 # wait for vm to be up then connect and return qga instance
-def qga_connect(machine_name: str, flake_url: str | None = None) -> QgaSession:
+def qga_connect(
+    machine_name: str, vm: VmThread, flake_url: str | None = None
+) -> QgaSession:
 if flake_url is None:
 flake_url = str(Path.cwd())
 state_dir = vm_state_dir(flake_url, machine_name)
-wait_vm_up(machine_name, flake_url)
+wait_vm_up(machine_name, vm, flake_url)
 return QgaSession(os.path.realpath(state_dir / "qga.sock"))
@@ -76,6 +76,7 @@ def test_generate_public_var(
 temporary_home,
 flake_template=CLAN_CORE / "templates" / "minimal",
 machine_configs={"my_machine": config},
+monkeypatch=monkeypatch,
 )
 monkeypatch.chdir(flake.path)
 machine = Machine(name="my_machine", flake=FlakeId(str(flake.path)))
@@ -105,6 +106,7 @@ def test_generate_secret_var_sops(
 temporary_home,
 flake_template=CLAN_CORE / "templates" / "minimal",
 machine_configs={"my_machine": config},
+monkeypatch=monkeypatch,
 )
 monkeypatch.chdir(flake.path)
 sops_setup.init()
@@ -140,6 +142,7 @@ def test_generate_secret_var_sops_with_default_group(
 temporary_home,
 flake_template=CLAN_CORE / "templates" / "minimal",
 machine_configs={"my_machine": config},
+monkeypatch=monkeypatch,
 )
 monkeypatch.chdir(flake.path)
 sops_setup.init()
@@ -170,6 +173,7 @@ def test_generate_secret_var_password_store(
 temporary_home,
 flake_template=CLAN_CORE / "templates" / "minimal",
 machine_configs={"my_machine": config},
+monkeypatch=monkeypatch,
 )
 monkeypatch.chdir(flake.path)
 gnupghome = temporary_home / "gpg"
@@ -237,6 +241,7 @@ def test_generate_secret_for_multiple_machines(
 temporary_home,
 flake_template=CLAN_CORE / "templates" / "minimal",
 machine_configs={"machine1": machine1_config, "machine2": machine2_config},
+monkeypatch=monkeypatch,
 )
 monkeypatch.chdir(flake.path)
 sops_setup.init()
@@ -282,6 +287,7 @@ def test_dependant_generators(
 temporary_home,
 flake_template=CLAN_CORE / "templates" / "minimal",
 machine_configs={"my_machine": config},
+monkeypatch=monkeypatch,
 )
 monkeypatch.chdir(flake.path)
 cli.run(["vars", "generate", "--flake", str(flake.path), "my_machine"])
@@ -321,6 +327,7 @@ def test_prompt(
 temporary_home,
 flake_template=CLAN_CORE / "templates" / "minimal",
 machine_configs={"my_machine": config},
+monkeypatch=monkeypatch,
 )
 monkeypatch.chdir(flake.path)
 monkeypatch.setattr("sys.stdin", StringIO(input_value))
@@ -359,6 +366,7 @@ def test_share_flag(
 temporary_home,
 flake_template=CLAN_CORE / "templates" / "minimal",
 machine_configs={"my_machine": config},
+monkeypatch=monkeypatch,
 )
 monkeypatch.chdir(flake.path)
 sops_setup.init()
@@ -398,6 +406,7 @@ def test_prompt_create_file(
 temporary_home,
 flake_template=CLAN_CORE / "templates" / "minimal",
 machine_configs={"my_machine": config},
+monkeypatch=monkeypatch,
 )
 monkeypatch.chdir(flake.path)
 sops_setup.init()
@@ -426,6 +435,7 @@ def test_api_get_prompts(
 temporary_home,
 flake_template=CLAN_CORE / "templates" / "minimal",
 machine_configs={"my_machine": config},
+monkeypatch=monkeypatch,
 )
 monkeypatch.chdir(flake.path)
 monkeypatch.setattr("sys.stdin", StringIO("input1"))
@@ -454,6 +464,7 @@ def test_api_set_prompts(
 temporary_home,
 flake_template=CLAN_CORE / "templates" / "minimal",
 machine_configs={"my_machine": config},
+monkeypatch=monkeypatch,
 )
 monkeypatch.chdir(flake.path)
 machine = Machine(name="my_machine", flake=FlakeId(str(flake.path)))
@@ -42,6 +42,7 @@ def test_vm_deployment(
 temporary_home,
 flake_template=CLAN_CORE / "templates" / "minimal",
 machine_configs={"my_machine": config},
+monkeypatch=monkeypatch,
 )
 monkeypatch.chdir(flake.path)
 sops_setup.init()
@@ -65,8 +66,8 @@ def test_vm_deployment(
 )
 ).stdout.strip()
 assert "no-such-path" not in my_secret_path
-run_vm_in_thread("my_machine")
-qga = qga_connect("my_machine")
+vm = run_vm_in_thread("my_machine")
+qga = qga_connect("my_machine", vm)
 # check my_secret is deployed
 _, out, _ = qga.run("cat /run/secrets/vars/my_generator/my_secret", check=True)
 assert out == "hello\n"
@@ -81,4 +82,4 @@ def test_vm_deployment(
 )
 assert returncode != 0
 qga.exec_cmd("poweroff")
-wait_vm_down("my_machine")
+wait_vm_down("my_machine", vm)
@@ -73,16 +73,17 @@ def test_vm_qmp(
 "services": {"getty": {"autologinUser": "root"}},
 }
 },
+monkeypatch=monkeypatch,
 )
 
 # 'clan vms run' must be executed from within the flake
 monkeypatch.chdir(flake.path)
 
 # start the VM
-run_vm_in_thread("my_machine")
+vm = run_vm_in_thread("my_machine")
 
 # connect with qmp
-qmp = qmp_connect("my_machine")
+qmp = qmp_connect("my_machine", vm)
 
 # verify that issuing a command works
 # result = qmp.cmd_obj({"execute": "query-status"})
@@ -121,14 +122,15 @@ def test_vm_persistence(
 temporary_home,
 flake_template=CLAN_CORE / "templates" / "minimal",
 machine_configs=config,
+monkeypatch=monkeypatch,
 )
 
 monkeypatch.chdir(flake.path)
 
-run_vm_in_thread("my_machine")
+vm = run_vm_in_thread("my_machine")
 
 # wait for the VM to start and connect qga
-qga = qga_connect("my_machine")
+qga = qga_connect("my_machine", vm)
 
 # create state via qmp command instead of systemd service
 qga.run("echo 'dream2nix' > /var/my-state/root", check=True)
@@ -139,13 +141,13 @@ def test_vm_persistence(
 qga.exec_cmd("poweroff")
 
 # wait for socket to be down (systemd service 'poweroff' rebooting machine)
-wait_vm_down("my_machine")
+wait_vm_down("my_machine", vm)
 
 # start vm again
-run_vm_in_thread("my_machine")
+vm = run_vm_in_thread("my_machine")
 
 # connect second time
-qga = qga_connect("my_machine")
+qga = qga_connect("my_machine", vm)
 # check state exists
 qga.run("cat /var/my-state/test", check=True)
 # ensure root file is owned by root
@@ -171,5 +173,5 @@ def test_vm_persistence(
 assert exitcode == 0, out
 
 # use qmp to shutdown the machine (prevent zombie qemu processes)
-qmp = qmp_connect("my_machine")
+qmp = qmp_connect("my_machine", vm)
 qmp.command("system_powerdown")
@@ -112,15 +112,15 @@ const InstallMachine = (props: InstallMachineProps) => {
 e.preventDefault();
 const curr_uri = activeURI();
 const disk = getValue(formStore, "disk");
-const disk_id = props.disks.find((d) => d.name === disk)?.id_link;
-if (!curr_uri || !disk_id || !props.name) {
+const diskId = props.disks.find((d) => d.name === disk)?.id_link;
+if (!curr_uri || !diskId || !props.name) {
 return;
 }
 
 const r = await callApi("set_single_disk_uuid", {
 base_path: curr_uri,
 machine_name: props.name,
-disk_uuid: disk_id,
+disk_uuid: diskId,
 });
 if (r.status === "error") {
 toast.error("Failed to set disk");
@@ -1,4 +1,8 @@
 { lib, ... }:
+
+let
+  suffix = config.clan.core.machine.diskId;
+in
 {
 boot.loader.grub.efiSupport = lib.mkDefault true;
 boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
@@ -11,12 +15,12 @@
 content = {
 type = "gpt";
 partitions = {
-"boot" = {
+"boot-${suffix}" = {
 size = "1M";
 type = "EF02"; # for grub MBR
 priority = 1;
 };
-"ESP" = {
+"ESP-${suffix}" = {
 size = "512M";
 type = "EF00";
 content = {
@@ -25,7 +29,7 @@
 mountpoint = "/boot";
 };
 };
-"root" = {
+"root-${suffix}" = {
 size = "100%";
 content = {
 type = "filesystem";
@@ -1,4 +1,8 @@
 { lib, ... }:
+
+let
+  suffix = config.clan.core.machine.diskId;
+in
 {
 # TO NOT EDIT THIS FILE AFTER INSTALLATION of a machine
 # Otherwise your system might not boot because of missing partitions / filesystems
@@ -13,12 +17,12 @@
 content = {
 type = "gpt";
 partitions = {
-"boot" = {
+"boot-${suffix}" = {
 size = "1M";
 type = "EF02"; # for grub MBR
 priority = 1;
 };
-"ESP" = {
+"ESP-${suffix}" = {
 size = "512M";
 type = "EF00";
 content = {
@@ -28,7 +32,7 @@
 mountOptions = [ "nofail" ];
 };
 };
-"root" = {
+"root-${suffix}" = {
 size = "100%";
 content = {
 type = "filesystem";