Compare commits: `p2p-update...push-wqqzv` (97 commits)

Commits in this range:
01c9432cc5, f62e9db126, dcb2231332, 725eeb87ae, 66df677fd2, f7d15215ea, c25574bebd, fe5796ba17, f2e89d27fe, 06dd2ebf8c,
40740860c0, 89bc39869c, 84d0a2f2f0, 1d07737989, 9d386485dd, ee9ae9c76d, d4d4d77d2d, c0ebad1cd9, 86d0c95da7, 0fb1b5c5ce,
dc0349e835, cc8a74b195, 046fe0df36, 3f948fdbd4, eb35e6ea21, 4a0e1b3b6b, 1b8974d167, 5e2b5fe213, 74fb3abbc7, f2b04e74f1,
d3ae684575, 5b294e7651, 40ae510075, 48d910f11f, f242b9a35c, 978822d40a, fa6c3be21e, be61bac9af, 42b58910a9, a746b10578,
19341e4cb1, f4e06271ba, d93fe229b3, 5fc62806b1, e0be2f3435, a69b81488b, b133a2407a, 68ae27899a, b83d3ecba2, bec4317709,
f37f15c482, fae8ec318d, 8e2005f38c, 94781bb358, de740cf686, 064edf61ef, aaf58d7be8, 03f8e41291, 43bd4403c6, ebee55ffdc,
47e9e5a8f0, d1a79653fe, 351ce1414a, e2ccd979ed, f5f3f96809, 59253a9c71, aa03adc581, ffd84d50f7, 679387e4ba, 1d60f94cc5,
1235177541, 5c08e9a38d, 28dd54d866, 5baf37f7e9, ff669e2957, 8d4c1839e7, 0765d981c6, 10c27a0152, ccb5af9565, 828eff528a,
cbf47580cf, 355ac57ccb, 227e293421, 9b3621b516, 62f09a450f, 95282bd880, 7a49ec252e, 5f9ee97cab, c6be9bbf07, d77ae5eed0,
3c2888edc7, b0f23353ef, 3fccccc092, 0a5d1bf322, 9ca5cb7bcc, 845abd1356, 2b4a4f2422
```diff
@@ -42,7 +42,6 @@
 
   clan.core.networking.targetHost = "machine";
   networking.hostName = "machine";
-  nixpkgs.hostPlatform = "x86_64-linux";
 
   programs.ssh.knownHosts = {
     machine.hostNames = [ "machine" ];
```
```diff
@@ -161,22 +160,15 @@
     "flake.lock"
     "flakeModules"
     "inventory.json"
-    "lib/build-clan"
-    "lib/default.nix"
-    "lib/select.nix"
-    "lib/flake-module.nix"
-    "lib/frontmatter"
-    "lib/inventory"
-    "lib/constraints"
     "nixosModules"
+    # Just include everything in 'lib'
+    # If anything changes in /lib that may affect everything
+    "lib"
   ];
 };
 in
 {
-  # Needs investigation on aarch64-linux
-  # vm-test-run-test-backups> qemu-kvm: No machine specified, and there is no default
-  # vm-test-run-test-backups> Use -machine help to list supported machines
-  checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux") {
+  checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
     test-backups = (import ../lib/container-test.nix) {
       name = "test-backups";
       nodes.machine = {
```
`checks/data-mesher/default.nix` (new file, 138 lines):

```nix
(import ../lib/test-base.nix) (
  { self, lib, ... }:
  let

    inherit (self.lib.inventory) buildInventory;

    machines = [
      "signer"
      "admin"
      "peer"
    ];

    serviceConfigs = buildInventory {
      inventory = {
        machines = lib.genAttrs machines (_: { });
        services = {
          data-mesher.default = {
            roles.peer.machines = [ "peer" ];
            roles.admin.machines = [ "admin" ];
            roles.signer.machines = [ "signer" ];
          };
        };
        modules = {
          data-mesher = self.clanModules.data-mesher;
        };
      };
      directory = ./.;
    };

    commonConfig =
      { config, ... }:
      {

        imports = [ self.nixosModules.clanCore ];

        clan.core.settings.directory = builtins.toString ./.;

        environment.systemPackages = [
          config.services.data-mesher.package
        ];

        clan.core.vars.settings.publicStore = "in_repo";
        clan.core.vars.settings.secretStore = "vm";

        clan.data-mesher.network.interface = "eth1";
        clan.data-mesher.bootstrapNodes = [
          "[2001:db8:1::1]:7946" # peer1
          "[2001:db8:1::2]:7946" # peer2
        ];

        # speed up for testing
        services.data-mesher.settings = {
          cluster.join_interval = lib.mkForce "2s";
          cluster.push_pull_interval = lib.mkForce "5s";
        };

        systemd.tmpfiles.settings."vmsecrets" = {
          "/etc/secrets" = {
            C.argument = "${./vars/secret/${config.clan.core.settings.machine.name}}";
            z = {
              mode = "0700";
              user = "data-mesher";
            };
          };
        };
      };

    adminConfig = {
      imports = serviceConfigs.machines.admin.machineImports;

      config.clan.data-mesher.network.tld = "foo";
      config.clan.core.settings.machine.name = "admin";
    };

    peerConfig = {
      imports = serviceConfigs.machines.peer.machineImports;
      config.clan.core.settings.machine.name = "peer";
    };

    signerConfig = {
      imports = serviceConfigs.machines.signer.machineImports;
      clan.core.settings.machine.name = "signer";
    };

  in
  {
    name = "data-mesher";

    nodes = {
      peer = {
        imports = [
          peerConfig
          commonConfig
        ];
      };

      admin = {
        imports = [
          adminConfig
          commonConfig
        ];
      };

      signer = {
        imports = [
          signerConfig
          commonConfig
        ];
      };
    };

    # TODO Add better test script.
    testScript = ''

      def resolve(node, success = {}, fail = [], timeout = 60):
          for hostname, ips in success.items():
              for ip in ips:
                  node.wait_until_succeeds(f"getent ahosts {hostname} | grep {ip}", timeout)

          for hostname in fail:
              node.wait_until_fails(f"getent ahosts {hostname}")

      start_all()

      admin.wait_for_unit("data-mesher")
      signer.wait_for_unit("data-mesher")
      peer.wait_for_unit("data-mesher")

      # check dns resolution
      for node in [admin, signer, peer]:
          resolve(node, {
              "admin.foo": ["2001:db8:1::1", "192.168.1.1"],
              "peer.foo": ["2001:db8:1::2", "192.168.1.2"],
              "signer.foo": ["2001:db8:1::3", "192.168.1.3"]
          })
    '';
  }
)
```
Eight new three-line key files are added (public/private key test fixtures):

```
@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAV/XZHv1UQEEzfD2YbJP1Q2jd1ZDG+CP5wvGf/1hcR+Q=
-----END PUBLIC KEY-----

@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAKSSUXJCftt5Vif6ek57CNKBcDRNfrWrxZUHjAIFW9HY=
-----END PUBLIC KEY-----

@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAvLD0mHQA+hf9ItlUHD0ml3i5XEArmmjwCC5rYEOmzWs=
-----END PUBLIC KEY-----

@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIFX+AzHy821hHqWLPeK3nzRuHod3FNrnPfaDoFvpz6LX
-----END PRIVATE KEY-----

@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIMwuDntiLoC7cFFyttGDf7cQWlOXOR0q90Jz3lEiuLg+
-----END PRIVATE KEY-----

@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIPmH2+vjYG6UOp+/g0Iqu7yZZKId5jffrfsySE36yO+D
-----END PRIVATE KEY-----

@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEINS0tSnjHPG8IfpzQAS3wzoJA+4mYM70DIpltN8O4YD7
-----END PRIVATE KEY-----

@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEA3P18+R5Gt+Jn7wYXpWNTXM5pyWn2WiOWekYCzXqWPwg=
-----END PUBLIC KEY-----
```
```diff
@@ -41,6 +41,7 @@ in
   borgbackup = import ./borgbackup nixosTestArgs;
   matrix-synapse = import ./matrix-synapse nixosTestArgs;
   mumble = import ./mumble nixosTestArgs;
+  data-mesher = import ./data-mesher nixosTestArgs;
   syncthing = import ./syncthing nixosTestArgs;
   zt-tcp-relay = import ./zt-tcp-relay nixosTestArgs;
   postgresql = import ./postgresql nixosTestArgs;
```
```diff
@@ -50,7 +51,7 @@ in
 flakeOutputs =
   lib.mapAttrs' (
     name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
-  ) (lib.filterAttrs (n: _v: n != "test-install-machine-without-system") self.nixosConfigurations)
+  ) (lib.filterAttrs (n: _: !lib.hasPrefix "test-" n) self.nixosConfigurations)
   // lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages
   // lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
   // lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (
```
```diff
@@ -1,12 +1,26 @@
-{ self, lib, ... }:
 {
-  clan.machines.test-flash-machine = {
-    clan.core.networking.targetHost = "test-flash-machine";
-    fileSystems."/".device = lib.mkDefault "/dev/vda";
-    boot.loader.grub.device = lib.mkDefault "/dev/vda";
+  config,
+  self,
+  lib,
+  ...
+}:
+{
+  clan.machines = lib.listToAttrs (
+    lib.map (
+      system:
+      lib.nameValuePair "test-flash-machine-${system}" {
+        clan.core.networking.targetHost = "test-flash-machine";
+        fileSystems."/".device = lib.mkDefault "/dev/vda";
+        boot.loader.grub.device = lib.mkDefault "/dev/vda";
 
-    imports = [ self.nixosModules.test-flash-machine ];
-  };
+        # We need to use `mkForce` because we inherit from `test-install-machine`
+        # which currently hardcodes `nixpkgs.hostPlatform`
+        nixpkgs.hostPlatform = lib.mkForce system;
+
+        imports = [ self.nixosModules.test-flash-machine ];
+      }
+    ) (lib.filter (lib.hasSuffix "linux") config.systems)
+  );
 
   flake.nixosModules = {
     test-flash-machine =
```
```diff
@@ -30,20 +44,20 @@
 let
   dependencies = [
     pkgs.disko
-    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.pkgs.perlPackages.ConfigIniFiles
-    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.pkgs.perlPackages.FileSlurp
+    self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.ConfigIniFiles
+    self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.FileSlurp
 
-    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.build.toplevel
-    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.build.diskoScript
-    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.build.diskoScript.drvPath
-    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.clan.deployment.file
+    self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
+    self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
+    self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript.drvPath
+    self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.clan.deployment.file
 
   ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
   closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
 in
 {
-  checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
-    flash = (import ../lib/test-base.nix) {
+  checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
+    test-flash = (import ../lib/test-base.nix) {
       name = "flash";
       nodes.target = {
         virtualisation.emptyDiskImages = [ 4096 ];
```
```diff
@@ -65,7 +79,7 @@
   testScript = ''
     start_all()
 
-    machine.succeed("clan flash write --debug --flake ${../..} --yes --disk main /dev/vdb test-flash-machine")
+    machine.succeed("clan flash write --debug --flake ${../..} --yes --disk main /dev/vdb test-flash-machine-${pkgs.hostPlatform.system}")
   '';
 } { inherit pkgs self; };
 };
```
```diff
@@ -17,18 +17,25 @@
 
     imports = [ self.nixosModules.test-install-machine-without-system ];
   };
-  clan.machines.test-install-machine-with-system = {
-    # https://git.clan.lol/clan/test-fixtures
-    facter.reportPath = builtins.fetchurl {
-      url = "https://git.clan.lol/clan/test-fixtures/raw/commit/3508b7ed11dad068ffc8c9f0047a5c7d54644e2c/nixos-vm-facter-json/facter.json";
-      sha256 = "sha256:16myh0ll2gdwsiwkjw5ba4dl23ppwbsanxx214863j7nvzx42pws";
-    };
-
-    fileSystems."/".device = lib.mkDefault "/dev/vda";
-    boot.loader.grub.device = lib.mkDefault "/dev/vda";
-
-    imports = [ self.nixosModules.test-install-machine-without-system ];
-  };
+  clan.machines.test-install-machine-with-system =
+    { pkgs, ... }:
+    {
+      # https://git.clan.lol/clan/test-fixtures
+      facter.reportPath = builtins.fetchurl {
+        url = "https://git.clan.lol/clan/test-fixtures/raw/commit/4a2bc56d886578124b05060d3fb7eddc38c019f8/nixos-vm-facter-json/${pkgs.hostPlatform.system}.json";
+        sha256 =
+          {
+            aarch64-linux = "sha256:1rlfymk03rmfkm2qgrc8l5kj5i20srx79n1y1h4nzlpwaz0j7hh2";
+            x86_64-linux = "sha256:16myh0ll2gdwsiwkjw5ba4dl23ppwbsanxx214863j7nvzx42pws";
+          }
+          .${pkgs.hostPlatform.system};
+      };
+
+      fileSystems."/".device = lib.mkDefault "/dev/vda";
+      boot.loader.grub.device = lib.mkDefault "/dev/vda";
+
+      imports = [ self.nixosModules.test-install-machine-without-system ];
+    };
   flake.nixosModules = {
     test-install-machine-without-system =
       { lib, modulesPath, ... }:
```
```diff
@@ -108,9 +115,9 @@
 let
   dependencies = [
     self
-    self.nixosConfigurations.test-install-machine-with-system.config.system.build.toplevel
-    self.nixosConfigurations.test-install-machine-with-system.config.system.build.diskoScript
-    self.nixosConfigurations.test-install-machine-with-system.config.system.clan.deployment.file
+    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
+    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
+    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.clan.deployment.file
     pkgs.stdenv.drvPath
     pkgs.bash.drvPath
     pkgs.nixos-anywhere
```
```diff
@@ -130,15 +137,19 @@
 in
 {
   # On aarch64-linux, hangs on reboot with after installation:
-  # vm-test-run-test-installation> (finished: waiting for the VM to power off, in 1.97 seconds)
-  # vm-test-run-test-installation>
-  # vm-test-run-test-installation> new_machine: must succeed: cat /etc/install-successful
-  # vm-test-run-test-installation> new_machine: waiting for the VM to finish booting
-  # vm-test-run-test-installation> new_machine: starting vm
-  # vm-test-run-test-installation> new_machine: QEMU running (pid 80)
-  # vm-test-run-test-installation> new_machine: Guest root shell did not produce any data yet...
-  # vm-test-run-test-installation> new_machine: To debug, enter the VM and run 'systemctl status backdoor.service'.
-  checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux") {
+  # vm-test-run-test-installation-without-system> installer # [ 288.002871] reboot: Restarting system
+  # vm-test-run-test-installation-without-system> client # [test-install-machine] ### Done! ###
+  # vm-test-run-test-installation-without-system> client # [test-install-machine] + step 'Done!'
+  # vm-test-run-test-installation-without-system> client # [test-install-machine] + echo '### Done! ###'
+  # vm-test-run-test-installation-without-system> client # [test-install-machine] + rm -rf /tmp/tmp.qb16EAq7hJ
+  # vm-test-run-test-installation-without-system> (finished: must succeed: clan machines install --debug --flake test-flake --yes test-install-machine-without-system --target-host root@installer --update-hardware-config nixos-facter >&2, in 154.62 seconds)
+  # vm-test-run-test-installation-without-system> target: starting vm
+  # vm-test-run-test-installation-without-system> target: QEMU running (pid 144)
+  # vm-test-run-test-installation-without-system> target: waiting for unit multi-user.target
+  # vm-test-run-test-installation-without-system> target: waiting for the VM to finish booting
+  # vm-test-run-test-installation-without-system> target: Guest root shell did not produce any data yet...
+  # vm-test-run-test-installation-without-system> target: To debug, enter the VM and run 'systemctl status backdoor.service'.
+  checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
     test-installation-without-system = (import ../lib/test-base.nix) {
       name = "test-installation-without-system";
       nodes.target = {
```
```diff
@@ -154,7 +165,6 @@
       (modulesPath + "/../tests/common/auto-format-root-device.nix")
     ];
     services.openssh.enable = true;
-    users.users.root.openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
     system.nixos.variant_id = "installer";
     environment.systemPackages = [ pkgs.nixos-facter ];
     virtualisation.emptyDiskImages = [ 512 ];
```
```diff
@@ -173,6 +183,12 @@
       "flakes"
     ];
   };
+  users.users.nonrootuser = {
+    isNormalUser = true;
+    openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
+    extraGroups = [ "wheel" ];
+  };
+  security.sudo.wheelNeedsPassword = false;
   system.extraDependencies = dependencies;
 };
 nodes.client = {
```
```diff
@@ -200,14 +216,14 @@
     installer.start()
 
     client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
-    client.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v root@installer hostname")
+    client.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v nonrootuser@installer hostname")
     client.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
     client.fail("test -f test-flake/machines/test-install-machine-without-system/hardware-configuration.nix")
     client.fail("test -f test-flake/machines/test-install-machine-without-system/facter.json")
-    client.succeed("clan machines update-hardware-config --flake test-flake test-install-machine-without-system root@installer >&2")
+    client.succeed("clan machines update-hardware-config --flake test-flake test-install-machine-without-system nonrootuser@installer >&2")
     client.succeed("test -f test-flake/machines/test-install-machine-without-system/facter.json")
     client.succeed("rm test-flake/machines/test-install-machine-without-system/facter.json")
-    client.succeed("clan machines install --debug --flake test-flake --yes test-install-machine-without-system --target-host root@installer --update-hardware-config nixos-facter >&2")
+    client.succeed("clan machines install --debug --flake test-flake --yes test-install-machine-without-system --target-host nonrootuser@installer --update-hardware-config nixos-facter >&2")
     try:
       installer.shutdown()
     except BrokenPipeError:
```
```diff
@@ -23,7 +23,6 @@
 
   environment.etc."install-successful".text = "ok";
 
-  nixpkgs.hostPlatform = "x86_64-linux";
   boot.consoleLogLevel = lib.mkForce 100;
   boot.kernelParams = [ "boot.shell_on_fail" ];
 
```
```diff
@@ -89,9 +88,9 @@
 let
   dependencies = [
     self
-    self.nixosConfigurations.test-install-machine.config.system.build.toplevel
-    self.nixosConfigurations.test-install-machine.config.system.build.diskoScript
-    self.nixosConfigurations.test-install-machine.config.system.clan.deployment.file
+    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.toplevel
+    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.diskoScript
+    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.clan.deployment.file
     pkgs.bash.drvPath
     pkgs.stdenv.drvPath
     pkgs.nixos-anywhere
```
```diff
@@ -120,7 +119,7 @@
   # vm-test-run-test-installation> new_machine: QEMU running (pid 80)
   # vm-test-run-test-installation> new_machine: Guest root shell did not produce any data yet...
   # vm-test-run-test-installation> new_machine: To debug, enter the VM and run 'systemctl status backdoor.service'.
-  checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux") {
+  checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
     test-installation = (import ../lib/test-base.nix) {
       name = "test-installation";
       nodes.target = {
```
```diff
@@ -23,7 +23,7 @@
   ...
 }:
 {
-  checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux") {
+  checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
     test-morph = (import ../lib/test-base.nix) {
       name = "morph";
 
```
`clanModules/data-mesher/README.md` (new file, 10 lines):

```
---
description = "Set up data-mesher"
categories = ["System"]
features = [ "inventory" ]

[constraints]
roles.admin.min = 1
roles.admin.max = 1
---
```
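The integration check earlier in this diff already wires these roles up through the inventory. As a minimal sketch (machine names and the `default` instance name are illustrative, not part of this module), a consumer's inventory might look roughly like:

```nix
# Sketch only: declaring the data-mesher service in a clan inventory.
# Machine names are examples; the [constraints] above require exactly one admin.
{
  inventory = {
    machines = {
      node-a = { };
      node-b = { };
      node-c = { };
    };
    services.data-mesher.default = {
      roles.admin.machines = [ "node-a" ];
      roles.signer.machines = [ "node-b" ];
      roles.peer.machines = [ "node-c" ];
    };
  };
}
```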
`clanModules/data-mesher/lib.nix` (new file, 19 lines):

```nix
lib: {

  machines =
    config:
    let
      instanceNames = builtins.attrNames config.clan.inventory.services.data-mesher;
      instanceName = builtins.head instanceNames;
      dataMesherInstances = config.clan.inventory.services.data-mesher.${instanceName};

      uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list);
    in
    rec {
      admins = dataMesherInstances.roles.admin.machines or [ ];
      signers = dataMesherInstances.roles.signer.machines or [ ];
      peers = dataMesherInstances.roles.peer.machines or [ ];
      bootstrap = uniqueStrings (admins ++ signers);
    };

}
```
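For orientation, a sketch of the shape this helper yields when evaluated against the inventory used in the `checks/data-mesher` test above (a single `default` instance with one machine per role):

```nix
# Illustrative result only: (import ./lib.nix lib).machines config
{
  admins = [ "admin" ];
  signers = [ "signer" ];
  peers = [ "peer" ];
  # uniqueStrings (admins ++ signers); attrNames returns the names sorted
  bootstrap = [ "admin" "signer" ];
}
```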
`clanModules/data-mesher/roles/admin.nix` (new file, 51 lines):

```nix
{ lib, config, ... }:
let
  cfg = config.clan.data-mesher;

  dmLib = import ../lib.nix lib;
in
{
  imports = [
    ../shared.nix
  ];

  options.clan.data-mesher = {
    network = {
      tld = lib.mkOption {
        type = lib.types.str;
        default = (config.networking.domain or "clan");
        description = "Top level domain to use for the network";
      };

      hostTTL = lib.mkOption {
        type = lib.types.str;
        default = "672h"; # 28 days
        example = "24h";
        description = "The TTL for hosts in the network, in the form of a Go time.Duration";
      };
    };
  };

  config = {
    services.data-mesher.initNetwork =
      let
        # for a given machine, read it's public key and remove any new lines
        readHostKey =
          machine:
          let
            path = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/data-mesher-host-key/public_key/value";
          in
          builtins.elemAt (lib.splitString "\n" (builtins.readFile path)) 1;
      in
      {
        enable = true;
        keyPath = config.clan.core.vars.generators.data-mesher-network-key.files.private_key.path;

        tld = cfg.network.tld;
        hostTTL = cfg.network.hostTTL;

        # admin and signer host public keys
        signingKeys = builtins.map readHostKey (dmLib.machines config).bootstrap;
      };
  };
}
```
`clanModules/data-mesher/roles/peer.nix` and `clanModules/data-mesher/roles/signer.nix` (new files, 5 lines each, identical content):

```nix
{
  imports = [
    ../shared.nix
  ];
}
```
`clanModules/data-mesher/shared.nix` (new file, 154 lines):

```nix
{
  config,
  lib,
  ...
}:
let
  cfg = config.clan.data-mesher;
  dmLib = import ./lib.nix lib;

  # the default bootstrap nodes are any machines with the admin or signers role
  # we iterate through those machines, determining an IP address for them based on their VPN
  # currently only supports zerotier
  defaultBootstrapNodes = builtins.foldl' (
    urls: name:
    if
      builtins.pathExists "${config.clan.core.settings.directory}/machines/${name}/facts/zerotier-ip"
    then
      let
        ip = builtins.readFile "${config.clan.core.settings.directory}/machines/${name}/facts/zerotier-ip";
      in
      urls ++ "${ip}:${cfg.network.port}"
    else
      urls
  ) [ ] (dmLib.machines config).bootstrap;
in
{
  options.clan.data-mesher = {

    bootstrapNodes = lib.mkOption {
      type = lib.types.nullOr (lib.types.listOf lib.types.str);
      default = null;
      description = ''
        A list of bootstrap nodes that act as an initial gateway when joining
        the cluster.
      '';
      example = [
        "192.168.1.1:7946"
        "192.168.1.2:7946"
      ];
    };

    network = {

      interface = lib.mkOption {
        type = lib.types.str;
        description = ''
          The interface over which cluster communication should be performed.
          All the ip addresses associate with this interface will be part of
          our host claim, including both ipv4 and ipv6.

          This should be set to an internal/VPN interface.
        '';
        example = "tailscale0";
      };

      port = lib.mkOption {
        type = lib.types.port;
        default = 7946;
        description = ''
          Port to listen on for cluster communication.
        '';
      };
    };
  };

  config = {

    services.data-mesher = {
      enable = true;
      openFirewall = true;

      settings = {
        log_level = "warn";
        state_dir = "/var/lib/data-mesher";

        # read network id from vars
        network.id = config.clan.core.vars.generators.data-mesher-network-key.files.public_key.value;

        host = {
          names = [ config.networking.hostName ];
          key_path = config.clan.core.vars.generators.data-mesher-host-key.files.private_key.path;
        };

        cluster = {
          port = cfg.network.port;
          join_interval = "30s";
          push_pull_interval = "30s";

          interface = cfg.network.interface;
          bootstrap_nodes = cfg.bootstrapNodes or defaultBootstrapNodes;
        };

        http.port = 7331;
        http.interface = "lo";
      };
    };

    # Generate host key.
    clan.core.vars.generators.data-mesher-host-key = {
      files =
        let
          owner = config.users.users.data-mesher.name;
        in
        {
          private_key = {
            inherit owner;
          };
          public_key = {
            inherit owner;
            secret = false;
          };
        };

      runtimeInputs = [
        config.services.data-mesher.package
      ];

      script = ''
        data-mesher generate keypair \
          --public-key-path $out/public_key \
          --private-key-path $out/private_key
      '';
    };

    clan.core.vars.generators.data-mesher-network-key = {
      # generated once per clan
      share = true;

      files =
        let
          owner = config.users.users.data-mesher.name;
        in
        {
          private_key = {
            inherit owner;
          };
          public_key = {
            inherit owner;
            secret = false;
          };
        };

      runtimeInputs = [
        config.services.data-mesher.package
      ];

      script = ''
        data-mesher generate keypair \
          --public-key-path $out/public_key \
          --private-key-path $out/private_key
      '';
    };
  };
}
```
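A hedged sketch of how a machine might set the options this module exposes (the values mirror the option examples above and are illustrative only):

```nix
# Sketch only: consumer-side settings for the shared data-mesher module.
{
  clan.data-mesher = {
    network.interface = "tailscale0"; # an internal/VPN interface
    network.port = 7946;
    bootstrapNodes = [
      "192.168.1.1:7946"
      "192.168.1.2:7946"
    ];
  };
}
```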
```diff
@@ -13,6 +13,7 @@ in
   borgbackup = ./borgbackup;
   borgbackup-static = ./borgbackup-static;
   deltachat = ./deltachat;
+  data-mesher = ./data-mesher;
   disk-id = ./disk-id;
   dyndns = ./dyndns;
   ergochat = ./ergochat;
```
`decisions/02-clan-api.md` (new file, 116 lines):

# Clan as library

## Status

Accepted

## Context

In the long term we envision that the clan application will consist of the following user-facing tools:

- `CLI`
- `TUI`
- `Desktop Application`
- `REST-API`
- `Mobile Application`

Not all of these may end up existing, but the architecture should be generic enough that they remain possible without major changes to the underlying system.

## Decision

This leads to the conclusion that we should pursue library-centric development:
the current `clan` Python code becomes a library that can be imported to build various tools on top of it.
All **CLI** or **UI** related parts should be moved out of the main library.

*Note: The next person who wants to implement a new frontend should do this first. Currently it looks like the TUI will be the next one.*

Imagine roughly the following architecture:

```mermaid
graph TD
    %% Define styles
    classDef frontend fill:#f9f,stroke:#333,stroke-width:2px;
    classDef backend fill:#bbf,stroke:#333,stroke-width:2px;
    classDef storage fill:#ff9,stroke:#333,stroke-width:2px;
    classDef testing fill:#cfc,stroke:#333,stroke-width:2px;

    %% Define nodes
    user(["User"]) -->|Interacts with| Frontends

    subgraph "Frontends"
        CLI["CLI"]:::frontend
        APP["Desktop App"]:::frontend
        TUI["TUI"]:::frontend
        REST["REST API"]:::frontend
    end

    subgraph "Python"
        API["Library <br>for interacting with clan"]:::backend
        BusinessLogic["Business Logic<br>Implements actions like 'machine create'"]:::backend
        STORAGE[("Persistence")]:::storage
        NIX["Nix Eval & Build"]:::backend
    end

    subgraph "CI/CD & Tests"
        TEST["Feature Testing"]:::testing
    end

    %% Define connections
    CLI --> API
    APP --> API
    TUI --> API
    REST --> API

    TEST --> API

    API --> BusinessLogic
    BusinessLogic --> STORAGE
    BusinessLogic --> NIX
```

This very simple design ensures that the basic features remain stable across all frontends.
It is then straightforward to exercise the Python library functions from a testing framework to ensure that kind of stability.

Integration tests and smaller unit tests should both be utilized to ensure the stability of the library.

Note: Library functions don't have to be JSON-serializable in general.

Persistence includes but is not limited to: creating git commits, writing to inventory.json, reading and writing vars, and reading from / writing to disk in general.

## Benefits / Drawbacks

- (+) Less tight coupling of frontend / backend teams
- (+) Consistency and inherent behavior
- (+) Performance & scalability
- (+) Different frontends for different user groups
- (+) Documentation per library function makes it convenient to interact with the clan resources.
- (+) Testing the library ensures stability of the underlying layers for everything built above.
- (-) Complexity overhead
- (-) The library needs to be designed / documented
  - (+) The library can be well documented since it is a finite set of functions.
- (-) Error handling might be harder.
  - (+) Common error reporting
- (-) Different frontends need different features. The library must include them all.
  - (+) All those core features must be implemented anyway.
- (+) VPN benchmarking already uses the existing library and works relatively well.

## Implementation considerations

Not every detail that will need to change over time can be pointed out ahead of time.
The goal of this document is to create a common understanding of how we want the project to be structured.
Any future commits should contribute to this goal.

Some ideas of what might need to change:

- Having separate locations or packages for the library and the CLI.
  - Rename the `clan_cli` package to `clan` and move the `cli` frontend into a subfolder or a separate package.
- Python argparse or other CLI-related code should not exist in the `clan` Python library.
- `__init__.py` should be very minimal. Only init the business logic models and resources. Note that all `__init__.py` files all the way up in the module tree are always executed as part of the Python module import logic and thus should be as small as possible.
  i.e. `from clan_cli.vars.generators import ...` executes both `clan_cli/__init__.py` and `clan_cli/vars/__init__.py` if any of those exist.
- An `api` folder doesn't make sense since the Python library `clan` is the API.
- Logic needed for the webui that performs JSON serialization and deserialization will live in some `json-adapter` folder or package.
- Code for serializing dataclasses and typed dictionaries is needed for the persistence layer (e.g. for reading and writing inventory.json).
- The inventory-json is a backend resource that is internal. Its logic includes merging, unmerging and partial updates while considering Nix values and their priorities. Nobody should try to read or write to it directly.
  Instead there will be library methods, e.g. to add a `service` or to update/read/delete some information from it.
- Library functions should be carefully designed with suitable conventions for writing good APIs in mind (e.g. https://swagger.io/resources/articles/best-practices-in-api-design/).
`devShell.nix`:

```diff
@@ -1,10 +1,12 @@
-{ ... }:
+{ inputs, ... }:
 {
   perSystem =
     {
+      lib,
       pkgs,
       self',
       config,
+      system,
       ...
     }:
     let
```
```diff
@@ -24,18 +26,26 @@
 in
 {
   devShells.default = pkgs.mkShell {
-    packages = [
-      select-shell
-      pkgs.nix-unit
-      pkgs.tea
-      # Better error messages than nix 2.18
-      pkgs.nixVersions.latest
-      self'.packages.tea-create-pr
-      self'.packages.merge-after-ci
-      self'.packages.pending-reviews
-      # treefmt with config defined in ./flake-parts/formatting.nix
-      config.treefmt.build.wrapper
-    ];
+    packages =
+      [
+        select-shell
+        pkgs.nix-unit
+        pkgs.tea
+        # Better error messages than nix 2.18
+        pkgs.nixVersions.latest
+        self'.packages.tea-create-pr
+        self'.packages.merge-after-ci
+        self'.packages.pending-reviews
+        # treefmt with config defined in ./flake-parts/formatting.nix
+        config.treefmt.build.wrapper
+      ]
+      # bring in data-mesher for the cli which can help with things like key generation
+      ++ (
+        let
+          data-mesher = inputs.data-mesher.packages.${system}.data-mesher or null;
+        in
+        lib.optional (data-mesher != null) data-mesher
+      );
     shellHook = ''
       echo -e "${ansiEscapes.green}switch to another dev-shell using: select-shell${ansiEscapes.reset}"
       export PRJ_ROOT=$(git rev-parse --show-toplevel)
```
```diff
@@ -79,6 +79,7 @@ nav:
       # This is the module overview and should stay at the top
       - reference/clanModules/admin.md
       - reference/clanModules/borgbackup-static.md
+      - reference/clanModules/data-mesher.md
       - reference/clanModules/borgbackup.md
       - reference/clanModules/deltachat.md
       - reference/clanModules/disk-id.md
```
```diff
@@ -13,8 +13,8 @@
   # { clanCore = «derivation JSON»; clanModules = { ${name} = «derivation JSON» }; }
   jsonDocs = pkgs.callPackage ./get-module-docs.nix {
     inherit (self) clanModules;
-    evalClanModules = self.lib.evalClanModules;
-    modulesRolesOptions = self.lib.evalClanModulesWithRoles self.clanModules;
+    evalClanModules = self.lib.evalClan.evalClanModules;
+    modulesRolesOptions = self.lib.evalClan.evalClanModulesWithRoles self.clanModules;
   };
 
   # Frontmatter for clanModules
```
```diff
@@ -3,7 +3,6 @@ Managing machine configurations can be done in the following ways:
 
 - writing `nix` expressions in a `flake.nix` file,
 - placing `autoincluded` files into your machine directory,
-- configuring everything in a simple UI (upcoming).
 
 Clan currently offers the following methods to configure machines:
 
```
```diff
@@ -73,7 +73,7 @@ hide:
 
 ## API Reference
 
-**Auto generated API Documentation**
+**Reference API Documentation**
 
 <div class="grid cards" markdown>
 
```
`flake.lock` (generated):

```diff
@@ -1,5 +1,34 @@
 {
   "nodes": {
+    "data-mesher": {
+      "inputs": {
+        "flake-parts": [
+          "flake-parts"
+        ],
+        "nixpkgs": [
+          "nixpkgs"
+        ],
+        "systems": [
+          "systems"
+        ],
+        "treefmt-nix": [
+          "treefmt-nix"
+        ]
+      },
+      "locked": {
+        "lastModified": 1743379277,
+        "narHash": "sha256-4BNv+I6hksqZeRCrEHcQygK0MV1acjA8+L2TtA11H3c=",
+        "ref": "refs/heads/main",
+        "rev": "bf8c5448d826e047b842d6f2ac0fc698e976dda5",
+        "revCount": 375,
+        "type": "git",
+        "url": "https://git.clan.lol/clan/data-mesher"
+      },
+      "original": {
+        "type": "git",
+        "url": "https://git.clan.lol/clan/data-mesher"
+      }
+    },
     "disko": {
       "inputs": {
         "nixpkgs": [
```
```diff
@@ -58,10 +87,10 @@
     "nixpkgs": {
       "locked": {
         "lastModified": 315532800,
-        "narHash": "sha256-xGZrrDemIGmDHe43RG0GNy8142DVzE6kY+dKlxsBkNs=",
-        "rev": "94c4dbe77c0740ebba36c173672ca15a7926c993",
+        "narHash": "sha256-Ls4VPCGSQrm6k3FCokyonfX/sgIdZc8f5ZzqEdukBFA=",
+        "rev": "eb0e0f21f15c559d2ac7633dc81d079d1caf5f5f",
         "type": "tarball",
-        "url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre771533.94c4dbe77c07/nixexprs.tar.xz"
+        "url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre776128.eb0e0f21f15c/nixexprs.tar.xz"
       },
       "original": {
         "type": "tarball",
```
```diff
@@ -70,6 +99,7 @@
     },
     "root": {
       "inputs": {
+        "data-mesher": "data-mesher",
         "disko": "disko",
         "flake-parts": "flake-parts",
         "nixos-facter-modules": "nixos-facter-modules",
```
```diff
@@ -86,11 +116,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1742700801,
-        "narHash": "sha256-ZGlpUDsuBdeZeTNgoMv+aw0ByXT2J3wkYw9kJwkAS4M=",
+        "lastModified": 1743305778,
+        "narHash": "sha256-Ux/UohNtnM5mn9SFjaHp6IZe2aAnUCzklMluNtV6zFo=",
         "owner": "Mic92",
         "repo": "sops-nix",
-        "rev": "67566fe68a8bed2a7b1175fdfb0697ed22ae8852",
+        "rev": "8e873886bbfc32163fe027b8676c75637b7da114",
         "type": "github"
       },
       "original": {
```
```diff
@@ -121,11 +151,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1742370146,
-        "narHash": "sha256-XRE8hL4vKIQyVMDXykFh4ceo3KSpuJF3ts8GKwh5bIU=",
+        "lastModified": 1743081648,
+        "narHash": "sha256-WRAylyYptt6OX5eCEBWyTwOEqEtD6zt33rlUkr6u3cE=",
         "owner": "numtide",
         "repo": "treefmt-nix",
-        "rev": "adc195eef5da3606891cedf80c0d9ce2d3190808",
+        "rev": "29a3d7b768c70addce17af0869f6e2bd8f5be4b7",
         "type": "github"
       },
       "original": {
```
`flake.nix`:

```diff
@@ -2,7 +2,7 @@
   description = "clan.lol base operating system";
 
   inputs = {
-    nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
+    nixpkgs.url = "https://nixos.org/channels/nixpkgs-unstable/nixexprs.tar.xz";
 
     flake-parts.url = "github:hercules-ci/flake-parts";
     flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs";
```
```diff
@@ -19,6 +19,16 @@
 
     treefmt-nix.url = "github:numtide/treefmt-nix";
     treefmt-nix.inputs.nixpkgs.follows = "nixpkgs";
+
+    data-mesher = {
+      url = "git+https://git.clan.lol/clan/data-mesher";
+      inputs = {
+        flake-parts.follows = "flake-parts";
+        nixpkgs.follows = "nixpkgs";
+        systems.follows = "systems";
+        treefmt-nix.follows = "treefmt-nix";
+      };
+    };
   };
 
   outputs =
```
```diff
@@ -23,6 +23,7 @@
       "*.clan-flake"
       "*.code-workspace"
       "*.pub"
+      "*.priv"
       "*.typed"
       "*.age"
       "*.list"
```
```diff
@@ -37,6 +38,7 @@
       # prettier messes up our mkdocs flavoured markdown
       "*.md"
 
+      "checks/data-mesher/vars/*"
       "checks/lib/ssh/privkey"
       "checks/lib/ssh/pubkey"
       "checks/matrix-synapse/synapse-registration_shared_secret"
```
`lib/README.md` (new file, 72 lines):

# ClanLib

This folder is supposed to contain clan-specific Nix functions, such as:

- the build-clan function
- select
- the build-inventory function
- the json-schema converter

## Structure

Similar to `nixpkgs/lib`, this produces a recursive attribute set in a fixed-point.
Functions within lib can depend on each other to create new abstractions.

### Conventions

Note: This is not consistently enforced yet.
If you start a new feature, or refactor or touch existing ones, please help us move towards the conventions illustrated below.

A single feature-set/module may be organized like this:

```nix
# ↓ The final clanLib
{lib, clanLib, ...}:
# ↓ portion to add to clanLib
{
  inventory.resolveTags = tags: inventory.machines; # implementation
  inventory.buildMachines = x: clanLib.inventory.resolveTags x; # implementation
}
```

Every bigger feature should live in a subfolder with the feature name.
It should contain the following files:

- `impl.nix`
- `test.nix`
- Everything else may be adopted as needed.

Example filetree:

```sh
.
├── default.nix
├── feature_foo
│   ├── impl.nix
│   └── test.nix
└── feature_bar
    ├── impl.nix
    ├── complex-subfeature
    │   ├── impl.nix
    │   └── test.nix
    ├── testless-subfeature # <- We immediately see that this feature is not tested on itself.
    │   └── impl.nix
    └── test.nix
```

```nix
# default.nix
{lib, clanLib, ...}:
{
  inventory.resolveTags = import ./resolveTags { inherit lib clanLib; };
}
```

## Testing

For testing we use [nix-unit](https://github.com/nix-community/nix-unit).

TODO: define a helper that automatically hooks up `tests` in `flake.legacyPackages` and a corresponding buildable `checks` attribute.
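As a hedged sketch of the `impl.nix`/`test.nix` split described above (the `feature_foo.double` function is hypothetical and used only to show the nix-unit test shape):

```nix
# feature_foo/impl.nix (hypothetical): { lib, clanLib, ... }: { double = x: x * 2; }
# feature_foo/test.nix (sketch): nix-unit evaluates attributes named test*
{ lib, clanLib, ... }:
{
  test_double_doubles_its_argument = {
    expr = clanLib.feature_foo.double 21;
    expected = 42;
  };
}
```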
Deleted file (23 lines):

```diff
@@ -1,23 +0,0 @@
-{
-  lib,
-  self,
-  ...
-}:
-
-let
-  # Returns an attrset with inputs that have the attribute `clanModules`
-  inputsWithClanModules = lib.filterAttrs (
-    _name: value: builtins.hasAttr "clanModules" value
-  ) self.inputs;
-
-  flattenedClanModules = lib.foldl' (
-    acc: input:
-    lib.mkMerge [
-      acc
-      input.clanModules
-    ]
-  ) { } (lib.attrValues inputsWithClanModules);
-in
-{
-  inventory.modules = flattenedClanModules;
-}
```
```diff
@@ -43,10 +43,7 @@ in
   include = [
     "flakeModules"
     "inventory.json"
-    "lib/build-clan"
-    "lib/default.nix"
-    "lib/flake-module.nix"
-    "lib/inventory"
+    "lib"
     "machines"
     "nixosModules"
   ];
```
```diff
@@ -142,11 +142,13 @@ in
   inventoryFile = lib.mkOption { type = lib.types.raw; };
   # The machine 'imports' generated by the inventory per machine
   inventoryClass = lib.mkOption { type = lib.types.raw; };
+  # new attribute
+  distributedServices = lib.mkOption { type = lib.types.raw; };
   # clan-core's modules
   clanModules = lib.mkOption { type = lib.types.raw; };
   source = lib.mkOption { type = lib.types.raw; };
   meta = lib.mkOption { type = lib.types.raw; };
-  lib = lib.mkOption { type = lib.types.raw; };
+  clanLib = lib.mkOption { type = lib.types.raw; };
   all-machines-json = lib.mkOption { type = lib.types.raw; };
   machines = lib.mkOption { type = lib.types.raw; };
   machinesFunc = lib.mkOption { type = lib.types.raw; };
```
```diff
@@ -77,6 +77,9 @@ let
   # Inherit the inventory assertions ?
   # { inherit (mergedInventory) assertions; }
   { imports = inventoryClass.machines.${name}.machineImports or [ ]; }
+
+  # Import the distribute services
+  { imports = config.clanInternals.distributedServices.allMachines.${name} or [ ]; }
   (
     {
       # Settings
```
```diff
@@ -165,7 +168,6 @@ let
 in
 {
   imports = [
-    ./auto-imports.nix
     # Merge the inventory file
     {
       inventory = _: {
```
@@ -199,7 +201,11 @@ in
|
|||||||
clanInternals = {
|
clanInternals = {
|
||||||
moduleSchemas = clan-core.lib.modules.getModulesSchema config.inventory.modules;
|
moduleSchemas = clan-core.lib.modules.getModulesSchema config.inventory.modules;
|
||||||
inherit inventoryClass;
|
inherit inventoryClass;
|
||||||
inherit (clan-core) clanModules;
|
distributedServices = import ../distributed-service/inventory-adapter.nix {
|
||||||
|
inherit lib inventory;
|
||||||
|
flake = config.self;
|
||||||
|
};
|
||||||
|
inherit (clan-core) clanModules clanLib;
|
||||||
inherit inventoryFile;
|
inherit inventoryFile;
|
||||||
inventoryValuesPrios =
|
inventoryValuesPrios =
|
||||||
# Temporary workaround
|
# Temporary workaround
|
||||||
@@ -211,9 +217,6 @@ in
|
|||||||
templates = config.templates;
|
templates = config.templates;
|
||||||
inventory = config.inventory;
|
inventory = config.inventory;
|
||||||
meta = config.inventory.meta;
|
meta = config.inventory.meta;
|
||||||
lib = {
|
|
||||||
inherit (clan-core.lib) select;
|
|
||||||
};
|
|
||||||
|
|
||||||
source = "${clan-core}";
|
source = "${clan-core}";
|
||||||
|
|
||||||
|
|||||||
@@ -1,25 +1,35 @@
|
|||||||
{
|
{
|
||||||
lib,
|
lib,
|
||||||
clan-core,
|
self,
|
||||||
nixpkgs,
|
nixpkgs,
|
||||||
...
|
...
|
||||||
}:
|
}:
|
||||||
let
|
# Produces the
|
||||||
|
# 'clanLib' attribute set
|
||||||
|
# Wrapped with fix, so we can depend on other clanLib functions without passing the whole flake
|
||||||
|
lib.fix (clanLib: {
|
||||||
|
# TODO:
|
||||||
|
# Some bad lib functions that depend on something in 'self'.
|
||||||
|
# We should reduce the dependency on 'self' aka the 'flake' object
|
||||||
|
# This makes it easier to test
|
||||||
|
# most of the time passing the whole flake is unnecessary
|
||||||
|
callLib = file: args: import file { inherit lib clanLib; } // args;
|
||||||
|
|
||||||
evalClan = import ./eval-clan-modules {
|
evalClan = import ./eval-clan-modules {
|
||||||
inherit clan-core lib;
|
inherit lib;
|
||||||
|
clan-core = self;
|
||||||
pkgs = nixpkgs.legacyPackages.x86_64-linux;
|
pkgs = nixpkgs.legacyPackages.x86_64-linux;
|
||||||
};
|
};
|
||||||
in
|
buildClan = import ./build-clan {
|
||||||
{
|
inherit lib nixpkgs;
|
||||||
inherit (evalClan) evalClanModules evalClanModulesWithRoles;
|
clan-core = self;
|
||||||
buildClan = import ./build-clan { inherit lib nixpkgs clan-core; };
|
};
|
||||||
|
# ------------------------------------
|
||||||
|
# Lib functions that don't depend on 'self'
|
||||||
|
inventory = clanLib.callLib ./inventory { };
|
||||||
|
modules = clanLib.callLib ./frontmatter { };
|
||||||
facts = import ./facts.nix { inherit lib; };
|
facts = import ./facts.nix { inherit lib; };
|
||||||
inventory = import ./inventory { inherit lib clan-core; };
|
|
||||||
values = import ./values { inherit lib; };
|
values = import ./values { inherit lib; };
|
||||||
jsonschema = import ./jsonschema { inherit lib; };
|
jsonschema = import ./jsonschema { inherit lib; };
|
||||||
modules = import ./frontmatter {
|
|
||||||
inherit lib;
|
|
||||||
self = clan-core;
|
|
||||||
};
|
|
||||||
select = import ./select.nix;
|
select = import ./select.nix;
|
||||||
}
|
})
|
||||||
|
|||||||
33
lib/distributed-service/flake-module.nix
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
{ self, inputs, ... }:
|
||||||
|
let
|
||||||
|
inputOverrides = builtins.concatStringsSep " " (
|
||||||
|
builtins.map (input: " --override-input ${input} ${inputs.${input}}") (builtins.attrNames inputs)
|
||||||
|
);
|
||||||
|
in
|
||||||
|
{
|
||||||
|
perSystem =
|
||||||
|
{
|
||||||
|
pkgs,
|
||||||
|
lib,
|
||||||
|
system,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
# Run: nix-unit --extra-experimental-features flakes --flake .#legacyPackages.x86_64-linux.<attrName>
|
||||||
|
legacyPackages.evalTest-distributedServices = import ./tests {
|
||||||
|
inherit lib self;
|
||||||
|
};
|
||||||
|
|
||||||
|
checks = {
|
||||||
|
lib-distributedServices-eval = pkgs.runCommand "tests" { nativeBuildInputs = [ pkgs.nix-unit ]; } ''
|
||||||
|
export HOME="$(realpath .)"
|
||||||
|
nix-unit --eval-store "$HOME" \
|
||||||
|
--extra-experimental-features flakes \
|
||||||
|
${inputOverrides} \
|
||||||
|
--flake ${self}#legacyPackages.${system}.evalTest-distributedServices
|
||||||
|
|
||||||
|
touch $out
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
199
lib/distributed-service/inventory-adapter.nix
Normal file
@@ -0,0 +1,199 @@
|
|||||||
|
# Adapter function between the inventory.instances and the clan.service module
|
||||||
|
#
|
||||||
|
# Data flow:
|
||||||
|
# - inventory.instances -> Adapter -> clan.service module -> Service Resources (i.e. NixosModules per Machine, Vars per Service, etc.)
|
||||||
|
#
|
||||||
|
# What this file does:
|
||||||
|
#
|
||||||
|
# - Resolves the [Module] to an actual module-path and imports it.
|
||||||
|
# - Groups together all the same modules into a single import and creates all instances for it.
|
||||||
|
# - Resolves the inventory tags into machines. Tags don't exist at the service level.
|
||||||
|
# Also combines the settings for 'machines' and 'tags'.
|
||||||
|
{
|
||||||
|
lib,
|
||||||
|
# This is used to resolve the module imports from 'flake.inputs'
|
||||||
|
flake,
|
||||||
|
# The clan inventory
|
||||||
|
inventory,
|
||||||
|
}:
|
||||||
|
let
|
||||||
|
# Returns the list of machine names
|
||||||
|
# { ... } -> [ string ]
|
||||||
|
resolveTags =
|
||||||
|
{
|
||||||
|
# Available InventoryMachines :: { {name} :: { tags = [ string ]; }; }
|
||||||
|
machines,
|
||||||
|
# Requested members :: { machines, tags }
|
||||||
|
# Those will be resolved against the available machines
|
||||||
|
members,
|
||||||
|
# Not needed for resolution - only for error reporting
|
||||||
|
roleName,
|
||||||
|
instanceName,
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
machines =
|
||||||
|
members.machines or [ ]
|
||||||
|
++ (builtins.foldl' (
|
||||||
|
acc: tag:
|
||||||
|
let
|
||||||
|
# For error printing
|
||||||
|
availableTags = lib.foldlAttrs (
|
||||||
|
acc: _: v:
|
||||||
|
v.tags or [ ] ++ acc
|
||||||
|
) [ ] (machines);
|
||||||
|
|
||||||
|
tagMembers = builtins.attrNames (lib.filterAttrs (_n: v: builtins.elem tag v.tags or [ ]) machines);
|
||||||
|
in
|
||||||
|
if tagMembers == [ ] then
|
||||||
|
lib.warn ''
|
||||||
|
Service instance '${instanceName}': - ${roleName} tags: no machine with tag '${tag}' found.
|
||||||
|
Available tags: ${builtins.toJSON (lib.unique availableTags)}
|
||||||
|
'' acc
|
||||||
|
else
|
||||||
|
acc ++ tagMembers
|
||||||
|
) [ ] members.tags or [ ]);
|
||||||
|
};
|
||||||
|
|
||||||
|
machineHasTag = machineName: tagName: lib.elem tagName inventory.machines.${machineName}.tags;
|
||||||
|
|
||||||
|
# map the instances into the module
|
||||||
|
importedModuleWithInstances = lib.mapAttrs (
|
||||||
|
instanceName: instance:
|
||||||
|
let
|
||||||
|
# TODO:
|
||||||
|
resolvedModuleSet =
|
||||||
|
# If module.input is null, take the modules defined in this flake
|
||||||
|
# Otherwise it's an external input which provides the modules via the 'clan.modules' attribute
|
||||||
|
if instance.module.input == null then
|
||||||
|
inventory.modules
|
||||||
|
else
|
||||||
|
let
|
||||||
|
input =
|
||||||
|
flake.inputs.${instance.module.input} or (throw ''
|
||||||
|
Flake doesn't provide input with name '${instance.module.input}'
|
||||||
|
|
||||||
|
Choose one of the following inputs:
|
||||||
|
- ${
|
||||||
|
builtins.concatStringsSep "\n- " (
|
||||||
|
lib.attrNames (lib.filterAttrs (_name: input: input ? clan) flake.inputs)
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
To import a local module from 'inventory.modules' remove the 'input' attribute from the module definition
|
||||||
|
Remove the following line from the module definition:
|
||||||
|
|
||||||
|
...
|
||||||
|
- module.input = "${instance.module.input}"
|
||||||
|
|
||||||
|
|
||||||
|
'');
|
||||||
|
clanAttrs =
|
||||||
|
input.clan
|
||||||
|
or (throw "It seems the flake input ${instance.module.input} doesn't export any clan resources");
|
||||||
|
in
|
||||||
|
clanAttrs.modules;
|
||||||
|
|
||||||
|
resolvedModule =
|
||||||
|
resolvedModuleSet.${instance.module.name}
|
||||||
|
or (throw "flake doesn't provide clan-module with name ${instance.module.name}");
|
||||||
|
|
||||||
|
# Every instance includes machines via roles
|
||||||
|
# :: { client :: ... }
|
||||||
|
instanceRoles = lib.mapAttrs (
|
||||||
|
roleName: role:
|
||||||
|
let
|
||||||
|
resolvedMachines = resolveTags {
|
||||||
|
members = {
|
||||||
|
# Explicit members
|
||||||
|
machines = lib.attrNames role.machines;
|
||||||
|
# Resolved Members
|
||||||
|
tags = lib.attrNames role.tags;
|
||||||
|
};
|
||||||
|
inherit (inventory) machines;
|
||||||
|
inherit instanceName roleName;
|
||||||
|
};
|
||||||
|
in
|
||||||
|
# instances.<instanceName>.roles.<roleName> =
|
||||||
|
{
|
||||||
|
machines = lib.genAttrs resolvedMachines.machines (
|
||||||
|
machineName:
|
||||||
|
let
|
||||||
|
machineSettings = instance.roles.${roleName}.machines.${machineName}.settings or { };
|
||||||
|
settingsViaTags = lib.filterAttrs (
|
||||||
|
tagName: _: machineHasTag machineName tagName
|
||||||
|
) instance.roles.${roleName}.tags;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
# TODO: Do we want to wrap settings with
|
||||||
|
# setDefaultModuleLocation "inventory.instances.${instanceName}.roles.${roleName}.tags.${tagName}";
|
||||||
|
settings = {
|
||||||
|
imports = [
|
||||||
|
machineSettings
|
||||||
|
] ++ lib.attrValues (lib.mapAttrs (_tagName: v: v.settings) settingsViaTags);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
);
|
||||||
|
# Maps to settings for the role.
|
||||||
|
# In other words this sets the following path of a clan.service module:
|
||||||
|
# instances.<instanceName>.roles.<roleName>.settings
|
||||||
|
settings = role.settings;
|
||||||
|
}
|
||||||
|
) instance.roles;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
inherit (instance) module;
|
||||||
|
inherit resolvedModule instanceRoles;
|
||||||
|
}
|
||||||
|
) inventory.instances;
|
||||||
|
|
||||||
|
# TODO: Eagerly check the _class of the resolved module
|
||||||
|
evals = lib.mapAttrs (
|
||||||
|
_module_ident: instances:
|
||||||
|
(lib.evalModules {
|
||||||
|
class = "clan.service";
|
||||||
|
modules =
|
||||||
|
[
|
||||||
|
./service-module.nix
|
||||||
|
# Import the resolved module
|
||||||
|
(builtins.head instances).instance.resolvedModule
|
||||||
|
]
|
||||||
|
# Include all the instances that correlate to the resolved module
|
||||||
|
++ (builtins.map (v: {
|
||||||
|
instances.${v.instanceName}.roles = v.instance.instanceRoles;
|
||||||
|
}) instances);
|
||||||
|
})
|
||||||
|
) grouped;
|
||||||
|
|
||||||
|
# Group the instances by the module they resolve to
|
||||||
|
# This is necessary to evaluate the module in a single pass
|
||||||
|
# :: { <module.input>_<module.name> :: [ { name, value } ] }
|
||||||
|
# Since 'perMachine' needs access to all the instances we should include them as a whole
|
||||||
|
grouped = lib.foldlAttrs (
|
||||||
|
acc: instanceName: instance:
|
||||||
|
let
|
||||||
|
inputName = if instance.module.input == null then "self" else instance.module.input;
|
||||||
|
id = inputName + "-" + instance.module.name;
|
||||||
|
in
|
||||||
|
acc
|
||||||
|
// {
|
||||||
|
${id} = acc.${id} or [ ] ++ [
|
||||||
|
{
|
||||||
|
inherit instanceName instance;
|
||||||
|
}
|
||||||
|
];
|
||||||
|
}
|
||||||
|
) { } importedModuleWithInstances;
|
||||||
|
|
||||||
|
# TODO: Return an attribute set of resources instead of a plain list of nixosModules
|
||||||
|
allMachines = lib.foldlAttrs (
|
||||||
|
acc: _name: eval:
|
||||||
|
acc
|
||||||
|
// lib.mapAttrs (
|
||||||
|
machineName: result: acc.${machineName} or [ ] ++ [ result.nixosModule ]
|
||||||
|
) eval.config.result.final
|
||||||
|
) { } evals;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
inherit importedModuleWithInstances grouped;
|
||||||
|
inherit evals allMachines;
|
||||||
|
}
|
||||||
514
lib/distributed-service/service-module.nix
Normal file
@@ -0,0 +1,514 @@
|
|||||||
|
{ lib, config, ... }:
|
||||||
|
let
|
||||||
|
inherit (lib) mkOption types;
|
||||||
|
inherit (types) attrsWith submoduleWith;
|
||||||
|
|
||||||
|
# TODO:
|
||||||
|
# Remove once this gets merged upstream; performs in O(n*log(n)) instead of O(n^2)
|
||||||
|
# https://github.com/NixOS/nixpkgs/pull/355616/files
|
||||||
|
uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list);
|
||||||
|
|
||||||
|
checkInstanceRoles =
|
||||||
|
instanceName: instanceRoles:
|
||||||
|
let
|
||||||
|
unmatchedRoles = lib.filter (roleName: !lib.elem roleName (lib.attrNames config.roles)) (
|
||||||
|
lib.attrNames instanceRoles
|
||||||
|
);
|
||||||
|
in
|
||||||
|
if unmatchedRoles == [ ] then
|
||||||
|
true
|
||||||
|
else
|
||||||
|
throw ''
|
||||||
|
inventory instance: 'instances.${instanceName}' defines the following roles:
|
||||||
|
${builtins.toJSON unmatchedRoles}
|
||||||
|
|
||||||
|
But the clan-service module '${config.manifest.name}' defines roles:
|
||||||
|
${builtins.toJSON (lib.attrNames config.roles)}
|
||||||
|
'';
|
||||||
|
|
||||||
|
# checkInstanceSettings =
|
||||||
|
# instanceName: instanceSettings:
|
||||||
|
# let
|
||||||
|
# unmatchedRoles = 1;
|
||||||
|
# in
|
||||||
|
# unmatchedRoles;
|
||||||
|
|
||||||
|
/**
|
||||||
|
Merges the role- and machine-settings using the role interface
|
||||||
|
|
||||||
|
Arguments:
|
||||||
|
|
||||||
|
- roleName: The name of the role
|
||||||
|
- instanceName: The name of the instance
|
||||||
|
- settings: The settings of the machine. Leave empty to get the role settings
|
||||||
|
|
||||||
|
Returns: evalModules result
|
||||||
|
|
||||||
|
The caller is responsible for using .config or .extendModules
|
||||||
|
*/
|
||||||
|
# TODO: evaluate against the role.settings statically and use extendModules to get the machineSettings
|
||||||
|
# Doing this might improve performance
|
||||||
|
evalMachineSettings =
|
||||||
|
{
|
||||||
|
roleName,
|
||||||
|
instanceName,
|
||||||
|
machineName ? null,
|
||||||
|
settings,
|
||||||
|
}:
|
||||||
|
lib.evalModules {
|
||||||
|
# Prefix for better error reporting
|
||||||
|
# This prints the path where the option should be defined rather than the plain path within settings
|
||||||
|
# "The option `instances.foo.roles.server.machines.test.settings.<>' was accessed but has no value defined. Try setting the option."
|
||||||
|
prefix =
|
||||||
|
[
|
||||||
|
"instances"
|
||||||
|
instanceName
|
||||||
|
"roles"
|
||||||
|
roleName
|
||||||
|
]
|
||||||
|
++ (lib.optionals (machineName != null) [
|
||||||
|
"machines"
|
||||||
|
machineName
|
||||||
|
])
|
||||||
|
++ [ "settings" ];
|
||||||
|
|
||||||
|
# This may lead to better error reporting
|
||||||
|
# And catch errors if anyone tries to import e.g. a nixosConfiguration
|
||||||
|
# Set some class: i.e "network.server.settings"
|
||||||
|
class = lib.concatStringsSep "." [
|
||||||
|
config.manifest.name
|
||||||
|
roleName
|
||||||
|
"settings"
|
||||||
|
];
|
||||||
|
|
||||||
|
modules = [
|
||||||
|
(lib.setDefaultModuleLocation "Via clan.service module: roles.${roleName}.interface"
|
||||||
|
config.roles.${roleName}.interface
|
||||||
|
)
|
||||||
|
(lib.setDefaultModuleLocation "inventory.instances.${instanceName}.roles.${roleName}.settings"
|
||||||
|
config.instances.${instanceName}.roles.${roleName}.settings
|
||||||
|
)
|
||||||
|
settings
|
||||||
|
# Dont set the module location here
|
||||||
|
# This should already be set by the tags resolver
|
||||||
|
# config.instances.${instanceName}.roles.${roleName}.machines.${machineName}.settings
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
Makes a module extensible
|
||||||
|
returning its config
|
||||||
|
and making it extensible via '__functor' polymorphism
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```nix-repl
|
||||||
|
res = makeExtensibleConfig evalModules { modules = [ { options.foo = mkOption { default = 42; }; } ]; }
|
||||||
|
res
|
||||||
|
=>
|
||||||
|
{
|
||||||
|
foo = 42;
|
||||||
|
__functor = <function>;
|
||||||
|
}
|
||||||
|
|
||||||
|
# This allows overriding using mkDefault, mkForce, etc.
|
||||||
|
res { foo = 100; }
|
||||||
|
=>
|
||||||
|
{
|
||||||
|
foo = 100;
|
||||||
|
__functor = <function>;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
*/
|
||||||
|
makeExtensibleConfig =
|
||||||
|
f: args:
|
||||||
|
let
|
||||||
|
makeModuleExtensible =
|
||||||
|
eval:
|
||||||
|
eval.config
|
||||||
|
// {
|
||||||
|
__functor = _self: m: makeModuleExtensible (eval.extendModules { modules = lib.toList m; });
|
||||||
|
};
|
||||||
|
in
|
||||||
|
makeModuleExtensible (f args);
|
||||||
|
|
||||||
|
/**
|
||||||
|
Apply the settings to the instance
|
||||||
|
|
||||||
|
Takes a [ServiceInstance] :: { roles :: { roleName :: { machines :: { machineName :: { settings :: { ... } } } } } }
|
||||||
|
Returns the same object but evaluates the settings against the interface.
|
||||||
|
|
||||||
|
We need this because 'perMachine' shouldn't gain access to the raw deferred module.
|
||||||
|
*/
|
||||||
|
applySettings =
|
||||||
|
instanceName: instance:
|
||||||
|
lib.mapAttrs (roleName: role: {
|
||||||
|
machines = lib.mapAttrs (machineName: v: {
|
||||||
|
# TODO: evaluate the settings against the interface
|
||||||
|
# settings = (evalMachineSettings { inherit roleName instanceName; inherit (v) settings; }).config;
|
||||||
|
settings = (
|
||||||
|
makeExtensibleConfig evalMachineSettings {
|
||||||
|
inherit roleName instanceName machineName;
|
||||||
|
inherit (v) settings;
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}) role.machines;
|
||||||
|
# TODO: evaluate the settings against the interface
|
||||||
|
settings = (
|
||||||
|
makeExtensibleConfig evalMachineSettings {
|
||||||
|
inherit roleName instanceName;
|
||||||
|
inherit (role) settings;
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}) instance.roles;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
options = {
|
||||||
|
instances = mkOption {
|
||||||
|
default = throw ''
|
||||||
|
The clan service module ${config.manifest.name} doesn't define any instances.
|
||||||
|
|
||||||
|
Did you forget to create instances via 'inventory.instances' ?
|
||||||
|
'';
|
||||||
|
|
||||||
|
type = attrsWith {
|
||||||
|
placeholder = "instanceName";
|
||||||
|
elemType = submoduleWith {
|
||||||
|
modules = [
|
||||||
|
(
|
||||||
|
{ name, ... }:
|
||||||
|
{
|
||||||
|
# options.settings = mkOption {
|
||||||
|
# description = "settings of 'instance': ${name}";
|
||||||
|
# default = {};
|
||||||
|
# apply = v: lib.seq (checkInstanceSettings name v) v;
|
||||||
|
# };
|
||||||
|
options.roles = mkOption {
|
||||||
|
default = throw ''
|
||||||
|
Instance '${name}' of service '${config.manifest.name}' must define members via 'roles'.
|
||||||
|
|
||||||
|
To include a machine:
|
||||||
|
'instances.${name}.roles.<role-name>.machines.<your-machine-name>' must be set.
|
||||||
|
'';
|
||||||
|
type = attrsWith {
|
||||||
|
placeholder = "roleName";
|
||||||
|
elemType = submoduleWith {
|
||||||
|
modules = [
|
||||||
|
(
|
||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
# instances.{instanceName}.roles.{roleName}.machines
|
||||||
|
options.machines = mkOption {
|
||||||
|
type = attrsWith {
|
||||||
|
placeholder = "machineName";
|
||||||
|
elemType = submoduleWith {
|
||||||
|
modules = [
|
||||||
|
(m: {
|
||||||
|
options.settings = mkOption {
|
||||||
|
type = types.raw;
|
||||||
|
description = "Settings of '${name}-machine': ${m.name}.";
|
||||||
|
default = { };
|
||||||
|
};
|
||||||
|
})
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
# instances.{instanceName}.roles.{roleName}.settings
|
||||||
|
# options._settings = mkOption { };
|
||||||
|
# options._settingsViaTags = mkOption { };
|
||||||
|
# A deferred module that combines _settingsViaTags with _settings
|
||||||
|
options.settings = mkOption {
|
||||||
|
type = types.raw;
|
||||||
|
description = "Settings of 'role': ${name}";
|
||||||
|
default = { };
|
||||||
|
};
|
||||||
|
}
|
||||||
|
)
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
apply = v: lib.seq (checkInstanceRoles name v) v;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
)
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
manifest = mkOption {
|
||||||
|
description = "Meta information about this module itself";
|
||||||
|
type = submoduleWith {
|
||||||
|
modules = [
|
||||||
|
{
|
||||||
|
options = {
|
||||||
|
name = mkOption {
|
||||||
|
description = ''
|
||||||
|
The name of the module
|
||||||
|
|
||||||
|
Mainly used to create an error context while evaluating.
|
||||||
|
This helps with backtracking which module was included, and where an error came from originally.
|
||||||
|
'';
|
||||||
|
type = types.str;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
roles = mkOption {
|
||||||
|
default = throw ''
|
||||||
|
Role behavior of service '${config.manifest.name}' must be defined.
|
||||||
|
A 'clan.service' module should always define its behavior via 'roles'
|
||||||
|
---
|
||||||
|
To add the role:
|
||||||
|
`roles.client = {}`
|
||||||
|
|
||||||
|
To define multiple instance behavior:
|
||||||
|
`roles.client.perInstance = { ... }: {}`
|
||||||
|
'';
|
||||||
|
type = attrsWith {
|
||||||
|
placeholder = "roleName";
|
||||||
|
elemType = submoduleWith {
|
||||||
|
modules = [
|
||||||
|
(
|
||||||
|
{ name, ... }:
|
||||||
|
let
|
||||||
|
roleName = name;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
options.interface = mkOption {
|
||||||
|
type = types.deferredModule;
|
||||||
|
# TODO: Default to an empty module
|
||||||
|
# need to test that the empty module can be evaluated to empty settings
|
||||||
|
default = { };
|
||||||
|
};
|
||||||
|
options.perInstance = mkOption {
|
||||||
|
type = types.deferredModuleWith {
|
||||||
|
staticModules = [
|
||||||
|
# Common output format
|
||||||
|
# As described by adr
|
||||||
|
# { nixosModule, services, ... }
|
||||||
|
(
|
||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
options.nixosModule = mkOption { default = { }; };
|
||||||
|
options.services = mkOption {
|
||||||
|
type = attrsWith {
|
||||||
|
placeholder = "serviceName";
|
||||||
|
elemType = submoduleWith {
|
||||||
|
modules = [ ./service-module.nix ];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
default = { };
|
||||||
|
};
|
||||||
|
}
|
||||||
|
)
|
||||||
|
];
|
||||||
|
};
|
||||||
|
default = { };
|
||||||
|
apply =
|
||||||
|
/**
|
||||||
|
This apply transforms the module into a function that takes arguments and returns an evaluated module
|
||||||
|
The arguments of the function are determined by its scope:
|
||||||
|
-> 'perInstance' maps over all instances and over all machines; hence it takes 'instanceName' and 'machineName' as iterator arguments
|
||||||
|
*/
|
||||||
|
v: instanceName: machineName:
|
||||||
|
(lib.evalModules {
|
||||||
|
specialArgs = {
|
||||||
|
inherit instanceName;
|
||||||
|
machine = {
|
||||||
|
name = machineName;
|
||||||
|
roles = applySettings instanceName config.instances.${instanceName};
|
||||||
|
};
|
||||||
|
settings = (
|
||||||
|
makeExtensibleConfig evalMachineSettings {
|
||||||
|
inherit roleName instanceName machineName;
|
||||||
|
settings =
|
||||||
|
config.instances.${instanceName}.roles.${roleName}.machines.${machineName}.settings or { };
|
||||||
|
}
|
||||||
|
);
|
||||||
|
};
|
||||||
|
modules = [ v ];
|
||||||
|
}).config;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
)
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
perMachine = mkOption {
|
||||||
|
type = types.deferredModuleWith {
|
||||||
|
staticModules = [
|
||||||
|
# Common output format
|
||||||
|
# As described by adr
|
||||||
|
# { nixosModule, services, ... }
|
||||||
|
(
|
||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
options.nixosModule = mkOption { default = { }; };
|
||||||
|
options.services = mkOption {
|
||||||
|
type = attrsWith {
|
||||||
|
placeholder = "serviceName";
|
||||||
|
elemType = submoduleWith {
|
||||||
|
modules = [ ./service-module.nix ];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
default = { };
|
||||||
|
};
|
||||||
|
}
|
||||||
|
)
|
||||||
|
];
|
||||||
|
};
|
||||||
|
default = { };
|
||||||
|
apply =
|
||||||
|
v: machineName: machineScope:
|
||||||
|
(lib.evalModules {
|
||||||
|
specialArgs = {
|
||||||
|
/**
|
||||||
|
This apply transforms the module into a function that takes arguments and returns an evaluated module
|
||||||
|
The arguments of the function are determined by its scope:
|
||||||
|
-> 'perMachine' maps over all machines of a service, taking 'machineName' and a helper 'scope' (some aggregated attributes) as iterator arguments
|
||||||
|
The 'scope' attribute is used to collect the 'roles' of all 'instances' where the machine is part of and inject both into the specialArgs
|
||||||
|
*/
|
||||||
|
machine = {
|
||||||
|
name = machineName;
|
||||||
|
roles =
|
||||||
|
let
|
||||||
|
collectRoles =
|
||||||
|
instances:
|
||||||
|
lib.foldlAttrs (
|
||||||
|
r: _instanceName: instance:
|
||||||
|
r
|
||||||
|
++ lib.foldlAttrs (
|
||||||
|
r2: roleName: _role:
|
||||||
|
r2 ++ [ roleName ]
|
||||||
|
) [ ] instance.roles
|
||||||
|
) [ ] instances;
|
||||||
|
in
|
||||||
|
uniqueStrings (collectRoles machineScope.instances);
|
||||||
|
};
|
||||||
|
inherit (machineScope) instances;
|
||||||
|
|
||||||
|
# There are no machine settings.
|
||||||
|
# Settings are always role specific; having settings that apply to a machine globally would mean merging all role and all instance settings into a single module.
|
||||||
|
# But that will likely cause conflicts because it is inherently wrong.
|
||||||
|
settings = throw ''
|
||||||
|
'perMachine' doesn't have a 'settings' argument.
|
||||||
|
|
||||||
|
Alternatives:
|
||||||
|
- 'instances.<instanceName>.roles.<roleName>.settings' should be used instead.
|
||||||
|
- 'instances.<instanceName>.roles.<roleName>.machines.<machineName>.settings' should be used instead.
|
||||||
|
|
||||||
|
If that is insufficient, you might also consider using 'roles.<roleName>.perInstance' instead of 'perMachine'.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
modules = [ v ];
|
||||||
|
}).config;
|
||||||
|
};
|
||||||
|
# ---
|
||||||
|
# Place the result in _module.result to mark them as "internal" and discourage usage/overrides
|
||||||
|
#
|
||||||
|
# ---
|
||||||
|
# Intermediate result by mapping over the 'roles', 'instances', and 'machines'.
|
||||||
|
# During this step the 'perMachine' and 'perInstance' are applied.
|
||||||
|
# The result-set for a single machine can then be found by collecting all 'nixosModules' recursively.
|
||||||
|
result.allRoles = mkOption {
|
||||||
|
readOnly = true;
|
||||||
|
default = lib.mapAttrs (roleName: roleCfg: {
|
||||||
|
allInstances = lib.mapAttrs (instanceName: instanceCfg: {
|
||||||
|
allMachines = lib.mapAttrs (
|
||||||
|
machineName: _machineCfg: roleCfg.perInstance instanceName machineName
|
||||||
|
) instanceCfg.roles.${roleName}.machines or { };
|
||||||
|
}) config.instances;
|
||||||
|
}) config.roles;
|
||||||
|
};
|
||||||
|
|
||||||
|
result.allMachines = mkOption {
|
||||||
|
readOnly = true;
|
||||||
|
default =
|
||||||
|
let
|
||||||
|
collectMachinesFromInstance =
|
||||||
|
instance:
|
||||||
|
uniqueStrings (
|
||||||
|
lib.foldlAttrs (
|
||||||
|
acc: _roleName: role:
|
||||||
|
acc ++ (lib.attrNames role.machines)
|
||||||
|
) [ ] instance.roles
|
||||||
|
);
|
||||||
|
# The service machines are defined by collecting all instance machines
|
||||||
|
serviceMachines = lib.foldlAttrs (
|
||||||
|
acc: instanceName: instance:
|
||||||
|
acc
|
||||||
|
// lib.genAttrs (collectMachinesFromInstance instance) (machineName:
|
||||||
|
# Store information why this machine is part of the service
|
||||||
|
# MachineOrigin :: { instances :: [ string ]; }
|
||||||
|
{
|
||||||
|
# Helper attribute to
|
||||||
|
instances = [ instanceName ] ++ acc.${machineName}.instances or [ ];
|
||||||
|
# All roles of the machine ?
|
||||||
|
roles = lib.foldlAttrs (
|
||||||
|
acc2: roleName: role:
|
||||||
|
if builtins.elem machineName (lib.attrNames role.machines) then acc2 ++ [ roleName ] else acc2
|
||||||
|
) [ ] instance.roles;
|
||||||
|
})
|
||||||
|
) { } config.instances;
|
||||||
|
|
||||||
|
allMachines = lib.mapAttrs (_machineName: MachineOrigin: {
|
||||||
|
# Filter out instances that the machine is not part of
|
||||||
|
instances = lib.mapAttrs (_n: v: { roles = v; }) (
|
||||||
|
lib.filterAttrs (instanceName: _: builtins.elem instanceName MachineOrigin.instances) (
|
||||||
|
# Instances with evaluated settings
|
||||||
|
lib.mapAttrs applySettings config.instances
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}) serviceMachines;
|
||||||
|
in
|
||||||
|
# allMachines;
|
||||||
|
lib.mapAttrs config.perMachine allMachines;
|
||||||
|
};
|
||||||
|
|
||||||
|
result.final = mkOption {
|
||||||
|
readOnly = true;
|
||||||
|
default = lib.mapAttrs (
|
||||||
|
machineName: machineResult:
|
||||||
|
let
|
||||||
|
# config.result.allRoles.client.allInstances.bar.allMachines.test
|
||||||
|
# instanceResults = config.result.allRoles.client.allInstances.bar.allMachines.${machineName};
|
||||||
|
instanceResults = lib.foldlAttrs (
|
||||||
|
acc: roleName: role:
|
||||||
|
acc
|
||||||
|
++ lib.foldlAttrs (
|
||||||
|
acc: instanceName: instance:
|
||||||
|
if instance.allMachines.${machineName}.nixosModule or { } != { } then
|
||||||
|
acc
|
||||||
|
++ [
|
||||||
|
(lib.setDefaultModuleLocation
|
||||||
|
"Via instances.${instanceName}.roles.${roleName}.machines.${machineName}"
|
||||||
|
instance.allMachines.${machineName}.nixosModule
|
||||||
|
)
|
||||||
|
]
|
||||||
|
else
|
||||||
|
acc
|
||||||
|
) [ ] role.allInstances
|
||||||
|
) [ ] config.result.allRoles;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
inherit instanceResults;
|
||||||
|
nixosModule = {
|
||||||
|
imports = [
|
||||||
|
# For error backtracing. This module was produced by the 'perMachine' function
|
||||||
|
# TODO: check if we need this or if it leads to better errors if we pass the underlying module locations
|
||||||
|
(lib.setDefaultModuleLocation "clan.service: ${config.manifest.name} - via perMachine" machineResult.nixosModule)
|
||||||
|
] ++ instanceResults;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
) config.result.allMachines;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
327
lib/distributed-service/tests/default.nix
Normal file
@@ -0,0 +1,327 @@
|
|||||||
|
{
|
||||||
|
lib,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
let
|
||||||
|
inherit (lib)
|
||||||
|
evalModules
|
||||||
|
;
|
||||||
|
|
||||||
|
evalInventory =
|
||||||
|
m:
|
||||||
|
(evalModules {
|
||||||
|
# Static modules
|
||||||
|
modules = [
|
||||||
|
../../inventory/build-inventory/interface.nix
|
||||||
|
{
|
||||||
|
modules.test = { };
|
||||||
|
}
|
||||||
|
m
|
||||||
|
];
|
||||||
|
}).config;
|
||||||
|
|
||||||
|
flakeFixture = {
|
||||||
|
inputs = { };
|
||||||
|
};
|
||||||
|
|
||||||
|
callInventoryAdapter =
|
||||||
|
inventoryModule:
|
||||||
|
import ../inventory-adapter.nix {
|
||||||
|
inherit lib;
|
||||||
|
flake = flakeFixture;
|
||||||
|
inventory = evalInventory inventoryModule;
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
test_simple =
|
||||||
|
let
|
||||||
|
res = callInventoryAdapter {
|
||||||
|
# Authored module
|
||||||
|
# A minimal module looks like this
|
||||||
|
# It isn't exactly doing anything but it's a valid module that produces an output
|
||||||
|
modules."simple-module" = {
|
||||||
|
_class = "clan.service";
|
||||||
|
manifest = {
|
||||||
|
name = "netwitness";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
# User config
|
||||||
|
instances."instance_foo" = {
|
||||||
|
module = {
|
||||||
|
name = "simple-module";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
# Test that the module is mapped into the output
|
||||||
|
# We might change the attribute name in the future
|
||||||
|
expr = res.evals ? "self-simple-module";
|
||||||
|
expected = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
# A module can be imported multiple times
|
||||||
|
# A module can also have multiple instances within the same module
|
||||||
|
# This means modules must be grouped together and imported once
|
||||||
|
# All instances should be included within one evaluation to make all of them available
|
||||||
|
test_module_grouping =
|
||||||
|
let
|
||||||
|
res = callInventoryAdapter {
|
||||||
|
# Authored module
|
||||||
|
# A minimal module looks like this
|
||||||
|
# It isn't exactly doing anything but it's a valid module that produces an output
|
||||||
|
modules."A" = {
|
||||||
|
_class = "clan.service";
|
||||||
|
manifest = {
|
||||||
|
name = "A-name";
|
||||||
|
};
|
||||||
|
|
||||||
|
perMachine = { }: { };
|
||||||
|
};
|
||||||
|
modules."B" = {
|
||||||
|
_class = "clan.service";
|
||||||
|
manifest = {
|
||||||
|
name = "B-name";
|
||||||
|
};
|
||||||
|
|
||||||
|
perMachine = { }: { };
|
||||||
|
};
|
||||||
|
# User config
|
||||||
|
instances."instance_foo" = {
|
||||||
|
module = {
|
||||||
|
name = "A";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
instances."instance_bar" = {
|
||||||
|
module = {
|
||||||
|
name = "B";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
instances."instance_baz" = {
|
||||||
|
module = {
|
||||||
|
name = "A";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
# Test that the module is mapped into the output
|
||||||
|
# We might change the attribute name in the future
|
||||||
|
expr = lib.mapAttrs (_n: v: builtins.length v) res.grouped;
|
||||||
|
expected = {
|
||||||
|
self-A = 2;
|
||||||
|
self-B = 1;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
test_creates_all_instances =
|
||||||
|
let
|
||||||
|
res = callInventoryAdapter {
|
||||||
|
# Authored module
|
||||||
|
# A minimal module looks like this
|
||||||
|
# It isn't exactly doing anything but it's a valid module that produces an output
|
||||||
|
modules."A" = {
|
||||||
|
_class = "clan.service";
|
||||||
|
manifest = {
|
||||||
|
name = "network";
|
||||||
|
};
|
||||||
|
|
||||||
|
perMachine = { }: { };
|
||||||
|
};
|
||||||
|
instances."instance_foo" = {
|
||||||
|
module = {
|
||||||
|
name = "A";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
instances."instance_bar" = {
|
||||||
|
module = {
|
||||||
|
name = "A";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
instances."instance_zaza" = {
|
||||||
|
module = {
|
||||||
|
name = "B";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
# Test that the module is mapped into the output
|
||||||
|
# We might change the attribute name in the future
|
||||||
|
expr = lib.attrNames res.evals.self-A.config.instances;
|
||||||
|
expected = [
|
||||||
|
"instance_bar"
|
||||||
|
"instance_foo"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
# Membership via roles
|
||||||
|
test_add_machines_directly =
|
||||||
|
let
|
||||||
|
res = callInventoryAdapter {
|
||||||
|
# Authored module
|
||||||
|
# A minimal module looks like this
|
||||||
|
# It isn't exactly doing anything but it's a valid module that produces an output
|
||||||
|
modules."A" = {
|
||||||
|
_class = "clan.service";
|
||||||
|
manifest = {
|
||||||
|
name = "network";
|
||||||
|
};
|
||||||
|
# Define a role without special behavior
|
||||||
|
roles.peer = { };
|
||||||
|
|
||||||
|
# perMachine = {}: {};
|
||||||
|
};
|
||||||
|
machines = {
|
||||||
|
jon = { };
|
||||||
|
sara = { };
|
||||||
|
hxi = { };
|
||||||
|
};
|
||||||
|
instances."instance_foo" = {
|
||||||
|
module = {
|
||||||
|
name = "A";
|
||||||
|
};
|
||||||
|
roles.peer.machines.jon = { };
|
||||||
|
};
|
||||||
|
instances."instance_bar" = {
|
||||||
|
module = {
|
||||||
|
name = "A";
|
||||||
|
};
|
||||||
|
roles.peer.machines.sara = { };
|
||||||
|
};
|
||||||
|
instances."instance_zaza" = {
|
||||||
|
module = {
|
||||||
|
name = "B";
|
||||||
|
};
|
||||||
|
roles.peer.tags.all = { };
|
||||||
|
};
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
# Test that the module is mapped into the output
|
||||||
|
# We might change the attribute name in the future
|
||||||
|
expr = lib.attrNames res.evals.self-A.config.result.allMachines;
|
||||||
|
expected = [
|
||||||
|
"jon"
|
||||||
|
"sara"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
# Membership via tags
|
||||||
|
test_add_machines_via_tags =
|
||||||
|
let
|
||||||
|
res = callInventoryAdapter {
|
||||||
|
# Authored module
|
||||||
|
# A minimal module looks like this
|
||||||
|
# It isn't exactly doing anything but it's a valid module that produces an output
|
||||||
|
modules."A" = {
|
||||||
|
_class = "clan.service";
|
||||||
|
manifest = {
|
||||||
|
name = "network";
|
||||||
|
};
|
||||||
|
# Define a role without special behavior
|
||||||
|
roles.peer = { };
|
||||||
|
|
||||||
|
# perMachine = {}: {};
|
||||||
|
};
|
||||||
|
machines = {
|
||||||
|
jon = {
|
||||||
|
tags = [ "foo" ];
|
||||||
|
};
|
||||||
|
sara = {
|
||||||
|
tags = [ "foo" ];
|
||||||
|
};
|
||||||
|
hxi = { };
|
||||||
|
};
|
||||||
|
instances."instance_foo" = {
|
||||||
|
module = {
|
||||||
|
name = "A";
|
||||||
|
};
|
||||||
|
roles.peer.tags.foo = { };
|
||||||
|
};
|
||||||
|
instances."instance_zaza" = {
|
||||||
|
module = {
|
||||||
|
name = "B";
|
||||||
|
};
|
||||||
|
roles.peer.tags.all = { };
|
||||||
|
};
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
# Test that the module is mapped into the output
|
||||||
|
# We might change the attribute name in the future
|
||||||
|
expr = lib.attrNames res.evals.self-A.config.result.allMachines;
|
||||||
|
expected = [
|
||||||
|
"jon"
|
||||||
|
"sara"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
per_machine_args = import ./per_machine_args.nix { inherit lib callInventoryAdapter; };
|
||||||
|
# test_per_machine_receives_instances =
|
||||||
|
# let
|
||||||
|
# res = callInventoryAdapter {
|
||||||
|
# # Authored module
|
||||||
|
# # A minimal module looks like this
|
||||||
|
# # It isn't exactly doing anything but it's a valid module that produces an output
|
||||||
|
# modules."A" = {
|
||||||
|
# _class = "clan.service";
|
||||||
|
# manifest = {
|
||||||
|
# name = "network";
|
||||||
|
# };
|
||||||
|
# # Define a role without special behavior
|
||||||
|
# roles.peer = { };
|
||||||
|
|
||||||
|
# perMachine =
|
||||||
|
# { instances, ... }:
|
||||||
|
# {
|
||||||
|
# nixosModule = instances;
|
||||||
|
# };
|
||||||
|
# };
|
||||||
|
# machines = {
|
||||||
|
# jon = { };
|
||||||
|
# sara = { };
|
||||||
|
# };
|
||||||
|
# instances."instance_foo" = {
|
||||||
|
# module = {
|
||||||
|
# name = "A";
|
||||||
|
# };
|
||||||
|
# roles.peer.machines.jon = { };
|
||||||
|
# };
|
||||||
|
# instances."instance_bar" = {
|
||||||
|
# module = {
|
||||||
|
# name = "A";
|
||||||
|
# };
|
||||||
|
# roles.peer.machines.sara = { };
|
||||||
|
# };
|
||||||
|
# instances."instance_zaza" = {
|
||||||
|
# module = {
|
||||||
|
# name = "B";
|
||||||
|
# };
|
||||||
|
# roles.peer.tags.all = { };
|
||||||
|
# };
|
||||||
|
# };
|
||||||
|
# in
|
||||||
|
# {
|
||||||
|
# expr = {
|
||||||
|
# hasMachineSettings =
|
||||||
|
# res.evals.self-A.config.result.allMachines.jon.nixosModule. # { {instanceName} :: { roles :: { {roleName} :: { machines :: { {machineName} :: { settings :: {} } } } } } }
|
||||||
|
# instance_foo.roles.peer.machines.jon ? settings;
|
||||||
|
# machineSettingsEmpty =
|
||||||
|
# lib.filterAttrs (n: _v: n != "__functor" ) res.evals.self-A.config.result.allMachines.jon.nixosModule. # { {instanceName} :: { roles :: { {roleName} :: { machines :: { {machineName} :: { settings :: {} } } } } } }
|
||||||
|
# instance_foo.roles.peer.machines.jon.settings;
|
||||||
|
# hasRoleSettings =
|
||||||
|
# res.evals.self-A.config.result.allMachines.jon.nixosModule. # { {instanceName} :: { roles :: { {roleName} :: { machines :: { {machineName} :: { settings :: {} } } } } } }
|
||||||
|
# instance_foo.roles.peer ? settings;
|
||||||
|
# roleSettingsEmpty =
|
||||||
|
# lib.filterAttrs (n: _v: n != "__functor" ) res.evals.self-A.config.result.allMachines.jon.nixosModule. # { {instanceName} :: { roles :: { {roleName} :: { machines :: { {machineName} :: { settings :: {} } } } } } }
|
||||||
|
# instance_foo.roles.peer.settings;
|
||||||
|
# };
|
||||||
|
# expected = {
|
||||||
|
# hasMachineSettings = true;
|
||||||
|
# machineSettingsEmpty = {};
|
||||||
|
# hasRoleSettings = true;
|
||||||
|
# roleSettingsEmpty = {};
|
||||||
|
# };
|
||||||
|
# };
|
||||||
|
}
|
||||||
107
lib/distributed-service/tests/per_machine_args.nix
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
{ lib, callInventoryAdapter }:
|
||||||
|
|
||||||
|
let # Authored module
|
||||||
|
# A minimal module looks like this
|
||||||
|
# It isn't exactly doing anything but it's a valid module that produces an output
|
||||||
|
modules."A" = {
|
||||||
|
_class = "clan.service";
|
||||||
|
manifest = {
|
||||||
|
name = "network";
|
||||||
|
};
|
||||||
|
# Define two roles with unmergeable interfaces
|
||||||
|
# Both define some 'timeout' but with completely different types.
|
||||||
|
roles.peer.interface =
|
||||||
|
{ lib, ... }:
|
||||||
|
{
|
||||||
|
options.timeout = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
roles.server.interface =
|
||||||
|
{ lib, ... }:
|
||||||
|
{
|
||||||
|
options.timeout = lib.mkOption {
|
||||||
|
type = lib.types.submodule;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
perMachine =
|
||||||
|
{ instances, ... }:
|
||||||
|
{
|
||||||
|
nixosModule = instances;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
machines = {
|
||||||
|
jon = { };
|
||||||
|
sara = { };
|
||||||
|
};
|
||||||
|
res = callInventoryAdapter {
|
||||||
|
inherit modules machines;
|
||||||
|
instances."instance_foo" = {
|
||||||
|
module = {
|
||||||
|
name = "A";
|
||||||
|
};
|
||||||
|
roles.peer.machines.jon = {
|
||||||
|
settings.timeout = lib.mkForce "foo-peer-jon";
|
||||||
|
};
|
||||||
|
roles.peer = {
|
||||||
|
settings.timeout = "foo-peer";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
instances."instance_bar" = {
|
||||||
|
module = {
|
||||||
|
name = "A";
|
||||||
|
};
|
||||||
|
roles.peer.machines.jon = {
|
||||||
|
settings.timeout = "bar-peer-jon";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
instances."instance_zaza" = {
|
||||||
|
module = {
|
||||||
|
name = "B";
|
||||||
|
};
|
||||||
|
roles.peer.tags.all = { };
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
filterInternals = lib.filterAttrs (n: _v: !lib.hasPrefix "_" n);
|
||||||
|
in
|
||||||
|
|
||||||
|
{
|
||||||
|
|
||||||
|
# settings should evaluate
|
||||||
|
test_per_machine_receives_instance_settings = {
|
||||||
|
expr = {
|
||||||
|
hasMachineSettings =
|
||||||
|
res.evals.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer.machines.jon
|
||||||
|
? settings;
|
||||||
|
|
||||||
|
# settings are specific.
|
||||||
|
# Below we access:
|
||||||
|
# instance = instance_foo
|
||||||
|
# roles = peer
|
||||||
|
# machines = jon
|
||||||
|
specificMachineSettings = filterInternals res.evals.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer.machines.jon.settings;
|
||||||
|
|
||||||
|
hasRoleSettings =
|
||||||
|
res.evals.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer ? settings;
|
||||||
|
|
||||||
|
# settings are specific.
|
||||||
|
# Below we access:
|
||||||
|
# instance = instance_foo
|
||||||
|
# roles = peer
|
||||||
|
# machines = *
|
||||||
|
specificRoleSettings = filterInternals res.evals.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer.settings;
|
||||||
|
};
|
||||||
|
expected = {
|
||||||
|
hasMachineSettings = true;
|
||||||
|
specificMachineSettings = {
|
||||||
|
timeout = "foo-peer-jon";
|
||||||
|
};
|
||||||
|
hasRoleSettings = true;
|
||||||
|
specificRoleSettings = {
|
||||||
|
timeout = "foo-peer";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -10,16 +10,20 @@ let
|
|||||||
pathExists
|
pathExists
|
||||||
;
|
;
|
||||||
in
|
in
|
||||||
{
|
rec {
|
||||||
|
# We should remove this.
|
||||||
|
# It would enforce treating at least 'lib' as a module in a whole
|
||||||
imports = filter pathExists [
|
imports = filter pathExists [
|
||||||
./jsonschema/flake-module.nix
|
./jsonschema/flake-module.nix
|
||||||
./inventory/flake-module.nix
|
./inventory/flake-module.nix
|
||||||
./build-clan/flake-module.nix
|
./build-clan/flake-module.nix
|
||||||
./values/flake-module.nix
|
./values/flake-module.nix
|
||||||
|
./distributed-service/flake-module.nix
|
||||||
];
|
];
|
||||||
flake.lib = import ./default.nix {
|
flake.clanLib = import ./default.nix {
|
||||||
inherit lib inputs;
|
inherit lib inputs self;
|
||||||
inherit (inputs) nixpkgs;
|
inherit (inputs) nixpkgs;
|
||||||
clan-core = self;
|
|
||||||
};
|
};
|
||||||
|
# TODO: remove this legacy alias
|
||||||
|
flake.lib = flake.clanLib;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
{ lib, self }:
|
{ lib, clanLib }:
|
||||||
let
|
let
|
||||||
# Trim the .nix extension from a filename
|
# Trim the .nix extension from a filename
|
||||||
trimExtension = name: builtins.substring 0 (builtins.stringLength name - 4) name;
|
trimExtension = name: builtins.substring 0 (builtins.stringLength name - 4) name;
|
||||||
|
|
||||||
jsonWithoutHeader = self.lib.jsonschema {
|
jsonWithoutHeader = clanLib.jsonschema {
|
||||||
includeDefaults = true;
|
includeDefaults = true;
|
||||||
header = { };
|
header = { };
|
||||||
};
|
};
|
||||||
@@ -13,7 +13,7 @@ let
|
|||||||
lib.mapAttrs (
|
lib.mapAttrs (
|
||||||
_moduleName: rolesOptions:
|
_moduleName: rolesOptions:
|
||||||
lib.mapAttrs (_roleName: options: jsonWithoutHeader.parseOptions options { }) rolesOptions
|
lib.mapAttrs (_roleName: options: jsonWithoutHeader.parseOptions options { }) rolesOptions
|
||||||
) (self.lib.evalClanModulesWithRoles modules);
|
) (clanLib.evalClan.evalClanModulesWithRoles modules);
|
||||||
|
|
||||||
evalFrontmatter =
|
evalFrontmatter =
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
{
|
{
|
||||||
lib,
|
lib,
|
||||||
config,
|
config,
|
||||||
clan-core,
|
clanLib,
|
||||||
...
|
...
|
||||||
}:
|
}:
|
||||||
let
|
let
|
||||||
@@ -43,8 +43,7 @@ let
|
|||||||
|
|
||||||
checkService =
|
checkService =
|
||||||
modulepath: serviceName:
|
modulepath: serviceName:
|
||||||
builtins.elem "inventory"
|
builtins.elem "inventory" (clanLib.modules.getFrontmatter modulepath serviceName).features or [ ];
|
||||||
(clan-core.lib.modules.getFrontmatter modulepath serviceName).features or [ ];
|
|
||||||
|
|
||||||
compileMachine =
|
compileMachine =
|
||||||
{ machineConfig }:
|
{ machineConfig }:
|
||||||
@@ -160,7 +159,7 @@ in
|
|||||||
inherit
|
inherit
|
||||||
resolveTags
|
resolveTags
|
||||||
inventory
|
inventory
|
||||||
clan-core
|
clanLib
|
||||||
machineName
|
machineName
|
||||||
serviceConfigs
|
serviceConfigs
|
||||||
;
|
;
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
config,
|
config,
|
||||||
resolveTags,
|
resolveTags,
|
||||||
inventory,
|
inventory,
|
||||||
clan-core,
|
clanLib,
|
||||||
machineName,
|
machineName,
|
||||||
serviceConfigs,
|
serviceConfigs,
|
||||||
...
|
...
|
||||||
@@ -14,7 +14,7 @@ in
|
|||||||
{
|
{
|
||||||
# Roles resolution
|
# Roles resolution
|
||||||
# : List String
|
# : List String
|
||||||
supportedRoles = clan-core.lib.modules.getRoles inventory.modules serviceName;
|
supportedRoles = clanLib.modules.getRoles inventory.modules serviceName;
|
||||||
matchedRoles = builtins.attrNames (
|
matchedRoles = builtins.attrNames (
|
||||||
lib.filterAttrs (_: ms: builtins.elem machineName ms) config.machinesRoles
|
lib.filterAttrs (_: ms: builtins.elem machineName ms) config.machinesRoles
|
||||||
);
|
);
|
||||||
@@ -56,7 +56,7 @@ in
|
|||||||
|
|
||||||
assertions = lib.concatMapAttrs (
|
assertions = lib.concatMapAttrs (
|
||||||
instanceName: resolvedRoles:
|
instanceName: resolvedRoles:
|
||||||
clan-core.lib.modules.checkConstraints {
|
clanLib.modules.checkConstraints {
|
||||||
moduleName = serviceName;
|
moduleName = serviceName;
|
||||||
allModules = inventory.modules;
|
allModules = inventory.modules;
|
||||||
inherit resolvedRoles instanceName;
|
inherit resolvedRoles instanceName;
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
# Generate partial NixOS configurations for every machine in the inventory
|
# Generate partial NixOS configurations for every machine in the inventory
|
||||||
# This function is responsible for generating the module configuration for every machine in the inventory.
|
# This function is responsible for generating the module configuration for every machine in the inventory.
|
||||||
{ lib, clan-core }:
|
{ lib, clanLib }:
|
||||||
let
|
let
|
||||||
/*
|
/*
|
||||||
Returns a set with NixOS configuration for every machine in the inventory.
|
Returns a set with NixOS configuration for every machine in the inventory.
|
||||||
@@ -11,7 +11,7 @@ let
|
|||||||
{ inventory, directory }:
|
{ inventory, directory }:
|
||||||
(lib.evalModules {
|
(lib.evalModules {
|
||||||
specialArgs = {
|
specialArgs = {
|
||||||
inherit clan-core;
|
inherit clanLib;
|
||||||
};
|
};
|
||||||
modules = [
|
modules = [
|
||||||
./builder
|
./builder
|
||||||
|
|||||||
@@ -103,7 +103,9 @@ in
|
|||||||
default = options;
|
default = options;
|
||||||
};
|
};
|
||||||
modules = lib.mkOption {
|
modules = lib.mkOption {
|
||||||
type = types.attrsOf types.path;
|
# Don't define the type yet
|
||||||
|
# We manually transform the value with types.deferredModule.merge later to keep them serializable
|
||||||
|
type = types.attrsOf types.raw;
|
||||||
default = { };
|
default = { };
|
||||||
defaultText = "clanModules of clan-core";
|
defaultText = "clanModules of clan-core";
|
||||||
description = ''
|
description = ''
|
||||||
@@ -275,7 +277,79 @@ in
|
|||||||
)
|
)
|
||||||
);
|
);
|
||||||
};
|
};
|
||||||
|
instances = lib.mkOption {
|
||||||
|
# Keep as internal until all de-/serialization issues are resolved
|
||||||
|
visible = false;
|
||||||
|
internal = true;
|
||||||
|
description = "Multi host service module instances";
|
||||||
|
type = types.attrsOf (
|
||||||
|
types.submodule {
|
||||||
|
options = {
|
||||||
|
# ModuleSpec
|
||||||
|
module = lib.mkOption {
|
||||||
|
type = types.submodule {
|
||||||
|
options.input = lib.mkOption {
|
||||||
|
type = types.nullOr types.str;
|
||||||
|
default = null;
|
||||||
|
defaultText = "Name of the input. Default to 'null' which means the module is local";
|
||||||
|
description = ''
|
||||||
|
Name of the input. Default to 'null' which means the module is local
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
options.name = lib.mkOption {
|
||||||
|
type = types.str;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
roles = lib.mkOption {
|
||||||
|
default = { };
|
||||||
|
type = types.attrsOf (
|
||||||
|
types.submodule {
|
||||||
|
options = {
|
||||||
|
# TODO: deduplicate
|
||||||
|
machines = lib.mkOption {
|
||||||
|
type = types.attrsOf (
|
||||||
|
types.submodule {
|
||||||
|
options.settings = lib.mkOption {
|
||||||
|
default = { };
|
||||||
|
# Don't transform the value with `types.deferredModule` here. We need to keep it JSON serializable
|
||||||
|
# TODO: We need a custom serializer for deferredModule
|
||||||
|
type = types.deferredModule;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
);
|
||||||
|
default = { };
|
||||||
|
};
|
||||||
|
tags = lib.mkOption {
|
||||||
|
type = types.attrsOf (
|
||||||
|
types.submodule {
|
||||||
|
options.settings = lib.mkOption {
|
||||||
|
default = { };
|
||||||
|
type = types.deferredModule;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
);
|
||||||
|
default = { };
|
||||||
|
};
|
||||||
|
settings = lib.mkOption {
|
||||||
|
default = { };
|
||||||
|
type = types.deferredModule;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
);
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
);
|
||||||
|
default = { };
|
||||||
|
apply =
|
||||||
|
v:
|
||||||
|
if v == { } then
|
||||||
|
v
|
||||||
|
else
|
||||||
|
lib.warn "Inventory.instances and related features are still under development. Please use with care." v;
|
||||||
|
};
|
||||||
services = lib.mkOption {
|
services = lib.mkOption {
|
||||||
description = ''
|
description = ''
|
||||||
Services of the inventory.
|
Services of the inventory.
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
{ lib, clan-core }:
|
{ lib, clanLib }:
|
||||||
{
|
{
|
||||||
inherit (import ./build-inventory { inherit lib clan-core; }) buildInventory;
|
inherit (import ./build-inventory { inherit lib clanLib; }) buildInventory;
|
||||||
interface = ./build-inventory/interface.nix;
|
interface = ./build-inventory/interface.nix;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -50,11 +50,7 @@ in
|
|||||||
self.filter {
|
self.filter {
|
||||||
include = [
|
include = [
|
||||||
"flakeModules"
|
"flakeModules"
|
||||||
"lib/default.nix"
|
"lib"
|
||||||
"lib/flake-module.nix"
|
|
||||||
"lib/inventory"
|
|
||||||
"lib/constraints"
|
|
||||||
"lib/frontmatter"
|
|
||||||
"clanModules/flake-module.nix"
|
"clanModules/flake-module.nix"
|
||||||
"clanModules/borgbackup"
|
"clanModules/borgbackup"
|
||||||
];
|
];
|
||||||
|
|||||||
@@ -1,4 +0,0 @@
|
|||||||
---
|
|
||||||
features = [ "inventory" ]
|
|
||||||
---
|
|
||||||
Description
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
{ ... }:
|
|
||||||
{
|
|
||||||
_class = "clan";
|
|
||||||
perInstance = { };
|
|
||||||
perService = { };
|
|
||||||
}
|
|
||||||
@@ -2,8 +2,8 @@
|
|||||||
let
|
let
|
||||||
inventory = (
|
inventory = (
|
||||||
import ../build-inventory {
|
import ../build-inventory {
|
||||||
|
inherit lib;
|
||||||
inherit lib clan-core;
|
clanLib = clan-core.clanLib;
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
inherit (inventory) buildInventory;
|
inherit (inventory) buildInventory;
|
||||||
@@ -17,11 +17,9 @@ in
|
|||||||
A = { };
|
A = { };
|
||||||
};
|
};
|
||||||
services = {
|
services = {
|
||||||
clanModule = { };
|
|
||||||
legacyModule = { };
|
legacyModule = { };
|
||||||
};
|
};
|
||||||
modules = {
|
modules = {
|
||||||
clanModule = ./clanModule;
|
|
||||||
legacyModule = ./legacyModule;
|
legacyModule = ./legacyModule;
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
@@ -30,17 +28,11 @@ in
|
|||||||
in
|
in
|
||||||
{
|
{
|
||||||
expr = {
|
expr = {
|
||||||
clanModule = lib.filterAttrs (
|
|
||||||
name: _: name == "isClanModule"
|
|
||||||
) compiled.machines.A.compiledServices.clanModule;
|
|
||||||
legacyModule = lib.filterAttrs (
|
legacyModule = lib.filterAttrs (
|
||||||
name: _: name == "isClanModule"
|
name: _: name == "isClanModule"
|
||||||
) compiled.machines.A.compiledServices.legacyModule;
|
) compiled.machines.A.compiledServices.legacyModule;
|
||||||
};
|
};
|
||||||
expected = {
|
expected = {
|
||||||
clanModule = {
|
|
||||||
isClanModule = true;
|
|
||||||
};
|
|
||||||
legacyModule = {
|
legacyModule = {
|
||||||
isClanModule = false;
|
isClanModule = false;
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -1,10 +1,9 @@
|
|||||||
{
|
{
|
||||||
lib,
|
lib,
|
||||||
config,
|
|
||||||
clan-core,
|
clan-core,
|
||||||
...
|
...
|
||||||
}:
|
}:
|
||||||
{
|
{
|
||||||
# Just some random stuff
|
# Just some random stuff
|
||||||
config.user.user = lib.mapAttrs clan-core.users.root;
|
options.test = lib.mapAttrs clan-core;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -28,9 +28,7 @@ in
|
|||||||
self.filter {
|
self.filter {
|
||||||
include = [
|
include = [
|
||||||
"flakeModules"
|
"flakeModules"
|
||||||
"lib/default.nix"
|
"lib"
|
||||||
"lib/flake-module.nix"
|
|
||||||
"lib/values"
|
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
}#legacyPackages.${system}.evalTests-values
|
}#legacyPackages.${system}.evalTests-values
|
||||||
|
|||||||
@@ -6,11 +6,12 @@
 {
   config.clan.core.vars.settings = lib.mkIf (config.clan.core.vars.settings.secretStore == "vm") {
     fileModule = file: {
-      path =
+      path = lib.mkIf (file.config.secret == true) (
         if file.config.neededFor == "partitioning" then
           "/run/partitioning-secrets/${file.config.generatorName}/${file.config.name}"
         else
-          "/etc/secrets/${file.config.generatorName}/${file.config.name}";
+          "/etc/secrets/${file.config.generatorName}/${file.config.name}"
+      );
    };
    secretModule = "clan_cli.vars.secret_modules.vm";
  };
@@ -12,6 +12,7 @@
     inputs.sops-nix.nixosModules.sops
     inputs.nixos-facter-modules.nixosModules.facter
     inputs.disko.nixosModules.default
+    inputs.data-mesher.nixosModules.data-mesher
    ./clanCore
    (
      { pkgs, lib, ... }:
@@ -86,7 +86,7 @@ class FlakeCacheEntry:
             self.selector = {int(selectors[0])}
             selector = int(selectors[0])
         elif isinstance(selectors[0], str):
-            self.selector = {(selectors[0])}
+            self.selector = {selectors[0]}
            selector = selectors[0]
        elif isinstance(selectors[0], AllSelector):
            self.selector = AllSelector()
@@ -154,7 +154,9 @@ class FlakeCacheEntry:
             self.value = value

     def insert(
-        self, value: str | float | dict[str, Any] | list[Any], selectors: list[Selector]
+        self,
+        value: str | float | dict[str, Any] | list[Any] | None,
+        selectors: list[Selector],
    ) -> None:
        selector: Selector
        if selectors == []:
@@ -244,6 +246,12 @@ class FlakeCacheEntry:
             if self.value != value:
                 msg = "value mismatch in cache, something is fishy"
                 raise TypeError(msg)

+        elif value is None:
+            if self.value is not None:
+                msg = "value mismatch in cache, something is fishy"
+                raise TypeError(msg)
+
        else:
            msg = f"Cannot insert value of type {type(value)} into cache"
            raise TypeError(msg)
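With None now accepted by insert(), re-inserting a missing value is meant to be idempotent instead of raising; the new test_insert added further down in this diff exercises exactly that. A minimal sketch of the intended behaviour (the import path is an assumption, not shown in this diff):

from clan_cli.flake import FlakeCacheEntry  # assumed import path

cache = FlakeCacheEntry({}, [])
cache.insert(None, ["nix"])  # caches "this selector resolved to nothing"
cache.insert(None, ["nix"])  # second insert of the same None value is accepted
assert cache.select(["nix"]) is None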
@@ -473,7 +481,7 @@ class Flake:
             flake = builtins.getFlake("path:{self.store_path}?narHash={self.hash}");
           in
             flake.inputs.nixpkgs.legacyPackages.{config["system"]}.writeText "clan-flake-select" (
-              builtins.toJSON [ ({" ".join([f"flake.clanInternals.lib.select ''{attr}'' flake" for attr in selectors])}) ]
+              builtins.toJSON [ ({" ".join([f"flake.clanInternals.clanLib.select ''{attr}'' flake" for attr in selectors])}) ]
            )
        """
        if tmp_store := nix_test_store():
@@ -31,6 +31,6 @@ Service = dict[str, Any]
 class Inventory(TypedDict):
     machines: NotRequired[dict[str, Machine]]
     meta: NotRequired[Meta]
-    modules: NotRequired[dict[str, str]]
+    modules: NotRequired[dict[str, Any]]
    services: NotRequired[dict[str, Service]]
-    tags: NotRequired[dict[str, list[str]]]
+    tags: NotRequired[dict[str, Any]]
@@ -5,4 +5,4 @@ set -euo pipefail
 jsonSchema=$(nix build .#schemas.inventory-schema-abstract --print-out-paths)/schema.json
 SCRIPT_DIR=$(dirname "$0")
 cd "$SCRIPT_DIR"
-nix run .#classgen -- "$jsonSchema" "../../../clan-cli/clan_cli/inventory/classes.py" --stop-at "Service"
+nix run .#classgen -- "$jsonSchema" "../../../clan-cli/clan_cli/inventory/classes.py"
@@ -135,6 +135,11 @@ def generate_machine_hardware_info(opts: HardwareGenerateOptions) -> HardwareConfig:
     ]

     host = machine.target_host

+    # HACK: to make non-root user work
+    if host.user != "root":
+        config_command.insert(0, "sudo")
+
    cmd = nix_shell(
        [
            "nixpkgs#openssh",
@@ -152,6 +152,7 @@ def deploy_machines(machines: list[Machine]) -> None:
             "--flake",
             f"{path}#{machine.name}",
         ]
+
        switch_cmd = ["nixos-rebuild", "switch", *nix_options]
        test_cmd = ["nixos-rebuild", "test", *nix_options]

@@ -160,6 +161,10 @@ def deploy_machines(machines: list[Machine]) -> None:
             switch_cmd.extend(["--target-host", target_host.target])
             test_cmd.extend(["--target-host", target_host.target])

+        if target_host and target_host.user != "root":
+            switch_cmd.extend(["--use-remote-sudo"])
+            test_cmd.extend(["--use-remote-sudo"])
+
        env = host.nix_ssh_env(None)
        ret = host.run(
            switch_cmd,
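For a non-root deploy user, the remote rebuild now goes through sudo on the target via nixos-rebuild's own flag. A rough sketch of the argv this produces (flake path, machine and user are illustrative, not taken from this diff):

# hypothetical values, only to show the composed command
nix_options = ["--flake", "/path/to/flake#machine"]
switch_cmd = ["nixos-rebuild", "switch", *nix_options]
switch_cmd.extend(["--target-host", "deploy@machine"])
switch_cmd.extend(["--use-remote-sudo"])
# -> nixos-rebuild switch --flake /path/to/flake#machine --target-host deploy@machine --use-remote-sudo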
@@ -185,6 +190,7 @@ def deploy_machines(machines: list[Machine]) -> None:
                 test_cmd,
                 RunOpts(msg_color=MsgColor(stderr=AnsiColor.DEFAULT)),
                 extra_env=env,
+                become_root=True,
            )

        # retry nixos-rebuild switch if the first attempt failed
@@ -193,6 +199,7 @@ def deploy_machines(machines: list[Machine]) -> None:
                 switch_cmd,
                 RunOpts(msg_color=MsgColor(stderr=AnsiColor.DEFAULT)),
                 extra_env=env,
+                become_root=True,
            )

        with AsyncRuntime() as runtime:
@@ -30,7 +30,7 @@ def import_sops(args: argparse.Namespace) -> None:
     if args.input_type:
         cmd += ["--input-type", args.input_type]
     cmd += ["--output-type", "json", "--decrypt", args.sops_file]
-    cmd = nix_shell(["nixpkgs#sops"], cmd)
+    cmd = nix_shell(["nixpkgs#sops", "nixpkgs#gnupg"], cmd)

    res = run(cmd, RunOpts(error_msg=f"Could not import sops file {file}"))
    secrets = json.loads(res.stdout)
@@ -233,7 +233,7 @@ def sops_run(
         raise ClanError(msg)
     sops_cmd.append(str(secret_path))

-    cmd = nix_shell(["nixpkgs#sops"], sops_cmd)
+    cmd = nix_shell(["nixpkgs#sops", "nixpkgs#gnupg"], sops_cmd)
    opts = (
        dataclasses.replace(run_opts, env=environ)
        if run_opts
@@ -1,21 +1,28 @@
 import tarfile
 from pathlib import Path
+from shlex import quote
 from tempfile import TemporaryDirectory

 from clan_cli.cmd import Log, RunOpts
 from clan_cli.cmd import run as run_local
+from clan_cli.errors import ClanError
 from clan_cli.ssh.host import Host


 def upload(
     host: Host,
-    local_src: Path,  # must be a directory
+    local_src: Path,
     remote_dest: Path,  # must be a directory
     file_user: str = "root",
     file_group: str = "root",
     dir_mode: int = 0o700,
     file_mode: int = 0o400,
 ) -> None:
+    # Check if the remote destination is at least 3 directories deep
+    if len(remote_dest.parts) < 3:
+        msg = f"The remote destination must be at least 3 directories deep. Got: {remote_dest}. Reason: The directory will be deleted with 'rm -rf'."
+        raise ClanError(msg)
+
    # Create the tarball from the temporary directory
    with TemporaryDirectory(prefix="facts-upload-") as tardir:
        tar_path = Path(tardir) / "upload.tar.gz"
@@ -55,50 +62,22 @@ def upload(
                 with local_src.open("rb") as f:
                     tar.addfile(tarinfo, f)

-    if local_src.is_dir():
-        cmd = [
-            *host.ssh_cmd(),
-            "rm",
-            "-r",
-            str(remote_dest),
-            ";",
-            "mkdir",
-            "-m",
-            f"{dir_mode:o}",
-            "-p",
-            str(remote_dest),
-            "&&",
-            "tar",
-            "-C",
-            str(remote_dest),
-            "-xzf",
-            "-",
-        ]
-    else:
-        # For single file, extract to parent directory and ensure correct name
-        cmd = [
-            *host.ssh_cmd(),
-            "rm",
-            "-f",
-            str(remote_dest),
-            ";",
-            "mkdir",
-            "-m",
-            f"{dir_mode:o}",
-            "-p",
-            str(remote_dest.parent),
-            "&&",
-            "tar",
-            "-C",
-            str(remote_dest.parent),
-            "-xzf",
-            "-",
-        ]
+    sudo = ""
+    if host.user != "root":
+        sudo = "sudo -- "
+
+    cmd = "rm -rf $0 && mkdir -m $1 -p $0 && tar -C $0 -xzf -"

    # TODO accept `input` to be an IO object instead of bytes so that we don't have to read the tarfile into memory.
    with tar_path.open("rb") as f:
        run_local(
-            cmd,
+            [
+                *host.ssh_cmd(),
+                "--",
+                f"{sudo}bash -c {quote(cmd)}",
+                str(remote_dest),
+                f"{dir_mode:o}",
+            ],
            RunOpts(
                input=f.read(),
                log=Log.BOTH,
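The rewritten upload path sends one quoted shell command over ssh and passes the destination and mode as positional parameters of bash -c ($0 and $1), so nothing user-controlled is interpolated into the remote command string. A rough illustration of how the remote invocation is assembled (destination and mode values are examples, not from this diff):

from shlex import quote

cmd = "rm -rf $0 && mkdir -m $1 -p $0 && tar -C $0 -xzf -"
remote = f"bash -c {quote(cmd)}"
argv_tail = ["--", remote, "/var/lib/secrets/machine", "700"]
# ssh <host> -- bash -c 'rm -rf $0 && mkdir -m $1 -p $0 && tar -C $0 -xzf -' /var/lib/secrets/machine 700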
@@ -39,7 +39,6 @@ class Generator:
     name: str
     files: list[Var] = field(default_factory=list)
     share: bool = False
-    validation: str | None = None
    prompts: list[Prompt] = field(default_factory=list)
    dependencies: list[str] = field(default_factory=list)

@@ -62,7 +61,6 @@ class Generator:
             name=data["name"],
             share=data["share"],
             files=[Var.from_json(data["name"], f) for f in data["files"].values()],
-            validation=data["validationHash"],
            dependencies=data["dependencies"],
            migrate_fact=data["migrateFact"],
            prompts=[Prompt.from_json(p) for p in data["prompts"].values()],
@@ -76,6 +74,13 @@ class Generator:
         )
         return final_script

+    @property
+    def validation(self) -> str | None:
+        assert self._machine is not None
+        return self._machine.eval_nix(
+            f'config.clan.core.vars.generators."{self.name}".validationHash'
+        )
+

 def bubblewrap_cmd(generator: str, tmpdir: Path) -> list[str]:
     test_store = nix_test_store()
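Making validation a property means the validationHash is no longer frozen into the Generator when it is parsed from JSON; each access re-evaluates it against the machine's current Nix config, which (together with the flush_caches calls added below) is what lets a dependent generator's hash change after its parent has produced new files. A hedged sketch of how a caller might use it; stored_hash here is a hypothetical value remembered from the last generation, not an API shown in this diff:

def needs_regeneration(generator, stored_hash: str | None) -> bool:
    # generator.validation goes through eval_nix, so it reflects the machine's
    # current configuration rather than the hash captured at parse time
    return generator.validation != stored_hash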
@@ -253,6 +258,8 @@ def execute_generator(
             machine.flake_dir,
             f"Update vars via generator {generator.name} for machine {machine.name}",
         )
+        if len(files_to_commit) > 0:
+            machine.flush_caches()


 def _ask_prompts(
@@ -456,8 +463,6 @@ def generate_vars_for_machine(
             public_vars_store=machine.public_vars_store,
             prompt_values=_ask_prompts(generator),
         )
-        # flush caches to make sure the new secrets are available in evaluation
-        machine.flush_caches()
    return True


@@ -45,7 +45,7 @@ def ask(
     text = f"Enter the value for {ident}:"
     if label:
         text = f"{label}"
+    log.info(f"Prompting value for {ident}")
    if MOCK_PROMPT_RESPONSE:
        return next(MOCK_PROMPT_RESPONSE)
    match input_type:
@@ -51,7 +51,7 @@ let
   testDependencies = testRuntimeDependencies ++ [
     gnupg
     stdenv.cc # Compiler used for certain native extensions
-    (pythonRuntime.withPackages (ps: (pyTestDeps ps) ++ (pyDeps ps)))
+    (pythonRuntime.withPackages pyTestDeps)
  ];

  source = runCommand "clan-cli-source" { } ''
@@ -60,7 +60,7 @@ let
     ln -sf ${nixpkgs'} $out/clan_cli/nixpkgs
     cp -r ${../../templates} $out/clan_cli/templates

-    ${classgen}/bin/classgen ${inventory-schema-abstract}/schema.json $out/clan_cli/inventory/classes.py --stop-at "Service"
+    ${classgen}/bin/classgen ${inventory-schema-abstract}/schema.json $out/clan_cli/inventory/classes.py
  '';

  # Create a custom nixpkgs for use within the project
@@ -127,7 +127,7 @@ pythonRuntime.pkgs.buildPythonApplication {
   # Define and expose the tests and checks to run in CI
   passthru.tests =
     (lib.mapAttrs' (n: lib.nameValuePair "clan-dep-${n}") testRuntimeDependenciesMap)
-    // lib.optionalAttrs (!stdenv.isDarwin) {
+    // {
      # disabled on macOS until we fix all remaining issues
      clan-pytest-without-core =
        runCommand "clan-pytest-without-core"
@@ -159,6 +159,8 @@ pythonRuntime.pkgs.buildPythonApplication {
           python -m pytest -m "not impure and not with_core" -n $jobs ./tests
           touch $out
         '';
+    }
+    // lib.optionalAttrs (!stdenv.isDarwin) {
      clan-pytest-with-core =
        runCommand "clan-pytest-with-core"
          {
@@ -169,7 +169,7 @@
       ];

       installPhase = ''
-        ${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.inventory-schema-abstract}/schema.json ./clan_cli/inventory/classes.py --stop-at "Service"
+        ${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.inventory-schema-abstract}/schema.json ./clan_cli/inventory/classes.py

        python docs.py reference
        mkdir -p $out
@@ -188,7 +188,7 @@
       ];

       installPhase = ''
-        ${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.inventory-schema-abstract}/schema.json ./clan_cli/inventory/classes.py --stop-at "Service"
+        ${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.inventory-schema-abstract}/schema.json ./clan_cli/inventory/classes.py
        mkdir -p $out
        # Retrieve python API Typescript types
        python api.py > $out/API.json
@@ -214,7 +214,7 @@
         classFile = "classes.py";
       };
       installPhase = ''
-        ${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.inventory-schema-abstract}/schema.json b_classes.py --stop-at "Service"
+        ${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.inventory-schema-abstract}/schema.json b_classes.py
        file1=$classFile
        file2=b_classes.py

@@ -46,6 +46,6 @@ mkShell {

     # Generate classes.py from inventory schema
     # This file is in .gitignore
-    ${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.inventory-schema-abstract}/schema.json $PKG_ROOT/clan_cli/inventory/classes.py --stop-at "Service"
+    ${self'.packages.classgen}/bin/classgen ${self'.legacyPackages.schemas.inventory-schema-abstract}/schema.json $PKG_ROOT/clan_cli/inventory/classes.py
  '';
 }
@@ -1,14 +1,12 @@
-import subprocess
-from pathlib import Path
-
 import pytest
 from clan_cli.custom_logger import setup_logging
-from clan_cli.nix import nix_shell

 pytest_plugins = [
     "temporary_dir",
     "root",
     "age_keys",
+    "gpg_keys",
+    "git_repo",
    "sshd",
    "command",
    "ports",
@@ -28,18 +26,3 @@ def pytest_sessionstart(session: pytest.Session) -> None:
     print(f"Session config: {session.config}")

     setup_logging(level="DEBUG")
-
-
-# fixture for git_repo
-@pytest.fixture
-def git_repo(tmp_path: Path) -> Path:
-    # initialize a git repository
-    cmd = nix_shell(["nixpkgs#git"], ["git", "init"])
-    subprocess.run(cmd, cwd=tmp_path, check=True)
-    # set user.name and user.email
-    cmd = nix_shell(["nixpkgs#git"], ["git", "config", "user.name", "test"])
-    subprocess.run(cmd, cwd=tmp_path, check=True)
-    cmd = nix_shell(["nixpkgs#git"], ["git", "config", "user.email", "test@test.test"])
-    subprocess.run(cmd, cwd=tmp_path, check=True)
-    # return the path to the git repository
-    return tmp_path
@@ -294,18 +294,19 @@ def create_flake(
     if tmp_store := nix_test_store():
         nix_options += ["--store", str(tmp_store)]

-    sp.run(
-        [
-            "nix",
-            "flake",
-            "lock",
-            flake,
-            "--extra-experimental-features",
-            "nix-command flakes",
-            *nix_options,
-        ],
-        check=True,
-    )
+    with locked_open(Path(lock_nix), "w"):
+        sp.run(
+            [
+                "nix",
+                "flake",
+                "lock",
+                flake,
+                "--extra-experimental-features",
+                "nix-command flakes",
+                *nix_options,
+            ],
+            check=True,
+        )

    if "/tmp" not in str(os.environ.get("HOME")):
        log.warning(
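locked_open is used here to serialize concurrent `nix flake lock` calls from parallel test workers against a shared lock file; its definition is not part of this diff. A plausible sketch of such a helper, assuming it is a simple flock-based context manager (names and behaviour are an assumption):

import fcntl
from collections.abc import Iterator
from contextlib import contextmanager
from pathlib import Path
from typing import IO

@contextmanager
def locked_open(path: Path, mode: str = "r") -> Iterator[IO]:
    # hypothetical implementation: hold an exclusive flock for the duration of the block
    with path.open(mode) as fd:
        fcntl.flock(fd, fcntl.LOCK_EX)
        try:
            yield fd
        finally:
            fcntl.flock(fd, fcntl.LOCK_UN)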
@@ -6,12 +6,44 @@
 #include <string.h>
 #include <sys/types.h>

+#ifdef __APPLE__
+#include <sandbox.h>
+#include <unistd.h>
+#endif
+
+#ifdef __APPLE__
+struct dyld_interpose {
+  const void *replacement;
+  const void *replacee;
+};
+#define WRAPPER(ret, name) static ret _fakeroot_wrapper_##name
+#define WRAPPER_DEF(name) \
+  __attribute__(( \
+      used)) static struct dyld_interpose _fakeroot_interpose_##name \
+      __attribute__((section("__DATA,__interpose"))) = { \
+          &_fakeroot_wrapper_##name, &name};
+#else
+#define WRAPPER(ret, name) ret name
+#define WRAPPER_DEF(name)
+#endif
+
 typedef struct passwd *(*getpwnam_type)(const char *name);

-struct passwd *getpwnam(const char *name) {
+WRAPPER(struct passwd *, getpwnam)(const char *name) {
   struct passwd *pw;
-  getpwnam_type orig_getpwnam;
-  orig_getpwnam = (getpwnam_type)dlsym(RTLD_NEXT, "getpwnam");
+#ifdef __APPLE__
+#define orig_getpwnam(name) getpwnam(name)
+#else
+  static getpwnam_type orig_getpwnam = NULL;
+
+  if (!orig_getpwnam) {
+    orig_getpwnam = (getpwnam_type)dlsym(RTLD_NEXT, "getpwnam");
+    if (!orig_getpwnam) {
+      fprintf(stderr, "dlsym error: %s\n", dlerror());
+      exit(1);
+    }
+  }
+#endif
  pw = orig_getpwnam(name);

  if (pw) {
@@ -21,6 +53,17 @@ struct passwd *getpwnam(const char *name) {
       exit(1);
     }
     pw->pw_shell = strdup(shell);
+    fprintf(stderr, "getpwnam: %s -> %s\n", name, pw->pw_shell);
  }
  return pw;
 }
+WRAPPER_DEF(getpwnam)
+
+#ifdef __APPLE__
+// sandbox_init(3) doesn't work in nix build sandbox
+WRAPPER(int, sandbox_init)(const char *profile, uint64_t flags, void *handle) {
+  return 0;
+}
+WRAPPER_DEF(sandbox_init)
+#else
+#endif
pkgs/clan-cli/tests/git_repo.py (new file, 20 lines)
@@ -0,0 +1,20 @@
+import subprocess
+from pathlib import Path
+
+import pytest
+from clan_cli.nix import nix_shell
+
+
+# fixture for git_repo
+@pytest.fixture
+def git_repo(temp_dir: Path) -> Path:
+    # initialize a git repository
+    cmd = nix_shell(["nixpkgs#git"], ["git", "init"])
+    subprocess.run(cmd, cwd=temp_dir, check=True)
+    # set user.name and user.email
+    cmd = nix_shell(["nixpkgs#git"], ["git", "config", "user.name", "test"])
+    subprocess.run(cmd, cwd=temp_dir, check=True)
+    cmd = nix_shell(["nixpkgs#git"], ["git", "config", "user.email", "test@test.test"])
+    subprocess.run(cmd, cwd=temp_dir, check=True)
+    # return the path to the git repository
+    return temp_dir
pkgs/clan-cli/tests/gpg_keys.py (new file, 25 lines)
@@ -0,0 +1,25 @@
+import shutil
+from dataclasses import dataclass
+from pathlib import Path
+
+import pytest
+
+
+@dataclass
+class GpgKey:
+    fingerprint: str
+    gpg_home: Path
+
+
+@pytest.fixture
+def gpg_key(
+    temp_dir: Path,
+    monkeypatch: pytest.MonkeyPatch,
+    test_root: Path,
+) -> GpgKey:
+    gpg_home = temp_dir / "gnupghome"
+
+    shutil.copytree(test_root / "data" / "gnupg-home", gpg_home)
+    monkeypatch.setenv("GNUPGHOME", str(gpg_home))
+
+    return GpgKey("9A9B2741C8062D3D3DF1302D8B049E262A5CA255", gpg_home)
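The fixture now ships a pre-generated keyring from the test data directory instead of generating a key at test time, and hands tests a structured GpgKey rather than a bare fingerprint string. A short sketch of how a test consumes it; the test body and env variable usage mirror use_gpg_key below, but the test itself is illustrative:

import pytest
from gpg_keys import GpgKey

def test_uses_pgp(gpg_key: GpgKey, monkeypatch: pytest.MonkeyPatch) -> None:
    # GNUPGHOME already points at the copied keyring via the fixture
    monkeypatch.setenv("SOPS_PGP_FP", gpg_key.fingerprint)
    assert gpg_key.gpg_home.exists()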
@@ -57,7 +57,10 @@ def sshd_config(test_root: Path) -> Iterator[SshdConfig]:
     )
     config = tmpdir / "sshd_config"
     config.write_text(content)
-    login_shell = tmpdir / "shell"
+    bin_path = tmpdir / "bin"
+    login_shell = bin_path / "shell"
+    fake_sudo = bin_path / "sudo"
+    login_shell.parent.mkdir(parents=True)

    bash = shutil.which("bash")
    path = os.environ["PATH"]
@@ -65,19 +68,23 @@ def sshd_config(test_root: Path) -> Iterator[SshdConfig]:

     login_shell.write_text(
         f"""#!{bash}
+set -x
 if [[ -f /etc/profile ]]; then
   source /etc/profile
 fi
-if [[ -n "$REALPATH" ]]; then
-  export PATH="$REALPATH:${path}"
-else
-  export PATH="${path}"
-fi
+export PATH="{bin_path}:{path}"
 exec {bash} -l "${{@}}"
 """
     )
     login_shell.chmod(0o755)
+
+    fake_sudo.write_text(
+        f"""#!{bash}
+exec "${{@}}"
+"""
+    )
+    fake_sudo.chmod(0o755)
+
    lib_path = None

    extension = ".so"
@@ -17,6 +17,14 @@ def test_select() -> None:
     assert not test_cache.is_cached(["x", "z", 1])


+def test_insert() -> None:
+    test_cache = FlakeCacheEntry({}, [])
+    # Inserting the same thing twice should succeed
+    test_cache.insert(None, ["nix"])
+    test_cache.insert(None, ["nix"])
+    assert test_cache.select(["nix"]) is None
+
+
 def test_out_path() -> None:
     testdict = {"x": {"y": [123, 345, 456], "z": "/nix/store/bla"}}
     test_cache = FlakeCacheEntry(testdict, [])
@@ -1,18 +1,16 @@
-import functools
 import json
 import logging
 import os
 import re
-import subprocess
 from collections.abc import Iterator
 from contextlib import contextmanager
-from pathlib import Path
 from typing import TYPE_CHECKING

 import pytest
 from age_keys import assert_secrets_file_recipients
 from clan_cli.errors import ClanError
 from fixtures_flakes import FlakeForTest
+from gpg_keys import GpgKey
 from helpers import cli
 from stdout import CaptureOutput

@@ -426,12 +424,12 @@ def use_age_key(key: str, monkeypatch: pytest.MonkeyPatch) -> Iterator[None]:


 @contextmanager
-def use_gpg_key(key: str, monkeypatch: pytest.MonkeyPatch) -> Iterator[None]:
+def use_gpg_key(key: GpgKey, monkeypatch: pytest.MonkeyPatch) -> Iterator[None]:
     old_key_file = os.environ.get("SOPS_AGE_KEY_FILE")
     old_key = os.environ.get("SOPS_AGE_KEY")
     monkeypatch.delenv("SOPS_AGE_KEY_FILE", raising=False)
     monkeypatch.delenv("SOPS_AGE_KEY", raising=False)
-    monkeypatch.setenv("SOPS_PGP_FP", key)
+    monkeypatch.setenv("SOPS_PGP_FP", key.fingerprint)
    try:
        yield
    finally:
@@ -442,54 +440,11 @@ def use_gpg_key(key: str, monkeypatch: pytest.MonkeyPatch) -> Iterator[None]:
             monkeypatch.setenv("SOPS_AGE_KEY", old_key)


-@pytest.fixture
-def gpg_key(
-    tmp_path: Path,
-    monkeypatch: pytest.MonkeyPatch,
-) -> str:
-    gpg_home = tmp_path / "gnupghome"
-    gpg_home.mkdir(mode=0o700)
-
-    gpg_environ = os.environ.copy()
-    gpg_environ["GNUPGHOME"] = str(gpg_home)
-    run = functools.partial(
-        subprocess.run,
-        encoding="utf-8",
-        check=True,
-        env=gpg_environ,
-    )
-    key_parameters = "\n".join(
-        (
-            "%no-protection",
-            "%transient-key",
-            "Key-Type: rsa",
-            "Key-Usage: cert encrypt",
-            "Name-Real: Foo Bar",
-            "Name-Comment: Test user",
-            "Name-Email: test@clan.lol",
-            "%commit",
-        )
-    )
-    run(["gpg", "--batch", "--quiet", "--generate-key"], input=key_parameters)
-    details = run(["gpg", "--list-keys", "--with-colons"], capture_output=True)
-    fingerprint = None
-    for line in details.stdout.strip().split(os.linesep):
-        if not line.startswith("fpr"):
-            continue
-        fingerprint = line.split(":")[9]
-        break
-    assert fingerprint is not None, "Could not generate test GPG key"
-    log.info(f"Created GPG key under {gpg_home}")
-
-    monkeypatch.setenv("GNUPGHOME", str(gpg_home))
-    return fingerprint
-
-
 def test_secrets(
     test_flake: FlakeForTest,
     capture_output: CaptureOutput,
     monkeypatch: pytest.MonkeyPatch,
-    gpg_key: str,
+    gpg_key: GpgKey,
    age_keys: list["KeyPair"],
 ) -> None:
    with capture_output as output:
@@ -716,7 +671,7 @@ def test_secrets(
                 "--flake",
                 str(test_flake.path),
                 "--pgp-key",
-                gpg_key,
+                gpg_key.fingerprint,
                "user2",
            ]
        )
@@ -783,7 +738,7 @@ def test_secrets_key_generate_gpg(
     test_flake: FlakeForTest,
     capture_output: CaptureOutput,
     monkeypatch: pytest.MonkeyPatch,
-    gpg_key: str,
+    gpg_key: GpgKey,
 ) -> None:
    with use_gpg_key(gpg_key, monkeypatch):
        # Make sure clan secrets key generate recognizes
@@ -805,7 +760,7 @@ def test_secrets_key_generate_gpg(
             cli.run(["secrets", "key", "show", "--flake", str(test_flake.path)])
         key = json.loads(output.out)
         assert key["type"] == "pgp"
-        assert key["publickey"] == gpg_key
+        assert key["publickey"] == gpg_key.fingerprint

        # Add testuser with the key that was (not) generated for the clan:
        cli.run(
@@ -816,7 +771,7 @@ def test_secrets_key_generate_gpg(
                 "--flake",
                 str(test_flake.path),
                 "--pgp-key",
-                gpg_key,
+                gpg_key.fingerprint,
                "testuser",
            ]
        )
@@ -833,7 +788,7 @@ def test_secrets_key_generate_gpg(
             )
         key = json.loads(output.out)
         assert key["type"] == "pgp"
-        assert key["publickey"] == gpg_key
+        assert key["publickey"] == gpg_key.fingerprint

        monkeypatch.setenv("SOPS_NIX_SECRET", "secret-value")
        cli.run(["secrets", "set", "--flake", str(test_flake.path), "secret-name"])
@@ -26,6 +26,17 @@ def test_secrets_upload(
     monkeypatch.chdir(str(flake.path))
     monkeypatch.setenv("SOPS_AGE_KEY", age_keys[0].privkey)

+    sops_dir = flake.path / "facts"
+
+    # the flake defines this path as the location where the sops key should be installed
+    sops_key = sops_dir / "key.txt"
+    sops_key2 = sops_dir / "key2.txt"
+
+    # Create old state, which should be cleaned up
+    sops_dir.mkdir()
+    sops_key.write_text("OLD STATE")
+    sops_key2.write_text("OLD STATE2")
+
    cli.run(
        [
            "secrets",
@@ -56,8 +67,6 @@ def test_secrets_upload(

     cli.run(["facts", "upload", "--flake", str(flake_path), "vm1"])

-    # the flake defines this path as the location where the sops key should be installed
-    sops_key = flake.path / "facts" / "key.txt"
-
    assert sops_key.exists()
    assert sops_key.read_text() == age_keys[0].privkey
+    assert not sops_key2.exists()
@@ -1,4 +1,5 @@
 import contextlib
+import sys
 from collections.abc import Generator
 from typing import Any, NamedTuple

@@ -127,6 +128,10 @@ def test_parse_ssh_options() -> None:
     assert host.ssh_options["StrictHostKeyChecking"] == "yes"


+is_darwin = sys.platform == "darwin"
+
+
+@pytest.mark.skipif(is_darwin, reason="preload doesn't work on darwin")
 def test_run(hosts: list[Host], runtime: AsyncRuntime) -> None:
     for host in hosts:
         proc = runtime.async_run(
@@ -135,6 +140,7 @@ def test_run(hosts: list[Host], runtime: AsyncRuntime) -> None:
         assert proc.wait().result.stdout == "hello\n"


+@pytest.mark.skipif(is_darwin, reason="preload doesn't work on darwin")
 def test_run_environment(hosts: list[Host], runtime: AsyncRuntime) -> None:
     for host in hosts:
         proc = runtime.async_run(
@@ -157,6 +163,7 @@ def test_run_environment(hosts: list[Host], runtime: AsyncRuntime) -> None:
         assert "env_var=true" in p2.wait().result.stdout


+@pytest.mark.skipif(is_darwin, reason="preload doesn't work on darwin")
 def test_run_no_shell(hosts: list[Host], runtime: AsyncRuntime) -> None:
     for host in hosts:
         proc = runtime.async_run(
@@ -165,6 +172,7 @@ def test_run_no_shell(hosts: list[Host], runtime: AsyncRuntime) -> None:
         assert proc.wait().result.stdout == "hello\n"


+@pytest.mark.skipif(is_darwin, reason="preload doesn't work on darwin")
 def test_run_function(hosts: list[Host], runtime: AsyncRuntime) -> None:
     def some_func(h: Host) -> bool:
         p = h.run(["echo", "hello"])
@@ -175,6 +183,7 @@ def test_run_function(hosts: list[Host], runtime: AsyncRuntime) -> None:
     assert proc.wait().result


+@pytest.mark.skipif(is_darwin, reason="preload doesn't work on darwin")
 def test_timeout(hosts: list[Host], runtime: AsyncRuntime) -> None:
     for host in hosts:
         proc = runtime.async_run(
@@ -184,6 +193,7 @@ def test_timeout(hosts: list[Host], runtime: AsyncRuntime) -> None:
     assert isinstance(error, ClanCmdTimeoutError)


+@pytest.mark.skipif(is_darwin, reason="preload doesn't work on darwin")
 def test_run_exception(hosts: list[Host], runtime: AsyncRuntime) -> None:
     for host in hosts:
         proc = runtime.async_run(
@@ -203,6 +213,7 @@ def test_run_exception(hosts: list[Host], runtime: AsyncRuntime) -> None:
             raise AssertionError(msg)


+@pytest.mark.skipif(is_darwin, reason="preload doesn't work on darwin")
 def test_run_function_exception(hosts: list[Host], runtime: AsyncRuntime) -> None:
     def some_func(h: Host) -> CmdOut:
         return h.run_local(["exit 1"], RunOpts(shell=True))
@@ -919,3 +919,75 @@ def test_invalidation(
         str(machine.flake.path), machine.name, "my_generator/my_value"
     ).printable_value
     assert value2 == value2_new
+
+
+@pytest.mark.with_core
+def test_dynamic_invalidation(
+    monkeypatch: pytest.MonkeyPatch,
+    flake: ClanFlake,
+) -> None:
+    gen_prefix = "config.clan.core.vars.generators"
+
+    machine = Machine(name="my_machine", flake=Flake(str(flake.path)))
+
+    config = flake.machines[machine.name]
+    config["nixpkgs"]["hostPlatform"] = "x86_64-linux"
+
+    my_generator = config["clan"]["core"]["vars"]["generators"]["my_generator"]
+    my_generator["files"]["my_value"]["secret"] = False
+    my_generator["script"] = "echo -n $RANDOM > $out/my_value"
+
+    dependent_generator = config["clan"]["core"]["vars"]["generators"][
+        "dependent_generator"
+    ]
+    dependent_generator["files"]["my_value"]["secret"] = False
+    dependent_generator["dependencies"] = ["my_generator"]
+    dependent_generator["script"] = "echo -n $RANDOM > $out/my_value"
+
+    flake.refresh()
+
+    # this is an abuse
+    custom_nix = flake.path / "machines" / machine.name / "hardware-configuration.nix"
+    custom_nix.write_text("""
+        { config, ... }: let
+            p = config.clan.core.vars.generators.my_generator.files.my_value.path;
+        in {
+            clan.core.vars.generators.dependent_generator.validation = if builtins.pathExists p then builtins.readFile p else null;
+        }
+    """)
+
+    flake.refresh()
+    machine.flush_caches()
+    monkeypatch.chdir(flake.path)
+
+    # before generating, dependent generator validation should be empty; see bogus hardware-configuration.nix above
+    # we have to avoid `*.files.value` in this initial select because the generators haven't been run yet
+    generators_0 = machine.eval_nix(f"{gen_prefix}.*.{{validationHash}}")
+    assert generators_0["dependent_generator"]["validationHash"] is None
+
+    # generate both my_generator and (the dependent) dependent_generator
+    cli.run(["vars", "generate", "--flake", str(flake.path), machine.name])
+    machine.flush_caches()
+
+    # after generating once, dependent generator validation should be set
+    generators_1 = machine.eval_nix(gen_prefix)
+    assert generators_1["dependent_generator"]["validationHash"] is not None
+
+    # after generating once, neither generator should want to run again because `clan vars generate` should have re-evaluated the dependent generator's validationHash after executing the parent generator but before executing the dependent generator
+    # this ensures that validation can depend on parent generators while still only requiring a single pass
+    cli.run(["vars", "generate", "--flake", str(flake.path), machine.name])
+    machine.flush_caches()
+
+    generators_2 = machine.eval_nix(gen_prefix)
+    assert (
+        generators_1["dependent_generator"]["validationHash"]
+        == generators_2["dependent_generator"]["validationHash"]
+    )
+    assert (
+        generators_1["my_generator"]["files"]["my_value"]["value"]
+        == generators_2["my_generator"]["files"]["my_value"]["value"]
+    )
+    assert (
+        generators_1["dependent_generator"]["files"]["my_value"]["value"]
+        == generators_2["dependent_generator"]["files"]["my_value"]["value"]
+    )
@@ -32,8 +32,14 @@ def map_json_type(
         return {"str"}
     if json_type == "integer":
         return {"int"}
+    if json_type == "number":
+        return {"float"}
    if json_type == "boolean":
        return {"bool"}
+    # In Python, "number" is analogous to the float type.
+    # https://json-schema.org/understanding-json-schema/reference/numeric#number
+    if json_type == "number":
+        return {"float"}
    if json_type == "array":
        assert nested_types, f"Array type not found for {parent}"
        return {f"""list[{" | ".join(nested_types)}]"""}
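The practical effect is that JSON schema "number" fields now map to Python float in the generated classes. A conceptual sketch of the type mapping after this change; note the real map_json_type also takes nested type information for arrays and objects, so these are not literal call results:

# conceptual mapping only
expected = {
    "string": {"str"},
    "integer": {"int"},
    "number": {"float"},  # new: JSON "number" -> Python float
    "boolean": {"bool"},
}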
@@ -48,7 +54,11 @@

 known_classes = set()
 root_class = "Inventory"
-stop_at = None
+# TODO: make this configurable
+# For now this only includes static top-level attributes of the inventory.
+attrs = ["machines", "meta", "services"]
+
+static: dict[str, str] = {"Service": "dict[str, Any]"}


 def field_def_from_default_type(
@@ -187,19 +197,32 @@ def get_field_def(


 # Recursive function to generate dataclasses from JSON schema
-def generate_dataclass(schema: dict[str, Any], class_name: str = root_class) -> str:
+def generate_dataclass(
+    schema: dict[str, Any],
+    attr_path: list[str],
+    class_name: str = root_class,
+) -> str:
     properties = schema.get("properties", {})
+
     required_fields = []
     fields_with_default = []
     nested_classes: list[str] = []
-    if stop_at and class_name == stop_at:
-        # Skip generating classes below the stop_at property
-        return f"{class_name} = dict[str, Any]"
+
+    # if We are at the top level, and the attribute name is in shallow
+    # return f"{class_name} = dict[str, Any]"
+    if class_name in static:
+        return f"{class_name} = {static[class_name]}"
+
     for prop, prop_info in properties.items():
+        # If we are at the top level, and the attribute name is not explicitly included we only do shallow
         field_name = prop.replace("-", "_")
+
+        if len(attr_path) == 0 and prop not in attrs:
+            field_def = f"{field_name}: NotRequired[dict[str, Any]]"
+            fields_with_default.append(field_def)
+            # breakpoint()
+            continue
+
        prop_type = prop_info.get("type", None)
        union_variants = prop_info.get("oneOf", [])
        enum_variants = prop_info.get("enum", [])
@@ -237,7 +260,9 @@ def generate_dataclass(schema: dict[str, Any], class_name: str = root_class) -> str:

                 if nested_class_name not in known_classes:
                     nested_classes.append(
-                        generate_dataclass(inner_type, nested_class_name)
+                        generate_dataclass(
+                            inner_type, [*attr_path, prop], nested_class_name
+                        )
                    )
                    known_classes.add(nested_class_name)

@@ -253,7 +278,9 @@ def generate_dataclass(schema: dict[str, Any], class_name: str = root_class) -> str:
                 field_types = {nested_class_name}
                 if nested_class_name not in known_classes:
                     nested_classes.append(
-                        generate_dataclass(prop_info, nested_class_name)
+                        generate_dataclass(
+                            prop_info, [*attr_path, prop], nested_class_name
+                        )
                    )
                    known_classes.add(nested_class_name)
            else:
@@ -318,6 +345,8 @@ def generate_dataclass(schema: dict[str, Any], class_name: str = root_class) -> str:
             )
             required_fields.append(field_def)

+    # breakpoint()
+
    fields_str = "\n ".join(required_fields + fields_with_default)
    nested_classes_str = "\n\n".join(nested_classes)

@@ -332,14 +361,11 @@ def generate_dataclass(schema: dict[str, Any], class_name: str = root_class) -> str:

 def run_gen(args: argparse.Namespace) -> None:
     print(f"Converting {args.input} to {args.output}")
-    if args.stop_at:
-        global stop_at
-        stop_at = args.stop_at
-
     dataclass_code = ""
     with args.input.open() as f:
         schema = json.load(f)
-        dataclass_code = generate_dataclass(schema)
+        dataclass_code = generate_dataclass(schema, [])

    with args.output.open("w") as f:
        f.write(
pkgs/webview-ui/app/package-lock.json (generated, 652 changed lines; file diff suppressed because it is too large)
@@ -11,6 +11,8 @@ export async function get_iwd_service(base_path: string, machine_name: string) {
   if (r.status == "error") {
     return null;
   }
+  // @FIXME: Clean this up once we implement the feature
+  // @ts-expect-error: This doesn't check currently
  const inventory: Inventory = r.data;

  const instance_key = instance_name(machine_name);