Compare commits
411 Commits
lassulus ... push-wqqzv
Commits in this comparison (abbreviated SHA1s):

01c9432cc5, f62e9db126, dcb2231332, 725eeb87ae, 66df677fd2, f7d15215ea, c25574bebd, fe5796ba17, f2e89d27fe, 06dd2ebf8c,
40740860c0, 89bc39869c, 84d0a2f2f0, 1d07737989, 9d386485dd, ee9ae9c76d, d4d4d77d2d, c0ebad1cd9, 86d0c95da7, 0fb1b5c5ce,
dc0349e835, cc8a74b195, 046fe0df36, 3f948fdbd4, eb35e6ea21, 4a0e1b3b6b, 1b8974d167, 5e2b5fe213, 74fb3abbc7, f2b04e74f1,
d3ae684575, 5b294e7651, 40ae510075, 48d910f11f, f242b9a35c, 978822d40a, fa6c3be21e, be61bac9af, 42b58910a9, a746b10578,
19341e4cb1, f4e06271ba, d93fe229b3, 5fc62806b1, e0be2f3435, a69b81488b, b133a2407a, 68ae27899a, b83d3ecba2, bec4317709,
f37f15c482, fae8ec318d, 8e2005f38c, 94781bb358, de740cf686, 064edf61ef, aaf58d7be8, 03f8e41291, 43bd4403c6, ebee55ffdc,
47e9e5a8f0, d1a79653fe, 351ce1414a, e2ccd979ed, f5f3f96809, 59253a9c71, aa03adc581, ffd84d50f7, 679387e4ba, 1d60f94cc5,
1235177541, 5c08e9a38d, 28dd54d866, 5baf37f7e9, ff669e2957, 8d4c1839e7, 0765d981c6, 10c27a0152, ccb5af9565, 828eff528a,
cbf47580cf, 355ac57ccb, 227e293421, 9b3621b516, 62f09a450f, 95282bd880, 7a49ec252e, 5f9ee97cab, c6be9bbf07, d77ae5eed0,
3c2888edc7, b0f23353ef, 3fccccc092, 0a5d1bf322, 9ca5cb7bcc, cc1b356a94, 9aa8c1b8eb, 709d773768, 845abd1356, 2b4a4f2422,
82da5b6734, 33a9fd8d3d, 4beb097a95, b4cd62b9f8, ee7b98c34d, 8552d4b3bd, 375edcff81, 3183b26777, 0feacaf300, 6917021996,
3965f7b59f, 610a70e4f8, 6134eb0293, 62e9fe8f9f, 5bc2d00014, 616b294b8c, 2d7b92b3f9, 0487670d30, 4cd174b268, a8b257f32c,
047b767054, c74d23b799, 850627c5c6, 60d56c4e3b, 4911901f7c, a96860a24b, c429b41d2e, fe305f7f47, 591d397df9, 8231979bae,
6899461d0d, 16b067d291, 93cbe62765, 7fef29d7aa, 952d1facce, a565a85a5e, 3d5ef5e909, a5c5033273, 0ee0351e3e, c02f19205f,
dbcb8d6a4c, 039b309255, 538374558d, ef5ad09b2d, 9780463e6a, cac4b1200c, c8db27340e, 31a9c74e88, dc8bfab65d, 33abb7ecd7,
fcbdae9d09, 27b5680441, f13971167f, e75b5f3a2e, d5c0a2eb9c, 8cc8d09a11, dfa3305450, 94415dfd0e, 6fb5bca801, 4162810ee1,
0b3badb0ef, 6a5954ad77, 02231b979b, 028f6a4d3d, 170908db7b, 39e6534dbb, 71809c1bdc, eecedf95e4, a208a9973c, d276d2faea,
d470283dca, 88dab7d8bd, 8474a0aaef, 5ab2f206ea, ea8037006f, 3a682a6b3e, 0556ea624f, 8671fd7407, 3a9f0eb608, 1736b0f539,
eb375f3d81, 6162b82adb, 085189d1c4, 3cb22ad2a1, 27269d4ed9, 7cbedc74a5, 5ac30a767b, 89c6bcda4d, 51da020de2, e943d8531f,
13b9c23db9, ad43f323b8, aeb3cc4428, d81ca7206b, 0011cf594a, 41cd4533ba, c15544e928, fa0fe23985, 1497e76bc2, b3d9c23e39,
5520641feb, 97f5a6bd4c, 3b2b5db84a, 84da7d437d, b2db2c7abc, cb104b700d, 41054885db, 70c63221ec, 9c130c73e4, 178fff0618,
6324b495ee, ce7a70f9e1, 7102af9bd9, b38fddaf29, e7ffcedd14, b5a66e767b, 854d0fa83e, 4ccf5ca373, 781d439567, 68e00ff613,
828028e4b3, b48d07f5c5, ea8c9ed649, 68cb04c958, b8cb85fc72, bdb97308d0, 9708bdc6e7, 9ac8a45f1d, a14fe1aef8, b1401d6e6b,
f882c86fb0, 98d566c46e, c4ec4ccb3f, 5a6677379a, 30d19d088f, f3c45eb23e, eaac6c76e2, 0939b29a8e, a2a395cdb0, df7429dbe7,
362faaf063, e215a9db6e, a5dd76b66d, 4472c51c25, c6cf9d1336, 9b6e42790e, 547b012e0b, 9797ef792a, fe0de90a28, 539fd30206,
a11d5471ec, 19f2facbce, 468a25034e, a2b76eb5a2, ba0ed30997, 2a4d2c9cb5, 4c1e74fae6, cee62bf168, a865213894, d8f9375580,
526072806f, 91a19d9ea9, 38c7644692, 726f2ab5f8, 5918620535, 58e85eda9c, e98e817941, fe92c7d1e6, 4222f9788c, 3d80423259,
186e81d8b9, 212c899767, 312c12c98f, 2ec4e49650, 4e5b4a1b80, ccb3bdb740, a903a9028b, ba28691747, e7aa5cfb4e, 8b74147721,
299180703e, 6c941deb96, 39761946a0, b71e16dd5d, 0da1a05b55, 3551d061ce, 6099aeb0c6, bcd6c7108a, d20f13abe7, cfeda1f06d,
73dd981f71, bc239e104c, bd2702df6d, 7b0e652a7a, 0c0eafe0f5, 3e0cd4bdfb, 2cf40fea51, 40d1a76d8a, 60b22fdf0e, cb13e7fab8,
b82a3b6085, 44345ed28b, 456b25c921, dfb5e5123f, 636ee65428, cbf8685f6e, 500af543bb, 46971aa51f, 3d83266916, b87768d44a,
5b821c610d, 347a5a5f76, 8f6dd4acc4, f3cbd0b289, 7b8a980336, d53e062024, 5ac629f549, 6c7fc15c0e, 3121c5ecdb, ada544ef56,
3e0f9f52bb, 3992d0ed0d, 6037dde559, baa0a615ea, b0760bc2b9, 6a33fe8e7a, 1f3bd09245, 122dbf4240, 8ac286bcaf, 8fcc004b68,
37bbbefa8e, d44def5381, 03ce74fc74, 6c8137d30b, 27a3126d68, faee6c2a79, 6070219b1a, a5e32f9b6d, 89e3793831, fd908e18c3,
a4d4b991a1, 4670525106, 5a0ed03c56, af228db398, b0e7de3c8b, cb89fb0847, 014aec9531, 160bbfcb37, 5c68e129b7, bc53c7a886,
61c1943ccc, c3013c1a02, 3cff6577da, c795a1d895, 66e166068e, 0c7173afd0, d5e391ecc8, 2a3bc7b31b, b54346ce03, 39bc7c1f17,
153b5560c3, 2412513ad4, 873f650678, 35aedddf65, 663ab70465, 4f1e2ba582, d3bd120a04, f8bf39e43a, 93a7e272b1, de3153259d,
bf492d4deb, 41cb679eab, b138cfcd69, a22d426b25, c0f07afb98, 0eaaabcf63, 7df51d0474, 5a6038f742, 15e8df894e, 50924ad7ff,
2e212e3e31, 23b57b0a3a, 69d092c46b, 2663a181d0, 9ab81a9c5d, 0872b781d7, 86e91c8604, 14377f25c9, 9b706c148b, dee284d669,
718e553211, cbe3cb94b7, 91661da320, 7ebc11f96f, 27ef7040c2, 283aad7ea0, 775088ccd9, d71a8329f2, 022d0babc5, 934d8fc2a4,
e75b50e335, f9fc6904f0, 6deaab506a, 32748c14f4, 6d2845c645, 4899c38e52, 0d69d72899, 34904b8758, 51d65873a7, 02929e9d42,
2018de8d9e
@@ -8,5 +8,5 @@ jobs:
   checks-impure:
     runs-on: nix
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - run: nix run .#impure-checks
@@ -7,7 +7,7 @@ jobs:
   deploy-docs:
     runs-on: nix
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - run: nix run .#deploy-docs
        env:
          SSH_HOMEPAGE_KEY: ${{ secrets.SSH_HOMEPAGE_KEY }}
6 .github/dependabot.yml vendored Normal file
@@ -0,0 +1,6 @@
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
9 .github/workflows/repo-sync.yml vendored
@@ -3,10 +3,8 @@ on:
   schedule:
     - cron: "39 * * * *"
   workflow_dispatch:

-permissions:
-  contents: write

 jobs:
   repo-sync:
     if: github.repository_owner == 'clan-lol'
@@ -15,10 +13,15 @@ jobs:
       - uses: actions/checkout@v4
         with:
           persist-credentials: false
+      - uses: actions/create-github-app-token@v1
+        id: app-token
+        with:
+          app-id: ${{ vars.CI_APP_ID }}
+          private-key: ${{ secrets.CI_PRIVATE_KEY }}
       - name: repo-sync
         uses: repo-sync/github-sync@v2
         env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          GITHUB_TOKEN: ${{ steps.app-token.outputs.token }}
         with:
           source_repo: "https://git.clan.lol/clan/clan-core.git"
           source_branch: "main"
@@ -1,3 +1,4 @@
 # Contributing to Clan

+<!-- Local file: docs/CONTRIBUTING.md -->
 Go to the Contributing guide at https://docs.clan.lol/manual/contribute/
@@ -5,6 +5,12 @@
       fileSystems."/".device = "/dev/null";
       boot.loader.grub.device = "/dev/null";
     };
+    clan.inventory.services = {
+      borgbackup.test-backup = {
+        roles.client.machines = [ "test-backup" ];
+        roles.server.machines = [ "test-backup" ];
+      };
+    };
     flake.nixosModules = {
       test-backup =
         {
@@ -22,12 +28,20 @@
        in
        {
          imports = [
            self.clanModules.borgbackup
            # Do not import inventory modules. They should be configured via 'clan.inventory'
            #
            # TODO: Configure localbackup via inventory
            self.clanModules.localbackup
          ];
          # Borgbackup overrides
          services.borgbackup.repos.test-backups = {
            path = "/var/lib/borgbackup/test-backups";
            authorizedKeys = [ (builtins.readFile ../lib/ssh/pubkey) ];
          };
          clan.borgbackup.destinations.test-backup.repo = lib.mkForce "borg@machine:.";

          clan.core.networking.targetHost = "machine";
          networking.hostName = "machine";
          nixpkgs.hostPlatform = "x86_64-linux";

          programs.ssh.knownHosts = {
            machine.hostNames = [ "machine" ];
@@ -108,7 +122,6 @@
          '';
          folders = [ "/var/test-service" ];
        };
-       clan.borgbackup.destinations.test-backup.repo = "borg@machine:.";

        fileSystems."/mnt/external-disk" = {
          device = "/dev/vdb"; # created in tests with virtualisation.emptyDisks
@@ -129,11 +142,6 @@
            touch /run/unmount-external-disk
          '';
        };

-       services.borgbackup.repos.test-backups = {
-         path = "/var/lib/borgbackup/test-backups";
-         authorizedKeys = [ (builtins.readFile ../lib/ssh/pubkey) ];
-       };
      };
    };
  perSystem =
@@ -152,27 +160,27 @@
            "flake.lock"
            "flakeModules"
            "inventory.json"
            "lib/build-clan"
            "lib/default.nix"
            "lib/flake-module.nix"
            "lib/frontmatter"
            "lib/inventory"
            "nixosModules"
            # Just include everything in 'lib'
            # If anything changes in /lib that may affect everything
            "lib"
          ];
        };
      in
      {
        # Needs investigation on aarch64-linux
        # vm-test-run-test-backups> qemu-kvm: No machine specified, and there is no default
        # vm-test-run-test-backups> Use -machine help to list supported machines
-       checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux") {
+       checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
          test-backups = (import ../lib/container-test.nix) {
            name = "test-backups";
            nodes.machine = {
-             imports = [
-               self.nixosModules.clanCore
-               self.nixosModules.test-backup
-             ];
+             imports =
+               [
+                 self.nixosModules.clanCore
+                 # Some custom overrides for the backup tests
+                 self.nixosModules.test-backup
+               ]
+               ++
+               # import the inventory generated nixosModules
+               self.clanInternals.inventoryClass.machines.test-backup.machineImports;
              clan.core.settings.directory = ./.;
              environment.systemPackages = [
                (pkgs.writeShellScriptBin "foo" ''
@@ -1,7 +1,7 @@
 (import ../lib/container-test.nix) (
   { ... }:
   {
-    name = "secrets";
+    name = "container";

     nodes.machine =
       { ... }:
138 checks/data-mesher/default.nix Normal file
@@ -0,0 +1,138 @@
(import ../lib/test-base.nix) (
  { self, lib, ... }:
  let

    inherit (self.lib.inventory) buildInventory;

    machines = [
      "signer"
      "admin"
      "peer"
    ];

    serviceConfigs = buildInventory {
      inventory = {
        machines = lib.genAttrs machines (_: { });
        services = {
          data-mesher.default = {
            roles.peer.machines = [ "peer" ];
            roles.admin.machines = [ "admin" ];
            roles.signer.machines = [ "signer" ];
          };
        };
        modules = {
          data-mesher = self.clanModules.data-mesher;
        };
      };
      directory = ./.;
    };

    commonConfig =
      { config, ... }:
      {

        imports = [ self.nixosModules.clanCore ];

        clan.core.settings.directory = builtins.toString ./.;

        environment.systemPackages = [
          config.services.data-mesher.package
        ];

        clan.core.vars.settings.publicStore = "in_repo";
        clan.core.vars.settings.secretStore = "vm";

        clan.data-mesher.network.interface = "eth1";
        clan.data-mesher.bootstrapNodes = [
          "[2001:db8:1::1]:7946" # peer1
          "[2001:db8:1::2]:7946" # peer2
        ];

        # speed up for testing
        services.data-mesher.settings = {
          cluster.join_interval = lib.mkForce "2s";
          cluster.push_pull_interval = lib.mkForce "5s";
        };

        systemd.tmpfiles.settings."vmsecrets" = {
          "/etc/secrets" = {
            C.argument = "${./vars/secret/${config.clan.core.settings.machine.name}}";
            z = {
              mode = "0700";
              user = "data-mesher";
            };
          };
        };
      };

    adminConfig = {
      imports = serviceConfigs.machines.admin.machineImports;

      config.clan.data-mesher.network.tld = "foo";
      config.clan.core.settings.machine.name = "admin";
    };

    peerConfig = {
      imports = serviceConfigs.machines.peer.machineImports;
      config.clan.core.settings.machine.name = "peer";
    };

    signerConfig = {
      imports = serviceConfigs.machines.signer.machineImports;
      clan.core.settings.machine.name = "signer";
    };

  in
  {
    name = "data-mesher";

    nodes = {
      peer = {
        imports = [
          peerConfig
          commonConfig
        ];
      };

      admin = {
        imports = [
          adminConfig
          commonConfig
        ];
      };

      signer = {
        imports = [
          signerConfig
          commonConfig
        ];
      };
    };

    # TODO Add better test script.
    testScript = ''

      def resolve(node, success = {}, fail = [], timeout = 60):
          for hostname, ips in success.items():
              for ip in ips:
                  node.wait_until_succeeds(f"getent ahosts {hostname} | grep {ip}", timeout)

          for hostname in fail:
              node.wait_until_fails(f"getent ahosts {hostname}")

      start_all()

      admin.wait_for_unit("data-mesher")
      signer.wait_for_unit("data-mesher")
      peer.wait_for_unit("data-mesher")

      # check dns resolution
      for node in [admin, signer, peer]:
          resolve(node, {
              "admin.foo": ["2001:db8:1::1", "192.168.1.1"],
              "peer.foo": ["2001:db8:1::2", "192.168.1.2"],
              "signer.foo": ["2001:db8:1::3", "192.168.1.3"]
          })
    '';
  }
)
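For orientation before the vars fixtures that follow: the shape of `buildInventory`'s result that this test relies on, inferred purely from the usage above (not from documentation), is roughly:

```nix
# Hypothetical shape, inferred from `serviceConfigs.machines.<name>.machineImports`:
# {
#   machines = {
#     admin  = { machineImports = [ /* role modules for "admin"  */ ]; };
#     peer   = { machineImports = [ /* role modules for "peer"   */ ]; };
#     signer = { machineImports = [ /* role modules for "signer" */ ]; };
#   };
# }
```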
@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAV/XZHv1UQEEzfD2YbJP1Q2jd1ZDG+CP5wvGf/1hcR+Q=
-----END PUBLIC KEY-----
@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAKSSUXJCftt5Vif6ek57CNKBcDRNfrWrxZUHjAIFW9HY=
-----END PUBLIC KEY-----
@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAvLD0mHQA+hf9ItlUHD0ml3i5XEArmmjwCC5rYEOmzWs=
-----END PUBLIC KEY-----
@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIFX+AzHy821hHqWLPeK3nzRuHod3FNrnPfaDoFvpz6LX
-----END PRIVATE KEY-----
@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIMwuDntiLoC7cFFyttGDf7cQWlOXOR0q90Jz3lEiuLg+
-----END PRIVATE KEY-----
@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIPmH2+vjYG6UOp+/g0Iqu7yZZKId5jffrfsySE36yO+D
-----END PRIVATE KEY-----
@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEINS0tSnjHPG8IfpzQAS3wzoJA+4mYM70DIpltN8O4YD7
-----END PRIVATE KEY-----
@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEA3P18+R5Gt+Jn7wYXpWNTXM5pyWn2WiOWekYCzXqWPwg=
-----END PUBLIC KEY-----
@@ -12,6 +12,8 @@ in
    ./flash/flake-module.nix
    ./impure/flake-module.nix
    ./installation/flake-module.nix
+   ./installation-without-system/flake-module.nix
+   ./morph/flake-module.nix
    ./nixos-documentation/flake-module.nix
  ];
  perSystem =
@@ -39,6 +41,7 @@ in
    borgbackup = import ./borgbackup nixosTestArgs;
    matrix-synapse = import ./matrix-synapse nixosTestArgs;
    mumble = import ./mumble nixosTestArgs;
+   data-mesher = import ./data-mesher nixosTestArgs;
    syncthing = import ./syncthing nixosTestArgs;
    zt-tcp-relay = import ./zt-tcp-relay nixosTestArgs;
    postgresql = import ./postgresql nixosTestArgs;
@@ -48,7 +51,7 @@ in
    flakeOutputs =
      lib.mapAttrs' (
        name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
-     ) self.nixosConfigurations
+     ) (lib.filterAttrs (n: _: !lib.hasPrefix "test-" n) self.nixosConfigurations)
      // lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages
      // lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
      // lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (
@@ -1,12 +1,26 @@
-{ self, lib, ... }:
-{
-  clan.machines.test-flash-machine = {
-    clan.core.networking.targetHost = "test-flash-machine";
-    fileSystems."/".device = lib.mkDefault "/dev/vda";
-    boot.loader.grub.device = lib.mkDefault "/dev/vda";
+{
+  config,
+  self,
+  lib,
+  ...
+}:
+{
+  clan.machines = lib.listToAttrs (
+    lib.map (
+      system:
+      lib.nameValuePair "test-flash-machine-${system}" {
+        clan.core.networking.targetHost = "test-flash-machine";
+        fileSystems."/".device = lib.mkDefault "/dev/vda";
+        boot.loader.grub.device = lib.mkDefault "/dev/vda";

-    imports = [ self.nixosModules.test-flash-machine ];
-  };
+        # We need to use `mkForce` because we inherit from `test-install-machine`
+        # which currently hardcodes `nixpkgs.hostPlatform`
+        nixpkgs.hostPlatform = lib.mkForce system;
+
+        imports = [ self.nixosModules.test-flash-machine ];
+      }
+    ) (lib.filter (lib.hasSuffix "linux") config.systems)
+  );

 flake.nixosModules = {
   test-flash-machine =
@@ -30,20 +44,20 @@
      let
        dependencies = [
          pkgs.disko
-         self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.pkgs.perlPackages.ConfigIniFiles
-         self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.pkgs.perlPackages.FileSlurp
+         self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.ConfigIniFiles
+         self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.FileSlurp

-         self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.build.toplevel
-         self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.build.diskoScript
-         self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.build.diskoScript.drvPath
-         self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.clan.deployment.file
+         self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
+         self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
+         self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript.drvPath
+         self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.clan.deployment.file

        ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
        closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
      in
      {
-       checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
-         flash = (import ../lib/test-base.nix) {
+       checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
+         test-flash = (import ../lib/test-base.nix) {
            name = "flash";
            nodes.target = {
              virtualisation.emptyDiskImages = [ 4096 ];
@@ -65,7 +79,7 @@
          testScript = ''
            start_all()

-           machine.succeed("clan flash write --debug --flake ${../..} --yes --disk main /dev/vdb test-flash-machine")
+           machine.succeed("clan flash write --debug --flake ${../..} --yes --disk main /dev/vdb test-flash-machine-${pkgs.hostPlatform.system}")
          '';
        } { inherit pkgs self; };
      };
241 checks/installation-without-system/flake-module.nix Normal file
@@ -0,0 +1,241 @@
{
  self,
  lib,
  ...
}:
{
  # The purpose of this test is to ensure `clan machines install` works
  # for machines that don't have a hardware config yet.

  # If this test starts failing it could be due to the `facter.json` being out of date
  # you can get a new one by adding
  # client.fail("cat test-flake/machines/test-install-machine/facter.json >&2")
  # to the installation test.
  clan.machines.test-install-machine-without-system = {
    fileSystems."/".device = lib.mkDefault "/dev/vda";
    boot.loader.grub.device = lib.mkDefault "/dev/vda";

    imports = [ self.nixosModules.test-install-machine-without-system ];
  };
  clan.machines.test-install-machine-with-system =
    { pkgs, ... }:
    {
      # https://git.clan.lol/clan/test-fixtures
      facter.reportPath = builtins.fetchurl {
        url = "https://git.clan.lol/clan/test-fixtures/raw/commit/4a2bc56d886578124b05060d3fb7eddc38c019f8/nixos-vm-facter-json/${pkgs.hostPlatform.system}.json";
        sha256 =
          {
            aarch64-linux = "sha256:1rlfymk03rmfkm2qgrc8l5kj5i20srx79n1y1h4nzlpwaz0j7hh2";
            x86_64-linux = "sha256:16myh0ll2gdwsiwkjw5ba4dl23ppwbsanxx214863j7nvzx42pws";
          }
          .${pkgs.hostPlatform.system};
      };

      fileSystems."/".device = lib.mkDefault "/dev/vda";
      boot.loader.grub.device = lib.mkDefault "/dev/vda";

      imports = [ self.nixosModules.test-install-machine-without-system ];
    };
  flake.nixosModules = {
    test-install-machine-without-system =
      { lib, modulesPath, ... }:
      {
        imports = [
          (modulesPath + "/testing/test-instrumentation.nix") # we need these 2 modules always to be able to run the tests
          (modulesPath + "/profiles/qemu-guest.nix")
          ../lib/minify.nix
        ];

        networking.hostName = "test-install-machine";

        environment.etc."install-successful".text = "ok";

        boot.consoleLogLevel = lib.mkForce 100;
        boot.kernelParams = [ "boot.shell_on_fail" ];

        # disko config
        boot.loader.grub.efiSupport = lib.mkDefault true;
        boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
        clan.core.vars.settings.secretStore = "vm";
        clan.core.vars.generators.test = {
          files.test.neededFor = "partitioning";
          script = ''
            echo "notok" > $out/test
          '';
        };
        disko.devices = {
          disk = {
            main = {
              type = "disk";
              device = "/dev/vda";

              preCreateHook = ''
                test -e /run/partitioning-secrets/test/test
              '';

              content = {
                type = "gpt";
                partitions = {
                  boot = {
                    size = "1M";
                    type = "EF02"; # for grub MBR
                    priority = 1;
                  };
                  ESP = {
                    size = "512M";
                    type = "EF00";
                    content = {
                      type = "filesystem";
                      format = "vfat";
                      mountpoint = "/boot";
                      mountOptions = [ "umask=0077" ];
                    };
                  };
                  root = {
                    size = "100%";
                    content = {
                      type = "filesystem";
                      format = "ext4";
                      mountpoint = "/";
                    };
                  };
                };
              };
            };
          };
        };
      };
  };
  perSystem =
    {
      pkgs,
      lib,
      ...
    }:
    let
      dependencies = [
        self
        self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
        self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
        self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.clan.deployment.file
        pkgs.stdenv.drvPath
        pkgs.bash.drvPath
        pkgs.nixos-anywhere
        pkgs.bubblewrap
      ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
      closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
      # with Nix 2.24 we get:
      # vm-test-run-test-installation> client # error: sized: unexpected end-of-file
      # vm-test-run-test-installation> client # error: unexpected end-of-file
      # This seems to be fixed with Nix 2.26
      # Remove this line once `pkgs.nix` is 2.26+
      nixPackage =
        assert
          lib.versionOlder pkgs.nix.version "2.26"
          && lib.versionAtLeast pkgs.nixVersions.latest.version "2.26";
        pkgs.nixVersions.latest;
    in
    {
      # On aarch64-linux, hangs on reboot after installation:
      # vm-test-run-test-installation-without-system> installer # [ 288.002871] reboot: Restarting system
      # vm-test-run-test-installation-without-system> client # [test-install-machine] ### Done! ###
      # vm-test-run-test-installation-without-system> client # [test-install-machine] + step 'Done!'
      # vm-test-run-test-installation-without-system> client # [test-install-machine] + echo '### Done! ###'
      # vm-test-run-test-installation-without-system> client # [test-install-machine] + rm -rf /tmp/tmp.qb16EAq7hJ
      # vm-test-run-test-installation-without-system> (finished: must succeed: clan machines install --debug --flake test-flake --yes test-install-machine-without-system --target-host root@installer --update-hardware-config nixos-facter >&2, in 154.62 seconds)
      # vm-test-run-test-installation-without-system> target: starting vm
      # vm-test-run-test-installation-without-system> target: QEMU running (pid 144)
      # vm-test-run-test-installation-without-system> target: waiting for unit multi-user.target
      # vm-test-run-test-installation-without-system> target: waiting for the VM to finish booting
      # vm-test-run-test-installation-without-system> target: Guest root shell did not produce any data yet...
      # vm-test-run-test-installation-without-system> target: To debug, enter the VM and run 'systemctl status backdoor.service'.
      checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
        test-installation-without-system = (import ../lib/test-base.nix) {
          name = "test-installation-without-system";
          nodes.target = {
            services.openssh.enable = true;
            virtualisation.diskImage = "./target.qcow2";
            virtualisation.useBootLoader = true;
            nix.package = nixPackage;
          };
          nodes.installer =
            { modulesPath, ... }:
            {
              imports = [
                (modulesPath + "/../tests/common/auto-format-root-device.nix")
              ];
              services.openssh.enable = true;
              system.nixos.variant_id = "installer";
              environment.systemPackages = [ pkgs.nixos-facter ];
              virtualisation.emptyDiskImages = [ 512 ];
              virtualisation.diskSize = 8 * 1024;
              virtualisation.rootDevice = "/dev/vdb";
              # both installer and target need to use the same diskImage
              virtualisation.diskImage = "./target.qcow2";
              nix.package = nixPackage;
              nix.settings = {
                substituters = lib.mkForce [ ];
                hashed-mirrors = null;
                connect-timeout = lib.mkForce 3;
                flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
                experimental-features = [
                  "nix-command"
                  "flakes"
                ];
              };
              users.users.nonrootuser = {
                isNormalUser = true;
                openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
                extraGroups = [ "wheel" ];
              };
              security.sudo.wheelNeedsPassword = false;
              system.extraDependencies = dependencies;
            };
          nodes.client = {
            environment.systemPackages = [
              self.packages.${pkgs.system}.clan-cli
            ] ++ self.packages.${pkgs.system}.clan-cli.runtimeDependencies;
            environment.etc."install-closure".source = "${closureInfo}/store-paths";
            virtualisation.memorySize = 3048;
            nix.package = nixPackage;
            nix.settings = {
              substituters = lib.mkForce [ ];
              hashed-mirrors = null;
              connect-timeout = lib.mkForce 3;
              flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
              experimental-features = [
                "nix-command"
                "flakes"
              ];
            };
            system.extraDependencies = dependencies;
          };

          testScript = ''
            client.start()
            installer.start()

            client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
            client.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v nonrootuser@installer hostname")
            client.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
            client.fail("test -f test-flake/machines/test-install-machine-without-system/hardware-configuration.nix")
            client.fail("test -f test-flake/machines/test-install-machine-without-system/facter.json")
            client.succeed("clan machines update-hardware-config --flake test-flake test-install-machine-without-system nonrootuser@installer >&2")
            client.succeed("test -f test-flake/machines/test-install-machine-without-system/facter.json")
            client.succeed("rm test-flake/machines/test-install-machine-without-system/facter.json")
            client.succeed("clan machines install --debug --flake test-flake --yes test-install-machine-without-system --target-host nonrootuser@installer --update-hardware-config nixos-facter >&2")
            try:
                installer.shutdown()
            except BrokenPipeError:
                # qemu has already exited
                pass

            target.state_dir = installer.state_dir
            target.start()
            target.wait_for_unit("multi-user.target")
            assert(target.succeed("cat /etc/install-successful").strip() == "ok")
          '';
        } { inherit pkgs self; };
      };
    };
}
@@ -23,7 +23,6 @@

       environment.etc."install-successful".text = "ok";

-      nixpkgs.hostPlatform = "x86_64-linux";
       boot.consoleLogLevel = lib.mkForce 100;
       boot.kernelParams = [ "boot.shell_on_fail" ];
@@ -89,14 +88,26 @@
      let
        dependencies = [
          self
-         self.nixosConfigurations.test-install-machine.config.system.build.toplevel
-         self.nixosConfigurations.test-install-machine.config.system.build.diskoScript
-         self.nixosConfigurations.test-install-machine.config.system.clan.deployment.file
+         self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.toplevel
+         self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.build.diskoScript
+         self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine.config.system.clan.deployment.file
          pkgs.bash.drvPath
          pkgs.stdenv.drvPath
          pkgs.nixos-anywhere
          pkgs.bubblewrap
        ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
        closureInfo = pkgs.closureInfo { rootPaths = dependencies; };

+       # with Nix 2.24 we get:
+       # vm-test-run-test-installation> client # error: sized: unexpected end-of-file
+       # vm-test-run-test-installation> client # error: unexpected end-of-file
+       # This seems to be fixed with Nix 2.26
+       # Remove this line once `pkgs.nix` is 2.26+
+       nixPackage =
+         assert
+           lib.versionOlder pkgs.nix.version "2.26"
+           && lib.versionAtLeast pkgs.nixVersions.latest.version "2.26";
+         pkgs.nixVersions.latest;
      in
      {
        # On aarch64-linux, hangs on reboot after installation:
@@ -108,13 +119,14 @@
        # vm-test-run-test-installation> new_machine: QEMU running (pid 80)
        # vm-test-run-test-installation> new_machine: Guest root shell did not produce any data yet...
        # vm-test-run-test-installation> new_machine: To debug, enter the VM and run 'systemctl status backdoor.service'.
-       checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux") {
+       checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
          test-installation = (import ../lib/test-base.nix) {
            name = "test-installation";
            nodes.target = {
              services.openssh.enable = true;
              virtualisation.diskImage = "./target.qcow2";
              virtualisation.useBootLoader = true;
+             nix.package = nixPackage;

              # virtualisation.fileSystems."/" = {
              #   device = "/dev/disk/by-label/this-is-not-real-and-will-never-be-used";
@@ -136,6 +148,7 @@
            virtualisation.rootDevice = "/dev/vdb";
            # both installer and target need to use the same diskImage
            virtualisation.diskImage = "./target.qcow2";
+           nix.package = nixPackage;
            nix.settings = {
              substituters = lib.mkForce [ ];
              hashed-mirrors = null;
@@ -153,7 +166,8 @@
              self.packages.${pkgs.system}.clan-cli
            ] ++ self.packages.${pkgs.system}.clan-cli.runtimeDependencies;
            environment.etc."install-closure".source = "${closureInfo}/store-paths";
-           virtualisation.memorySize = 2048;
+           virtualisation.memorySize = 3048;
+           nix.package = nixPackage;
            nix.settings = {
              substituters = lib.mkForce [ ];
              hashed-mirrors = null;
@@ -174,12 +188,19 @@
            client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
            client.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v root@installer hostname")
            client.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")

            # test that we can generate hardware configurations
            client.fail("test -f test-flake/machines/test-install-machine/facter.json")
            client.fail("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
            client.succeed("clan machines update-hardware-config --flake test-flake test-install-machine root@installer >&2")
            client.succeed("test -f test-flake/machines/test-install-machine/facter.json")
            client.succeed("clan machines update-hardware-config --backend nixos-generate-config --flake test-flake test-install-machine root@installer >&2")
            client.succeed("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
-           client.succeed("clan machines install --debug --flake ${../..} --yes test-install-machine --target-host root@installer >&2")

            # but we don't use them because they're not cached
            client.succeed("rm test-flake/machines/test-install-machine/hardware-configuration.nix test-flake/machines/test-install-machine/facter.json")

+           client.succeed("clan machines install --debug --flake test-flake --yes test-install-machine --target-host root@installer >&2")
            try:
                installer.shutdown()
            except BrokenPipeError:
@@ -16,6 +16,9 @@ in
   documentation.enable = lib.mkDefault false;
   boot.isContainer = true;

+  # needed since nixpkgs 7fb2f407c01b017737eafc26b065d7f56434a992 removed the getty unit by default
+  console.enable = true;

   # undo qemu stuff
   system.build.initialRamdisk = "";
   virtualisation.sharedDirectories = lib.mkForce { };
@@ -31,6 +34,7 @@ in
   };
   # to accept external dependencies such as disko
   node.specialArgs.self = self;
+  _module.args = { inherit self; };
   imports = [
     test
     ./container-driver/module.nix
@@ -1,7 +1,8 @@
 { lib, ... }:
 {
   nixpkgs.flake.setFlakeRegistry = false;
   nixpkgs.flake.setNixPath = false;
-  nix.registry.nixpkgs.to = { };
+  nix.registry = lib.mkForce { };
   documentation.doc.enable = false;
   documentation.man.enable = false;
 }
@@ -7,15 +7,19 @@ in
 (nixos-lib.runTest {
   hostPkgs = pkgs;
   # speed-up evaluation
-  defaults = {
-    imports = [
-      ./minify.nix
-    ];
-    documentation.enable = lib.mkDefault false;
-    nix.settings.min-free = 0;
-    system.stateVersion = lib.version;
-  };
+  defaults = (
+    { config, ... }:
+    {
+      imports = [
+        ./minify.nix
+      ];
+      documentation.enable = lib.mkDefault false;
+      nix.settings.min-free = 0;
+      system.stateVersion = config.system.nixos.release;
+    }
+  );

   _module.args = { inherit self; };
   # to accept external dependencies such as disko
   node.specialArgs.self = self;
   imports = [ test ];
62 checks/morph/flake-module.nix Normal file
@@ -0,0 +1,62 @@
{
  self,
  ...
}:
{
  clan.machines.test-morph-machine = {
    imports = [
      ./template/configuration.nix
      self.nixosModules.clanCore
    ];
    nixpkgs.hostPlatform = "x86_64-linux";
    environment.etc."testfile".text = "morphed";
  };

  clan.templates.machine.test-morph-template = {
    description = "Morph a machine";
    path = ./template;
  };

  perSystem =
    {
      pkgs,
      ...
    }:
    {
      checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
        test-morph = (import ../lib/test-base.nix) {
          name = "morph";

          nodes = {
            actual =
              { pkgs, ... }:
              let
                dependencies = [
                  self
                  pkgs.nixos-anywhere
                  pkgs.stdenv.drvPath
                  pkgs.stdenvNoCC
                  self.nixosConfigurations.test-morph-machine.config.system.build.toplevel
                  self.nixosConfigurations.test-morph-machine.config.system.clan.deployment.file
                ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
                closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
              in

              {
                environment.etc."install-closure".source = "${closureInfo}/store-paths";
                system.extraDependencies = dependencies;
                virtualisation.memorySize = 2048;
                environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
              };
          };
          testScript = ''
            start_all()
            actual.fail("cat /etc/testfile")
            actual.succeed("env CLAN_DIR=${self} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
            assert actual.succeed("cat /etc/testfile") == "morphed"
          '';
        } { inherit pkgs self; };
      };

    };
}
12 checks/morph/template/configuration.nix Normal file
@@ -0,0 +1,12 @@
{ modulesPath, ... }:
{
  imports = [
    # we need these 2 modules always to be able to run the tests
    (modulesPath + "/testing/test-instrumentation.nix")
    (modulesPath + "/virtualisation/qemu-vm.nix")

    (modulesPath + "/profiles/minimal.nix")
  ];

  clan.core.enableRecommendedDefaults = false;
}
8 clanModules/auto-upgrade/README.md Normal file
@@ -0,0 +1,8 @@
---
description = "Set up automatic upgrades"
categories = ["System"]
features = [ "inventory" ]
---

Whether to periodically upgrade NixOS to the latest version. If enabled, a
systemd timer will run `nixos-rebuild switch --upgrade` once a day.
24 clanModules/auto-upgrade/roles/default.nix Normal file
@@ -0,0 +1,24 @@
{
  config,
  lib,
  ...
}:
let
  cfg = config.clan.autoUpgrade;
in
{
  options.clan.autoUpgrade = {
    flake = lib.mkOption {
      type = lib.types.str;
      description = "Flake reference";
    };
  };
  config = {
    system.autoUpgrade = {
      inherit (cfg) flake;
      enable = true;
      dates = "02:00";
      randomizedDelaySec = "45min";
    };
  };
}
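For orientation, a minimal sketch of how a machine might consume the role above once it is selected via the inventory; the flake URL is an illustrative placeholder, not taken from this diff:

```nix
{
  # Follow this flake when the timer runs `nixos-rebuild switch --upgrade`.
  # (Placeholder URL; substitute the flake that defines your machines.)
  clan.autoUpgrade.flake = "git+https://git.clan.lol/clan/clan-core";
}
```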
10 clanModules/data-mesher/README.md Normal file
@@ -0,0 +1,10 @@
---
description = "Set up data-mesher"
categories = ["System"]
features = [ "inventory" ]

[constraints]
roles.admin.min = 1
roles.admin.max = 1
---
19 clanModules/data-mesher/lib.nix Normal file
@@ -0,0 +1,19 @@
lib: {

  machines =
    config:
    let
      instanceNames = builtins.attrNames config.clan.inventory.services.data-mesher;
      instanceName = builtins.head instanceNames;
      dataMesherInstances = config.clan.inventory.services.data-mesher.${instanceName};

      uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list);
    in
    rec {
      admins = dataMesherInstances.roles.admin.machines or [ ];
      signers = dataMesherInstances.roles.signer.machines or [ ];
      peers = dataMesherInstances.roles.peer.machines or [ ];
      bootstrap = uniqueStrings (admins ++ signers);
    };

}
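As a quick illustration of the dedup helper above: `builtins.groupBy lib.id` buckets a list by each element's own value, and `builtins.attrNames` then returns the distinct keys in sorted order. A sketch one could evaluate in `nix repl` (the expected result is my reading of those builtins, not output captured from this diff):

```nix
let
  lib = (import <nixpkgs> { }).lib;
  uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list);
in
uniqueStrings [ "admin" "signer" "admin" ]
# expected: [ "admin" "signer" ]
```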
51 clanModules/data-mesher/roles/admin.nix Normal file
@@ -0,0 +1,51 @@
{ lib, config, ... }:
let
  cfg = config.clan.data-mesher;

  dmLib = import ../lib.nix lib;
in
{
  imports = [
    ../shared.nix
  ];

  options.clan.data-mesher = {
    network = {
      tld = lib.mkOption {
        type = lib.types.str;
        default = (config.networking.domain or "clan");
        description = "Top level domain to use for the network";
      };

      hostTTL = lib.mkOption {
        type = lib.types.str;
        default = "672h"; # 28 days
        example = "24h";
        description = "The TTL for hosts in the network, in the form of a Go time.Duration";
      };
    };
  };

  config = {
    services.data-mesher.initNetwork =
      let
        # for a given machine, read its public key and remove any new lines
        readHostKey =
          machine:
          let
            path = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/data-mesher-host-key/public_key/value";
          in
          builtins.elemAt (lib.splitString "\n" (builtins.readFile path)) 1;
      in
      {
        enable = true;
        keyPath = config.clan.core.vars.generators.data-mesher-network-key.files.private_key.path;

        tld = cfg.network.tld;
        hostTTL = cfg.network.hostTTL;

        # admin and signer host public keys
        signingKeys = builtins.map readHostKey (dmLib.machines config).bootstrap;
      };
  };
}
5 clanModules/data-mesher/roles/peer.nix Normal file
@@ -0,0 +1,5 @@
{
  imports = [
    ../shared.nix
  ];
}
5 clanModules/data-mesher/roles/signer.nix Normal file
@@ -0,0 +1,5 @@
{
  imports = [
    ../shared.nix
  ];
}
154 clanModules/data-mesher/shared.nix Normal file
@@ -0,0 +1,154 @@
{
  config,
  lib,
  ...
}:
let
  cfg = config.clan.data-mesher;
  dmLib = import ./lib.nix lib;

  # the default bootstrap nodes are any machines with the admin or signers role
  # we iterate through those machines, determining an IP address for them based on their VPN
  # currently only supports zerotier
  defaultBootstrapNodes = builtins.foldl' (
    urls: name:
    if
      builtins.pathExists "${config.clan.core.settings.directory}/machines/${name}/facts/zerotier-ip"
    then
      let
        ip = builtins.readFile "${config.clan.core.settings.directory}/machines/${name}/facts/zerotier-ip";
      in
      urls ++ [ "${ip}:${builtins.toString cfg.network.port}" ]
    else
      urls
  ) [ ] (dmLib.machines config).bootstrap;
in
{
  options.clan.data-mesher = {

    bootstrapNodes = lib.mkOption {
      type = lib.types.nullOr (lib.types.listOf lib.types.str);
      default = null;
      description = ''
        A list of bootstrap nodes that act as an initial gateway when joining
        the cluster.
      '';
      example = [
        "192.168.1.1:7946"
        "192.168.1.2:7946"
      ];
    };

    network = {

      interface = lib.mkOption {
        type = lib.types.str;
        description = ''
          The interface over which cluster communication should be performed.
          All the ip addresses associated with this interface will be part of
          our host claim, including both ipv4 and ipv6.

          This should be set to an internal/VPN interface.
        '';
        example = "tailscale0";
      };

      port = lib.mkOption {
        type = lib.types.port;
        default = 7946;
        description = ''
          Port to listen on for cluster communication.
        '';
      };
    };
  };

  config = {

    services.data-mesher = {
      enable = true;
      openFirewall = true;

      settings = {
        log_level = "warn";
        state_dir = "/var/lib/data-mesher";

        # read network id from vars
        network.id = config.clan.core.vars.generators.data-mesher-network-key.files.public_key.value;

        host = {
          names = [ config.networking.hostName ];
          key_path = config.clan.core.vars.generators.data-mesher-host-key.files.private_key.path;
        };

        cluster = {
          port = cfg.network.port;
          join_interval = "30s";
          push_pull_interval = "30s";

          interface = cfg.network.interface;
          bootstrap_nodes = cfg.bootstrapNodes or defaultBootstrapNodes;
        };

        http.port = 7331;
        http.interface = "lo";
      };
    };

    # Generate host key.
    clan.core.vars.generators.data-mesher-host-key = {
      files =
        let
          owner = config.users.users.data-mesher.name;
        in
        {
          private_key = {
            inherit owner;
          };
          public_key = {
            inherit owner;
            secret = false;
          };
        };

      runtimeInputs = [
        config.services.data-mesher.package
      ];

      script = ''
        data-mesher generate keypair \
          --public-key-path $out/public_key \
          --private-key-path $out/private_key
      '';
    };

    clan.core.vars.generators.data-mesher-network-key = {
      # generated once per clan
      share = true;

      files =
        let
          owner = config.users.users.data-mesher.name;
        in
        {
          private_key = {
            inherit owner;
          };
          public_key = {
            inherit owner;
            secret = false;
          };
        };

      runtimeInputs = [
        config.services.data-mesher.package
      ];

      script = ''
        data-mesher generate keypair \
          --public-key-path $out/public_key \
          --private-key-path $out/private_key
      '';
    };
  };
}
@@ -9,9 +9,11 @@ in
   # only import available files, as this allows to filter the files for tests.
   flake.clanModules = filterAttrs (_name: pathExists) {
     admin = ./admin;
+    auto-upgrade = ./auto-upgrade;
     borgbackup = ./borgbackup;
     borgbackup-static = ./borgbackup-static;
     deltachat = ./deltachat;
+    data-mesher = ./data-mesher;
     disk-id = ./disk-id;
     dyndns = ./dyndns;
     ergochat = ./ergochat;
@@ -3,8 +3,7 @@ description = "S3-compatible object store for small self-hosted geo-distributed
---

This module generates garage specific keys automatically.
-When using garage in a distributed deployment the `rpc_key` between connected instances must be shared.
-This is currently still a manual process.
+Also shares the `rpc_secret` between instances.

Options: [NixosModuleOptions](https://search.nixos.org/options?channel=unstable&size=50&sort=relevance&type=packages&query=garage)
Documentation: https://garagehq.deuxfleurs.fr/
@@ -2,9 +2,9 @@
 {
   systemd.services.garage.serviceConfig = {
     LoadCredential = [
-      "rpc_secret_path:${config.clan.core.facts.services.garage.secret.garage_rpc_secret.path}"
-      "admin_token_path:${config.clan.core.facts.services.garage.secret.garage_admin_token.path}"
-      "metrics_token_path:${config.clan.core.facts.services.garage.secret.garage_metrics_token.path}"
+      "rpc_secret_path:${config.clan.core.vars.generators.garage-shared.files.rpc_secret.path}"
+      "admin_token_path:${config.clan.core.vars.generators.garage.files.admin_token.path}"
+      "metrics_token_path:${config.clan.core.vars.generators.garage.files.metrics_token.path}"
     ];
     Environment = [
       "GARAGE_ALLOW_WORLD_READABLE_SECRETS=true"
@@ -14,37 +14,30 @@
     ];
   };

-  clan.core.facts.services.garage = {
-    secret.garage_rpc_secret = { };
-    secret.garage_admin_token = { };
-    secret.garage_metrics_token = { };
-    generator.path = [
+  clan.core.vars.generators.garage = {
+    files.admin_token = { };
+    files.metrics_token = { };
+    runtimeInputs = [
       pkgs.coreutils
       pkgs.openssl
     ];
-    generator.script = ''
-      openssl rand -hex -out $secrets/garage_rpc_secret 32
-      openssl rand -base64 -out $secrets/garage_admin_token 32
-      openssl rand -base64 -out $secrets/garage_metrics_token 32
+    script = ''
+      openssl rand -base64 -out $out/admin_token 32
+      openssl rand -base64 -out $out/metrics_token 32
     '';
   };

-  # TODO: Vars is not in a useable state currently
-  # Move back, once it is implemented.
-  # clan.core.vars.generators.garage = {
-  #   files.rpc_secret = { };
-  #   files.admin_token = { };
-  #   files.metrics_token = { };
-  #   runtimeInputs = [
-  #     pkgs.coreutils
-  #     pkgs.openssl
-  #   ];
-  #   script = ''
-  #     openssl rand -hex -out $out/rpc_secret 32
-  #     openssl rand -base64 -out $out/admin_token 32
-  #     openssl rand -base64 -out $out/metrics_token 32
-  #   '';
-  # };
+  clan.core.vars.generators.garage-shared = {
+    share = true;
+    files.rpc_secret = { };
+    runtimeInputs = [
+      pkgs.coreutils
+      pkgs.openssl
+    ];
+    script = ''
+      openssl rand -hex -out $out/rpc_secret 32
+    '';
+  };

   clan.core.state.garage.folders = [ config.services.garage.settings.metadata_dir ];
 }
@@ -6,4 +6,4 @@ categories = [ "Network" ]

!!! Warning
    If you've been using network manager + wpa_supplicant and now are switching to IWD read this migration guide:
-   https://iwd.wiki.kernel.org/networkmanager#converting_network_profiles
+   https://archive.kernel.org/oldwiki/iwd.wiki.kernel.org/networkmanager.html#converting_network_profiles
@@ -1,4 +1,9 @@
-{ lib, config, ... }:
+{
+  lib,
+  config,
+  pkgs,
+  ...
+}:

 let
   cfg = config.clan.iwd;
@@ -12,12 +17,13 @@ let
   {
     secret.${secret_name} = { };
     generator.prompt = "Wifi password for '${value.ssid}'";
+    # ref. man iwd.network
     generator.script = ''
       config="
       [Settings]
       AutoConnect=${if value.AutoConnect then "true" else "false"}
       [Security]
-      Passphrase=\"$prompt_value\"
+      Passphrase=$(echo -e "$prompt_value" | ${lib.getExe pkgs.gnused} "s=\\\=\\\\\\\=g;s=\t=\\\t=g;s=\r=\\\r=g;s=^ =\\\s=")
       "
       echo "$config" > "$secrets/${secret_name}"
     '';
@@ -10,18 +10,18 @@ let
 in
 {
   config = lib.mkMerge [
-    (lib.mkIf ((var.machineId.value or null) != null) {
+    (lib.mkIf ((var.value or null) != null) {
       assertions = [
         {
-          assertion = lib.stringLength var.machineId.value == 32;
+          assertion = lib.stringLength var.value == 32;
           message = "machineId must be exactly 32 characters long.";
         }
       ];
       boot.kernelParams = [
-        ''systemd.machine_id=${var.machineId.value}''
+        ''systemd.machine_id=${var.value}''
       ];
       environment.etc."machine-id" = {
-        text = var.machineId.value;
+        text = var.value;
       };
     })
     {
@@ -24,14 +24,7 @@ mycelium.default = {
     "berlin"
     "munich"
   ];
-  config = {
-    topLevelDomain = "m";
-    openFirewall = true;
-    addHostedPublicNodes = true;
-  };
 };
```

This will add the machines named `berlin` and `munich` to the `mycelium` VPN.
It will also set the top-level domain of the mycelium VPN to `m`, meaning the
machines are now reachable via `berlin.m` and `munich.m`.
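For intuition about the hosts behaviour this README describes (the implementation appears further down in this comparison): with the top-level domain set to `m`, the module maps each peer's generated mycelium IP to a `networking.hosts` entry, roughly of this shape; the addresses are invented placeholders, not real mycelium IPs:

```nix
{
  # Hypothetical result of the hosts mapping; IPs are placeholders.
  networking.hosts = {
    "5af:1a2b:3c4d::1" = [ "berlin.m" ];
    "5af:1a2b:3c4d::2" = [ "munich.m" ];
  };
}
```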
@@ -4,54 +4,18 @@
  lib,
  ...
}:
let
  flake = config.clan.core.settings.directory;
  machineName = config.clan.core.settings.machine.name;

  # Instances might be empty, if the module is not used via the inventory
  #
  # Type: { ${instanceName} :: { roles :: Roles } }
  # Roles :: { ${role_name} :: { machines :: [string] } }
  instances = config.clan.inventory.services.mycelium or { };

  allPeers = lib.foldlAttrs (
    acc: _instanceName: instanceConfig:
    acc
    ++ (
      if (builtins.elem machineName instanceConfig.roles.peer.machines) then
        instanceConfig.roles.peer.machines
      else
        [ ]
    )
  ) [ ] instances;
  allPeerConfigurations = lib.filterAttrs (n: _: builtins.elem n allPeers) flake.nixosConfigurations;
  allPeersWithIp =
    builtins.mapAttrs
      (_: x: lib.removeSuffix "\n" x.config.clan.core.vars.generators.mycelium.files.ip.value)
      (
        lib.filterAttrs (
          _: x: (builtins.tryEval x.config.clan.core.vars.generators.mycelium.files.ip.value).success
        ) allPeerConfigurations
      );

  ips = lib.attrValues allPeersWithIp;
  peers = lib.concatMap (ip: [
    "tcp://[${ip}]:9651"
    "quic://[${ip}]:9651"
  ]) ips;
in
{
  options = {
    clan.mycelium.topLevelDomain = lib.mkOption {
      type = lib.types.str;
      default = "";
      description = "Top level domain to reach hosts";
    };
    clan.mycelium.openFirewall = lib.mkEnableOption "Open the firewall for mycelium";
    clan.mycelium.addHostedPublicNodes = lib.mkEnableOption "Add hosted Public nodes";
-   clan.mycelium.addHosts = lib.mkOption {
+   clan.mycelium.openFirewall = lib.mkOption {
      type = lib.types.bool;
      default = true;
-     description = "Add mycelium ip's to the host file";
+     description = "Open the firewall for mycelium";
    };

    clan.mycelium.addHostedPublicNodes = lib.mkOption {
      type = lib.types.bool;
      default = true;
      description = "Add hosted Public nodes";
    };
  };
@@ -60,18 +24,8 @@ in
|
||||
addHostedPublicNodes = lib.mkDefault config.clan.mycelium.addHostedPublicNodes;
|
||||
openFirewall = lib.mkDefault config.clan.mycelium.openFirewall;
|
||||
keyFile = config.clan.core.vars.generators.mycelium.files.key.path;
|
||||
inherit peers;
|
||||
};
|
||||
|
||||
config.networking.hosts = lib.mkIf (config.clan.mycelium.addHosts) (
|
||||
lib.mapAttrs' (
|
||||
host: ip:
|
||||
lib.nameValuePair ip (
|
||||
if (config.clan.mycelium.topLevelDomain == "") then [ host ] else [ "${host}.m" ]
|
||||
)
|
||||
) allPeersWithIp
|
||||
);
|
||||
|
||||
config.clan.core.vars.generators.mycelium = {
|
||||
files."key" = { };
|
||||
files."ip".secret = false;
|
||||
|
||||
@@ -12,6 +12,9 @@
    files.password-hash = {
      neededFor = "users";
    };
    files.password = {
      deploy = false;
    };
    migrateFact = "root-password";
    runtimeInputs = [
      pkgs.coreutils
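The generator script itself lies outside this hunk. A hedged sketch of what such a script typically looks like, assuming the xkcdpass/mkpasswd approach used by similar clan modules (not the actual code from this commit; `mkpasswd` would additionally have to be listed in `runtimeInputs`):

```nix
# Hypothetical sketch only; the real script is not shown in this hunk.
script = ''
  xkcdpass -n 4 -d - > $out/password
  mkpasswd -m sha-512 -s < $out/password > $out/password-hash
'';
```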
@@ -37,6 +37,7 @@ in
      type = "rsa";
    };
  };

  clan.core.vars.generators.openssh = {
    files."ssh.id_ed25519" = { };
    files."ssh.id_ed25519.pub".secret = false;
@@ -50,6 +51,14 @@ in
    '';
  };

  programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
    hostNames = [
      "localhost"
      config.networking.hostName
    ] ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
    publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
  };

  clan.core.vars.generators.openssh-rsa = lib.mkIf config.clan.sshd.hostKeys.rsa.enable {
    files."ssh.id_rsa" = { };
    files."ssh.id_rsa.pub".secret = false;
@@ -3,7 +3,7 @@ let
  var = config.clan.core.vars.generators.state-version.files.version or { };
in
{
  system.stateVersion = lib.mkDefault var.value;
  system.stateVersion = lib.mkDefault (lib.removeSuffix "\n" var.value);

  clan.core.vars.generators.state-version = {
    files.version = {
@@ -7,7 +7,8 @@
let
  dir = config.clan.core.settings.directory;
  machineDir = dir + "/machines/";
  syncthingPublicKeyPath = machines: machineDir + machines + "/facts/syncthing.pub";
  machineVarDir = dir + "/vars/per-machine/";
  syncthingPublicKeyPath = machines: machineVarDir + machines + "/syncthing/id/value";
  machinesFileSet = builtins.readDir machineDir;
  machines = lib.mapAttrsToList (name: _: name) machinesFileSet;
  syncthingPublicKeysUnchecked = builtins.map (
@@ -83,24 +84,26 @@ in
    configDir = "/var/lib/syncthing";
    group = "syncthing";

    key = lib.mkDefault config.clan.core.facts.services.syncthing.secret."syncthing.key".path or null;
    cert = lib.mkDefault config.clan.core.facts.services.syncthing.secret."syncthing.cert".path or null;
    key = lib.mkDefault config.clan.core.vars.generators.syncthing.files.key.path or null;
    cert = lib.mkDefault config.clan.core.vars.generators.syncthing.files.cert.path or null;
  };

  clan.core.facts.services.syncthing = {
    secret."syncthing.key" = { };
    secret."syncthing.cert" = { };
    public."syncthing.pub" = { };
    generator.path = [
  clan.core.vars.generators.syncthing = {
    files.key = { };
    files.cert = { };
    files.api = { };
    files.id.secret = false;
    runtimeInputs = [
      pkgs.coreutils
      pkgs.gnugrep
      pkgs.syncthing
    ];
    generator.script = ''
      syncthing generate --config "$secrets"
      mv "$secrets"/key.pem "$secrets"/syncthing.key
      mv "$secrets"/cert.pem "$secrets"/syncthing.cert
      cat "$secrets"/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > "$facts"/syncthing.pub
    script = ''
      syncthing generate --config $out
      mv $out/key.pem $out/key
      mv $out/cert.pem $out/cert
      cat $out/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > $out/id
      cat $out/config.xml | grep -oP '<apikey>\K[^<]+' | uniq > $out/api
    '';
  };
}
@@ -1,5 +1,5 @@
---
description = "Configures [Zerotier VPN](https://zerotier.com) secure and efficient networking within a Clan.."
description = "Configures [Zerotier VPN](https://zerotier.com) secure and efficient networking within a Clan."
features = [ "inventory" ]
categories = [ "Network", "System" ]
547 decisions/01-ClanModules.md Normal file
@@ -0,0 +1,547 @@
# Clan service modules

Status: Accepted

## Context

To define a service in Clan, you need to define two things:

- `clanModule` - defined by module authors
- `inventory` - defined by users

The `clanModule` is currently a plain NixOS module. It is conditionally imported into each machine depending on the `service` and `role`.

A `role` is a function of a machine within a service. For example in the `backup` service there are `client` and `server` roles.

The `inventory` contains the settings for the user/consumer of the module. It describes what `services` run on each machine and with which `roles`.

Additionally any `service` can be instantiated multiple times.

This ADR proposes that we change how to write a `clanModule`. The `inventory` should get a new attribute called `instances` that allows for configuration of these modules.

### Status Quo

In this example the user configures 2 instances of the `networking` service:

The *user* defines

```nix
{
  inventory.services = {
    # anything inside an instance is instance specific
    networking."instance1" = {
      roles.client.tags = [ "all" ];
      machines.foo.config = { ... /* machine specific settings */ };

      # this will not apply to `clients` outside of `instance1`
      roles.client.config = { ... /* client specific settings */ };
    };
    networking."instance2" = {
      roles.server.tags = [ "all" ];
      config = { ... /* applies to every machine that runs this instance */ };
    };
  };
}
```

The *module author* defines:

```nix
# networking/roles/client.nix
{ config, ... }:
let
  instances = config.clan.inventory.services.networking or { };

  serviceConfig = config.clan.networking;
in {
  ## Set some nixos options
}
```

### Problems

Problems with the current way of writing clanModules:

1. No way to retrieve the config of a single service instance, together with its name.
2. Directly exporting a single, anonymous nixosModule without any intermediary attribute layers doesn't leave room for exporting other inventory resources such as potentially `vars` or `homeManagerConfig`.
3. Can't access multiple config instances individually.
   Example:
   ```nix
   inventory = {
     services = {
       network.c-base = {
         instanceConfig.ips = {
           mors = "172.139.0.2";
         };
       };
       network.gg23 = {
         instanceConfig.ips = {
           mors = "10.23.0.2";
         };
       };
     };
   };
   ```
   This doesn't work because all instance configs are applied to the same namespace, which currently results in a conflict.
   Resolving this problem means that new inventory modules cannot be plain nixos modules anymore. If they are configured via `instances` / `instanceConfig` they cannot be configured without using the inventory. (There might be ways to inject instanceConfig, but that requires knowledge of inventory internals.)

4. Writing modules for multiple instances is cumbersome. Currently the clanModule author has to write one or multiple `fold` operations for potentially every nixos option to define how multiple service instances merge into every single option. The new idea behind this ADR is to pull the common fold function into the outer context and provide it as a common helper. (See the example below; `perInstance` is analogous to the well-known `perSystem` of flake-parts.)

5. Each role has a different interface. We need to render that interface into json-schema, which currently includes creating an unnecessary test machine. Defining the interface at a higher level (outside of any machine context) allows faster evaluation and isolation by design from any machine.
   This allows rendering the UI (options tree) of a service by just knowing the service and the corresponding roles, without creating a dummy machine.

6. The interface for defining config is wrong. It is possible to define config that applies to multiple machines at once, and config that applies to a machine as a whole. But this is wrong behavior, because the options exist at the role level, so config must also always exist at the role level.
   Currently we merge options and config together, but that may produce conflicts. Those module system conflicts are very hard to foresee since they depend on what roles exist at runtime.

## Proposed Change

We will create a new module class which is defined by `_class = "clan.service"` ([documented here](https://nixos.org/manual/nixpkgs/stable/#module-system-lib-evalModules-param-class)).

Existing clan modules will still work by continuing to be plain NixOS modules. All new modules can set `_class = "clan.service";` to use the proposed features.

In short, the change introduces a new module class that makes the currently necessary folding of `clan.service`s `instances` and `roles` a common operation. The module author can define the inner function of the fold operations, which is called a `clan.service` module.

These are the attributes of such a module:

### `roles.<roleName>.interface`

Each role can have a different interface for how to be configured.
I.e.: a `client` role might have different options than a `server` role.

This attribute should be used to define `options`. (Not `config`!)

The end-user defines the corresponding `config`.

This submodule will be evaluated for each `instance role` combination and passed as argument into `perInstance`.

This submodule's `options` will be evaluated to build the UI for that module dynamically.
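A minimal sketch of such an interface, following the conventions this ADR proposes (the option itself is illustrative, not part of any real module):

```nix
# Illustrative sketch: a role interface defines options only; users supply the config.
roles.client.interface =
  { lib, ... }:
  {
    options.enableDebug = lib.mkOption {
      type = lib.types.bool;
      default = false;
      description = "Hypothetical option, shown only to illustrate the shape.";
    };
  };
```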
### **Result attributes**

Some common result attributes are produced by modules of this proposal; they will be referenced later in this document and are commonly defined as:

- `nixosModule` A single nixos module. (`{config, ...}:{ environment.systemPackages = []; }`)
- `services.<serviceName>` An attribute set of `_class = clan.service`, which contains the same structure this whole ADR proposes.
- `vars` To be defined. Reserved for now.

### `roles.<roleName>.perInstance`

This acts like a function that maps over all `service instances` of a given `role`.
It produces the previously defined **result attributes**.

I.e. this makes it possible to produce multiple `nixosModule`s, one for every instance of the service,
hence making multiple `service instances` convenient by leveraging the module-system merge behavior.

### `perMachine`

This acts like a function that maps over all `machines` of a given `service`.
It produces the previously defined **result attributes**.

I.e. this produces exactly one `nixosModule` per `service`,
making it easy to set nixos options only once if they have a one-to-one relation to a service being enabled.

Note: `lib.mkIf` can be used on i.e. `roleName` to make the scope more specific.

### `services.<serviceName>`

This allows defining nested services.
I.e. the *service* `backup` might define a nested *service* `ssh` which sets up an ssh connection.

This can be defined in `perMachine` and `perInstance`:

- For every `instance`, a given `service` may add multiple nested `services`.
- A given `service` may add a static set of nested `services`, even if there are multiple instances of the same given service.

Q: Why is this not a top-level attribute?
A: Because nested service definitions may also depend on a `role` which must be resolved depending on `machine` and `instance`. The top-level module doesn't know anything about machines. Keeping the service layer machine-agnostic allows us to build the UI for a module without adding any machines. (One of the problems with the current system.)

```
zerotier/default.nix
```
```nix
# Some example module
{
  _class = "clan.service";

  # Analog to flake-parts 'perSystem' only that it takes instance
  # The exact arguments will be specified and documented along with the actual implementation.
  roles.client.perInstance =
    {
      # attrs : settings of that instance
      settings,
      # string : name of the instance
      instanceName,
      # { name :: string , roles :: listOf string; }
      machine,
      # { {roleName} :: { machines :: listOf string; } }
      roles,
      ...
    }:
    {
      # Return a nixos module for every instance.
      # The module author must be aware that this may return multiple modules (one for every instance) which are merged natively
      nixosModule = {
        config.debug."${instanceName}-client" = settings;
      };
    };

  # Function that is called once for every machine with the role "client"
  # Receives at least the following parameters:
  #
  # machine :: { name :: String, roles :: listOf string; }
  #   Name of the machine
  #
  # instances :: { instanceName :: { roleName :: { machines :: [ string ]; }}}
  #   Resolved roles
  #   Same type as currently in `clan.inventory.services.<ServiceName>.<InstanceName>.roles`
  #
  # The exact arguments will be specified and documented along with the actual implementation.
  perMachine =
    { machine, instances, ... }:
    {
      nixosModule =
        { lib, ... }:
        {
          # Some shared code should be put into a shared file
          # which is then imported into all/some roles
          imports = [
            ../shared.nix
          ]
          ++ (lib.optional (builtins.elem "client" machine.roles) {
            options.debug = lib.mkOption {
              type = lib.types.attrsOf lib.types.raw;
            };
          });
        };
    };
}
```
## Inventory.instances

This document also proposes adding a new attribute to the inventory that allows for exclusive configuration of the new modules.
This allows us to better separate the new and the old way of writing and configuring modules, keeping the new implementation more focused and keeping existing technical debt out from the beginning.

The following thoughts went into this:

- Getting rid of `<serviceName>`: Using only the attribute name (plain string) is not sufficient for defining the source of the service module. Encoding meta information into it would also require some extensible format specification and parser.
- Removing instanceConfig and machineConfig: There is no such config. Service configuration must always be role specific, because the options are defined on the role.
- Renaming `config` to `settings` or similar, since `config` is a module-system internal name.
- Tags and machines should be an attribute set to allow setting `settings` on that level instead.

```nix
{
  inventory.instances = {
    "instance1" = {
      # Allows to define where the module should be imported from.
      module = {
        input = "clan-core";
        name = "borgbackup";
      };
      # settings that apply to all client machines
      roles.client.settings = {};
      # settings that apply to the client service of the machine with name <machineName>
      # There might be a server service that takes different settings on the same machine!
      roles.client.machines.<machineName>.settings = {};
      # settings that apply to all client instances with tag <tagName>
      roles.client.tags.<tagName>.settings = {};
    };
    "instance2" = {
      # ...
    };
  };
}
```

## Iteration note

We want to implement the system as described. Once we have sufficient data on real-world use cases and modules, we might revisit this document along with the updated implementation.
## Real world example

The following module demonstrates the idea using *borgbackup* as an example.

```nix
{
  _class = "clan.service";

  # Define the 'options' of 'settings'; see the `settings` argument of perInstance
  roles.server.interface =
    { lib, ... }:
    {
      options.directory = lib.mkOption {
        type = lib.types.str;
        default = "/var/lib/borgbackup";
        description = ''
          The directory where the borgbackup repositories are stored.
        '';
      };
    };

  roles.server.perInstance =
    {
      instanceName,
      settings,
      roles,
      ...
    }:
    {
      nixosModule =
        { config, lib, ... }:
        let
          dir = config.clan.core.settings.directory;
          machineDir = dir + "/vars/per-machine/";
          allClients = roles.client.machines;
        in
        {
          # services.borgbackup is a native nixos option
          config.services.borgbackup.repos =
            let
              borgbackupIpMachinePath = machine: machineDir + machine + "/borgbackup/borgbackup.ssh.pub/value";

              machinesMaybeKey = builtins.map (
                machine:
                let
                  fullPath = borgbackupIpMachinePath machine;
                in
                if builtins.pathExists fullPath then
                  machine
                else
                  lib.warn ''
                    Machine ${machine} does not have a borgbackup key at ${fullPath},
                    run `clan var generate ${machine}` to generate it.
                  '' null
              ) allClients;

              machinesWithKey = lib.filter (x: x != null) machinesMaybeKey;

              hosts = builtins.map (machine: {
                name = instanceName + machine;
                value = {
                  path = "${settings.directory}/${machine}";
                  authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machine)) ];
                };
              }) machinesWithKey;
            in
            if hosts != [ ] then builtins.listToAttrs hosts else { };
        };
    };

  roles.client.interface =
    { lib, ... }:
    {
      # There might be a better interface now. This is just how clan borgbackup was configured in the 'old' way
      options.destinations = lib.mkOption {
        type = lib.types.attrsOf (
          lib.types.submodule (
            { name, ... }:
            {
              options = {
                name = lib.mkOption {
                  type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
                  default = name;
                  description = "the name of the backup job";
                };
                repo = lib.mkOption {
                  type = lib.types.str;
                  description = "the borgbackup repository to backup to";
                };
                rsh = lib.mkOption {
                  type = lib.types.nullOr lib.types.str;
                  default = null;
                  defaultText = "ssh -i \${config.clan.core.vars.generators.borgbackup.files.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
                  description = "the rsh to use for the backup";
                };
              };
            }
          )
        );
        default = { };
        description = ''
          destinations where the machine should be backed up to
        '';
      };

      options.exclude = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        example = [ "*.pyc" ];
        default = [ ];
        description = ''
          Directories/Files to exclude from the backup.
          Use * as a wildcard.
        '';
      };
    };
  roles.client.perInstance =
    {
      instanceName,
      roles,
      machine,
      settings,
      ...
    }:
    {
      nixosModule =
        {
          config,
          lib,
          pkgs,
          ...
        }:
        let
          allServers = roles.server.machines;

          # machineName = config.clan.core.settings.machine.name;

          # cfg = config.clan.borgbackup;
          preBackupScript = ''
            declare -A preCommandErrors

            ${lib.concatMapStringsSep "\n" (
              state:
              lib.optionalString (state.preBackupCommand != null) ''
                echo "Running pre-backup command for ${state.name}"
                if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
                  preCommandErrors["${state.name}"]=1
                fi
              ''
            ) (lib.attrValues config.clan.core.state)}

            if [[ ''${#preCommandErrors[@]} -gt 0 ]]; then
              echo "pre-backup commands failed for the following services:"
              for state in "''${!preCommandErrors[@]}"; do
                echo "  $state"
              done
              exit 1
            fi
          '';

          destinations =
            let
              destList = builtins.map (serverName: {
                name = "${instanceName}-${serverName}";
                value = {
                  repo = "borg@${serverName}:/var/lib/borgbackup/${machine.name}";
                  rsh = "ssh -i ${
                    config.clan.core.vars.generators."borgbackup-${instanceName}".files."borgbackup.ssh".path
                  } -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
                } // settings.destinations.${serverName};
              }) allServers;
            in
            (builtins.listToAttrs destList);
        in
        {
          config = {
            # Derived from the destinations
            systemd.services = lib.mapAttrs' (
              _: dest:
              lib.nameValuePair "borgbackup-job-${instanceName}-${dest.name}" {
                # since borgbackup mounts the system read-only, we need to run in an ExecStartPre script, so we can generate additional files.
                serviceConfig.ExecStartPre = [
                  ''+${pkgs.writeShellScript "borgbackup-job-${dest.name}-pre-backup-commands" preBackupScript}''
                ];
              }
            ) destinations;

            services.borgbackup.jobs = lib.mapAttrs (_destinationName: dest: {
              paths = lib.unique (
                lib.flatten (map (state: state.folders) (lib.attrValues config.clan.core.state))
              );
              exclude = settings.exclude;
              repo = dest.repo;
              environment.BORG_RSH = dest.rsh;
              compression = "auto,zstd";
              startAt = "*-*-* 01:00:00";
              persistentTimer = true;

              encryption = {
                mode = "repokey";
                passCommand = "cat ${config.clan.core.vars.generators."borgbackup-${instanceName}".files."borgbackup.repokey".path}";
              };

              prune.keep = {
                within = "1d"; # Keep all archives from the last day
                daily = 7;
                weekly = 4;
                monthly = 0;
              };
            }) destinations;

            environment.systemPackages = [
              (pkgs.writeShellApplication {
                name = "borgbackup-create";
                runtimeInputs = [ config.systemd.package ];
                text = ''
                  ${lib.concatMapStringsSep "\n" (dest: ''
                    systemctl start borgbackup-job-${dest.name}
                  '') (lib.attrValues destinations)}
                '';
              })
              (pkgs.writeShellApplication {
                name = "borgbackup-list";
                runtimeInputs = [ pkgs.jq ];
                text = ''
                  (${
                    lib.concatMapStringsSep "\n" (
                      dest:
                      # we need yes here to skip the changed url verification
                      ''echo y | /run/current-system/sw/bin/borg-job-${dest.name} list --json | jq '[.archives[] | {"name": ("${dest.name}::${dest.repo}::" + .name)}]' ''
                    ) (lib.attrValues destinations)
                  }) | jq -s 'add // []'
                '';
              })
              (pkgs.writeShellApplication {
                name = "borgbackup-restore";
                runtimeInputs = [ pkgs.gawk ];
                text = ''
                  cd /
                  IFS=':' read -ra FOLDER <<< "''${FOLDERS-}"
                  job_name=$(echo "$NAME" | awk -F'::' '{print $1}')
                  backup_name=''${NAME#"$job_name"::}
                  if [[ ! -x /run/current-system/sw/bin/borg-job-"$job_name" ]]; then
                    echo "borg-job-$job_name not found: Backup name is invalid" >&2
                    exit 1
                  fi
                  echo y | /run/current-system/sw/bin/borg-job-"$job_name" extract "$backup_name" "''${FOLDER[@]}"
                '';
              })
            ];

            # every borgbackup instance adds its own vars
            clan.core.vars.generators."borgbackup-${instanceName}" = {
              files."borgbackup.ssh.pub".secret = false;
              files."borgbackup.ssh" = { };
              files."borgbackup.repokey" = { };

              migrateFact = "borgbackup";
              runtimeInputs = [
                pkgs.coreutils
                pkgs.openssh
                pkgs.xkcdpass
              ];
              script = ''
                ssh-keygen -t ed25519 -N "" -f $out/borgbackup.ssh
                xkcdpass -n 4 -d - > $out/borgbackup.repokey
              '';
            };
          };
        };
    };

  perMachine = {
    nixosModule =
      { ... }:
      {
        clan.core.backups.providers.borgbackup = {
          list = "borgbackup-list";
          create = "borgbackup-create";
          restore = "borgbackup-restore";
        };
      };
  };
}
```
## Prior-art

- https://github.com/NixOS/nixops
- https://github.com/infinisil/nixus
116 decisions/02-clan-api.md Normal file
@@ -0,0 +1,116 @@
# Clan as library

## Status

Accepted

## Context

In the long term we envision the clan application will consist of the following user-facing tools:

- `CLI`
- `TUI`
- `Desktop Application`
- `REST-API`
- `Mobile Application`

We are not yet sure whether all of those will exist, but the architecture should be generic enough that they are possible without major changes to the underlying system.

## Decision

This leads to the conclusion that we should do `library`-centric development,
with the current `clan` python code being a library that can be imported to create various tools on top of it.
All **CLI** or **UI** related parts should be moved out of the main library.

*Note: The next person who wants to implement any new frontend should do this first. Currently it looks like the TUI is the next one.*

Imagine roughly the following architecture:

```mermaid
graph TD
    %% Define styles
    classDef frontend fill:#f9f,stroke:#333,stroke-width:2px;
    classDef backend fill:#bbf,stroke:#333,stroke-width:2px;
    classDef storage fill:#ff9,stroke:#333,stroke-width:2px;
    classDef testing fill:#cfc,stroke:#333,stroke-width:2px;

    %% Define nodes
    user(["User"]) -->|Interacts with| Frontends

    subgraph "Frontends"
        CLI["CLI"]:::frontend
        APP["Desktop App"]:::frontend
        TUI["TUI"]:::frontend
        REST["REST API"]:::frontend
    end

    subgraph "Python"
        API["Library <br>for interacting with clan"]:::backend
        BusinessLogic["Business Logic<br>Implements actions like 'machine create'"]:::backend
        STORAGE[("Persistence")]:::storage
        NIX["Nix Eval & Build"]:::backend
    end

    subgraph "CI/CD & Tests"
        TEST["Feature Testing"]:::testing
    end

    %% Define connections
    CLI --> API
    APP --> API
    TUI --> API
    REST --> API

    TEST --> API

    API --> BusinessLogic
    BusinessLogic --> STORAGE
    BusinessLogic --> NIX
```

With this very simple design it is ensured that all the basic features remain stable across all frontends.
In the end it is straightforward to create python library function calls in a testing framework to ensure that kind of stability.
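A hedged sketch of what such a library-level test could look like; `clan_lib` and `list_machines` are hypothetical names used only to illustrate the idea, not the actual API:

```python
# Hypothetical sketch: every frontend (CLI, TUI, REST) would exercise this same call.
import pytest

clan_lib = pytest.importorskip("clan_lib")  # hypothetical package name


def test_list_machines_is_stable() -> None:
    machines = clan_lib.list_machines()  # hypothetical library entry point
    assert isinstance(machines, list)
```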

Integration tests and smaller unit tests should both be utilized to ensure the stability of the library.

Note: Library functions don't have to be json-serializable in general.

Persistence includes but is not limited to: creating git commits, writing to inventory.json, and reading and writing vars and other data on disk in general.

## Benefits / Drawbacks

- (+) Less tight coupling of frontend / backend teams
- (+) Consistency and inherent behavior
- (+) Performance & Scalability
- (+) Different frontends for different user groups
- (+) Documentation per library function makes it convenient to interact with the clan resources.
- (+) Testing the library ensures stability of the underlying layers for everything built above them.
- (-) Complexity overhead
- (-) Library needs to be designed / documented
    - (+) Library can be well documented since it is a finite set of functions.
- (-) Error handling might be harder.
    - (+) Common error reporting
- (-) Different frontends need different features. The library must include them all.
    - (+) All those core features must be implemented anyway.
- (+) VPN benchmarking already uses the existing libraries and works relatively well.

## Implementation considerations

Not all details that will need to change over time can be pointed out ahead of time.
The goal of this document is to create a common understanding of how we would like our project to be structured.
Any future commits should contribute to this goal.

Some ideas of what might need to change:

- Having separate locations or packages for the library and the CLI.
- Rename the `clan_cli` package to `clan` and move the `cli` frontend into a subfolder or a separate package.
- Python argparse or other CLI-related code should not exist in the `clan` python library.
- `__init__.py` should be very minimal. Only init the business logic models and resources. Note that all `__init__.py` files all the way up in the module tree are always executed as part of the python module import logic and thus should be as small as possible.
  I.e. `from clan_cli.vars.generators import ...` executes both `clan_cli/__init__.py` and `clan_cli/vars/__init__.py` if any of those exist.
- An `api` folder doesn't make sense since the python library `clan` is the api.
- Logic needed for the webui that performs json serialization and deserialization will be some `json-adapter` folder or package.
- Code for serializing dataclasses and typed dictionaries is needed for the persistence layer. (I.e. for read-write of inventory.json.)
- The inventory-json is a backend resource that is internal. Its logic includes merging, unmerging, and partial updates that take nix values and their priorities into account. Nobody should try to read or write to it directly.
  Instead there will be library methods, i.e. to add a `service` or to update/read/delete some information from it.
- Library functions should be carefully designed with suitable conventions for writing good APIs in mind. (I.e.: https://swagger.io/resources/articles/best-practices-in-api-design/)
36 devShell.nix
@@ -1,10 +1,12 @@
{ ... }:
{ inputs, ... }:
{
  perSystem =
    {
      lib,
      pkgs,
      self',
      config,
      system,
      ...
    }:
    let
@@ -24,18 +26,26 @@
    in
    {
      devShells.default = pkgs.mkShell {
        packages = [
          select-shell
          pkgs.nix-unit
          pkgs.tea
          # Better error messages than nix 2.18
          pkgs.nixVersions.latest
          self'.packages.tea-create-pr
          self'.packages.merge-after-ci
          self'.packages.pending-reviews
          # treefmt with config defined in ./flake-parts/formatting.nix
          config.treefmt.build.wrapper
        ];
        packages =
          [
            select-shell
            pkgs.nix-unit
            pkgs.tea
            # Better error messages than nix 2.18
            pkgs.nixVersions.latest
            self'.packages.tea-create-pr
            self'.packages.merge-after-ci
            self'.packages.pending-reviews
            # treefmt with config defined in ./flake-parts/formatting.nix
            config.treefmt.build.wrapper
          ]
          # bring in data-mesher for the cli which can help with things like key generation
          ++ (
            let
              data-mesher = inputs.data-mesher.packages.${system}.data-mesher or null;
            in
            lib.optional (data-mesher != null) data-mesher
          );
        shellHook = ''
          echo -e "${ansiEscapes.green}switch to another dev-shell using: select-shell${ansiEscapes.reset}"
          export PRJ_ROOT=$(git rev-parse --show-toplevel)
@@ -21,14 +21,14 @@ Let's get your development environment up and running:
    curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
    ```

2. **Install direnv**:
1. **Install direnv**:

    - To automatically set up a devshell on entering the directory
    ```bash
    nix profile install nixpkgs#nix-direnv-flakes nixpkgs#direnv
    ```

3. **Add direnv to your shell**:
1. **Add direnv to your shell**:

    - Direnv needs to [hook into your shell](https://direnv.net/docs/hook.html) to work.
      You can do this by executing the following command. The example below will set up direnv for `zsh` and `bash`:
@@ -37,10 +37,10 @@ Let's get your development environment up and running:
    echo 'eval "$(direnv hook zsh)"' >> ~/.zshrc && echo 'eval "$(direnv hook bash)"' >> ~/.bashrc && eval "$SHELL"
    ```

3. **Allow the devshell**
1. **Allow the devshell**
    - Go to `clan-core/pkgs/clan-cli` and run `direnv allow` to set up the development environment needed to execute the `clan` command

4. **Create a Gitea Account**:
1. **Create a Gitea Account**:
    - Register an account on https://git.clan.lol
    - Fork the [clan-core](https://git.clan.lol/clan/clan-core) repository
    - Clone the repository and navigate to it
@@ -48,30 +48,7 @@ Let's get your development environment up and running:
    ```bash
    git remote add upstream gitea@git.clan.lol:clan/clan-core.git
    ```
5. **Create an access token**:
    - Log in to Gitea.
    - Go to your account settings.
    - Navigate to the Applications section.
    - Click Generate New Token.
    - Name your token and select all available scopes.
    - Generate the token and copy it for later use.
    - Your access token is now ready to use with all permissions.

5. **Register Your Gitea Account Locally**:

    - Execute the following command to add your Gitea account locally:
    ```bash
    tea login add
    ```
    - Fill out the prompt as follows:
        - URL of Gitea instance: `https://git.clan.lol`
        - Name of new Login [git.clan.lol]:
        - Do you have an access token? Yes
        - Token: <yourtoken>
        - Set Optional settings: No


6. **Allow .envrc**:
1. **Allow .envrc**:

    - When you enter the directory, you'll receive an error message like this:
    ```bash
@@ -79,7 +56,7 @@ Let's get your development environment up and running:
    ```
    - Execute `direnv allow` to automatically execute the shell script `.envrc` when entering the directory.

7. **(Optional) Install Git Hooks**:
1. **(Optional) Install Git Hooks**:
    - To syntax check your code you can run:
    ```bash
    nix fmt
@@ -89,15 +66,9 @@ Let's get your development environment up and running:
    ./scripts/pre-commit
    ```

8. **Open a Pull Request**:
    - To automatically open up a pull request you can use our tool called:
    ```
    merge-after-ci --reviewers Mic92 Lassulus Qubasa
    ```

## Related Projects

- **Data Mesher**: [dm](https://git.clan.lol/clan/dm)
- **Data Mesher**: [data-mesher](https://git.clan.lol/clan/data-mesher)
- **Nixos Facter**: [nixos-facter](https://github.com/nix-community/nixos-facter)
- **Nixos Anywhere**: [nixos-anywhere](https://github.com/nix-community/nixos-anywhere)
- **Disko**: [disko](https://github.com/nix-community/disko)
@@ -128,8 +99,12 @@ run(
    ),
    RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
)

```

The <path_to_local_src> doesn't need to be a local path; it can be any valid [flakeref](https://nix.dev/manual/nix/2.26/command-ref/new-cli/nix3-flake.html#flake-references),
and can thus point to already opened PRs, for example, to test them.
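For illustration, flake references like the following would be accepted (the PR number is made up):

```shellSession
# a local checkout
/home/user/clan-core
# a PR branch on the clan gitea (illustrative PR number)
git+https://git.clan.lol/clan/clan-core?ref=refs/pull/1234/head
```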

# Standards

- Every new module name should be in kebab-case.
@@ -48,6 +48,7 @@ nav:
      - Add Machines: getting-started/configure.md
      - Secrets & Facts: getting-started/secrets.md
      - Deploy Machine: getting-started/deploy.md
      - Continuous Integration: getting-started/check.md
    - Guides:
      - Disk Encryption: getting-started/disk-encryption.md
      - Mesh VPN: getting-started/mesh-vpn.md
@@ -61,8 +62,10 @@ nav:
    - Authoring:
      - Modules: clanmodules/index.md
      - Disk Templates: manual/disk-templates.md
    - Contribute: manual/contribute.md
    - Debugging: manual/debugging.md
    - Contributing:
      - Contribute: contributing/contribute.md
      - Debugging: contributing/debugging.md
      - Testing: contributing/testing.md
    - Repo Layout: manual/repo-layout.md
    - Migrate existing Flakes: manual/migration-guide.md
# - Concepts:
@@ -76,6 +79,7 @@ nav:
    # This is the module overview and should stay at the top
    - reference/clanModules/admin.md
    - reference/clanModules/borgbackup-static.md
    - reference/clanModules/data-mesher.md
    - reference/clanModules/borgbackup.md
    - reference/clanModules/deltachat.md
    - reference/clanModules/disk-id.md
@@ -107,6 +111,7 @@ nav:
    - reference/clanModules/thelounge.md
    - reference/clanModules/trusted-nix-caches.md
    - reference/clanModules/user-password.md
    - reference/clanModules/auto-upgrade.md
    - reference/clanModules/vaultwarden.md
    - reference/clanModules/xfce.md
    - reference/clanModules/zerotier-static-peers.md
@@ -13,8 +13,8 @@
  # { clanCore = «derivation JSON»; clanModules = { ${name} = «derivation JSON» }; }
  jsonDocs = pkgs.callPackage ./get-module-docs.nix {
    inherit (self) clanModules;
    evalClanModules = self.lib.evalClanModules;
    modulesRolesOptions = self.lib.evalClanModulesWithRoles self.clanModules;
    evalClanModules = self.lib.evalClan.evalClanModules;
    modulesRolesOptions = self.lib.evalClan.evalClanModulesWithRoles self.clanModules;
  };

  # Frontmatter for clanModules
@@ -585,7 +585,7 @@ Each attribute is documented below

```nix
buildClan {
  directory = self;
  self = self;
  machines = {
    jon = { };
    sara = { };
@@ -51,6 +51,20 @@ wintux

If you're using VSCode, it has a handy feature that makes paths to source code files clickable in the integrated terminal. Combined with the previously mentioned techniques, this allows you to open a Clan in VSCode, execute a command like `clan machines list --debug`, and receive a printed path to the code that initiates the subprocess. With the `Ctrl` key (or `Cmd` on macOS) and a mouse click, you can jump directly to the corresponding line in the code file and add a `breakpoint()` function to it, to inspect the internal state.

## Finding Print Messages

To identify where a specific print message comes from, you can enable a helpful feature. Simply set the environment variable `export TRACE_PRINT=1`. When you run commands with `--debug` mode, each print message will include information about its source location.

If you need more details, you can expand the stack trace information that appears with each print by setting the environment variable `export TRACE_DEPTH=3`.

## Analyzing Performance

To understand what's causing slow performance, set the environment variable `export CLAN_CLI_PERF=1`. When you complete a clan command, you'll see a summary of various performance metrics, helping you identify what's taking up time.
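Putting the variables from the sections above together (all names are taken from this page):

```shellSession
export TRACE_PRINT=1    # print messages include their source location under --debug
export TRACE_DEPTH=3    # show more stack trace context with each print
export CLAN_CLI_PERF=1  # print a performance summary when the command finishes
clan machines list --debug
```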

## See all possible packages and tests

To quickly show all possible packages and tests execute:

316 docs/site/contributing/testing.md Normal file
@@ -0,0 +1,316 @@
# Testing your contributions

Each feature added to clan should be tested extensively via automated tests.

This document covers different methods of automated testing, including creating, running and debugging such tests.

In order to test the behavior of clan, different testing frameworks are used depending on the concern:

- NixOS VM tests: for high level integration
- NixOS container tests: for high level integration
- Python tests via pytest: for unit tests and integration tests
- Nix eval tests: for nix functions, libraries, modules, etc.

## NixOS VM Tests

The [NixOS VM Testing Framework](https://nixos.org/manual/nixos/stable/index.html#sec-nixos-tests) is used to create high level integration tests, by running one or more VMs generated from a specified config. Commands can be executed on the booted machine(s) to verify a deployment of a service works as expected. All machines within a test are connected by a virtual network. Internet access is not available.

### When to use VM tests

- testing that a service defined through a clan module works as expected after deployment
- testing clan-cli subcommands which require accessing a remote machine

### When not to use VM tests

NixOS VM tests are slow and expensive. They should only be used for testing high level integration of components.
VM tests should be avoided wherever it is possible to implement a cheaper unit test instead.

- testing detailed behavior of a certain clan-cli command -> use unit testing via pytest instead
- regression testing -> add a unit test

### Finding examples for VM tests

Existing nixos vm tests in clan-core can be found by using ripgrep:
```shellSession
rg "import.*/lib/test-base.nix"
```

### Locating definitions of failing VM tests

All nixos vm tests in clan are exported as individual flake outputs under `checks.x86_64-linux.{test-attr-name}`.
If a test fails in CI:

- look for the job name of the test near the top of the CI Job page, for example `gitea:clan/clan-core#checks.x86_64-linux.borgbackup/1242`
- in this case `checks.x86_64-linux.borgbackup` is the attribute path
- note the last element of that attribute path, in this case `borgbackup`
- search for the attribute name inside the `/checks` directory via ripgrep

Example: locating the vm test named `borgbackup`:

```shellSession
$ rg "borgbackup =" ./checks
./checks/flake-module.nix
41:    borgbackup = import ./borgbackup nixosTestArgs;
```

-> the location of that test is `/checks/flake-module.nix` line `41`.

### Adding vm tests

Create a nixos test module under `/checks/{name}/default.nix` and import it in `/checks/flake-module.nix`. A minimal sketch of such a module is shown below.
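A hedged sketch, assuming the `test-base.nix` helper (found via the ripgrep command above) accepts the usual NixOS test arguments; its exact signature may differ:

```nix
# /checks/myservice/default.nix -- illustrative only, not an actual test from the repo
(import ../lib/test-base.nix) {
  name = "myservice";
  nodes.machine =
    { ... }:
    {
      # the configuration under test
      services.openssh.enable = true;
    };
  testScript = ''
    machine.wait_for_unit("sshd.service")
  '';
}
```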

### Running VM tests

```shellSession
nix build .#checks.x86_64-linux.{test-attr-name}
```
(replace `{test-attr-name}` with the name of the test)

### Debugging VM tests

The following techniques can be used to debug a VM test:

#### Print Statements

Locate the definition (see above) and add print statements, for example `print(client.succeed("systemctl --failed"))`, then re-run the test via `nix build` (see above).

#### Interactive Shell

- Execute the vm test outside the nix sandbox via the following command:
  `nix run .#checks.x86_64-linux.{test-attr-name}.driver -- --interactive`
- Then run the commands in the machines manually, for example:
    ```python3
    start_all()
    machine1.succeed("echo hello")
    ```

#### Breakpoints

To get an interactive shell at a specific line in the VM test script, add a `breakpoint()` call before the line to debug, then run the test outside of the sandbox via:
`nix run .#checks.x86_64-linux.{test-attr-name}.driver`
## NixOS Container Tests

These are very similar to NixOS VM tests, in that they also run virtualized nixos machines, but they use containers instead of VMs, which are much cheaper to launch.
As of now the container test driver is a downstream development in clan-core.
Basically everything stated under the NixOS VM tests section applies here, with some limitations.

Limitations:

- does not yet support networking
- supports only one machine as of now

### Where to find examples for NixOS container tests

Existing nixos container tests in clan-core can be found by using ripgrep:
```shellSession
rg "import.*/lib/container-test.nix"
```

## Python tests via pytest

Since the clan cli is written in python, the `pytest` framework is used to define unit tests and integration tests via python.
Due to their superior efficiency, these should be preferred over VM tests wherever possible.

### When to use python tests

- writing unit tests for python functions and modules, or bugfixes of such
- all integration tests that do not require building or running a nixos machine
- impure integration tests that require internet access (very rare, try to avoid)

### When not to use python tests

- integration tests that require building or running a nixos machine (use NixOS VM or container tests instead)
- testing behavior of a nix function or library (use nix eval tests instead)

### Finding examples of python tests

Existing python tests in clan-core can be found by using ripgrep:
```shellSession
rg "import pytest"
```

### Locating definitions of failing python tests

If any python test fails in the CI pipeline, an error message like this can be found at the end of the log:
```
...
FAILED tests/test_machines_cli.py::test_machine_delete - clan_cli.errors.ClanError: Template 'new-machine' not in 'inputs.clan-core
...
```

In this case the test is defined in the file `/tests/test_machines_cli.py` via the test function `test_machine_delete`.

### Adding python tests

If a specific python module is tested, the test should be located near the tested module in a subdirectory called `./tests`.
If the test is not clearly related to a specific module, put it in the top-level `./tests` directory of the tested python package. For `clan-cli` this would be `/pkgs/clan-cli/clan_cli/tests`.
All filenames must be prefixed with `test_` and test functions prefixed with `test_` for pytest to discover them. A minimal example follows.
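A minimal example following these conventions (the file path and assertion are illustrative):

```python
# /pkgs/clan-cli/clan_cli/tests/test_example.py -- illustrative only
def test_example() -> None:
    # pytest discovers this because both the file and the function are prefixed with `test_`
    assert 1 + 1 == 2
```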

### Running python tests

#### Running all python tests

To run all python tests which are executed in the CI pipeline locally, use this `nix build` command:

```shellSession
nix build .#checks.x86_64-linux.clan-pytest-{with,without}-core
```

#### Running a specific python test

To run a specific python test outside the nix sandbox:

1. Enter the development environment of the python package, by either:
    - having direnv enabled and entering the directory of the package (eg. `/pkgs/clan-cli`),
    - or using the command `select-shell {package}` in the top-level dev shell of clan-core (eg. `select-shell clan-cli`).
2. Execute the test via pytest:
   `pytest ./path/to/test_file.py::test_function_name -s -n0`

The flags `-s -n0` forward all stdout/stderr output to the terminal and make it possible to debug interactively via `breakpoint()`.

### Debugging python tests

To debug a specific python test, find its definition (see above) and make sure to enter the correct dev environment for that python package.

Modify the test and add `breakpoint()` statements to it.

Execute the test using the flags `-s -n0` in order to get an interactive shell at the breakpoint:

```shellSession
pytest ./path/to/test_file.py::test_function_name -s -n0
```
## Nix Eval Tests

### When to use nix eval tests

Nix eval tests are good for testing any nix logic, including

- nix functions
- nix libraries
- modules for the nixos module system

### When not to use nix eval tests

- tests that require building nix derivations (except some very cheap ones)
- tests that require running programs written in other languages
- tests that require building or running nixos machines

### Finding examples of nix eval tests

Existing nix eval tests can be found via this ripgrep command:

```shellSession
rg "nix-unit --eval-store"
```

### Locating definitions of failing nix eval tests

Failing nix eval tests look like this:

```shellSession
> ✅ test_attrsOf_attrsOf_submodule
> ✅ test_attrsOf_submodule
> ❌ test_default
>    /build/nix-8-2/expected.nix --- Nix
>    1 { foo = { bar = { __prio = 1500; }; }   1 { foo = { bar = { __prio = 1501; }; }
>    .  ; }                                    .  ; }
>
>
> ✅ test_no_default
> ✅ test_submodule
> ✅ test_submoduleWith
> ✅ test_submodule_with_merging
>
> 😢 6/7 successful
> error: Tests failed
```

To locate the definition, find the flake attribute name of the failing test near the top of the CI Job page, for example `gitea:clan/clan-core#checks.x86_64-linux.lib-values-eval/1242`.

In this case `lib-values-eval` is the attribute we are looking for.

Find the attribute via ripgrep:

```shellSession
$ rg "lib-values-eval ="
lib/values/flake-module.nix
21:    lib-values-eval = pkgs.runCommand "tests" { nativeBuildInputs = [ pkgs.nix-unit ]; } ''
```

In this case the test is defined in the file `lib/values/flake-module.nix` line 21.

### Adding nix eval tests

In clan core, the following pattern is usually followed:

- tests are put in a `test.nix` file
- a CI Job is exposed via a `flake-module.nix`
- that `flake-module.nix` is imported via the `flake.nix` at the root of the project

For example see `/lib/values/{test.nix,flake-module.nix}`. A minimal sketch follows.
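A minimal sketch of such a `test.nix`, using the `expr`/`expected` shape that nix-unit checks (described in the debugging section below); the file path and test are illustrative:

```nix
# lib/example/test.nix -- illustrative nix-unit test file
{ ... }:
{
  test_addition = {
    expr = 1 + 1;
    expected = 2;
  };
}
```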

### Running nix eval tests

Since all nix eval tests are exposed via the flake outputs, they can be run via `nix build`:

```shellSession
nix build .#checks.x86_64-linux.{test-attr-name}
```

For quicker iteration times, instead of `nix build` use the `nix-unit` command available in the dev environment.
Example:

```shellSession
nix-unit --flake .#legacyPackages.x86_64-linux.{test-attr-name}
```

### Debugging nix eval tests

Follow the instructions above to find the definition of the test, then use one of the following techniques:

#### Print debugging

Add `lib.trace` or `lib.traceVal` statements in order to print some variables during evaluation.

#### Nix repl

Use `nix repl` to evaluate and inspect the test.

Each test consists of an `expr` (expression) and an `expected` field. `nix-unit` simply checks if `expr == expected` and prints the diff if that's not the case.

`nix repl` can be used to inspect `expr` manually, or any other variables that you choose to expose.

Example:

```shellSession
$ nix repl
Nix 2.25.5
Type :? for help.
nix-repl> tests = import ./lib/values/test.nix {}

nix-repl> tests
{
  test_attrsOf_attrsOf_submodule = { ... };
  test_attrsOf_submodule = { ... };
  test_default = { ... };
  test_no_default = { ... };
  test_submodule = { ... };
  test_submoduleWith = { ... };
  test_submodule_with_merging = { ... };
}

nix-repl> tests.test_default.expr
{
  foo = { ... };
}
```
@@ -143,3 +143,25 @@ Ensure the path to the public key is correct.

```bash
clan backups create mymachine
```

- **Restoring Backups:** To restore a backup that has been listed by the list command (NAME):

  ```bash
  clan backups restore [MACHINE] [PROVIDER] [NAME]
  ```

  Example (restoring a machine called `client` with the backup provider `borgbackup`):

  ```bash
  clan backups restore client borgbackup [NAME]
  ```

  The `backups` command is service-aware and allows optional specification of the `--service` flag.

  To restore only the service called `zerotier` on a machine called `controller` through the backup provider `borgbackup`, use the following command:

  ```bash
  clan backups restore controller borgbackup [NAME] --service zerotier
  ```
28 docs/site/getting-started/check.md Normal file
@@ -0,0 +1,28 @@
### Generate Facts and Vars
|
||||
|
||||
Typically, this step is handled automatically when a machine is deployed. However, to enable the use of `nix flake check` with your configuration, it must be completed manually beforehand.
|
||||
|
||||
Currently, generating all the necessary facts requires two separate commands. This is due to the coexistence of two parallel secret management solutions:
|
||||
the newer, recommended version (`clan vars`) and the older version (`clan facts`) that we are slowly phasing out.
|
||||
|
||||
To generate both facts and vars, execute the following commands:
|
||||
|
||||
```sh
|
||||
clan facts generate && clan vars generate
|
||||
```
|
||||
|
||||
|
||||
### Check Configuration
|
||||
|
||||
Validate your configuration by running:
|
||||
|
||||
```bash
|
||||
nix flake check
|
||||
```
|
||||
|
||||
This command helps ensure that your system configuration is correct and free from errors.
|
||||
|
||||
!!! Tip
|
||||
|
||||
You can integrate this step into your [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) workflow to ensure that only valid Nix configurations are merged into your codebase.
|
||||
|
||||
@@ -3,7 +3,6 @@ Managing machine configurations can be done in the following ways:

- writing `nix` expressions in a `flake.nix` file,
- placing `autoincluded` files into your machine directory,
- configuring everything in a simple UI (upcoming).

Clan currently offers the following methods to configure machines:
@@ -79,9 +78,14 @@ Adding or configuring a new machine requires two simple steps:
└─nvme0n1p3 nvme-eui.e8238fa6bf530001001b448b4aec2929-part3 swap 16.8G
```

1. Edit the following fields inside the `./machines/jon/configuration.nix` and/or `./machines/sara/configuration.nix`
!!! Warning
    Make sure to copy the `ID-LINK` from the toplevel disk device like `nvme0n1` or `sda`, not from a partition like `nvme0n1p1` or `sda1`

```nix title="./machines/<machine>/configuration.nix" hl_lines="13 18 23 27"

2. Edit the following fields inside the `./machines/jon/configuration.nix` and/or `./machines/sara/configuration.nix`

<!-- Note: Use "jon" instead of "<machine>" as "<" is not supported in title tag -->
```nix title="./machines/jon/configuration.nix" hl_lines="13 18 22 26"
{
  imports = [
    ./hardware-configuration.nix
@@ -94,16 +98,15 @@ Adding or configuring a new machine requires two simple steps:
  ];

  # Put your username here for login
  users.users.user.username = "__YOUR_USERNAME__";
  users.users.user.name = "__YOUR_USERNAME__";

  # Set this for clan commands use ssh i.e. `clan machines update`
  # Set this for clan commands that use ssh
  # If you change the hostname, you need to update this line to root@<new-hostname>
  # This only works however if you have avahi running on your admin machine else use IP
  clan.core.networking.targetHost = "root@__IP__";

  # You can get your disk id by running the following command on the installer:
  # Replace <IP> with the IP of the installer printed on the screen or by running the `ip addr` command.
  # ssh root@<IP> lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT

  # Replace this __CHANGE_ME__ with the result of the lsblk command from step 1.
  disko.devices.disk.main.device = "/dev/disk/by-id/__CHANGE_ME__";

  # IMPORTANT! Add your SSH key here
@@ -114,80 +117,32 @@ Adding or configuring a new machine requires two simple steps:
}
```
|
||||
You can also create additional machines using the `clan machines create` command:
|
||||
|
||||
```
|
||||
$ clan machines create --help
|
||||
usage: clan [-h] [SUBCOMMAND] machines create [-h] [--tags TAGS [TAGS ...]] [--template-name TEMPLATE_NAME]
|
||||
[--target-host TARGET_HOST] [--debug] [--option name value] [--flake PATH]
|
||||
machine_name
|
||||
|
||||
positional arguments:
|
||||
machine_name The name of the machine to create
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
--tags TAGS [TAGS ...]
|
||||
Tags to associate with the machine. Can be used to assign multiple machines to services.
|
||||
--template-name TEMPLATE_NAME
|
||||
The name of the template machine to import
|
||||
--target-host TARGET_HOST
|
||||
Address of the machine to install and update, in the format of user@host:1234
|
||||
--debug Enable debug logging
|
||||
--option name value Nix option to set
|
||||
--flake PATH path to the flake where the clan resides in, can be a remote flake or local, can be set through
|
||||
the [CLAN_DIR] environment variable
|
||||
```
|
||||
|
||||
|
||||
!!! Info "Replace `__YOUR_USERNAME__` with the ip of your machine, if you use avahi you can also use your hostname"
|
||||
!!! Info "Replace `__IP__` with the ip of your machine, if you use avahi you can also use your hostname"
|
||||
!!! Info "Replace `__CHANGE_ME__` with the appropriate identifier, such as `nvme-eui.e8238fa6bf530001001b448b4aec2929`"
|
||||
!!! Info "Replace `__CHANGE_ME__` with the appropriate `ID-LINK` identifier, such as `nvme-eui.e8238fa6bf530001001b448b4aec2929`"
|
||||
!!! Info "Replace `__YOUR_SSH_KEY__` with your personal key, like `ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILoMI0NC5eT9pHlQExrvR5ASV3iW9+BXwhfchq0smXUJ jon@jon-desktop`"
|
||||
|
||||
These steps will allow you to update your machine later.
|
||||
|
||||
### Step 2: Detect Drivers
|
||||
You can also create additional machines using the cli:
|
||||
|
||||
Generate the `hardware-configuration.nix` file for your machine by executing the following command:
|
||||
```
|
||||
$ clan machines create <machinename>
|
||||
```
|
||||
|
||||
```bash
|
||||
clan machines update-hardware-config [MACHINE_NAME] [HOSTNAME]
|
||||
```
|
||||
|
||||
replace `[MACHINE_NAME]` with the name of the machine i.e. `jon` and `[HOSTNAME]` with the `ip_address` or `hostname` of the machine within the network. i.e. `<IP>`
|
||||
|
||||
!!! Example
|
||||
```bash
|
||||
clan machines update-hardware-config jon
|
||||
```
|
||||
|
||||
This command connects to the ip configured in the previous step, runs [nixos-facter](https://github.com/nix-community/nixos-facter)
|
||||
to detect hardware configurations (excluding filesystems), and writes them to `machines/jon/facter.json`.
|
||||
|
||||
### Step 3: Custom Disk Formatting
|
||||
### Step 2: Custom Disk Formatting
|
||||
|
||||
In `./modules/disko.nix`, a simple `ext4` disk partitioning scheme is defined for the Disko module. For more complex disk partitioning setups,
|
||||
refer to the [Disko templates](https://github.com/nix-community/disko-templates) or [Disko examples](https://github.com/nix-community/disko/tree/master/example).
|
||||
|
||||
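A minimal sketch of such a single-disk GPT + `ext4` layout (the attribute names follow the upstream Disko examples; treat this as an illustration rather than the verbatim contents of `./modules/disko.nix`):

```nix
# modules/disko.nix - minimal single-disk GPT + ext4 layout (sketch)
{
  disko.devices.disk.main = {
    type = "disk";
    device = "/dev/disk/by-id/__CHANGE_ME__";
    content = {
      type = "gpt";
      partitions = {
        ESP = {
          size = "500M";
          type = "EF00"; # EFI system partition
          content = {
            type = "filesystem";
            format = "vfat";
            mountpoint = "/boot";
          };
        };
        root = {
          size = "100%"; # use the rest of the disk
          content = {
            type = "filesystem";
            format = "ext4";
            mountpoint = "/";
          };
        };
      };
    };
  };
}
```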
### Step 4: Custom Configuration
### (Optional): Renaming Machine

Modify `./machines/jon/configuration.nix` to personalize the system settings according to your requirements.
If you wish to name your machine something else, do the following steps:
For renaming jon to your own machine name, you can use the following command:

```
mv ./machines/jon/configuration.nix ./machines/newname/configuration.nix
git mv ./machines/jon ./machines/newname
```

Then rename `jon` to your preferred name in `machines` in `flake.nix` as well as the import line:

```diff
- imports = [ ./machines/jon/configuration.nix ];
+ imports = [ ./machines/__NEW_NAME__/configuration.nix ];
```

!!! Info "Replace `__NEW_NAME__` with the name of the machine"

Note that our clan lives inside a git repository.
Only files that have been added with `git add` are recognized by `nix`.
So for every file that you add or rename you also need to run:
@@ -196,14 +151,11 @@ So for every file that you add or rename you also need to run:
git add ./path/to/my/file
```

For renaming jon to your own machine name, you can use the following command:

```
git mv ./machines/jon ./machines/newname
```
### (Optional): Removing a Machine

If you only want to set up a single machine at this point, you can delete `sara` from `flake.nix` as well as from the machines directory:

```
git rm ./machines/sara
git rm -rf ./machines/sara
```
@@ -1,13 +1,7 @@
# Deploy your Clan

Integrating a new machine into your Clan environment is an easy yet flexible process, allowing for straightforward management of multiple NixOS configurations.
Now that you have created a new machine, we will walk through how to install it.

We'll walk you through adding a new computer to your Clan.

## Installing a New Machine

Clan CLI, in conjunction with [nixos-anywhere](https://github.com/nix-community/nixos-anywhere), provides a seamless method for installing NixOS on various machines.

This process involves preparing a suitable hardware and disk partitioning configuration and ensuring the target machine is accessible via SSH.

### Step 0. Prerequisites

@@ -24,7 +18,7 @@ This process involves preparing a suitable hardware and disk partitioning config

2. Boot the target machine and connect it to a network that makes it reachable from your setup computer.

=== "**Remote Machines**"
=== "**Cloud VMs**"

- [x] **Two Computers**: You need one computer that you're getting ready (we'll call this the Target Computer) and another one to set it up from (we'll call this the Setup Computer). Make sure both can talk to each other over the network using SSH.
- [x] **Machine configuration**: See our basic [configuration guide](./configure.md)
@@ -107,32 +101,27 @@ This process involves preparing a suitable hardware and disk partitioning config
For easy sharing of deployment information via QR code, we highly recommend using [KDE Connect](https://apps.kde.org/de/kdeconnect/).

There are two ways to deploy your machine:
=== "**Password Auth**"
    Run the following command to log in over SSH with password authentication
    ```bash
    clan machines install [MACHINE] --target-host <IP> --update-hardware-config nixos-facter
    ```
=== "**QR Code Auth**"
    Using the JSON contents of the QR Code:
    ```terminal
    clan machines install [MACHINE] --json "[JSON]" --update-hardware-config nixos-facter
    ```
    OR using a picture containing the QR code
    ```terminal
    clan machines install [MACHINE] --png [PATH] --update-hardware-config nixos-facter
    ```

1. **SSH with Password Authentication**
   Run the following command to install using SSH:
   ```bash
   clan machines install [MACHINE] --target-host <IP>
   ```

2. **Scanning a QR Code for Installation Details**
   You can input the information by following one of these methods:
   - **Using a JSON String or File Path:**
     Provide the path to a JSON string or input the string directly:
     ```terminal
     clan machines install [MACHINE] --json [JSON]
     ```
   - **Using an Image Containing the QR Code:**
     Provide the path to an image file containing the relevant QR code:
     ```terminal
     clan machines install [MACHINE] --png [PATH]
     ```

=== "**SSH access**"
=== "**Cloud VM**"

Replace `<target_host>` with the **target computer's IP address**:

```bash
clan machines install [MACHINE] --target-host <target_host>
clan machines install [MACHINE] --target-host <target_host> --update-hardware-config nixos-facter
```
@@ -49,7 +49,8 @@ Replace `kernelModules` with the ethernet module loaded one on your target machi
port = 7172;
authorizedKeys = [ "<yourkey>" ];
hostKeys = [
  "/var/lib/initrd-ssh-key"
  "/var/lib/initrd_host_ed25519_key"
  "/var/lib/initrd_host_rsa_key"
];
};
};
@@ -73,7 +74,7 @@ Before starting the installation process, ensure that the SSH public key is copi
ssh-copy-id -o PreferredAuthentications=password -o PubkeyAuthentication=no root@nixos-installer.local
```

### Step 1.5: Prepare Secret Key and Clear Disk Data
### Step 1.5: Prepare Secret Key and Partition Disks

1. Access the installer using SSH:

@@ -90,13 +91,13 @@ nano /tmp/secret.key
3. Discard the old disk partition data:

```bash
blkdiscard /dev/disk/by-id/nvme-eui.002538b931b59865
blkdiscard /dev/disk/by-id/<installdisk>
```

4. Run the `clan` machine installation with the following command:
4. Run `clan` machines install, only running kexec and disko, with the following command:

```bash
clan machines install gchq-local --target-host root@nixos-installer --yes --no-reboot
clan machines install gchq-local --target-host root@nixos-installer --phases kexec,disko
```

### Step 2: ZFS Pool Import and System Installation

@@ -107,14 +108,10 @@ clan machines install gchq-local --target-host root@nixos-installer --yes --no-r
ssh root@nixos-installer.local
```

2. Perform the following commands on the remote installation environment:
2. Run the following command on the remote installation environment:

```bash
zpool import zroot
zfs set keylocation=prompt zroot/root
zfs load-key zroot/root
zfs set mountpoint=/mnt zroot/root/nixos
mount /dev/nvme0n1p2 /mnt/boot
```

3. Disconnect from the SSH session:

@@ -123,43 +120,36 @@ mount /dev/nvme0n1p2 /mnt/boot
CTRL+D
```

4. Securely copy your local `initrd_rsa_key` to the installer's `/mnt` directory:
4. Locally generate ssh host keys. You only need to generate ones for the algorithms you're using in `authorizedKeys`.

```bash
scp ~/.ssh/initrd_rsa_key root@nixos-installer.local:/mnt/var/lib/initrd-ssh-key
ssh-keygen -q -N "" -t ed25519 -f ./initrd_host_ed25519_key
ssh-keygen -q -N "" -t rsa -b 4096 -f ./initrd_host_rsa_key
```

5. SSH back into the installer:
5. Securely copy your local initrd ssh host keys to the installer's `/mnt` directory:

```bash
ssh root@nixos-installer.local
scp ./initrd_host* root@nixos-installer.local:/mnt/var/lib/
```

6. Navigate to the `/mnt` directory, enter the `nixos-enter` environment, and then exit:

6. Install nixos to the mounted partitions
```bash
cd /mnt
nixos-enter
realpath /run/current-system
exit
clan machines install gchq-local --target-host root@nixos-installer --phases install
```

7. Run the `nixos-install` command with the appropriate system path `<SYS_PATH>`:

```bash
nixos-install --no-root-passwd --no-channel-copy --root /mnt --system <SYS_PATH>
```

8. After the installation process, unmount `/mnt/boot`, change the ZFS mountpoint, and reboot the system:
7. After the installation process, unmount `/mnt/boot`, change the ZFS mountpoints and unmount all the ZFS volumes by exporting the zpool:

```bash
umount /mnt/boot
cd /
zfs set mountpoint=/ zroot/root/nixos
reboot
zfs set -u mountpoint=/ zroot/root/nixos
zfs set -u mountpoint=/tmp zroot/root/tmp
zfs set -u mountpoint=/home zroot/root/home
zpool export zroot
```

9. Perform a hard reboot of the machine and remove the USB stick.
8. Perform a reboot of the machine and remove the USB installer.

### Step 3: Accessing the Initial Ramdisk (initrd) Environment
@@ -42,7 +42,7 @@ By the end of this guide, you'll have a fresh NixOS configuration ready to push
Add the Clan CLI into your development workflow:

```bash
nix shell git+https://git.clan.lol/clan/clan-core#clan-cli
nix shell git+https://git.clan.lol/clan/clan-core#clan-cli --refresh
```

You can find reference documentation for the `clan` cli program [here](../reference/cli/index.md).
@@ -92,6 +92,21 @@ This should yield the following:
5 directories, 9 files
```

??? info "Recommended way of sourcing the `clan` cli tool"
    The default template also adds the `clan` cli tool to the development shell,
    meaning you can get the exact version you need directly from the folder
    you are in right now.

    In the `my-clan` directory run the following command:
    ```
    nix develop
    ```
    That way you will have the tool available in the shell environment.
    We also recommend setting up [direnv](https://direnv.net/) for your shell, for a more convenient
    experience.

```bash
clan machines list
```
@@ -1,11 +1,16 @@
# Create an Installer Image
# Clan Installer Image for Physical Machines

Our installer image simplifies the process of performing remote installations.
To install Clan on physical machines, you need to use our custom installer image. This is necessary for proper installation and operation.

Follow our step-by-step guide to create and transfer this image onto a bootable USB drive.
!!! note "Using a Cloud VM?"
    If you're using a cloud provider's virtual machine (VM), you can skip this section and go directly to the [Configure Machines](configure.md) step. In this scenario, we automatically use [nixos-anywhere](https://github.com/nix-community/nixos-anywhere) to replace the kernel during runtime.

??? info "Why nixos-anywhere Doesn't Work on Physical Hardware?"
    nixos-anywhere relies on [kexec](https://wiki.archlinux.org/title/Kexec) to replace the running kernel with our custom one. This method often has compatibility issues with real hardware, especially systems with dedicated graphics cards like laptops and servers, leading to crashes and black screens.

??? info "Reasons for a Custom Install Image"
    Our custom install images are built to include essential tools like [nixos-facter](https://github.com/nix-community/nixos-facter) and support for [ZFS](https://wiki.archlinux.org/title/ZFS). They're also optimized to run on systems with as little as 1 GB of RAM, ensuring efficient performance even on lower-end hardware.

!!! info
    If you already have a NixOS machine you can ssh into (in the cloud for example) you can skip this chapter and go directly to [Configure Machines](configure.md).

### Step 0. Prerequisites

@@ -40,9 +45,9 @@ Follow our step-by-step guide to create and transfer this image onto a bootable
sudo umount /dev/sdb1
```
=== "**Linux OS**"
### Step 2. Flash Custom Installer
### Step 2. Create a Custom Installer

Using clan flash enables the inclusion of ssh public keys and wifi access points.
Using clan flash enables the inclusion of ssh public keys into the image.
It also allows setting the language and keymap in the installer image.

```bash
@@ -18,89 +18,128 @@ Clan
If you select multiple network technologies at the same time, e.g. (zerotier + yggdrasil),
you must choose one of them as the primary network; the machines are always connected via the primary network.

## 1. Set-Up the VPN Controller

The VPN controller is initially essential for providing configuration to new
peers. Once addresses are allocated, the controller's continuous operation is not essential.

1. **Designate a Machine**: Label a machine as the VPN controller in the clan,
referred to as `<CONTROLLER>` henceforth in this guide.
2. **Add Configuration**: Input the following configuration to the NixOS
configuration of the controller machine:
```nix
clan.core.networking.zerotier.controller = {
  enable = true;
  public = true;
};
```
3. **Update the Controller Machine**: Execute the following:
```bash
clan machines update <CONTROLLER>
```
Your machine is now operational as the VPN controller.

## 2. Add Machines to the VPN

To introduce a new machine to the VPN, adhere to the following steps:

1. **Update Configuration**: On the new machine, incorporate the following to its
configuration, substituting `<CONTROLLER>` with the controller machine name:
```nix
{ config, ... }: {
  clan.core.networking.zerotier.networkId = builtins.readFile (config.clan.core.settings.directory + "/machines/<CONTROLLER>/facts/zerotier-network-id");
}
```
1. **Update the New Machine**: Execute:
```bash
$ clan machines update <NEW_MACHINE>
```
Replace `<NEW_MACHINE>` with the designated new machine name.

!!! Note "For Private Networks"
    1. **Retrieve Zerotier Metadata**

    === "From the repo"
        **Retrieve the ZeroTier IP**: In the clan repo, execute:
        ```console
        $ clan facts list <NEW_MACHINE> | jq -r '.["zerotier-ip"]'
        ```

        The returned address is the Zerotier IP address of the machine.

    === "On the new machine"
        **Retrieve the ZeroTier ID**: On the `new_machine`, execute:
        ```bash
        $ sudo zerotier-cli info
        ```
        Example Output:
        ```{.console, .no-copy}
        200 info d2c71971db 1.12.1 OFFLINE
        ```
        , where `d2c71971db` is the ZeroTier ID.
This guide shows you how to configure `zerotier` either through `NixOS Options` directly, or Clan's `Inventory` System.

2. **Authorize the New Machine on the Controller**: On the controller machine,
execute:
=== "**Inventory**"
|
||||
## 1. Choose the Controller
|
||||
|
||||
=== "with ZerotierIP"
|
||||
```bash
|
||||
$ sudo zerotier-members allow --member-ip <IP>
|
||||
```
|
||||
Substitute `<IP>` with the ZeroTier IP obtained previously.
|
||||
=== "with ZerotierID"
|
||||
```bash
|
||||
$ sudo zerotier-members allow <ID>
|
||||
```
|
||||
Substitute `<ID>` with the ZeroTier ID obtained previously.
|
||||
The controller is the initial entrypoint for new machines into the vpn.
|
||||
It will sign the id's of new machines.
|
||||
Once id's are signed, the controller's continuous operation is not essential.
|
||||
A good controller choice is nevertheless a machine that can always be reached for updates - so that new peers can be added to the network.
|
||||
|
||||
2. **Verify Connection**: On the `new_machine`, re-execute:
|
||||
```bash
|
||||
$ sudo zerotier-cli info
|
||||
```
|
||||
The status should now be "ONLINE":
|
||||
```{.console, .no-copy}
|
||||
200 info d2c71971db 1.12.1 ONLINE
|
||||
```
|
||||
For the purpose of this guide we have two machines:
|
||||
|
||||
- The `controller` machine, which will be the zerotier controller.
|
||||
- The `new_machine` machine, which is the machine we want to add to the vpn network.
|
||||
|
||||
## 2. Configure the Inventory
|
||||
```nix
|
||||
clan.inventory = {
|
||||
services.zerotier.default = {
|
||||
roles.controller.machines = [
|
||||
"controller"
|
||||
];
|
||||
roles.peer.machines = [
|
||||
"new_machine"
|
||||
];
|
||||
};
|
||||
};
|
||||
```
|
||||
|
||||
## 3. Apply the Configuration
|
||||
Update the `controller` machine:
|
||||
|
||||
```bash
|
||||
clan machines update controller
|
||||
```
|
||||
|
||||
|
||||
=== "**NixOS Options**"
|
||||
## 1. Set-Up the VPN Controller
|
||||
|
||||
The VPN controller is initially essential for providing configuration to new
|
||||
peers. Once addresses are allocated, the controller's continuous operation is not essential.
|
||||
|
||||
1. **Designate a Machine**: Label a machine as the VPN controller in the clan,
|
||||
referred to as `<CONTROLLER>` henceforth in this guide.
|
||||
2. **Add Configuration**: Input the following configuration to the NixOS
|
||||
configuration of the controller machine:
|
||||
```nix
|
||||
clan.core.networking.zerotier.controller = {
|
||||
enable = true;
|
||||
public = true;
|
||||
};
|
||||
```
|
||||
3. **Update the Controller Machine**: Execute the following:
|
||||
```bash
|
||||
clan machines update <CONTROLLER>
|
||||
```
|
||||
Your machine is now operational as the VPN controller.
|
||||
|
||||
## 2. Add Machines to the VPN
|
||||
|
||||
To introduce a new machine to the VPN, adhere to the following steps:
|
||||
|
||||
1. **Update Configuration**: On the new machine, incorporate the following to its
|
||||
configuration, substituting `<CONTROLLER>` with the controller machine name:
|
||||
```nix
|
||||
{ config, ... }: {
|
||||
clan.core.networking.zerotier.networkId = builtins.readFile (config.clan.core.settings.directory + "/machines/<CONTROLLER>/facts/zerotier-network-id");
|
||||
}
|
||||
```
|
||||
1. **Update the New Machine**: Execute:
|
||||
```bash
|
||||
$ clan machines update <NEW_MACHINE>
|
||||
```
|
||||
Replace `<NEW_MACHINE>` with the designated new machine name.
|
||||
|
||||
!!! Note "For Private Networks"
|
||||
1. **Retrieve Zerotier Metadata**
|
||||
|
||||
=== "From the repo"
|
||||
**Retrieve the ZeroTier IP**: In the clan repo, execute:
|
||||
```console
|
||||
$ clan facts list <NEW_MACHINE> | jq -r '.["zerotier-ip"]'
|
||||
```
|
||||
|
||||
The returned address is the Zerotier IP address of the machine.
|
||||
|
||||
=== "On the new machine"
|
||||
**Retrieve the ZeroTier ID**: On the `new_machine`, execute:
|
||||
```bash
|
||||
$ sudo zerotier-cli info
|
||||
```
|
||||
Example Output:
|
||||
```{.console, .no-copy}
|
||||
200 info d2c71971db 1.12.1 OFFLINE
|
||||
```
|
||||
, where `d2c71971db` is the ZeroTier ID.
|
||||
|
||||
|
||||
2. **Authorize the New Machine on the Controller**: On the controller machine,
|
||||
execute:
|
||||
|
||||
=== "with ZerotierIP"
|
||||
```bash
|
||||
$ sudo zerotier-members allow --member-ip <IP>
|
||||
```
|
||||
Substitute `<IP>` with the ZeroTier IP obtained previously.
|
||||
=== "with ZerotierID"
|
||||
```bash
|
||||
$ sudo zerotier-members allow <ID>
|
||||
```
|
||||
Substitute `<ID>` with the ZeroTier ID obtained previously.
|
||||
|
||||
2. **Verify Connection**: On the `new_machine`, re-execute:
|
||||
```bash
|
||||
$ sudo zerotier-cli info
|
||||
```
|
||||
The status should now be "ONLINE":
|
||||
```{.console, .no-copy}
|
||||
200 info d2c71971db 1.12.1 ONLINE
|
||||
```
|
||||
|
||||
!!! success "Congratulations!"
|
||||
The new machine is now part of the VPN, and the ZeroTier
|
||||
|
||||
@@ -1,7 +1,8 @@

Clan enables encryption of secrets (such as passwords & keys), ensuring security and ease of use among users.

Clan utilizes the [sops](https://github.com/getsops/sops) format and integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
By default Clan utilizes the [sops](https://github.com/getsops/sops) format and integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
Clan can also be configured to be used with other secret store [backends](https://docs.clan.lol/reference/clan-core/vars/#clan.core.vars.settings.secretStore).

This guide will walk you through:

@@ -39,7 +40,7 @@ Also add your age public key to the repository with 'clan secrets users add YOUR
### Add Your Public Key

```bash
clan secrets users add $USER <your_public_key>
clan secrets users add $USER --age-key <your_public_key>
```

It's best to choose the same username as on your Setup/Admin Machine that you use to control the deployment with.
@@ -53,33 +54,3 @@ sops/
└── key.json
```
If you followed the quickstart tutorial all necessary secrets are initialized at this point.


### Generate Facts and Vars

Typically, this step is handled automatically when a machine is deployed. However, to enable the use of `nix flake check` with your configuration, it must be completed manually beforehand.

Currently, generating all the necessary facts requires two separate commands. This is due to the coexistence of two parallel secret management solutions: the older, stable version (`clan secrets` and `clan facts`) and the newer, experimental version (`clan vars`).

To generate both facts and vars, execute the following commands:

```sh
clan facts generate && clan vars generate
```

### Check Configuration

Validate your configuration by running:

```bash
nix flake check
```

This command helps ensure that your system configuration is correct and free from errors.

!!! Tip

    You can integrate this step into your [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) workflow to ensure that only valid Nix configurations are merged into your codebase.
@@ -61,9 +61,9 @@ hide:

---

Use clan with [https://flake-parts.dev]()
Use clan with [https://flake.parts/]()

- [Contribute](./manual/contribute.md)
- [Contribute](./contributing/contribute.md)

---

@@ -73,7 +73,7 @@ hide:

## API Reference

**Auto generated API Documentation**
**Reference API Documentation**

<div class="grid cards" markdown>
@@ -81,7 +81,7 @@ For the provided flake example, your flake should now look like this:
outputs = { self, nixpkgs, ... }:
  let
    clan = clan-core.lib.buildClan {
      directory = self; # this needs to point at the repository root
      self = self; # this needs to point at the repository root
      specialArgs = {};
      inventory.meta.name = "NEEDS_TO_BE_UNIQUE"; # TODO: Changeme
@@ -18,8 +18,3 @@ $ tree -L 1
├── templates # Template files for creating a new Clan
└── vars
```

## Getting Started with Infrastructure

To dive into infrastructure, check out our clan infra repo: [clan-infra](https://git.clan.lol/clan/clan-infra). Please provide us with your public SOPS key so we can add you as an admin.
82
flake.lock
generated
@@ -1,5 +1,34 @@
{
  "nodes": {
    "data-mesher": {
      "inputs": {
        "flake-parts": [
          "flake-parts"
        ],
        "nixpkgs": [
          "nixpkgs"
        ],
        "systems": [
          "systems"
        ],
        "treefmt-nix": [
          "treefmt-nix"
        ]
      },
      "locked": {
        "lastModified": 1743379277,
        "narHash": "sha256-4BNv+I6hksqZeRCrEHcQygK0MV1acjA8+L2TtA11H3c=",
        "ref": "refs/heads/main",
        "rev": "bf8c5448d826e047b842d6f2ac0fc698e976dda5",
        "revCount": 375,
        "type": "git",
        "url": "https://git.clan.lol/clan/data-mesher"
      },
      "original": {
        "type": "git",
        "url": "https://git.clan.lol/clan/data-mesher"
      }
    },
    "disko": {
      "inputs": {
        "nixpkgs": [
@@ -7,11 +36,11 @@
        ]
      },
      "locked": {
        "lastModified": 1738148035,
        "narHash": "sha256-KYOATYEwaKysL3HdHdS5kbQMXvzS4iPJzJrML+3TKAo=",
        "lastModified": 1741786315,
        "narHash": "sha256-VT65AE2syHVj6v/DGB496bqBnu1PXrrzwlw07/Zpllc=",
        "owner": "nix-community",
        "repo": "disko",
        "rev": "18d0a984cc2bc82cf61df19523a34ad463aa7f54",
        "rev": "0d8c6ad4a43906d14abd5c60e0ffe7b587b213de",
        "type": "github"
      },
      "original": {
@@ -27,11 +56,11 @@
        ]
      },
      "locked": {
        "lastModified": 1738453229,
        "narHash": "sha256-7H9XgNiGLKN1G1CgRh0vUL4AheZSYzPm+zmZ7vxbJdo=",
        "lastModified": 1741352980,
        "narHash": "sha256-+u2UunDA4Cl5Fci3m7S643HzKmIDAe+fiXrLqYsR2fs=",
        "owner": "hercules-ci",
        "repo": "flake-parts",
        "rev": "32ea77a06711b758da0ad9bd6a844c5740a87abd",
        "rev": "f4330d22f1c5d2ba72d3d22df5597d123fdb60a9",
        "type": "github"
      },
      "original": {
@@ -42,11 +71,11 @@
    },
    "nixos-facter-modules": {
      "locked": {
        "lastModified": 1736931726,
        "narHash": "sha256-aY55yiifyo1XPPpbpH0kWlV1g2dNGBlx6622b7OK8ks=",
        "lastModified": 1738752252,
        "narHash": "sha256-/nA3tDdp/2g0FBy8966ppC2WDoyXtUWaHkZWL+N3ZKc=",
        "owner": "numtide",
        "repo": "nixos-facter-modules",
        "rev": "fa11d87b61b2163efbb9aed7b7a5ae0299e5ab9c",
        "rev": "60f8b8f3f99667de6a493a44375e5506bf0c48b1",
        "type": "github"
      },
      "original": {
@@ -57,22 +86,20 @@
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1738422722,
        "narHash": "sha256-Q4vhtbLYWBUnjWD4iQb003Lt+N5PuURDad1BngGKdUs=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "102a39bfee444533e6b4e8611d7e92aa39b7bec1",
        "type": "github"
        "lastModified": 315532800,
        "narHash": "sha256-Ls4VPCGSQrm6k3FCokyonfX/sgIdZc8f5ZzqEdukBFA=",
        "rev": "eb0e0f21f15c559d2ac7633dc81d079d1caf5f5f",
        "type": "tarball",
        "url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre776128.eb0e0f21f15c/nixexprs.tar.xz"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixpkgs-unstable",
        "repo": "nixpkgs",
        "type": "github"
        "type": "tarball",
        "url": "https://nixos.org/channels/nixpkgs-unstable/nixexprs.tar.xz"
      }
    },
    "root": {
      "inputs": {
        "data-mesher": "data-mesher",
        "disko": "disko",
        "flake-parts": "flake-parts",
        "nixos-facter-modules": "nixos-facter-modules",
@@ -89,16 +116,15 @@
        ]
      },
      "locked": {
        "lastModified": 1736953253,
        "narHash": "sha256-shJxzy7qypjq9hpETQ3gJsBZXO5E3KR0INca/xwiVp4=",
        "owner": "pinpox",
        "lastModified": 1743305778,
        "narHash": "sha256-Ux/UohNtnM5mn9SFjaHp6IZe2aAnUCzklMluNtV6zFo=",
        "owner": "Mic92",
        "repo": "sops-nix",
        "rev": "a7c6e64401b6dde13c0de90230cb64087c9d9693",
        "rev": "8e873886bbfc32163fe027b8676c75637b7da114",
        "type": "github"
      },
      "original": {
        "owner": "pinpox",
        "ref": "lazy-assertions",
        "owner": "Mic92",
        "repo": "sops-nix",
        "type": "github"
      }
@@ -125,11 +151,11 @@
        ]
      },
      "locked": {
        "lastModified": 1738070913,
        "narHash": "sha256-j6jC12vCFsTGDmY2u1H12lMr62fnclNjuCtAdF1a4Nk=",
        "lastModified": 1743081648,
        "narHash": "sha256-WRAylyYptt6OX5eCEBWyTwOEqEtD6zt33rlUkr6u3cE=",
        "owner": "numtide",
        "repo": "treefmt-nix",
        "rev": "bebf27d00f7d10ba75332a0541ac43676985dea3",
        "rev": "29a3d7b768c70addce17af0869f6e2bd8f5be4b7",
        "type": "github"
      },
      "original": {
19
flake.nix
@@ -2,7 +2,7 @@
  description = "clan.lol base operating system";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
    nixpkgs.url = "https://nixos.org/channels/nixpkgs-unstable/nixexprs.tar.xz";

    flake-parts.url = "github:hercules-ci/flake-parts";
    flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs";
@@ -12,13 +12,23 @@

    nixos-facter-modules.url = "github:numtide/nixos-facter-modules";

    sops-nix.url = "github:pinpox/sops-nix/lazy-assertions";
    sops-nix.url = "github:Mic92/sops-nix";
    sops-nix.inputs.nixpkgs.follows = "nixpkgs";

    systems.url = "github:nix-systems/default";

    treefmt-nix.url = "github:numtide/treefmt-nix";
    treefmt-nix.inputs.nixpkgs.follows = "nixpkgs";

    data-mesher = {
      url = "git+https://git.clan.lol/clan/data-mesher";
      inputs = {
        flake-parts.follows = "flake-parts";
        nixpkgs.follows = "nixpkgs";
        systems.follows = "systems";
        treefmt-nix.follows = "treefmt-nix";
      };
    };
  };

  outputs =
@@ -43,10 +53,6 @@
      meta.name = "clan-core";
    };

    flake = {
      clan.templates = import ./templates { };
    };

    systems = import systems;
    imports =
      # only importing existing paths allows to minimize the flake for test
@@ -57,6 +63,7 @@
      ./devShell.nix
      ./docs/nix/flake-module.nix
      ./flakeModules/flake-module.nix
      ./flakeModules/demo_iso.nix
      ./lib/filter-clan-core/flake-module.nix
      ./lib/flake-module.nix
      ./nixosModules/clanCore/vars/flake-module.nix
@@ -27,9 +27,13 @@ in
  };

  options.flake = flake-parts-lib.mkSubmoduleOptions {
    clan = lib.mkOption { type = types.raw; };
    clanInternals = lib.mkOption { type = types.raw; };
  };
  config = {
    flake.clan = {
      inherit (config.clan.clanInternals) templates;
    };
    flake.clanInternals = config.clan.clanInternals;
    flake.nixosConfigurations = config.clan.nixosConfigurations;
  };
101
flakeModules/demo_iso.nix
Normal file
@@ -0,0 +1,101 @@
{ self, ... }:

let
  pkgs = self.inputs.nixpkgs.legacyPackages.x86_64-linux;

  demoModule = {
    imports = [
      "${self.clanModules.mycelium}/roles/peer.nix"
      # TODO do we need this? maybe not
      (
        { modulesPath, ... }:
        {
          imports = [ "${modulesPath}/installer/cd-dvd/iso-image.nix" ];
        }
      )
    ];
  };

  clan_welcome = pkgs.writeShellApplication {
    name = "clan_welcome";
    runtimeInputs = [
      pkgs.gum
      pkgs.gitMinimal
      pkgs.retry
      self.packages.${pkgs.system}.clan-cli
    ];
    text = ''
      set -efu

      gum confirm '
        Welcome to Clan, a NixOS-based operating system for the CLAN project.
        This installer can be used to try out clan on your machine, for that reason we setup a cooperative environment to play and hack together :)
      ' || exit 1
      until retry -t 5 ping -c 1 -W 1 git.clan.lol &> /dev/null; do
        # TODO make this nicer
        nmtui
      done
      if ! test -e ~/clan-core; then
        # git clone https://git.clan.lol/clan/clan-core.git ~/clan-core
        cp -rv ${self} clan-core
      fi
      cd clan-core
      clan machines morph demo-template --i-will-be-fired-for-using-this
      exit
    '';
  };

  morphModule = {
    imports = [
      (
        { modulesPath, ... }:
        {
          imports = [ "${modulesPath}/image/images.nix" ];
        }
      )
    ];
    image.modules.iso.isoImage.squashfsCompression = "zstd -Xcompression-level 1";
    networking.networkmanager.enable = true;
    services.getty.autologinUser = "root";
    programs.bash.interactiveShellInit = ''
      if [[ "$(tty)" =~ /dev/(tty1|hvc0|ttyS0)$ ]]; then
        # workaround for https://github.com/NixOS/nixpkgs/issues/219239
        systemctl restart systemd-vconsole-setup.service

        reset

        ${clan_welcome}/bin/clan_welcome
      fi
    '';
  };
in
{
  clan.templates.machine.demo-template = {
    description = "Demo machine for the CLAN project";
    # path = pkgs.runCommand "demo-template" {} ''
    #   mkdir -p $out
    #   echo '{ self, ... }: { imports = [ self.nixosModules.demoModule ]; }' > $out/configuration.nix
    # '';
    path = ./demo_template;
  };
  flake.nixosModules = { inherit morphModule demoModule; };
  perSystem =
    { system, lib, ... }:
    {
      packages =
        lib.mkIf
          (lib.any (x: x == system) [
            "x86_64-linux"
            "aarch64-linux"
          ])
          {
            demo-iso =
              (self.inputs.nixpkgs.lib.nixosSystem {
                modules = [
                  { nixpkgs.hostPlatform = system; }
                  morphModule
                ];
              }).config.system.build.images.iso;
          };
    };
}
38
flakeModules/demo_template/configuration.nix
Normal file
@@ -0,0 +1,38 @@
{ pkgs, config, ... }:
{
  fileSystems."/".device = "nodev";
  boot.loader.grub.device = "nodev";
  clan.core.vars.settings.secretStore = "fs";
  clan.core.vars.generators.mycelium = {
    files."key" = { };
    files."ip".secret = false;
    files."pubkey".secret = false;
    runtimeInputs = [
      pkgs.mycelium
      pkgs.coreutils
      pkgs.jq
    ];
    script = ''
      timeout 5 mycelium --key-file "$out"/key || :
      mycelium inspect --key-file "$out"/key --json | jq -r .publicKey > "$out"/pubkey
      mycelium inspect --key-file "$out"/key --json | jq -r .address > "$out"/ip
    '';
  };
  services.mycelium = {
    enable = true;
    addHostedPublicNodes = true;
    openFirewall = true;
    keyFile = config.clan.core.vars.generators.mycelium.files.key.path;
  };
  services.getty.autologinUser = "root";
  programs.bash.interactiveShellInit = ''
    if [[ "$(tty)" =~ /dev/(tty1|hvc0|ttyS0)$ ]]; then
      # workaround for https://github.com/NixOS/nixpkgs/issues/219239
      systemctl restart systemd-vconsole-setup.service

      reset

      echo "your mycelium IP is: $(cat /var/lib/mycelium/ip)"
    fi
  '';
}
@@ -23,6 +23,7 @@
"*.clan-flake"
"*.code-workspace"
"*.pub"
"*.priv"
"*.typed"
"*.age"
"*.list"
@@ -37,6 +38,7 @@
# prettier messes up our mkdocs flavoured markdown
"*.md"

"checks/data-mesher/vars/*"
"checks/lib/ssh/privkey"
"checks/lib/ssh/pubkey"
"checks/matrix-synapse/synapse-registration_shared_secret"
@@ -56,7 +56,7 @@
"machines": {
  "test-inventory-machine": {
    "config": {
      "packages": ["zed-editor"]
      "packages": ["hello"]
    },
    "extraModules": []
  }
72
lib/README.md
Normal file
@@ -0,0 +1,72 @@
# ClanLib

This folder is supposed to contain clan-specific Nix functions,

such as:

- the build-clan function
- select
- the build-inventory function
- the json-schema-converter

## Structure

Similar to `nixpkgs/lib`, this produces a recursive attribute set in a fixed-point.
Functions within lib can depend on each other to create new abstractions.
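A minimal sketch of the fixed-point pattern (illustrative only, not the actual clanLib contents):

```nix
# Sketch: a self-referential attribute set built with lib.fix
let
  lib = import <nixpkgs/lib>;
  clanLib = lib.fix (self: {
    double = x: x * 2;
    # quadruple is defined in terms of another clanLib function
    quadruple = x: self.double (self.double x);
  });
in
clanLib.quadruple 3 # => 12
```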
### Conventions

Note: This is not consistently enforced yet.
If you start a new feature, or refactor or touch existing ones, please help us move towards the structure illustrated below.

A single feature-set/module may be organized like this:

```nix
# ↓ The final clanLib
{lib, clanLib, ...}:
# ↓ portion to add to clanLib
{
  inventory.resolveTags = tags: inventory.machines; # implementation
  inventory.buildMachines = x: clanLib.inventory.resolveTags x; # implementation
}
```

Every bigger feature should live in a subfolder with the feature name.
It should contain two files:

- `impl.nix`
- `test.nix`
- Everything else may be adopted as needed.

Example filetree:

```sh
.
├── default.nix
├── feature_foo
│   ├── impl.nix
│   └── test.nix
└── feature_bar
    ├── impl.nix
    ├── complex-subfeature
    │   ├── impl.nix
    │   └── test.nix
    ├── testless-subfeature # <- We immediately see that this feature is not tested on itself.
    │   └── impl.nix
    └── test.nix
```

```nix
# default.nix
{lib, clanLib, ...}:
{
  inventory.resolveTags = import ./resolveTags { inherit lib clanLib; };
}
```
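A sketch of what such an `impl.nix`/`test.nix` pair could look like (the feature and its contents are hypothetical):

```nix
# feature_foo/impl.nix - hypothetical implementation
{ lib, ... }:
{
  # Return the names of all machines that carry at least one of the given tags
  resolveTags = tags: machines:
    builtins.attrNames (
      lib.filterAttrs (_name: m: lib.any (t: lib.elem t (m.tags or [ ])) tags) machines
    );
}
```

```nix
# feature_foo/test.nix - nix-unit tests for the hypothetical implementation
{ lib ? (import <nixpkgs> { }).lib }:
let
  impl = import ./impl.nix { inherit lib; };
in
{
  test_resolveTags_matches = {
    expr = impl.resolveTags [ "web" ] {
      jon = { tags = [ "web" ]; };
      sara = { tags = [ ]; };
    };
    expected = [ "jon" ];
  };
}
```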
## Testing

For testing we use [nix-unit](https://github.com/nix-community/nix-unit).

TODO: define a helper that automatically hooks up `tests` in `flake.legacyPackages` and a corresponding buildable `checks` attribute
@@ -1,23 +0,0 @@
{
  lib,
  self,
  ...
}:

let
  # Returns an attrset with inputs that have the attribute `clanModules`
  inputsWithClanModules = lib.filterAttrs (
    _name: value: builtins.hasAttr "clanModules" value
  ) self.inputs;

  flattenedClanModules = lib.foldl' (
    acc: input:
    lib.mkMerge [
      acc
      input.clanModules
    ]
  ) { } (lib.attrValues inputsWithClanModules);
in
{
  inventory.modules = flattenedClanModules;
}
@@ -43,10 +43,7 @@ in
  include = [
    "flakeModules"
    "inventory.json"
    "lib/build-clan"
    "lib/default.nix"
    "lib/flake-module.nix"
    "lib/inventory"
    "lib"
    "machines"
    "nixosModules"
  ];
@@ -69,6 +69,15 @@ in
    ```
  '';
};

templates = lib.mkOption {
  type = types.submodule { imports = [ ./templates/interface.nix ]; };
  default = { };
  description = ''
    Define Clan templates.
  '';
};

inventory = lib.mkOption {
  type = types.submodule { imports = [ ../inventory/build-inventory/interface.nix ]; };
  description = ''
@@ -112,11 +121,11 @@ in
  type = types.lazyAttrsOf types.raw;
  default = { };
};

# flake.clanInternals
clanInternals = lib.mkOption {
  # Hide from documentation. Exposes internals to the cli.
  visible = false;
  # type = types.raw;
  # ClanInternals
  type = types.submodule {
    options = {
@@ -132,12 +141,14 @@ in
      moduleSchemas = lib.mkOption { type = lib.types.raw; };
      inventoryFile = lib.mkOption { type = lib.types.raw; };
      # The machine 'imports' generated by the inventory per machine
      serviceConfigs = lib.mkOption { type = lib.types.raw; };
      inventoryClass = lib.mkOption { type = lib.types.raw; };
      # new attribute
      distributedServices = lib.mkOption { type = lib.types.raw; };
      # clan-core's modules
      clanModules = lib.mkOption { type = lib.types.raw; };
      source = lib.mkOption { type = lib.types.raw; };
      meta = lib.mkOption { type = lib.types.raw; };
      lib = lib.mkOption { type = lib.types.raw; };
      clanLib = lib.mkOption { type = lib.types.raw; };
      all-machines-json = lib.mkOption { type = lib.types.raw; };
      machines = lib.mkOption { type = lib.types.raw; };
      machinesFunc = lib.mkOption { type = lib.types.raw; };
@@ -42,7 +42,7 @@ let

# map from machine name to service configuration
# { ${machineName} :: Config }
serviceConfigs = (
inventoryClass = (
  buildInventory {
    inherit inventory directory;
  }
@@ -76,7 +76,10 @@ let
  (machines.${name} or { })
  # Inherit the inventory assertions ?
  # { inherit (mergedInventory) assertions; }
  { imports = serviceConfigs.machines.${name}.machineImports or [ ]; }
  { imports = inventoryClass.machines.${name}.machineImports or [ ]; }

  # Import the distributed services
  { imports = config.clanInternals.distributedServices.allMachines.${name} or [ ]; }
  (
    {
      # Settings
@@ -96,12 +99,6 @@ let

      networking.hostName = lib.mkDefault name;

      # speeds up nix commands by using the nixpkgs from the host system (especially useful in VMs)
      nix.registry.nixpkgs.to = lib.mkDefault {
        type = "path";
        path = lib.mkDefault nixpkgs;
      };

      # For vars we need to override the system so we run vars
      # generators on the machine that runs `clan vars generate`. If a
      # user is using the `pkgsForSystem`, we don't set
@@ -171,7 +168,6 @@ let
in
{
  imports = [
    ./auto-imports.nix
    # Merge the inventory file
    {
      inventory = _: {
@@ -204,8 +200,12 @@ in

  clanInternals = {
    moduleSchemas = clan-core.lib.modules.getModulesSchema config.inventory.modules;
    inherit serviceConfigs;
    inherit (clan-core) clanModules;
    inherit inventoryClass;
    distributedServices = import ../distributed-service/inventory-adapter.nix {
      inherit lib inventory;
      flake = config.self;
    };
    inherit (clan-core) clanModules clanLib;
    inherit inventoryFile;
    inventoryValuesPrios =
      # Temporary workaround
@@ -217,9 +217,6 @@ in
    templates = config.templates;
    inventory = config.inventory;
    meta = config.inventory.meta;
    lib = {
      inherit (clan-core.lib) select;
    };

    source = "${clan-core}";
57
lib/build-clan/templates/interface.nix
Normal file
@@ -0,0 +1,57 @@
{
  lib,
  ...
}:
let
  inherit (lib) types;

  templateType = types.submodule (
    { name, ... }:
    {
      options.description = lib.mkOption {
        type = types.str;
        default = name;
        description = ''
          The name of the template.
        '';
      };

      options.path = lib.mkOption {
        type = types.path;
        description = ''
          Holds the path to the clan template.
        '';
      };
    }
  );
in
{
  options = {
    # clan.templates.clan
    clan = lib.mkOption {
      type = types.attrsOf templateType;
      default = { };
      description = ''
        Holds the different clan templates.
      '';
    };

    # clan.templates.disko
    disko = lib.mkOption {
      type = types.attrsOf templateType;
      default = { };
      description = ''
        Holds different disko templates.
      '';
    };

    # clan.templates.machine
    machine = lib.mkOption {
      type = types.attrsOf templateType;
      default = { };
      description = ''
        Holds the different machine templates.
      '';
    };
  };
}
@@ -30,7 +30,7 @@ in
  expr = shallowForceAllAttributes config;
  expectedError = {
    type = "ThrownError";
    msg = "A definition for option `directory' is not of type `path*";
    msg = "A definition for option `directory' is not of type `absolute path*";
  };
};
@@ -1,25 +1,35 @@
{
  lib,
  clan-core,
  self,
  nixpkgs,
  ...
}:
let
# Produces the
# 'clanLib' attribute set
# Wrapped with fix, so we can depend on other clanLib functions without passing the whole flake
lib.fix (clanLib: {
  # TODO:
  # Some bad lib functions that depend on something in 'self'.
  # We should reduce the dependency on 'self' aka the 'flake' object
  # This makes it easier to test
  # most of the time passing the whole flake is unnecessary
  callLib = file: args: import file { inherit lib clanLib; } // args;

  evalClan = import ./eval-clan-modules {
    inherit clan-core lib;
    inherit lib;
    clan-core = self;
    pkgs = nixpkgs.legacyPackages.x86_64-linux;
  };
in
{
  inherit (evalClan) evalClanModules evalClanModulesWithRoles;
  buildClan = import ./build-clan { inherit lib nixpkgs clan-core; };
  buildClan = import ./build-clan {
    inherit lib nixpkgs;
    clan-core = self;
  };
  # ------------------------------------
  # Lib functions that don't depend on 'self'
  inventory = clanLib.callLib ./inventory { };
  modules = clanLib.callLib ./frontmatter { };
  facts = import ./facts.nix { inherit lib; };
  inventory = import ./inventory { inherit lib clan-core; };
  values = import ./values { inherit lib; };
  jsonschema = import ./jsonschema { inherit lib; };
  modules = import ./frontmatter {
    inherit lib;
    self = clan-core;
  };
  select = import ./select.nix;
}
})
33
lib/distributed-service/flake-module.nix
Normal file
@@ -0,0 +1,33 @@
{ self, inputs, ... }:
let
  inputOverrides = builtins.concatStringsSep " " (
    builtins.map (input: " --override-input ${input} ${inputs.${input}}") (builtins.attrNames inputs)
  );
in
{
  perSystem =
    {
      pkgs,
      lib,
      system,
      ...
    }:
    {
      # Run: nix-unit --extra-experimental-features flakes --flake .#legacyPackages.x86_64-linux.<attrName>
      legacyPackages.evalTest-distributedServices = import ./tests {
        inherit lib self;
      };

      checks = {
        lib-distributedServices-eval = pkgs.runCommand "tests" { nativeBuildInputs = [ pkgs.nix-unit ]; } ''
          export HOME="$(realpath .)"
          nix-unit --eval-store "$HOME" \
            --extra-experimental-features flakes \
            ${inputOverrides} \
            --flake ${self}#legacyPackages.${system}.evalTest-distributedServices

          touch $out
        '';
      };
    };
}
199
lib/distributed-service/inventory-adapter.nix
Normal file
@@ -0,0 +1,199 @@
|
||||
# Adapter function between the inventory.instances and the clan.service module
#
# Data flow:
# - inventory.instances -> Adapter -> clan.service module -> Service Resources (i.e. NixosModules per Machine, Vars per Service, etc.)
#
# What this file does:
#
# - Resolves the [Module] to an actual module-path and imports it.
# - Groups together all the same modules into a single import and creates all instances for it.
# - Resolves the inventory tags into machines. Tags don't exist at the service level.
#   Also combines the settings for 'machines' and 'tags'.
{
  lib,
  # This is used to resolve the module imports from 'flake.inputs'
  flake,
  # The clan inventory
  inventory,
}:
let
  # Returns the list of machine names
  # { ... } -> [ string ]
  resolveTags =
    {
      # Available InventoryMachines :: { {name} :: { tags = [ string ]; }; }
      machines,
      # Requested members :: { machines, tags }
      # Those will be resolved against the available machines
      members,
      # Not needed for resolution - only for error reporting
      roleName,
      instanceName,
    }:
    {
      machines =
        members.machines or [ ]
        ++ (builtins.foldl' (
          acc: tag:
          let
            # For error printing
            availableTags = lib.foldlAttrs (
              acc: _: v:
              v.tags or [ ] ++ acc
            ) [ ] (machines);

            tagMembers = builtins.attrNames (lib.filterAttrs (_n: v: builtins.elem tag v.tags or [ ]) machines);
          in
          if tagMembers == [ ] then
            lib.warn ''
              Service instance '${instanceName}': - ${roleName} tags: no machine with tag '${tag}' found.
              Available tags: ${builtins.toJSON (lib.unique availableTags)}
            '' acc
          else
            acc ++ tagMembers
        ) [ ] members.tags or [ ]);
    };

  machineHasTag = machineName: tagName: lib.elem tagName inventory.machines.${machineName}.tags;

  # map the instances into the module
  importedModuleWithInstances = lib.mapAttrs (
    instanceName: instance:
    let
      # TODO:
      resolvedModuleSet =
        # If the module.name is self then take the modules defined in the flake
        # Otherwise it's an external input which provides the modules via the 'clan.modules' attribute
        if instance.module.input == null then
          inventory.modules
        else
          let
            input =
              flake.inputs.${instance.module.input} or (throw ''
                Flake doesn't provide input with name '${instance.module.input}'

                Choose one of the following inputs:
                - ${
                  builtins.concatStringsSep "\n- " (
                    lib.attrNames (lib.filterAttrs (_name: input: input ? clan) flake.inputs)
                  )
                }

                To import a local module from 'inventory.modules' remove the 'input' attribute from the module definition
                Remove the following line from the module definition:

                ...
                - module.input = "${instance.module.input}"


              '');
            clanAttrs =
              input.clan
                or (throw "It seems the flake input ${instance.module.input} doesn't export any clan resources");
          in
          clanAttrs.modules;

      resolvedModule =
        resolvedModuleSet.${instance.module.name}
          or (throw "flake doesn't provide clan-module with name ${instance.module.name}");

      # Every instance includes machines via roles
      # :: { client :: ... }
      instanceRoles = lib.mapAttrs (
        roleName: role:
        let
          resolvedMachines = resolveTags {
            members = {
              # Explicit members
              machines = lib.attrNames role.machines;
              # Resolved members
              tags = lib.attrNames role.tags;
            };
            inherit (inventory) machines;
            inherit instanceName roleName;
          };
        in
        # instances.<instanceName>.roles.<roleName> =
        {
          machines = lib.genAttrs resolvedMachines.machines (
            machineName:
            let
              machineSettings = instance.roles.${roleName}.machines.${machineName}.settings or { };
              settingsViaTags = lib.filterAttrs (
                tagName: _: machineHasTag machineName tagName
              ) instance.roles.${roleName}.tags;
            in
            {
              # TODO: Do we want to wrap settings with
              # setDefaultModuleLocation "inventory.instances.${instanceName}.roles.${roleName}.tags.${tagName}";
              settings = {
                imports = [
                  machineSettings
                ] ++ lib.attrValues (lib.mapAttrs (_tagName: v: v.settings) settingsViaTags);
              };
            }
          );
          # Maps to settings for the role.
          # In other words this sets the following path of a clan.service module:
          # instances.<instanceName>.roles.<roleName>.settings
          settings = role.settings;
        }
      ) instance.roles;
    in
    {
      inherit (instance) module;
      inherit resolvedModule instanceRoles;
    }
  ) inventory.instances;

  # TODO: Eagerly check the _class of the resolved module
  evals = lib.mapAttrs (
    _module_ident: instances:
    (lib.evalModules {
      class = "clan.service";
      modules =
        [
          ./service-module.nix
          # Import the resolved module
          (builtins.head instances).instance.resolvedModule
        ]
        # Include all the instances that correlate to the resolved module
        ++ (builtins.map (v: {
          instances.${v.instanceName}.roles = v.instance.instanceRoles;
        }) instances);
    })
  ) grouped;

  # Group the instances by the module they resolve to
  # This is necessary to evaluate the module in a single pass
  # :: { <module.input>-<module.name> :: [ { name, value } ] }
  # Since 'perMachine' needs access to all the instances we should include them as a whole
  grouped = lib.foldlAttrs (
    acc: instanceName: instance:
    let
      inputName = if instance.module.input == null then "self" else instance.module.input;
      id = inputName + "-" + instance.module.name;
    in
    acc
    // {
      ${id} = acc.${id} or [ ] ++ [
        {
          inherit instanceName instance;
        }
      ];
    }
  ) { } importedModuleWithInstances;

  # TODO: Return an attribute set of resources instead of a plain list of nixosModules
  allMachines = lib.foldlAttrs (
    acc: _name: eval:
    acc
    // lib.mapAttrs (
      machineName: result: acc.${machineName} or [ ] ++ [ result.nixosModule ]
    ) eval.config.result.final
  ) { } evals;
in
{
  inherit importedModuleWithInstances grouped;
  inherit evals allMachines;
}
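Editor's note: a rough, untested sketch of how this adapter is driven, mirroring the test harness in tests/default.nix further down; the inventory fixture and the machine name 'jon' are hypothetical:

# Sketch (assumption: evaluated from lib/distributed-service/, as the tests do).
# First evaluate a minimal inventory against the inventory interface, which
# fills defaults such as module.input = null, then hand it to the adapter.
let
  inventory =
    (lib.evalModules {
      modules = [
        ../inventory/build-inventory/interface.nix
        {
          machines.jon = { };
          modules."simple-module" = {
            _class = "clan.service";
            manifest.name = "simple";
            roles.peer = { };
          };
          instances."foo" = {
            module.name = "simple-module"; # no 'input' => resolved locally
            roles.peer.machines.jon = { };
          };
        }
      ];
    }).config;

  resources = import ./inventory-adapter.nix {
    inherit lib inventory;
    flake = { inputs = { }; }; # no external inputs needed here
  };
in
resources.allMachines # => { jon = [ <nixosModule> ]; }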
514 lib/distributed-service/service-module.nix Normal file
@@ -0,0 +1,514 @@
{ lib, config, ... }:
let
  inherit (lib) mkOption types;
  inherit (types) attrsWith submoduleWith;

  # TODO:
  # Remove once this gets merged upstream; performs in O(n*log(n)) instead of O(n^2)
  # https://github.com/NixOS/nixpkgs/pull/355616/files
  uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list);

  checkInstanceRoles =
    instanceName: instanceRoles:
    let
      unmatchedRoles = lib.filter (roleName: !lib.elem roleName (lib.attrNames config.roles)) (
        lib.attrNames instanceRoles
      );
    in
    if unmatchedRoles == [ ] then
      true
    else
      throw ''
        inventory instance: 'instances.${instanceName}' defines the following roles:
        ${builtins.toJSON unmatchedRoles}

        But the clan-service module '${config.manifest.name}' defines roles:
        ${builtins.toJSON (lib.attrNames config.roles)}
      '';

  # checkInstanceSettings =
  #   instanceName: instanceSettings:
  #   let
  #     unmatchedRoles = 1;
  #   in
  #   unmatchedRoles;

  /**
    Merges the role- and machine-settings using the role interface

    Arguments:

    - roleName: The name of the role
    - instanceName: The name of the instance
    - settings: The settings of the machine. Leave empty to get the role settings

    Returns: evalModules result

    The caller is responsible to use .config or .extendModules
  */
  # TODO: evaluate against the role.settings statically and use extendModules to get the machineSettings
  # Doing this might improve performance
  evalMachineSettings =
    {
      roleName,
      instanceName,
      machineName ? null,
      settings,
    }:
    lib.evalModules {
      # Prefix for better error reporting
      # This prints the path where the option should be defined rather than the plain path within settings
      # "The option `instances.foo.roles.server.machines.test.settings.<>' was accessed but has no value defined. Try setting the option."
      prefix =
        [
          "instances"
          instanceName
          "roles"
          roleName
        ]
        ++ (lib.optionals (machineName != null) [
          "machines"
          machineName
        ])
        ++ [ "settings" ];

      # This may lead to better error reporting
      # And catch errors if anyone tried to import i.e. a nixosConfiguration
      # Set some class: i.e. "network.server.settings"
      class = lib.concatStringsSep "." [
        config.manifest.name
        roleName
        "settings"
      ];

      modules = [
        (lib.setDefaultModuleLocation "Via clan.service module: roles.${roleName}.interface"
          config.roles.${roleName}.interface
        )
        (lib.setDefaultModuleLocation "inventory.instances.${instanceName}.roles.${roleName}.settings"
          config.instances.${instanceName}.roles.${roleName}.settings
        )
        settings
        # Don't set the module location here
        # This should already be set by the tags resolver
        # config.instances.${instanceName}.roles.${roleName}.machines.${machineName}.settings
      ];
    };

  /**
    Makes a module extensible,
    returning its config
    and making it extensible via '__functor' polymorphism

    Example:

    ```nix-repl
    res = makeExtensibleConfig (evalModules { options.foo = mkOption { default = 42; }; })
    res
    =>
    {
      foo = 42;
      __functor = <function>;
    }

    # This allows overriding using mkDefault, mkForce, etc.
    res { foo = 100; }
    =>
    {
      foo = 100;
      __functor = <function>;
    }
    ```
  */
  makeExtensibleConfig =
    f: args:
    let
      makeModuleExtensible =
        eval:
        eval.config
        // {
          __functor = _self: m: makeModuleExtensible (eval.extendModules { modules = lib.toList m; });
        };
    in
    makeModuleExtensible (f args);

  /**
    Apply the settings to the instance

    Takes a [ServiceInstance] :: { roles :: { roleName :: { machines :: { machineName :: { settings :: { ... } } } } } }
    Returns the same object but evaluates the settings against the interface.

    We need this because 'perMachine' shouldn't gain access to the raw deferred module.
  */
  applySettings =
    instanceName: instance:
    lib.mapAttrs (roleName: role: {
      machines = lib.mapAttrs (machineName: v: {
        # TODO: evaluate the settings against the interface
        # settings = (evalMachineSettings { inherit roleName instanceName; inherit (v) settings; }).config;
        settings = (
          makeExtensibleConfig evalMachineSettings {
            inherit roleName instanceName machineName;
            inherit (v) settings;
          }
        );
      }) role.machines;
      # TODO: evaluate the settings against the interface
      settings = (
        makeExtensibleConfig evalMachineSettings {
          inherit roleName instanceName;
          inherit (role) settings;
        }
      );
    }) instance.roles;
in
{
  options = {
    instances = mkOption {
      default = throw ''
        The clan service module ${config.manifest.name} doesn't define any instances.

        Did you forget to create instances via 'inventory.instances'?
      '';

      type = attrsWith {
        placeholder = "instanceName";
        elemType = submoduleWith {
          modules = [
            (
              { name, ... }:
              {
                # options.settings = mkOption {
                #   description = "settings of 'instance': ${name}";
                #   default = {};
                #   apply = v: lib.seq (checkInstanceSettings name v) v;
                # };
                options.roles = mkOption {
                  default = throw ''
                    Instance '${name}' of service '${config.manifest.name}' must define members via 'roles'.

                    To include a machine:
                    'instances.${name}.roles.<role-name>.machines.<your-machine-name>' must be set.
                  '';
                  type = attrsWith {
                    placeholder = "roleName";
                    elemType = submoduleWith {
                      modules = [
                        (
                          { ... }:
                          {
                            # instances.{instanceName}.roles.{roleName}.machines
                            options.machines = mkOption {
                              type = attrsWith {
                                placeholder = "machineName";
                                elemType = submoduleWith {
                                  modules = [
                                    (m: {
                                      options.settings = mkOption {
                                        type = types.raw;
                                        description = "Settings of '${name}-machine': ${m.name}.";
                                        default = { };
                                      };
                                    })
                                  ];
                                };
                              };
                            };

                            # instances.{instanceName}.roles.{roleName}.settings
                            # options._settings = mkOption { };
                            # options._settingsViaTags = mkOption { };
                            # A deferred module that combines _settingsViaTags with _settings
                            options.settings = mkOption {
                              type = types.raw;
                              description = "Settings of 'role': ${name}";
                              default = { };
                            };
                          }
                        )
                      ];
                    };
                  };
                  apply = v: lib.seq (checkInstanceRoles name v) v;
                };
              }
            )
          ];
        };
      };
    };

    manifest = mkOption {
      description = "Meta information about this module itself";
      type = submoduleWith {
        modules = [
          {
            options = {
              name = mkOption {
                description = ''
                  The name of the module

                  Mainly used to create an error context while evaluating.
                  This helps backtracking which module was included, and where an error came from originally.
                '';
                type = types.str;
              };
            };
          }
        ];
      };
    };
    roles = mkOption {
      default = throw ''
        Role behavior of service '${config.manifest.name}' must be defined.
        A 'clan.service' module should always define its behavior via 'roles'
        ---
        To add the role:
        `roles.client = {}`

        To define multiple instance behavior:
        `roles.client.perInstance = { ... }: {}`
      '';
      type = attrsWith {
        placeholder = "roleName";
        elemType = submoduleWith {
          modules = [
            (
              { name, ... }:
              let
                roleName = name;
              in
              {
                options.interface = mkOption {
                  type = types.deferredModule;
                  # TODO: Default to an empty module
                  # need to test that the empty module can be evaluated to empty settings
                  default = { };
                };
                options.perInstance = mkOption {
                  type = types.deferredModuleWith {
                    staticModules = [
                      # Common output format
                      # As described by adr
                      # { nixosModule, services, ... }
                      (
                        { ... }:
                        {
                          options.nixosModule = mkOption { default = { }; };
                          options.services = mkOption {
                            type = attrsWith {
                              placeholder = "serviceName";
                              elemType = submoduleWith {
                                modules = [ ./service-module.nix ];
                              };
                            };
                            default = { };
                          };
                        }
                      )
                    ];
                  };
                  default = { };
                  apply =
                    /**
                      This apply transforms the module into a function that takes arguments and returns an evaluated module
                      The arguments of the function are determined by its scope:
                      -> 'perInstance' maps over all instances and over all machines, hence it takes 'instanceName' and 'machineName' as iterator arguments
                    */
                    v: instanceName: machineName:
                    (lib.evalModules {
                      specialArgs = {
                        inherit instanceName;
                        machine = {
                          name = machineName;
                          roles = applySettings instanceName config.instances.${instanceName};
                        };
                        settings = (
                          makeExtensibleConfig evalMachineSettings {
                            inherit roleName instanceName machineName;
                            settings =
                              config.instances.${instanceName}.roles.${roleName}.machines.${machineName}.settings or { };
                          }
                        );
                      };
                      modules = [ v ];
                    }).config;
                };
              }
            )
          ];
        };
      };
    };

    perMachine = mkOption {
      type = types.deferredModuleWith {
        staticModules = [
          # Common output format
          # As described by adr
          # { nixosModule, services, ... }
          (
            { ... }:
            {
              options.nixosModule = mkOption { default = { }; };
              options.services = mkOption {
                type = attrsWith {
                  placeholder = "serviceName";
                  elemType = submoduleWith {
                    modules = [ ./service-module.nix ];
                  };
                };
                default = { };
              };
            }
          )
        ];
      };
      default = { };
      apply =
        v: machineName: machineScope:
        (lib.evalModules {
          specialArgs = {
            /**
              This apply transforms the module into a function that takes arguments and returns an evaluated module
              The arguments of the function are determined by its scope:
              -> 'perMachine' maps over all machines of a service, taking 'machineName' and a helper 'scope' (some aggregated attributes) as iterator arguments
              The 'scope' attribute is used to collect the 'roles' of all 'instances' the machine is part of and inject both into the specialArgs
            */
            machine = {
              name = machineName;
              roles =
                let
                  collectRoles =
                    instances:
                    lib.foldlAttrs (
                      r: _instanceName: instance:
                      r
                      ++ lib.foldlAttrs (
                        r2: roleName: _role:
                        r2 ++ [ roleName ]
                      ) [ ] instance.roles
                    ) [ ] instances;
                in
                uniqueStrings (collectRoles machineScope.instances);
            };
            inherit (machineScope) instances;

            # There are no machine settings.
            # Settings are always role-specific; settings that apply to a machine globally would mean merging all role and all instance settings into a single module.
            # But that will likely cause conflicts because it is inherently wrong.
            settings = throw ''
              'perMachine' doesn't have a 'settings' argument.

              Alternatives:
              - 'instances.<instanceName>.roles.<roleName>.settings' should be used instead.
              - 'instances.<instanceName>.roles.<roleName>.machines.<machineName>.settings' should be used instead.

              If that is insufficient, you might also consider using 'roles.<roleName>.perInstance' instead of 'perMachine'.
            '';
          };

          modules = [ v ];
        }).config;
    };
    # ---
    # Place the result in _module.result to mark them as "internal" and discourage usage/overrides
    #
    # ---
    # Intermediate result by mapping over the 'roles', 'instances', and 'machines'.
    # During this step the 'perMachine' and 'perInstance' are applied.
    # The result-set for a single machine can then be found by collecting all 'nixosModules' recursively.
    result.allRoles = mkOption {
      readOnly = true;
      default = lib.mapAttrs (roleName: roleCfg: {
        allInstances = lib.mapAttrs (instanceName: instanceCfg: {
          allMachines = lib.mapAttrs (
            machineName: _machineCfg: roleCfg.perInstance instanceName machineName
          ) instanceCfg.roles.${roleName}.machines or { };
        }) config.instances;
      }) config.roles;
    };

    result.allMachines = mkOption {
      readOnly = true;
      default =
        let
          collectMachinesFromInstance =
            instance:
            uniqueStrings (
              lib.foldlAttrs (
                acc: _roleName: role:
                acc ++ (lib.attrNames role.machines)
              ) [ ] instance.roles
            );
          # The service machines are defined by collecting all instance machines
          serviceMachines = lib.foldlAttrs (
            acc: instanceName: instance:
            acc
            // lib.genAttrs (collectMachinesFromInstance instance) (machineName:
              # Store information why this machine is part of the service
              # MachineOrigin :: { instances :: [ string ]; }
              {
                # Helper attribute to
                instances = [ instanceName ] ++ acc.${machineName}.instances or [ ];
                # All roles of the machine ?
                roles = lib.foldlAttrs (
                  acc2: roleName: role:
                  if builtins.elem machineName (lib.attrNames role.machines) then acc2 ++ [ roleName ] else acc2
                ) [ ] instance.roles;
              })
          ) { } config.instances;

          allMachines = lib.mapAttrs (_machineName: MachineOrigin: {
            # Filter out instances the machine is not part of
            instances = lib.mapAttrs (_n: v: { roles = v; }) (
              lib.filterAttrs (instanceName: _: builtins.elem instanceName MachineOrigin.instances) (
                # Instances with evaluated settings
                lib.mapAttrs applySettings config.instances
              )
            );
          }) serviceMachines;
        in
        # allMachines;
        lib.mapAttrs config.perMachine allMachines;
    };

    result.final = mkOption {
      readOnly = true;
      default = lib.mapAttrs (
        machineName: machineResult:
        let
          # config.result.allRoles.client.allInstances.bar.allMachines.test
          # instanceResults = config.result.allRoles.client.allInstances.bar.allMachines.${machineName};
          instanceResults = lib.foldlAttrs (
            acc: roleName: role:
            acc
            ++ lib.foldlAttrs (
              acc: instanceName: instance:
              if instance.allMachines.${machineName}.nixosModule or { } != { } then
                acc
                ++ [
                  (lib.setDefaultModuleLocation
                    "Via instances.${instanceName}.roles.${roleName}.machines.${machineName}"
                    instance.allMachines.${machineName}.nixosModule
                  )
                ]
              else
                acc
            ) [ ] role.allInstances
          ) [ ] config.result.allRoles;
        in
        {
          inherit instanceResults;
          nixosModule = {
            imports = [
              # For error backtracing. This module was produced by the 'perMachine' function
              # TODO: check if we need this or if it leads to better errors if we pass the underlying module locations
              (lib.setDefaultModuleLocation "clan.service: ${config.manifest.name} - via perMachine" machineResult.nixosModule)
            ] ++ instanceResults;
          };
        }
      ) config.result.allMachines;
    };
  };
}
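Editor's note: for orientation, a minimal authored module against the options defined above; this sketch is assembled from the test fixtures that follow, and the 'greeting' option plus the environment.etc path are illustrative only, not part of the change set:

{
  _class = "clan.service";
  manifest.name = "example";

  roles.client = {
    # Typed settings interface for this role
    interface =
      { lib, ... }:
      {
        options.greeting = lib.mkOption {
          type = lib.types.str;
          default = "hello";
        };
      };
    # Produces one nixosModule per (instance, machine) pair;
    # 'settings' and 'machine' come from the specialArgs wired up above.
    perInstance =
      { settings, machine, ... }:
      {
        nixosModule = {
          environment.etc."greeting".text = "${settings.greeting} ${machine.name}";
        };
      };
  };

  # Aggregated once per machine, across all instances of this service.
  perMachine =
    { machine, instances, ... }:
    {
      nixosModule = { };
    };
}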
327 lib/distributed-service/tests/default.nix Normal file
@@ -0,0 +1,327 @@
{
  lib,
  ...
}:
let
  inherit (lib)
    evalModules
    ;

  evalInventory =
    m:
    (evalModules {
      # Static modules
      modules = [
        ../../inventory/build-inventory/interface.nix
        {
          modules.test = { };
        }
        m
      ];
    }).config;

  flakeFixture = {
    inputs = { };
  };

  callInventoryAdapter =
    inventoryModule:
    import ../inventory-adapter.nix {
      inherit lib;
      flake = flakeFixture;
      inventory = evalInventory inventoryModule;
    };
in
{
  test_simple =
    let
      res = callInventoryAdapter {
        # Authored module
        # A minimal module looks like this
        # It isn't exactly doing anything but it's a valid module that produces an output
        modules."simple-module" = {
          _class = "clan.service";
          manifest = {
            name = "netwitness";
          };
        };
        # User config
        instances."instance_foo" = {
          module = {
            name = "simple-module";
          };
        };
      };
    in
    {
      # Test that the module is mapped into the output
      # We might change the attribute name in the future
      expr = res.evals ? "self-simple-module";
      expected = true;
    };

  # A module can be imported multiple times
  # A module can also have multiple instances within the same module
  # This means modules must be grouped together and imported once
  # All instances should be included within one evaluation to make all of them available
  test_module_grouping =
    let
      res = callInventoryAdapter {
        # Authored module
        # A minimal module looks like this
        # It isn't exactly doing anything but it's a valid module that produces an output
        modules."A" = {
          _class = "clan.service";
          manifest = {
            name = "A-name";
          };

          perMachine = { }: { };
        };
        modules."B" = {
          _class = "clan.service";
          manifest = {
            name = "B-name";
          };

          perMachine = { }: { };
        };
        # User config
        instances."instance_foo" = {
          module = {
            name = "A";
          };
        };
        instances."instance_bar" = {
          module = {
            name = "B";
          };
        };
        instances."instance_baz" = {
          module = {
            name = "A";
          };
        };
      };
    in
    {
      # Test that the module is mapped into the output
      # We might change the attribute name in the future
      expr = lib.mapAttrs (_n: v: builtins.length v) res.grouped;
      expected = {
        self-A = 2;
        self-B = 1;
      };
    };

  test_creates_all_instances =
    let
      res = callInventoryAdapter {
        # Authored module
        # A minimal module looks like this
        # It isn't exactly doing anything but it's a valid module that produces an output
        modules."A" = {
          _class = "clan.service";
          manifest = {
            name = "network";
          };

          perMachine = { }: { };
        };
        instances."instance_foo" = {
          module = {
            name = "A";
          };
        };
        instances."instance_bar" = {
          module = {
            name = "A";
          };
        };
        instances."instance_zaza" = {
          module = {
            name = "B";
          };
        };
      };
    in
    {
      # Test that the module is mapped into the output
      # We might change the attribute name in the future
      expr = lib.attrNames res.evals.self-A.config.instances;
      expected = [
        "instance_bar"
        "instance_foo"
      ];
    };

  # Membership via roles
  test_add_machines_directly =
    let
      res = callInventoryAdapter {
        # Authored module
        # A minimal module looks like this
        # It isn't exactly doing anything but it's a valid module that produces an output
        modules."A" = {
          _class = "clan.service";
          manifest = {
            name = "network";
          };
          # Define a role without special behavior
          roles.peer = { };

          # perMachine = {}: {};
        };
        machines = {
          jon = { };
          sara = { };
          hxi = { };
        };
        instances."instance_foo" = {
          module = {
            name = "A";
          };
          roles.peer.machines.jon = { };
        };
        instances."instance_bar" = {
          module = {
            name = "A";
          };
          roles.peer.machines.sara = { };
        };
        instances."instance_zaza" = {
          module = {
            name = "B";
          };
          roles.peer.tags.all = { };
        };
      };
    in
    {
      # Test that the module is mapped into the output
      # We might change the attribute name in the future
      expr = lib.attrNames res.evals.self-A.config.result.allMachines;
      expected = [
        "jon"
        "sara"
      ];
    };

  # Membership via tags
  test_add_machines_via_tags =
    let
      res = callInventoryAdapter {
        # Authored module
        # A minimal module looks like this
        # It isn't exactly doing anything but it's a valid module that produces an output
        modules."A" = {
          _class = "clan.service";
          manifest = {
            name = "network";
          };
          # Define a role without special behavior
          roles.peer = { };

          # perMachine = {}: {};
        };
        machines = {
          jon = {
            tags = [ "foo" ];
          };
          sara = {
            tags = [ "foo" ];
          };
          hxi = { };
        };
        instances."instance_foo" = {
          module = {
            name = "A";
          };
          roles.peer.tags.foo = { };
        };
        instances."instance_zaza" = {
          module = {
            name = "B";
          };
          roles.peer.tags.all = { };
        };
      };
    in
    {
      # Test that the module is mapped into the output
      # We might change the attribute name in the future
      expr = lib.attrNames res.evals.self-A.config.result.allMachines;
      expected = [
        "jon"
        "sara"
      ];
    };

  per_machine_args = import ./per_machine_args.nix { inherit lib callInventoryAdapter; };

  # test_per_machine_receives_instances =
  #   let
  #     res = callInventoryAdapter {
  #       # Authored module
  #       # A minimal module looks like this
  #       # It isn't exactly doing anything but it's a valid module that produces an output
  #       modules."A" = {
  #         _class = "clan.service";
  #         manifest = {
  #           name = "network";
  #         };
  #         # Define a role without special behavior
  #         roles.peer = { };

  #         perMachine =
  #           { instances, ... }:
  #           {
  #             nixosModule = instances;
  #           };
  #       };
  #       machines = {
  #         jon = { };
  #         sara = { };
  #       };
  #       instances."instance_foo" = {
  #         module = {
  #           name = "A";
  #         };
  #         roles.peer.machines.jon = { };
  #       };
  #       instances."instance_bar" = {
  #         module = {
  #           name = "A";
  #         };
  #         roles.peer.machines.sara = { };
  #       };
  #       instances."instance_zaza" = {
  #         module = {
  #           name = "B";
  #         };
  #         roles.peer.tags.all = { };
  #       };
  #     };
  #   in
  #   {
  #     expr = {
  #       hasMachineSettings =
  #         res.evals.self-A.config.result.allMachines.jon.nixosModule. # { {instanceName} :: { roles :: { {roleName} :: { machines :: { {machineName} :: { settings :: {} } } } } } }
  #           instance_foo.roles.peer.machines.jon ? settings;
  #       machineSettingsEmpty =
  #         lib.filterAttrs (n: _v: n != "__functor") res.evals.self-A.config.result.allMachines.jon.nixosModule. # { {instanceName} :: { roles :: { {roleName} :: { machines :: { {machineName} :: { settings :: {} } } } } } }
  #           instance_foo.roles.peer.machines.jon.settings;
  #       hasRoleSettings =
  #         res.evals.self-A.config.result.allMachines.jon.nixosModule. # { {instanceName} :: { roles :: { {roleName} :: { machines :: { {machineName} :: { settings :: {} } } } } } }
  #           instance_foo.roles.peer ? settings;
  #       roleSettingsEmpty =
  #         lib.filterAttrs (n: _v: n != "__functor") res.evals.self-A.config.result.allMachines.jon.nixosModule. # { {instanceName} :: { roles :: { {roleName} :: { machines :: { {machineName} :: { settings :: {} } } } } } }
  #           instance_foo.roles.peer.settings;
  #     };
  #     expected = {
  #       hasMachineSettings = true;
  #       machineSettingsEmpty = {};
  #       hasRoleSettings = true;
  #       roleSettingsEmpty = {};
  #     };
  #   };
}
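Editor's note: new cases follow the nix-unit convention used throughout this file, an attrset of { expr, expected }. A hypothetical additional case (a sketch only, not part of the change set) could check the grouping key format, which is "<input>-<module.name>" with "self" standing in for local modules:

  test_grouping_key_sketch =
    let
      res = callInventoryAdapter {
        modules."C" = {
          _class = "clan.service";
          manifest.name = "c-name";
        };
        instances."instance_c".module.name = "C";
      };
    in
    {
      # Local modules are grouped under the "self-" prefix
      expr = res.grouped ? "self-C";
      expected = true;
    };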
107 lib/distributed-service/tests/per_machine_args.nix Normal file
@@ -0,0 +1,107 @@
{ lib, callInventoryAdapter }:

let
  # Authored module
  # A minimal module looks like this
  # It isn't exactly doing anything but it's a valid module that produces an output
  modules."A" = {
    _class = "clan.service";
    manifest = {
      name = "network";
    };
    # Define two roles with unmergeable interfaces
    # Both define some 'timeout' but with completely different types.
    roles.peer.interface =
      { lib, ... }:
      {
        options.timeout = lib.mkOption {
          type = lib.types.str;
        };
      };
    roles.server.interface =
      { lib, ... }:
      {
        options.timeout = lib.mkOption {
          type = lib.types.submodule;
        };
      };

    perMachine =
      { instances, ... }:
      {
        nixosModule = instances;
      };
  };
  machines = {
    jon = { };
    sara = { };
  };
  res = callInventoryAdapter {
    inherit modules machines;
    instances."instance_foo" = {
      module = {
        name = "A";
      };
      roles.peer.machines.jon = {
        settings.timeout = lib.mkForce "foo-peer-jon";
      };
      roles.peer = {
        settings.timeout = "foo-peer";
      };
    };
    instances."instance_bar" = {
      module = {
        name = "A";
      };
      roles.peer.machines.jon = {
        settings.timeout = "bar-peer-jon";
      };
    };
    instances."instance_zaza" = {
      module = {
        name = "B";
      };
      roles.peer.tags.all = { };
    };
  };

  filterInternals = lib.filterAttrs (n: _v: !lib.hasPrefix "_" n);
in

{

  # settings should evaluate
  test_per_machine_receives_instance_settings = {
    expr = {
      hasMachineSettings =
        res.evals.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer.machines.jon
        ? settings;

      # settings are specific.
      # Below we access:
      #   instance = instance_foo
      #   roles = peer
      #   machines = jon
      specificMachineSettings = filterInternals res.evals.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer.machines.jon.settings;

      hasRoleSettings =
        res.evals.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer ? settings;

      # settings are specific.
      # Below we access:
      #   instance = instance_foo
      #   roles = peer
      #   machines = *
      specificRoleSettings = filterInternals res.evals.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer.settings;
    };
    expected = {
      hasMachineSettings = true;
      specificMachineSettings = {
        timeout = "foo-peer-jon";
      };
      hasRoleSettings = true;
      specificRoleSettings = {
        timeout = "foo-peer";
      };
    };
  };
}
@@ -6,11 +6,14 @@
let
  baseModule = {
    imports = (import (pkgs.path + "/nixos/modules/module-list.nix")) ++ [
      {
        nixpkgs.pkgs = pkgs;
        clan.core.name = "dummy";
        system.stateVersion = lib.version;
      }
      (
        { config, ... }:
        {
          nixpkgs.pkgs = pkgs;
          clan.core.name = "dummy";
          system.stateVersion = config.system.nixos.release;
        }
      )
    ];
  };

@@ -10,16 +10,20 @@ let
  pathExists
  ;
in
{
rec {
  # We should remove this.
  # It would enforce treating at least 'lib' as a module as a whole
  imports = filter pathExists [
    ./jsonschema/flake-module.nix
    ./inventory/flake-module.nix
    ./build-clan/flake-module.nix
    ./values/flake-module.nix
    ./distributed-service/flake-module.nix
  ];
  flake.lib = import ./default.nix {
  inherit lib inputs;
  flake.clanLib = import ./default.nix {
    inherit lib inputs self;
    inherit (inputs) nixpkgs;
    clan-core = self;
  };
  # TODO: remove this legacy alias
  flake.lib = flake.clanLib;
}

@@ -1,9 +1,9 @@
{ lib, self }:
{ lib, clanLib }:
let
  # Trim the .nix extension from a filename
  trimExtension = name: builtins.substring 0 (builtins.stringLength name - 4) name;

  jsonWithoutHeader = self.lib.jsonschema {
  jsonWithoutHeader = clanLib.jsonschema {
    includeDefaults = true;
    header = { };
  };
@@ -13,7 +13,7 @@ let
  lib.mapAttrs (
    _moduleName: rolesOptions:
    lib.mapAttrs (_roleName: options: jsonWithoutHeader.parseOptions options { }) rolesOptions
  ) (self.lib.evalClanModulesWithRoles modules);
  ) (clanLib.evalClan.evalClanModulesWithRoles modules);

  evalFrontmatter =
    {

272 lib/inventory/build-inventory/builder/default.nix Normal file
@@ -0,0 +1,272 @@
{
  lib,
  config,
  clanLib,
  ...
}:
let
  inherit (config) inventory directory;
  resolveTags =
    # Inventory, { machines :: [string], tags :: [string] }
    {
      serviceName,
      instanceName,
      roleName,
      inventory,
      members,
    }:
    {
      machines =
        members.machines or [ ]
        ++ (builtins.foldl' (
          acc: tag:
          let
            # For error printing
            availableTags = lib.foldlAttrs (
              acc: _: v:
              v.tags or [ ] ++ acc
            ) [ ] (inventory.machines);

            tagMembers = builtins.attrNames (
              lib.filterAttrs (_n: v: builtins.elem tag v.tags or [ ]) inventory.machines
            );
          in
          if tagMembers == [ ] then
            lib.warn ''
              inventory.services.${serviceName}.${instanceName}: - ${roleName} tags: no machine with tag '${tag}' found.
              Available tags: ${builtins.toJSON (lib.unique availableTags)}
            '' [ ]
          else
            acc ++ tagMembers
        ) [ ] members.tags or [ ]);
    };

  checkService =
    modulepath: serviceName:
    builtins.elem "inventory" (clanLib.modules.getFrontmatter modulepath serviceName).features or [ ];

  compileMachine =
    { machineConfig }:
    {
      machineImports = [
        (lib.optionalAttrs (machineConfig.deploy.targetHost or null != null) {
          config.clan.core.networking.targetHost = machineConfig.deploy.targetHost;
        })
      ];
      assertions = { };
    };

  legacyResolveImports =
    {
      supportedRoles,
      resolvedRolesPerInstance,
      serviceConfigs,
      serviceName,
      machineName,
      getRoleFile,
    }:
    (lib.foldlAttrs (
      # : [ Modules ] -> String -> ServiceConfig -> [ Modules ]
      acc2: instanceName: serviceConfig:
      let
        resolvedRoles = resolvedRolesPerInstance.${instanceName};

        isInService = builtins.any (members: builtins.elem machineName members.machines) (
          builtins.attrValues resolvedRoles
        );

        # all roles where the machine is present
        machineRoles = builtins.attrNames (
          lib.filterAttrs (_role: roleConfig: builtins.elem machineName roleConfig.machines) resolvedRoles
        );

        machineServiceConfig = (serviceConfig.machines.${machineName} or { }).config or { };
        globalConfig = serviceConfig.config or { };

        globalExtraModules = serviceConfig.extraModules or [ ];
        machineExtraModules = serviceConfig.machines.${machineName}.extraModules or [ ];
        roleServiceExtraModules = builtins.foldl' (
          acc: role: acc ++ serviceConfig.roles.${role}.extraModules or [ ]
        ) [ ] machineRoles;

        # TODO: maybe optimize this; don't look up the role in inverse roles. Imports are not lazy
        roleModules = builtins.map (
          role:
          if builtins.elem role supportedRoles && inventory.modules ? ${serviceName} then
            getRoleFile role
          else
            throw "Module ${serviceName} doesn't have role: '${role}'. Role: ${
              inventory.modules.${serviceName}
            }/roles/${role}.nix not found."
        ) machineRoles;

        roleServiceConfigs = builtins.filter (m: m != { }) (
          builtins.map (role: serviceConfig.roles.${role}.config or { }) machineRoles
        );

        extraModules = map (s: if builtins.typeOf s == "string" then "${directory}/${s}" else s) (
          globalExtraModules ++ machineExtraModules ++ roleServiceExtraModules
        );
      in
      if !(serviceConfig.enabled or true) then
        acc2
      else if isInService then
        acc2
        ++ [
          {
            imports = roleModules ++ extraModules;
            clan.inventory.services.${serviceName}.${instanceName} = {
              roles = resolvedRoles;
              # TODO: Add inverseRoles to the service config if needed
              # inherit inverseRoles;
            };
          }
          (lib.optionalAttrs (globalConfig != { } || machineServiceConfig != { } || roleServiceConfigs != [ ])
            {
              clan.${serviceName} = lib.mkMerge (
                [
                  globalConfig
                  machineServiceConfig
                ]
                ++ roleServiceConfigs
              );
            }
          )
        ]
      else
        acc2
    ) [ ] (serviceConfigs));
in
{
  imports = [
    ./interface.nix
  ];
  config = {
    machines = builtins.mapAttrs (
      machineName: machineConfig: m:
      let
        compiledServices = lib.mapAttrs (
          _: serviceConfigs:
          (
            { config, ... }:
            let
              serviceName = config.serviceName;

              getRoleFile = role: builtins.seq role inventory.modules.${serviceName} + "/roles/${role}.nix";
            in
            {
              _module.args = {
                inherit
                  resolveTags
                  inventory
                  clanLib
                  machineName
                  serviceConfigs
                  ;
              };
              imports = [
                ./roles.nix
              ];

              isClanModule =
                let
                  firstRole = import (getRoleFile (builtins.head config.supportedRoles));
                  loadModuleForClassCheck =
                    m:
                    if lib.isFunction m then
                      let
                        args = lib.functionArgs m;
                      in
                      m args
                    else
                      m;
                  module = loadModuleForClassCheck (firstRole);
                in
                if (module) ? _class then module._class == "clan" else false;
              # The actual result
              machineImports =
                if config.isClanModule then
                  throw "Clan modules are not supported yet."
                else
                  legacyResolveImports {
                    supportedRoles = config.supportedRoles;
                    resolvedRolesPerInstance = config.resolvedRolesPerInstance;
                    inherit
                      serviceConfigs
                      serviceName
                      machineName
                      getRoleFile
                      ;
                  };

              # Assertions
              assertions = {
                "checkservice.${serviceName}" = {
                  assertion = checkService inventory.modules.${serviceName} serviceName;
                  message = ''
                    Service ${serviceName} cannot be used in inventory. It does not declare the 'inventory' feature.

                    To allow it add the following to the beginning of the README.md of the module:

                    ---
                    ...

                    features = [ "inventory" ]
                    ---

                    Also make sure to test the module with the 'inventory' feature enabled.

                  '';
                };
              };
            }
          )
        ) (config.inventory.services or { });

        compiledMachine = compileMachine {
          inherit
            machineConfig
            ;
        };

        machineImports = (
          compiledMachine.machineImports
          ++ builtins.foldl' (
            acc: service:
            let
              failedAssertions = (lib.filterAttrs (_: v: !v.assertion) service.assertions);
              failedAssertionsImports =
                if failedAssertions != { } then
                  [
                    {
                      clan.inventory.assertions = failedAssertions;
                    }
                  ]
                else
                  [
                    {
                      clan.inventory.assertions = {
                        "alive.assertion.inventory" = {
                          assertion = true;
                          message = ''
                            No failed assertions found for machine ${machineName}. This will never be displayed.
                            It is here for testing purposes.
                          '';
                        };
                      };
                    }
                  ];
            in
            acc
            ++ service.machineImports
            # Import failed assertions
            ++ failedAssertionsImports
          ) [ ] (builtins.attrValues m.config.compiledServices)
        );
      in
      {
        inherit machineImports compiledServices compiledMachine;
      }
    ) (inventory.machines or { });
  };
}
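Editor's note: a worked example of the resolveTags helper above (the machine and tag names are hypothetical), showing how explicit members and tag-resolved members are concatenated:

# Given two machines where only 'jon' carries the "backup" tag:
resolveTags {
  serviceName = "network";
  instanceName = "instance_foo";
  roleName = "peer";
  inventory.machines = {
    jon = { tags = [ "backup" ]; };
    sara = { tags = [ ]; };
  };
  members = {
    machines = [ "sara" ]; # explicit member
    tags = [ "backup" ]; # resolved to every machine tagged "backup"
  };
}
# => { machines = [ "sara" "jon" ]; }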
91 lib/inventory/build-inventory/builder/interface.nix Normal file
@@ -0,0 +1,91 @@
{ lib, ... }:
let
  inherit (lib) types mkOption;
  submodule = m: types.submoduleWith { modules = [ m ]; };

in
{
  options = {
    directory = mkOption {
      type = types.path;
    };
    inventory = mkOption {
      type = types.raw;
    };
    machines = mkOption {
      type = types.attrsOf (
        submodule (
          { name, ... }:
          let
            machineName = name;
          in
          {
            options = {
              compiledMachine = mkOption {
                type = types.raw;
              };
              compiledServices = mkOption {
                # type = types.attrsOf;
                type = types.attrsOf (
                  types.submoduleWith {
                    modules = [
                      (
                        { name, ... }:
                        let
                          serviceName = name;
                        in
                        {
                          options = {
                            machineName = mkOption {
                              default = machineName;
                              readOnly = true;
                            };
                            serviceName = mkOption {
                              default = serviceName;
                              readOnly = true;
                            };
                            # Outputs
                            machineImports = mkOption {
                              type = types.listOf types.raw;
                            };
                            supportedRoles = mkOption {
                              type = types.listOf types.str;
                            };
                            matchedRoles = mkOption {
                              type = types.listOf types.str;
                            };
                            isClanModule = mkOption {
                              type = types.bool;
                            };
                            machinesRoles = mkOption {
                              type = types.attrsOf (types.listOf types.str);
                            };
                            resolvedRolesPerInstance = mkOption {
                              type = types.attrsOf (
                                types.attrsOf (submodule {
                                  options.machines = mkOption {
                                    type = types.listOf types.str;
                                  };
                                })
                              );
                            };
                            assertions = mkOption {
                              type = types.attrsOf types.raw;
                            };
                          };
                        }
                      )
                    ];
                  }
                );
              };
              machineImports = mkOption {
                type = types.listOf types.raw;
              };
            };
          }
        )
      );
    };
  };
}
65 lib/inventory/build-inventory/builder/roles.nix Normal file
@@ -0,0 +1,65 @@
{
  lib,
  config,
  resolveTags,
  inventory,
  clanLib,
  machineName,
  serviceConfigs,
  ...
}:
let
  serviceName = config.serviceName;
in
{
  # Roles resolution
  # : List String
  supportedRoles = clanLib.modules.getRoles inventory.modules serviceName;
  matchedRoles = builtins.attrNames (
    lib.filterAttrs (_: ms: builtins.elem machineName ms) config.machinesRoles
  );
  resolvedRolesPerInstance = lib.mapAttrs (
    instanceName: instanceConfig:
    let
      resolvedRoles = lib.genAttrs config.supportedRoles (
        roleName:
        resolveTags {
          members = instanceConfig.roles.${roleName} or { };
          inherit
            instanceName
            serviceName
            roleName
            inventory
            ;
        }
      );
      usedRoles = builtins.attrNames instanceConfig.roles;
      unmatchedRoles = builtins.filter (role: !builtins.elem role config.supportedRoles) usedRoles;
    in
    if unmatchedRoles != [ ] then
      throw ''
        Roles ${builtins.toJSON unmatchedRoles} are not defined in the service ${serviceName}.
        Instance: '${instanceName}'
        Please use one of the available roles: ${builtins.toJSON config.supportedRoles}
      ''
    else
      resolvedRoles
  ) serviceConfigs;

  machinesRoles = builtins.zipAttrsWith (
    _n: vs:
    let
      flat = builtins.foldl' (acc: s: acc ++ s.machines) [ ] vs;
    in
    lib.unique flat
  ) (builtins.attrValues config.resolvedRolesPerInstance);

  assertions = lib.concatMapAttrs (
    instanceName: resolvedRoles:
    clanLib.modules.checkConstraints {
      moduleName = serviceName;
      allModules = inventory.modules;
      inherit resolvedRoles instanceName;
    }
  ) config.resolvedRolesPerInstance;
}
@@ -1,272 +1,7 @@
# Generate partial NixOS configurations for every machine in the inventory
# This function is responsible for generating the module configuration for every machine in the inventory.
{ lib, clan-core }:
{ lib, clanLib }:
let
  resolveTags =
    # Inventory, { machines :: [string], tags :: [string] }
    {
      serviceName,
      instanceName,
      roleName,
      inventory,
      members,
    }:
    {
      machines =
        members.machines or [ ]
        ++ (builtins.foldl' (
          acc: tag:
          let
            # For error printing
            availableTags = lib.foldlAttrs (
              acc: _: v:
              v.tags or [ ] ++ acc
            ) [ ] (inventory.machines);

            tagMembers = builtins.attrNames (
              lib.filterAttrs (_n: v: builtins.elem tag v.tags or [ ]) inventory.machines
            );
          in
          if tagMembers == [ ] then
            lib.warn ''
              inventory.services.${serviceName}.${instanceName}: - ${roleName} tags: no machine with tag '${tag}' found.
              Available tags: ${builtins.toJSON (lib.unique availableTags)}
            '' [ ]
          else
            acc ++ tagMembers
        ) [ ] members.tags or [ ]);
    };

  checkService =
    modulepath: serviceName:
    builtins.elem "inventory"
      (clan-core.lib.modules.getFrontmatter modulepath serviceName).features or [ ];

  compileMachine =
    { machineConfig }:
    {
      machineImports = [
        (lib.optionalAttrs (machineConfig.deploy.targetHost or null != null) {
          config.clan.core.networking.targetHost = machineConfig.deploy.targetHost;
        })
      ];
      assertions = { };
    };

  compileServicesForMachine =
    # Returns a NixOS configuration for the machine 'machineName'.
    # Return Format: { imports = [ ... ]; config = { ... }; options = { ... } }
    {
      machineName,
      inventory,
      directory,
    }:
    let
      compileServiceModules =
        serviceName: serviceConfigs:
        let
          supportedRoles = clan-core.lib.modules.getRoles inventory.modules serviceName;

          firstRole = import (getRoleFile (builtins.head supportedRoles));

          loadModuleForClassCheck =
            m:
            if lib.isFunction m then
              let
                args = lib.functionArgs m;
              in
              m args
            else
              m;

          isClanModule =
            let
              module = loadModuleForClassCheck firstRole;
            in
            if module ? _class then module._class == "clan" else false;

          getRoleFile = role: builtins.seq role inventory.modules.${serviceName} + "/roles/${role}.nix";

          resolvedRolesPerInstance = lib.mapAttrs (
            instanceName: instanceConfig:
            let
              resolvedRoles = lib.genAttrs supportedRoles (
                roleName:
                resolveTags {
                  members = instanceConfig.roles.${roleName} or { };
                  inherit
                    instanceName
                    serviceName
                    roleName
                    inventory
                    ;
                }
              );
              usedRoles = builtins.attrNames instanceConfig.roles;
              unmatchedRoles = builtins.filter (role: !builtins.elem role supportedRoles) usedRoles;
            in
            if unmatchedRoles != [ ] then
              throw ''
                Service: '${serviceName}' Instance: '${instanceName}'
                The following roles do not exist: ${builtins.toJSON unmatchedRoles}
                Please use one of the available roles: ${builtins.toJSON supportedRoles}
              ''
            else
              resolvedRoles
          ) serviceConfigs;

          machinesRoles = builtins.zipAttrsWith (
            _n: vs:
            let
              flat = builtins.foldl' (acc: s: acc ++ s.machines) [ ] vs;
            in
            lib.unique flat
          ) (builtins.attrValues resolvedRolesPerInstance);

          matchedRoles = builtins.attrNames (
            lib.filterAttrs (_: ms: builtins.elem machineName ms) machinesRoles
          );
        in
        # roleImports = lib.mapAttrsToList (
        #   roleName: _: inventory.modules.${serviceName} + "/roles/${roleName}.nix"
        # ) (lib.filterAttrs (_: ms: builtins.elem machineName ms) machinesRoles);
        # CompiledService :: { machineImports :: []; machineRoles :: [ String ] }
        {
          inherit
            machinesRoles
            matchedRoles
            resolvedRolesPerInstance
            firstRole
            isClanModule
            supportedRoles
            ;
          # TODO: Add other attributes
          machineImports =
            if isClanModule then
              throw "Clan modules are not supported yet."
            else
              (lib.foldlAttrs (
                # [ Modules ], String, ServiceConfig
                acc2: instanceName: serviceConfig:
                let
                  resolvedRoles = lib.genAttrs supportedRoles (
                    roleName:
                    resolveTags {
                      members = serviceConfig.roles.${roleName} or { };
                      inherit
                        serviceName
                        instanceName
                        roleName
                        inventory
                        ;
                    }
                  );

                  isInService = builtins.any (members: builtins.elem machineName members.machines) (
                    builtins.attrValues resolvedRoles
                  );

                  # all roles where the machine is present
                  machineRoles = builtins.attrNames (
                    lib.filterAttrs (_role: roleConfig: builtins.elem machineName roleConfig.machines) resolvedRoles
                  );
                  machineServiceConfig = (serviceConfig.machines.${machineName} or { }).config or { };
                  globalConfig = serviceConfig.config or { };

                  globalExtraModules = serviceConfig.extraModules or [ ];
                  machineExtraModules = serviceConfig.machines.${machineName}.extraModules or [ ];
                  roleServiceExtraModules = builtins.foldl' (
                    acc: role: acc ++ serviceConfig.roles.${role}.extraModules or [ ]
                  ) [ ] machineRoles;

                  # TODO: maybe optimize this; don't look up the role in inverse roles. Imports are not lazy
                  roleModules = builtins.map (
                    role:
                    if builtins.elem role supportedRoles && inventory.modules ? ${serviceName} then
                      getRoleFile role
                    else
                      throw "Module ${serviceName} doesn't have role: '${role}'. Role: ${
                        inventory.modules.${serviceName}
                      }/roles/${role}.nix not found."
                  ) machineRoles;

                  roleServiceConfigs = builtins.filter (m: m != { }) (
                    builtins.map (role: serviceConfig.roles.${role}.config or { }) machineRoles
                  );

                  extraModules = map (s: if builtins.typeOf s == "string" then "${directory}/${s}" else s) (
                    globalExtraModules ++ machineExtraModules ++ roleServiceExtraModules
                  );

                  nonExistingRoles = builtins.filter (role: !(builtins.elem role supportedRoles)) (
                    builtins.attrNames (serviceConfig.roles or { })
                  );

                  constraintAssertions = clan-core.lib.modules.checkConstraints {
                    moduleName = serviceName;
                    allModules = inventory.modules;
                    inherit resolvedRoles instanceName;
                  };
                in
                if (nonExistingRoles != [ ]) then
                  throw "Roles ${builtins.toString nonExistingRoles} are not defined in the service ${serviceName}."
                else if !(serviceConfig.enabled or true) then
                  acc2
                else if isInService then
                  acc2
                  ++ [
                    {
                      imports = roleModules ++ extraModules;

                      clan.inventory.assertions = constraintAssertions;
                      clan.inventory.services.${serviceName}.${instanceName} = {
                        roles = resolvedRoles;
                        # TODO: Add inverseRoles to the service config if needed
                        # inherit inverseRoles;
                      };
                    }
                    (lib.optionalAttrs (globalConfig != { } || machineServiceConfig != { } || roleServiceConfigs != [ ])
                      {
                        clan.${serviceName} = lib.mkMerge (
                          [
                            globalConfig
                            machineServiceConfig
                          ]
                          ++ roleServiceConfigs
                        );
                      }
                    )
                  ]
                else
                  acc2
              ) [ ] (serviceConfigs));

          assertions = lib.mapAttrs' (name: value: {
            name = "checkservice.${serviceName}.${name}";
            value = {
              assertion = checkService inventory.modules.${serviceName} serviceName;
              message = ''
                Service ${serviceName} cannot be used in inventory. It does not declare the 'inventory' feature.


                To allow it add the following to the beginning of the README.md of the module:

                ---
                ...

                features = [ "inventory" ]
                ---

                Also make sure to test the module with the 'inventory' feature enabled.

              '';
            };
          }) inventory.services;
        };
    in
    lib.mapAttrs compileServiceModules inventory.services;

/*
  Returns a set with NixOS configuration for every machine in the inventory.

@@ -276,57 +11,11 @@ let
|
||||
{ inventory, directory }:
|
||||
(lib.evalModules {
|
||||
specialArgs = {
|
||||
inherit directory inventory;
|
||||
inherit clanLib;
|
||||
};
|
||||
modules = [
|
||||
./internal.nix
|
||||
(
|
||||
{ ... }:
|
||||
{
|
||||
machines = builtins.mapAttrs (
|
||||
machineName: machineConfig:
|
||||
let
|
||||
compiledServices = compileServicesForMachine {
|
||||
inherit
|
||||
machineName
|
||||
inventory
|
||||
directory
|
||||
;
|
||||
};
|
||||
compiledMachine = compileMachine {
|
||||
inherit
|
||||
machineConfig
|
||||
;
|
||||
};
|
||||
|
||||
machineImports =
|
||||
compiledMachine.machineImports
|
||||
++ builtins.foldl' (
|
||||
acc: service:
|
||||
let
|
||||
failedAssertions = (lib.filterAttrs (_: v: !v.assertion) service.assertions);
|
||||
failedAssertionsImports =
|
||||
if failedAssertions != { } then
|
||||
[
|
||||
{
|
||||
clan.inventory.assertions = failedAssertions;
|
||||
}
|
||||
]
|
||||
else
|
||||
[ ];
|
||||
in
|
||||
acc
|
||||
++ service.machineImports
|
||||
# Import failed assertions
|
||||
++ failedAssertionsImports
|
||||
) [ ] (builtins.attrValues compiledServices);
|
||||
in
|
||||
{
|
||||
inherit machineImports compiledServices compiledMachine;
|
||||
}
|
||||
) (inventory.machines or { });
|
||||
}
|
||||
)
|
||||
./builder
|
||||
{ inherit directory inventory; }
|
||||
];
|
||||
}).config;
|
||||
in
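
As a usage sketch, assuming a minimal inventory (machine name and contents hypothetical; the concrete schema is defined by the interface module), buildInventory evaluates the inventory and exposes the per-machine results:

  let
    built = buildInventory {
      directory = ./.;
      inventory = {
        machines.machine1 = { };
        services = { };
      };
    };
  in
  # each machine entry carries machineImports, compiledServices and compiledMachine
  built.machines.machine1.machineImports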

@@ -103,7 +103,9 @@ in
    default = options;
  };
  modules = lib.mkOption {
-   type = types.attrsOf types.path;
+   # Don't define the type yet
+   # We manually transform the value with types.deferredModule.merge later to keep them serializable
+   type = types.attrsOf types.raw;
    default = { };
    defaultText = "clanModules of clan-core";
    description = ''
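
The comment above refers to a manual transformation applied later: in the module system, every option type exposes a merge function taking an option location and a list of definitions, so a raw, serializable value can be turned into a module on demand. A rough sketch of such a call, with a hypothetical location and definition:

  merged = lib.types.deferredModule.merge
    [ "inventory" "modules" "borgbackup" ] # option location, only used for error messages
    [
      {
        file = "inventory"; # provenance of the definition
        value = rawModuleValue; # the serializable value held by types.raw
      }
    ];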

@@ -275,7 +277,79 @@ in
    )
  );
};

instances = lib.mkOption {
  # Keep as internal until all de-/serialization issues are resolved
  visible = false;
  internal = true;
  description = "Multi-host service module instances";
  type = types.attrsOf (
    types.submodule {
      options = {
        # ModuleSpec
        module = lib.mkOption {
          type = types.submodule {
            options.input = lib.mkOption {
              type = types.nullOr types.str;
              default = null;
              defaultText = "null";
              description = ''
                Name of the input. Defaults to 'null', which means the module is local.
              '';
            };
            options.name = lib.mkOption {
              type = types.str;
            };
          };
        };
        roles = lib.mkOption {
          default = { };
          type = types.attrsOf (
            types.submodule {
              options = {
                # TODO: deduplicate
                machines = lib.mkOption {
                  type = types.attrsOf (
                    types.submodule {
                      options.settings = lib.mkOption {
                        default = { };
                        # Don't transform the value with `types.deferredModule` here. We need to keep it JSON-serializable.
                        # TODO: We need a custom serializer for deferredModule
                        type = types.deferredModule;
                      };
                    }
                  );
                  default = { };
                };
                tags = lib.mkOption {
                  type = types.attrsOf (
                    types.submodule {
                      options.settings = lib.mkOption {
                        default = { };
                        type = types.deferredModule;
                      };
                    }
                  );
                  default = { };
                };
                settings = lib.mkOption {
                  default = { };
                  type = types.deferredModule;
                };
              };
            }
          );
        };
      };
    }
  );
  default = { };
  apply =
    v:
    if v == { } then
      v
    else
      lib.warn "Inventory.instances and related features are still under development. Please use with care." v;
};
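
# For illustration only: a hypothetical value matching the schema above
# (instance, module, machine and tag names are made up):
#
#   instances."borgbackup-a" = {
#     module = { name = "borgbackup"; input = null; }; # local module
#     roles.client.machines.machine1.settings = { }; # per-machine settings module
#     roles.server.tags.backup.settings = { }; # per-tag settings module
#     roles.server.settings = { }; # role-wide settings module
#   };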

services = lib.mkOption {
  description = ''
    Services of the inventory.

@@ -1,24 +0,0 @@
{ lib, ... }:
let
  inherit (lib) types mkOption;
  submodule = m: types.submoduleWith { modules = [ m ]; };
in
{
  options = {
    machines = mkOption {
      type = types.attrsOf (submodule {
        options = {
          compiledMachine = mkOption {
            type = types.raw;
          };
          compiledServices = mkOption {
            type = types.raw;
          };
          machineImports = mkOption {
            type = types.raw;
          };
        };
      });
    };
  };
}

@@ -1,5 +1,5 @@
-{ lib, clan-core }:
+{ lib, clanLib }:
{
-  inherit (import ./build-inventory { inherit lib clan-core; }) buildInventory;
+  inherit (import ./build-inventory { inherit lib clanLib; }) buildInventory;
  interface = ./build-inventory/interface.nix;
}
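
A minimal sketch of the corresponding call site after this rename (path and binding name hypothetical):

  inventoryLib = import ./lib/inventory { inherit lib clanLib; };
  inherit (inventoryLib) buildInventory;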

@@ -50,10 +50,7 @@ in
self.filter {
  include = [
    "flakeModules"
-   "lib/default.nix"
-   "lib/flake-module.nix"
-   "lib/inventory"
-   "lib/frontmatter"
+   "lib"
    "clanModules/flake-module.nix"
    "clanModules/borgbackup"
  ];

@@ -1,4 +0,0 @@
---
features = [ "inventory" ]
---
Description

Some files were not shown because too many files have changed in this diff.