# Compare commits

343 commits — lassulus … enable-mor

Commit SHA1s:

7f34ba5d5e, 1d60f94cc5, 1235177541, 5c08e9a38d, 28dd54d866, 5baf37f7e9, ff669e2957, 8d4c1839e7,
0765d981c6, 10c27a0152, ccb5af9565, 828eff528a, cbf47580cf, 355ac57ccb, 227e293421, 9b3621b516,
62f09a450f, 95282bd880, 7a49ec252e, 5f9ee97cab, c6be9bbf07, d77ae5eed0, 3c2888edc7, b0f23353ef,
3fccccc092, 0a5d1bf322, 9ca5cb7bcc, cc1b356a94, 9aa8c1b8eb, 709d773768, 845abd1356, 2b4a4f2422,
82da5b6734, 33a9fd8d3d, 4beb097a95, b4cd62b9f8, ee7b98c34d, 8552d4b3bd, 375edcff81, 3183b26777,
0feacaf300, 6917021996, 3965f7b59f, 610a70e4f8, 6134eb0293, 62e9fe8f9f, 5bc2d00014, 616b294b8c,
2d7b92b3f9, 0487670d30, 4cd174b268, a8b257f32c, 047b767054, c74d23b799, 850627c5c6, 60d56c4e3b,
4911901f7c, a96860a24b, c429b41d2e, fe305f7f47, 591d397df9, 8231979bae, 6899461d0d, 16b067d291,
93cbe62765, 7fef29d7aa, 952d1facce, a565a85a5e, 3d5ef5e909, a5c5033273, 0ee0351e3e, c02f19205f,
dbcb8d6a4c, 039b309255, 538374558d, ef5ad09b2d, 9780463e6a, cac4b1200c, c8db27340e, 31a9c74e88,
dc8bfab65d, 33abb7ecd7, fcbdae9d09, 27b5680441, f13971167f, e75b5f3a2e, d5c0a2eb9c, 8cc8d09a11,
dfa3305450, 94415dfd0e, 6fb5bca801, 4162810ee1, 0b3badb0ef, 6a5954ad77, 02231b979b, 028f6a4d3d,
170908db7b, 39e6534dbb, 71809c1bdc, eecedf95e4, a208a9973c, d276d2faea, d470283dca, 88dab7d8bd,
8474a0aaef, 5ab2f206ea, ea8037006f, 3a682a6b3e, 0556ea624f, 8671fd7407, 3a9f0eb608, 1736b0f539,
eb375f3d81, 6162b82adb, 085189d1c4, 3cb22ad2a1, 27269d4ed9, 7cbedc74a5, 5ac30a767b, 89c6bcda4d,
51da020de2, e943d8531f, 13b9c23db9, ad43f323b8, aeb3cc4428, d81ca7206b, 0011cf594a, 41cd4533ba,
c15544e928, fa0fe23985, 1497e76bc2, b3d9c23e39, 5520641feb, 97f5a6bd4c, 3b2b5db84a, 84da7d437d,
b2db2c7abc, cb104b700d, 41054885db, 70c63221ec, 9c130c73e4, 178fff0618, 6324b495ee, ce7a70f9e1,
7102af9bd9, b38fddaf29, e7ffcedd14, b5a66e767b, 854d0fa83e, 4ccf5ca373, 781d439567, 68e00ff613,
828028e4b3, b48d07f5c5, ea8c9ed649, 68cb04c958, b8cb85fc72, bdb97308d0, 9708bdc6e7, 9ac8a45f1d,
a14fe1aef8, b1401d6e6b, f882c86fb0, 98d566c46e, c4ec4ccb3f, 5a6677379a, 30d19d088f, f3c45eb23e,
eaac6c76e2, 0939b29a8e, a2a395cdb0, df7429dbe7, 362faaf063, e215a9db6e, a5dd76b66d, 4472c51c25,
c6cf9d1336, 9b6e42790e, 547b012e0b, 9797ef792a, fe0de90a28, 539fd30206, a11d5471ec, 19f2facbce,
468a25034e, a2b76eb5a2, ba0ed30997, 2a4d2c9cb5, 4c1e74fae6, cee62bf168, a865213894, d8f9375580,
526072806f, 91a19d9ea9, 38c7644692, 726f2ab5f8, 5918620535, 58e85eda9c, e98e817941, fe92c7d1e6,
4222f9788c, 3d80423259, 186e81d8b9, 212c899767, 312c12c98f, 2ec4e49650, 4e5b4a1b80, ccb3bdb740,
a903a9028b, ba28691747, e7aa5cfb4e, 8b74147721, 299180703e, 6c941deb96, 39761946a0, b71e16dd5d,
0da1a05b55, 3551d061ce, 6099aeb0c6, bcd6c7108a, d20f13abe7, cfeda1f06d, 73dd981f71, bc239e104c,
bd2702df6d, 7b0e652a7a, 0c0eafe0f5, 3e0cd4bdfb, 2cf40fea51, 40d1a76d8a, 60b22fdf0e, cb13e7fab8,
b82a3b6085, 44345ed28b, 456b25c921, dfb5e5123f, 636ee65428, cbf8685f6e, 500af543bb, 46971aa51f,
3d83266916, b87768d44a, 5b821c610d, 347a5a5f76, 8f6dd4acc4, f3cbd0b289, 7b8a980336, d53e062024,
5ac629f549, 6c7fc15c0e, 3121c5ecdb, ada544ef56, 3e0f9f52bb, 3992d0ed0d, 6037dde559, baa0a615ea,
b0760bc2b9, 6a33fe8e7a, 1f3bd09245, 122dbf4240, 8ac286bcaf, 8fcc004b68, 37bbbefa8e, d44def5381,
03ce74fc74, 6c8137d30b, 27a3126d68, faee6c2a79, 6070219b1a, a5e32f9b6d, 89e3793831, fd908e18c3,
a4d4b991a1, 4670525106, 5a0ed03c56, af228db398, b0e7de3c8b, cb89fb0847, 014aec9531, 160bbfcb37,
5c68e129b7, bc53c7a886, 61c1943ccc, c3013c1a02, 3cff6577da, c795a1d895, 66e166068e, 0c7173afd0,
d5e391ecc8, 2a3bc7b31b, b54346ce03, 39bc7c1f17, 153b5560c3, 2412513ad4, 873f650678, 35aedddf65,
663ab70465, 4f1e2ba582, d3bd120a04, f8bf39e43a, 93a7e272b1, de3153259d, bf492d4deb, 41cb679eab,
b138cfcd69, a22d426b25, c0f07afb98, 0eaaabcf63, 7df51d0474, 5a6038f742, 15e8df894e, 50924ad7ff,
2e212e3e31, 23b57b0a3a, 69d092c46b, 2663a181d0, 9ab81a9c5d, 0872b781d7, 86e91c8604, 14377f25c9,
9b706c148b, dee284d669, 718e553211, cbe3cb94b7, 91661da320, 7ebc11f96f, 27ef7040c2, 283aad7ea0,
775088ccd9, d71a8329f2, 022d0babc5, 934d8fc2a4, e75b50e335, f9fc6904f0, 6deaab506a, 32748c14f4,
6d2845c645, 4899c38e52, 0d69d72899, 34904b8758, 51d65873a7, 02929e9d42, 2018de8d9e

## Changed files
```diff
@@ -8,5 +8,5 @@ jobs:
   checks-impure:
     runs-on: nix
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - run: nix run .#impure-checks
```
```diff
@@ -7,7 +7,7 @@ jobs:
   deploy-docs:
     runs-on: nix
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - run: nix run .#deploy-docs
         env:
           SSH_HOMEPAGE_KEY: ${{ secrets.SSH_HOMEPAGE_KEY }}
```
`.github/dependabot.yml` (vendored, new file, +6):

```diff
@@ -0,0 +1,6 @@
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
```
`.github/workflows/repo-sync.yml` (vendored, 9 changed lines):

```diff
@@ -3,10 +3,8 @@ on:
   schedule:
     - cron: "39 * * * *"
   workflow_dispatch:

-permissions:
-  contents: write

 jobs:
   repo-sync:
     if: github.repository_owner == 'clan-lol'
@@ -15,10 +13,15 @@ jobs:
       - uses: actions/checkout@v4
         with:
           persist-credentials: false
+      - uses: actions/create-github-app-token@v1
+        id: app-token
+        with:
+          app-id: ${{ vars.CI_APP_ID }}
+          private-key: ${{ secrets.CI_PRIVATE_KEY }}
       - name: repo-sync
         uses: repo-sync/github-sync@v2
         env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          GITHUB_TOKEN: ${{ steps.app-token.outputs.token }}
         with:
           source_repo: "https://git.clan.lol/clan/clan-core.git"
           source_branch: "main"
```
```diff
@@ -1,3 +1,4 @@
 # Contributing to Clan

-Go to the Contributing guide at https://docs.clan.lol/manual/contribute/
+<!-- Local file: docs/CONTRIBUTING.md -->
+Go to the Contributing guide at https://docs.clan.lol/manual/contribute/
```
```diff
@@ -5,6 +5,12 @@
     fileSystems."/".device = "/dev/null";
     boot.loader.grub.device = "/dev/null";
   };
+  clan.inventory.services = {
+    borgbackup.test-backup = {
+      roles.client.machines = [ "test-backup" ];
+      roles.server.machines = [ "test-backup" ];
+    };
+  };
   flake.nixosModules = {
     test-backup =
       {
@@ -22,12 +28,20 @@
       in
       {
         imports = [
           self.clanModules.borgbackup
+          # Do not import inventory modules. They should be configured via 'clan.inventory'
+          #
+          # TODO: Configure localbackup via inventory
           self.clanModules.localbackup
         ];
+        # Borgbackup overrides
+        services.borgbackup.repos.test-backups = {
+          path = "/var/lib/borgbackup/test-backups";
+          authorizedKeys = [ (builtins.readFile ../lib/ssh/pubkey) ];
+        };
+        clan.borgbackup.destinations.test-backup.repo = lib.mkForce "borg@machine:.";

         clan.core.networking.targetHost = "machine";
         networking.hostName = "machine";
         nixpkgs.hostPlatform = "x86_64-linux";

         programs.ssh.knownHosts = {
           machine.hostNames = [ "machine" ];
@@ -108,7 +122,6 @@
         '';
         folders = [ "/var/test-service" ];
       };
-      clan.borgbackup.destinations.test-backup.repo = "borg@machine:.";

       fileSystems."/mnt/external-disk" = {
         device = "/dev/vdb"; # created in tests with virtualisation.emptyDisks
@@ -129,11 +142,6 @@
           touch /run/unmount-external-disk
         '';
       };

-      services.borgbackup.repos.test-backups = {
-        path = "/var/lib/borgbackup/test-backups";
-        authorizedKeys = [ (builtins.readFile ../lib/ssh/pubkey) ];
-      };
     };
   };
   perSystem =
@@ -154,25 +162,29 @@
           "inventory.json"
           "lib/build-clan"
           "lib/default.nix"
+          "lib/select.nix"
           "lib/flake-module.nix"
           "lib/frontmatter"
           "lib/inventory"
+          "lib/constraints"
           "nixosModules"
         ];
       };
     in
     {
-      # Needs investigation on aarch64-linux
-      # vm-test-run-test-backups> qemu-kvm: No machine specified, and there is no default
-      # vm-test-run-test-backups> Use -machine help to list supported machines
-      checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux") {
+      checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
        test-backups = (import ../lib/container-test.nix) {
          name = "test-backups";
          nodes.machine = {
-            imports = [
-              self.nixosModules.clanCore
-              self.nixosModules.test-backup
-            ];
+            imports =
+              [
+                self.nixosModules.clanCore
+                # Some custom overrides for the backup tests
+                self.nixosModules.test-backup
+              ]
+              ++
+                # import the inventory generated nixosModules
+                self.clanInternals.inventoryClass.machines.test-backup.machineImports;
            clan.core.settings.directory = ./.;
            environment.systemPackages = [
              (pkgs.writeShellScriptBin "foo" ''
```
```diff
@@ -1,7 +1,7 @@
 (import ../lib/container-test.nix) (
   { ... }:
   {
-    name = "secrets";
+    name = "container";

     nodes.machine =
       { ... }:
```
```diff
@@ -12,6 +12,8 @@ in
     ./flash/flake-module.nix
     ./impure/flake-module.nix
     ./installation/flake-module.nix
+    ./installation-without-system/flake-module.nix
+    ./morph/flake-module.nix
     ./nixos-documentation/flake-module.nix
   ];
   perSystem =
```
```diff
@@ -48,7 +50,7 @@ in
       flakeOutputs =
         lib.mapAttrs' (
           name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
-        ) self.nixosConfigurations
+        ) (lib.filterAttrs (n: _: !lib.hasPrefix "test-" n) self.nixosConfigurations)
         // lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages
         // lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
         // lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (
```
```diff
@@ -1,12 +1,26 @@
-{ self, lib, ... }:
 {
-  clan.machines.test-flash-machine = {
-    clan.core.networking.targetHost = "test-flash-machine";
-    fileSystems."/".device = lib.mkDefault "/dev/vda";
-    boot.loader.grub.device = lib.mkDefault "/dev/vda";
-
-    imports = [ self.nixosModules.test-flash-machine ];
-  };
+  config,
+  self,
+  lib,
+  ...
+}:
+{
+  clan.machines = lib.listToAttrs (
+    lib.map (
+      system:
+      lib.nameValuePair "test-flash-machine-${system}" {
+        clan.core.networking.targetHost = "test-flash-machine";
+        fileSystems."/".device = lib.mkDefault "/dev/vda";
+        boot.loader.grub.device = lib.mkDefault "/dev/vda";
+
+        # We need to use `mkForce` because we inherit from `test-install-machine`
+        # which currently hardcodes `nixpkgs.hostPlatform`
+        nixpkgs.hostPlatform = lib.mkForce system;
+
+        imports = [ self.nixosModules.test-flash-machine ];
+      }
+    ) (lib.filter (lib.hasSuffix "linux") config.systems)
+  );

   flake.nixosModules = {
     test-flash-machine =
```
```diff
@@ -30,20 +44,20 @@
       let
         dependencies = [
           pkgs.disko
-          self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.pkgs.perlPackages.ConfigIniFiles
-          self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.pkgs.perlPackages.FileSlurp
+          self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.ConfigIniFiles
+          self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.FileSlurp

-          self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.build.toplevel
-          self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.build.diskoScript
-          self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.build.diskoScript.drvPath
-          self.clanInternals.machines.${pkgs.hostPlatform.system}.test-flash-machine.config.system.clan.deployment.file
+          self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
+          self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
+          self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript.drvPath
+          self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.clan.deployment.file

         ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
         closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
       in
       {
-        checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
-          flash = (import ../lib/test-base.nix) {
+        checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
+          test-flash = (import ../lib/test-base.nix) {
             name = "flash";
             nodes.target = {
               virtualisation.emptyDiskImages = [ 4096 ];
```
```diff
@@ -65,7 +79,7 @@
             testScript = ''
               start_all()

-              machine.succeed("clan flash write --debug --flake ${../..} --yes --disk main /dev/vdb test-flash-machine")
+              machine.succeed("clan flash write --debug --flake ${../..} --yes --disk main /dev/vdb test-flash-machine-${pkgs.hostPlatform.system}")
             '';
           } { inherit pkgs self; };
         };
```
`checks/installation-without-system/flake-module.nix` (new file, +236):

```nix
{
  self,
  lib,
  ...
}:
{
  # The purpose of this test is to ensure `clan machines install` works
  # for machines that don't have a hardware config yet.

  # If this test starts failing it could be due to the `facter.json` being out of date
  # you can get a new one by adding
  # client.fail("cat test-flake/machines/test-install-machine/facter.json >&2")
  # to the installation test.
  clan.machines.test-install-machine-without-system = {
    fileSystems."/".device = lib.mkDefault "/dev/vda";
    boot.loader.grub.device = lib.mkDefault "/dev/vda";

    imports = [ self.nixosModules.test-install-machine-without-system ];
  };
  clan.machines.test-install-machine-with-system =
    { pkgs, ... }:
    {
      # https://git.clan.lol/clan/test-fixtures
      facter.reportPath = builtins.fetchurl {
        url = "https://git.clan.lol/clan/test-fixtures/raw/commit/4a2bc56d886578124b05060d3fb7eddc38c019f8/nixos-vm-facter-json/${pkgs.hostPlatform.system}.json";
        sha256 =
          {
            aarch64-linux = "sha256:1rlfymk03rmfkm2qgrc8l5kj5i20srx79n1y1h4nzlpwaz0j7hh2";
            x86_64-linux = "sha256:16myh0ll2gdwsiwkjw5ba4dl23ppwbsanxx214863j7nvzx42pws";
          }
          .${pkgs.hostPlatform.system};
      };

      fileSystems."/".device = lib.mkDefault "/dev/vda";
      boot.loader.grub.device = lib.mkDefault "/dev/vda";

      imports = [ self.nixosModules.test-install-machine-without-system ];
    };
  flake.nixosModules = {
    test-install-machine-without-system =
      { lib, modulesPath, ... }:
      {
        imports = [
          (modulesPath + "/testing/test-instrumentation.nix") # we need these 2 modules always to be able to run the tests
          (modulesPath + "/profiles/qemu-guest.nix")
          ../lib/minify.nix
        ];

        networking.hostName = "test-install-machine";

        environment.etc."install-successful".text = "ok";

        boot.consoleLogLevel = lib.mkForce 100;
        boot.kernelParams = [ "boot.shell_on_fail" ];

        # disko config
        boot.loader.grub.efiSupport = lib.mkDefault true;
        boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
        clan.core.vars.settings.secretStore = "vm";
        clan.core.vars.generators.test = {
          files.test.neededFor = "partitioning";
          script = ''
            echo "notok" > $out/test
          '';
        };
        disko.devices = {
          disk = {
            main = {
              type = "disk";
              device = "/dev/vda";

              preCreateHook = ''
                test -e /run/partitioning-secrets/test/test
              '';

              content = {
                type = "gpt";
                partitions = {
                  boot = {
                    size = "1M";
                    type = "EF02"; # for grub MBR
                    priority = 1;
                  };
                  ESP = {
                    size = "512M";
                    type = "EF00";
                    content = {
                      type = "filesystem";
                      format = "vfat";
                      mountpoint = "/boot";
                      mountOptions = [ "umask=0077" ];
                    };
                  };
                  root = {
                    size = "100%";
                    content = {
                      type = "filesystem";
                      format = "ext4";
                      mountpoint = "/";
                    };
                  };
                };
              };
            };
          };
        };
      };
  };
  perSystem =
    {
      pkgs,
      lib,
      ...
    }:
    let
      dependencies = [
        self
        self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
        self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
        self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.clan.deployment.file
        pkgs.stdenv.drvPath
        pkgs.bash.drvPath
        pkgs.nixos-anywhere
        pkgs.bubblewrap
      ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
      closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
      # with Nix 2.24 we get:
      # vm-test-run-test-installation> client # error: sized: unexpected end-of-file
      # vm-test-run-test-installation> client # error: unexpected end-of-file
      # This seems to be fixed with Nix 2.26
      # Remove this line once `pkgs.nix` is 2.26+
      nixPackage =
        assert
          lib.versionOlder pkgs.nix.version "2.26"
          && lib.versionAtLeast pkgs.nixVersions.latest.version "2.26";
        pkgs.nixVersions.latest;
    in
    {
      # On aarch64-linux, hangs on reboot with after installation:
      # vm-test-run-test-installation-without-system> installer # [ 288.002871] reboot: Restarting system
      # vm-test-run-test-installation-without-system> client # [test-install-machine] ### Done! ###
      # vm-test-run-test-installation-without-system> client # [test-install-machine] + step 'Done!'
      # vm-test-run-test-installation-without-system> client # [test-install-machine] + echo '### Done! ###'
      # vm-test-run-test-installation-without-system> client # [test-install-machine] + rm -rf /tmp/tmp.qb16EAq7hJ
      # vm-test-run-test-installation-without-system> (finished: must succeed: clan machines install --debug --flake test-flake --yes test-install-machine-without-system --target-host root@installer --update-hardware-config nixos-facter >&2, in 154.62 seconds)
      # vm-test-run-test-installation-without-system> target: starting vm
      # vm-test-run-test-installation-without-system> target: QEMU running (pid 144)
      # vm-test-run-test-installation-without-system> target: waiting for unit multi-user.target
      # vm-test-run-test-installation-without-system> target: waiting for the VM to finish booting
      # vm-test-run-test-installation-without-system> target: Guest root shell did not produce any data yet...
      # vm-test-run-test-installation-without-system> target: To debug, enter the VM and run 'systemctl status backdoor.service'.
      checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
        test-installation-without-system = (import ../lib/test-base.nix) {
          name = "test-installation-without-system";
          nodes.target = {
            services.openssh.enable = true;
            virtualisation.diskImage = "./target.qcow2";
            virtualisation.useBootLoader = true;
            nix.package = nixPackage;
          };
          nodes.installer =
            { modulesPath, ... }:
            {
              imports = [
                (modulesPath + "/../tests/common/auto-format-root-device.nix")
              ];
              services.openssh.enable = true;
              users.users.root.openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
              system.nixos.variant_id = "installer";
              environment.systemPackages = [ pkgs.nixos-facter ];
              virtualisation.emptyDiskImages = [ 512 ];
              virtualisation.diskSize = 8 * 1024;
              virtualisation.rootDevice = "/dev/vdb";
              # both installer and target need to use the same diskImage
              virtualisation.diskImage = "./target.qcow2";
              nix.package = nixPackage;
              nix.settings = {
                substituters = lib.mkForce [ ];
                hashed-mirrors = null;
                connect-timeout = lib.mkForce 3;
                flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
                experimental-features = [
                  "nix-command"
                  "flakes"
                ];
              };
              system.extraDependencies = dependencies;
            };
          nodes.client = {
            environment.systemPackages = [
              self.packages.${pkgs.system}.clan-cli
            ] ++ self.packages.${pkgs.system}.clan-cli.runtimeDependencies;
            environment.etc."install-closure".source = "${closureInfo}/store-paths";
            virtualisation.memorySize = 3048;
            nix.package = nixPackage;
            nix.settings = {
              substituters = lib.mkForce [ ];
              hashed-mirrors = null;
              connect-timeout = lib.mkForce 3;
              flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
              experimental-features = [
                "nix-command"
                "flakes"
              ];
            };
            system.extraDependencies = dependencies;
          };

          testScript = ''
            client.start()
            installer.start()

            client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
            client.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v root@installer hostname")
            client.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
            client.fail("test -f test-flake/machines/test-install-machine-without-system/hardware-configuration.nix")
            client.fail("test -f test-flake/machines/test-install-machine-without-system/facter.json")
            client.succeed("clan machines update-hardware-config --flake test-flake test-install-machine-without-system root@installer >&2")
            client.succeed("test -f test-flake/machines/test-install-machine-without-system/facter.json")
            client.succeed("rm test-flake/machines/test-install-machine-without-system/facter.json")
            client.succeed("clan machines install --debug --flake test-flake --yes test-install-machine-without-system --target-host root@installer --update-hardware-config nixos-facter >&2")
            try:
              installer.shutdown()
            except BrokenPipeError:
              # qemu has already exited
              pass

            target.state_dir = installer.state_dir
            target.start()
            target.wait_for_unit("multi-user.target")
            assert(target.succeed("cat /etc/install-successful").strip() == "ok")
          '';
        } { inherit pkgs self; };
      };
    };
}
```
```diff
@@ -92,11 +92,23 @@
         self.nixosConfigurations.test-install-machine.config.system.build.toplevel
         self.nixosConfigurations.test-install-machine.config.system.build.diskoScript
         self.nixosConfigurations.test-install-machine.config.system.clan.deployment.file
+        pkgs.bash.drvPath
         pkgs.stdenv.drvPath
         pkgs.nixos-anywhere
+        pkgs.bubblewrap
       ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
       closureInfo = pkgs.closureInfo { rootPaths = dependencies; };

+      # with Nix 2.24 we get:
+      # vm-test-run-test-installation> client # error: sized: unexpected end-of-file
+      # vm-test-run-test-installation> client # error: unexpected end-of-file
+      # This seems to be fixed with Nix 2.26
+      # Remove this line once `pkgs.nix` is 2.26+
+      nixPackage =
+        assert
+          lib.versionOlder pkgs.nix.version "2.26"
+          && lib.versionAtLeast pkgs.nixVersions.latest.version "2.26";
+        pkgs.nixVersions.latest;
     in
     {
       # On aarch64-linux, hangs on reboot with after installation:
```
```diff
@@ -108,13 +120,14 @@
       # vm-test-run-test-installation> new_machine: QEMU running (pid 80)
       # vm-test-run-test-installation> new_machine: Guest root shell did not produce any data yet...
       # vm-test-run-test-installation> new_machine: To debug, enter the VM and run 'systemctl status backdoor.service'.
-      checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system != "aarch64-linux") {
+      checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
         test-installation = (import ../lib/test-base.nix) {
           name = "test-installation";
           nodes.target = {
             services.openssh.enable = true;
             virtualisation.diskImage = "./target.qcow2";
             virtualisation.useBootLoader = true;
+            nix.package = nixPackage;

             # virtualisation.fileSystems."/" = {
             #   device = "/dev/disk/by-label/this-is-not-real-and-will-never-be-used";
```
```diff
@@ -136,6 +149,7 @@
             virtualisation.rootDevice = "/dev/vdb";
             # both installer and target need to use the same diskImage
             virtualisation.diskImage = "./target.qcow2";
+            nix.package = nixPackage;
             nix.settings = {
               substituters = lib.mkForce [ ];
               hashed-mirrors = null;
```
```diff
@@ -153,7 +167,8 @@
               self.packages.${pkgs.system}.clan-cli
             ] ++ self.packages.${pkgs.system}.clan-cli.runtimeDependencies;
             environment.etc."install-closure".source = "${closureInfo}/store-paths";
-            virtualisation.memorySize = 2048;
+            virtualisation.memorySize = 3048;
+            nix.package = nixPackage;
             nix.settings = {
               substituters = lib.mkForce [ ];
               hashed-mirrors = null;
```
```diff
@@ -174,12 +189,19 @@
             client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
             client.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v root@installer hostname")
             client.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
-            client.succeed("clan machines install --debug --flake ${../..} --yes test-install-machine --target-host root@installer >&2")
+
+            # test that we can generate hardware configurations
+            client.fail("test -f test-flake/machines/test-install-machine/facter.json")
+            client.fail("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
+            client.succeed("clan machines update-hardware-config --flake test-flake test-install-machine root@installer >&2")
+            client.succeed("test -f test-flake/machines/test-install-machine/facter.json")
+            client.succeed("clan machines update-hardware-config --backend nixos-generate-config --flake test-flake test-install-machine root@installer>&2")
+            client.succeed("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
+
+            # but we don't use them because they're not cached
+            client.succeed("rm test-flake/machines/test-install-machine/hardware-configuration.nix test-flake/machines/test-install-machine/facter.json")
+
+            client.succeed("clan machines install --debug --flake test-flake --yes test-install-machine --target-host root@installer >&2")
             try:
               installer.shutdown()
             except BrokenPipeError:
```
```diff
@@ -16,6 +16,9 @@ in
   documentation.enable = lib.mkDefault false;
   boot.isContainer = true;

+  # needed since nixpkgs 7fb2f407c01b017737eafc26b065d7f56434a992 removed the getty unit by default
+  console.enable = true;
+
   # undo qemu stuff
   system.build.initialRamdisk = "";
   virtualisation.sharedDirectories = lib.mkForce { };
@@ -31,6 +34,7 @@ in
   };
   # to accept external dependencies such as disko
   node.specialArgs.self = self;
+  _module.args = { inherit self; };
   imports = [
     test
     ./container-driver/module.nix
```
```diff
@@ -1,7 +1,8 @@
 { lib, ... }:
 {
   nixpkgs.flake.setFlakeRegistry = false;
   nixpkgs.flake.setNixPath = false;
-  nix.registry.nixpkgs.to = { };
+  nix.registry = lib.mkForce { };
   documentation.doc.enable = false;
   documentation.man.enable = false;
 }
```
```diff
@@ -7,15 +7,19 @@ in
 (nixos-lib.runTest {
   hostPkgs = pkgs;
   # speed-up evaluation
-  defaults = {
-    imports = [
-      ./minify.nix
-    ];
-    documentation.enable = lib.mkDefault false;
-    nix.settings.min-free = 0;
-    system.stateVersion = lib.version;
-  };
+  defaults = (
+    { config, ... }:
+    {
+      imports = [
+        ./minify.nix
+      ];
+      documentation.enable = lib.mkDefault false;
+      nix.settings.min-free = 0;
+      system.stateVersion = config.system.nixos.release;
+    }
+  );

   _module.args = { inherit self; };
   # to accept external dependencies such as disko
   node.specialArgs.self = self;
   imports = [ test ];
```
`checks/morph/flake-module.nix` (new file, +62):

```nix
{
  self,
  ...
}:
{
  clan.machines.test-morph-machine = {
    imports = [
      ./template/configuration.nix
      self.nixosModules.clanCore
    ];
    nixpkgs.hostPlatform = "x86_64-linux";
    environment.etc."testfile".text = "morphed";
  };

  clan.templates.machine.test-morph-template = {
    description = "Morph a machine";
    path = ./template;
  };

  perSystem =
    {
      pkgs,
      ...
    }:
    {
      checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
        test-morph = (import ../lib/test-base.nix) {
          name = "morph";

          nodes = {
            actual =
              { pkgs, ... }:
              let
                dependencies = [
                  self
                  pkgs.nixos-anywhere
                  pkgs.stdenv.drvPath
                  pkgs.stdenvNoCC
                  self.nixosConfigurations.test-morph-machine.config.system.build.toplevel
                  self.nixosConfigurations.test-morph-machine.config.system.clan.deployment.file
                ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
                closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
              in

              {
                environment.etc."install-closure".source = "${closureInfo}/store-paths";
                system.extraDependencies = dependencies;
                virtualisation.memorySize = 2048;
                environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
              };
          };
          testScript = ''
            start_all()
            actual.fail("cat /etc/testfile")
            actual.succeed("env CLAN_DIR=${self} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
            assert actual.succeed("cat /etc/testfile") == "morphed"
          '';
        } { inherit pkgs self; };
      };

    };
}
```
`checks/morph/template/configuration.nix` (new file, +12):

```nix
{ modulesPath, ... }:
{
  imports = [
    # we need these 2 modules always to be able to run the tests
    (modulesPath + "/testing/test-instrumentation.nix")
    (modulesPath + "/virtualisation/qemu-vm.nix")

    (modulesPath + "/profiles/minimal.nix")
  ];

  clan.core.enableRecommendedDefaults = false;
}
```
`clanModules/auto-upgrade/README.md` (new file, +8):

```markdown
---
description = "Set up automatic upgrades"
categories = ["System"]
features = [ "inventory" ]
---

Whether to periodically upgrade NixOS to the latest version. If enabled, a
systemd timer will run `nixos-rebuild switch --upgrade` once a day.
```
`clanModules/auto-upgrade/roles/default.nix` (new file, +24):

```nix
{
  config,
  lib,
  ...
}:
let
  cfg = config.clan.autoUpgrade;
in
{
  options.clan.autoUpgrade = {
    flake = lib.mkOption {
      type = lib.types.str;
      description = "Flake reference";
    };
  };
  config = {
    system.autoUpgrade = {
      inherit (cfg.clan.autoUpgrade) flake;
      enable = true;
      dates = "02:00";
      randomizedDelaySec = "45min";
    };
  };
}
```
```diff
@@ -9,6 +9,7 @@ in
   # only import available files, as this allows to filter the files for tests.
   flake.clanModules = filterAttrs (_name: pathExists) {
     admin = ./admin;
+    auto-upgrade = ./auto-upgrade;
     borgbackup = ./borgbackup;
     borgbackup-static = ./borgbackup-static;
     deltachat = ./deltachat;
```
```diff
@@ -3,8 +3,7 @@ description = "S3-compatible object store for small self-hosted geo-distributed
 ---

 This module generates garage specific keys automatically.
-When using garage in a distributed deployment the `rpc_key` between connected instances must be shared.
-This is currently still a manual process.
+Also shares the `rpc_secret` between instances.

 Options: [NixosModuleOptions](https://search.nixos.org/options?channel=unstable&size=50&sort=relevance&type=packages&query=garage)
 Documentation: https://garagehq.deuxfleurs.fr/
```
```diff
@@ -2,9 +2,9 @@
 {
   systemd.services.garage.serviceConfig = {
     LoadCredential = [
-      "rpc_secret_path:${config.clan.core.facts.services.garage.secret.garage_rpc_secret.path}"
-      "admin_token_path:${config.clan.core.facts.services.garage.secret.garage_admin_token.path}"
-      "metrics_token_path:${config.clan.core.facts.services.garage.secret.garage_metrics_token.path}"
+      "rpc_secret_path:${config.clan.core.vars.generators.garage-shared.files.rpc_secret.path}"
+      "admin_token_path:${config.clan.core.vars.generators.garage.files.admin_token.path}"
+      "metrics_token_path:${config.clan.core.vars.generators.garage.files.metrics_token.path}"
     ];
     Environment = [
       "GARAGE_ALLOW_WORLD_READABLE_SECRETS=true"
```
```diff
@@ -14,37 +14,30 @@
     ];
   };

-  clan.core.facts.services.garage = {
-    secret.garage_rpc_secret = { };
-    secret.garage_admin_token = { };
-    secret.garage_metrics_token = { };
-    generator.path = [
+  clan.core.vars.generators.garage = {
+    files.admin_token = { };
+    files.metrics_token = { };
+    runtimeInputs = [
       pkgs.coreutils
       pkgs.openssl
     ];
-    generator.script = ''
-      openssl rand -hex -out $secrets/garage_rpc_secret 32
-      openssl rand -base64 -out $secrets/garage_admin_token 32
-      openssl rand -base64 -out $secrets/garage_metrics_token 32
+    script = ''
+      openssl rand -base64 -out $out/admin_token 32
+      openssl rand -base64 -out $out/metrics_token 32
     '';
   };

-  # TODO: Vars is not in a useable state currently
-  # Move back, once it is implemented.
-  # clan.core.vars.generators.garage = {
-  #   files.rpc_secret = { };
-  #   files.admin_token = { };
-  #   files.metrics_token = { };
-  #   runtimeInputs = [
-  #     pkgs.coreutils
-  #     pkgs.openssl
-  #   ];
-  #   script = ''
-  #     openssl rand -hex -out $out/rpc_secret 32
-  #     openssl rand -base64 -out $out/admin_token 32
-  #     openssl rand -base64 -out $out/metrics_token 32
-  #   '';
-  # };
+  clan.core.vars.generators.garage-shared = {
+    share = true;
+    files.rpc_secret = { };
+    runtimeInputs = [
+      pkgs.coreutils
+      pkgs.openssl
+    ];
+    script = ''
+      openssl rand -hex -out $out/rpc_secret 32
+    '';
+  };

   clan.core.state.garage.folders = [ config.services.garage.settings.metadata_dir ];
 }
```
```diff
@@ -6,4 +6,4 @@ categories = [ "Network" ]

 !!! Warning
     If you've been using network manager + wpa_supplicant and now are switching to IWD read this migration guide:
-    https://iwd.wiki.kernel.org/networkmanager#converting_network_profiles
+    https://archive.kernel.org/oldwiki/iwd.wiki.kernel.org/networkmanager.html#converting_network_profiles
```
```diff
@@ -1,4 +1,9 @@
-{ lib, config, ... }:
+{
+  lib,
+  config,
+  pkgs,
+  ...
+}:

 let
   cfg = config.clan.iwd;
```
```diff
@@ -12,12 +17,13 @@ let
     {
       secret.${secret_name} = { };
       generator.prompt = "Wifi password for '${value.ssid}'";
+      # ref. man iwd.network
       generator.script = ''
         config="
         [Settings]
         AutoConnect=${if value.AutoConnect then "true" else "false"}
         [Security]
-        Passphrase=\"$prompt_value\"
+        Passphrase=$(echo -e "$prompt_value" | ${lib.getExe pkgs.gnused} "s=\\\=\\\\\\\=g;s=\t=\\\t=g;s=\r=\\\r=g;s=^ =\\\s=")
         "
         echo "$config" > "$secrets/${secret_name}"
       '';
```
```diff
@@ -10,18 +10,18 @@ let
 in
 {
   config = lib.mkMerge [
-    (lib.mkIf ((var.machineId.value or null) != null) {
+    (lib.mkIf ((var.value or null) != null) {
       assertions = [
         {
-          assertion = lib.stringLength var.machineId.value == 32;
+          assertion = lib.stringLength var.value == 32;
           message = "machineId must be exactly 32 characters long.";
         }
       ];
       boot.kernelParams = [
-        ''systemd.machine_id=${var.machineId.value}''
+        ''systemd.machine_id=${var.value}''
       ];
       environment.etc."machine-id" = {
-        text = var.machineId.value;
+        text = var.value;
       };
     })
     {
```
````diff
@@ -24,14 +24,7 @@ mycelium.default = {
     "berlin"
     "munich"
   ];
-  config = {
-    topLevelDomain = "m";
-    openFirewall = true;
-    addHostedPublicNodes = true;
-  };
 };
 ```

 This will add the machines named `berlin` and `munich` to the `mycelium` vpn.
-And will also set the toplevel domain of the mycelium vpn to `m`, meaning the
-machines are now reachable via `berlin.m` and `munich.m`.
````
```diff
@@ -4,54 +4,18 @@
   lib,
   ...
 }:
-let
-  flake = config.clan.core.settings.directory;
-  machineName = config.clan.core.settings.machine.name;
-
-  # Instances might be empty, if the module is not used via the inventory
-  #
-  # Type: { ${instanceName} :: { roles :: Roles } }
-  # Roles :: { ${role_name} :: { machines :: [string] } }
-  instances = config.clan.inventory.services.mycelium or { };
-
-  allPeers = lib.foldlAttrs (
-    acc: _instanceName: instanceConfig:
-    acc
-    ++ (
-      if (builtins.elem machineName instanceConfig.roles.peer.machines) then
-        instanceConfig.roles.peer.machines
-      else
-        [ ]
-    )
-  ) [ ] instances;
-  allPeerConfigurations = lib.filterAttrs (n: _: builtins.elem n allPeers) flake.nixosConfigurations;
-  allPeersWithIp =
-    builtins.mapAttrs
-      (_: x: lib.removeSuffix "\n" x.config.clan.core.vars.generators.mycelium.files.ip.value)
-      (
-        lib.filterAttrs (
-          _: x: (builtins.tryEval x.config.clan.core.vars.generators.mycelium.files.ip.value).success
-        ) allPeerConfigurations
-      );
-
-  ips = lib.attrValues allPeersWithIp;
-  peers = lib.concatMap (ip: [
-    "tcp://[${ip}]:9651"
-    "quic://[${ip}]:9651"
-  ]) ips;
-in
 {
   options = {
     clan.mycelium.topLevelDomain = lib.mkOption {
       type = lib.types.str;
       default = "";
       description = "Top level domain to reach hosts";
     };
-    clan.mycelium.openFirewall = lib.mkEnableOption "Open the firewall for mycelium";
-    clan.mycelium.addHostedPublicNodes = lib.mkEnableOption "Add hosted Public nodes";
-    clan.mycelium.addHosts = lib.mkOption {
+    clan.mycelium.openFirewall = lib.mkOption {
       type = lib.types.bool;
       default = true;
-      description = "Add mycelium ip's to the host file";
+      description = "Open the firewall for mycelium";
     };
+
+    clan.mycelium.addHostedPublicNodes = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = "Add hosted Public nodes";
+    };
   };
```
```diff
@@ -60,18 +24,8 @@ in
     addHostedPublicNodes = lib.mkDefault config.clan.mycelium.addHostedPublicNodes;
     openFirewall = lib.mkDefault config.clan.mycelium.openFirewall;
     keyFile = config.clan.core.vars.generators.mycelium.files.key.path;
-    inherit peers;
   };

-  config.networking.hosts = lib.mkIf (config.clan.mycelium.addHosts) (
-    lib.mapAttrs' (
-      host: ip:
-      lib.nameValuePair ip (
-        if (config.clan.mycelium.topLevelDomain == "") then [ host ] else [ "${host}.m" ]
-      )
-    ) allPeersWithIp
-  );
-
   config.clan.core.vars.generators.mycelium = {
     files."key" = { };
     files."ip".secret = false;
```
```diff
@@ -12,6 +12,9 @@
     files.password-hash = {
       neededFor = "users";
     };
+    files.password = {
+      deploy = false;
+    };
     migrateFact = "root-password";
     runtimeInputs = [
       pkgs.coreutils
```
```diff
@@ -37,6 +37,7 @@ in
       type = "rsa";
     };
   };
+
   clan.core.vars.generators.openssh = {
     files."ssh.id_ed25519" = { };
     files."ssh.id_ed25519.pub".secret = false;
@@ -50,6 +51,14 @@ in
     '';
   };

+  programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
+    hostNames = [
+      "localhost"
+      config.networking.hostName
+    ] ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
+    publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
+  };
+
   clan.core.vars.generators.openssh-rsa = lib.mkIf config.clan.sshd.hostKeys.rsa.enable {
     files."ssh.id_rsa" = { };
     files."ssh.id_rsa.pub".secret = false;
```
```diff
@@ -3,7 +3,7 @@ let
   var = config.clan.core.vars.generators.state-version.files.version or { };
 in
 {
-  system.stateVersion = lib.mkDefault var.value;
+  system.stateVersion = lib.mkDefault (lib.removeSuffix "\n" var.value);

   clan.core.vars.generators.state-version = {
     files.version = {
```
```diff
@@ -7,7 +7,8 @@
 let
   dir = config.clan.core.settings.directory;
   machineDir = dir + "/machines/";
-  syncthingPublicKeyPath = machines: machineDir + machines + "/facts/syncthing.pub";
+  machineVarDir = dir + "/vars/per-machine/";
+  syncthingPublicKeyPath = machines: machineVarDir + machines + "/syncthing/id/value";
   machinesFileSet = builtins.readDir machineDir;
   machines = lib.mapAttrsToList (name: _: name) machinesFileSet;
   syncthingPublicKeysUnchecked = builtins.map (
```
```diff
@@ -83,24 +84,26 @@ in
     configDir = "/var/lib/syncthing";
     group = "syncthing";

-    key = lib.mkDefault config.clan.core.facts.services.syncthing.secret."syncthing.key".path or null;
-    cert = lib.mkDefault config.clan.core.facts.services.syncthing.secret."syncthing.cert".path or null;
+    key = lib.mkDefault config.clan.core.vars.generators.syncthing.files.key.path or null;
+    cert = lib.mkDefault config.clan.core.vars.generators.syncthing.files.cert.path or null;
   };

-  clan.core.facts.services.syncthing = {
-    secret."syncthing.key" = { };
-    secret."syncthing.cert" = { };
-    public."syncthing.pub" = { };
-    generator.path = [
+  clan.core.vars.generators.syncthing = {
+    files.key = { };
+    files.cert = { };
+    files.api = { };
+    files.id.secret = false;
+    runtimeInputs = [
       pkgs.coreutils
       pkgs.gnugrep
       pkgs.syncthing
     ];
-    generator.script = ''
-      syncthing generate --config "$secrets"
-      mv "$secrets"/key.pem "$secrets"/syncthing.key
-      mv "$secrets"/cert.pem "$secrets"/syncthing.cert
-      cat "$secrets"/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > "$facts"/syncthing.pub
+    script = ''
+      syncthing generate --config $out
+      mv $out/key.pem $out/key
+      mv $out/cert.pem $out/cert
+      cat $out/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > $out/id
+      cat $out/config.xml | grep -oP '<apikey>\K[^<]+' | uniq > $out/api
     '';
   };
 }
```
```diff
@@ -1,5 +1,5 @@
 ---
-description = "Configures [Zerotier VPN](https://zerotier.com) secure and efficient networking within a Clan.."
+description = "Configures [Zerotier VPN](https://zerotier.com) secure and efficient networking within a Clan."
 features = [ "inventory" ]
 categories = [ "Network", "System" ]
 ---
```
`decisions/01-ClanModules.md` (new file, +547):
# Clan service modules

Status: Accepted

## Context

To define a service in Clan, you need to define two things:

- `clanModule` - defined by module authors
- `inventory` - defined by users

The `clanModule` is currently a plain NixOS module. It is conditionally imported into each machine depending on the `service` and `role`.

A `role` is the function a machine serves within a service. For example, the `backup` service has `client` and `server` roles.

The `inventory` contains the settings for the user/consumer of the module. It describes which `services` run on each machine and with which `roles`.

Additionally, any `service` can be instantiated multiple times.

This ADR proposes that we change how to write a `clanModule`. The `inventory` should get a new attribute called `instances` that allows for configuration of these modules.

### Status Quo

In this example the user configures 2 instances of the `networking` service.

The *user* defines:

```nix
{
  inventory.services = {
    # anything inside an instance is instance specific
    networking."instance1" = {
      roles.client.tags = [ "all" ];
      machines.foo.config = { ... /* machine specific settings */ };

      # this will not apply to `clients` outside of `instance1`
      roles.client.config = { ... /* client specific settings */ };
    };
    networking."instance2" = {
      roles.server.tags = [ "all" ];
      config = { ... /* applies to every machine that runs this instance */ };
    };
  };
}
```

The *module author* defines:

```nix
# networking/roles/client.nix
{ config, ... }:
let
  instances = config.clan.inventory.services.networking or { };

  serviceConfig = config.clan.networking;
in {
  ## Set some nixos options
}
```
### Problems

Problems with the current way of writing clanModules:

1. There is no way to retrieve the config of a single service instance together with its name.

2. Directly exporting a single, anonymous nixosModule without any intermediary attribute layers doesn't leave room for exporting other inventory resources such as potentially `vars` or `homeManagerConfig`.

3. Multiple config instances can't be accessed individually.
   Example:
   ```nix
   inventory = {
     services = {
       network.c-base = {
         instanceConfig.ips = {
           mors = "172.139.0.2";
         };
       };
       network.gg23 = {
         instanceConfig.ips = {
           mors = "10.23.0.2";
         };
       };
     };
   };
   ```
   This doesn't work because all instance configs are applied to the same namespace, so this currently results in a conflict.
   Resolving this problem means that new inventory modules cannot be plain NixOS modules anymore. If they are configured via `instances` / `instanceConfig`, they cannot be configured without using the inventory. (There might be ways to inject instanceConfig, but that requires knowledge of inventory internals.)

4. Writing modules for multiple instances is cumbersome. Currently the clanModule author has to write one or more `fold` operations for potentially every NixOS option to define how multiple service instances merge into each single option (see the sketch after this list). The new idea behind this ADR is to pull the common fold function into the outer context and provide it as a common helper. (See the example below; `perInstance` is analogous to the well-known `perSystem` of flake-parts.)

5. Each role has a different interface. We need to render that interface into json-schema, which currently requires creating an unnecessary test machine. Defining the interface at a higher level (outside of any machine context) allows faster evaluation and isolation by design from any machine.
   This allows rendering the UI (options tree) of a service by just knowing the service and the corresponding roles, without creating a dummy machine.

6. The interface for defining config is wrong. It is possible to define config that applies to multiple machines at once, and config that applies to a machine as a whole. But this is wrong behavior, because the options exist at the role level, so config must also always exist at the role level.
   Currently we merge options and config together, but that may produce conflicts. Those module system conflicts are very hard to foresee, since they depend on which roles exist at runtime.
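To illustrate problem 4, here is a minimal sketch of the kind of manual fold a module author has to write today. The `extraHosts` setting and the exact shape of `instanceConfig` are hypothetical and only serve to show the pattern:

```nix
# Hypothetical sketch of today's manual per-option fold over service instances.
# `extraHosts` is an illustrative setting, not a real clanModule option.
{ config, lib, ... }:
let
  instances = config.clan.inventory.services.networking or { };

  # Merge one setting across all instances by hand; this fold has to be
  # repeated for every option the module exposes.
  allExtraHosts = lib.foldlAttrs (
    acc: _instanceName: instanceConfig:
    acc ++ (instanceConfig.config.extraHosts or [ ])
  ) [ ] instances;
in
{
  networking.extraHosts = lib.concatStringsSep "\n" allExtraHosts;
}
```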
## Proposed Change

We will create a new module class which is defined by `_class = "clan.service"` ([documented here](https://nixos.org/manual/nixpkgs/stable/#module-system-lib-evalModules-param-class)).

Existing clan modules will still work by continuing to be plain NixOS modules. All new modules can set `_class = "clan.service";` to use the proposed features.

In short, the change introduces a new module class that makes the currently necessary folding of `clan.service`s `instances` and `roles` a common operation. The module author defines the inner function of the fold operations, which is called a `clan.service` module.

Such a module has the following attributes:

### `roles.<roleName>.interface`

Each role can have a different interface for how it is configured.
I.e. a `client` role might have different options than a `server` role.

This attribute should be used to define `options`. (Not `config`!)

The end-user defines the corresponding `config`.

This submodule will be evaluated for each `instance role` combination and passed as an argument into `perInstance`.

This submodule's `options` will be evaluated to build the UI for the module dynamically.
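As a minimal sketch of what such a role interface could look like (the `ipRanges` option is hypothetical and only illustrates the shape):

```nix
# Hypothetical role interface sketch; the option name is illustrative.
{
  roles.client.interface =
    { lib, ... }:
    {
      options.ipRanges = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        default = [ ];
        description = "IP ranges this client is allowed to use";
      };
    };
}
```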
### **Result attributes**

Modules of this proposal produce some common result attributes. These will be referenced later in this document and are commonly defined as:

- `nixosModule` - a single NixOS module (`{config, ...}:{ environment.systemPackages = []; }`).
- `services.<serviceName>` - an attribute set of `_class = clan.service`, which contains the same things this whole ADR proposes.
- `vars` - to be defined; reserved for now.

### `roles.<roleName>.perInstance`

This acts like a function that maps over all `service instances` of a given `role`.
It produces the previously defined **result attributes**.

I.e. this allows producing multiple `nixosModules`, one for every instance of the service,
making multiple `service instances` convenient by leveraging the module-system merge behavior.

### `perMachine`

This acts like a function that maps over all `machines` of a given `service`.
It produces the previously defined **result attributes**.

I.e. this allows producing exactly one `nixosModule` per `service`,
making it easy to set NixOS options only once if they have a one-to-one relation to a service being enabled.

Note: `lib.mkIf` can be used on i.e. `roleName` to make the scope more specific.

### `services.<serviceName>`

This allows defining nested services,
i.e. the *service* `backup` might define a nested *service* `ssh` which sets up an ssh connection.

This can be defined in `perMachine` and `perInstance`:

- For every `instance`, a given `service` may add multiple nested `services`.
- A given `service` may add a static set of nested `services`, even if there are multiple instances of the same given service.

Q: Why is this not a top-level attribute?
A: Because nested service definitions may also depend on a `role`, which must be resolved depending on `machine` and `instance`. The top-level module doesn't know anything about machines. Keeping the service layer machine-agnostic allows us to build the UI for a module without adding any machines. (One of the problems with the current system.)
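A rough sketch of a nested service, following the description above (the exact API is to be specified with the actual implementation; names and shapes here are illustrative only):

```nix
# Hypothetical sketch: a `backup` service requesting a nested `ssh` service.
{
  roles.client.perInstance =
    { instanceName, ... }:
    {
      # Every instance of `backup` contributes one nested `ssh` service.
      services.ssh = {
        _class = "clan.service";
        # ... configuration of the ssh sub-service for this instance ...
      };
    };
}
```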
```
zerotier/default.nix
```
```nix
|
||||
# Some example module
|
||||
{
|
||||
_class = "clan.service";
|
||||
|
||||
# Analog to flake-parts 'perSystem' only that it takes instance
|
||||
# The exact arguments will be specified and documented along with the actual implementation.
|
||||
roles.client.perInstance = {
|
||||
# attrs : settings of that instance
|
||||
settings,
|
||||
# string : name of the instance
|
||||
instanceName,
|
||||
# { name :: string , roles :: listOf string; }
|
||||
machine,
|
||||
# { {roleName} :: { machines :: listOf string; } }
|
||||
roles,
|
||||
...
|
||||
}:
|
||||
{
|
||||
# Return a nixos module for every instance.
|
||||
# The module author must be aware that this may return multiple modules (one for every instance) which are merged natively
|
||||
nixosModule = {
|
||||
config.debug."${instanceName}-client" = instanceConfig;
|
||||
};
|
||||
};
|
||||
# Function that is called once for every machine with the role "client"
|
||||
# Receives at least the following parameters:
|
||||
#
|
||||
# machine :: { name :: String, roles :: listOf string; }
|
||||
# Name of the machine
|
||||
#
|
||||
# instances :: { instanceName :: { roleName :: { machines :: [ string ]; }}}
|
||||
# Resolved roles
|
||||
# Same type as currently in `clan.inventory.services.<ServiceName>.<InstanceName>.roles`
|
||||
#
|
||||
# The exact arguments will be specified and documented along with the actual implementation.
|
||||
perMachine = {machine, instances, ... }: {
|
||||
nixosModule =
|
||||
{ lib, ... }:
|
||||
{
|
||||
# Some shared code should be put into a shared file
|
||||
# Which is then imported into all/some roles
|
||||
imports = [
|
||||
../shared.nix
|
||||
] ++
|
||||
(lib.optional (builtins.elem "client" machine.roles)
|
||||
{
|
||||
options.debug = lib.mkOption {
|
||||
type = lib.types.attrsOf lib.types.raw;
|
||||
};
|
||||
});
|
||||
};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
## Inventory.instances

This document also proposes to add a new attribute to the inventory that allows for exclusive configuration of the new modules.
This allows for a better separation between the new and the old way of writing and configuring modules, keeping the new implementation focused and keeping existing technical debt out from the beginning.

The following thoughts went into this:

- Getting rid of `<serviceName>`: using only the attribute name (a plain string) is not sufficient for defining the source of the service module. Encoding meta information into it would also require an extensible format specification and parser.
- Removing `instanceConfig` and `machineConfig`: there is no such config. Service configuration must always be role-specific, because the options are defined on the role.
- Renaming `config` to `settings` or similar, since `config` is a module-system internal name.
- Tags and machines should be an attribute set to allow setting `settings` on that level instead.
```nix
{
  inventory.instances = {
    "instance1" = {
      # Allows to define where the module should be imported from.
      module = {
        input = "clan-core";
        name = "borgbackup";
      };
      # Settings that apply to all client machines
      roles.client.settings = { };
      # Settings that apply to the client service of the machine with name <machineName>.
      # There might be a server service that takes different settings on the same machine!
      roles.client.machines.<machineName>.settings = { };
      # Settings that apply to all client instances with tag <tagName>
      roles.client.tags.<tagName>.settings = { };
    };
    "instance2" = {
      # ...
    };
  };
}
```
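For illustration, a concrete (hypothetical) instance of the above, assigning one server by name and clients both by name and by tag:

```nix
{
  inventory.instances = {
    "backup-home" = {
      module = {
        input = "clan-core";
        name = "borgbackup";
      };
      # role-wide settings, merged into every client
      roles.client.settings = { };
      # machine-specific settings, merged on top of the role-wide ones
      roles.client.machines."laptop".settings = { };
      # every machine tagged "homelab" also joins as a client
      roles.client.tags."homelab".settings = { };
      roles.server.machines."nas".settings = { };
    };
  };
}
```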

## Iteration note

We want to implement the system as described. Once we have sufficient data on real-world use cases and modules, we might revisit this document along with the updated implementation.

## Real world example

The following module demonstrates the idea using *borgbackup* as an example.

```nix
{
  _class = "clan.service";

  # Define the 'options' of the 'settings' argument of perInstance.
  roles.server.interface =
    { lib, ... }:
    {
      # read below as `settings.directory`
      options.directory = lib.mkOption {
        type = lib.types.str;
        default = "/var/lib/borgbackup";
        description = ''
          The directory where the borgbackup repositories are stored.
        '';
      };
    };

  roles.server.perInstance =
    {
      instanceName,
      settings,
      roles,
      ...
    }:
    {
      nixosModule =
        { config, lib, ... }:
        let
          dir = config.clan.core.settings.directory;
          machineDir = dir + "/vars/per-machine/";
          allClients = roles.client.machines;
        in
        {
          # services.borgbackup is a native nixos option
          config.services.borgbackup.repos =
            let
              borgbackupIpMachinePath = machine: machineDir + machine + "/borgbackup/borgbackup.ssh.pub/value";

              machinesMaybeKey = builtins.map (
                machine:
                let
                  fullPath = borgbackupIpMachinePath machine;
                in
                if builtins.pathExists fullPath then
                  machine
                else
                  lib.warn ''
                    Machine ${machine} does not have a borgbackup key at ${fullPath},
                    run `clan vars generate ${machine}` to generate it.
                  '' null
              ) allClients;

              machinesWithKey = lib.filter (x: x != null) machinesMaybeKey;

              hosts = builtins.map (machine: {
                name = instanceName + machine;
                value = {
                  path = "${settings.directory}/${machine}";
                  authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machine)) ];
                };
              }) machinesWithKey;
            in
            # listToAttrs already yields { } for an empty list
            builtins.listToAttrs hosts;
        };
    };

  roles.client.interface =
    { lib, ... }:
    {
      # There might be a better interface now. This is just how clan borgbackup was configured in the 'old' way.
      options.destinations = lib.mkOption {
        type = lib.types.attrsOf (
          lib.types.submodule (
            { name, ... }:
            {
              options = {
                name = lib.mkOption {
                  type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
                  default = name;
                  description = "the name of the backup job";
                };
                repo = lib.mkOption {
                  type = lib.types.str;
                  description = "the borgbackup repository to backup to";
                };
                rsh = lib.mkOption {
                  type = lib.types.nullOr lib.types.str;
                  default = null;
                  defaultText = "ssh -i \${config.clan.core.vars.generators.borgbackup.files.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
                  description = "the rsh to use for the backup";
                };
              };
            }
          )
        );
        default = { };
        description = ''
          destinations where the machine should be backed up to
        '';
      };

      options.exclude = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        example = [ "*.pyc" ];
        default = [ ];
        description = ''
          Directories/Files to exclude from the backup.
          Use * as a wildcard.
        '';
      };
    };

  roles.client.perInstance =
    {
      instanceName,
      roles,
      machine,
      settings,
      ...
    }:
    {
      nixosModule =
        {
          config,
          lib,
          pkgs,
          ...
        }:
        let
          allServers = roles.server.machines;

          preBackupScript = ''
            declare -A preCommandErrors

            ${lib.concatMapStringsSep "\n" (
              state:
              lib.optionalString (state.preBackupCommand != null) ''
                echo "Running pre-backup command for ${state.name}"
                if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
                  preCommandErrors["${state.name}"]=1
                fi
              ''
            ) (lib.attrValues config.clan.core.state)}

            if [[ ''${#preCommandErrors[@]} -gt 0 ]]; then
              echo "pre-backup commands failed for the following services:"
              for state in "''${!preCommandErrors[@]}"; do
                echo "  $state"
              done
              exit 1
            fi
          '';

          destinations =
            let
              destList = builtins.map (serverName: {
                name = "${instanceName}-${serverName}";
                value = {
                  repo = "borg@${serverName}:/var/lib/borgbackup/${machine.name}";
                  rsh = "ssh -i ${
                    config.clan.core.vars.generators."borgbackup-${instanceName}".files."borgbackup.ssh".path
                  } -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
                } // settings.destinations.${serverName};
              }) allServers;
            in
            (builtins.listToAttrs destList);
        in
        {
          config = {
            # Derived from the destinations
            systemd.services = lib.mapAttrs' (
              _: dest:
              lib.nameValuePair "borgbackup-job-${instanceName}-${dest.name}" {
                # since borgbackup mounts the system read-only, we need to run in an ExecStartPre script, so we can generate additional files.
                serviceConfig.ExecStartPre = [
                  ''+${pkgs.writeShellScript "borgbackup-job-${dest.name}-pre-backup-commands" preBackupScript}''
                ];
              }
            ) destinations;

            services.borgbackup.jobs = lib.mapAttrs (_destinationName: dest: {
              paths = lib.unique (
                lib.flatten (map (state: state.folders) (lib.attrValues config.clan.core.state))
              );
              exclude = settings.exclude;
              repo = dest.repo;
              environment.BORG_RSH = dest.rsh;
              compression = "auto,zstd";
              startAt = "*-*-* 01:00:00";
              persistentTimer = true;

              encryption = {
                mode = "repokey";
                passCommand = "cat ${config.clan.core.vars.generators."borgbackup-${instanceName}".files."borgbackup.repokey".path}";
              };

              prune.keep = {
                within = "1d"; # Keep all archives from the last day
                daily = 7;
                weekly = 4;
                monthly = 0;
              };
            }) destinations;

            environment.systemPackages = [
              (pkgs.writeShellApplication {
                name = "borgbackup-create";
                runtimeInputs = [ config.systemd.package ];
                text = ''
                  ${lib.concatMapStringsSep "\n" (dest: ''
                    systemctl start borgbackup-job-${dest.name}
                  '') (lib.attrValues destinations)}
                '';
              })
              (pkgs.writeShellApplication {
                name = "borgbackup-list";
                runtimeInputs = [ pkgs.jq ];
                text = ''
                  (${
                    lib.concatMapStringsSep "\n" (
                      dest:
                      # we need yes here to skip the changed url verification
                      ''echo y | /run/current-system/sw/bin/borg-job-${dest.name} list --json | jq '[.archives[] | {"name": ("${dest.name}::${dest.repo}::" + .name)}]' ''
                    ) (lib.attrValues destinations)
                  }) | jq -s 'add // []'
                '';
              })
              (pkgs.writeShellApplication {
                name = "borgbackup-restore";
                runtimeInputs = [ pkgs.gawk ];
                text = ''
                  cd /
                  IFS=':' read -ra FOLDER <<< "''${FOLDERS-}"
                  job_name=$(echo "$NAME" | awk -F'::' '{print $1}')
                  backup_name=''${NAME#"$job_name"::}
                  if [[ ! -x /run/current-system/sw/bin/borg-job-"$job_name" ]]; then
                    echo "borg-job-$job_name not found: Backup name is invalid" >&2
                    exit 1
                  fi
                  echo y | /run/current-system/sw/bin/borg-job-"$job_name" extract "$backup_name" "''${FOLDER[@]}"
                '';
              })
            ];
            # every borgbackup instance adds its own vars
            clan.core.vars.generators."borgbackup-${instanceName}" = {
              files."borgbackup.ssh.pub".secret = false;
              files."borgbackup.ssh" = { };
              files."borgbackup.repokey" = { };

              migrateFact = "borgbackup";
              runtimeInputs = [
                pkgs.coreutils
                pkgs.openssh
                pkgs.xkcdpass
              ];
              script = ''
                ssh-keygen -t ed25519 -N "" -f $out/borgbackup.ssh
                xkcdpass -n 4 -d - > $out/borgbackup.repokey
              '';
            };
          };
        };
    };

  perMachine = {
    nixosModule =
      { ... }:
      {
        clan.core.backups.providers.borgbackup = {
          list = "borgbackup-list";
          create = "borgbackup-create";
          restore = "borgbackup-restore";
        };
      };
  };
}
```
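In day-to-day use (a sketch; the machine name `client` is hypothetical), the vars generator and the helper scripts installed above would be exercised roughly like this:

```bash
# generate the per-instance ssh key and repokey for a client machine
clan vars generate client

# after deploying, trigger and inspect backups on the client
ssh root@client borgbackup-create
ssh root@client borgbackup-list
```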

## Prior-art

- https://github.com/NixOS/nixops
- https://github.com/infinisil/nixus

@@ -21,14 +21,14 @@ Let's get your development environment up and running:

    curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
    ```

1. **Install direnv**:

    - To automatically set up a devshell on entering the directory
    ```bash
    nix profile install nixpkgs#nix-direnv-flakes nixpkgs#direnv
    ```

1. **Add direnv to your shell**:

    - Direnv needs to [hook into your shell](https://direnv.net/docs/hook.html) to work.
    You can do this by executing the following command. The example below will set up direnv for `zsh` and `bash`:

@@ -37,10 +37,10 @@ Let's get your development environment up and running:

    echo 'eval "$(direnv hook zsh)"' >> ~/.zshrc && echo 'eval "$(direnv hook bash)"' >> ~/.bashrc && eval "$SHELL"
    ```

1. **Allow the devshell**
    - Go to `clan-core/pkgs/clan-cli` and run `direnv allow` to set up the development environment necessary to execute the `clan` command.

1. **Create a Gitea Account**:
    - Register an account on https://git.clan.lol
    - Fork the [clan-core](https://git.clan.lol/clan/clan-core) repository
    - Clone the repository and navigate to it

@@ -48,30 +48,7 @@ Let's get your development environment up and running:

    ```bash
    git remote add upstream gitea@git.clan.lol:clan/clan-core.git
    ```

5. **Create an access token**:
    - Log in to Gitea.
    - Go to your account settings.
    - Navigate to the Applications section.
    - Click Generate New Token.
    - Name your token and select all available scopes.
    - Generate the token and copy it for later use.
    - Your access token is now ready to use with all permissions.

5. **Register Your Gitea Account Locally**:

    - Execute the following command to add your Gitea account locally:
    ```bash
    tea login add
    ```
    - Fill out the prompt as follows:
        - URL of Gitea instance: `https://git.clan.lol`
        - Name of new Login [git.clan.lol]:
        - Do you have an access token? Yes
        - Token: <yourtoken>
        - Set Optional settings: No

1. **Allow .envrc**:

    - When you enter the directory, you'll receive an error message like this:
    ```bash
@@ -79,7 +56,7 @@ Let's get your development environment up and running:
    ```
    - Execute `direnv allow` to automatically execute the shell script `.envrc` when entering the directory.

1. **(Optional) Install Git Hooks**:
    - To syntax check your code you can run:
    ```bash
    nix fmt
@@ -89,15 +66,9 @@ Let's get your development environment up and running:
    ./scripts/pre-commit
    ```

8. **Open a Pull Request**:
    - To automatically open up a pull request you can use our tool called:
    ```
    merge-after-ci --reviewers Mic92 Lassulus Qubasa
    ```

## Related Projects

- **Data Mesher**: [data-mesher](https://git.clan.lol/clan/data-mesher)
- **Nixos Facter**: [nixos-facter](https://github.com/nix-community/nixos-facter)
- **Nixos Anywhere**: [nixos-anywhere](https://github.com/nix-community/nixos-anywhere)
- **Disko**: [disko](https://github.com/nix-community/disko)
@@ -128,8 +99,12 @@ run(
    ),
    RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
)
```

The `<path_to_local_src>` doesn't need to be a local path; it can be any valid [flakeref](https://nix.dev/manual/nix/2.26/command-ref/new-cli/nix3-flake.html#flake-references),
and can thus point to already-opened PRs, for example, to test them.

# Standards

- Every new module name should be in kebab-case.

@@ -48,6 +48,7 @@ nav:
      - Add Machines: getting-started/configure.md
      - Secrets & Facts: getting-started/secrets.md
      - Deploy Machine: getting-started/deploy.md
      - Continuous Integration: getting-started/check.md
      - Guides:
          - Disk Encryption: getting-started/disk-encryption.md
          - Mesh VPN: getting-started/mesh-vpn.md
@@ -61,8 +62,10 @@ nav:
      - Authoring:
          - Modules: clanmodules/index.md
          - Disk Templates: manual/disk-templates.md
      - Contributing:
          - Contribute: contributing/contribute.md
          - Debugging: contributing/debugging.md
          - Testing: contributing/testing.md
      - Repo Layout: manual/repo-layout.md
      - Migrate existing Flakes: manual/migration-guide.md
      # - Concepts:
@@ -107,6 +110,7 @@ nav:
      - reference/clanModules/thelounge.md
      - reference/clanModules/trusted-nix-caches.md
      - reference/clanModules/user-password.md
      - reference/clanModules/auto-upgrade.md
      - reference/clanModules/vaultwarden.md
      - reference/clanModules/xfce.md
      - reference/clanModules/zerotier-static-peers.md

@@ -585,7 +585,7 @@ Each attribute is documented below

```nix
buildClan {
  self = self;
  machines = {
    jon = { };
    sara = { };

@@ -51,6 +51,20 @@ wintux

If you're using VSCode, it has a handy feature that makes paths to source code files clickable in the integrated terminal. Combined with the previously mentioned techniques, this allows you to open a Clan in VSCode, execute a command like `clan machines list --debug`, and receive a printed path to the code that initiates the subprocess. With the `Ctrl` key (or `Cmd` on macOS) and a mouse click, you can jump directly to the corresponding line in the code file and add a `breakpoint()` function to it, to inspect the internal state.

## Finding Print Messages

To identify where a specific print message comes from, you can enable a helpful feature. Simply set the environment variable `export TRACE_PRINT=1`. When you run commands in `--debug` mode, each print message will include information about its source location.

If you need more details, you can expand the stack trace information that appears with each print by setting the environment variable `export TRACE_DEPTH=3`.

## Analyzing Performance

To understand what's causing slow performance, set the environment variable `export CLAN_CLI_PERF=1`. When a clan command completes, you'll see a summary of various performance metrics, helping you identify what's taking up time.

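For example (a sketch combining the variables described above in one debugging session):

```bash
export TRACE_PRINT=1    # each print message includes its source location
export TRACE_DEPTH=3    # expand the stack trace shown with each print
export CLAN_CLI_PERF=1  # print a performance summary when the command finishes
clan machines list --debug
```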

## See all possible packages and tests

To quickly show all possible packages and tests execute:

docs/site/contributing/testing.md (new file, 316 lines)
@@ -0,0 +1,316 @@

# Testing your contributions

Each feature added to clan should be tested extensively via automated tests.

This document covers different methods of automated testing, including creating, running and debugging such tests.

In order to test the behavior of clan, different testing frameworks are used depending on the concern:

- NixOS VM tests: for high level integration
- NixOS container tests: for high level integration
- Python tests via pytest: for unit tests and integration tests
- Nix eval tests: for nix functions, libraries, modules, etc.

## NixOS VM Tests

The [NixOS VM Testing Framework](https://nixos.org/manual/nixos/stable/index.html#sec-nixos-tests) is used to create high level integration tests by running one or more VMs generated from a specified config. Commands can be executed on the booted machine(s) to verify that a deployment of a service works as expected. All machines within a test are connected by a virtual network. Internet access is not available.

### When to use VM tests

- testing that a service defined through a clan module works as expected after deployment
- testing clan-cli subcommands which require accessing a remote machine

### When not to use VM tests

NixOS VM tests are slow and expensive. They should only be used for testing high level integration of components.
VM tests should be avoided wherever it is possible to implement a cheaper unit test instead.

- testing detailed behavior of a certain clan-cli command -> use unit testing via pytest instead
- regression testing -> add a unit test

### Finding examples for VM tests

Existing nixos vm tests in clan-core can be found by using ripgrep:

```shellSession
rg "import.*/lib/test-base.nix"
```

### Locating definitions of failing VM tests

All nixos vm tests in clan are exposed as individual flake outputs under `checks.x86_64-linux.{test-attr-name}`.
If a test fails in CI:

- look for the job name of the test near the top of the CI job page, for example `gitea:clan/clan-core#checks.x86_64-linux.borgbackup/1242`
- in this case `checks.x86_64-linux.borgbackup` is the attribute path
- note the last element of that attribute path, in this case `borgbackup`
- search for the attribute name inside the `/checks` directory via ripgrep

Example: locating the vm test named `borgbackup`:

```shellSession
$ rg "borgbackup =" ./checks
./checks/flake-module.nix
41: borgbackup = import ./borgbackup nixosTestArgs;
```

-> the location of that test is `/checks/flake-module.nix`, line `41`.

### Adding vm tests

Create a nixos test module under `/checks/{name}/default.nix` and import it in `/checks/flake-module.nix`, as shown in the sketch below.

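A minimal sketch of such a test module (the test name and the exact arguments accepted by `test-base.nix` are assumptions here, not a verified API):

```nix
# /checks/hello/default.nix (hypothetical test)
(import ../lib/test-base.nix) {
  name = "hello";

  nodes.machine =
    { pkgs, ... }:
    {
      environment.systemPackages = [ pkgs.hello ];
    };

  testScript = ''
    start_all()
    # fails the test if the command exits non-zero
    machine.succeed("hello")
  '';
}
```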
### Running VM tests

```shellSession
nix build .#checks.x86_64-linux.{test-attr-name}
```
(replace `{test-attr-name}` with the name of the test)

### Debugging VM tests

The following techniques can be used to debug a VM test:

#### Print Statements

Locate the definition (see above) and add print statements, for example `print(client.succeed("systemctl --failed"))`, then re-run the test via `nix build` (see above).

#### Interactive Shell

- Execute the vm test outside the nix sandbox via the following command:
  `nix run .#checks.x86_64-linux.{test-attr-name}.driver -- --interactive`
- Then run the commands in the machines manually, for example:
  ```python3
  start_all()
  machine1.succeed("echo hello")
  ```

#### Breakpoints

To get an interactive shell at a specific line in the VM test script, add a `breakpoint()` call before the line to debug, then run the test outside of the sandbox via:
`nix run .#checks.x86_64-linux.{test-attr-name}.driver`
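For example (a sketch; the service being checked is hypothetical), placing a breakpoint in the middle of a test script:

```python3
start_all()
machine.succeed("systemctl is-active -q multi-user.target")
breakpoint()  # the driver stops here and drops into an interactive shell
machine.succeed("echo 'continuing after inspection'")
```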
## NixOS Container Tests

These are very similar to NixOS VM tests, in that they run virtualized nixos machines, but instead of using VMs they use containers, which are much cheaper to launch.
As of now the container test driver is a downstream development in clan-core.
Basically everything stated under the NixOS VM tests section applies here, with some limitations.

Limitations:

- does not yet support networking
- supports only one machine as of now

### Where to find examples for NixOS container tests

Existing nixos container tests in clan-core can be found by using ripgrep:

```shellSession
rg "import.*/lib/container-test.nix"
```

## Python tests via pytest

Since the clan cli is written in python, the `pytest` framework is used to define unit tests and integration tests via python.

Due to their superior efficiency, python tests should be preferred over VM or container tests wherever possible.

### When to use python tests

- writing unit tests for python functions and modules, or bugfixes of such
- all integration tests that do not require building or running a nixos machine
- impure integration tests that require internet access (very rare, try to avoid)

### When not to use python tests

- integration tests that require building or running a nixos machine (use NixOS VM or container tests instead)
- testing behavior of a nix function or library (use nix eval tests instead)

### Finding examples of python tests

Existing python tests in clan-core can be found by using ripgrep:

```shellSession
rg "import pytest"
```

### Locating definitions of failing python tests

If any python test fails in the CI pipeline, an error message like this can be found at the end of the log:

```
...
FAILED tests/test_machines_cli.py::test_machine_delete - clan_cli.errors.ClanError: Template 'new-machine' not in 'inputs.clan-core
...
```

In this case the test is defined in the file `/tests/test_machines_cli.py` via the test function `test_machine_delete`.

### Adding python tests

If a specific python module is tested, the test should be located near the tested module in a subdirectory called `./tests`.
If the test is not clearly related to a specific module, put it in the top-level `./tests` directory of the tested python package. For `clan-cli` this would be `/pkgs/clan-cli/clan_cli/tests`.
All filenames must be prefixed with `test_` and test functions prefixed with `test_` for pytest to discover them, as in the sketch below.
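As a minimal sketch (the file path and function under test are hypothetical), a test that pytest would discover under these conventions:

```python
# pkgs/clan-cli/clan_cli/tests/test_ports.py (hypothetical)
import pytest


def parse_port(value: str) -> int:
    """Parse a TCP port from a string, rejecting out-of-range values."""
    port = int(value)
    if not 0 < port < 65536:
        raise ValueError(f"invalid port: {port}")
    return port


def test_parse_port_valid() -> None:
    assert parse_port("22") == 22


def test_parse_port_out_of_range() -> None:
    with pytest.raises(ValueError):
        parse_port("70000")
```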
### Running python tests

#### Running all python tests

To run all python tests which are executed in the CI pipeline locally, use this `nix build` command:

```shellSession
nix build .#checks.x86_64-linux.clan-pytest-{with,without}-core
```

#### Running a specific python test

To run a specific python test outside the nix sandbox:

1. Enter the development environment of the python package, by either:
    - having direnv enabled and entering the directory of the package (eg. `/pkgs/clan-cli`)
    - or using the command `select-shell {package}` in the top-level dev shell of clan-core (eg. `select-shell clan-cli`)
2. Execute the test via pytest by issuing:
   `pytest ./path/to/test_file.py::test_function_name -s -n0`

The flags `-sn0` are useful to forward all stdout/stderr output to the terminal and to be able to debug interactively via `breakpoint()`.

### Debugging python tests

To debug a specific python test, find its definition (see above) and make sure to enter the correct dev environment for that python package.

Modify the test and add `breakpoint()` statements to it.

Execute the test using the flags `-sn0` in order to get an interactive shell at the breakpoint:

```shellSession
pytest ./path/to/test_file.py::test_function_name -sn0
```

## Nix Eval Tests

### When to use nix eval tests

Nix eval tests are good for testing any nix logic, including

- nix functions
- nix libraries
- modules for the nixos module system

### When not to use nix eval tests

- tests that require building nix derivations (except some very cheap ones)
- tests that require running programs written in other languages
- tests that require building or running nixos machines

### Finding examples of nix eval tests

Existing nix eval tests can be found via this ripgrep command:

```shellSession
rg "nix-unit --eval-store"
```

### Locating definitions of failing nix eval tests

Failing nix eval tests look like this:

```shellSession
> ✅ test_attrsOf_attrsOf_submodule
> ✅ test_attrsOf_submodule
> ❌ test_default
> /build/nix-8-2/expected.nix --- Nix
> 1 { foo = { bar = { __prio = 1500; }; } 1 { foo = { bar = { __prio = 1501; }; }
> . ; }                                  . ; }
>
>
> ✅ test_no_default
> ✅ test_submodule
> ✅ test_submoduleWith
> ✅ test_submodule_with_merging
>
> 😢 6/7 successful
> error: Tests failed
```

To locate the definition, find the flake attribute name of the failing test near the top of the CI job page, for example `gitea:clan/clan-core#checks.x86_64-linux.lib-values-eval/1242`.

In this case `lib-values-eval` is the attribute we are looking for.

Find the attribute via ripgrep:

```shellSession
$ rg "lib-values-eval ="
lib/values/flake-module.nix
21: lib-values-eval = pkgs.runCommand "tests" { nativeBuildInputs = [ pkgs.nix-unit ]; } ''
```

In this case the test is defined in the file `lib/values/flake-module.nix` at line 21.

### Adding nix eval tests

In clan core, the following pattern is usually followed:

- tests are put in a `test.nix` file
- a CI job is exposed via a `flake-module.nix`
- that `flake-module.nix` is imported via the `flake.nix` at the root of the project

For example, see `/lib/values/{test.nix,flake-module.nix}`.
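A minimal sketch of such a `test.nix` (the test names are hypothetical; `nix-unit` evaluates each `test_*` attribute and compares `expr` against `expected`):

```nix
# test.nix (hypothetical)
{
  lib ? import <nixpkgs/lib>,
}:
{
  test_simple_addition = {
    expr = 1 + 1;
    expected = 2;
  };

  test_attrset_merge = {
    expr = lib.recursiveUpdate { a.b = 1; } { a.c = 2; };
    expected = {
      a = {
        b = 1;
        c = 2;
      };
    };
  };
}
```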
### Running nix eval tests

Since all nix eval tests are exposed via the flake outputs, they can be run via `nix build`:

```shellSession
nix build .#checks.x86_64-linux.{test-attr-name}
```

For quicker iteration times, instead of `nix build`, use the `nix-unit` command available in the dev environment.
Example:

```shellSession
nix-unit --flake .#legacyPackages.x86_64-linux.{test-attr-name}
```

### Debugging nix eval tests

Follow the instructions above to find the definition of the test, then use one of the following techniques:

#### Print debugging

Add `lib.trace` or `lib.traceVal` statements in order to print some variables during evaluation.

#### Nix repl

Use `nix repl` to evaluate and inspect the test.

Each test consists of an `expr` (expression) and an `expected` field. `nix-unit` simply checks if `expr == expected` and prints the diff if that's not the case.

`nix repl` can be used to inspect `expr` manually, or any other variables that you choose to expose.

Example:

```shellSession
$ nix repl
Nix 2.25.5
Type :? for help.
nix-repl> tests = import ./lib/values/test.nix {}

nix-repl> tests
{
  test_attrsOf_attrsOf_submodule = { ... };
  test_attrsOf_submodule = { ... };
  test_default = { ... };
  test_no_default = { ... };
  test_submodule = { ... };
  test_submoduleWith = { ... };
  test_submodule_with_merging = { ... };
}

nix-repl> tests.test_default.expr
{
  foo = { ... };
}
```

@@ -143,3 +143,25 @@ Ensure the path to the public key is correct.

```bash
clan backups create mymachine
```

- **Restoring Backups:** To restore a backup that has been listed by the list command (NAME):

```bash
clan backups restore [MACHINE] [PROVIDER] [NAME]
```

Example (restoring a machine called `client` with the backup provider `borgbackup`):

```bash
clan backups restore client borgbackup [NAME]
```

The `backups` command is service-aware and allows optional specification of the `--service` flag.

To only restore the service called `zerotier` on a machine called `client` through the backup provider `borgbackup`, use the following command:

```bash
clan backups restore client borgbackup [NAME] --service zerotier
```

docs/site/getting-started/check.md (new file, 28 lines)
@@ -0,0 +1,28 @@

### Generate Facts and Vars

Typically, this step is handled automatically when a machine is deployed. However, to enable the use of `nix flake check` with your configuration, it must be completed manually beforehand.

Currently, generating all the necessary facts requires two separate commands. This is due to the coexistence of two parallel secret management solutions:
the newer, recommended version (`clan vars`) and the older version (`clan facts`) that we are slowly phasing out.

To generate both facts and vars, execute the following commands:

```sh
clan facts generate && clan vars generate
```

### Check Configuration

Validate your configuration by running:

```bash
nix flake check
```

This command helps ensure that your system configuration is correct and free from errors.

!!! Tip

    You can integrate this step into your [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) workflow to ensure that only valid Nix configurations are merged into your codebase.

@@ -3,7 +3,6 @@ Managing machine configurations can be done in the following ways:

- writing `nix` expressions in a `flake.nix` file,
- placing `autoincluded` files into your machine directory,
- configuring everything in a simple UI (upcoming).

Clan currently offers the following methods to configure machines:

@@ -79,9 +78,14 @@ Adding or configuring a new machine requires two simple steps:

      └─nvme0n1p3 nvme-eui.e8238fa6bf530001001b448b4aec2929-part3 swap 16.8G
      ```

    !!! Warning
        Make sure to copy the `ID-LINK` from the toplevel disk device like `nvme0n1` or `sda` instead of `nvme0n1p1` or `sda1`

2. Edit the following fields inside the `./machines/jon/configuration.nix` and/or `./machines/sara/configuration.nix`

    <!-- Note: Use "jon" instead of "<machine>" as "<" is not supported in title tag -->
    ```nix title="./machines/jon/configuration.nix" hl_lines="13 18 22 26"
    {
      imports = [
        ./hardware-configuration.nix
@@ -94,16 +98,15 @@ Adding or configuring a new machine requires two simple steps:

      ];

      # Put your username here for login
      users.users.user.name = "__YOUR_USERNAME__";

      # Set this for clan commands that use ssh
      # If you change the hostname, you need to update this line to root@<new-hostname>
      # This only works however if you have avahi running on your admin machine, otherwise use the IP
      clan.core.networking.targetHost = "root@__IP__";

      # You can get your disk id by running the following command on the installer:
      # Replace <IP> with the IP of the installer printed on the screen or by running the `ip addr` command.
      # ssh root@<IP> lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT

      # Replace this __CHANGE_ME__ with the result of the lsblk command from step 1.
      disko.devices.disk.main.device = "/dev/disk/by-id/__CHANGE_ME__";

      # IMPORTANT! Add your SSH key here
@@ -114,80 +117,32 @@ Adding or configuring a new machine requires two simple steps:
    }
    ```

You can also create additional machines using the `clan machines create` command:

```
$ clan machines create --help
usage: clan [-h] [SUBCOMMAND] machines create [-h] [--tags TAGS [TAGS ...]] [--template-name TEMPLATE_NAME]
                                              [--target-host TARGET_HOST] [--debug] [--option name value] [--flake PATH]
                                              machine_name

positional arguments:
  machine_name          The name of the machine to create

options:
  -h, --help            show this help message and exit
  --tags TAGS [TAGS ...]
                        Tags to associate with the machine. Can be used to assign multiple machines to services.
  --template-name TEMPLATE_NAME
                        The name of the template machine to import
  --target-host TARGET_HOST
                        Address of the machine to install and update, in the format of user@host:1234
  --debug               Enable debug logging
  --option name value   Nix option to set
  --flake PATH          path to the flake where the clan resides in, can be a remote flake or local, can be set through
                        the [CLAN_DIR] environment variable
```

!!! Info "Replace `__IP__` with the ip of your machine; if you use avahi you can also use your hostname"
!!! Info "Replace `__CHANGE_ME__` with the appropriate `ID-LINK` identifier, such as `nvme-eui.e8238fa6bf530001001b448b4aec2929`"
!!! Info "Replace `__YOUR_SSH_KEY__` with your personal key, like `ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILoMI0NC5eT9pHlQExrvR5ASV3iW9+BXwhfchq0smXUJ jon@jon-desktop`"

These steps will allow you to update your machine later.

### Step 2: Custom Disk Formatting

In `./modules/disko.nix`, a simple `ext4` disk partitioning scheme is defined for the Disko module. For more complex disk partitioning setups,
refer to the [Disko templates](https://github.com/nix-community/disko-templates) or [Disko examples](https://github.com/nix-community/disko/tree/master/example).

### (Optional): Renaming a Machine

If you wish to rename `jon` to your own machine name, you can use the following command:

```
git mv ./machines/jon ./machines/newname
```

Then rename `jon` to your preferred name in `machines` in `flake.nix` as well as the import line:

```diff
- imports = [ ./machines/jon/configuration.nix ];
+ imports = [ ./machines/__NEW_NAME__/configuration.nix ];
```

!!! Info "Replace `__NEW_NAME__` with the name of the machine"

Note that our clan lives inside a git repository.
Only files that have been added with `git add` are recognized by `nix`.
So for every file that you add or rename you also need to run:

@@ -196,14 +151,11 @@ So for every file that you add or rename you also need to run:

```
git add ./path/to/my/file
```

### (Optional): Removing a Machine

If you only want to set up a single machine at this point, you can delete `sara` from `flake.nix` as well as from the machines directory:

```
git rm -rf ./machines/sara
```

@@ -1,13 +1,7 @@
# Deploy your Clan

Now that you have created a new machine, we will walk through how to install it.

## Installing a New Machine

Clan CLI, in conjunction with [nixos-anywhere](https://github.com/nix-community/nixos-anywhere), provides a seamless method for installing NixOS on various machines.

This process involves preparing a suitable hardware and disk partitioning configuration and ensuring the target machine is accessible via SSH.

### Step 0. Prerequisites

@@ -24,7 +18,7 @@ This process involves preparing a suitable hardware and disk partitioning config

2. Boot the target machine and connect it to a network that makes it reachable from your setup computer.

=== "**Cloud VMs**"

    - [x] **Two Computers**: You need one computer that you're getting ready (we'll call this the Target Computer) and another one to set it up from (we'll call this the Setup Computer). Make sure both can talk to each other over the network using SSH.
    - [x] **Machine configuration**: See our basic [configuration guide](./configure.md)

@@ -107,32 +101,27 @@ This process involves preparing a suitable hardware and disk partitioning config

    For easy sharing of deployment information via QR code, we highly recommend using [KDE Connect](https://apps.kde.org/de/kdeconnect/).

    There are two ways to deploy your machine:

    === "**Password Auth**"
        Run the following command to log in over SSH with password authentication:
        ```bash
        clan machines install [MACHINE] --target-host <IP> --update-hardware-config nixos-facter
        ```

    === "**QR Code Auth**"
        Using the JSON contents of the QR code:
        ```terminal
        clan machines install [MACHINE] --json "[JSON]" --update-hardware-config nixos-facter
        ```
        Or using a picture containing the QR code:
        ```terminal
        clan machines install [MACHINE] --png [PATH] --update-hardware-config nixos-facter
        ```

=== "**Cloud VM**"

    Replace `<target_host>` with the target computer's IP address:

    ```bash
    clan machines install [MACHINE] --target-host <target_host> --update-hardware-config nixos-facter
    ```

@@ -49,7 +49,8 @@ Replace `kernelModules` with the ethernet module loaded on your target machine

        port = 7172;
        authorizedKeys = [ "<yourkey>" ];
        hostKeys = [
          "/var/lib/initrd_host_ed25519_key"
          "/var/lib/initrd_host_rsa_key"
        ];
      };
    };

@@ -73,7 +74,7 @@ Before starting the installation process, ensure that the SSH public key is copied

```bash
ssh-copy-id -o PreferredAuthentications=password -o PubkeyAuthentication=no root@nixos-installer.local
```

### Step 1.5: Prepare Secret Key and Partition Disks

1. Access the installer using SSH:

@@ -90,13 +91,13 @@ nano /tmp/secret.key

3. Discard the old disk partition data:

```bash
blkdiscard /dev/disk/by-id/<installdisk>
```

4. Run `clan machines install`, only running the kexec and disko phases, with the following command:

```bash
clan machines install gchq-local --target-host root@nixos-installer --phases kexec,disko
```

### Step 2: ZFS Pool Import and System Installation

@@ -107,14 +108,10 @@ clan machines install gchq-local --target-host root@nixos-installer --yes --no-r

```bash
ssh root@nixos-installer.local
```

2. Run the following commands on the remote installation environment:

```bash
zpool import zroot
zfs set keylocation=prompt zroot/root
zfs load-key zroot/root
zfs set mountpoint=/mnt zroot/root/nixos
mount /dev/nvme0n1p2 /mnt/boot
```

3. Disconnect from the SSH session:

@@ -123,43 +120,36 @@ mount /dev/nvme0n1p2 /mnt/boot

```bash
CTRL+D
```

4. Locally generate ssh host keys. You only need to generate ones for the algorithms you're using in `authorizedKeys`.

```bash
ssh-keygen -q -N "" -t ed25519 -f ./initrd_host_ed25519_key
ssh-keygen -q -N "" -t rsa -b 4096 -f ./initrd_host_rsa_key
```

5. Securely copy your local initrd ssh host keys to the installer's `/mnt` directory:

```bash
scp ./initrd_host* root@nixos-installer.local:/mnt/var/lib/
```

6. Install nixos to the mounted partitions:

```bash
clan machines install gchq-local --target-host root@nixos-installer --phases install
```

7. After the installation process, unmount `/mnt/boot`, change the ZFS mountpoints and unmount all the ZFS volumes by exporting the zpool:

```bash
umount /mnt/boot
cd /
zfs set -u mountpoint=/ zroot/root/nixos
zfs set -u mountpoint=/tmp zroot/root/tmp
zfs set -u mountpoint=/home zroot/root/home
zpool export zroot
```

8. Perform a reboot of the machine and remove the USB installer.

### Step 3: Accessing the Initial Ramdisk (initrd) Environment

@@ -42,7 +42,7 @@ By the end of this guide, you'll have a fresh NixOS configuration ready to push

Add the Clan CLI into your development workflow:

```bash
nix shell git+https://git.clan.lol/clan/clan-core#clan-cli --refresh
```

You can find reference documentation for the `clan` cli program [here](../reference/cli/index.md).

@@ -92,6 +92,21 @@ This should yield the following:

    5 directories, 9 files
    ```

??? info "Recommended way of sourcing the `clan` cli tool"
    The default template also adds the `clan` cli tool to the development shell.
    This means you can get the exact version you need directly from the folder you are in right now.

    In the `my-clan` directory, run the following command:
    ```
    nix develop
    ```
    That way you will have the tool available in the shell environment.
    We also recommend setting up [direnv](https://direnv.net/) for your shell, for a more convenient experience.

```bash
clan machines list
```

@@ -1,11 +1,16 @@
# Clan Installer Image for Physical Machines

To install Clan on physical machines, you need to use our custom installer image. This is necessary for proper installation and operation.

!!! note "Using a Cloud VM?"
    If you're using a cloud provider's virtual machine (VM), you can skip this section and go directly to the [Configure Machines](configure.md) step. In this scenario, we automatically use [nixos-anywhere](https://github.com/nix-community/nixos-anywhere) to replace the kernel during runtime.

??? info "Why nixos-anywhere Doesn't Work on Physical Hardware"
    nixos-anywhere relies on [kexec](https://wiki.archlinux.org/title/Kexec) to replace the running kernel with our custom one. This method often has compatibility issues with real hardware, especially systems with dedicated graphics cards like laptops and servers, leading to crashes and black screens.

??? info "Reasons for a Custom Install Image"
    Our custom install images are built to include essential tools like [nixos-facter](https://github.com/nix-community/nixos-facter) and support for [ZFS](https://wiki.archlinux.org/title/ZFS). They're also optimized to run on systems with as little as 1 GB of RAM, ensuring efficient performance even on lower-end hardware.

### Step 0. Prerequisites

@@ -40,9 +45,9 @@ Follow our step-by-step guide to create and transfer this image onto a bootable

    sudo umount /dev/sdb1
    ```

=== "**Linux OS**"
    ### Step 2. Create a Custom Installer

    Using clan flash enables the inclusion of ssh public keys into the image.
    It also allows setting the language and keymap in the installer image.

    ```bash

@@ -18,89 +18,128 @@ Clan
|
||||
If you select multiple network technologies at the same time. e.g. (zerotier + yggdrassil)
|
||||
You must choose one of them as primary network and the machines are always connected via the primary network.
|
||||
|
||||
## 1. Set-Up the VPN Controller
|
||||
|
||||
The VPN controller is initially essential for providing configuration to new
|
||||
peers. Once addresses are allocated, the controller's continuous operation is not essential.
|
||||
|
||||
1. **Designate a Machine**: Label a machine as the VPN controller in the clan,
|
||||
referred to as `<CONTROLLER>` henceforth in this guide.
|
||||
2. **Add Configuration**: Input the following configuration to the NixOS
|
||||
configuration of the controller machine:
|
||||
```nix
|
||||
clan.core.networking.zerotier.controller = {
|
||||
enable = true;
|
||||
public = true;
|
||||
};
|
||||
```
|
||||
3. **Update the Controller Machine**: Execute the following:
|
||||
```bash
|
||||
clan machines update <CONTROLLER>
|
||||
```
|
||||
Your machine is now operational as the VPN controller.
|
||||
|
||||
## 2. Add Machines to the VPN
|
||||
|
||||
To introduce a new machine to the VPN, adhere to the following steps:
|
||||
|
||||
1. **Update Configuration**: On the new machine, incorporate the following to its
|
||||
configuration, substituting `<CONTROLLER>` with the controller machine name:
|
||||
```nix
|
||||
{ config, ... }: {
|
||||
clan.core.networking.zerotier.networkId = builtins.readFile (config.clan.core.settings.directory + "/machines/<CONTROLLER>/facts/zerotier-network-id");
|
||||
}
|
||||
```
|
||||
1. **Update the New Machine**: Execute:
|
||||
```bash
|
||||
$ clan machines update <NEW_MACHINE>
|
||||
```
|
||||
Replace `<NEW_MACHINE>` with the designated new machine name.
|
||||
|
||||
!!! Note "For Private Networks"
|
||||
1. **Retrieve Zerotier Metadata**
|
||||
|
||||
=== "From the repo"
|
||||
**Retrieve the ZeroTier IP**: In the clan repo, execute:
|
||||
```console
|
||||
$ clan facts list <NEW_MACHINE> | jq -r '.["zerotier-ip"]'
|
||||
```
|
||||
|
||||
The returned address is the Zerotier IP address of the machine.
|
||||
|
||||
=== "On the new machine"
|
||||
**Retrieve the ZeroTier ID**: On the `new_machine`, execute:
|
||||
```bash
|
||||
$ sudo zerotier-cli info
|
||||
```
|
||||
Example Output:
|
||||
```{.console, .no-copy}
|
||||
200 info d2c71971db 1.12.1 OFFLINE
|
||||
```
|
||||
, where `d2c71971db` is the ZeroTier ID.
|
||||
This guide shows you how to configure `zerotier` either through `NixOS Options` directly, or Clan's `Inventory` System.
|
||||
|
||||
|
||||
2. **Authorize the New Machine on the Controller**: On the controller machine,
|
||||
execute:
|
||||
=== "**Inventory**"
|
||||
## 1. Choose the Controller
|
||||
|
||||
=== "with ZerotierIP"
|
||||
```bash
|
||||
$ sudo zerotier-members allow --member-ip <IP>
|
||||
```
|
||||
Substitute `<IP>` with the ZeroTier IP obtained previously.
|
||||
=== "with ZerotierID"
|
||||
```bash
|
||||
$ sudo zerotier-members allow <ID>
|
||||
```
|
||||
Substitute `<ID>` with the ZeroTier ID obtained previously.
|
||||
The controller is the initial entry point for new machines into the VPN.
It signs the IDs of new machines.
Once the IDs are signed, the controller no longer needs to run continuously.
A good choice for the controller is nevertheless a machine that can always be reached for updates, so that new peers can keep joining the network.
|
||||
|
||||
2. **Verify Connection**: On the `new_machine`, re-execute:
|
||||
```bash
|
||||
$ sudo zerotier-cli info
|
||||
```
|
||||
The status should now be "ONLINE":
|
||||
```{.console, .no-copy}
|
||||
200 info d2c71971db 1.12.1 ONLINE
|
||||
```
|
||||
For the purpose of this guide we have two machines:
|
||||
|
||||
- The `controller` machine, which will be the zerotier controller.
|
||||
- The `new_machine` machine, which is the machine we want to add to the VPN network.
|
||||
|
||||
## 2. Configure the Inventory
|
||||
```nix
|
||||
clan.inventory = {
|
||||
services.zerotier.default = {
|
||||
roles.controller.machines = [
|
||||
"controller"
|
||||
];
|
||||
roles.peer.machines = [
|
||||
"new_machine"
|
||||
];
|
||||
};
|
||||
};
|
||||
```
|
||||
|
||||
## 3. Apply the Configuration
|
||||
Update the `controller` machine:
|
||||
|
||||
```bash
|
||||
clan machines update controller
|
||||
```
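The peer needs this configuration as well; assuming the machine names from the inventory above, a sketch of the follow-up command:

```bash
# Roll out the generated zerotier peer configuration to the second machine.
clan machines update new_machine
```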
|
||||
|
||||
|
||||
=== "**NixOS Options**"
|
||||
## 1. Set-Up the VPN Controller
|
||||
|
||||
The VPN controller is initially required to provide configuration to new
peers. Once addresses are allocated, the controller no longer needs to run continuously.
|
||||
|
||||
1. **Designate a Machine**: Label a machine as the VPN controller in the clan,
|
||||
referred to as `<CONTROLLER>` henceforth in this guide.
|
||||
2. **Add Configuration**: Add the following to the NixOS
|
||||
configuration of the controller machine:
|
||||
```nix
|
||||
clan.core.networking.zerotier.controller = {
|
||||
enable = true;
|
||||
public = true;
|
||||
};
|
||||
```
|
||||
3. **Update the Controller Machine**: Execute the following:
|
||||
```bash
|
||||
clan machines update <CONTROLLER>
|
||||
```
|
||||
Your machine is now operational as the VPN controller.
|
||||
|
||||
## 2. Add Machines to the VPN
|
||||
|
||||
To introduce a new machine to the VPN, follow these steps:
|
||||
|
||||
1. **Update Configuration**: On the new machine, add the following to its
|
||||
configuration, substituting `<CONTROLLER>` with the controller machine name:
|
||||
```nix
|
||||
{ config, ... }: {
|
||||
clan.core.networking.zerotier.networkId = builtins.readFile (config.clan.core.settings.directory + "/machines/<CONTROLLER>/facts/zerotier-network-id");
|
||||
}
|
||||
```
|
||||
1. **Update the New Machine**: Execute:
|
||||
```bash
|
||||
$ clan machines update <NEW_MACHINE>
|
||||
```
|
||||
Replace `<NEW_MACHINE>` with the designated new machine name.
|
||||
|
||||
!!! Note "For Private Networks"
|
||||
1. **Retrieve Zerotier Metadata**
|
||||
|
||||
=== "From the repo"
|
||||
**Retrieve the ZeroTier IP**: In the clan repo, execute:
|
||||
```console
|
||||
$ clan facts list <NEW_MACHINE> | jq -r '.["zerotier-ip"]'
|
||||
```
|
||||
|
||||
The returned address is the Zerotier IP address of the machine.
|
||||
|
||||
=== "On the new machine"
|
||||
**Retrieve the ZeroTier ID**: On the `new_machine`, execute:
|
||||
```bash
|
||||
$ sudo zerotier-cli info
|
||||
```
|
||||
Example Output:
|
||||
```{.console, .no-copy}
|
||||
200 info d2c71971db 1.12.1 OFFLINE
|
||||
```
|
||||
Here `d2c71971db` is the ZeroTier ID.
|
||||
|
||||
|
||||
2. **Authorize the New Machine on the Controller**: On the controller machine,
|
||||
execute:
|
||||
|
||||
=== "with ZerotierIP"
|
||||
```bash
|
||||
$ sudo zerotier-members allow --member-ip <IP>
|
||||
```
|
||||
Substitute `<IP>` with the ZeroTier IP obtained previously.
|
||||
=== "with ZerotierID"
|
||||
```bash
|
||||
$ sudo zerotier-members allow <ID>
|
||||
```
|
||||
Substitute `<ID>` with the ZeroTier ID obtained previously.
|
||||
|
||||
2. **Verify Connection**: On the `new_machine`, re-execute:
|
||||
```bash
|
||||
$ sudo zerotier-cli info
|
||||
```
|
||||
The status should now be "ONLINE":
|
||||
```{.console, .no-copy}
|
||||
200 info d2c71971db 1.12.1 ONLINE
|
||||
```
|
||||
|
||||
!!! success "Congratulations!"
|
||||
The new machine is now part of the VPN, and the ZeroTier
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
|
||||
Clan enables encryption of secrets (such as passwords and keys), ensuring both security and ease of use.
|
||||
|
||||
Clan utilizes the [sops](https://github.com/getsops/sops) format and integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
|
||||
By default, Clan utilizes the [sops](https://github.com/getsops/sops) format and integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
|
||||
Clan can also be configured to use other secret store [backends](https://docs.clan.lol/reference/clan-core/vars/#clan.core.vars.settings.secretStore).
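As an illustration, a minimal sketch of selecting a different backend on a machine, assuming the `clan.core.vars.settings.secretStore` option linked above (`"password-store"` is one of its documented values):

```nix
{
  # Hypothetical per-machine override: keep generated secrets in
  # pass/passage instead of the default sops backend.
  clan.core.vars.settings.secretStore = "password-store";
}
```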
|
||||
|
||||
This guide will walk you through:
|
||||
|
||||
@@ -39,7 +40,7 @@ Also add your age public key to the repository with 'clan secrets users add YOUR
|
||||
### Add Your Public Key
|
||||
|
||||
```bash
|
||||
clan secrets users add $USER <your_public_key>
|
||||
clan secrets users add $USER --age-key <your_public_key>
|
||||
```
|
||||
|
||||
It's best to choose the same username as on the setup/admin machine that you use to control the deployment.
|
||||
@@ -53,33 +54,3 @@ sops/
|
||||
└── key.json
|
||||
```
|
||||
If you followed the quickstart tutorial, all necessary secrets are initialized at this point.
|
||||
|
||||
|
||||
|
||||
### Generate Facts and Vars
|
||||
|
||||
Typically, this step is handled automatically when a machine is deployed. However, to enable the use of `nix flake check` with your configuration, it must be completed manually beforehand.
|
||||
|
||||
Currently, generating all the necessary facts requires two separate commands. This is due to the coexistence of two parallel secret management solutions: the older, stable version (`clan secrets` and `clan facts`) and the newer, experimental version (`clan vars`).
|
||||
|
||||
To generate both facts and vars, execute the following commands:
|
||||
|
||||
```sh
|
||||
clan facts generate && clan vars generate
|
||||
```
|
||||
|
||||
|
||||
### Check Configuration
|
||||
|
||||
Validate your configuration by running:
|
||||
|
||||
```bash
|
||||
nix flake check
|
||||
```
|
||||
|
||||
This command helps ensure that your system configuration is correct and free from errors.
|
||||
|
||||
!!! Tip
|
||||
|
||||
You can integrate this step into your [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) workflow to ensure that only valid Nix configurations are merged into your codebase.
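As a minimal sketch, assuming a CI runner with Nix and the clan CLI available, the job could chain the commands from this guide:

```bash
# Hypothetical CI step: regenerate facts/vars, then evaluate all flake checks.
clan facts generate && clan vars generate
nix flake check
```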
|
||||
|
||||
|
||||
@@ -61,9 +61,9 @@ hide:
|
||||
|
||||
---
|
||||
|
||||
Use clan with [https://flake-parts.dev]()
|
||||
Use clan with [https://flake.parts/]()
|
||||
|
||||
- [Contribute](./manual/contribute.md)
|
||||
- [Contribute](./contributing/contribute.md)
|
||||
|
||||
---
|
||||
|
||||
@@ -73,7 +73,7 @@ hide:
|
||||
|
||||
## API Reference
|
||||
|
||||
**Auto generated API Documentation**
|
||||
**Reference API Documentation**
|
||||
|
||||
<div class="grid cards" markdown>
|
||||
|
||||
@@ -84,4 +84,4 @@ hide:
|
||||
Learn how to interface with Clan programmatically
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -81,7 +81,7 @@ For the provided flake example, your flake should now look like this:
|
||||
outputs = { self, nixpkgs, ... }:
|
||||
let
|
||||
clan = clan-core.lib.buildClan {
|
||||
directory = self; # this needs to point at the repository root
|
||||
self = self; # this needs to point at the repository root
|
||||
specialArgs = {};
|
||||
inventory.meta.name = "NEEDS_TO_BE_UNIQUE"; # TODO: Changeme
|
||||
|
||||
|
||||
@@ -18,8 +18,3 @@ $ tree -L 1
|
||||
├── templates # Template files for creating a new Clan
|
||||
└── vars
|
||||
```
|
||||
|
||||
## Getting Started with Infrastructure
|
||||
|
||||
To dive into infrastructure, check out our clan infra repo: [clan-infra](https://git.clan.lol/clan/clan-infra). Please provide us with your public SOPS key so we can add you as an admin.
|
||||
|
||||
|
||||
52
flake.lock
generated
@@ -7,11 +7,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1738148035,
|
||||
"narHash": "sha256-KYOATYEwaKysL3HdHdS5kbQMXvzS4iPJzJrML+3TKAo=",
|
||||
"lastModified": 1741786315,
|
||||
"narHash": "sha256-VT65AE2syHVj6v/DGB496bqBnu1PXrrzwlw07/Zpllc=",
|
||||
"owner": "nix-community",
|
||||
"repo": "disko",
|
||||
"rev": "18d0a984cc2bc82cf61df19523a34ad463aa7f54",
|
||||
"rev": "0d8c6ad4a43906d14abd5c60e0ffe7b587b213de",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -27,11 +27,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1738453229,
|
||||
"narHash": "sha256-7H9XgNiGLKN1G1CgRh0vUL4AheZSYzPm+zmZ7vxbJdo=",
|
||||
"lastModified": 1741352980,
|
||||
"narHash": "sha256-+u2UunDA4Cl5Fci3m7S643HzKmIDAe+fiXrLqYsR2fs=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "32ea77a06711b758da0ad9bd6a844c5740a87abd",
|
||||
"rev": "f4330d22f1c5d2ba72d3d22df5597d123fdb60a9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -42,11 +42,11 @@
|
||||
},
|
||||
"nixos-facter-modules": {
|
||||
"locked": {
|
||||
"lastModified": 1736931726,
|
||||
"narHash": "sha256-aY55yiifyo1XPPpbpH0kWlV1g2dNGBlx6622b7OK8ks=",
|
||||
"lastModified": 1738752252,
|
||||
"narHash": "sha256-/nA3tDdp/2g0FBy8966ppC2WDoyXtUWaHkZWL+N3ZKc=",
|
||||
"owner": "numtide",
|
||||
"repo": "nixos-facter-modules",
|
||||
"rev": "fa11d87b61b2163efbb9aed7b7a5ae0299e5ab9c",
|
||||
"rev": "60f8b8f3f99667de6a493a44375e5506bf0c48b1",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -57,18 +57,15 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1738422722,
|
||||
"narHash": "sha256-Q4vhtbLYWBUnjWD4iQb003Lt+N5PuURDad1BngGKdUs=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "102a39bfee444533e6b4e8611d7e92aa39b7bec1",
|
||||
"type": "github"
|
||||
"lastModified": 315532800,
|
||||
"narHash": "sha256-+bxPXRQiQ0SsjR8syBcc8X+S8WGllNM+Qreu5Td7gnI=",
|
||||
"rev": "1750f3c1c89488e2ffdd47cab9d05454dddfb734",
|
||||
"type": "tarball",
|
||||
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre773343.1750f3c1c894/nixexprs.tar.xz"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixpkgs-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
"type": "tarball",
|
||||
"url": "https://nixos.org/channels/nixpkgs-unstable/nixexprs.tar.xz"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
@@ -89,16 +86,15 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1736953253,
|
||||
"narHash": "sha256-shJxzy7qypjq9hpETQ3gJsBZXO5E3KR0INca/xwiVp4=",
|
||||
"owner": "pinpox",
|
||||
"lastModified": 1742700801,
|
||||
"narHash": "sha256-ZGlpUDsuBdeZeTNgoMv+aw0ByXT2J3wkYw9kJwkAS4M=",
|
||||
"owner": "Mic92",
|
||||
"repo": "sops-nix",
|
||||
"rev": "a7c6e64401b6dde13c0de90230cb64087c9d9693",
|
||||
"rev": "67566fe68a8bed2a7b1175fdfb0697ed22ae8852",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "pinpox",
|
||||
"ref": "lazy-assertions",
|
||||
"owner": "Mic92",
|
||||
"repo": "sops-nix",
|
||||
"type": "github"
|
||||
}
|
||||
@@ -125,11 +121,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1738070913,
|
||||
"narHash": "sha256-j6jC12vCFsTGDmY2u1H12lMr62fnclNjuCtAdF1a4Nk=",
|
||||
"lastModified": 1742370146,
|
||||
"narHash": "sha256-XRE8hL4vKIQyVMDXykFh4ceo3KSpuJF3ts8GKwh5bIU=",
|
||||
"owner": "numtide",
|
||||
"repo": "treefmt-nix",
|
||||
"rev": "bebf27d00f7d10ba75332a0541ac43676985dea3",
|
||||
"rev": "adc195eef5da3606891cedf80c0d9ce2d3190808",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
description = "clan.lol base operating system";
|
||||
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
|
||||
nixpkgs.url = "https://nixos.org/channels/nixpkgs-unstable/nixexprs.tar.xz";
|
||||
|
||||
flake-parts.url = "github:hercules-ci/flake-parts";
|
||||
flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs";
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
nixos-facter-modules.url = "github:numtide/nixos-facter-modules";
|
||||
|
||||
sops-nix.url = "github:pinpox/sops-nix/lazy-assertions";
|
||||
sops-nix.url = "github:Mic92/sops-nix";
|
||||
sops-nix.inputs.nixpkgs.follows = "nixpkgs";
|
||||
|
||||
systems.url = "github:nix-systems/default";
|
||||
@@ -43,10 +43,6 @@
|
||||
meta.name = "clan-core";
|
||||
};
|
||||
|
||||
flake = {
|
||||
clan.templates = import ./templates { };
|
||||
};
|
||||
|
||||
systems = import systems;
|
||||
imports =
|
||||
# only importing existing paths allows to minimize the flake for test
|
||||
@@ -57,6 +53,7 @@
|
||||
./devShell.nix
|
||||
./docs/nix/flake-module.nix
|
||||
./flakeModules/flake-module.nix
|
||||
./flakeModules/demo_iso.nix
|
||||
./lib/filter-clan-core/flake-module.nix
|
||||
./lib/flake-module.nix
|
||||
./nixosModules/clanCore/vars/flake-module.nix
|
||||
|
||||
@@ -27,9 +27,13 @@ in
|
||||
};
|
||||
|
||||
options.flake = flake-parts-lib.mkSubmoduleOptions {
|
||||
clan = lib.mkOption { type = types.raw; };
|
||||
clanInternals = lib.mkOption { type = types.raw; };
|
||||
};
|
||||
config = {
|
||||
flake.clan = {
|
||||
inherit (config.clan.clanInternals) templates;
|
||||
};
|
||||
flake.clanInternals = config.clan.clanInternals;
|
||||
flake.nixosConfigurations = config.clan.nixosConfigurations;
|
||||
};
|
||||
|
||||
101
flakeModules/demo_iso.nix
Normal file
@@ -0,0 +1,101 @@
|
||||
{ self, ... }:
|
||||
|
||||
let
|
||||
pkgs = self.inputs.nixpkgs.legacyPackages.x86_64-linux;
|
||||
|
||||
demoModule = {
|
||||
imports = [
|
||||
"${self.clanModules.mycelium}/roles/peer.nix"
|
||||
# TODO do we need this? maybe not
|
||||
(
|
||||
{ modulesPath, ... }:
|
||||
{
|
||||
imports = [ "${modulesPath}/installer/cd-dvd/iso-image.nix" ];
|
||||
}
|
||||
)
|
||||
];
|
||||
};
|
||||
|
||||
clan_welcome = pkgs.writeShellApplication {
|
||||
name = "clan_welcome";
|
||||
runtimeInputs = [
|
||||
pkgs.gum
|
||||
pkgs.gitMinimal
|
||||
pkgs.retry
|
||||
self.packages.${pkgs.system}.clan-cli
|
||||
];
|
||||
text = ''
|
||||
set -efu
|
||||
|
||||
gum confirm '
|
||||
Welcome to Clan, a NixOS-based operating system for the CLAN project.
|
||||
This installer can be used to try out clan on your machine; for that reason we set up a cooperative environment to play and hack together :)
|
||||
' || exit 1
|
||||
until retry -t 5 ping -c 1 -W 1 git.clan.lol &> /dev/null; do
|
||||
# TODO make this nicer
|
||||
nmtui
|
||||
done
|
||||
if ! test -e ~/clan-core; then
|
||||
# git clone https://git.clan.lol/clan/clan-core.git ~/clan-core
|
||||
cp -rv ${self} clan-core
|
||||
fi
|
||||
cd clan-core
|
||||
clan machines morph demo-template --i-will-be-fired-for-using-this
|
||||
exit
|
||||
'';
|
||||
};
|
||||
|
||||
morphModule = {
|
||||
imports = [
|
||||
(
|
||||
{ modulesPath, ... }:
|
||||
{
|
||||
imports = [ "${modulesPath}/image/images.nix" ];
|
||||
}
|
||||
)
|
||||
];
|
||||
image.modules.iso.isoImage.squashfsCompression = "zstd -Xcompression-level 1";
|
||||
networking.networkmanager.enable = true;
|
||||
services.getty.autologinUser = "root";
|
||||
programs.bash.interactiveShellInit = ''
|
||||
if [[ "$(tty)" =~ /dev/(tty1|hvc0|ttyS0)$ ]]; then
|
||||
# workaround for https://github.com/NixOS/nixpkgs/issues/219239
|
||||
systemctl restart systemd-vconsole-setup.service
|
||||
|
||||
reset
|
||||
|
||||
${clan_welcome}/bin/clan_welcome
|
||||
fi
|
||||
'';
|
||||
};
|
||||
in
|
||||
{
|
||||
clan.templates.machine.demo-template = {
|
||||
description = "Demo machine for the CLAN project";
|
||||
# path = pkgs.runCommand "demo-template" {} ''
|
||||
# mkdir -p $out
|
||||
# echo '{ self, ... }: { imports = [ self.nixosModules.demoModule ]; }' > $out/configuration.nix
|
||||
# '';
|
||||
path = ./demo_template;
|
||||
};
|
||||
flake.nixosModules = { inherit morphModule demoModule; };
|
||||
perSystem =
|
||||
{ system, lib, ... }:
|
||||
{
|
||||
packages =
|
||||
lib.mkIf
|
||||
(lib.any (x: x == system) [
|
||||
"x86_64-linux"
|
||||
"aarch64-linux"
|
||||
])
|
||||
{
|
||||
demo-iso =
|
||||
(self.inputs.nixpkgs.lib.nixosSystem {
|
||||
modules = [
|
||||
{ nixpkgs.hostPlatform = system; }
|
||||
morphModule
|
||||
];
|
||||
}).config.system.build.images.iso;
|
||||
};
|
||||
};
|
||||
}
|
||||
38
flakeModules/demo_template/configuration.nix
Normal file
@@ -0,0 +1,38 @@
|
||||
{ pkgs, config, ... }:
|
||||
{
|
||||
fileSystems."/".device = "nodev";
|
||||
boot.loader.grub.device = "nodev";
|
||||
clan.core.vars.settings.secretStore = "fs";
|
||||
clan.core.vars.generators.mycelium = {
|
||||
files."key" = { };
|
||||
files."ip".secret = false;
|
||||
files."pubkey".secret = false;
|
||||
runtimeInputs = [
|
||||
pkgs.mycelium
|
||||
pkgs.coreutils
|
||||
pkgs.jq
|
||||
];
|
||||
script = ''
|
||||
timeout 5 mycelium --key-file "$out"/key || :
|
||||
mycelium inspect --key-file "$out"/key --json | jq -r .publicKey > "$out"/pubkey
|
||||
mycelium inspect --key-file "$out"/key --json | jq -r .address > "$out"/ip
|
||||
'';
|
||||
};
|
||||
services.mycelium = {
|
||||
enable = true;
|
||||
addHostedPublicNodes = true;
|
||||
openFirewall = true;
|
||||
keyFile = config.clan.core.vars.generators.mycelium.files.key.path;
|
||||
};
|
||||
services.getty.autologinUser = "root";
|
||||
programs.bash.interactiveShellInit = ''
|
||||
if [[ "$(tty)" =~ /dev/(tty1|hvc0|ttyS0)$ ]]; then
|
||||
# workaround for https://github.com/NixOS/nixpkgs/issues/219239
|
||||
systemctl restart systemd-vconsole-setup.service
|
||||
|
||||
reset
|
||||
|
||||
echo "your mycelium IP is: $(cat /var/lib/mycelium/ip)"
|
||||
fi
|
||||
'';
|
||||
}
|
||||
@@ -56,7 +56,7 @@
|
||||
"machines": {
|
||||
"test-inventory-machine": {
|
||||
"config": {
|
||||
"packages": ["zed-editor"]
|
||||
"packages": ["hello"]
|
||||
},
|
||||
"extraModules": []
|
||||
}
|
||||
|
||||
@@ -69,6 +69,15 @@ in
|
||||
```
|
||||
'';
|
||||
};
|
||||
|
||||
templates = lib.mkOption {
|
||||
type = types.submodule { imports = [ ./templates/interface.nix ]; };
|
||||
default = { };
|
||||
description = ''
|
||||
Define Clan templates.
|
||||
'';
|
||||
};
|
||||
|
||||
inventory = lib.mkOption {
|
||||
type = types.submodule { imports = [ ../inventory/build-inventory/interface.nix ]; };
|
||||
description = ''
|
||||
@@ -112,11 +121,11 @@ in
|
||||
type = types.lazyAttrsOf types.raw;
|
||||
default = { };
|
||||
};
|
||||
|
||||
# flake.clanInternals
|
||||
clanInternals = lib.mkOption {
|
||||
# Hide from documentation. Exposes internals to the cli.
|
||||
visible = false;
|
||||
# type = types.raw;
|
||||
# ClanInternals
|
||||
type = types.submodule {
|
||||
options = {
|
||||
@@ -132,7 +141,7 @@ in
|
||||
moduleSchemas = lib.mkOption { type = lib.types.raw; };
|
||||
inventoryFile = lib.mkOption { type = lib.types.raw; };
|
||||
# The machine 'imports' generated by the inventory per machine
|
||||
serviceConfigs = lib.mkOption { type = lib.types.raw; };
|
||||
inventoryClass = lib.mkOption { type = lib.types.raw; };
|
||||
# clan-core's modules
|
||||
clanModules = lib.mkOption { type = lib.types.raw; };
|
||||
source = lib.mkOption { type = lib.types.raw; };
|
||||
|
||||
@@ -42,7 +42,7 @@ let
|
||||
|
||||
# map from machine name to service configuration
|
||||
# { ${machineName} :: Config }
|
||||
serviceConfigs = (
|
||||
inventoryClass = (
|
||||
buildInventory {
|
||||
inherit inventory directory;
|
||||
}
|
||||
@@ -76,7 +76,7 @@ let
|
||||
(machines.${name} or { })
|
||||
# Inherit the inventory assertions ?
|
||||
# { inherit (mergedInventory) assertions; }
|
||||
{ imports = serviceConfigs.machines.${name}.machineImports or [ ]; }
|
||||
{ imports = inventoryClass.machines.${name}.machineImports or [ ]; }
|
||||
(
|
||||
{
|
||||
# Settings
|
||||
@@ -96,12 +96,6 @@ let
|
||||
|
||||
networking.hostName = lib.mkDefault name;
|
||||
|
||||
# speeds up nix commands by using the nixpkgs from the host system (especially useful in VMs)
|
||||
nix.registry.nixpkgs.to = lib.mkDefault {
|
||||
type = "path";
|
||||
path = lib.mkDefault nixpkgs;
|
||||
};
|
||||
|
||||
# For vars we need to override the system so we run vars
|
||||
# generators on the machine that runs `clan vars generate`. If a
|
||||
# users is using the `pkgsForSystem`, we don't set
|
||||
@@ -204,7 +198,7 @@ in
|
||||
|
||||
clanInternals = {
|
||||
moduleSchemas = clan-core.lib.modules.getModulesSchema config.inventory.modules;
|
||||
inherit serviceConfigs;
|
||||
inherit inventoryClass;
|
||||
inherit (clan-core) clanModules;
|
||||
inherit inventoryFile;
|
||||
inventoryValuesPrios =
|
||||
|
||||
57
lib/build-clan/templates/interface.nix
Normal file
@@ -0,0 +1,57 @@
|
||||
{
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
inherit (lib) types;
|
||||
|
||||
templateType = types.submodule (
|
||||
{ name, ... }:
|
||||
{
|
||||
options.description = lib.mkOption {
|
||||
type = types.str;
|
||||
default = name;
|
||||
description = ''
|
||||
The name of the template.
|
||||
'';
|
||||
};
|
||||
|
||||
options.path = lib.mkOption {
|
||||
type = types.path;
|
||||
description = ''
|
||||
Holds the path to the clan template.
|
||||
'';
|
||||
};
|
||||
}
|
||||
);
|
||||
in
|
||||
{
|
||||
options = {
|
||||
# clan.templates.clan
|
||||
clan = lib.mkOption {
|
||||
type = types.attrsOf templateType;
|
||||
default = { };
|
||||
description = ''
|
||||
Holds the different clan templates.
|
||||
'';
|
||||
};
|
||||
|
||||
# clan.templates.disko
|
||||
disko = lib.mkOption {
|
||||
type = types.attrsOf templateType;
|
||||
default = { };
|
||||
description = ''
|
||||
Holds different disko templates.
|
||||
'';
|
||||
};
|
||||
|
||||
# clan.templates.machine
|
||||
machine = lib.mkOption {
|
||||
type = types.attrsOf templateType;
|
||||
default = { };
|
||||
description = ''
|
||||
Holds the different machine templates.
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -30,7 +30,7 @@ in
|
||||
expr = shallowForceAllAttributes config;
|
||||
expectedError = {
|
||||
type = "ThrownError";
|
||||
msg = "A definition for option `directory' is not of type `path*";
|
||||
msg = "A definition for option `directory' is not of type `absolute path*";
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -6,11 +6,14 @@
|
||||
let
|
||||
baseModule = {
|
||||
imports = (import (pkgs.path + "/nixos/modules/module-list.nix")) ++ [
|
||||
{
|
||||
nixpkgs.pkgs = pkgs;
|
||||
clan.core.name = "dummy";
|
||||
system.stateVersion = lib.version;
|
||||
}
|
||||
(
|
||||
{ config, ... }:
|
||||
{
|
||||
nixpkgs.pkgs = pkgs;
|
||||
clan.core.name = "dummy";
|
||||
system.stateVersion = config.system.nixos.release;
|
||||
}
|
||||
)
|
||||
];
|
||||
};
|
||||
|
||||
|
||||
273
lib/inventory/build-inventory/builder/default.nix
Normal file
@@ -0,0 +1,273 @@
|
||||
{
|
||||
lib,
|
||||
config,
|
||||
clan-core,
|
||||
...
|
||||
}:
|
||||
let
|
||||
inherit (config) inventory directory;
|
||||
resolveTags =
|
||||
# Inventory, { machines :: [string], tags :: [string] }
|
||||
{
|
||||
serviceName,
|
||||
instanceName,
|
||||
roleName,
|
||||
inventory,
|
||||
members,
|
||||
}:
|
||||
{
|
||||
machines =
|
||||
members.machines or [ ]
|
||||
++ (builtins.foldl' (
|
||||
acc: tag:
|
||||
let
|
||||
# For error printing
|
||||
availableTags = lib.foldlAttrs (
|
||||
acc: _: v:
|
||||
v.tags or [ ] ++ acc
|
||||
) [ ] (inventory.machines);
|
||||
|
||||
tagMembers = builtins.attrNames (
|
||||
lib.filterAttrs (_n: v: builtins.elem tag v.tags or [ ]) inventory.machines
|
||||
);
|
||||
in
|
||||
if tagMembers == [ ] then
|
||||
lib.warn ''
|
||||
inventory.services.${serviceName}.${instanceName}: - ${roleName} tags: no machine with tag '${tag}' found.
|
||||
Available tags: ${builtins.toJSON (lib.unique availableTags)}
|
||||
'' [ ]
|
||||
else
|
||||
acc ++ tagMembers
|
||||
) [ ] members.tags or [ ]);
|
||||
};
|
||||
|
||||
checkService =
|
||||
modulepath: serviceName:
|
||||
builtins.elem "inventory"
|
||||
(clan-core.lib.modules.getFrontmatter modulepath serviceName).features or [ ];
|
||||
|
||||
compileMachine =
|
||||
{ machineConfig }:
|
||||
{
|
||||
machineImports = [
|
||||
(lib.optionalAttrs (machineConfig.deploy.targetHost or null != null) {
|
||||
config.clan.core.networking.targetHost = machineConfig.deploy.targetHost;
|
||||
})
|
||||
];
|
||||
assertions = { };
|
||||
};
|
||||
|
||||
legacyResolveImports =
|
||||
{
|
||||
supportedRoles,
|
||||
resolvedRolesPerInstance,
|
||||
serviceConfigs,
|
||||
serviceName,
|
||||
machineName,
|
||||
getRoleFile,
|
||||
}:
|
||||
(lib.foldlAttrs (
|
||||
# : [ Modules ] -> String -> ServiceConfig -> [ Modules ]
|
||||
acc2: instanceName: serviceConfig:
|
||||
let
|
||||
resolvedRoles = resolvedRolesPerInstance.${instanceName};
|
||||
|
||||
isInService = builtins.any (members: builtins.elem machineName members.machines) (
|
||||
builtins.attrValues resolvedRoles
|
||||
);
|
||||
|
||||
# all roles where the machine is present
|
||||
machineRoles = builtins.attrNames (
|
||||
lib.filterAttrs (_role: roleConfig: builtins.elem machineName roleConfig.machines) resolvedRoles
|
||||
);
|
||||
|
||||
machineServiceConfig = (serviceConfig.machines.${machineName} or { }).config or { };
|
||||
globalConfig = serviceConfig.config or { };
|
||||
|
||||
globalExtraModules = serviceConfig.extraModules or [ ];
|
||||
machineExtraModules = serviceConfig.machines.${machineName}.extraModules or [ ];
|
||||
roleServiceExtraModules = builtins.foldl' (
|
||||
acc: role: acc ++ serviceConfig.roles.${role}.extraModules or [ ]
|
||||
) [ ] machineRoles;
|
||||
|
||||
# TODO: maybe optimize this don't lookup the role in inverse roles. Imports are not lazy
|
||||
roleModules = builtins.map (
|
||||
role:
|
||||
if builtins.elem role supportedRoles && inventory.modules ? ${serviceName} then
|
||||
getRoleFile role
|
||||
else
|
||||
throw "Module ${serviceName} doesn't have role: '${role}'. Role: ${
|
||||
inventory.modules.${serviceName}
|
||||
}/roles/${role}.nix not found."
|
||||
) machineRoles;
|
||||
|
||||
roleServiceConfigs = builtins.filter (m: m != { }) (
|
||||
builtins.map (role: serviceConfig.roles.${role}.config or { }) machineRoles
|
||||
);
|
||||
|
||||
extraModules = map (s: if builtins.typeOf s == "string" then "${directory}/${s}" else s) (
|
||||
globalExtraModules ++ machineExtraModules ++ roleServiceExtraModules
|
||||
);
|
||||
in
|
||||
if !(serviceConfig.enabled or true) then
|
||||
acc2
|
||||
else if isInService then
|
||||
acc2
|
||||
++ [
|
||||
{
|
||||
imports = roleModules ++ extraModules;
|
||||
clan.inventory.services.${serviceName}.${instanceName} = {
|
||||
roles = resolvedRoles;
|
||||
# TODO: Add inverseRoles to the service config if needed
|
||||
# inherit inverseRoles;
|
||||
};
|
||||
}
|
||||
(lib.optionalAttrs (globalConfig != { } || machineServiceConfig != { } || roleServiceConfigs != [ ])
|
||||
{
|
||||
clan.${serviceName} = lib.mkMerge (
|
||||
[
|
||||
globalConfig
|
||||
machineServiceConfig
|
||||
]
|
||||
++ roleServiceConfigs
|
||||
);
|
||||
}
|
||||
)
|
||||
]
|
||||
else
|
||||
acc2
|
||||
) [ ] (serviceConfigs));
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
./interface.nix
|
||||
];
|
||||
config = {
|
||||
machines = builtins.mapAttrs (
|
||||
machineName: machineConfig: m:
|
||||
let
|
||||
compiledServices = lib.mapAttrs (
|
||||
_: serviceConfigs:
|
||||
(
|
||||
{ config, ... }:
|
||||
let
|
||||
serviceName = config.serviceName;
|
||||
|
||||
getRoleFile = role: builtins.seq role inventory.modules.${serviceName} + "/roles/${role}.nix";
|
||||
in
|
||||
{
|
||||
_module.args = {
|
||||
inherit
|
||||
resolveTags
|
||||
inventory
|
||||
clan-core
|
||||
machineName
|
||||
serviceConfigs
|
||||
;
|
||||
};
|
||||
imports = [
|
||||
./roles.nix
|
||||
];
|
||||
|
||||
isClanModule =
|
||||
let
|
||||
firstRole = import (getRoleFile (builtins.head config.supportedRoles));
|
||||
loadModuleForClassCheck =
|
||||
m:
|
||||
if lib.isFunction m then
|
||||
let
|
||||
args = lib.functionArgs m;
|
||||
in
|
||||
m args
|
||||
else
|
||||
m;
|
||||
module = loadModuleForClassCheck (firstRole);
|
||||
in
|
||||
if (module) ? _class then module._class == "clan" else false;
|
||||
# The actual result
|
||||
machineImports =
|
||||
if config.isClanModule then
|
||||
throw "Clan modules are not supported yet."
|
||||
else
|
||||
legacyResolveImports {
|
||||
supportedRoles = config.supportedRoles;
|
||||
resolvedRolesPerInstance = config.resolvedRolesPerInstance;
|
||||
inherit
|
||||
serviceConfigs
|
||||
serviceName
|
||||
machineName
|
||||
getRoleFile
|
||||
;
|
||||
};
|
||||
|
||||
# Assertions
|
||||
assertions = {
|
||||
"checkservice.${serviceName}" = {
|
||||
assertion = checkService inventory.modules.${serviceName} serviceName;
|
||||
message = ''
|
||||
Service ${serviceName} cannot be used in inventory. It does not declare the 'inventory' feature.
|
||||
|
||||
To allow it add the following to the beginning of the README.md of the module:
|
||||
|
||||
---
|
||||
...
|
||||
|
||||
features = [ "inventory" ]
|
||||
---
|
||||
|
||||
Also make sure to test the module with the 'inventory' feature enabled.
|
||||
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
) (config.inventory.services or { });
|
||||
|
||||
compiledMachine = compileMachine {
|
||||
inherit
|
||||
machineConfig
|
||||
;
|
||||
};
|
||||
|
||||
machineImports = (
|
||||
compiledMachine.machineImports
|
||||
++ builtins.foldl' (
|
||||
acc: service:
|
||||
let
|
||||
failedAssertions = (lib.filterAttrs (_: v: !v.assertion) service.assertions);
|
||||
failedAssertionsImports =
|
||||
if failedAssertions != { } then
|
||||
[
|
||||
{
|
||||
clan.inventory.assertions = failedAssertions;
|
||||
}
|
||||
]
|
||||
else
|
||||
[
|
||||
{
|
||||
clan.inventory.assertions = {
|
||||
"alive.assertion.inventory" = {
|
||||
assertion = true;
|
||||
message = ''
|
||||
No failed assertions found for machine ${machineName}. This will never be displayed.
|
||||
It is here for testing purposes.
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
];
|
||||
in
|
||||
acc
|
||||
++ service.machineImports
|
||||
# Import failed assertions
|
||||
++ failedAssertionsImports
|
||||
) [ ] (builtins.attrValues m.config.compiledServices)
|
||||
);
|
||||
in
|
||||
{
|
||||
inherit machineImports compiledServices compiledMachine;
|
||||
}
|
||||
) (inventory.machines or { });
|
||||
};
|
||||
}
|
||||
91
lib/inventory/build-inventory/builder/interface.nix
Normal file
@@ -0,0 +1,91 @@
|
||||
{ lib, ... }:
|
||||
let
|
||||
inherit (lib) types mkOption;
|
||||
submodule = m: types.submoduleWith { modules = [ m ]; };
|
||||
|
||||
in
|
||||
{
|
||||
options = {
|
||||
directory = mkOption {
|
||||
type = types.path;
|
||||
};
|
||||
inventory = mkOption {
|
||||
type = types.raw;
|
||||
};
|
||||
machines = mkOption {
|
||||
type = types.attrsOf (
|
||||
submodule (
|
||||
{ name, ... }:
|
||||
let
|
||||
machineName = name;
|
||||
in
|
||||
{
|
||||
options = {
|
||||
compiledMachine = mkOption {
|
||||
type = types.raw;
|
||||
};
|
||||
compiledServices = mkOption {
|
||||
# type = types.attrsOf;
|
||||
type = types.attrsOf (
|
||||
types.submoduleWith {
|
||||
modules = [
|
||||
(
|
||||
{ name, ... }:
|
||||
let
|
||||
serviceName = name;
|
||||
in
|
||||
{
|
||||
options = {
|
||||
machineName = mkOption {
|
||||
default = machineName;
|
||||
readOnly = true;
|
||||
};
|
||||
serviceName = mkOption {
|
||||
default = serviceName;
|
||||
readOnly = true;
|
||||
};
|
||||
# Outputs
|
||||
machineImports = mkOption {
|
||||
type = types.listOf types.raw;
|
||||
};
|
||||
supportedRoles = mkOption {
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
matchedRoles = mkOption {
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
isClanModule = mkOption {
|
||||
type = types.bool;
|
||||
};
|
||||
machinesRoles = mkOption {
|
||||
type = types.attrsOf (types.listOf types.str);
|
||||
};
|
||||
resolvedRolesPerInstance = mkOption {
|
||||
type = types.attrsOf (
|
||||
types.attrsOf (submodule {
|
||||
options.machines = mkOption {
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
})
|
||||
);
|
||||
};
|
||||
assertions = mkOption {
|
||||
type = types.attrsOf types.raw;
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
];
|
||||
}
|
||||
);
|
||||
};
|
||||
machineImports = mkOption {
|
||||
type = types.listOf types.raw;
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
);
|
||||
};
|
||||
};
|
||||
}
|
||||
65
lib/inventory/build-inventory/builder/roles.nix
Normal file
@@ -0,0 +1,65 @@
|
||||
{
|
||||
lib,
|
||||
config,
|
||||
resolveTags,
|
||||
inventory,
|
||||
clan-core,
|
||||
machineName,
|
||||
serviceConfigs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
serviceName = config.serviceName;
|
||||
in
|
||||
{
|
||||
# Roles resolution
|
||||
# : List String
|
||||
supportedRoles = clan-core.lib.modules.getRoles inventory.modules serviceName;
|
||||
matchedRoles = builtins.attrNames (
|
||||
lib.filterAttrs (_: ms: builtins.elem machineName ms) config.machinesRoles
|
||||
);
|
||||
resolvedRolesPerInstance = lib.mapAttrs (
|
||||
instanceName: instanceConfig:
|
||||
let
|
||||
resolvedRoles = lib.genAttrs config.supportedRoles (
|
||||
roleName:
|
||||
resolveTags {
|
||||
members = instanceConfig.roles.${roleName} or { };
|
||||
inherit
|
||||
instanceName
|
||||
serviceName
|
||||
roleName
|
||||
inventory
|
||||
;
|
||||
}
|
||||
);
|
||||
usedRoles = builtins.attrNames instanceConfig.roles;
|
||||
unmatchedRoles = builtins.filter (role: !builtins.elem role config.supportedRoles) usedRoles;
|
||||
in
|
||||
if unmatchedRoles != [ ] then
|
||||
throw ''
|
||||
Roles ${builtins.toJSON unmatchedRoles} are not defined in the service ${serviceName}.
|
||||
Instance: '${instanceName}'
|
||||
Please use one of available roles: ${builtins.toJSON config.supportedRoles}
|
||||
''
|
||||
else
|
||||
resolvedRoles
|
||||
) serviceConfigs;
|
||||
|
||||
machinesRoles = builtins.zipAttrsWith (
|
||||
_n: vs:
|
||||
let
|
||||
flat = builtins.foldl' (acc: s: acc ++ s.machines) [ ] vs;
|
||||
in
|
||||
lib.unique flat
|
||||
) (builtins.attrValues config.resolvedRolesPerInstance);
|
||||
|
||||
assertions = lib.concatMapAttrs (
|
||||
instanceName: resolvedRoles:
|
||||
clan-core.lib.modules.checkConstraints {
|
||||
moduleName = serviceName;
|
||||
allModules = inventory.modules;
|
||||
inherit resolvedRoles instanceName;
|
||||
}
|
||||
) config.resolvedRolesPerInstance;
|
||||
}
|
||||
@@ -2,271 +2,6 @@
|
||||
# This function is responsible for generating the module configuration for every machine in the inventory.
|
||||
{ lib, clan-core }:
|
||||
let
|
||||
resolveTags =
|
||||
# Inventory, { machines :: [string], tags :: [string] }
|
||||
{
|
||||
serviceName,
|
||||
instanceName,
|
||||
roleName,
|
||||
inventory,
|
||||
members,
|
||||
}:
|
||||
{
|
||||
machines =
|
||||
members.machines or [ ]
|
||||
++ (builtins.foldl' (
|
||||
acc: tag:
|
||||
let
|
||||
# For error printing
|
||||
availableTags = lib.foldlAttrs (
|
||||
acc: _: v:
|
||||
v.tags or [ ] ++ acc
|
||||
) [ ] (inventory.machines);
|
||||
|
||||
tagMembers = builtins.attrNames (
|
||||
lib.filterAttrs (_n: v: builtins.elem tag v.tags or [ ]) inventory.machines
|
||||
);
|
||||
in
|
||||
if tagMembers == [ ] then
|
||||
lib.warn ''
|
||||
inventory.services.${serviceName}.${instanceName}: - ${roleName} tags: no machine with tag '${tag}' found.
|
||||
Available tags: ${builtins.toJSON (lib.unique availableTags)}
|
||||
'' [ ]
|
||||
else
|
||||
acc ++ tagMembers
|
||||
) [ ] members.tags or [ ]);
|
||||
};
|
||||
|
||||
checkService =
|
||||
modulepath: serviceName:
|
||||
builtins.elem "inventory"
|
||||
(clan-core.lib.modules.getFrontmatter modulepath serviceName).features or [ ];
|
||||
|
||||
compileMachine =
|
||||
{ machineConfig }:
|
||||
{
|
||||
machineImports = [
|
||||
(lib.optionalAttrs (machineConfig.deploy.targetHost or null != null) {
|
||||
config.clan.core.networking.targetHost = machineConfig.deploy.targetHost;
|
||||
})
|
||||
];
|
||||
assertions = { };
|
||||
};
|
||||
|
||||
compileServicesForMachine =
|
||||
# Returns a NixOS configuration for the machine 'machineName'.
|
||||
# Return Format: { imports = [ ... ]; config = { ... }; options = { ... } }
|
||||
{
|
||||
machineName,
|
||||
inventory,
|
||||
directory,
|
||||
}:
|
||||
let
|
||||
compileServiceModules =
|
||||
serviceName: serviceConfigs:
|
||||
let
|
||||
supportedRoles = clan-core.lib.modules.getRoles inventory.modules serviceName;
|
||||
|
||||
firstRole = import (getRoleFile (builtins.head supportedRoles));
|
||||
|
||||
loadModuleForClassCheck =
|
||||
m:
|
||||
if lib.isFunction m then
|
||||
let
|
||||
args = lib.functionArgs m;
|
||||
in
|
||||
m args
|
||||
else
|
||||
m;
|
||||
|
||||
isClanModule =
|
||||
let
|
||||
module = loadModuleForClassCheck firstRole;
|
||||
in
|
||||
if module ? _class then module._class == "clan" else false;
|
||||
|
||||
getRoleFile = role: builtins.seq role inventory.modules.${serviceName} + "/roles/${role}.nix";
|
||||
|
||||
resolvedRolesPerInstance = lib.mapAttrs (
|
||||
instanceName: instanceConfig:
|
||||
let
|
||||
resolvedRoles = lib.genAttrs supportedRoles (
|
||||
roleName:
|
||||
resolveTags {
|
||||
members = instanceConfig.roles.${roleName} or { };
|
||||
inherit
|
||||
instanceName
|
||||
serviceName
|
||||
roleName
|
||||
inventory
|
||||
;
|
||||
}
|
||||
);
|
||||
usedRoles = builtins.attrNames instanceConfig.roles;
|
||||
unmatchedRoles = builtins.filter (role: !builtins.elem role supportedRoles) usedRoles;
|
||||
in
|
||||
if unmatchedRoles != [ ] then
|
||||
throw ''
|
||||
Service: '${serviceName}' Instance: '${instanceName}'
|
||||
The following roles do not exist: ${builtins.toJSON unmatchedRoles}
|
||||
Please use one of available roles: ${builtins.toJSON supportedRoles}
|
||||
''
|
||||
else
|
||||
resolvedRoles
|
||||
) serviceConfigs;
|
||||
|
||||
machinesRoles = builtins.zipAttrsWith (
|
||||
_n: vs:
|
||||
let
|
||||
flat = builtins.foldl' (acc: s: acc ++ s.machines) [ ] vs;
|
||||
in
|
||||
lib.unique flat
|
||||
) (builtins.attrValues resolvedRolesPerInstance);
|
||||
|
||||
matchedRoles = builtins.attrNames (
|
||||
lib.filterAttrs (_: ms: builtins.elem machineName ms) machinesRoles
|
||||
);
|
||||
in
|
||||
# roleImports = lib.mapAttrsToList (
|
||||
# roleName: _: inventory.modules.${serviceName} + "/roles/${roleName}.nix"
|
||||
# ) (lib.filterAttrs (_: ms: builtins.elem machineName ms) machinesRoles);
|
||||
# CompiledService :: { machineImports :: []; machineRoles :: [ String ] }
|
||||
{
|
||||
inherit
|
||||
machinesRoles
|
||||
matchedRoles
|
||||
resolvedRolesPerInstance
|
||||
firstRole
|
||||
isClanModule
|
||||
supportedRoles
|
||||
;
|
||||
# TODO: Add other attributes
|
||||
machineImports =
|
||||
if isClanModule then
|
||||
throw "Clan modules are not supported yet."
|
||||
else
|
||||
(lib.foldlAttrs (
|
||||
# [ Modules ], String, ServiceConfig
|
||||
acc2: instanceName: serviceConfig:
|
||||
let
|
||||
resolvedRoles = lib.genAttrs supportedRoles (
|
||||
roleName:
|
||||
resolveTags {
|
||||
members = serviceConfig.roles.${roleName} or { };
|
||||
inherit
|
||||
serviceName
|
||||
instanceName
|
||||
roleName
|
||||
inventory
|
||||
;
|
||||
}
|
||||
);
|
||||
|
||||
isInService = builtins.any (members: builtins.elem machineName members.machines) (
|
||||
builtins.attrValues resolvedRoles
|
||||
);
|
||||
|
||||
# all roles where the machine is present
|
||||
machineRoles = builtins.attrNames (
|
||||
lib.filterAttrs (_role: roleConfig: builtins.elem machineName roleConfig.machines) resolvedRoles
|
||||
);
|
||||
machineServiceConfig = (serviceConfig.machines.${machineName} or { }).config or { };
|
||||
globalConfig = serviceConfig.config or { };
|
||||
|
||||
globalExtraModules = serviceConfig.extraModules or [ ];
|
||||
machineExtraModules = serviceConfig.machines.${machineName}.extraModules or [ ];
|
||||
roleServiceExtraModules = builtins.foldl' (
|
||||
acc: role: acc ++ serviceConfig.roles.${role}.extraModules or [ ]
|
||||
) [ ] machineRoles;
|
||||
|
||||
# TODO: maybe optimize this dont lookup the role in inverse roles. Imports are not lazy
|
||||
roleModules = builtins.map (
|
||||
role:
|
||||
if builtins.elem role supportedRoles && inventory.modules ? ${serviceName} then
|
||||
getRoleFile role
|
||||
else
|
||||
throw "Module ${serviceName} doesn't have role: '${role}'. Role: ${
|
||||
inventory.modules.${serviceName}
|
||||
}/roles/${role}.nix not found."
|
||||
) machineRoles;
|
||||
|
||||
roleServiceConfigs = builtins.filter (m: m != { }) (
|
||||
builtins.map (role: serviceConfig.roles.${role}.config or { }) machineRoles
|
||||
);
|
||||
|
||||
extraModules = map (s: if builtins.typeOf s == "string" then "${directory}/${s}" else s) (
|
||||
globalExtraModules ++ machineExtraModules ++ roleServiceExtraModules
|
||||
);
|
||||
|
||||
nonExistingRoles = builtins.filter (role: !(builtins.elem role supportedRoles)) (
|
||||
builtins.attrNames (serviceConfig.roles or { })
|
||||
);
|
||||
|
||||
constraintAssertions = clan-core.lib.modules.checkConstraints {
|
||||
moduleName = serviceName;
|
||||
allModules = inventory.modules;
|
||||
inherit resolvedRoles instanceName;
|
||||
};
|
||||
in
|
||||
if (nonExistingRoles != [ ]) then
|
||||
throw "Roles ${builtins.toString nonExistingRoles} are not defined in the service ${serviceName}."
|
||||
else if !(serviceConfig.enabled or true) then
|
||||
acc2
|
||||
else if isInService then
|
||||
acc2
|
||||
++ [
|
||||
{
|
||||
imports = roleModules ++ extraModules;
|
||||
|
||||
clan.inventory.assertions = constraintAssertions;
|
||||
clan.inventory.services.${serviceName}.${instanceName} = {
|
||||
roles = resolvedRoles;
|
||||
# TODO: Add inverseRoles to the service config if needed
|
||||
# inherit inverseRoles;
|
||||
};
|
||||
}
|
||||
(lib.optionalAttrs (globalConfig != { } || machineServiceConfig != { } || roleServiceConfigs != [ ])
|
||||
{
|
||||
clan.${serviceName} = lib.mkMerge (
|
||||
[
|
||||
globalConfig
|
||||
machineServiceConfig
|
||||
]
|
||||
++ roleServiceConfigs
|
||||
);
|
||||
}
|
||||
)
|
||||
]
|
||||
else
|
||||
acc2
|
||||
) [ ] (serviceConfigs));
|
||||
|
||||
assertions = lib.mapAttrs' (name: value: {
|
||||
name = "checkservice.${serviceName}.${name}";
|
||||
value = {
|
||||
assertion = checkService inventory.modules.${serviceName} serviceName;
|
||||
message = ''
|
||||
Service ${serviceName} cannot be used in inventory. It does not declare the 'inventory' feature.
|
||||
|
||||
|
||||
To allow it add the following to the beginning of the README.md of the module:
|
||||
|
||||
---
|
||||
...
|
||||
|
||||
features = [ "inventory" ]
|
||||
---
|
||||
|
||||
Also make sure to test the module with the 'inventory' feature enabled.
|
||||
|
||||
'';
|
||||
};
|
||||
}) inventory.services;
|
||||
};
|
||||
|
||||
in
|
||||
lib.mapAttrs compileServiceModules inventory.services;
|
||||
|
||||
/*
|
||||
Returns a set with NixOS configuration for every machine in the inventory.
|
||||
|
||||
@@ -276,57 +11,11 @@ let
|
||||
{ inventory, directory }:
|
||||
(lib.evalModules {
|
||||
specialArgs = {
|
||||
inherit directory inventory;
|
||||
inherit clan-core;
|
||||
};
|
||||
modules = [
|
||||
./internal.nix
|
||||
(
|
||||
{ ... }:
|
||||
{
|
||||
machines = builtins.mapAttrs (
|
||||
machineName: machineConfig:
|
||||
let
|
||||
compiledServices = compileServicesForMachine {
|
||||
inherit
|
||||
machineName
|
||||
inventory
|
||||
directory
|
||||
;
|
||||
};
|
||||
compiledMachine = compileMachine {
|
||||
inherit
|
||||
machineConfig
|
||||
;
|
||||
};
|
||||
|
||||
machineImports =
|
||||
compiledMachine.machineImports
|
||||
++ builtins.foldl' (
|
||||
acc: service:
|
||||
let
|
||||
failedAssertions = (lib.filterAttrs (_: v: !v.assertion) service.assertions);
|
||||
failedAssertionsImports =
|
||||
if failedAssertions != { } then
|
||||
[
|
||||
{
|
||||
clan.inventory.assertions = failedAssertions;
|
||||
}
|
||||
]
|
||||
else
|
||||
[ ];
|
||||
in
|
||||
acc
|
||||
++ service.machineImports
|
||||
# Import failed assertions
|
||||
++ failedAssertionsImports
|
||||
) [ ] (builtins.attrValues compiledServices);
|
||||
in
|
||||
{
|
||||
inherit machineImports compiledServices compiledMachine;
|
||||
}
|
||||
) (inventory.machines or { });
|
||||
}
|
||||
)
|
||||
./builder
|
||||
{ inherit directory inventory; }
|
||||
];
|
||||
}).config;
|
||||
in
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
{ lib, ... }:
|
||||
let
|
||||
inherit (lib) types mkOption;
|
||||
submodule = m: types.submoduleWith { modules = [ m ]; };
|
||||
in
|
||||
{
|
||||
options = {
|
||||
machines = mkOption {
|
||||
type = types.attrsOf (submodule {
|
||||
options = {
|
||||
compiledMachine = mkOption {
|
||||
type = types.raw;
|
||||
};
|
||||
compiledServices = mkOption {
|
||||
type = types.raw;
|
||||
};
|
||||
machineImports = mkOption {
|
||||
type = types.raw;
|
||||
};
|
||||
};
|
||||
});
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -53,6 +53,7 @@ in
|
||||
"lib/default.nix"
|
||||
"lib/flake-module.nix"
|
||||
"lib/inventory"
|
||||
"lib/constraints"
|
||||
"lib/frontmatter"
|
||||
"clanModules/flake-module.nix"
|
||||
"clanModules/borgbackup"
|
||||
|
||||
@@ -231,7 +231,7 @@ in
|
||||
expr = configs.machines.machine_1.machineImports;
|
||||
expectedError = {
|
||||
type = "ThrownError";
|
||||
msg = "Roles roleXYZ are not defined in the service borgbackup.";
|
||||
msg = ''Roles \["roleXYZ"\] are not defined in the service borgbackup'';
|
||||
};
|
||||
};
|
||||
# Needs NIX_ABORT_ON_WARN=1
|
||||
@@ -286,7 +286,9 @@ in
|
||||
in
|
||||
{
|
||||
inherit configs;
|
||||
expr = builtins.filter (v: v != { }) configs.machines.machine_1.machineImports;
|
||||
expr = builtins.filter (
|
||||
v: v != { } && !v.clan.inventory.assertions ? "alive.assertion.inventory"
|
||||
) configs.machines.machine_1.machineImports;
|
||||
expected = [ ];
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
{ lib, config, ... }:
|
||||
{
|
||||
imports = [
|
||||
./backups.nix
|
||||
./defaults.nix
|
||||
./facts
|
||||
./inventory
|
||||
./manual.nix
|
||||
./meta/interface.nix
|
||||
./metadata.nix
|
||||
./networking.nix
|
||||
@@ -12,7 +11,6 @@
|
||||
./nix-settings.nix
|
||||
./options.nix
|
||||
./outputs.nix
|
||||
./packages.nix
|
||||
./schema.nix
|
||||
./sops.nix
|
||||
./vars
|
||||
@@ -22,14 +20,4 @@
|
||||
./zfs.nix
|
||||
];
|
||||
|
||||
# Use systemd during boot as well except:
|
||||
# - systems with raids, as these currently require manual configuration: https://github.com/NixOS/nixpkgs/issues/210210
|
||||
# - for containers we currently rely on the `stage-2` init script that sets up our /etc
|
||||
boot.initrd.systemd.enable = lib.mkDefault (!config.boot.swraid.enable && !config.boot.isContainer);
|
||||
|
||||
# Work around for https://github.com/NixOS/nixpkgs/issues/124215
|
||||
documentation.info.enable = lib.mkDefault false;
|
||||
|
||||
# Don't install the /lib/ld-linux.so.2 stub. This saves one instance of nixpkgs.
|
||||
environment.ldso32 = null;
|
||||
}
|
||||
|
||||
51
nixosModules/clanCore/defaults.nix
Normal file
@@ -0,0 +1,51 @@
|
||||
{
|
||||
lib,
|
||||
config,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{
|
||||
options.clan.core.enableRecommendedDefaults = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
description = ''
|
||||
Whether to enable recommended default settings for NixOS by Clan.
|
||||
|
||||
These settings are entirely optional and are not necessary for using Clan.
|
||||
|
||||
This enables the new systemd in stage-1, disables some options that increase the closure size
|
||||
and adds some extra packages for debugging like `tcpdump` and `dnsutils`.
|
||||
'';
|
||||
default = true;
|
||||
example = false;
|
||||
};
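# Hypothetical opt-out sketch, for machines that prefer a stock NixOS baseline:
#   clan.core.enableRecommendedDefaults = false;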
|
||||
|
||||
config = lib.mkIf config.clan.core.enableRecommendedDefaults {
|
||||
# Use systemd during boot as well except:
|
||||
# - systems with raids, as these currently require manual configuration: https://github.com/NixOS/nixpkgs/issues/210210
|
||||
# - for containers we currently rely on the `stage-2` init script that sets up our /etc
|
||||
boot.initrd.systemd.enable = lib.mkDefault (!config.boot.swraid.enable && !config.boot.isContainer);
|
||||
|
||||
# This disables the HTML manual and `nixos-help` command but leaves
|
||||
# `man configuration.nix`
|
||||
documentation.doc.enable = lib.mkDefault false;
|
||||
|
||||
# Work around for https://github.com/NixOS/nixpkgs/issues/124215
|
||||
documentation.info.enable = lib.mkDefault false;
|
||||
|
||||
# Don't install the /lib/ld-linux.so.2 stub. This saves one instance of nixpkgs.
|
||||
environment.ldso32 = null;
|
||||
|
||||
environment.systemPackages = [
|
||||
# essential debugging tools for networked services
|
||||
pkgs.dnsutils
|
||||
pkgs.tcpdump
|
||||
pkgs.curl
|
||||
pkgs.jq
|
||||
pkgs.htop
|
||||
|
||||
pkgs.nixos-facter # for `clan machines update-hardware-config --backend nixos-facter`
|
||||
|
||||
pkgs.gitMinimal
|
||||
];
|
||||
};
|
||||
}
|
||||
@@ -1,4 +0,0 @@
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
documentation.nixos.enable = pkgs.lib.mkDefault false;
|
||||
}
|
||||
@@ -6,16 +6,18 @@
|
||||
description = ''
|
||||
The target SSH node for deployment.
|
||||
|
||||
By default, the node's attribute name will be used.
|
||||
By default, the node's fully qualified domain name or hostname will be used.
|
||||
|
||||
If set to null, only local deployment will be supported.
|
||||
|
||||
format: user@host:port&SSH_OPTION=SSH_VALUE
|
||||
format: user@host:port?SSH_OPTION=SSH_VALUE[&SSH_OPTION_2=VALUE_2]
|
||||
examples:
|
||||
- machine.example.com
|
||||
- user@machine2.example.com
|
||||
- root@example.com:2222&IdentityFile=/path/to/private/key
|
||||
- root@example.com:2222?IdentityFile=/path/to/private/key&StrictHostKeyChecking=yes
|
||||
'';
|
||||
default = null;
|
||||
default = "root@${config.networking.fqdnOrHostName}";
|
||||
defaultText = "root@\${config.networking.fqdnOrHostName}";
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
};
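# Hypothetical usage of the '?'-separated SSH option format described above:
#   clan.core.networking.targetHost = "root@machine.example.com:2222?IdentityFile=/path/to/private/key";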
|
||||
buildHost = lib.mkOption {
|
||||
@@ -24,11 +26,11 @@
|
||||
|
||||
If set to null, the targetHost will be used.
|
||||
|
||||
format: user@host:port&SSH_OPTION=SSH_VALUE
|
||||
format: user@host:port?SSH_OPTION=SSH_VALUE&SSH_OPTION_2=VALUE_2
|
||||
examples:
|
||||
- machine.example.com
|
||||
- user@machine2.example.com
|
||||
- root@example.com:2222&IdentityFile=/path/to/private/key
|
||||
- root@example.com:2222?IdentityFile=/path/to/private/key&StrictHostKeyChecking=yes
|
||||
'';
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
@@ -91,7 +93,7 @@
|
||||
]
|
||||
)
|
||||
];
|
||||
config = {
|
||||
config = lib.mkIf config.clan.core.enableRecommendedDefaults {
|
||||
# conflicts with systemd-resolved
|
||||
networking.useHostResolvConf = false;
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{ lib, ... }:
|
||||
{ lib, config, ... }:
|
||||
# Taken from:
|
||||
# https://github.com/nix-community/srvos/blob/main/nixos/common/nix.nix
|
||||
{
|
||||
lib.mkIf config.clan.core.enableRecommendedDefaults {
|
||||
# Fallback quickly if substituters are not available.
|
||||
nix.settings.connect-timeout = 5;
|
||||
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{
|
||||
environment.systemPackages = [
|
||||
# essential debugging tools for networked services
|
||||
pkgs.dnsutils
|
||||
pkgs.tcpdump
|
||||
pkgs.curl
|
||||
pkgs.jq
|
||||
pkgs.htop
|
||||
|
||||
pkgs.nixos-facter # for `clan machines update-hardware-config --backend nixos-facter`
|
||||
|
||||
pkgs.gitMinimal
|
||||
];
|
||||
}
|
||||
@@ -16,6 +16,7 @@ in
|
||||
{
|
||||
imports = [
|
||||
./public/in_repo.nix
|
||||
./secret/fs.nix
|
||||
./secret/password-store.nix
|
||||
./secret/sops
|
||||
./secret/vm.nix
|
||||
|
||||
17
nixosModules/clanCore/vars/secret/fs.nix
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
config.clan.core.vars.settings = lib.mkIf (config.clan.core.vars.settings.secretStore == "fs") {
|
||||
fileModule = file: {
|
||||
path =
|
||||
if file.config.neededFor == "partitioning" then
|
||||
throw "${file.config.generatorName}/${file.config.name}: FS backend does not support partitioning."
|
||||
else
|
||||
"/run/secrets/${file.config.generatorName}/${file.config.name}";
|
||||
};
|
||||
secretModule = "clan_cli.vars.secret_modules.fs";
|
||||
};
|
||||
}
|
||||
@@ -5,6 +5,7 @@
|
||||
"sops"
|
||||
"password-store"
|
||||
"vm"
|
||||
"fs"
|
||||
"custom"
|
||||
];
|
||||
default = "sops";
|
||||
@@ -14,6 +15,17 @@
|
||||
'';
|
||||
};
|
||||
|
||||
passBackend = lib.mkOption {
|
||||
type = lib.types.enum [
|
||||
"passage"
|
||||
"pass"
|
||||
];
|
||||
default = "pass";
|
||||
description = ''
|
||||
password-store backend to use. Valid options are `pass` and `passage`
|
||||
'';
|
||||
};
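# Hypothetical usage sketch, assuming this option sits next to secretStore:
#   clan.core.vars.settings.passBackend = "passage";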
|
||||
|
||||
secretModule = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
internal = true;
|
||||
|
||||
@@ -5,7 +5,7 @@ import pytest
|
||||
|
||||
TEST_ROOT = Path(__file__).parent.resolve()
|
||||
PROJECT_ROOT = TEST_ROOT.parent
|
||||
if CLAN_CORE_ := os.environ.get("CLAN_CORE"):
|
||||
if CLAN_CORE_ := os.environ.get("CLAN_CORE_PATH"):
|
||||
CLAN_CORE = Path(CLAN_CORE_)
|
||||
else:
|
||||
CLAN_CORE = PROJECT_ROOT.parent.parent
|
||||
|
||||
@@ -19,9 +19,10 @@ def temporary_home(monkeypatch: pytest.MonkeyPatch) -> Iterator[Path]:
|
||||
monkeypatch.chdir(str(path))
|
||||
yield path
|
||||
else:
|
||||
with tempfile.TemporaryDirectory(prefix="pytest-") as dirpath:
|
||||
with tempfile.TemporaryDirectory(prefix="pytest-") as _dirpath:
|
||||
dirpath = Path(_dirpath)
|
||||
monkeypatch.setenv("HOME", str(dirpath))
|
||||
monkeypatch.setenv("XDG_CONFIG_HOME", str(Path(dirpath) / ".config"))
|
||||
monkeypatch.setenv("XDG_CONFIG_HOME", str(dirpath / ".config"))
|
||||
monkeypatch.chdir(str(dirpath))
|
||||
log.debug("Temp HOME directory: %s", str(dirpath))
|
||||
yield Path(dirpath)
|
||||
yield dirpath
|
||||
|
||||
@@ -22,11 +22,11 @@ from . import (
|
||||
state,
|
||||
vms,
|
||||
)
|
||||
from .clan_uri import FlakeId
|
||||
from .custom_logger import setup_logging
|
||||
from .dirs import get_clan_flake_toplevel_or_env
|
||||
from .errors import ClanError
|
||||
from .facts import cli as facts
|
||||
from .flake import Flake
|
||||
from .flash import cli as flash_cli
|
||||
from .hyperlink import help_hyperlink
|
||||
from .machines import cli as machines
|
||||
@@ -41,17 +41,17 @@ with contextlib.suppress(ImportError):
|
||||
import argcomplete # type: ignore[no-redef]
|
||||
|
||||
|
||||
def flake_path(arg: str) -> FlakeId:
|
||||
def flake_path(arg: str) -> Flake:
|
||||
flake_dir = Path(arg).resolve()
|
||||
if flake_dir.exists() and flake_dir.is_dir():
|
||||
return FlakeId(str(flake_dir))
|
||||
return FlakeId(arg)
|
||||
return Flake(str(flake_dir))
|
||||
return Flake(arg)
|
||||
|
||||
|
||||
def default_flake() -> FlakeId | None:
|
||||
def default_flake() -> Flake | None:
|
||||
val = get_clan_flake_toplevel_or_env()
|
||||
if val:
|
||||
return FlakeId(str(val))
|
||||
return Flake(str(val))
|
||||
return None
|
||||
|
||||
|
||||
@@ -148,6 +148,7 @@ Note: The meta results from clan/meta.json and manual flake arguments.

     parser_backups = subparsers.add_parser(
         "backups",
+        aliases=["b"],
         help="Manage backups of clan machines",
         description="Manage backups of clan machines",
         epilog=(

@@ -175,6 +176,7 @@ For more detailed information visit: {help_hyperlink("backups", "https://docs.cl

     parser_flake = subparsers.add_parser(
         "flakes",
+        aliases=["f"],
         help="Create a clan flake inside the current directory",
         description="Create a clan flake inside the current directory",
         epilog=(

@@ -300,6 +302,7 @@ For more detailed information, visit: {help_hyperlink("secrets", "https://docs.c
     # like facts but with vars instead of facts
     parser_vars = subparsers.add_parser(
         "vars",
+        aliases=["va"],
         help="Manage vars",
         description="Manage vars",
         epilog=(

@@ -336,6 +339,7 @@ For more detailed information, visit: {help_hyperlink("secrets", "https://docs.c

     parser_machine = subparsers.add_parser(
         "machines",
+        aliases=["m"],
         help="Manage machines and their configuration",
         description="Manage machines and their configuration",
         epilog=(

@@ -376,6 +380,7 @@ For more detailed information, visit: {help_hyperlink("deploy", "https://docs.cl

     parser_select = subparsers.add_parser(
         "select",
+        aliases=["se"],
         help="Select nixos values from the flake",
         description="Select nixos values from the flake",
         epilog=(

@@ -400,6 +405,7 @@ Examples:

     parser_state = subparsers.add_parser(
         "state",
+        aliases=["st"],
         help="Query state information about machines",
         description="Query state information about machines",
         epilog=(

@@ -431,7 +437,7 @@ For more detailed information, visit: {help_hyperlink("getting-started", "https:
     state.register_parser(parser_state)

     if argcomplete:
-        argcomplete.autocomplete(parser)
+        argcomplete.autocomplete(parser, exclude=["morph"])

     register_common_flags(parser)
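These hunks give every subcommand a short alias via add_parser(..., aliases=[...]) and pass exclude=["morph"] so that entry is left out of suggested completions. A minimal standalone argparse sketch of the alias mechanism (not the project's parser):

import argparse

parser = argparse.ArgumentParser(prog="clan")
subparsers = parser.add_subparsers(dest="command")

# aliases= lets `clan m ...` behave exactly like `clan machines ...`.
machines = subparsers.add_parser(
    "machines",
    aliases=["m"],
    help="Manage machines and their configuration",
)
machines.add_argument("action", choices=["list", "update"])

args = parser.parse_args(["m", "list"])
print(args.command, args.action)  # -> m list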
@@ -171,6 +171,7 @@ def set_machine_disk_schema(
     # For every placeholder check that the value is valid
     for placeholder_name, placeholder_value in placeholders.items():
         ph = disk_schema.placeholders.get(placeholder_name)
+
         # Unknown placeholder
         if not ph:
             msg = (
@@ -35,6 +35,7 @@ log = logging.getLogger(__name__)
 # Define generics for return type and call signature
 R = TypeVar("R")  # Return type of the callable
 P = ParamSpec("P")  # Parameters of the callable
+Q = TypeVar("Q")  # Data type for the async_opts.data field


 @dataclass

@@ -200,6 +201,15 @@ class AsyncFuture(Generic[R]):
         return result


+@dataclass
+class AsyncFutureRef(AsyncFuture[R], Generic[R, Q]):
+    ref: Q | None
+
+
+class AsyncOptsRef(AsyncOpts, Generic[Q]):
+    ref: Q | None = None
+
+
 @dataclass
 class AsyncRuntime:
     tasks: dict[str, AsyncThread[Any, Any]] = field(default_factory=dict)

@@ -232,6 +242,21 @@ class AsyncRuntime:
         thread.start()
         return AsyncFuture(opts.tid, self)

+    def async_run_ref(
+        self,
+        ref: Q,
+        opts: AsyncOpts | None,
+        function: Callable[P, R],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncFutureRef[R, Q]:
+        """
+        The same as async_run, but with an additional reference to an object.
+        This is useful to keep track of the origin of the task.
+        """
+        future = self.async_run(opts, function, *args, **kwargs)
+        return AsyncFutureRef(_tid=future._tid, _runtime=self, ref=ref)  # noqa: SLF001
+
     def join_all(self) -> None:
         """
         Wait for all tasks to finish
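async_run_ref attaches an arbitrary "origin" object to the future it returns, so whoever joins the task later can tell where it came from. A minimal self-contained sketch of the same idea on top of concurrent.futures (an assumed stand-in, not the project's AsyncRuntime):

from concurrent.futures import Future, ThreadPoolExecutor
from dataclasses import dataclass
from typing import Generic, TypeVar

R = TypeVar("R")  # result type
Q = TypeVar("Q")  # type of the origin reference


@dataclass
class FutureRef(Generic[R, Q]):
    future: Future  # the running task
    ref: Q          # who/what started it


def run_with_ref(pool: ThreadPoolExecutor, ref: Q, fn, *args) -> FutureRef:
    # Same submission as usual, but the caller keeps an origin tag.
    return FutureRef(pool.submit(fn, *args), ref)


with ThreadPoolExecutor() as pool:
    f = run_with_ref(pool, "machine-a", pow, 2, 10)
    print(f.ref, f.future.result())  # -> machine-a 1024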
@@ -1,5 +1,4 @@
 import argparse
-import json
 import logging

 from clan_cli.completions import (

@@ -15,7 +14,7 @@ log = logging.getLogger(__name__)

 def create_backup(machine: Machine, provider: str | None = None) -> None:
     machine.info(f"creating backup for {machine.name}")
-    backup_scripts = json.loads(machine.eval_nix("config.clan.core.backups"))
+    backup_scripts = machine.eval_nix("config.clan.core.backups")
     if provider is None:
         if not backup_scripts["providers"]:
             msg = "No providers specified"

@@ -20,7 +20,7 @@ class Backup:

 def list_provider(machine: Machine, provider: str) -> list[Backup]:
     results = []
-    backup_metadata = json.loads(machine.eval_nix("config.clan.core.backups"))
+    backup_metadata = machine.eval_nix("config.clan.core.backups")
     list_command = backup_metadata["providers"][provider]["list"]
     proc = machine.target_host.run(
         [list_command],

@@ -46,7 +46,7 @@ def list_provider(machine: Machine, provider: str) -> list[Backup]:


 def list_backups(machine: Machine, provider: str | None = None) -> list[Backup]:
-    backup_metadata = json.loads(machine.eval_nix("config.clan.core.backups"))
+    backup_metadata = machine.eval_nix("config.clan.core.backups")
     results = []
     if provider is None:
         for _provider in backup_metadata["providers"]:

@@ -1,5 +1,4 @@
 import argparse
-import json

 from clan_cli.cmd import Log, RunOpts
 from clan_cli.completions import (

@@ -12,14 +11,17 @@ from clan_cli.machines.machines import Machine


 def restore_service(machine: Machine, name: str, provider: str, service: str) -> None:
-    backup_metadata = json.loads(machine.eval_nix("config.clan.core.backups"))
-    backup_folders = json.loads(machine.eval_nix("config.clan.core.state"))
+    backup_metadata = machine.eval_nix("config.clan.core.backups")
+    backup_folders = machine.eval_nix("config.clan.core.state")

+    if service not in backup_folders:
+        msg = f"Service {service} not found in configuration. Available services are: {', '.join(backup_folders.keys())}"
+        raise ClanError(msg)
+
-    folders = backup_folders[service]["folders"]
+    folders = backup_folders[service]["folders"].values()
     assert all(isinstance(f, str) for f in folders), (
         f"folders must be a list of strings instead of {folders}"
     )
     env = {}
     env["NAME"] = name
     # FIXME: If we have too many folder this might overflow the stack.

@@ -63,7 +65,7 @@ def restore_backup(
 ) -> None:
     errors = []
     if service is None:
-        backup_folders = json.loads(machine.eval_nix("config.clan.core.state"))
+        backup_folders = machine.eval_nix("config.clan.core.state")
         for _service in backup_folders:
             try:
                 restore_service(machine, name, provider, _service)
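A recurring change in these backup hunks: call sites stop wrapping machine.eval_nix(...) in json.loads, which implies the method now returns the parsed value itself. A hedged sketch of what such a wrapper might look like (illustrative only; the real Machine.eval_nix lives elsewhere in the diffed codebase):

import json
import subprocess


def eval_nix(attr: str, flake: str = ".") -> dict | list | str | int:
    """Evaluate a flake attribute and return it already JSON-decoded."""
    out = subprocess.run(
        ["nix", "eval", "--json", f"{flake}#{attr}"],
        capture_output=True,
        text=True,
        check=True,
    ).stdout
    return json.loads(out)  # callers no longer json.loads() themselves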
pkgs/clan-cli/clan_cli/bwrap/__init__.py (new file, 42 lines)
@@ -0,0 +1,42 @@
+from clan_cli.cmd import run
+from clan_cli.nix import nix_shell
+
+_works: bool | None = None
+
+
+def bubblewrap_works() -> bool:
+    global _works
+    if _works is None:
+        _works = _bubblewrap_works()
+    return _works
+
+
+def _bubblewrap_works() -> bool:
+    # fmt: off
+    cmd = nix_shell(
+        [
+            "nixpkgs#bash",
+            "nixpkgs#bubblewrap",
+        ],
+        [
+            "bwrap",
+            "--unshare-all",
+            "--tmpfs", "/",
+            "--ro-bind", "/nix/store", "/nix/store",
+            "--dev", "/dev",
+            "--chdir", "/",
+            "--bind", "/proc", "/proc",
+            "--uid", "1000",
+            "--gid", "1000",
+            "--",
+            # do nothing, just test if bash executes
+            "bash", "-c", ":"
+        ],
+    )
+    # fmt: on
+    try:
+        run(cmd)
+    except Exception:
+        return False
+    else:
+        return True
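The new module probes once whether bubblewrap can actually run on this host (user namespaces may be disabled, for example inside containers) and memoizes the answer in a module-level variable. The same probe-and-cache pattern as a minimal sketch without the Nix wrapper (assumes bwrap is on PATH):

import subprocess

_works: bool | None = None


def bubblewrap_works() -> bool:
    """Probe once, then serve the cached answer."""
    global _works
    if _works is None:
        try:
            # A no-op shell inside a fully unshared, read-only sandbox.
            subprocess.run(
                ["bwrap", "--ro-bind", "/", "/", "--unshare-all",
                 "--dev", "/dev", "--", "/bin/sh", "-c", ":"],
                check=True,
                capture_output=True,
            )
            _works = True
        except (OSError, subprocess.CalledProcessError):
            _works = False
    return _works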
pkgs/clan-cli/clan_cli/bwrap/tests/test_bwrap.py (new file, 16 lines)
@@ -0,0 +1,16 @@
+import sys
+
+import pytest
+from clan_cli.bwrap import bubblewrap_works
+
+
+@pytest.mark.skipif(sys.platform != "linux", reason="bubblewrap only works on linux")
+def test_bubblewrap_works_on_linux() -> None:
+    assert bubblewrap_works() is True
+
+
+@pytest.mark.skipif(
+    sys.platform == "linux", reason="bubblewrap does not work on non-linux"
+)
+def test_bubblewrap_detection_non_linux() -> None:
+    assert bubblewrap_works() is False
@@ -5,11 +5,11 @@ from dataclasses import dataclass
 from pathlib import Path

 from clan_cli.api import API
-from clan_cli.clan_uri import FlakeId
 from clan_cli.cmd import CmdOut, RunOpts, run
 from clan_cli.errors import ClanError
+from clan_cli.flake import Flake
 from clan_cli.inventory import Inventory, init_inventory
-from clan_cli.nix import nix_shell
+from clan_cli.nix import nix_command, nix_metadata, nix_shell
 from clan_cli.templates import (
     InputPrio,
     TemplateName,

@@ -33,10 +33,11 @@ class CreateClanResponse:
 class CreateOptions:
     dest: Path
     template_name: str
-    src_flake: FlakeId | None = None
+    src_flake: Flake | None = None
     input_prio: InputPrio | None = None
     setup_git: bool = True
     initial: Inventory | None = None
+    update_clan: bool = True


 def git_command(directory: Path, *args: str) -> list[str]:

@@ -47,6 +48,16 @@ def git_command(directory: Path, *args: str) -> list[str]:
 def create_clan(opts: CreateOptions) -> CreateClanResponse:
     dest = opts.dest.resolve()

+    if opts.src_flake is not None:
+        try:
+            nix_metadata(str(opts.src_flake))
+        except ClanError:
+            log.error(
+                f"Found a repository, but it is not a valid flake: {opts.src_flake}"
+            )
+            log.warning("Setting src_flake to None")
+            opts.src_flake = None
+
     template = get_template(
         TemplateName(opts.template_name),
         "clan",

@@ -54,21 +65,15 @@ def create_clan(opts: CreateOptions) -> CreateClanResponse:
         clan_dir=opts.src_flake,
     )
     log.info(f"Found template '{template.name}' in '{template.input_variant}'")
+    src = Path(template.src["path"])

     if dest.exists():
-        dest /= src.name
+        dest /= template.name

     if dest.exists():
         msg = f"Destination directory {dest} already exists"
         raise ClanError(msg)

     if not src.exists():
         msg = f"Template {template} does not exist"
         raise ClanError(msg)
     if not src.is_dir():
         msg = f"Template {template} is not a directory"
         raise ClanError(msg)
-    src = Path(template.src["path"])

     copy_from_nixstore(src, dest)

@@ -96,10 +101,9 @@ def create_clan(opts: CreateOptions) -> CreateClanResponse:
         git_command(dest, "config", "user.email", "clan@example.com")
     )

-    flake_update = run(
-        nix_shell(["nixpkgs#nix"], ["nix", "flake", "update"]), RunOpts(cwd=dest)
-    )
-    response.flake_update = flake_update
+    if opts.update_clan:
+        flake_update = run(nix_command(["flake", "update"]), RunOpts(cwd=dest))
+        response.flake_update = flake_update

     if opts.initial:
         init_inventory(str(opts.dest), init=opts.initial)

@@ -147,6 +151,13 @@ def register_create_parser(parser: argparse.ArgumentParser) -> None:
         default=Path(),
     )

+    parser.add_argument(
+        "--no-update",
+        help="Do not update the clan flake",
+        action="store_true",
+        default=False,
+    )
+
     def create_flake_command(args: argparse.Namespace) -> None:
         if len(args.input) == 0:
             args.input = ["clan", "clan-core"]

@@ -163,6 +174,7 @@ def register_create_parser(parser: argparse.ArgumentParser) -> None:
             template_name=args.template,
             setup_git=not args.no_git,
             src_flake=args.flake,
+            update_clan=not args.no_update,
         )
     )
@@ -3,10 +3,10 @@ from dataclasses import dataclass
 from pathlib import Path
 from typing import Any

-from clan_cli.clan_uri import FlakeId
 from clan_cli.cmd import run
 from clan_cli.dirs import machine_gcroot
 from clan_cli.errors import ClanError
+from clan_cli.flake import Flake
 from clan_cli.machines.list import list_nixos_machines
 from clan_cli.machines.machines import Machine
 from clan_cli.nix import (

@@ -21,7 +21,7 @@ from clan_cli.vms.inspect import VmConfig, inspect_vm

 @dataclass
 class FlakeConfig:
-    flake_url: FlakeId
+    flake_url: Flake
     flake_attr: str

     clan_name: str

@@ -35,7 +35,7 @@ class FlakeConfig:
     @classmethod
     def from_json(cls: type["FlakeConfig"], data: dict[str, Any]) -> "FlakeConfig":
         return cls(
-            flake_url=FlakeId.from_json(data["flake_url"]),
+            flake_url=Flake.from_json(data["flake_url"]),
             flake_attr=data["flake_attr"],
             clan_name=data["clan_name"],
             nar_hash=data["nar_hash"],

@@ -62,7 +62,7 @@ def inspect_flake(flake_url: str | Path, machine_name: str) -> FlakeConfig:
         msg = f"Machine {machine_name} not found in {flake_url}. Available machines: {', '.join(machines)}"
         raise ClanError(msg)

-    machine = Machine(machine_name, FlakeId(str(flake_url)))
+    machine = Machine(machine_name, Flake(str(flake_url)))
     vm = inspect_vm(machine)

     # Make symlink to gcroots from vm.machine_icon

@@ -105,7 +105,7 @@ def inspect_flake(flake_url: str | Path, machine_name: str) -> FlakeConfig:
     meta = nix_metadata(flake_url)
     return FlakeConfig(
         vm=vm,
-        flake_url=FlakeId(str(flake_url)),
+        flake_url=Flake(str(flake_url)),
         clan_name=clan_name,
         flake_attr=machine_name,
         nar_hash=meta["locked"]["narHash"],

@@ -119,13 +119,13 @@ def inspect_flake(flake_url: str | Path, machine_name: str) -> FlakeConfig:
 @dataclass
 class InspectOptions:
     machine: str
-    flake: FlakeId
+    flake: Flake


 def inspect_command(args: argparse.Namespace) -> None:
     inspect_options = InspectOptions(
         machine=args.machine,
-        flake=args.flake or FlakeId(str(Path.cwd())),
+        flake=args.flake or Flake(str(Path.cwd())),
     )
     res = inspect_flake(
         flake_url=str(inspect_options.flake), machine_name=inspect_options.machine
@@ -3,68 +3,14 @@ import urllib.parse
 import urllib.request
 from dataclasses import dataclass
 from pathlib import Path
-from typing import Any
-
-
-@dataclass
-class FlakeId:
-    loc: str
-
-    @classmethod
-    def from_json(cls: type["FlakeId"], data: dict[str, Any]) -> "FlakeId":
-        return cls(loc=data["loc"])
-
-    def __str__(self) -> str:
-        return str(self.loc)
-
-    def __hash__(self) -> int:
-        return hash(str(self.loc))
-
-    @property
-    def path(self) -> Path:
-        assert self.is_local(), f"Flake {self.loc} is not a local path"
-        return Path(self.loc)
-
-    @property
-    def url(self) -> str:
-        assert self.is_remote(), f"Flake {self.loc} is not a remote url"
-        return str(self.loc)
-
-    def is_local(self) -> bool:
-        """
-        https://nix.dev/manual/nix/2.22/language/builtins.html?highlight=urlS#source-types
-
-        Examples:
-
-        - file:///home/eelco/nix/README.md          file      LOCAL
-        - git+file://git:github.com:NixOS/nixpkgs  git+file  LOCAL
-        - https://example.com/index.html            https     REMOTE
-        - github:nixos/nixpkgs                      github    REMOTE
-        - ftp://serv.file                           ftp       REMOTE
-        - ./.                                       ''        LOCAL
-
-        """
-        x = urllib.parse.urlparse(str(self.loc))
-        # See above *file* or empty are the only local schemas
-        return x.scheme == "" or "file" in x.scheme
-
-    def is_remote(self) -> bool:
-        return not self.is_local()
-
-
-def _parse_url(comps: urllib.parse.ParseResult) -> FlakeId:
-    if comps.scheme == "" or "file" in comps.scheme:
-        res_p = Path(comps.path).expanduser().resolve()
-        flake_id = FlakeId(str(res_p))
-    else:
-        flake_id = FlakeId(comps.geturl())
-    return flake_id
+from clan_cli.flake import Flake


 # Define the ClanURI class
 @dataclass
 class ClanURI:
-    flake: FlakeId
+    flake: Flake
     machine_name: str

     def get_url(self) -> str:

@@ -104,7 +50,10 @@ class ClanURI:
         clean_comps = components._replace(query=components.query, fragment="")

         # Parse the URL into a ClanUrl object
-        flake = _parse_url(clean_comps)
+        if clean_comps.path and Path(clean_comps.path).exists():
+            flake = Flake(clean_comps.path)
+        else:
+            flake = Flake(clean_comps.geturl())
         machine_name = "defaultVM"
         if components.fragment:
             machine_name = components.fragment
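The removed FlakeId.is_local classified a reference by URL scheme (empty or file-based schemes count as local), while the replacement code in ClanURI simply checks whether the string names an existing path on disk. A small standalone sketch contrasting the two checks (illustrative, not the project's code):

import urllib.parse
from pathlib import Path


def is_local_by_scheme(ref: str) -> bool:
    # Old approach: empty scheme or any file-based scheme is local.
    scheme = urllib.parse.urlparse(ref).scheme
    return scheme == "" or "file" in scheme


def is_local_by_path(ref: str) -> bool:
    # New approach: local iff the reference is an existing filesystem path.
    return bool(ref) and Path(ref).exists()


for ref in ["./.", "github:nixos/nixpkgs", "file:///tmp"]:
    print(ref, is_local_by_scheme(ref), is_local_by_path(ref))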
@@ -70,7 +70,7 @@ def complete_machines(
     if thread.is_alive():
         return iter([])

-    machines_dict = {name: "machine" for name in machines}
+    machines_dict = dict.fromkeys(machines, "machine")
     return machines_dict

@@ -113,7 +113,7 @@ def complete_services_for_machine(
     if thread.is_alive():
         return iter([])

-    services_dict = {name: "service" for name in services}
+    services_dict = dict.fromkeys(services, "service")
     return services_dict

@@ -155,7 +155,7 @@ def complete_backup_providers_for_machine(
     if thread.is_alive():
         return iter([])

-    providers_dict = {name: "provider" for name in providers}
+    providers_dict = dict.fromkeys(providers, "provider")
     return providers_dict

@@ -197,7 +197,7 @@ def complete_state_services_for_machine(
     if thread.is_alive():
         return iter([])

-    providers_dict = {name: "service" for name in providers}
+    providers_dict = dict.fromkeys(providers, "service")
     return providers_dict

@@ -207,19 +207,14 @@ def complete_secrets(
     """
     Provides completion functionality for clan secrets
     """
-    from .clan_uri import FlakeId
-    from .secrets.secrets import ListSecretsOptions, list_secrets
+    from .clan_uri import Flake
+    from .secrets.secrets import list_secrets

     flake = clan_dir_result if (clan_dir_result := clan_dir(None)) is not None else "."

-    options = ListSecretsOptions(
-        flake=FlakeId(flake),
-        pattern=None,
-    )
-
-    secrets = list_secrets(options.flake.path, options.pattern)
+    secrets = list_secrets(Flake(flake).path)

-    secrets_dict = {name: "secret" for name in secrets}
+    secrets_dict = dict.fromkeys(secrets, "secret")
     return secrets_dict

@@ -237,7 +232,7 @@ def complete_users(

     users = list_users(Path(flake))

-    users_dict = {name: "user" for name in users}
+    users_dict = dict.fromkeys(users, "user")
     return users_dict

@@ -256,7 +251,7 @@ def complete_groups(
     groups_list = list_groups(Path(flake))
     groups = [group.name for group in groups_list]

-    groups_dict = {name: "group" for name in groups}
+    groups_dict = dict.fromkeys(groups, "group")
     return groups_dict

@@ -296,7 +291,7 @@ def complete_target_host(
     if thread.is_alive():
         return iter([])

-    providers_dict = {name: "target_host" for name in target_hosts}
+    providers_dict = dict.fromkeys(target_hosts, "target_host")
     return providers_dict

@@ -401,7 +396,7 @@ def complete_tags(
     if any(thread.is_alive() for thread in threads):
         return iter([])

-    providers_dict = {name: "tag" for name in tags}
+    providers_dict = dict.fromkeys(tags, "tag")
     return providers_dict
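All of these hunks apply the same mechanical rewrite, the kind suggested by comprehension lints such as ruff's C420: dict.fromkeys(iterable, value) builds the same mapping as {k: value for k in iterable} when every key maps to one shared constant. One caveat worth remembering is that the value object is shared rather than copied per key, which is only safe for immutables like the strings used here:

names = ["alpha", "beta"]

assert {n: "machine" for n in names} == dict.fromkeys(names, "machine")

# Caution: the value is shared, not copied per key, so only use
# dict.fromkeys with immutable values.
shared = dict.fromkeys(names, [])
shared["alpha"].append(1)
print(shared)  # {'alpha': [1], 'beta': [1]}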
@@ -5,8 +5,6 @@ import urllib
 from enum import Enum
 from pathlib import Path

-from clan_cli.clan_uri import FlakeId
-
 from .errors import ClanError

 log = logging.getLogger(__name__)

@@ -67,9 +65,12 @@ def clan_templates(template_type: TemplateType) -> Path:
 def user_config_dir() -> Path:
     if sys.platform == "win32":
         return Path(os.getenv("APPDATA", Path("~\\AppData\\Roaming\\").expanduser()))
+    xdg_config = os.getenv("XDG_CONFIG_HOME")
+    if xdg_config:
+        return Path(xdg_config)
     if sys.platform == "darwin":
         return Path("~/Library/Application Support/").expanduser()
-    return Path(os.getenv("XDG_CONFIG_HOME", Path("~/.config").expanduser()))
+    return Path("~/.config").expanduser()


 def user_data_dir() -> Path:

@@ -77,9 +78,12 @@ def user_data_dir() -> Path:
         return Path(
             Path(os.getenv("LOCALAPPDATA", Path("~\\AppData\\Local\\").expanduser()))
         )
+    xdg_data = os.getenv("XDG_DATA_HOME")
+    if xdg_data:
+        return Path(xdg_data)
     if sys.platform == "darwin":
         return Path("~/Library/Application Support/").expanduser()
-    return Path(os.getenv("XDG_DATA_HOME", Path("~/.local/share").expanduser()))
+    return Path("~/.local/share").expanduser()


 def user_cache_dir() -> Path:

@@ -87,9 +91,12 @@ def user_cache_dir() -> Path:
         return Path(
             Path(os.getenv("LOCALAPPDATA", Path("~\\AppData\\Local\\").expanduser()))
         )
+    xdg_cache = os.getenv("XDG_CACHE_HOME")
+    if xdg_cache:
+        return Path(xdg_cache)
     if sys.platform == "darwin":
         return Path("~/Library/Caches/").expanduser()
-    return Path(os.getenv("XDG_CACHE_HOME", Path("~/.cache").expanduser()))
+    return Path("~/.cache").expanduser()


 def user_gcroot_dir() -> Path:

@@ -110,7 +117,7 @@ def user_history_file() -> Path:
     return user_config_dir() / "clan" / "history"


-def vm_state_dir(flake_url: FlakeId, vm_name: str) -> Path:
+def vm_state_dir(flake_url: str, vm_name: str) -> Path:
     clan_key = clan_key_safe(str(flake_url))
     return user_data_dir() / "clan" / "vmstate" / clan_key / vm_name
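The behavioural change in these three helpers: a set XDG_*_HOME variable now wins on every non-Windows platform, including macOS, whereas before the platform branch was consulted first and the XDG variable only applied as the Linux fallback. A condensed sketch of the new precedence (an illustrative helper, not the project's exact code, and omitting the win32 branch, which still comes first in the real functions):

import os
import sys
from pathlib import Path


def user_dir(xdg_var: str, mac_dir: str, linux_dir: str) -> Path:
    # 1. An explicit XDG override beats platform defaults.
    if value := os.getenv(xdg_var):
        return Path(value)
    # 2. Otherwise fall back to the platform convention.
    if sys.platform == "darwin":
        return Path(mac_dir).expanduser()
    return Path(linux_dir).expanduser()


print(user_dir("XDG_CACHE_HOME", "~/Library/Caches/", "~/.cache"))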
@@ -103,7 +103,9 @@ def generate_service_facts(
             service, machine.facts_data[service]["generator"]["prompt"]
         )
         env["prompt_value"] = prompt_value
-    if sys.platform == "linux":
+    from clan_cli import bwrap
+
+    if sys.platform == "linux" and bwrap.bubblewrap_works():
         cmd = bubblewrap_cmd(generator, facts_dir, secrets_dir)
     else:
         cmd = ["bash", "-c", generator]

@@ -200,8 +202,8 @@ def generate_facts(
     prompt: Callable[[str, str], str] = prompt_func,
 ) -> bool:
     was_regenerated = False
-    with TemporaryDirectory(prefix="facts-generate-") as tmp:
-        tmpdir = Path(tmp)
+    with TemporaryDirectory(prefix="facts-generate-") as _tmpdir:
+        tmpdir = Path(_tmpdir).resolve()

         for machine in machines:
             errors = 0
@@ -14,14 +14,6 @@ log = logging.getLogger(__name__)
 def get_all_facts(machine: Machine) -> dict:
     public_facts_store = get_public_facts_store(machine)

-    # for service in machine.secrets_data:
-    #     facts[service] = {}
-    #     for fact in machine.secrets_data[service]["facts"]:
-    #         fact_content = fact_store.get(service, fact)
-    #         if fact_content:
-    #             facts[service][fact] = fact_content.decode()
-    #         else:
-    #             log.error(f"Fact {fact} for service {service} is missing")
     return public_facts_store.get_all()
@@ -12,7 +12,7 @@ class FactStore(FactStoreBase):
         self.works_remotely = False

     def set(self, service: str, name: str, value: bytes) -> Path | None:
-        if self.machine.flake.is_local():
+        if self.machine.flake.is_local:
             fact_path = (
                 self.machine.flake.path
                 / "machines"

@@ -14,7 +14,7 @@ class FactStore(FactStoreBase):
     def __init__(self, machine: Machine) -> None:
         self.machine = machine
         self.works_remotely = False
-        self.dir = vm_state_dir(machine.flake, machine.name) / "facts"
+        self.dir = vm_state_dir(machine.flake.identifier, machine.name) / "facts"
         machine.debug(f"FactStore initialized with dir {self.dir}")

     def exists(self, service: str, name: str) -> bool:

@@ -10,7 +10,7 @@ from . import SecretStoreBase
 class SecretStore(SecretStoreBase):
     def __init__(self, machine: Machine) -> None:
         self.machine = machine
-        self.dir = vm_state_dir(machine.flake, machine.name) / "secrets"
+        self.dir = vm_state_dir(machine.flake.identifier, machine.name) / "secrets"
         self.dir.mkdir(parents=True, exist_ok=True)

     def set(

@@ -19,8 +19,8 @@ def upload_secrets(machine: Machine) -> None:
         machine.info("Secrets already uploaded")
         return

-    with TemporaryDirectory(prefix="facts-upload-") as tempdir:
-        local_secret_dir = Path(tempdir)
+    with TemporaryDirectory(prefix="facts-upload-") as _tempdir:
+        local_secret_dir = Path(_tempdir).resolve()
         secret_facts_store.upload(local_secret_dir)
         remote_secret_dir = Path(machine.secrets_upload_directory)
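Several hunks in this diff rename the context variable (tempdir to _tempdir) and call .resolve() on the resulting Path. The resolve step matters on macOS, where tempfile hands out paths under /var/... that are really symlinks into /private/var/...; resolving once up front keeps later path comparisons and subprocess working directories consistent. A tiny demonstration:

import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory(prefix="demo-") as _tmpdir:
    tmpdir = Path(_tmpdir).resolve()  # normalize symlinked temp roots
    # On macOS these can differ: /var/folders/... vs /private/var/folders/...
    print(_tmpdir)
    print(tmpdir)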
@@ -1,13 +1,21 @@
 import json
 import logging
+import pickle
 import re
-from dataclasses import dataclass, field
+from dataclasses import dataclass
+from hashlib import sha1
 from pathlib import Path
-from typing import Any
+from typing import Any, cast

-from clan_cli.cmd import run
+from clan_cli.cmd import Log, RunOpts, run
+from clan_cli.dirs import user_cache_dir
 from clan_cli.errors import ClanError
-from clan_cli.nix import nix_build, nix_config
+from clan_cli.nix import (
+    nix_build,
+    nix_command,
+    nix_config,
+    nix_metadata,
+)

 log = logging.getLogger(__name__)
@@ -60,43 +68,66 @@ class FlakeCacheEntry:

     def __init__(
         self,
-        value: str | float | dict[str, Any] | list[Any],
+        value: str | float | dict[str, Any] | list[Any] | None,
         selectors: list[Selector],
+        is_out_path: bool = False,
     ) -> None:
-        self.value: str | float | int | dict[str | int, FlakeCacheEntry]
-        self.selector: Selector
+        self.value: str | float | int | None | dict[str | int, FlakeCacheEntry]
+        self.selector: set[int] | set[str] | AllSelector
+        selector: Selector = AllSelector()

         if selectors == []:
             self.selector = AllSelector()
         elif isinstance(selectors[0], set):
-            self.selector = selectors[0]
+            selector = selectors[0]
         elif isinstance(selectors[0], int):
-            self.selector = {int(selectors[0])}
+            selector = int(selectors[0])
         elif isinstance(selectors[0], str):
-            self.selector = selectors[0]
-            self.value = {self.selector: FlakeCacheEntry(value, selectors[1:])}
-            return
-        else:
-            self.selector = selectors[0]
+            self.selector = {(selectors[0])}
+            selector = selectors[0]
+        elif isinstance(selectors[0], AllSelector):
+            self.selector = AllSelector()

-        if isinstance(value, dict):
+        if is_out_path:
+            if selectors != []:
+                msg = "Cannot index outPath"
+                raise ValueError(msg)
+            if not isinstance(value, str):
+                msg = "outPath must be a string"
+                raise ValueError(msg)
+            self.value = value
+
+        elif isinstance(selector, str):
+            self.value = {selector: FlakeCacheEntry(value, selectors[1:])}
+
+        elif isinstance(value, dict):
-            if isinstance(self.selector, set):
-                if not all(isinstance(v, str) for v in self.selector):
-                    msg = "Cannot index dict with non-str set"
-                    raise ValueError(msg)
             self.value = {}
             for key, value_ in value.items():
-                self.value[key] = FlakeCacheEntry(value_, selectors[1:])
+                if key == "outPath":
+                    self.value[key] = FlakeCacheEntry(
+                        value_, selectors[1:], is_out_path=True
+                    )
+                else:
+                    self.value[key] = FlakeCacheEntry(value_, selectors[1:])

         elif isinstance(value, list):
-            if isinstance(self.selector, int):
+            if isinstance(selector, int):
                 if len(value) != 1:
                     msg = "Cannot index list with int selector when value is not singleton"
                     raise ValueError(msg)
-                self.value = {}
-                self.value[int(self.selector)] = FlakeCacheEntry(
-                    value[0], selectors[1:]
-                )
-            if isinstance(self.selector, set):
-                if all(isinstance(v, int) for v in self.selector):
+                self.value = {
+                    int(selector): FlakeCacheEntry(value[0], selectors[1:]),
+                }
+            if isinstance(selector, set):
+                if all(isinstance(v, int) for v in selector):
                     self.value = {}
-                    for i, v in enumerate(self.selector):
+                    for i, v in enumerate([selector]):
                         assert isinstance(v, int)
                         self.value[int(v)] = FlakeCacheEntry(value[i], selectors[1:])
                 else:

@@ -108,10 +139,17 @@ class FlakeCacheEntry:
                     if isinstance(v, dict | list | str | float | int):
                         self.value[i] = FlakeCacheEntry(v, selectors[1:])
                 else:
-                    msg = f"expected integer selector or all for type list, but got {type(selectors[0])}"
+                    msg = f"expected integer selector or all for type list, but got {type(selector)}"
                     raise TypeError(msg)

-        elif isinstance(value, (str | float | int)):
+        elif isinstance(value, str) and value.startswith("/nix/store/"):
+            self.value = {}
+            self.selector = self.selector = {"outPath"}
+            self.value["outPath"] = FlakeCacheEntry(
+                value, selectors[1:], is_out_path=True
+            )
+
+        elif isinstance(value, (str | float | int | None)):
             self.value = value

     def insert(
@@ -136,7 +174,34 @@ class FlakeCacheEntry:
         if isinstance(selector, AllSelector):
             self.selector = AllSelector()
         elif isinstance(self.selector, set) and isinstance(selector, set):
-            self.selector.union(selector)
+            if all(isinstance(v, str) for v in self.selector) and all(
+                isinstance(v, str) for v in selector
+            ):
+                selector = cast(set[str], selector)
+                self.selector = cast(set[str], self.selector)
+                self.selector = self.selector.union(selector)
+            elif all(isinstance(v, int) for v in self.selector) and all(
+                isinstance(v, int) for v in selector
+            ):
+                selector = cast(set[int], selector)
+                self.selector = cast(set[int], self.selector)
+                self.selector = self.selector.union(selector)
+            else:
+                msg = "Cannot union set of different types"
+                raise ValueError(msg)
+        elif isinstance(self.selector, set) and isinstance(selector, int):
+            if all(isinstance(v, int) for v in self.selector):
+                self.selector = cast(set[int], self.selector)
+                self.selector.add(selector)
+
+        elif isinstance(self.selector, set) and isinstance(selector, str):
+            if all(isinstance(v, str) for v in self.selector):
+                self.selector = cast(set[str], self.selector)
+                self.selector.add(selector)
+
         else:
             msg = f"Cannot insert {selector} into {self.selector}"
             raise TypeError(msg)

         if isinstance(self.value, dict) and isinstance(value, dict):
             for key, value_ in value.items():
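The one-line bug being fixed here: set.union() returns a new set and leaves the receiver untouched, so the old self.selector.union(selector) silently discarded its result; the rewrite assigns the union back (and casts so the type checker accepts the homogeneous set types). For instance:

s = {1, 2}
s.union({3})        # returns {1, 2, 3} but does not modify s
print(s)            # {1, 2}

s = s.union({3})    # assign the result back
print(s)            # {1, 2, 3}

s.add(4)            # add() does mutate in place
print(s)            # {1, 2, 3, 4}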
@@ -167,6 +232,11 @@ class FlakeCacheEntry:
                     self.value[selector].insert(value[0], selectors[1:])
                 else:
                     self.value[selector] = FlakeCacheEntry(value[0], selectors[1:])
+        elif isinstance(value, str) and value.startswith("/nix/store/"):
+            self.value = {}
+            self.value["outPath"] = FlakeCacheEntry(
+                value, selectors[1:], is_out_path=True
+            )

         elif isinstance(value, (str | float | int)):
             if self.value:
@@ -184,13 +254,16 @@ class FlakeCacheEntry:
         else:
             selector = selectors[0]

-        if isinstance(self.value, str | float | int):
+        if isinstance(self.value, str) and self.value.startswith("/nix/store/"):
+            return Path(self.value).exists()
+        if isinstance(self.value, str | float | int | None):
             return selectors == []
         if isinstance(selector, AllSelector):
             if isinstance(self.selector, AllSelector):
-                return all(
+                result = all(
                     self.value[sel].is_cached(selectors[1:]) for sel in self.value
                 )
+                return result
             # TODO: check if we already have all the keys anyway?
             return False
         if (

@@ -200,11 +273,19 @@ class FlakeCacheEntry:
         ):
             if not selector.issubset(self.selector):
                 return False
-            return all(self.value[sel].is_cached(selectors[1:]) for sel in selector)
+
+            result = all(
+                self.value[sel].is_cached(selectors[1:]) if sel in self.value else True
+                for sel in selector
+            )
+
+            return result
         if isinstance(selector, str | int) and isinstance(self.value, dict):
             if selector in self.value:
-                return self.value[selector].is_cached(selectors[1:])
+                result = self.value[selector].is_cached(selectors[1:])
+                return result
             return False

         return False

     def select(self, selectors: list[Selector]) -> Any:
@@ -214,7 +295,10 @@ class FlakeCacheEntry:
         else:
             selector = selectors[0]

-        if isinstance(self.value, str | float | int):
+        if selectors == [] and isinstance(self.value, dict) and "outPath" in self.value:
+            return self.value["outPath"].value
+
+        if isinstance(self.value, str | float | int | None):
             return self.value
         if isinstance(self.value, dict):
             if isinstance(selector, AllSelector):
@@ -242,6 +326,7 @@ class FlakeCacheEntry:
         return f"FlakeCache {self.value}"


+@dataclass
 class FlakeCache:
     """
     an in-memory cache for flake outputs, uses a recursive FLakeCacheEntry structure

@@ -266,6 +351,17 @@ class FlakeCache:
         selectors = split_selector(selector_str)
         return self.cache.is_cached(selectors)

+    def save_to_file(self, path: Path) -> None:
+        path.parent.mkdir(parents=True, exist_ok=True)
+        with path.open("wb") as f:
+            pickle.dump(self.cache, f)
+
+    def load_from_file(self, path: Path) -> None:
+        if path.exists():
+            with path.open("rb") as f:
+                log.debug(f"Loading cache from {path}")
+                self.cache = pickle.load(f)
+

 @dataclass
 class Flake:
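With save_to_file/load_from_file, the evaluation cache now survives across CLI invocations as a pickle on disk, keyed (as the next hunk shows) by a sha1 of the flake's hash so a changed flake never reuses stale entries. A minimal sketch of the same persist-and-restore round-trip (generic, not the project's cache classes):

import pickle
from pathlib import Path

cache_file = Path("/tmp/demo-cache") / "entries.pickle"

# Save: create parent dirs, then dump the object.
cache_file.parent.mkdir(parents=True, exist_ok=True)
with cache_file.open("wb") as f:
    pickle.dump({"nixosConfigurations.machine1": "/nix/store/..."}, f)

# Load: only if a previous run left a file behind.
if cache_file.exists():
    with cache_file.open("rb") as f:
        print(pickle.load(f))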
@@ -275,51 +371,138 @@ class Flake:
     """

     identifier: str
-    cache: FlakeCache = field(default_factory=FlakeCache)
+    inputs_from: str | None = None
     hash: str | None = None
     flake_cache_path: Path | None = None
     store_path: str | None = None
-    cache: FlakeCache | None = None
+    _cache: FlakeCache | None = None
+    _path: Path | None = None
+    _is_local: bool | None = None

-    def __post_init__(self) -> None:
-        flake_prefetch = run(["nix", "flake", "prefetch", "--json", self.identifier])
+    @classmethod
+    def from_json(cls: type["Flake"], data: dict[str, Any]) -> "Flake":
+        return cls(data["identifier"])
+
+    def __str__(self) -> str:
+        return self.identifier
+
+    def __hash__(self) -> int:
+        return hash(self.identifier)
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, Flake):
+            return NotImplemented
+        return self.identifier == other.identifier
+
+    @property
+    def is_local(self) -> bool:
+        if self._is_local is None:
+            self.prefetch()
+        assert isinstance(self._is_local, bool)
+        return self._is_local
+
+    @property
+    def path(self) -> Path:
+        if self._path is None:
+            self.prefetch()
+        assert isinstance(self._path, Path)
+        return self._path
+
+    def prefetch(self) -> None:
+        """
+        Run prefetch to flush the cache as well as initializing it.
+        """
+        cmd = [
+            "flake",
+            "prefetch",
+            "--json",
+            "--option",
+            "flake-registry",
+            "",
+            self.identifier,
+        ]
+
+        if self.inputs_from:
+            cmd += ["--inputs-from", self.inputs_from]
+
+        flake_prefetch = run(nix_command(cmd))
         flake_metadata = json.loads(flake_prefetch.stdout)
         self.store_path = flake_metadata["storePath"]
         self.hash = flake_metadata["hash"]
-        self.cache = FlakeCache()

-    def prepare_cache(self, selectors: list[str]) -> None:
+        self._cache = FlakeCache()
         assert self.hash is not None
         hashed_hash = sha1(self.hash.encode()).hexdigest()
         self.flake_cache_path = Path(user_cache_dir()) / "clan" / "flakes" / hashed_hash
+        if self.flake_cache_path.exists():
+            self._cache.load_from_file(self.flake_cache_path)
+
+        if "original" not in flake_metadata:
+            flake_metadata = nix_metadata(self.identifier)
+
+        if flake_metadata["original"].get("url", "").startswith("file:"):
+            self._is_local = True
+            path = flake_metadata["original"]["url"].removeprefix("file://")
+            path = path.removeprefix("file:")
+            self._path = Path(path)
+        elif flake_metadata["original"].get("path"):
+            self._is_local = True
+            self._path = Path(flake_metadata["original"]["path"])
+        else:
+            self._is_local = False
+            assert self.store_path is not None
+            self._path = Path(self.store_path)
+
+    def get_from_nix(
+        self,
+        selectors: list[str],
+        nix_options: list[str] | None = None,
+    ) -> None:
+        if self._cache is None:
+            self.prefetch()
+        assert self._cache is not None
+
+        if nix_options is None:
+            nix_options = []
+
         config = nix_config()
         nix_code = f"""
         let
             flake = builtins.getFlake("path:{self.store_path}?narHash={self.hash}");
         in
-            flake.inputs.nixpkgs.legacyPackages.{config["system"]}.writeText "clan-flake-select" (builtins.toJSON [ ({" ".join([f'flake.clanInternals.lib.select "{attr}" flake' for attr in selectors])}) ])
+            flake.inputs.nixpkgs.legacyPackages.{config["system"]}.writeText "clan-flake-select" (
+                builtins.toJSON [ ({" ".join([f"flake.clanInternals.lib.select ''{attr}'' flake" for attr in selectors])}) ]
+            )
         """
-        cmd = nix_build(["--expr", nix_code, '-vvv'])
-        # breakpoint()
-        import subprocess
-        print("running nix build")
-        # breakpoint()
-        # build_output = run(cmd).stdout.strip()
-        import time
-        t = time.time()
-        # breakpoint()
-        import os
-        print(os.environ["HOME"])
-        build_output = subprocess.run(cmd, capture_output=True, text=True).stdout.strip()
-        print(f"ran nix build 1: {time.time() - t}")
-        t = time.time()
-        build_output = subprocess.run(cmd, capture_output=True, text=True).stdout.strip()
-        print(f"ran nix build 2: {time.time() - t}")
-        outputs = json.loads(Path(build_output).read_text())
-        print("got output")
+        build_output = Path(
+            run(
+                nix_build(["--expr", nix_code, *nix_options]), RunOpts(log=Log.NONE)
+            ).stdout.strip()
+        )
+
+        outputs = json.loads(build_output.read_text())
         if len(outputs) != len(selectors):
             msg = f"flake_prepare_cache: Expected {len(outputs)} outputs, got {len(outputs)}"
             raise ClanError(msg)
+        assert self.flake_cache_path is not None
+        self._cache.load_from_file(self.flake_cache_path)
         for i, selector in enumerate(selectors):
-            self.cache.insert(outputs[i], selector)
+            self._cache.insert(outputs[i], selector)
+        self._cache.save_to_file(self.flake_cache_path)

-    def select(self, selector: str) -> Any:
-        if not self.cache.is_cached(selector):
-            log.info(f"Cache miss for {selector}")
-            print("preparing cache")
-            self.prepare_cache([selector])
-            print("cache prepared")
-        return self.cache.select(selector)
+    def select(
+        self,
+        selector: str,
+        nix_options: list[str] | None = None,
+    ) -> Any:
+        if self._cache is None:
+            self.prefetch()
+        assert self._cache is not None
+        assert self.flake_cache_path is not None
+
+        if not self._cache.is_cached(selector):
+            log.debug(f"Cache miss for {selector}")
+            self.get_from_nix([selector], nix_options)
+        value = self._cache.select(selector)
+        return value
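End state of the rewrite: Flake is lazy (nothing runs until a property or select() forces prefetch()), and every evaluated selector lands in the on-disk cache keyed by the flake's hash. Hypothetical usage, assuming the API exactly as shown in this diff; the identifier and selector path are illustrative, and select() requires a flake that exposes clanInternals.lib.select, i.e. a clan-based flake:

from clan_cli.flake import Flake

# No evaluation happens at construction time.
flake = Flake("git+https://git.clan.lol/clan/clan-core")

# First access runs `nix flake prefetch` lazily and fills path/hash.
print(flake.is_local)

# A cache miss triggers one nix build; repeated queries are then
# served from the pickled FlakeCache on disk.
value = flake.select("clanInternals.inventory")  # selector is illustrative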
@@ -100,8 +100,8 @@ def flash_machine(
     secret_facts_store: SecretStoreBase = secret_facts_module.SecretStore(
         machine=machine
     )
-    with TemporaryDirectory(prefix="disko-install-") as tmpdir_:
-        tmpdir = Path(tmpdir_)
+    with TemporaryDirectory(prefix="disko-install-") as _tmpdir:
+        tmpdir = Path(_tmpdir)
         upload_dir = machine.secrets_upload_directory

         if upload_dir.startswith("/"):
Some files were not shown because too many files have changed in this diff.