Compare commits

1 commit

Author:  pinpox
SHA1:    61f238210f
Message: Remove clanModules
Date:    2025-07-17 12:15:22 +02:00

1085 changed files with 19227 additions and 51648 deletions


@@ -1,12 +0,0 @@
## Description of the change
<!-- Brief summary of the change if not already clear from the title -->
## Checklist
- [ ] Updated Documentation
- [ ] Added tests
- [ ] Doesn't affect backwards compatibility - or check the next points
- [ ] Add the breaking change and migration details to docs/release-notes.md
- !!! Review from another person is required *BEFORE* merge !!!
- [ ] Add introduction of major feature to docs/release-notes.md


@@ -1,20 +0,0 @@
name: Build Clan App (Darwin)
on:
schedule:
# Run every 4 hours
- cron: "0 */4 * * *"
workflow_dispatch:
push:
branches:
- main
jobs:
build-clan-app-darwin:
runs-on: nix
steps:
- uses: actions/checkout@v4
- name: Build clan-app for x86_64-darwin
run: |
nix build .#packages.x86_64-darwin.clan-app --system x86_64-darwin --log-format bar-with-logs


@@ -0,0 +1,9 @@
name: checks
on:
pull_request:
jobs:
checks-impure:
runs-on: nix
steps:
- uses: actions/checkout@v4
- run: nix run .#impure-checks

@@ -1,7 +1,6 @@
-#!/bin/sh
+#!/usr/bin/env bash
 # Shared script for creating pull requests in Gitea workflows
-set -eu
+set -euo pipefail
 # Required environment variables:
 # - CI_BOT_TOKEN: Gitea bot token for authentication
@@ -9,22 +8,22 @@ set -eu
 # - PR_TITLE: Title of the pull request
 # - PR_BODY: Body/description of the pull request
-if [ -z "${CI_BOT_TOKEN:-}" ]; then
+if [[ -z "${CI_BOT_TOKEN:-}" ]]; then
   echo "Error: CI_BOT_TOKEN is not set" >&2
   exit 1
 fi
-if [ -z "${PR_BRANCH:-}" ]; then
+if [[ -z "${PR_BRANCH:-}" ]]; then
   echo "Error: PR_BRANCH is not set" >&2
   exit 1
 fi
-if [ -z "${PR_TITLE:-}" ]; then
+if [[ -z "${PR_TITLE:-}" ]]; then
   echo "Error: PR_TITLE is not set" >&2
   exit 1
 fi
-if [ -z "${PR_BODY:-}" ]; then
+if [[ -z "${PR_BODY:-}" ]]; then
   echo "Error: PR_BODY is not set" >&2
   exit 1
 fi
@@ -44,12 +43,9 @@ resp=$(nix run --inputs-from . nixpkgs#curl -- -X POST \
   }" \
   "https://git.clan.lol/api/v1/repos/clan/clan-core/pulls")
-if ! pr_number=$(echo "$resp" | jq -r '.number'); then
-  echo "Error parsing response from pull request creation" >&2
-  exit 1
-fi
+pr_number=$(echo "$resp" | jq -r '.number')
-if [ "$pr_number" = "null" ]; then
+if [[ "$pr_number" == "null" ]]; then
   echo "Error creating pull request:" >&2
   echo "$resp" | jq . >&2
   exit 1
@@ -68,15 +64,12 @@ while true; do
     "delete_branch_after_merge": true
   }' \
   "https://git.clan.lol/api/v1/repos/clan/clan-core/pulls/$pr_number/merge")
-  if ! msg=$(echo "$resp" | jq -r '.message'); then
-    echo "Error parsing merge response" >&2
-    exit 1
-  fi
-  if [ "$msg" != "Please try again later" ]; then
+  msg=$(echo "$resp" | jq -r '.message')
+  if [[ "$msg" != "Please try again later" ]]; then
     break
   fi
   echo "Retrying in 2 seconds..."
   sleep 2
 done
 echo "Pull request #$pr_number merge initiated"

@@ -8,6 +8,6 @@ jobs:
     runs-on: nix
     steps:
       - uses: actions/checkout@v4
-      - run: nix run --print-build-logs .#deploy-docs
+      - run: nix run .#deploy-docs
         env:
           SSH_HOMEPAGE_KEY: ${{ secrets.SSH_HOMEPAGE_KEY }}


@@ -0,0 +1,28 @@
name: "Update pinned clan-core for checks"
on:
repository_dispatch:
workflow_dispatch:
schedule:
- cron: "51 2 * * *"
jobs:
update-pinned-clan-core:
runs-on: nix
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Update clan-core for checks
run: nix run .#update-clan-core-for-checks
- name: Create pull request
env:
CI_BOT_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
run: |
export GIT_AUTHOR_NAME=clan-bot GIT_AUTHOR_EMAIL=clan-bot@clan.lol GIT_COMMITTER_NAME=clan-bot GIT_COMMITTER_EMAIL=clan-bot@clan.lol
git commit -am "Update pinned clan-core for checks"
# Use shared PR creation script
export PR_BRANCH="update-clan-core-for-checks"
export PR_TITLE="Update Clan Core for Checks"
export PR_BODY="This PR updates the pinned clan-core flake input that is used for checks."
./.gitea/workflows/create-pr.sh

@@ -19,11 +19,8 @@ jobs:
         uses: Mic92/update-flake-inputs-gitea@main
         with:
           # Exclude private flakes and update-clan-core checks flake
-          exclude-patterns: "checks/impure/flake.nix"
+          exclude-patterns: "devFlake/private/flake.nix,checks/impure/flake.nix"
           auto-merge: true
-          git-author-name: "clan-bot"
-          git-committer-name: "clan-bot"
-          git-author-email: "clan-bot@clan.lol"
-          git-committer-email: "clan-bot@clan.lol"
           gitea-token: ${{ secrets.CI_BOT_TOKEN }}
           github-token: ${{ secrets.CI_BOT_GITHUB_TOKEN }}


@@ -0,0 +1,40 @@
name: "Update private flake inputs"
on:
repository_dispatch:
workflow_dispatch:
schedule:
- cron: "0 3 * * *" # Run daily at 3 AM
jobs:
update-private-flake:
runs-on: nix
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Update private flake inputs
run: |
# Update the private flake lock file
cd devFlake/private
nix flake update
cd ../..
# Update the narHash
bash ./devFlake/update-private-narhash
- name: Create pull request
env:
CI_BOT_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
run: |
export GIT_AUTHOR_NAME=clan-bot GIT_AUTHOR_EMAIL=clan-bot@clan.lol GIT_COMMITTER_NAME=clan-bot GIT_COMMITTER_EMAIL=clan-bot@clan.lol
# Check if there are any changes
if ! git diff --quiet; then
git add devFlake/private/flake.lock devFlake/private.narHash
git commit -m "Update dev flake"
# Use shared PR creation script
export PR_BRANCH="update-dev-flake"
export PR_TITLE="Update dev flake"
export PR_BODY="This PR updates the dev flake inputs and corresponding narHash."
else
echo "No changes detected in dev flake inputs"
fi

@@ -10,7 +10,7 @@ jobs:
     if: github.repository_owner == 'clan-lol'
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
        with:
          persist-credentials: false
      - uses: actions/create-github-app-token@v2

.gitignore

@@ -39,6 +39,7 @@ select
 # Generated files
 pkgs/clan-app/ui/api/API.json
 pkgs/clan-app/ui/api/API.ts
+pkgs/clan-app/ui/api/Inventory.ts
 pkgs/clan-app/ui/api/modules_schemas.json
 pkgs/clan-app/ui/api/schema.json
 pkgs/clan-app/ui/.fonts
@@ -52,5 +53,3 @@ pkgs/clan-app/ui/.fonts
 *.gif
 *.mp4
 *.mkv
-.jj

@@ -1,20 +1,2 @@
-clanServices/.* @pinpox @kenji
-lib/test/container-test-driver/.* @DavHau @mic92
-lib/modules/inventory/.* @hsjobeki
-lib/modules/inventoryClass/.* @hsjobeki
-pkgs/clan-app/ui/.* @hsjobeki @brianmcgee
-pkgs/clan-app/clan_app/.* @qubasa @hsjobeki
-pkgs/clan-cli/clan_cli/.* @lassulus @mic92 @kenji
-pkgs/clan-cli/clan_cli/(secrets|vars)/.* @DavHau @lassulus
-pkgs/clan-cli/clan_lib/log_machines/.* @Qubasa
-pkgs/clan-cli/clan_lib/ssh/.* @Qubasa @Mic92 @lassulus
-pkgs/clan-cli/clan_lib/tags/.* @hsjobeki
-pkgs/clan-cli/clan_lib/persist/.* @hsjobeki
-pkgs/clan-cli/clan_lib/flake/.* @lassulus
-pkgs/clan-cli/api.py @hsjobeki
-pkgs/clan-cli/openapi.py @hsjobeki
+nixosModules/clanCore/vars/.* @lopter
+pkgs/clan-cli/clan_cli/(secrets|vars)/.* @lopter

CONTRIBUTING.md

@@ -0,0 +1,4 @@
# Contributing to Clan
<!-- Local file: docs/CONTRIBUTING.md -->
Go to the Contributing guide at https://docs.clan.lol/guides/contributing/CONTRIBUTING

@@ -8,7 +8,7 @@ Our mission is simple: to democratize computing by providing tools that empower
 ## Features of Clan
-- **Full-Stack System Deployment:** Utilize Clan's toolkit alongside Nix's reliability to build and manage systems effortlessly.
+- **Full-Stack System Deployment:** Utilize Clans toolkit alongside Nix's reliability to build and manage systems effortlessly.
 - **Overlay Networks:** Secure, private communication channels between devices.
 - **Virtual Machine Integration:** Seamless operation of VM applications within the main operating system.
 - **Robust Backup Management:** Long-term, self-hosted data preservation.
@@ -24,13 +24,13 @@ If you're new to Clan and eager to dive in, start with our quickstart guide and
 In the Clan ecosystem, security is paramount. Learn how to handle secrets effectively:
-- **Secrets Management**: Securely manage secrets by consulting [Vars](https://docs.clan.lol/concepts/generators/)<!-- [secrets.md](docs/site/concepts/generators.md) -->.
+- **Secrets Management**: Securely manage secrets by consulting [secrets](https://docs.clan.lol/guides/getting-started/secrets/)<!-- [secrets.md](docs/site/guides/getting-started/secrets.md) -->.
 ### Contributing to Clan
 The Clan project thrives on community contributions. We welcome everyone to contribute and collaborate:
-- **Contribution Guidelines**: Make a meaningful impact by following the steps in [contributing](https://docs.clan.lol/guides/contributing/CONTRIBUTING/)<!-- [contributing.md](docs/CONTRIBUTING.md) -->.
+- **Contribution Guidelines**: Make a meaningful impact by following the steps in [contributing](https://docs.clan.lol/contributing/contributing/)<!-- [contributing.md](docs/CONTRIBUTING.md) -->.
 ## Join the revolution


@@ -0,0 +1,51 @@
(
{ ... }:
{
name = "borgbackup";
nodes.machine =
{ self, pkgs, ... }:
{
imports = [
self.clanModules.borgbackup
self.nixosModules.clanCore
{
services.openssh.enable = true;
services.borgbackup.repos.testrepo = {
authorizedKeys = [ (builtins.readFile ../assets/ssh/pubkey) ];
};
}
{
clan.core.settings.directory = ./.;
clan.core.state.testState.folders = [ "/etc/state" ];
environment.etc.state.text = "hello world";
systemd.tmpfiles.settings."vmsecrets" = {
"/etc/secrets/borgbackup/borgbackup.ssh" = {
C.argument = "${../assets/ssh/privkey}";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/borgbackup/borgbackup.repokey" = {
C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
z = {
mode = "0400";
user = "root";
};
};
};
# clan.core.facts.secretStore = "vm";
clan.core.vars.settings.secretStore = "vm";
clan.borgbackup.destinations.test.repo = "borg@localhost:.";
}
];
};
testScript = ''
start_all()
machine.systemctl("start --wait borgbackup-job-test.service")
assert "machine-test" in machine.succeed("BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes /run/current-system/sw/bin/borg-job-test list")
'';
}
)


@@ -0,0 +1,6 @@
{ fetchgit }:
fetchgit {
url = "https://git.clan.lol/clan/clan-core.git";
rev = "eea93ea22c9818da67e148ba586277bab9e73cea";
sha256 = "sha256-PV0Z+97QuxQbkYSVuNIJwUNXMbHZG/vhsA9M4cDTCOE=";
}
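The pinned rev and sha256 above go stale together. One way to refresh the pair (assuming nix-prefetch-git from nixpkgs is available) is:

    nix-prefetch-git https://git.clan.lol/clan/clan-core.git --rev <new-rev>

which prints the matching sha256 for the new revision.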

@@ -2,7 +2,6 @@
   self,
   lib,
   inputs,
-  privateInputs ? { },
   ...
 }:
 let
@@ -12,6 +11,7 @@ let
     elem
     filter
     filterAttrs
+    flip
     genAttrs
     hasPrefix
     pathExists
@@ -20,22 +20,21 @@
 in
 {
   imports = filter pathExists [
-    ./backups/flake-module.nix
-    ../nixosModules/clanCore/machine-id/tests/flake-module.nix
-    ../nixosModules/clanCore/state-version/tests/flake-module.nix
     ./devshell/flake-module.nix
     ./flash/flake-module.nix
-    ./impure/flake-module.nix
     ./installation/flake-module.nix
-    ./update/flake-module.nix
     ./morph/flake-module.nix
     ./nixos-documentation/flake-module.nix
     ./dont-depend-on-repo-root.nix
+    # clan core submodule tests
+    ../nixosModules/clanCore/machine-id/tests/flake-module.nix
+    ../nixosModules/clanCore/postgresql/tests/flake-module.nix
+    ../nixosModules/clanCore/state-version/tests/flake-module.nix
   ];
   flake.check = genAttrs [ "x86_64-linux" "aarch64-darwin" ] (
     system:
     let
-      checks = filterAttrs (
+      checks = flip filterAttrs self.checks.${system} (
        name: _check:
        !(hasPrefix "nixos-test-" name)
        && !(hasPrefix "nixos-" name)
@@ -47,7 +46,7 @@ in
        "clan-core-for-checks"
        "clan-deps"
      ])
-      ) self.checks.${system};
+      );
    in
    inputs.nixpkgs.legacyPackages.${system}.runCommand "fast-flake-checks-${system}"
      { passthru.checks = checks; }
@@ -86,11 +85,13 @@ in
      # Container Tests
      nixos-test-container = self.clanLib.test.containerTest ./container nixosTestArgs;
+      # nixos-test-zt-tcp-relay = self.clanLib.test.containerTest ./zt-tcp-relay nixosTestArgs;
+      # nixos-test-matrix-synapse = self.clanLib.test.containerTest ./matrix-synapse nixosTestArgs;
+      # nixos-test-postgresql = self.clanLib.test.containerTest ./postgresql nixosTestArgs;
      nixos-test-user-firewall-iptables = self.clanLib.test.containerTest ./user-firewall/iptables.nix nixosTestArgs;
      nixos-test-user-firewall-nftables = self.clanLib.test.containerTest ./user-firewall/nftables.nix nixosTestArgs;
      service-dummy-test = import ./service-dummy-test nixosTestArgs;
-      wireguard = import ./wireguard nixosTestArgs;
      service-dummy-test-from-flake = import ./service-dummy-test-from-flake nixosTestArgs;
    };
@@ -100,8 +101,6 @@
      "dont-depend-on-repo-root"
    ];
-    # Temporary workaround: Filter out docs package and devshell for aarch64-darwin due to CI builder hangs
-    # TODO: Remove this filter once macOS CI builder is updated
    flakeOutputs =
      lib.mapAttrs' (
        name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
@@ -109,18 +108,8 @@
      // lib.mapAttrs' (
        name: config: lib.nameValuePair "darwin-${name}" config.config.system.build.toplevel
      ) (self.darwinConfigurations or { })
-      // lib.mapAttrs' (n: lib.nameValuePair "package-${n}") (
-        if system == "aarch64-darwin" then
-          lib.filterAttrs (n: _: n != "docs" && n != "deploy-docs" && n != "option-search") packagesToBuild
-        else
-          packagesToBuild
-      )
-      // lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") (
-        if system == "aarch64-darwin" then
-          lib.filterAttrs (n: _: n != "docs") self'.devShells
-        else
-          self'.devShells
-      )
+      // lib.mapAttrs' (n: lib.nameValuePair "package-${n}") packagesToBuild
+      // lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
      // lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (
        self'.legacyPackages.homeConfigurations or { }
      );
@@ -128,13 +117,37 @@
    nixosTests
    // flakeOutputs
    // {
-      clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
-        cp -r ${privateInputs.clan-core-for-checks} $out
-        chmod -R +w $out
-        cp ${../flake.lock} $out/flake.lock
-        # Create marker file to disable private flake loading in tests
-        touch $out/.skip-private-inputs
+      # TODO: Automatically provide this check to downstream users to check their modules
+      clan-modules-json-compatible =
+        let
+          allSchemas = lib.mapAttrs (
+            _n: m:
+            let
+              schema =
+                (self.clanLib.evalService {
+                  modules = [ m ];
+                  prefix = [
+                    "checks"
+                    system
+                  ];
+                }).config.result.api.schema;
+            in
+            schema
+          ) self.clan.modules;
+        in
+        pkgs.runCommand "combined-result"
+          {
+            schemaFile = builtins.toFile "schemas.json" (builtins.toJSON allSchemas);
+          }
+          ''
+            mkdir -p $out
+            cat $schemaFile > $out/allSchemas.json
+          '';
+      clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
+        cp -r ${pkgs.callPackage ./clan-core-for-checks.nix { }} $out
+        chmod +w $out/flake.lock
+        cp ${../flake.lock} $out/flake.lock
      '';
    };
    packages = lib.optionalAttrs (pkgs.stdenv.isLinux) {

@@ -13,6 +13,8 @@
   fileSystems."/".device = lib.mkDefault "/dev/vda";
   boot.loader.grub.device = lib.mkDefault "/dev/vda";
+  # We need to use `mkForce` because we inherit from `test-install-machine`
+  # which currently hardcodes `nixpkgs.hostPlatform`
   nixpkgs.hostPlatform = lib.mkForce system;
   imports = [ self.nixosModules.test-flash-machine ];
@@ -26,24 +28,10 @@
   {
     imports = [ self.nixosModules.test-install-machine-without-system ];
-    # We don't want our system to define any `vars` generators as these can't
-    # be generated as the flake is inside `/nix/store`.
-    clan.core.settings.state-version.enable = false;
     clan.core.vars.generators.test = lib.mkForce { };
     disko.devices.disk.main.preCreateHook = lib.mkForce "";
-    # Every option here should match the options set through `clan flash write`
-    # if you get a mass rebuild on the disko derivation, this means you need to
-    # adjust something here. Also make sure that the injected json in clan flash write
-    # is up to date.
-    i18n.defaultLocale = "de_DE.UTF-8";
-    console.keyMap = "de";
-    services.xserver.xkb.layout = "de";
-    users.users.root.openssh.authorizedKeys.keys = [
-      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIRWUusawhlIorx7VFeQJHmMkhl9X3QpnvOdhnV/bQNG root@target\n"
-    ];
   };
 };
 perSystem =
@@ -56,31 +44,22 @@
   dependencies = [
     pkgs.disko
     pkgs.buildPackages.xorg.lndir
-    pkgs.glibcLocales
-    pkgs.kbd.out
     self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.ConfigIniFiles
     self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.FileSlurp
-    pkgs.bubblewrap
     self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
     self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
     self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript.drvPath
-  ]
-  ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+  ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
   closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
 in
 {
-  # Skip flash test on aarch64-linux for now as it's too slow
-  checks = lib.optionalAttrs (pkgs.stdenv.isLinux && pkgs.hostPlatform.system != "aarch64-linux") {
+  checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
    nixos-test-flash = self.clanLib.test.baseTest {
      name = "flash";
      nodes.target = {
        virtualisation.emptyDiskImages = [ 4096 ];
        virtualisation.memorySize = 4096;
+        virtualisation.useNixStoreImage = true;
+        virtualisation.writableStore = true;
        environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
        environment.etc."install-closure".source = "${closureInfo}/store-paths";
@@ -88,7 +67,7 @@
          substituters = lib.mkForce [ ];
          hashed-mirrors = null;
          connect-timeout = lib.mkForce 3;
-          flake-registry = "";
+          flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
          experimental-features = [
            "nix-command"
            "flakes"
@@ -97,10 +76,10 @@
      };
      testScript = ''
        start_all()
-        machine.succeed("echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIRWUusawhlIorx7VFeQJHmMkhl9X3QpnvOdhnV/bQNG root@target' > ./test_id_ed25519.pub")
        # Some distros like to automount disks with spaces
-        machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdc && mount /dev/vdc "/mnt/with spaces"')
-        machine.succeed("clan flash write --ssh-pubkey ./test_id_ed25519.pub --keymap de --language de_DE.UTF-8 --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.hostPlatform.system}")
+        machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdb && mount /dev/vdb "/mnt/with spaces"')
+        machine.succeed("clan flash write --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdb test-flash-machine-${pkgs.hostPlatform.system}")
      '';
    } { inherit pkgs self; };
  };


@@ -0,0 +1,51 @@
{
perSystem =
{
pkgs,
lib,
self',
...
}:
{
# a script that executes all other checks
packages.impure-checks = pkgs.writeShellScriptBin "impure-checks" ''
#!${pkgs.bash}/bin/bash
set -euo pipefail
unset CLAN_DIR
export PATH="${
lib.makeBinPath (
[
pkgs.gitMinimal
pkgs.nix
pkgs.coreutils
pkgs.rsync # needed to have rsync installed on the dummy ssh server
]
++ self'.packages.clan-cli-full.runtimeDependencies
)
}"
ROOT=$(git rev-parse --show-toplevel)
cd "$ROOT/pkgs/clan-cli"
# Set up custom git configuration for tests
export GIT_CONFIG_GLOBAL=$(mktemp)
git config --file "$GIT_CONFIG_GLOBAL" user.name "Test User"
git config --file "$GIT_CONFIG_GLOBAL" user.email "test@example.com"
export GIT_CONFIG_SYSTEM=/dev/null
# this disables dynamic dependency loading in clan-cli
export CLAN_NO_DYNAMIC_DEPS=1
jobs=$(nproc)
# Spawning worker in pytest is relatively slow, so we limit the number of jobs to 13
# (current number of impure tests)
jobs="$((jobs > 13 ? 13 : jobs))"
nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -n $jobs -m impure ./clan_cli $@"
# Clean up temporary git config
rm -f "$GIT_CONFIG_GLOBAL"
'';
};
}
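The same entry point the checks-impure workflow uses should also work for running these tests locally from the repository root, since the script is exposed as a flake package:

    nix run .#impure-checks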

@@ -1,8 +1,7 @@
 {
-  config,
   self,
   lib,
-  privateInputs,
   ...
 }:
 {
@@ -14,38 +13,31 @@
   # you can get a new one by adding
   # client.fail("cat test-flake/machines/test-install-machine/facter.json >&2")
   # to the installation test.
-  clan.machines = {
-    test-install-machine-without-system = {
+  clan.machines.test-install-machine-without-system = {
+    fileSystems."/".device = lib.mkDefault "/dev/vda";
+    boot.loader.grub.device = lib.mkDefault "/dev/vda";
+    imports = [ self.nixosModules.test-install-machine-without-system ];
+  };
+  clan.machines.test-install-machine-with-system =
+    { pkgs, ... }:
+    {
+      # https://git.clan.lol/clan/test-fixtures
+      facter.reportPath = builtins.fetchurl {
+        url = "https://git.clan.lol/clan/test-fixtures/raw/commit/4a2bc56d886578124b05060d3fb7eddc38c019f8/nixos-vm-facter-json/${pkgs.hostPlatform.system}.json";
+        sha256 =
+          {
+            aarch64-linux = "sha256:1rlfymk03rmfkm2qgrc8l5kj5i20srx79n1y1h4nzlpwaz0j7hh2";
+            x86_64-linux = "sha256:16myh0ll2gdwsiwkjw5ba4dl23ppwbsanxx214863j7nvzx42pws";
+          }
+          .${pkgs.hostPlatform.system};
+      };
       fileSystems."/".device = lib.mkDefault "/dev/vda";
       boot.loader.grub.device = lib.mkDefault "/dev/vda";
-      imports = [
-        self.nixosModules.test-install-machine-without-system
-      ];
+      imports = [ self.nixosModules.test-install-machine-without-system ];
     };
-  }
-  // (lib.listToAttrs (
-    lib.map (
-      system:
-      lib.nameValuePair "test-install-machine-${system}" {
-        imports = [
-          self.nixosModules.test-install-machine-without-system
-          (
-            if privateInputs ? test-fixtures then
-              {
-                facter.reportPath = privateInputs.test-fixtures + /nixos-vm-facter-json/${system}.json;
-              }
-            else
-              { nixpkgs.hostPlatform = system; }
-          )
-        ];
-        fileSystems."/".device = lib.mkDefault "/dev/vda";
-        boot.loader.grub.device = lib.mkDefault "/dev/vda";
-      }
-    ) (lib.filter (lib.hasSuffix "linux") config.systems)
-  ));
   flake.nixosModules = {
     test-install-machine-without-system =
       { lib, modulesPath, ... }:
@@ -157,17 +149,17 @@
       # vm-test-run-test-installation-> target: To debug, enter the VM and run 'systemctl status backdoor.service'.
       checks =
         let
+          # Custom Python package for port management utilities
           closureInfo = pkgs.closureInfo {
             rootPaths = [
-              privateInputs.clan-core-for-checks
-              self.nixosConfigurations."test-install-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
-              self.nixosConfigurations."test-install-machine-${pkgs.hostPlatform.system}".config.system.build.initialRamdisk
-              self.nixosConfigurations."test-install-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
+              self.checks.x86_64-linux.clan-core-for-checks
+              self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
+              self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.initialRamdisk
+              self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
              pkgs.stdenv.drvPath
              pkgs.bash.drvPath
              pkgs.buildPackages.xorg.lndir
-            ]
-            ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+            ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
          };
        in
        pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
@@ -215,7 +207,7 @@
          # Prepare test flake and Nix store
          flake_dir = prepare_test_flake(
            temp_dir,
-            "${self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks}",
+            "${self.checks.x86_64-linux.clan-core-for-checks}",
            "${closureInfo}"
          )
@@ -226,22 +218,6 @@
            "${../assets/ssh/privkey}"
          )
-          # Run clan install from host using port forwarding
-          clan_cmd = [
-            "${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
-            "machines",
-            "init-hardware-config",
-            "--debug",
-            "--flake", str(flake_dir),
-            "--yes", "test-install-machine-without-system",
-            "--host-key-check", "none",
-            "--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
-            "-i", ssh_conn.ssh_key,
-            "--option", "store", os.environ['CLAN_TEST_STORE']
-          ]
-          subprocess.run(clan_cmd, check=True)
          # Run clan install from host using port forwarding
          clan_cmd = [
            "${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
@@ -249,13 +225,12 @@
            "install",
            "--phases", "disko,install",
            "--debug",
-            "--flake", str(flake_dir),
+            "--flake", flake_dir,
            "--yes", "test-install-machine-without-system",
            "--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
            "-i", ssh_conn.ssh_key,
            "--option", "store", os.environ['CLAN_TEST_STORE'],
            "--update-hardware-config", "nixos-facter",
-            "--no-persist-state",
          ]
          subprocess.run(clan_cmd, check=True)
@@ -265,7 +240,7 @@
              target.shutdown()
            except BrokenPipeError:
              # qemu has already exited
-              target.connected = False
+              pass
          # Create a new machine instance that boots from the installed system
          installed_machine = create_test_machine(target, "${pkgs.qemu_test}", name="after_install")
@@ -296,10 +271,10 @@
          # Prepare test flake and Nix store
          flake_dir = prepare_test_flake(
            temp_dir,
-            "${self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks}",
+            "${self.checks.x86_64-linux.clan-core-for-checks}",
            "${closureInfo}"
          )
          # Set up SSH connection
          ssh_conn = setup_ssh_connection(
            target,
@@ -314,6 +289,9 @@
          assert not os.path.exists(hw_config_file), "hardware-configuration.nix should not exist initially"
          assert not os.path.exists(facter_file), "facter.json should not exist initially"
+          # Set CLAN_FLAKE for the commands
+          os.environ["CLAN_FLAKE"] = flake_dir
          # Test facter backend
          clan_cmd = [
            "${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
@@ -325,8 +303,7 @@
            "test-install-machine-without-system",
            "-i", ssh_conn.ssh_key,
            "--option", "store", os.environ['CLAN_TEST_STORE'],
-            "--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
-            "--yes"
+            f"nonrootuser@localhost:{ssh_conn.host_port}"
          ]
          result = subprocess.run(clan_cmd, capture_output=True, cwd=flake_dir)
@@ -350,9 +327,7 @@
            "test-install-machine-without-system",
            "-i", ssh_conn.ssh_key,
            "--option", "store", os.environ['CLAN_TEST_STORE'],
-            "--target-host",
-            f"nonrootuser@localhost:{ssh_conn.host_port}",
-            "--yes"
+            f"nonrootuser@localhost:{ssh_conn.host_port}"
          ]
          result = subprocess.run(clan_cmd, capture_output=True, cwd=flake_dir)

@@ -15,6 +15,7 @@ let
   networking.useNetworkd = true;
   services.openssh.enable = true;
   services.openssh.settings.UseDns = false;
+  services.openssh.settings.PasswordAuthentication = false;
   system.nixos.variant_id = "installer";
   environment.systemPackages = [
     pkgs.nixos-facter
@@ -146,11 +147,27 @@ let
   ];
   doCheck = false;
 };
+# Common closure info
+closureInfo = pkgs.closureInfo {
+  rootPaths = [
+    self.checks.x86_64-linux.clan-core-for-checks
+    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
+    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.initialRamdisk
+    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
+    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.clan.deployment.file
+    pkgs.stdenv.drvPath
+    pkgs.bash.drvPath
+    pkgs.buildPackages.xorg.lndir
+  ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+};
 in
 {
   inherit
     target
     baseTestMachine
     nixosTestLib
+    closureInfo
     ;
 }


@@ -0,0 +1,83 @@
(
{ pkgs, ... }:
{
name = "matrix-synapse";
nodes.machine =
{
config,
self,
lib,
...
}:
{
imports = [
self.clanModules.matrix-synapse
self.nixosModules.clanCore
{
clan.core.settings.directory = ./.;
services.nginx.virtualHosts."matrix.clan.test" = {
enableACME = lib.mkForce false;
forceSSL = lib.mkForce false;
};
clan.nginx.acme.email = "admins@clan.lol";
clan.matrix-synapse = {
server_tld = "clan.test";
app_domain = "matrix.clan.test";
};
clan.matrix-synapse.users.admin.admin = true;
clan.matrix-synapse.users.someuser = { };
clan.core.facts.secretStore = "vm";
clan.core.vars.settings.secretStore = "vm";
clan.core.vars.settings.publicStore = "in_repo";
# because we use systemd-tmpfiles to copy the secrets, we need a separate systemd-tmpfiles call to provision them.
boot.postBootCommands = "${config.systemd.package}/bin/systemd-tmpfiles --create /etc/tmpfiles.d/00-vmsecrets.conf";
systemd.tmpfiles.settings."00-vmsecrets" = {
# run before 00-nixos.conf
"/etc/secrets" = {
d.mode = "0700";
z.mode = "0700";
};
"/etc/secrets/matrix-synapse/synapse-registration_shared_secret" = {
f.argument = "supersecret";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/matrix-password-admin/matrix-password-admin" = {
f.argument = "matrix-password1";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/matrix-password-someuser/matrix-password-someuser" = {
f.argument = "matrix-password2";
z = {
mode = "0400";
user = "root";
};
};
};
}
];
};
testScript = ''
start_all()
machine.wait_for_unit("matrix-synapse")
machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 8008")
machine.wait_until_succeeds("${pkgs.curl}/bin/curl -Ssf -L http://localhost/_matrix/static/ -H 'Host: matrix.clan.test'")
machine.systemctl("restart matrix-synapse >&2") # check if user creation is idempotent
machine.execute("journalctl -u matrix-synapse --no-pager >&2")
machine.wait_for_unit("matrix-synapse")
machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 8008")
machine.succeed("${pkgs.curl}/bin/curl -Ssf -L http://localhost/_matrix/static/ -H 'Host: matrix.clan.test'")
'';
}
)


@@ -0,0 +1 @@
registration_shared_secret: supersecret

@@ -35,8 +35,7 @@
   pkgs.stdenv.drvPath
   pkgs.stdenvNoCC
   self.nixosConfigurations.test-morph-machine.config.system.build.toplevel
-]
-++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
 closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
 in


@@ -0,0 +1,73 @@
({
name = "postgresql";
nodes.machine =
{ self, config, ... }:
{
imports = [
self.nixosModules.clanCore
self.clanModules.postgresql
self.clanModules.localbackup
];
clan.postgresql.users.test = { };
clan.postgresql.databases.test.create.options.OWNER = "test";
clan.postgresql.databases.test.restore.stopOnRestore = [ "sample-service" ];
clan.localbackup.targets.hdd.directory = "/mnt/external-disk";
clan.core.settings.directory = ./.;
systemd.services.sample-service = {
wantedBy = [ "multi-user.target" ];
script = ''
while true; do
echo "Hello, world!"
sleep 5
done
'';
};
environment.systemPackages = [ config.services.postgresql.package ];
};
testScript =
{ nodes, ... }:
''
start_all()
machine.wait_for_unit("postgresql")
machine.wait_for_unit("sample-service")
# Create a test table
machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -c 'CREATE TABLE test (id serial PRIMARY KEY);' test")
machine.succeed("/run/current-system/sw/bin/localbackup-create >&2")
timestamp_before = int(machine.succeed("systemctl show --property=ExecMainStartTimestampMonotonic sample-service | cut -d= -f2").strip())
machine.succeed("test -e /mnt/external-disk/snapshot.0/machine/var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")
machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'INSERT INTO test DEFAULT VALUES;'")
machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'DROP TABLE test;'")
machine.succeed("test -e /var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")
machine.succeed("rm -rf /var/backup/postgres")
machine.succeed("NAME=/mnt/external-disk/snapshot.0 FOLDERS=/var/backup/postgres/test /run/current-system/sw/bin/localbackup-restore >&2")
machine.succeed("test -e /var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")
machine.succeed("""
set -x
${nodes.machine.clan.core.state.test.postRestoreCommand}
""")
machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -l >&2")
machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c '\dt' >&2")
timestamp_after = int(machine.succeed("systemctl show --property=ExecMainStartTimestampMonotonic sample-service | cut -d= -f2").strip())
assert timestamp_before < timestamp_after, f"{timestamp_before} >= {timestamp_after}: expected sample-service to be restarted after restore"
# Check that the table is still there
machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'SELECT * FROM test;'")
output = machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql --csv -c \"SELECT datdba::regrole FROM pg_database WHERE datname = 'test'\"")
owner = output.split("\n")[1]
assert owner == "test", f"Expected database owner to be 'test', got '{owner}'"
# check if restore works if the database does not exist
machine.succeed("runuser -u postgres -- dropdb test")
machine.succeed("${nodes.machine.clan.core.state.test.postRestoreCommand}")
machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c '\dt' >&2")
'';
})

@@ -28,35 +28,35 @@ nixosLib.runTest (
 testScript =
   { nodes, ... }:
   ''
-    import subprocess
-    import tempfile
-    from nixos_test_lib.nix_setup import setup_nix_in_nix
-    with tempfile.TemporaryDirectory() as temp_dir:
-        setup_nix_in_nix(temp_dir, None) # No closure info for this test
+    from nixos_test_lib.nix_setup import setup_nix_in_nix # type: ignore[import-untyped]
+    setup_nix_in_nix(None) # No closure info for this test
+    def run_clan(cmd: list[str], **kwargs) -> str:
+        import subprocess
+        clan = "${clan-core.packages.${hostPkgs.system}.clan-cli}/bin/clan"
+        clan_args = ["--flake", "${config.clan.test.flakeForSandbox}"]
+        return subprocess.run(
+            ["${hostPkgs.util-linux}/bin/unshare", "--user", "--map-user", "1000", "--map-group", "1000", clan, *cmd, *clan_args],
+            **kwargs,
+            check=True,
+        ).stdout
     start_all()
     admin1.wait_for_unit("multi-user.target")
     peer1.wait_for_unit("multi-user.target")
     # peer1 should have the 'hello' file
     peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")
     ls_out = peer1.succeed("ls -la ${nodes.peer1.clan.core.vars.generators.new-service.files.a-secret.path}")
     # Check that the file is owned by 'nobody'
     assert "nobody" in ls_out, f"File is not owned by 'nobody': {ls_out}"
     # Check that the file is in the 'users' group
     assert "users" in ls_out, f"File is not in the 'users' group: {ls_out}"
     # Check that the file is in the '0644' mode
     assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}"
-    # Run clan command
-    result = subprocess.run(
-        ["${
-          clan-core.packages.${hostPkgs.system}.clan-cli
-        }/bin/clan", "machines", "list", "--flake", "${config.clan.test.flakeForSandbox}"],
-        check=True
-    )
+    run_clan(["machines", "list"])
   '';
 }
)

@@ -27,9 +27,7 @@
 modules.new-service = {
   _class = "clan.service";
   manifest.name = "new-service";
-  roles.peer = {
-    description = "A peer that uses the new-service to generate some files.";
-  };
+  roles.peer = { };
   perMachine = {
     nixosModule = {
       # This should be generated by:

@@ -34,9 +34,7 @@ nixosLib.runTest (
 modules.new-service = {
   _class = "clan.service";
   manifest.name = "new-service";
-  roles.peer = {
-    description = "A peer that uses the new-service to generate some files.";
-  };
+  roles.peer = { };
   perMachine = {
     nixosModule = {
       # This should be generated by:


@@ -1,318 +0,0 @@
{ self, ... }:
{
# Machine for update test
clan.machines.test-update-machine = {
imports = [
self.nixosModules.test-update-machine
# Import the configuration file that will be created/updated during the test
./test-update-machine/configuration.nix
];
};
flake.nixosModules.test-update-machine =
{ lib, modulesPath, ... }:
{
imports = [
(modulesPath + "/testing/test-instrumentation.nix")
(modulesPath + "/profiles/qemu-guest.nix")
self.clanLib.test.minifyModule
../../lib/test/container-test-driver/nixos-module.nix
];
# Apply patch to fix x-initrd.mount filesystem handling in switch-to-configuration-ng
nixpkgs.overlays = [
(_final: prev: {
switch-to-configuration-ng = prev.switch-to-configuration-ng.overrideAttrs (old: {
patches = (old.patches or [ ]) ++ [ ./switch-to-configuration-initrd-mount-fix.patch ];
});
})
];
networking.hostName = "update-machine";
environment.etc."install-successful".text = "ok";
# Enable SSH and add authorized key for testing
services.openssh.enable = true;
services.openssh.settings.PasswordAuthentication = false;
users.users.root.openssh.authorizedKeys.keys = [ (builtins.readFile ../assets/ssh/pubkey) ];
services.openssh.knownHosts.localhost.publicKeyFile = ../assets/ssh/pubkey;
services.openssh.hostKeys = [
{
path = ../assets/ssh/privkey;
type = "ed25519";
}
];
security.sudo.wheelNeedsPassword = false;
boot.consoleLogLevel = lib.mkForce 100;
boot.kernelParams = [ "boot.shell_on_fail" ];
boot.isContainer = true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
# Preserve the IP addresses assigned by the test framework
# (based on virtualisation.vlans = [1] and node number 1)
networking.interfaces.eth1 = {
useDHCP = false;
ipv4.addresses = [
{
address = "192.168.1.1";
prefixLength = 24;
}
];
ipv6.addresses = [
{
address = "2001:db8:1::1";
prefixLength = 64;
}
];
};
nix.settings = {
flake-registry = "";
# required for setting the `flake-registry`
experimental-features = [
"nix-command"
"flakes"
];
};
# Define the mounts that exist in the container to prevent them from being stopped
fileSystems = {
"/" = {
device = "/dev/disk/by-label/nixos";
fsType = "ext4";
options = [ "x-initrd.mount" ];
};
"/nix/.rw-store" = {
device = "tmpfs";
fsType = "tmpfs";
options = [
"mode=0755"
];
};
"/nix/store" = {
device = "overlay";
fsType = "overlay";
options = [
"lowerdir=/nix/.ro-store"
"upperdir=/nix/.rw-store/upper"
"workdir=/nix/.rw-store/work"
];
};
};
};
perSystem =
{
pkgs,
...
}:
{
checks =
pkgs.lib.optionalAttrs (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system == "x86_64-linux")
{
nixos-test-update =
let
closureInfo = pkgs.closureInfo {
rootPaths = [
self.packages.${pkgs.hostPlatform.system}.clan-cli
self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-update-machine.config.system.build.toplevel
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.buildPackages.xorg.lndir
pkgs.bubblewrap
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
};
in
self.clanLib.test.containerTest {
name = "update";
nodes.machine = {
imports = [ self.nixosModules.test-update-machine ];
};
extraPythonPackages = _p: [
self.legacyPackages.${pkgs.hostPlatform.system}.nixosTestLib
];
testScript = ''
import tempfile
import os
import subprocess
from nixos_test_lib.ssh import setup_ssh_connection # type: ignore[import-untyped]
from nixos_test_lib.nix_setup import prepare_test_flake # type: ignore[import-untyped]
start_all()
machine.wait_for_unit("multi-user.target")
# Verify initial state
machine.succeed("test -f /etc/install-successful")
machine.fail("test -f /etc/update-successful")
# Set up test environment
with tempfile.TemporaryDirectory() as temp_dir:
# Prepare test flake and Nix store
flake_dir = prepare_test_flake(
temp_dir,
"${self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks}",
"${closureInfo}"
)
(flake_dir / ".clan-flake").write_text("") # Ensure .clan-flake exists
# Set up SSH connection
ssh_conn = setup_ssh_connection(
machine,
temp_dir,
"${../assets/ssh/privkey}"
)
# Update the machine configuration to add a new file
machine_config_path = os.path.join(flake_dir, "machines", "test-update-machine", "configuration.nix")
os.makedirs(os.path.dirname(machine_config_path), exist_ok=True)
# Note: update command doesn't accept -i flag, SSH key must be in ssh-agent
# Start ssh-agent and add the key
agent_output = subprocess.check_output(["${pkgs.openssh}/bin/ssh-agent", "-s"], text=True)
for line in agent_output.splitlines():
if line.startswith("SSH_AUTH_SOCK="):
os.environ["SSH_AUTH_SOCK"] = line.split("=", 1)[1].split(";")[0]
elif line.startswith("SSH_AGENT_PID="):
os.environ["SSH_AGENT_PID"] = line.split("=", 1)[1].split(";")[0]
# Add the SSH key to the agent
subprocess.run(["${pkgs.openssh}/bin/ssh-add", ssh_conn.ssh_key], check=True)
##############
print("TEST: update with --build-host local")
with open(machine_config_path, "w") as f:
f.write("""
{
environment.etc."update-build-local-successful".text = "ok";
}
""")
# rsync the flake into the container
os.environ["PATH"] = f"{os.environ['PATH']}:${pkgs.openssh}/bin"
subprocess.run(
[
"${pkgs.rsync}/bin/rsync",
"-a",
"--delete",
"-e",
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no",
f"{str(flake_dir)}/",
f"root@192.168.1.1:/flake",
],
check=True
)
# allow machine to ssh into itself
subprocess.run([
"ssh",
"-o", "UserKnownHostsFile=/dev/null",
"-o", "StrictHostKeyChecking=no",
f"root@192.168.1.1",
"mkdir -p /root/.ssh && chmod 700 /root/.ssh && echo \"$(cat \"${../assets/ssh/privkey}\")\" > /root/.ssh/id_ed25519 && chmod 600 /root/.ssh/id_ed25519",
], check=True)
# install the clan-cli package into the container's Nix store
subprocess.run(
[
"${pkgs.nix}/bin/nix",
"copy",
"--from",
f"{temp_dir}/store",
"--to",
"ssh://root@192.168.1.1",
"--no-check-sigs",
f"${self.packages.${pkgs.hostPlatform.system}.clan-cli}",
"--extra-experimental-features", "nix-command flakes",
],
check=True,
env={
**os.environ,
"NIX_SSHOPTS": "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no",
},
)
# Run ssh on the host to run the clan update command via --build-host local
subprocess.run([
"ssh",
"-o", "UserKnownHostsFile=/dev/null",
"-o", "StrictHostKeyChecking=no",
f"root@192.168.1.1",
"${self.packages.${pkgs.hostPlatform.system}.clan-cli}/bin/clan",
"machines",
"update",
"--debug",
"--flake", "/flake",
"--host-key-check", "none",
"--upload-inputs", # Use local store instead of fetching from network
"--build-host", "localhost",
"test-update-machine",
"--target-host", f"root@localhost",
], check=True)
# Verify the update was successful
machine.succeed("test -f /etc/update-build-local-successful")
##############
print("TEST: update with --target-host")
with open(machine_config_path, "w") as f:
f.write("""
{
environment.etc."target-host-update-successful".text = "ok";
}
""")
# Run clan update command
subprocess.run([
"${self.packages.${pkgs.hostPlatform.system}.clan-cli-full}/bin/clan",
"machines",
"update",
"--debug",
"--flake", flake_dir,
"--host-key-check", "none",
"--upload-inputs", # Use local store instead of fetching from network
"test-update-machine",
"--target-host", f"root@192.168.1.1:{ssh_conn.host_port}",
], check=True)
# Verify the update was successful
machine.succeed("test -f /etc/target-host-update-successful")
##############
print("TEST: update with --build-host")
# Update configuration again
with open(machine_config_path, "w") as f:
f.write("""
{
environment.etc."build-host-update-successful".text = "ok";
}
""")
# Run clan update command with --build-host
subprocess.run([
"${self.packages.${pkgs.hostPlatform.system}.clan-cli-full}/bin/clan",
"machines",
"update",
"--debug",
"--flake", flake_dir,
"--host-key-check", "none",
"--upload-inputs", # Use local store instead of fetching from network
"--build-host", f"root@192.168.1.1:{ssh_conn.host_port}",
"test-update-machine",
"--target-host", f"root@192.168.1.1:{ssh_conn.host_port}",
], check=True)
# Verify the second update was successful
machine.succeed("test -f /etc/build-host-update-successful")
'';
} { inherit pkgs self; };
};
};
}


@@ -1,17 +0,0 @@
diff --git a/src/main.rs b/src/main.rs
index 8baf5924a7db..1234567890ab 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1295,6 +1295,12 @@ won't take effect until you reboot the system.
for (mountpoint, current_filesystem) in current_filesystems {
// Use current version of systemctl binary before daemon is reexeced.
+
+ // Skip filesystem comparison if x-initrd.mount is present in options
+ if current_filesystem.options.contains("x-initrd.mount") {
+ continue;
+ }
+
let unit = path_to_unit_name(&current_system_bin, &mountpoint);
if let Some(new_filesystem) = new_filesystems.get(&mountpoint) {
if current_filesystem.fs_type != new_filesystem.fs_type


@@ -1,3 +0,0 @@
{
# Initial empty configuration
}


@@ -1,115 +0,0 @@
{
pkgs,
nixosLib,
clan-core,
lib,
...
}:
nixosLib.runTest (
{ ... }:
let
machines = [
"controller1"
"controller2"
"peer1"
"peer2"
"peer3"
];
in
{
imports = [
clan-core.modules.nixosTest.clanTest
];
hostPkgs = pkgs;
name = "wireguard";
clan = {
directory = ./.;
modules."@clan/wireguard" = import ../../clanServices/wireguard/default.nix;
inventory = {
machines = lib.genAttrs machines (_: { });
instances = {
/*
wg-test-one
controller2 controller1
peer2 peer1 peer3
*/
wg-test-one = {
module.name = "@clan/wireguard";
module.input = "self";
roles.controller.machines."controller1".settings = {
endpoint = "192.168.1.1";
};
roles.controller.machines."controller2".settings = {
endpoint = "192.168.1.2";
};
roles.peer.machines = {
peer1.settings.controller = "controller1";
peer2.settings.controller = "controller2";
peer3.settings.controller = "controller1";
};
};
# TODO: Will this actually work with conflicting ports? Can we re-use interfaces?
#wg-test-two = {
# module.name = "@clan/wireguard";
# roles.controller.machines."controller1".settings = {
# endpoint = "192.168.1.1";
# port = 51922;
# };
# roles.peer.machines = {
# peer1 = { };
# };
#};
};
};
};
testScript = ''
start_all()
# Show all addresses
machines = [peer1, peer2, peer3, controller1, controller2]
for m in machines:
m.systemctl("start network-online.target")
for m in machines:
m.wait_for_unit("network-online.target")
m.wait_for_unit("systemd-networkd.service")
print("\n\n" + "="*60)
print("STARTING PING TESTS")
print("="*60)
for m1 in machines:
for m2 in machines:
if m1 != m2:
print(f"\n--- Pinging from {m1.name} to {m2.name}.wg-test-one ---")
m1.wait_until_succeeds(f"ping -c1 {m2.name}.wg-test-one >&2")
'';
}
)


@@ -1,6 +0,0 @@
[
{
"publickey": "age1rnkc2vmrupy9234clyu7fpur5kephuqs3v7qauaw5zeg00jqjdasefn3cc",
"type": "age"
}
]


@@ -1,6 +0,0 @@
[
{
"publickey": "age1t2hhg99d4p2yymuhngcy5ccutp8mvu7qwvg5cdhck303h9e7ha9qnlt635",
"type": "age"
}
]


@@ -1,6 +0,0 @@
[
{
"publickey": "age1jts52rzlqcwjc36jkp56a7fmjn3czr7kl9ta2spkfzhvfama33sqacrzzd",
"type": "age"
}
]


@@ -1,6 +0,0 @@
[
{
"publickey": "age12nqnp0zd435ckp5p0v2fv4p2x4cvur2mnxe8use2sx3fgy883vaq4ae75e",
"type": "age"
}
]


@@ -1,6 +0,0 @@
[
{
"publickey": "age1sglr4zp34drjfydzeweq43fz3uwpul3hkh53lsfa9drhuzwmkqyqn5jegp",
"type": "age"
}
]


@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:zDF0RiBqaawpg+GaFkuLPomJ01Xu+lgY5JfUzaIk2j03XkCzIf8EMrmn6pRtBP3iUjPBm+gQSTQk6GHTONrixA5hRNyETV+UgQw=,iv:zUUCAGZ0cz4Tc2t/HOjVYNsdnrAOtid/Ns5ak7rnyCk=,tag:z43WtNSue4Ddf7AVu21IKA==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlY1NEdjAzQm5RMFZWY3BJ\nclp6c01FdlZFK3dOSDB4cHc1NTdwMXErMFJFCnIrRVFNZEFYOG1rVUhFd2xsbTJ2\nVkJHNmdOWXlOcHJoQ0QzM1VyZmxmcGcKLS0tIFk1cEx4dFdvNGRwK1FWdDZsb1lR\nV2d1RFZtNzZqVFdtQ1FzNStEcEgyUUkKx8tkxqJz/Ko3xgvhvd6IYiV/lRGmrY13\nUZpYWR9tsQwZAR9dLjCyVU3JRuXeGB1unXC1CO0Ff3R0A/PuuRHh+g==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:19:37Z",
"mac": "ENC[AES256_GCM,data:8RGOUhZ2LGmC9ugULwHDgdMrtdo9vzBm3BJmL4XTuNJKm0NlKfgNLi1E4n9DMQ+kD4hKvcwbiUcwSGE8jZD6sm7Sh3bJi/HZCoiWm/O/OIzstli2NNDBGvQBgyWZA5H+kDjZ6aEi6icNWIlm5gsty7KduABnf5B3p0Bn5Uf5Bio=,iv:sGZp0XF+mgocVzAfHF8ATdlSE/5zyz5WUSRMJqNeDQs=,tag:ymYVBRwF5BOSAu5ONU2qKw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}


@@ -1 +0,0 @@
../../../users/admin


@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:dHM7zWzqnC1QLRKYpbI2t63kOFnSaQy6ur9zlkLQf17Q03CNrqUsZtdEbwMnLR3llu7eVMhtvVRkXjEkvn3leb9HsNFmtk/DP70=,iv:roEZsBFqRypM106O5sehTzo7SySOJUJgAR738rTtOo8=,tag:VDd9/6uU0SAM7pWRLIUhUQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBKTEVYUmVGbUtOcHZ4cnc3\nKzNETnlxaVRKYTI3eWVHdEoyc3l2SnhsZ1J3CnB2RnZrOXM5Uml6TThDUlZjY25J\nbkJ6eUZ2ckN1NWpNUU9IaE93UDJQdlEKLS0tIC95ZDhkU0R1VHhCdldxdW4zSmps\nN3NqL1cvd05hRTRPdDA3R2pzNUFFajgKS+DJH14fH9AvEAa3PoUC1jEqKAzTmExN\nl32FeHTHbGMo1PKeaFm+Eg0WSpAmFE7beBunc5B73SW30ok6x4FcQw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:19:47Z",
"mac": "ENC[AES256_GCM,data:77EnuBQyguvkCtobUg8/6zoLHjmeGDrSBZuIXOZBMxdbJjzhRg++qxQjuu6t0FoWATtz7u4Y3/jzUMGffr/N5HegqSq0D2bhv7AqJwBiVaOwd80fRTtM+YiP/zXsCk52Pj/Gadapg208bDPQ1BBDOyz/DrqZ7w//j+ARJjAnugI=,iv:IuTDmJKZEuHXJXjxrBw0gP2t6vpxAYEqbtpnVbavVCY=,tag:4EnpX6rOamtg1O+AaEQahQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:wcSsqxTKiMAnzPwxs5DNjcSdLyjVQ9UOrZxfSbOkVfniwx6F7xz6dLNhaDq7MHQ0vRWpg28yNs7NHrp52bYFnb/+eZsis46WiCw=,iv:B4t1lvS2gC601MtsmZfEiEulLWvSGei3/LSajwFS9Vs=,tag:hnRXlZyYEFfLJUrw1SqbSQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAybUgya2VEdzMvRG1hdkpu\nM2pGNmcyVmcvYVZ1ZjJlY3A1bXFUUUtkMTI0CmJoRFZmejZjN2UxUXNuc1k5WnE2\nNmxIcnpNQ1lJZ3ZKSmhtSlVURXJTSUUKLS0tIGU4Wi9yZ3VYekJkVW9pNWFHblFk\na0gzbTVKUWdSam1sVjRUaUlTdVd5YWMKntRc9yb9VPOTMibp8QM5m57DilP01N/X\nPTQaw8oI40znnHdctTZz7S+W/3Te6sRnkOhFyalWmsKY0CWg/FELlA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:19:58Z",
"mac": "ENC[AES256_GCM,data:8nq+ugkUJxE24lUIySySs/cAF8vnfqr936L/5F0O1QFwNrbpPmKRXkuwa6u0V+187L2952Id20Fym4ke59f3fJJsF840NCKDwDDZhBZ20q9GfOqIKImEom/Nzw6D0WXQLUT3w8EMyJ/F+UaJxnBNPR6f6+Kx4YgStYzCcA6Ahzg=,iv:VBPktEz7qwWBBnXE+xOP/EUVy7/AmNCHPoK56Yt/ZNc=,tag:qXONwOLFAlopymBEf5p4Sw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:4d3ri0EsDmWRtA8vzvpPRLMsSp4MIMKwvtn0n0pRY05uBPXs3KcjnweMPIeTE1nIhqnMR2o2MfLah5TCPpaFax9+wxIt74uacbg=,iv:0LBAldTC/hN4QLCxgXTl6d9UB8WmUTnj4sD2zHQuG2w=,tag:zr/RhG/AU4g9xj9l2BprKw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvV0JnZDhlU1piU1g2cng0\ncytKOEZ6WlZlNGRGUjV3MmVMd2Nzc0ZwelgwCjBGdThCUGlXbVFYdnNoZWpJZ3Vm\nc2xkRXhxS09vdzltSVoxLzhFSVduak0KLS0tIE5DRjJ6cGxiVlB1eElHWXhxN1pJ\nYWtIMDMvb0Z6akJjUzlqeEFsNHkxL2cKpghv/QegnXimeqd9OPFouGM//jYvoVmw\n2d4mLT2JSMkEhpfGcqb6vswhdJfCiKuqr2B4bqwAnPMaykhsm8DFRQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:20:08Z",
"mac": "ENC[AES256_GCM,data:BzlQVAJ7HzcxNPKB3JhabqRX/uU0EElj172YecjmOflHnzz/s9xgfdAfJK/c53hXlX4LtGPnubH7a8jOolRq98zmZeBYE27+WLs2aN7Ufld6mYk90/i7u4CqR+Fh2Kfht04SlUJCjnS5A9bTPwU9XGRHJ0BiOhzTuSMUJTRaPRM=,iv:L50K5zc1o99Ix9nP0pb9PRH+VIN2yvq7JqKeVHxVXmc=,tag:XFLkSCsdbTPxbasDYYxcFQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:qfLm6+g1vYnESCik9uyBeKsY6Ju2Gq3arnn2I8HHNO67Ri5BWbOQTvtz7WT8/q94RwVjv8SGeJ/fsJSpwLSrJSbqTZCPAnYwzzQ=,iv:PnA9Ao8RRELNhNQYbaorstc0KaIXRU7h3+lgDCXZFHk=,tag:VeLgYQYwqthYihIoQTwYiA==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBNWVVQaDJFd0N3WHptRC9Z\nZTgxTWh5bnU1SkpqRWRXZnhPaFhpSVJmVEhrCjFvdHFYenNWaFNrdXlha09iS2xj\nOTZDcUNkcHkvTDUwNjM4Z3gxUkxreUEKLS0tIE5oY3Q2bWhsb2FSQTVGTWVSclJw\nWllrelRwT3duYjJJbTV0d3FwU1VuNlkK2eN3fHFX/sVUWom8TeZC9fddqnSCsC1+\nJRCZsG46uHDxqLcKIfdFWh++2t16XupQYk3kn+NUR/aMc3fR32Uwjw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:20:18Z",
"mac": "ENC[AES256_GCM,data:nUwsPcP1bsDjAHFjQ1NlVkTwyZY4B+BpzNkMx9gl0rE14j425HVLtlhlLndhRp+XMpnDldQppLAAtSdzMsrw8r5efNgTRl7cu4Fy/b9cHt84k7m0aou5lrGus9SV1bM7/fzC9Xm7CSXBcRzyDGVsKC6UBl1rx+ybh7HyAN05XSo=,iv:It57H+zUUNPkoN1D8sYwyZx5zIFIga7mydhGUHYBCGE=,tag:mBQdYqUpjPknbYa13qESyw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1 +0,0 @@
../../../../../../sops/machines/controller1

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:noe913+28JWkoDkGGMu++cc1+j5NPDoyIhWixdsowoiVO3cTWGkZ88SUGO5D,iv:ynYMljwqMcBdk8RpVcw/2Jflg2RCF28r4fKUgIAF8B4=,tag:+TsXDJgfUhKgg4iQVXKKlQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBhYVRReTZBQ05GYmVBVjhS\nNXM5aFlhVzZRaVl6UHl6S3JnMC9Sb1dwZ1ZjCmVuS2dEVExYZWROVklUZWFCSnM2\nZnlxbVNseTM2c0Q0TjhsT3NzYmtqREUKLS0tIHBRTFpvVGt6d1cxZ2lFclRsUVhZ\nZDlWaG9PcXVrNUZKaEgxWndjUDVpYjgKt0eOhAgcYdkg9JSEakx4FjChLTn3pis+\njOkuGd4JfXMKcwC7vJV5ygQBxzVJSBw+RucP7sYCBPK0m8Voj94ntw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1rnkc2vmrupy9234clyu7fpur5kephuqs3v7qauaw5zeg00jqjdasefn3cc",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6MFJqNHNraG9DSnJZMFdz\ndU8zVXNTamxROFd1dWtuK2RiekhPdHhleVhFCi8zNWJDNXJMRUlDdjc4Q0UycTIz\nSGFGSmdnNU0wZWlDaTEwTzBqWjh6SFkKLS0tIEJOdjhOMDY2TUFLb3RPczNvMERx\nYkpSeW5VOXZvMlEvdm53MDE3aUFTNjgKyelSTjrTIR9I3rJd3krvzpsrKF1uGs4J\n4MtmQj0/3G+zPYZVBx7b3HF6B3f1Z7LYh05+z7nCnN/duXyPnDjNcg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:19:37Z",
"mac": "ENC[AES256_GCM,data:+DmIkPG/H6tCtf8CvB98E1QFXv08QfTcCB3CRsi+XWnIRBkryRd/Au9JahViHMdK7MED8WNf84NWTjY2yH4y824/DjI8XXNMF1iVMo0CqY42xbVHtUuhXrYeT+c8CyEw+M6zfy1jC0+Bm3WQWgagz1G6A9SZk3D2ycu0N08+axA=,iv:kwBjTYebIy5i2hagAajSwwuKnSkrM9GyrnbeQXB2e/w=,tag:EgKJ5gVGYj1NGFUduxLGfg==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../../../../sops/users/admin

View File

@@ -1 +0,0 @@
lQfR7GhivN87XoXruTGOPjVPhNu1Brt//wyc3pdwE20=

View File

@@ -1 +0,0 @@
7470bb5c79df224a9b7f5a2259acd2e46db763c27e24cb3416c8b591cb328077

View File

@@ -1 +0,0 @@
../../../../../../sops/machines/controller2

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:2kehACgvNgoYGPwnW7p86BR0yUu689Chth6qZf9zoJtuTY9ATS68dxDyBc5S,iv:qb2iDUtExegTeN3jt6SA8RnU61W5GDDhn56QXiQT4gw=,tag:pSGPICX5p6qlZ1WMVoIEYQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBSTTR5TDY4RE9VYmlCK1dL\nWkVRcVZqVDlsbmQvUlJmdzF2b1Z1S0k3NngwCkFWNzRVaERtSmFsd0o2aFJOb0ZX\nSU9yUnVaNi9IUjJWeGRFcEpDUXo5WkEKLS0tIEczNkxiYnJsTWRoLzFhQVF1M21n\nWnZEdGV1N2N5d1FZQkJUQ1IrdGFLblkKPTpha2bxS8CCAMXWTDKX/WOcdvggaP3Y\nqewyahDNzb4ggP+LNKp55BtwFjdvoPoq4BpYOOgMRbQMMk+H1o9WFw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1t2hhg99d4p2yymuhngcy5ccutp8mvu7qwvg5cdhck303h9e7ha9qnlt635",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBYcEZ6Tzk3M0pkV0tOdTBj\nenF2a0tHNnhBa0NrazMwV1VBbXBZR3pzSHpvCnBZOEU0VlFHS1FHcVpTTDdPczVV\nV0RFSlZ0VmIzWGoydEdKVXlIUE9OOEkKLS0tIFZ0cWVBR1loeVlWa2c4U3oweXE2\ncm1ja0JCS3U5Nk41dlAzV2NabDc2bDQKdgCDNnpRZlFPnEGlX6fo0SQX4yOB+E6r\ntnSwofR3xxZvkyme/6JJU5qBZXyCXEAhKMRkFyvJANXzMJAUo/Osow==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:19:48Z",
"mac": "ENC[AES256_GCM,data:e3EkL8vwRhLsec83Zi9DE3PKT+4RwgiffpN4QHcJKTgmDW6hzizWc5kAxbNWGJ9Qqe6sso2KY7tc+hg1lHEsmzjCbg153p8h+7lVI2XT6adi/CS8WZ2VpeL+0X9zDQCjqHmrESZAYFBdkLqO4jucdf0Pc3CKKD+N3BDDTwSUvHM=,iv:xvR7dJL8sdYen00ovrYT8PNxhB9XxSWDSRz1IK23I/o=,tag:OyhAvllBgfAp3eGeNpR/Nw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../../../../sops/users/admin

View File

@@ -1 +0,0 @@
5Z7gbLFbXpEFfomW2pKyZBpZN5xvUtiqrIL0GVfNtQ8=

View File

@@ -1 +0,0 @@
c3672fdb9fb31ddaf6572fc813cf7a8fe50488ef4e9d534c62d4f29da60a1a99

View File

@@ -1 +0,0 @@
../../../../../../sops/machines/peer1

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:b+akw85T3D9xc75CPLHucR//k7inpxKDvgpR8tCNKwNDRVjVHjcABhfZNLXW,iv:g11fZE8UI0MVh9GKdjR6leBlxa4wN7ZubozXG/VlBbw=,tag:0YkzWCW3zJ3Mt3br/jmTYw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1jts52rzlqcwjc36jkp56a7fmjn3czr7kl9ta2spkfzhvfama33sqacrzzd",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBXWkJUR0pIa2xOSEw2dThm\nYlNuOHZCVW93Wkc5LzE4YmpUTHRkZlk3ckc4CnN4M3ZRMWNFVitCT3FyWkxaR0di\nb0NmSXFhRHJmTWg0d05OcWx1LytscEEKLS0tIEtleTFqU3JrRjVsdHpJeTNuVUhF\nWEtnOVlXVXRFamFSak5ia2F2b0JiTzAKlhOBZvZ4AN+QqAYQXvd6YNmgVS4gtkWT\nbV3bLNTgwtrDtet9NDHM8vdF+cn5RZxwFfgmTbDEow6Zm8EXfpxj/g==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6YVYyQkZqMTJYQTlyRG5Y\nbnJ2UkE1TS9FZkpSa2tQbk1hQjViMi9OcGk0CjFaZUdjU3JtNzh0bDFXdTdUVW4x\nanFqZHZjZjdzKzA2MC8vTWh3Uy82UGcKLS0tIDhyOFl3UGs3czdoMlpza3UvMlB1\nSE90MnpGc05sSCtmVWg0UVNVdmRvN2MKHlCr4U+7bsoYb+2fgT4mEseZCEjxrtLu\n55sR/4YH0vqMnIBnLTSA0e+WMrs3tQfseeJM5jY/ZNnpec1LbxkGTg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:19:58Z",
"mac": "ENC[AES256_GCM,data:gEoEC9D2Z7k5F8egaY1qPXT5/96FFVsyofSBivQ28Ir/9xHX2j40PAQrYRJUWsk/GAUMOyi52Wm7kPuacw+bBcdtQ0+MCDEmjkEnh1V83eZ/baey7iMmg05uO92MYY5o4e7ZkwzXoAeMCMcfO0GqjNvsYJHF1pSNa+UNDj+eflw=,iv:dnIYpvhAdvUDe9md53ll42krb0sxcHy/toqGc7JFxNA=,tag:0WkZU7GeKMD1DQTYaI+1dg==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../../../../sops/users/admin

View File

@@ -1 +0,0 @@
juK7P/92N2t2t680aLIRobHc3ts49CsZBvfZOyIKpUc=

View File

@@ -1 +0,0 @@
b36142569a74a0de0f9b229f2a040ae33a22d53bef5e62aa6939912d0cda05ba

View File

@@ -1 +0,0 @@
../../../../../../sops/machines/peer2

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:apX2sLwtq6iQgLJslFwiRMNBUe0XLzLQbhKfmb2pKiJG7jGNHUgHJz3Ls4Ca,iv:HTDatm3iD5wACTkkd3LdRNvJfnfg75RMtn9G6Q7Fqd4=,tag:Mfehlljnes5CFD1NJdk27A==,type:str]",
"sops": {
"age": [
{
"recipient": "age12nqnp0zd435ckp5p0v2fv4p2x4cvur2mnxe8use2sx3fgy883vaq4ae75e",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBVZzFyMUZsd2V2VWxOUmhP\nZE8yZTc4Q0RkZisxR25NemR1TzVDWmJZVjBVClA1MWhsU0xzSG16aUx3cWFWKzlG\nSkxrT09OTkVqLzlWejVESE1QWHVJaFkKLS0tIGxlaGVuWU43RXErNTB3c3FaUnM3\nT0N5M253anZkbnFkZWw2VHA0eWhxQW8Kd1PMtEX1h0Hd3fDLMi++gKJkzPi9FXUm\n+uYhx+pb+pJM+iLkPwP/q6AWC7T0T4bHfekkdzxrbsKMi73x/GrOiw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBqVzRIMWdlNjVwTURyMFkv\nSUhiajZkZVNuWklRYit6cno4UzNDa2szOFN3CkQ2TWhHb25pbmR1MlBsRXNLL2lx\ncVZ3c3BsWXN2aS9UUVYvN3I4S0xUSmMKLS0tIE5FV0U5aXVUZk9XL0U0Z2ZSNGd5\nbU9zY3IvMlpSNVFLYkRNQUpUYVZOWFUK7j4Otzb8CJTcT7aAj9/irxHEDXh1HkTg\nzz7Ho8/ZncNtaCVHlHxjTgVW9d5aIx8fSsV9LRCFwHMtNzvwj1Nshg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:20:08Z",
"mac": "ENC[AES256_GCM,data:e7WNVEz78noHBiz6S3A6qNfop+yBXB3rYN0k4GvaQKz3b99naEHuqIF8Smzzt4XrbbiPKu2iLa5ddLBlqqsi32UQUB8JS9TY7hvW8ol+jpn0VxusGCXW9ThdDEsM/hXiPyr331C73zTvbOYI1hmcGMlJL9cunVRO9rkMtEqhEfo=,iv:6zt7wjIs1y5xDHNK+yLOwoOuUpY7/dOGJGT6UWAFeOg=,tag:gzFTgoxhoLzUV0lvzOhhfg==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../../../../sops/users/admin

View File

@@ -1 +0,0 @@
XI9uSaQRDBCb82cMnGzGJcbqRfDG/IXZobyeL+kV03k=

View File

@@ -1 +0,0 @@
360f9fce4a984eb87ce2a673eb5341ecb89c0f62126548d45ef25ff5243dd646

View File

@@ -1 +0,0 @@
../../../../../../sops/machines/peer3

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:Gluvjes/3oH5YsDq00JDJyJgoEFcj56smioMArPSt309MDGExYX2QsCzeO1q,iv:oBBJRDdTj/1dWEvzhdFKQ2WfeCKyavKMLmnMbqnU5PM=,tag:2WNFxKz2dWyVcybpm5N4iw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBtQWpjRmhZTFdPa2VSZkFN\nbUczMlY5bDBmMTdoMy8xcWxMaXpWVitMZGdjCnRWb2Y3eGpHU1hmNHRJVFBqbU5w\nVEZGdUIrQXk0U0dUUEZ6bE5EMFpTRHMKLS0tIGpYSmZmQThJUTlvTHpjc05ZVlM4\nQWhTOWxnUHZnYlJ3czE3ZUJ0L3ozWTQK3a7N0Zpzo4sUezYveqvKR49RUdJL23eD\n+cK5lk2xbtj+YHkeG+dg7UlHfDaicj0wnFH1KLuWmNd1ONa6eQp3BQ==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1sglr4zp34drjfydzeweq43fz3uwpul3hkh53lsfa9drhuzwmkqyqn5jegp",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA3a2FOWlVsSkdnendrYmUz\ndEpuL1hZSWNFTUtDYm14S3V1aW9KS3hsazJRCkp2SkFFbi9hbGJpNks1MlNTL0s5\nTk5pcUMxaEJobkcvWmRGeU9jMkdNdzAKLS0tIDR6M0Y5eE1ETHJJejAzVW1EYy9v\nZCtPWHJPUkhuWnRzSGhMUUtTa280UmMKXvtnxyop7PmRvTOFkV80LziDjhGh93Pf\nYwhD/ByD/vMmr21Fd6PVHOX70FFT30BdnMc1/wt7c/0iAw4w4GoQsA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:20:18Z",
"mac": "ENC[AES256_GCM,data:3nXMTma0UYXCco+EM8UW45cth7DVMboFBKyesL86GmaG6OlTkA2/25AeDrtSVO13a5c2jC6yNFK5dE6pSe5R9f0BoDF7d41mgc85zyn+LGECNWKC6hy6gADNSDD6RRuV1S3FisFQl1F1LD8LiSWmg/XNMZzChNlHYsCS8M+I84g=,iv:pu5VVXAVPmVoXy0BJ+hq5Ar8R0pZttKSYa4YS+dhDNc=,tag:xp1S/4qExnxMTGwhfLJrkA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../../../../sops/users/admin

View File

@@ -1 +0,0 @@
t6qN4VGLR+VMhrBDNKQEXZVyRsEXs1/nGFRs5DI82F8=

View File

@@ -1 +0,0 @@
e3facc99b73fe029d4c295f71829a83f421f38d82361cf412326398175da162a

View File

@@ -0,0 +1,24 @@
(
{ pkgs, ... }:
{
name = "zt-tcp-relay";
nodes.machine =
{ self, ... }:
{
imports = [
self.nixosModules.clanCore
self.clanModules.zt-tcp-relay
{
clan.core.settings.directory = ./.;
}
];
};
testScript = ''
start_all()
machine.wait_for_unit("zt-tcp-relay.service")
out = machine.succeed("${pkgs.netcat}/bin/nc -z -v localhost 4443")
print(out)
'';
}
)

View File

@@ -0,0 +1,210 @@
{
config,
lib,
pkgs,
...
}:
let
# Instances might be empty, if the module is not used via the inventory
instances = config.clan.inventory.services.borgbackup or { };
# roles = { ${role_name} :: { machines :: [string] } }
allServers = lib.foldlAttrs (
acc: _instanceName: instanceConfig:
acc
++ (
if builtins.elem machineName instanceConfig.roles.client.machines then
instanceConfig.roles.server.machines
else
[ ]
)
) [ ] instances;
machineName = config.clan.core.settings.machine.name;
cfg = config.clan.borgbackup;
preBackupScript = ''
declare -A preCommandErrors
${lib.concatMapStringsSep "\n" (
state:
lib.optionalString (state.preBackupCommand != null) ''
echo "Running pre-backup command for ${state.name}"
if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
preCommandErrors["${state.name}"]=1
fi
''
) (lib.attrValues config.clan.core.state)}
if [[ ''${#preCommandErrors[@]} -gt 0 ]]; then
echo "pre-backup commands failed for the following services:"
for state in "''${!preCommandErrors[@]}"; do
echo " $state"
done
exit 1
fi
'';
in
{
options.clan.borgbackup.destinations = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
default = name;
description = "the name of the backup job";
};
repo = lib.mkOption {
type = lib.types.str;
description = "the borgbackup repository to backup to";
};
rsh = lib.mkOption {
type = lib.types.str;
default = "ssh -i ${
config.clan.core.vars.generators.borgbackup.files."borgbackup.ssh".path
} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
defaultText = "ssh -i \${config.clan.core.vars.generators.borgbackup.files.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
description = "the rsh to use for the backup";
};
};
}
)
);
default = { };
description = ''
destinations where the machine should be backed up to
'';
};
options.clan.borgbackup.exclude = lib.mkOption {
type = lib.types.listOf lib.types.str;
example = [ "*.pyc" ];
default = [ ];
description = ''
Directories/Files to exclude from the backup.
Use * as a wildcard.
'';
};
config = {
warnings = [
"The clan.borgbackup module is deprecated and will be removed on 2025-07-15.
Please migrate to user-maintained configuration or the new equivalent clan services
(https://docs.clan.lol/reference/clanServices)."
];
# Destinations
clan.borgbackup.destinations =
let
destinations = builtins.map (serverName: {
name = serverName;
value = {
repo = "borg@${serverName}:/var/lib/borgbackup/${machineName}";
};
}) allServers;
in
(builtins.listToAttrs destinations);
# Derived from the destinations
systemd.services = lib.mapAttrs' (
_: dest:
lib.nameValuePair "borgbackup-job-${dest.name}" {
# since borgbackup mounts the system read-only, we need to run in an
# ExecStartPre script, so we can generate additional files.
serviceConfig.ExecStartPre = [
''+${pkgs.writeShellScript "borgbackup-job-${dest.name}-pre-backup-commands" preBackupScript}''
];
}
) cfg.destinations;
services.borgbackup.jobs = lib.mapAttrs (_: dest: {
paths = lib.unique (
lib.flatten (map (state: state.folders) (lib.attrValues config.clan.core.state))
);
exclude = cfg.exclude;
repo = dest.repo;
environment.BORG_RSH = dest.rsh;
compression = "auto,zstd";
startAt = "*-*-* 01:00:00";
persistentTimer = true;
encryption = {
mode = "repokey";
passCommand = "cat ${config.clan.core.vars.generators.borgbackup.files."borgbackup.repokey".path}";
};
prune.keep = {
within = "1d"; # Keep all archives from the last day
daily = 7;
weekly = 4;
monthly = 0;
};
}) cfg.destinations;
environment.systemPackages = [
(pkgs.writeShellApplication {
name = "borgbackup-create";
runtimeInputs = [ config.systemd.package ];
text = ''
${lib.concatMapStringsSep "\n" (dest: ''
systemctl start borgbackup-job-${dest.name}
'') (lib.attrValues cfg.destinations)}
'';
})
(pkgs.writeShellApplication {
name = "borgbackup-list";
runtimeInputs = [ pkgs.jq ];
text = ''
(${
lib.concatMapStringsSep "\n" (
dest:
# we need yes here to skip the changed url verification
''echo y | /run/current-system/sw/bin/borg-job-${dest.name} list --json | jq '[.archives[] | {"name": ("${dest.name}::${dest.repo}::" + .name)}]' ''
) (lib.attrValues cfg.destinations)
}) | jq -s 'add // []'
'';
})
(pkgs.writeShellApplication {
name = "borgbackup-restore";
runtimeInputs = [ pkgs.gawk ];
text = ''
cd /
IFS=':' read -ra FOLDER <<< "''${FOLDERS-}"
job_name=$(echo "$NAME" | awk -F'::' '{print $1}')
backup_name=''${NAME#"$job_name"::}
if [[ ! -x /run/current-system/sw/bin/borg-job-"$job_name" ]]; then
echo "borg-job-$job_name not found: Backup name is invalid" >&2
exit 1
fi
echo y | /run/current-system/sw/bin/borg-job-"$job_name" extract "$backup_name" "''${FOLDER[@]}"
'';
})
];
clan.core.vars.generators.borgbackup = {
files."borgbackup.ssh.pub".secret = false;
files."borgbackup.ssh" = { };
files."borgbackup.repokey" = { };
migrateFact = "borgbackup";
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
pkgs.xkcdpass
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/borgbackup.ssh
xkcdpass -n 4 -d - > "$out"/borgbackup.repokey
'';
};
clan.core.backups.providers.borgbackup = {
list = "borgbackup-list";
create = "borgbackup-create";
restore = "borgbackup-restore";
};
};
}
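
For orientation, here is a rough sketch of how the options declared above could be set on a machine that still uses this deprecated module; the repository URL and exclude patterns are placeholders, not defaults:

```nix
{
  clan.borgbackup.destinations."offsite" = {
    # Placeholder repository; any borg repo reachable over SSH works.
    repo = "borg@backup.example.com:/var/lib/borgbackup/my-machine";
    # `rsh` can stay at its default, which uses the generated SSH key.
  };

  # Skip cache and bytecode directories in every destination.
  clan.borgbackup.exclude = [
    "*.pyc"
    ".direnv"
  ];
}
```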

View File

@@ -0,0 +1,36 @@
{
pkgs,
...
}:
{
config = {
warnings = [
''
The clan.disk-id module is deprecated and will be removed on 2025-07-15.
For migration see: https://docs.clan.lol/guides/migrations/disk-id/
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!! Please migrate. Otherwise you may not be able to boot your system after that date. !!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
''
];
clan.core.vars.generators.disk-id = {
files.diskId.secret = false;
runtimeInputs = [
pkgs.coreutils
pkgs.bash
];
script = ''
uuid=$(bash ${./uuid4.sh})
# Remove the hyphens from the UUID
uuid_no_hyphens=$(echo -n "$uuid" | tr -d '-')
echo -n "$uuid_no_hyphens" > "$out/diskId"
'';
};
};
}
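
Because `files.diskId.secret = false`, the generated value can be read back from the vars generator elsewhere in the configuration. A minimal sketch of that pattern, assuming the disko option shown is available in your setup:

```nix
{ config, ... }:
{
  # Illustrative only: derive a stable, machine-unique disk name from the
  # generated (non-secret) diskId value.
  disko.devices.disk.main.name =
    "main-${config.clan.core.vars.generators.disk-id.files.diskId.value}";
}
```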

View File

@@ -2,61 +2,22 @@
 let
   error = builtins.throw ''
-    ###############################################################################
-    #                                                                             #
-    # Clan modules (clanModules) have been deprecated and removed in favor of    #
-    # Clan services!                                                              #
-    #                                                                             #
-    # Refer to https://docs.clan.lol/guides/migrations/migrate-inventory-services #
-    # for migration instructions.                                                 #
-    #                                                                             #
-    ###############################################################################
+    clanModules have been removed!
+    Refer to https://docs.clan.lol/guides/migrations/migrate-inventory-services for migration.
   '';
-  modnames = [
-    "admin"
-    "borgbackup"
-    "borgbackup-static"
-    "deltachat"
-    "disk-id"
-    "dyndns"
-    "ergochat"
-    "garage"
-    "heisenbridge"
-    "iwd"
-    "localbackup"
-    "localsend"
-    "matrix-synapse"
-    "moonlight"
-    "mumble"
-    "nginx"
-    "packages"
-    "postgresql"
-    "root-password"
-    "single-disk"
-    "sshd"
-    "state-version"
-    "static-hosts"
-    "sunshine"
-    "syncthing"
-    "syncthing-static-peers"
-    "thelounge"
-    "trusted-nix-caches"
-    "user-password"
-    "vaultwarden"
-    "xfce"
-    "zerotier-static-peers"
-    "zt-tcp-relay"
-  ];
 in
 {
-  flake.clanModules = builtins.listToAttrs (
-    map (name: {
-      inherit name;
-      value = error;
-    }) modnames
-  );
+  flake.clanModules = {
+    outPath = "removed-clan-modules";
+    value = error;
+  };
+  # builtins.listToAttrs (
+  #   map (name: {
+  #     inherit name;
+  #     value = error;
+  #   }) modnames
+  # );
 }
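
For context, the migration target referenced in that error is the `inventory.instances` interface used elsewhere in this diff. A minimal sketch, with illustrative instance and machine names:

```nix
inventory.instances = {
  borgbackup = {
    module.name = "borgbackup";
    module.input = "clan-core";
    roles.client.machines."jon" = { };
    roles.server.machines."backup-host" = { };
  };
};
```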

View File

@@ -0,0 +1,27 @@
---
description = "Convenient, structured module imports for hosts."
categories = ["Utility"]
features = [ "inventory" ]
---
The importer module allows users to configure importing modules in a flexible and structured way.
It exposes the `extraModules` functionality of the inventory, without any added configuration.
## Usage
```nix
inventory.services = {
importer.base = {
roles.default.tags = [ "all" ];
roles.default.extraModules = [ "modules/base.nix" ];
};
importer.zone1 = {
roles.default.tags = [ "zone1" ];
roles.default.extraModules = [ "modules/zone1.nix" ];
};
};
```
This will import the module `modules/base.nix` into all machines that have the `all` tag,
which by default is every machine managed by the clan.
It will also import the module at `modules/zone1.nix` into all machines tagged with `zone1`.
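
A module referenced through `extraModules` is an ordinary NixOS module. A minimal sketch of what `modules/base.nix` could contain (the contents are illustrative, not part of the importer service):

```nix
# modules/base.nix (illustrative)
{ pkgs, ... }:
{
  environment.systemPackages = [ pkgs.git ];
  services.openssh.enable = true;
}
```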

View File

@@ -0,0 +1,106 @@
{
config,
pkgs,
lib,
...
}:
let
stringSet = list: builtins.attrNames (builtins.groupBy lib.id list);
domains = stringSet config.clan.sshd.certificate.searchDomains;
cfg = config.clan.sshd;
in
{
imports = [ ../shared.nix ];
options = {
clan.sshd.hostKeys.rsa.enable = lib.mkEnableOption "Generate RSA host key";
};
config = {
warnings = [
"The clan.sshd module is deprecated and will be removed on 2025-07-15.
Please migrate to user-maintained configuration or the new equivalent clan services
(https://docs.clan.lol/reference/clanServices)."
];
services.openssh = {
enable = true;
settings.PasswordAuthentication = false;
settings.HostCertificate = lib.mkIf (
cfg.certificate.searchDomains != [ ]
) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path;
hostKeys =
[
{
path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
type = "ed25519";
}
]
++ lib.optional cfg.hostKeys.rsa.enable {
path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
type = "rsa";
};
};
clan.core.vars.generators.openssh = {
files."ssh.id_ed25519" = { };
files."ssh.id_ed25519.pub".secret = false;
migrateFact = "openssh";
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/ssh.id_ed25519
'';
};
programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
hostNames = [
"localhost"
config.networking.hostName
] ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
};
clan.core.vars.generators.openssh-rsa = lib.mkIf config.clan.sshd.hostKeys.rsa.enable {
files."ssh.id_rsa" = { };
files."ssh.id_rsa.pub".secret = false;
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t rsa -b 4096 -N "" -C "" -f "$out"/ssh.id_rsa
'';
};
clan.core.vars.generators.openssh-cert = lib.mkIf (cfg.certificate.searchDomains != [ ]) {
files."ssh.id_ed25519-cert.pub".secret = false;
dependencies = [
"openssh"
"openssh-ca"
];
validation = {
name = config.clan.core.settings.machine.name;
domains = lib.genAttrs config.clan.sshd.certificate.searchDomains lib.id;
};
runtimeInputs = [
pkgs.openssh
pkgs.jq
];
script = ''
ssh-keygen \
-s $in/openssh-ca/id_ed25519 \
-I ${config.clan.core.settings.machine.name} \
-h \
-n ${lib.concatMapStringsSep "," (d: "${config.clan.core.settings.machine.name}.${d}") domains} \
$in/openssh/ssh.id_ed25519.pub
mv $in/openssh/ssh.id_ed25519-cert.pub "$out"/ssh.id_ed25519-cert.pub
'';
};
};
}

View File

@@ -0,0 +1,49 @@
{
config,
lib,
pkgs,
...
}:
{
options = {
clan.sshd.certificate = {
# TODO: allow per-server domains that we than collect in the inventory
#domains = lib.mkOption {
# type = lib.types.listOf lib.types.str;
# default = [ ];
# example = [ "git.mydomain.com" ];
# description = "List of domains to include in the certificate. This option will not prepend the machine name in front of each domain.";
#};
searchDomains = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
example = [ "mydomain.com" ];
description = "List of domains to include in the certificate. This option will prepend the machine name in front of each domain before adding it to the certificate.";
};
};
};
config = {
clan.core.vars.generators.openssh-ca =
lib.mkIf (config.clan.sshd.certificate.searchDomains != [ ])
{
share = true;
files.id_ed25519.deploy = false;
files."id_ed25519.pub" = {
deploy = false;
secret = false;
};
runtimeInputs = [
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/id_ed25519
'';
};
programs.ssh.knownHosts.ssh-ca = lib.mkIf (config.clan.sshd.certificate.searchDomains != [ ]) {
certAuthority = true;
extraHostNames = builtins.map (domain: "*.${domain}") config.clan.sshd.certificate.searchDomains;
publicKey = config.clan.core.vars.generators.openssh-ca.files."id_ed25519.pub".value;
};
};
}
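
Combining the options from this file and the sshd module above, a machine could opt into certificate issuance and an additional RSA host key with a sketch like the following; the domain is a placeholder:

```nix
{
  clan.sshd = {
    hostKeys.rsa.enable = true;
    # The machine name is prepended, so this produces a certificate for
    # "<machine-name>.mydomain.com".
    certificate.searchDomains = [ "mydomain.com" ];
  };
}
```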

View File

@@ -1,14 +1,15 @@
{ ... }:
{ {
_class = "clan.service"; _class = "clan.service";
manifest.name = "clan-core/admin"; manifest.name = "clan-core/admin";
manifest.description = "Adds a root user with ssh access"; manifest.description = "Convenient Administration for the Clan App";
manifest.categories = [ "Utility" ]; manifest.categories = [ "Utility" ];
roles.default = { roles.default = {
description = "Placeholder role to apply the admin service";
interface = interface =
{ lib, ... }: { lib, ... }:
{ {
options = { options = {
allowedKeys = lib.mkOption { allowedKeys = lib.mkOption {
default = { }; default = { };
@@ -40,13 +41,25 @@
}; };
}; };
}; };
};
# We don't have a good way to specify dependencies between perInstance =
# clanServices for now. When it get's implemtende, we should just { settings, ... }:
# use the ssh and users modules here. {
imports = [ nixosModule =
./ssh.nix { ... }:
./root-password.nix {
]; imports = [
# We don't have a good way to specify dependencies between
# clanServices for now. When it get's implemtende, we should just
# use the ssh and users modules here.
./ssh.nix
./root-password.nix
];
_module.args = { inherit settings; };
users.users.root.openssh.authorizedKeys.keys = builtins.attrValues settings.allowedKeys;
};
};
};
} }

View File

@@ -1,6 +1,6 @@
-{ ... }:
+{ lib, ... }:
 let
-  module = ./default.nix;
+  module = lib.modules.importApply ./default.nix { };
 in
 {
   clan.modules = {

View File

@@ -1,55 +1,39 @@
# We don't have a way of specifying dependencies between clanServices for now. # We don't have a way of specifying dependencies between clanServices for now.
# When it get's added this file should be removed and the users module used instead. # When it get's added this file should be removed and the users module used instead.
{ {
roles.default.perInstance = config,
{ ... }: pkgs,
{ ...
nixosModule = }:
{ {
config,
pkgs,
...
}:
{
users.mutableUsers = false; users.mutableUsers = false;
users.users.root.hashedPasswordFile = users.users.root.hashedPasswordFile =
config.clan.core.vars.generators.root-password.files.password-hash.path; config.clan.core.vars.generators.root-password.files.password-hash.path;
clan.core.vars.generators.root-password = { clan.core.vars.generators.root-password = {
files.password-hash.neededFor = "users"; files.password-hash.neededFor = "users";
files.password.deploy = false; files.password.deploy = false;
runtimeInputs = [ runtimeInputs = [
pkgs.coreutils pkgs.coreutils
pkgs.mkpasswd pkgs.mkpasswd
pkgs.xkcdpass pkgs.xkcdpass
]; ];
prompts.password.display = { prompts.password.type = "hidden";
group = "Root User"; prompts.password.persist = true;
label = "Password"; prompts.password.description = "You can autogenerate a password, if you leave this prompt blank.";
required = false;
helperText = ''
Your password will be encrypted and stored securely using the secret store you've configured.
'';
};
prompts.password.type = "hidden"; script = ''
prompts.password.persist = true; prompt_value="$(cat "$prompts"/password)"
prompts.password.description = "Leave empty to generate automatically"; if [[ -n "''${prompt_value-}" ]]; then
echo "$prompt_value" | tr -d "\n" > "$out"/password
script = '' else
prompt_value="$(cat "$prompts"/password)" xkcdpass --numwords 5 --delimiter - --count 1 | tr -d "\n" > "$out"/password
if [[ -n "''${prompt_value-}" ]]; then fi
echo "$prompt_value" | tr -d "\n" > "$out"/password mkpasswd -s -m sha-512 < "$out"/password | tr -d "\n" > "$out"/password-hash
else '';
xkcdpass --numwords 5 --delimiter - --count 1 | tr -d "\n" > "$out"/password };
fi
mkpasswd -s -m sha-512 < "$out"/password | tr -d "\n" > "$out"/password-hash
'';
};
};
};
} }

View File

@@ -1,124 +1,115 @@
{ {
roles.default.perInstance = config,
{ settings, ... }: pkgs,
{ lib,
nixosModule = settings,
...
}:
let
stringSet = list: builtins.attrNames (builtins.groupBy lib.id list);
domains = stringSet settings.certificateSearchDomains;
in
{
services.openssh = {
enable = true;
settings.PasswordAuthentication = false;
settings.HostCertificate = lib.mkIf (
settings.certificateSearchDomains != [ ]
) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path;
hostKeys =
[
{ {
config, path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
pkgs, type = "ed25519";
lib, }
... ]
}: ++ lib.optional settings.rsaHostKey.enable {
let path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
stringSet = list: builtins.attrNames (builtins.groupBy lib.id list); type = "rsa";
};
};
domains = stringSet settings.certificateSearchDomains; clan.core.vars.generators.openssh = {
files."ssh.id_ed25519" = { };
files."ssh.id_ed25519.pub".secret = false;
migrateFact = "openssh";
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/ssh.id_ed25519
'';
};
in programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
{ hostNames = [
"localhost"
config.networking.hostName
] ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
};
users.users.root.openssh.authorizedKeys.keys = builtins.attrValues settings.allowedKeys; clan.core.vars.generators.openssh-rsa = lib.mkIf settings.rsaHostKey.enable {
files."ssh.id_rsa" = { };
files."ssh.id_rsa.pub".secret = false;
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t rsa -b 4096 -N "" -C "" -f "$out"/ssh.id_rsa
'';
};
services.openssh = { clan.core.vars.generators.openssh-cert = lib.mkIf (settings.certificateSearchDomains != [ ]) {
enable = true; files."ssh.id_ed25519-cert.pub".secret = false;
settings.PasswordAuthentication = false; dependencies = [
"openssh"
settings.HostCertificate = lib.mkIf ( "openssh-ca"
settings.certificateSearchDomains != [ ] ];
) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path; validation = {
name = config.clan.core.settings.machine.name;
hostKeys = [ domains = lib.genAttrs settings.certificateSearchDomains lib.id;
{
path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
type = "ed25519";
}
]
++ lib.optional settings.rsaHostKey.enable {
path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
type = "rsa";
};
};
clan.core.vars.generators.openssh = {
files."ssh.id_ed25519" = { };
files."ssh.id_ed25519.pub".secret = false;
migrateFact = "openssh";
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/ssh.id_ed25519
'';
};
programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
hostNames = [
"localhost"
config.networking.hostName
]
++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
};
clan.core.vars.generators.openssh-rsa = lib.mkIf settings.rsaHostKey.enable {
files."ssh.id_rsa" = { };
files."ssh.id_rsa.pub".secret = false;
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t rsa -b 4096 -N "" -C "" -f "$out"/ssh.id_rsa
'';
};
clan.core.vars.generators.openssh-cert = lib.mkIf (settings.certificateSearchDomains != [ ]) {
files."ssh.id_ed25519-cert.pub".secret = false;
dependencies = [
"openssh"
"openssh-ca"
];
validation = {
name = config.clan.core.settings.machine.name;
domains = lib.genAttrs settings.certificateSearchDomains lib.id;
};
runtimeInputs = [
pkgs.openssh
pkgs.jq
];
script = ''
ssh-keygen \
-s $in/openssh-ca/id_ed25519 \
-I ${config.clan.core.settings.machine.name} \
-h \
-n ${lib.concatMapStringsSep "," (d: "${config.clan.core.settings.machine.name}.${d}") domains} \
$in/openssh/ssh.id_ed25519.pub
mv $in/openssh/ssh.id_ed25519-cert.pub "$out"/ssh.id_ed25519-cert.pub
'';
};
clan.core.vars.generators.openssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
share = true;
files.id_ed25519.deploy = false;
files."id_ed25519.pub" = {
deploy = false;
secret = false;
};
runtimeInputs = [
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/id_ed25519
'';
};
programs.ssh.knownHosts.ssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
certAuthority = true;
extraHostNames = builtins.map (domain: "*.${domain}") settings.certificateSearchDomains;
publicKey = config.clan.core.vars.generators.openssh-ca.files."id_ed25519.pub".value;
};
};
}; };
runtimeInputs = [
pkgs.openssh
pkgs.jq
];
script = ''
ssh-keygen \
-s $in/openssh-ca/id_ed25519 \
-I ${config.clan.core.settings.machine.name} \
-h \
-n ${lib.concatMapStringsSep "," (d: "${config.clan.core.settings.machine.name}.${d}") domains} \
$in/openssh/ssh.id_ed25519.pub
mv $in/openssh/ssh.id_ed25519-cert.pub "$out"/ssh.id_ed25519-cert.pub
'';
};
clan.core.vars.generators.openssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
share = true;
files.id_ed25519.deploy = false;
files."id_ed25519.pub" = {
deploy = false;
secret = false;
};
runtimeInputs = [
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/id_ed25519
'';
};
programs.ssh.knownHosts.ssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
certAuthority = true;
extraHostNames = builtins.map (domain: "*.${domain}") settings.certificateSearchDomains;
publicKey = config.clan.core.vars.generators.openssh-ca.files."id_ed25519.pub".value;
};
} }

View File

@@ -1,59 +1,9 @@
-## Usage
-
-```nix
-inventory.instances = {
-  borgbackup = {
-    module = {
-      name = "borgbackup";
-      input = "clan-core";
-    };
-
-    roles.client.machines."jon".settings = {
-      destinations."storagebox" = {
-        repo = "username@hostname:/./borgbackup";
-        rsh = ''ssh -oPort=23 -i /run/secrets/vars/borgbackup/borgbackup.ssh'';
-      };
-    };
-    roles.server.machines = { };
-  };
-};
-```
-
-The input should be named according to your flake input. Jon is configured as a
-client machine with a destination pointing to a Hetzner Storage Box.
-
-## Overview
-
-This guide explains how to set up and manage
-[BorgBackup](https://borgbackup.readthedocs.io/) for secure, efficient backups
-in a clan network. BorgBackup provides:
-
-- Space efficient storage of backups with deduplication
-- Secure, authenticated encryption
-- Compression: lz4, zstd, zlib, lzma or none
-- Mountable backups with FUSE
+BorgBackup (short: Borg) gives you:
+
+- Space efficient storage of backups.
+- Secure, authenticated encryption.
+- Compression: lz4, zstd, zlib, lzma or none.
+- Mountable backups with FUSE.
 - Easy installation on multiple platforms: Linux, macOS, BSD, …
 - Free software (BSD license).
 - Backed by a large and active open-source community.
-
-## Roles
-
-### 1. Client
-
-Clients are machines that create and send backups to various destinations. Each
-client can have multiple backup destinations configured.
-
-### 2. Server
-
-Servers act as backup repositories, receiving and storing backups from client
-machines. They can be dedicated backup servers within your clan network.
-
-## Backup destinations
-
-This service allows you to perform backups to multiple `destinations`.
-Destinations can be:
-
-- **Local**: Local disk storage
-- **Server**: Your own borgbackup server (using the `server` role)
-- **Third-party services**: Such as Hetzner's Storage Box
-
-For a more comprehensive guide on backups look into the guide section.
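
As a hedged sketch of the multi-destination setup described in the removed README (all paths and host names below are placeholders):

```nix
roles.client.machines."jon".settings = {
  destinations = {
    # Local disk or mounted external drive
    "usb-drive".repo = "/mnt/backup-drive/borgbackup";
    # A clan machine carrying the `server` role in this instance
    "homeserver".repo = "borg@homeserver:/var/lib/borgbackup/jon";
    # Third-party storage such as a Hetzner Storage Box
    "storagebox" = {
      repo = "username@hostname:/./borgbackup";
      rsh = "ssh -oPort=23 -i /run/secrets/vars/borgbackup/borgbackup.ssh";
    };
  };
};
```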

View File

@@ -9,7 +9,7 @@
   # TODO: a client can only be in one instance, add constraint
   roles.server = {
-    description = "A borgbackup server that stores the backups of clients.";
     interface =
       { lib, ... }:
       {
@@ -54,7 +54,7 @@
               authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machineName)) ];
             # };
             # }) machinesWithKey;
-          }) (roles.client.machines or { });
+          }) roles.client.machines;
         in
         hosts;
       };
@@ -62,7 +62,6 @@
   };

   roles.client = {
-    description = "A borgbackup client that backs up to all borgbackup server roles.";
     interface =
       {
         lib,
@@ -188,7 +187,7 @@
               config.clan.core.vars.generators.borgbackup.files."borgbackup.ssh".path
             } -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
           };
-        }) (builtins.attrNames (roles.server.machines or { }));
+        }) (builtins.attrNames roles.server.machines);
       in
       (builtins.listToAttrs destinations);

View File

@@ -1,6 +1,6 @@
-{ ... }:
+{ lib, ... }:
 let
-  module = ./default.nix;
+  module = lib.modules.importApply ./default.nix { };
 in
 {
   clan.modules = {

View File

@@ -1,32 +0,0 @@
This service sets up a certificate authority (CA) that can issue certificates to
other machines in your clan. For this the `ca` role is used.
It additionally provides a `default` role, that can be applied to all machines
in your clan and will make sure they trust your CA.
## Example Usage
The following configuration would add a CA for the top level domain `.foo`. If
the machine `server` now hosts a webservice at `https://something.foo`, it will
get a certificate from `ca` which is valid inside your clan. The machine
`client` will trust this certificate if it makes a request to
`https://something.foo`.
This clan service can be combined with the `coredns` service for easy-to-deploy,
SSL-secured clan-internal service hosting.
```nix
inventory = {
machines.ca = { };
machines.client = { };
machines.server = { };
instances."certificates" = {
module.name = "certificates";
module.input = "self";
roles.ca.machines.ca.settings.tlds = [ "foo" ];
roles.default.machines.client = { };
roles.default.machines.server = { };
};
};
```
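
On the `server` machine, a service under the `.foo` TLD would then request its certificate from the clan CA via ACME. A minimal sketch, mirroring the VM test later in this diff (the host name is an example):

```nix
{
  services.nginx = {
    enable = true;
    virtualHosts."something.foo" = {
      enableACME = true;
      forceSSL = true;
      locations."/".return = "200 'hello from the clan'";
    };
  };
  # Depending on DNS setup, the ACME directory may need to be pointed at the
  # CA explicitly, as the test below does for "test.foo".
}
```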

View File

@@ -1,246 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "certificates";
manifest.description = "Sets up a PKI certificate chain using step-ca";
manifest.categories = [ "Network" ];
manifest.readme = builtins.readFile ./README.md;
roles.ca = {
description = "A certificate authority that issues and signs certificates for other machines.";
interface =
{ lib, ... }:
{
options.acmeEmail = lib.mkOption {
type = lib.types.str;
default = "none@none.tld";
description = ''
Email address for account creation and correspondence from the CA.
It is recommended to use the same email for all certs to avoid account
creation limits.
'';
};
options.tlds = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "Top level domain for this CA. Certificates will be issued and trusted for *.<tld>";
};
options.expire = lib.mkOption {
type = lib.types.nullOr lib.types.str;
description = "When the certificate should expire.";
default = "8760h";
example = "8760h";
};
};
perInstance =
{ settings, ... }:
{
nixosModule =
{
config,
pkgs,
lib,
...
}:
let
domains = map (tld: "ca.${tld}") settings.tlds;
in
{
security.acme.defaults.email = settings.acmeEmail;
security.acme = {
certs = builtins.listToAttrs (
map (domain: {
name = domain;
value = {
server = "https://${domain}:1443/acme/acme/directory";
};
}) domains
);
};
networking.firewall.allowedTCPPorts = [
80
443
];
services.nginx = {
enable = true;
recommendedProxySettings = true;
virtualHosts = builtins.listToAttrs (
map (domain: {
name = domain;
value = {
addSSL = true;
enableACME = true;
locations."/".proxyPass = "https://localhost:1443";
locations."= /ca.crt".alias =
config.clan.core.vars.generators.step-intermediate-cert.files."intermediate.crt".path;
};
}) domains
);
};
clan.core.vars.generators = {
# Intermediate key generator
"step-intermediate-key" = {
files."intermediate.key" = {
secret = true;
deploy = true;
owner = "step-ca";
group = "step-ca";
};
runtimeInputs = [ pkgs.step-cli ];
script = ''
step crypto keypair --kty EC --curve P-256 --no-password --insecure $out/intermediate.pub $out/intermediate.key
'';
};
# Intermediate certificate generator
"step-intermediate-cert" = {
files."intermediate.crt".secret = false;
dependencies = [
"step-ca"
"step-intermediate-key"
];
runtimeInputs = [ pkgs.step-cli ];
script = ''
# Create intermediate certificate
step certificate create \
--ca $in/step-ca/ca.crt \
--ca-key $in/step-ca/ca.key \
--ca-password-file /dev/null \
--key $in/step-intermediate-key/intermediate.key \
--template ${pkgs.writeText "intermediate.tmpl" ''
{
"subject": {{ toJson .Subject }},
"keyUsage": ["certSign", "crlSign"],
"basicConstraints": {
"isCA": true,
"maxPathLen": 0
},
"nameConstraints": {
"critical": true,
"permittedDNSDomains": [${
(lib.strings.concatStringsSep "," (map (tld: ''"${tld}"'') settings.tlds))
}]
}
}
''} ${lib.optionalString (settings.expire != null) "--not-after ${settings.expire}"} \
--not-before=-12h \
--no-password --insecure \
"Clan Intermediate CA" \
$out/intermediate.crt
'';
};
};
services.step-ca = {
enable = true;
intermediatePasswordFile = "/dev/null";
address = "0.0.0.0";
port = 1443;
settings = {
root = config.clan.core.vars.generators.step-ca.files."ca.crt".path;
crt = config.clan.core.vars.generators.step-intermediate-cert.files."intermediate.crt".path;
key = config.clan.core.vars.generators.step-intermediate-key.files."intermediate.key".path;
dnsNames = domains;
logger.format = "text";
db = {
type = "badger";
dataSource = "/var/lib/step-ca/db";
};
authority = {
provisioners = [
{
type = "ACME";
name = "acme";
forceCN = true;
}
];
claims = {
maxTLSCertDuration = "2160h";
defaultTLSCertDuration = "2160h";
};
backdate = "1m0s";
};
tls = {
cipherSuites = [
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
];
minVersion = 1.2;
maxVersion = 1.3;
renegotiation = false;
};
};
};
};
};
};
# Empty role, so we can add non-CA machines to the instance to trust the CA
roles.default = {
description = "A machine that trusts the CA and can get certificates issued by it.";
interface =
{ lib, ... }:
{
options.acmeEmail = lib.mkOption {
type = lib.types.str;
default = "none@none.tld";
description = ''
Email address for account creation and correspondence from the CA.
It is recommended to use the same email for all certs to avoid account
creation limits.
'';
};
};
perInstance =
{ settings, ... }:
{
nixosModule.security.acme.defaults.email = settings.acmeEmail;
};
};
# All machines (independent of role) will trust the CA
perMachine.nixosModule =
{ pkgs, config, ... }:
{
# Root CA generator
clan.core.vars.generators = {
"step-ca" = {
share = true;
files."ca.key" = {
secret = true;
deploy = false;
};
files."ca.crt".secret = false;
runtimeInputs = [ pkgs.step-cli ];
script = ''
step certificate create --template ${pkgs.writeText "root.tmpl" ''
{
"subject": {{ toJson .Subject }},
"issuer": {{ toJson .Subject }},
"keyUsage": ["certSign", "crlSign"],
"basicConstraints": {
"isCA": true,
"maxPathLen": 1
}
}
''} "Clan Root CA" $out/ca.crt $out/ca.key \
--kty EC --curve P-256 \
--not-after=8760h \
--not-before=-12h \
--no-password --insecure
'';
};
};
security.pki.certificateFiles = [ config.clan.core.vars.generators."step-ca".files."ca.crt".path ];
environment.systemPackages = [ pkgs.openssl ];
security.acme.acceptTerms = true;
};
}

View File

@@ -1,17 +0,0 @@
{
...
}:
let
module = ./default.nix;
in
{
clan.modules.certificates = module;
perSystem =
{ ... }:
{
clan.nixosTests.certificates = {
imports = [ ./tests/vm/default.nix ];
clan.modules.certificates = module;
};
};
}

View File

@@ -1,84 +0,0 @@
{
name = "certificates";
clan = {
directory = ./.;
inventory = {
machines.ca = { }; # 192.168.1.1
machines.client = { }; # 192.168.1.2
machines.server = { }; # 192.168.1.3
instances."certificates" = {
module.name = "certificates";
module.input = "self";
roles.ca.machines.ca.settings.tlds = [ "foo" ];
roles.default.machines.client = { };
roles.default.machines.server = { };
};
};
};
nodes =
let
hostConfig = ''
192.168.1.1 ca.foo
192.168.1.3 test.foo
'';
in
{
client.networking.extraHosts = hostConfig;
ca.networking.extraHosts = hostConfig;
server = {
networking.extraHosts = hostConfig;
# TODO: Could this be set automatically?
# I would like to get this information from the coredns module, but we
# cannot model dependencies yet
security.acme.certs."test.foo".server = "https://ca.foo/acme/acme/directory";
# Host a simple service on 'server', with SSL provided via our CA. 'client'
# should be able to curl it via https and accept the certificates
# presented
networking.firewall.allowedTCPPorts = [
80
443
];
services.nginx = {
enable = true;
virtualHosts."test.foo" = {
enableACME = true;
forceSSL = true;
locations."/" = {
return = "200 'test server response'";
extraConfig = "add_header Content-Type text/plain;";
};
};
};
};
};
testScript = ''
start_all()
import time
time.sleep(3)
ca.succeed("systemctl restart acme-order-renew-ca.foo.service ")
time.sleep(3)
server.succeed("systemctl restart acme-test.foo.service")
# It takes a while for the correct certs to appear (before that self-signed
# are presented by nginx) so we wait for a bit.
client.wait_until_succeeds("curl -v https://test.foo")
# Show certificate information for debugging
client.succeed("openssl s_client -connect test.foo:443 -servername test.foo </dev/null 2>/dev/null | openssl x509 -text -noout 1>&2")
'';
}

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1yd2cden7jav8x4nzx2fwze2fsa5j0qm2m3t7zum765z3u4gj433q7dqj43",
"type": "age"
}
]

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1js225d8jc507sgcg0fdfv2x3xv3asm4ds5c6s4hp37nq8spxu95sc5x3ce",
"type": "age"
}
]

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1nwuh8lc604mnz5r8ku8zswyswnwv02excw237c0cmtlejp7xfp8sdrcwfa",
"type": "age"
}
]

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:6+XilULKRuWtAZ6B8Lj9UqCfi1T6dmqrDqBNXqS4SvBwM1bIWiL6juaT1Q7ByOexzID7tY740gmQBqTey54uLydh8mW0m4ZtUqw=,iv:9kscsrMPBGkutTnxrc5nrc7tQXpzLxw+929pUDKqTu0=,tag:753uIjm8ZRs0xsjiejEY8g==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA1d3kycldZRXhmR0FqTXJp\nWWU0MDBYNmxxbFE5M2xKYm5KWnQ0MXBHNEM4CjN4RFFVcFlkd3pjTFVDQ3Vackdj\nVTVhMWoxdFpsWHp5S1p4L05kYk5LUkkKLS0tIENtZFZZTjY2amFVQmZLZFplQzBC\nZm1vWFI4MXR1ZHIxTTQ5VXdSYUhvOTQKte0bKjXQ0xA8FrpuChjDUvjVqp97D8kT\n3tVh6scdjxW48VSBZP1GRmqcMqCdj75GvJTbWeNEV4PDBW7GI0UW+Q==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:39Z",
"mac": "ENC[AES256_GCM,data:AftMorrH7qX5ctVu5evYHn5h9pC4Mmm2VYaAV8Hy0PKTc777jNsL6DrxFVV3NVqtecpwrzZFWKgzukcdcRJe4veVeBrusmoZYtifH0AWZTEVpVlr2UXYYxCDmNZt1WHfVUo40bT//X6QM0ye6a/2Y1jYPbMbryQNcGmnpk9PDvU=,iv:5nk+d8hzA05LQp7ZHRbIgiENg2Ha6J6YzyducM6zcNU=,tag:dy1hqWVzMu/+fSK57h9ZCA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:jdTuGQUYvT1yXei1RHKsOCsABmMlkcLuziHDVhA7NequZeNu0fSbrJTXQDCHsDGhlYRcjU5EsEDT750xdleXuD3Gs9zWvPVobI4=,iv:YVow3K1j6fzRF9bRfIEpuOkO/nRpku/UQxWNGC+UJQQ=,tag:cNLM5R7uu6QpwPB9K6MYzg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvOVF2WXRSL0NpQzFZR01I\nNU85TGcyQmVDazN1dmpuRFVTZEg5NDRKTGhrCk1IVjFSU1V6WHBVRnFWcHkyVERr\nTjFKbW1mQ2FWOWhjN2VPamMxVEQ5VkkKLS0tIENVUGlhanhuWGtDKzBzRmk2dE4v\nMXZBRXNMa3IrOTZTNHRUWVE3UXEwSWMK2cBLoL/H/Vxd/klVrqVLdX9Mww5j7gw/\nEWc5/hN+km6XoW+DiJxVG4qaJ7qqld6u5ZnKgJT+2h9CfjA04I2akg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:51Z",
"mac": "ENC[AES256_GCM,data:zOBQVM2Ydu4v0+Fw3p3cEU+5+7eKaadV0tKro1JVOxclG1Vs6Myq57nw2eWf5JxIl0ulL+FavPKY26qOQ3aqcGOT3PMRlCda9z+0oSn9Im9bE/DzAGmoH/bp76kFkgTTOCZTMUoqJ+UJqv0qy1BH/92sSSKmYshEX6d1vr5ISrw=,iv:i9ZW4sLxOCan4UokHlySVr1CW39nCTusG4DmEPj/gIw=,tag:iZBDPHDkE3Vt5mFcFu1TPQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

Some files were not shown because too many files have changed in this diff.