Compare commits: serve-json...remove-mod
1 commit: 61f238210f
@@ -1,20 +0,0 @@
name: Build Clan App (Darwin)

on:
  schedule:
    # Run every 4 hours
    - cron: "0 */4 * * *"
  workflow_dispatch:
  push:
    branches:
      - main

jobs:
  build-clan-app-darwin:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4

      - name: Build clan-app for x86_64-darwin
        run: |
          nix build .#packages.x86_64-darwin.clan-app --system x86_64-darwin --log-format bar-with-logs
.gitea/workflows/checks.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
name: checks
on:
  pull_request:
jobs:
  checks-impure:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4
      - run: nix run .#impure-checks
@@ -1,7 +1,6 @@
-#!/bin/sh
-
+#!/usr/bin/env bash
 # Shared script for creating pull requests in Gitea workflows
-set -eu
+set -euo pipefail

 # Required environment variables:
 # - CI_BOT_TOKEN: Gitea bot token for authentication
@@ -9,22 +8,22 @@ set -eu
 # - PR_TITLE: Title of the pull request
 # - PR_BODY: Body/description of the pull request

-if [ -z "${CI_BOT_TOKEN:-}" ]; then
+if [[ -z "${CI_BOT_TOKEN:-}" ]]; then
   echo "Error: CI_BOT_TOKEN is not set" >&2
   exit 1
 fi

-if [ -z "${PR_BRANCH:-}" ]; then
+if [[ -z "${PR_BRANCH:-}" ]]; then
   echo "Error: PR_BRANCH is not set" >&2
   exit 1
 fi

-if [ -z "${PR_TITLE:-}" ]; then
+if [[ -z "${PR_TITLE:-}" ]]; then
   echo "Error: PR_TITLE is not set" >&2
   exit 1
 fi

-if [ -z "${PR_BODY:-}" ]; then
+if [[ -z "${PR_BODY:-}" ]]; then
   echo "Error: PR_BODY is not set" >&2
   exit 1
 fi
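The four validation blocks above are structurally identical. A minimal sketch of how they could be folded into a single helper — illustrative only, not part of this diff; `require_env` is a hypothetical name:

#!/usr/bin/env bash
set -euo pipefail

# Hypothetical helper: fail fast when any named environment variable
# is unset or empty, using bash indirect expansion.
require_env() {
  local name
  for name in "$@"; do
    if [[ -z "${!name:-}" ]]; then
      echo "Error: $name is not set" >&2
      exit 1
    fi
  done
}

require_env CI_BOT_TOKEN PR_BRANCH PR_TITLE PR_BODY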
@@ -44,12 +43,9 @@ resp=$(nix run --inputs-from . nixpkgs#curl -- -X POST \
   }" \
   "https://git.clan.lol/api/v1/repos/clan/clan-core/pulls")

-if ! pr_number=$(echo "$resp" | jq -r '.number'); then
-  echo "Error parsing response from pull request creation" >&2
-  exit 1
-fi
+pr_number=$(echo "$resp" | jq -r '.number')

-if [ "$pr_number" = "null" ]; then
+if [[ "$pr_number" == "null" ]]; then
   echo "Error creating pull request:" >&2
   echo "$resp" | jq . >&2
   exit 1
@@ -68,15 +64,12 @@ while true; do
     "delete_branch_after_merge": true
   }' \
   "https://git.clan.lol/api/v1/repos/clan/clan-core/pulls/$pr_number/merge")
-  if ! msg=$(echo "$resp" | jq -r '.message'); then
-    echo "Error parsing merge response" >&2
-    exit 1
-  fi
-  if [ "$msg" != "Please try again later" ]; then
+  msg=$(echo "$resp" | jq -r '.message')
+  if [[ "$msg" != "Please try again later" ]]; then
     break
   fi
   echo "Retrying in 2 seconds..."
   sleep 2
 done

 echo "Pull request #$pr_number merge initiated"
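The merge loop above retries forever while Gitea answers "Please try again later". A bounded variant of the same poll-and-retry pattern, as an illustrative sketch (`try_merge` is a hypothetical stand-in for the curl POST to the merge endpoint shown above):

#!/usr/bin/env bash
set -euo pipefail

max_attempts=30
for ((i = 1; i <= max_attempts; i++)); do
  # try_merge stands in for the curl call against
  # /repos/clan/clan-core/pulls/$pr_number/merge.
  msg=$(try_merge | jq -r '.message')
  [[ "$msg" != "Please try again later" ]] && break
  echo "Retrying in 2 seconds... (attempt $i/$max_attempts)" >&2
  sleep 2
done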
.gitea/workflows/update-clan-core-for-checks.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
name: "Update pinned clan-core for checks"
on:
  repository_dispatch:
  workflow_dispatch:
  schedule:
    - cron: "51 2 * * *"
jobs:
  update-pinned-clan-core:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: true
      - name: Update clan-core for checks
        run: nix run .#update-clan-core-for-checks
      - name: Create pull request
        env:
          CI_BOT_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
        run: |
          export GIT_AUTHOR_NAME=clan-bot GIT_AUTHOR_EMAIL=clan-bot@clan.lol GIT_COMMITTER_NAME=clan-bot GIT_COMMITTER_EMAIL=clan-bot@clan.lol
          git commit -am "Update pinned clan-core for checks"

          # Use shared PR creation script
          export PR_BRANCH="update-clan-core-for-checks"
          export PR_TITLE="Update Clan Core for Checks"
          export PR_BODY="This PR updates the pinned clan-core flake input that is used for checks."

          ./.gitea/workflows/create-pr.sh
@@ -19,11 +19,8 @@ jobs:
     uses: Mic92/update-flake-inputs-gitea@main
     with:
       # Exclude private flakes and update-clan-core checks flake
-      exclude-patterns: "checks/impure/flake.nix"
+      exclude-patterns: "devFlake/private/flake.nix,checks/impure/flake.nix"
       auto-merge: true
       git-author-name: "clan-bot"
       git-committer-name: "clan-bot"
       git-author-email: "clan-bot@clan.lol"
       git-committer-email: "clan-bot@clan.lol"
       gitea-token: ${{ secrets.CI_BOT_TOKEN }}
       github-token: ${{ secrets.CI_BOT_GITHUB_TOKEN }}
.gitea/workflows/update-private-flake-inputs.yml (new file, 40 lines)
@@ -0,0 +1,40 @@
name: "Update private flake inputs"
on:
  repository_dispatch:
  workflow_dispatch:
  schedule:
    - cron: "0 3 * * *" # Run daily at 3 AM
jobs:
  update-private-flake:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: true
      - name: Update private flake inputs
        run: |
          # Update the private flake lock file
          cd devFlake/private
          nix flake update
          cd ../..

          # Update the narHash
          bash ./devFlake/update-private-narhash
      - name: Create pull request
        env:
          CI_BOT_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
        run: |
          export GIT_AUTHOR_NAME=clan-bot GIT_AUTHOR_EMAIL=clan-bot@clan.lol GIT_COMMITTER_NAME=clan-bot GIT_COMMITTER_EMAIL=clan-bot@clan.lol

          # Check if there are any changes
          if ! git diff --quiet; then
            git add devFlake/private/flake.lock devFlake/private.narHash
            git commit -m "Update dev flake"

            # Use shared PR creation script
            export PR_BRANCH="update-dev-flake"
            export PR_TITLE="Update dev flake"
            export PR_BODY="This PR updates the dev flake inputs and corresponding narHash."
          else
            echo "No changes detected in dev flake inputs"
          fi
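The `if ! git diff --quiet` guard works because `git diff --quiet` exits non-zero exactly when tracked files have unstaged changes. A standalone sketch of the same gate:

#!/usr/bin/env bash
set -euo pipefail

# Exit status drives the branch: 0 = clean worktree, non-zero = unstaged changes.
if ! git diff --quiet; then
  echo "changes detected: would commit and open a PR"
else
  echo "no changes detected: nothing to do"
fi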
.gitignore (vendored)
@@ -39,6 +39,7 @@ select
 # Generated files
 pkgs/clan-app/ui/api/API.json
 pkgs/clan-app/ui/api/API.ts
 pkgs/clan-app/ui/api/Inventory.ts
 pkgs/clan-app/ui/api/modules_schemas.json
 pkgs/clan-app/ui/api/schema.json
 pkgs/clan-app/ui/.fonts
CODEOWNERS
@@ -1,20 +1,2 @@
-clanServices/.* @pinpox @kenji
-
-lib/test/container-test-driver/.* @DavHau @mic92
-lib/modules/inventory/.* @hsjobeki
-lib/modules/inventoryClass/.* @hsjobeki
-
-pkgs/clan-app/ui/.* @hsjobeki @brianmcgee
-pkgs/clan-app/clan_app/.* @qubasa @hsjobeki
-
-pkgs/clan-cli/clan_cli/.* @lassulus @mic92 @kenji
-pkgs/clan-cli/clan_cli/(secrets|vars)/.* @DavHau @lassulus
-
-pkgs/clan-cli/clan_lib/log_machines/.* @Qubasa
-pkgs/clan-cli/clan_lib/ssh/.* @Qubasa @Mic92 @lassulus
-pkgs/clan-cli/clan_lib/tags/.* @hsjobeki
-pkgs/clan-cli/clan_lib/persist/.* @hsjobeki
-pkgs/clan-cli/clan_lib/flake/.* @lassulus
-
-pkgs/clan-cli/api.py @hsjobeki
-pkgs/clan-cli/openapi.py @hsjobeki
+nixosModules/clanCore/vars/.* @lopter
+pkgs/clan-cli/clan_cli/(secrets|vars)/.* @lopter
@@ -24,7 +24,7 @@ If you're new to Clan and eager to dive in, start with our quickstart guide and

 In the Clan ecosystem, security is paramount. Learn how to handle secrets effectively:

-- **Secrets Management**: Securely manage secrets by consulting [Vars](https://docs.clan.lol/concepts/generators/)<!-- [secrets.md](docs/site/concepts/generators.md) -->.
+- **Secrets Management**: Securely manage secrets by consulting [secrets](https://docs.clan.lol/guides/getting-started/secrets/)<!-- [secrets.md](docs/site/guides/getting-started/secrets.md) -->.

 ### Contributing to Clan
checks/borgbackup-legacy/default.nix (new file, 51 lines)
@@ -0,0 +1,51 @@
(
  { ... }:
  {
    name = "borgbackup";

    nodes.machine =
      { self, pkgs, ... }:
      {
        imports = [
          self.clanModules.borgbackup
          self.nixosModules.clanCore
          {
            services.openssh.enable = true;
            services.borgbackup.repos.testrepo = {
              authorizedKeys = [ (builtins.readFile ../assets/ssh/pubkey) ];
            };
          }
          {
            clan.core.settings.directory = ./.;
            clan.core.state.testState.folders = [ "/etc/state" ];
            environment.etc.state.text = "hello world";
            systemd.tmpfiles.settings."vmsecrets" = {
              "/etc/secrets/borgbackup/borgbackup.ssh" = {
                C.argument = "${../assets/ssh/privkey}";
                z = {
                  mode = "0400";
                  user = "root";
                };
              };
              "/etc/secrets/borgbackup/borgbackup.repokey" = {
                C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
                z = {
                  mode = "0400";
                  user = "root";
                };
              };
            };
            # clan.core.facts.secretStore = "vm";
            clan.core.vars.settings.secretStore = "vm";

            clan.borgbackup.destinations.test.repo = "borg@localhost:.";
          }
        ];
      };
    testScript = ''
      start_all()
      machine.systemctl("start --wait borgbackup-job-test.service")
      assert "machine-test" in machine.succeed("BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes /run/current-system/sw/bin/borg-job-test list")
    '';
  }
)
@@ -1,6 +1,6 @@
 { fetchgit }:
 fetchgit {
   url = "https://git.clan.lol/clan/clan-core.git";
-  rev = "5d884cecc2585a29b6a3596681839d081b4de192";
-  sha256 = "09is1afmncamavb2q88qac37vmsijxzsy1iz1vr6gsyjq2rixaxc";
+  rev = "eea93ea22c9818da67e148ba586277bab9e73cea";
+  sha256 = "sha256-PV0Z+97QuxQbkYSVuNIJwUNXMbHZG/vhsA9M4cDTCOE=";
 }
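When the pin is bumped by hand, the new hash has to be recomputed. One possible way to do that (assumes `nix-prefetch-git` from nixpkgs and a Nix with the `nix-command` feature enabled):

# Prefetch the pinned revision and print its sha256:
nix-prefetch-git https://git.clan.lol/clan/clan-core.git \
  --rev eea93ea22c9818da67e148ba586277bab9e73cea

# The hunk also switches hash notation; a base32 sha256 like the old one
# converts to the SRI form used on the new side:
nix hash to-sri --type sha256 09is1afmncamavb2q88qac37vmsijxzsy1iz1vr6gsyjq2rixaxc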
@@ -2,7 +2,6 @@
   self,
   lib,
   inputs,
-  privateInputs ? { },
   ...
 }:
 let
@@ -20,28 +19,18 @@ let
   nixosLib = import (self.inputs.nixpkgs + "/nixos/lib") { };
 in
 {
-  imports =
-    let
-      clanCoreModulesDir = ../nixosModules/clanCore;
-      getClanCoreTestModules =
-        let
-          moduleNames = attrNames (builtins.readDir clanCoreModulesDir);
-          testPaths = map (
-            moduleName: clanCoreModulesDir + "/${moduleName}/tests/flake-module.nix"
-          ) moduleNames;
-        in
-        filter pathExists testPaths;
-    in
-    getClanCoreTestModules
-    ++ filter pathExists [
-      ./devshell/flake-module.nix
-      ./flash/flake-module.nix
-      ./installation/flake-module.nix
-      ./update/flake-module.nix
-      ./morph/flake-module.nix
-      ./nixos-documentation/flake-module.nix
-      ./dont-depend-on-repo-root.nix
-    ];
+  imports = filter pathExists [
+    ./backups/flake-module.nix
+    ../nixosModules/clanCore/machine-id/tests/flake-module.nix
+    ../nixosModules/clanCore/state-version/tests/flake-module.nix
+    ./devshell/flake-module.nix
+    ./flash/flake-module.nix
+    ./impure/flake-module.nix
+    ./installation/flake-module.nix
+    ./morph/flake-module.nix
+    ./nixos-documentation/flake-module.nix
+    ./dont-depend-on-repo-root.nix
+  ];
   flake.check = genAttrs [ "x86_64-linux" "aarch64-darwin" ] (
     system:
     let
@@ -96,11 +85,13 @@

       # Container Tests
       nixos-test-container = self.clanLib.test.containerTest ./container nixosTestArgs;
+      # nixos-test-zt-tcp-relay = self.clanLib.test.containerTest ./zt-tcp-relay nixosTestArgs;
+      # nixos-test-matrix-synapse = self.clanLib.test.containerTest ./matrix-synapse nixosTestArgs;
+      # nixos-test-postgresql = self.clanLib.test.containerTest ./postgresql nixosTestArgs;
       nixos-test-user-firewall-iptables = self.clanLib.test.containerTest ./user-firewall/iptables.nix nixosTestArgs;
       nixos-test-user-firewall-nftables = self.clanLib.test.containerTest ./user-firewall/nftables.nix nixosTestArgs;

       service-dummy-test = import ./service-dummy-test nixosTestArgs;
-      wireguard = import ./wireguard nixosTestArgs;
       service-dummy-test-from-flake = import ./service-dummy-test-from-flake nixosTestArgs;
     };
@@ -110,8 +101,6 @@
         "dont-depend-on-repo-root"
       ];

-      # Temporary workaround: Filter out docs package and devshell for aarch64-darwin due to CI builder hangs
-      # TODO: Remove this filter once macOS CI builder is updated
       flakeOutputs =
         lib.mapAttrs' (
           name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
@@ -119,18 +108,8 @@
         // lib.mapAttrs' (
           name: config: lib.nameValuePair "darwin-${name}" config.config.system.build.toplevel
         ) (self.darwinConfigurations or { })
-        // lib.mapAttrs' (n: lib.nameValuePair "package-${n}") (
-          if system == "aarch64-darwin" then
-            lib.filterAttrs (n: _: n != "docs" && n != "deploy-docs" && n != "docs-options") packagesToBuild
-          else
-            packagesToBuild
-        )
-        // lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") (
-          if system == "aarch64-darwin" then
-            lib.filterAttrs (n: _: n != "docs") self'.devShells
-          else
-            self'.devShells
-        )
+        // lib.mapAttrs' (n: lib.nameValuePair "package-${n}") packagesToBuild
+        // lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
         // lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (
           self'.legacyPackages.homeConfigurations or { }
         );
@@ -138,13 +117,37 @@
       nixosTests
       // flakeOutputs
       // {
-        clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
-          cp -r ${privateInputs.clan-core-for-checks} $out
-          chmod -R +w $out
-          cp ${../flake.lock} $out/flake.lock
-
-          # Create marker file to disable private flake loading in tests
-          touch $out/.skip-private-inputs
-        '';
+        # TODO: Automatically provide this check to downstream users to check their modules
+        clan-modules-json-compatible =
+          let
+            allSchemas = lib.mapAttrs (
+              _n: m:
+              let
+                schema =
+                  (self.clanLib.evalService {
+                    modules = [ m ];
+                    prefix = [
+                      "checks"
+                      system
+                    ];
+                  }).config.result.api.schema;
+              in
+              schema
+            ) self.clan.modules;
+          in
+          pkgs.runCommand "combined-result"
+            {
+              schemaFile = builtins.toFile "schemas.json" (builtins.toJSON allSchemas);
+            }
+            ''
+              mkdir -p $out
+              cat $schemaFile > $out/allSchemas.json
+            '';
+
+        clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
+          cp -r ${pkgs.callPackage ./clan-core-for-checks.nix { }} $out
+          chmod +w $out/flake.lock
+          cp ${../flake.lock} $out/flake.lock
+        '';
       };
       packages = lib.optionalAttrs (pkgs.stdenv.isLinux) {
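If the new `clan-modules-json-compatible` check ends up under the conventional flake checks attribute (an assumption; the hunk wires outputs through `flake.check` above), its output could be inspected like this:

nix build .#checks.x86_64-linux.clan-modules-json-compatible
jq 'keys' result/allSchemas.json   # one schema per clan module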
@@ -50,8 +50,7 @@
             self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
             self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
             self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript.drvPath
-          ]
-          ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+          ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
           closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
         in
         {
@@ -61,10 +60,6 @@
           nodes.target = {
             virtualisation.emptyDiskImages = [ 4096 ];
             virtualisation.memorySize = 4096;
-
-            virtualisation.useNixStoreImage = true;
-            virtualisation.writableStore = true;

             environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
             environment.etc."install-closure".source = "${closureInfo}/store-paths";

@@ -83,8 +78,8 @@
           start_all()

           # Some distros like to automount disks with spaces
-          machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdc && mount /dev/vdc "/mnt/with spaces"')
-          machine.succeed("clan flash write --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.hostPlatform.system}")
+          machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdb && mount /dev/vdb "/mnt/with spaces"')
+          machine.succeed("clan flash write --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdb test-flash-machine-${pkgs.hostPlatform.system}")
         '';
       } { inherit pkgs self; };
     };
checks/impure/flake-module.nix (new file, 51 lines)
@@ -0,0 +1,51 @@
{
  perSystem =
    {
      pkgs,
      lib,
      self',
      ...
    }:
    {
      # a script that executes all other checks
      packages.impure-checks = pkgs.writeShellScriptBin "impure-checks" ''
        #!${pkgs.bash}/bin/bash
        set -euo pipefail

        unset CLAN_DIR

        export PATH="${
          lib.makeBinPath (
            [
              pkgs.gitMinimal
              pkgs.nix
              pkgs.coreutils
              pkgs.rsync # needed to have rsync installed on the dummy ssh server
            ]
            ++ self'.packages.clan-cli-full.runtimeDependencies
          )
        }"
        ROOT=$(git rev-parse --show-toplevel)
        cd "$ROOT/pkgs/clan-cli"

        # Set up custom git configuration for tests
        export GIT_CONFIG_GLOBAL=$(mktemp)
        git config --file "$GIT_CONFIG_GLOBAL" user.name "Test User"
        git config --file "$GIT_CONFIG_GLOBAL" user.email "test@example.com"
        export GIT_CONFIG_SYSTEM=/dev/null

        # this disables dynamic dependency loading in clan-cli
        export CLAN_NO_DYNAMIC_DEPS=1

        jobs=$(nproc)
        # Spawning worker in pytest is relatively slow, so we limit the number of jobs to 13
        # (current number of impure tests)
        jobs="$((jobs > 13 ? 13 : jobs))"

        nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -n $jobs -m impure ./clan_cli $@"

        # Clean up temporary git config
        rm -f "$GIT_CONFIG_GLOBAL"
      '';
    };
}
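The script forwards any extra arguments to pytest via the trailing `$@`, so a subset of the impure suite can be selected from the command line. Assuming the package is exposed as shown (the `-k` keyword is illustrative):

# Run every impure test:
nix run .#impure-checks

# Forward pytest arguments, e.g. select tests by keyword:
nix run .#impure-checks -- -k flash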
@@ -1,7 +1,6 @@
 {
   self,
   lib,
-  privateInputs,

   ...
 }:
@@ -150,17 +149,17 @@
       # vm-test-run-test-installation-> target: To debug, enter the VM and run 'systemctl status backdoor.service'.
       checks =
         let
           # Custom Python package for port management utilities
           closureInfo = pkgs.closureInfo {
             rootPaths = [
-              privateInputs.clan-core-for-checks
+              self.checks.x86_64-linux.clan-core-for-checks
               self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
               self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.initialRamdisk
               self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
               pkgs.stdenv.drvPath
               pkgs.bash.drvPath
               pkgs.buildPackages.xorg.lndir
-            ]
-            ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+            ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
           };
         in
         pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
@@ -226,7 +225,7 @@
             "install",
             "--phases", "disko,install",
             "--debug",
-            "--flake", str(flake_dir),
+            "--flake", flake_dir,
             "--yes", "test-install-machine-without-system",
             "--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
             "-i", ssh_conn.ssh_key,
@@ -241,7 +240,7 @@
             target.shutdown()
         except BrokenPipeError:
             # qemu has already exited
-            target.connected = False
+            pass

         # Create a new machine instance that boots from the installed system
         installed_machine = create_test_machine(target, "${pkgs.qemu_test}", name="after_install")
@@ -290,6 +289,9 @@
         assert not os.path.exists(hw_config_file), "hardware-configuration.nix should not exist initially"
         assert not os.path.exists(facter_file), "facter.json should not exist initially"

+        # Set CLAN_FLAKE for the commands
+        os.environ["CLAN_FLAKE"] = flake_dir
+
         # Test facter backend
         clan_cmd = [
             "${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",

@@ -159,8 +159,7 @@ let
               pkgs.stdenv.drvPath
               pkgs.bash.drvPath
               pkgs.buildPackages.xorg.lndir
-            ]
-            ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+            ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
           };

 in
checks/matrix-synapse/default.nix (new file, 83 lines)
@@ -0,0 +1,83 @@
(
  { pkgs, ... }:
  {
    name = "matrix-synapse";

    nodes.machine =
      {
        config,
        self,
        lib,
        ...
      }:
      {
        imports = [
          self.clanModules.matrix-synapse
          self.nixosModules.clanCore
          {
            clan.core.settings.directory = ./.;

            services.nginx.virtualHosts."matrix.clan.test" = {
              enableACME = lib.mkForce false;
              forceSSL = lib.mkForce false;
            };
            clan.nginx.acme.email = "admins@clan.lol";
            clan.matrix-synapse = {
              server_tld = "clan.test";
              app_domain = "matrix.clan.test";
            };
            clan.matrix-synapse.users.admin.admin = true;
            clan.matrix-synapse.users.someuser = { };

            clan.core.facts.secretStore = "vm";
            clan.core.vars.settings.secretStore = "vm";
            clan.core.vars.settings.publicStore = "in_repo";

            # because we use systemd-tmpfiles to copy the secrets, we need to a separate systemd-tmpfiles call to provision them.
            boot.postBootCommands = "${config.systemd.package}/bin/systemd-tmpfiles --create /etc/tmpfiles.d/00-vmsecrets.conf";

            systemd.tmpfiles.settings."00-vmsecrets" = {
              # run before 00-nixos.conf
              "/etc/secrets" = {
                d.mode = "0700";
                z.mode = "0700";
              };
              "/etc/secrets/matrix-synapse/synapse-registration_shared_secret" = {
                f.argument = "supersecret";
                z = {
                  mode = "0400";
                  user = "root";
                };
              };
              "/etc/secrets/matrix-password-admin/matrix-password-admin" = {
                f.argument = "matrix-password1";
                z = {
                  mode = "0400";
                  user = "root";
                };
              };
              "/etc/secrets/matrix-password-someuser/matrix-password-someuser" = {
                f.argument = "matrix-password2";
                z = {
                  mode = "0400";
                  user = "root";
                };
              };
            };
          }
        ];
      };
    testScript = ''
      start_all()
      machine.wait_for_unit("matrix-synapse")
      machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 8008")
      machine.wait_until_succeeds("${pkgs.curl}/bin/curl -Ssf -L http://localhost/_matrix/static/ -H 'Host: matrix.clan.test'")

      machine.systemctl("restart matrix-synapse >&2") # check if user creation is idempotent
      machine.execute("journalctl -u matrix-synapse --no-pager >&2")
      machine.wait_for_unit("matrix-synapse")
      machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 8008")
      machine.succeed("${pkgs.curl}/bin/curl -Ssf -L http://localhost/_matrix/static/ -H 'Host: matrix.clan.test'")
    '';
  }
)
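The test provisions its fake secrets with systemd-tmpfiles `f`/`z` rules and re-applies them after boot. Sketched as the equivalent manual steps inside the VM (paths taken from the module above):

# Re-apply the tmpfiles rules that create the secret files:
systemd-tmpfiles --create /etc/tmpfiles.d/00-vmsecrets.conf

# Check that the z-rules produced the expected mode and owner:
stat -c '%a %U' /etc/secrets/matrix-synapse/synapse-registration_shared_secret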
checks/matrix-synapse/synapse-registration_shared_secret (new file, 1 line)
@@ -0,0 +1 @@
registration_shared_secret: supersecret
@@ -35,8 +35,7 @@
             pkgs.stdenv.drvPath
             pkgs.stdenvNoCC
             self.nixosConfigurations.test-morph-machine.config.system.build.toplevel
-          ]
-          ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+          ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
           closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
         in
checks/postgresql/default.nix (new file, 73 lines)
@@ -0,0 +1,73 @@
({
  name = "postgresql";

  nodes.machine =
    { self, config, ... }:
    {
      imports = [
        self.nixosModules.clanCore
        self.clanModules.postgresql
        self.clanModules.localbackup
      ];
      clan.postgresql.users.test = { };
      clan.postgresql.databases.test.create.options.OWNER = "test";
      clan.postgresql.databases.test.restore.stopOnRestore = [ "sample-service" ];
      clan.localbackup.targets.hdd.directory = "/mnt/external-disk";
      clan.core.settings.directory = ./.;

      systemd.services.sample-service = {
        wantedBy = [ "multi-user.target" ];
        script = ''
          while true; do
            echo "Hello, world!"
            sleep 5
          done
        '';
      };

      environment.systemPackages = [ config.services.postgresql.package ];
    };
  testScript =
    { nodes, ... }:
    ''
      start_all()
      machine.wait_for_unit("postgresql")
      machine.wait_for_unit("sample-service")
      # Create a test table
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -c 'CREATE TABLE test (id serial PRIMARY KEY);' test")

      machine.succeed("/run/current-system/sw/bin/localbackup-create >&2")
      timestamp_before = int(machine.succeed("systemctl show --property=ExecMainStartTimestampMonotonic sample-service | cut -d= -f2").strip())

      machine.succeed("test -e /mnt/external-disk/snapshot.0/machine/var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'INSERT INTO test DEFAULT VALUES;'")
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'DROP TABLE test;'")
      machine.succeed("test -e /var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")

      machine.succeed("rm -rf /var/backup/postgres")

      machine.succeed("NAME=/mnt/external-disk/snapshot.0 FOLDERS=/var/backup/postgres/test /run/current-system/sw/bin/localbackup-restore >&2")
      machine.succeed("test -e /var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")

      machine.succeed("""
        set -x
        ${nodes.machine.clan.core.state.test.postRestoreCommand}
      """)
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -l >&2")
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c '\dt' >&2")

      timestamp_after = int(machine.succeed("systemctl show --property=ExecMainStartTimestampMonotonic sample-service | cut -d= -f2").strip())
      assert timestamp_before < timestamp_after, f"{timestamp_before} >= {timestamp_after}: expected sample-service to be restarted after restore"

      # Check that the table is still there
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'SELECT * FROM test;'")
      output = machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql --csv -c \"SELECT datdba::regrole FROM pg_database WHERE datname = 'test'\"")
      owner = output.split("\n")[1]
      assert owner == "test", f"Expected database owner to be 'test', got '{owner}'"

      # check if restore works if the database does not exist
      machine.succeed("runuser -u postgres -- dropdb test")
      machine.succeed("${nodes.machine.clan.core.state.test.postRestoreCommand}")
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c '\dt' >&2")
    '';
})
@@ -28,11 +28,19 @@ nixosLib.runTest (
     testScript =
       { nodes, ... }:
       ''
-        import subprocess
         from nixos_test_lib.nix_setup import setup_nix_in_nix  # type: ignore[import-untyped]

         setup_nix_in_nix(None)  # No closure info for this test

+        def run_clan(cmd: list[str], **kwargs) -> str:
+            import subprocess
+            clan = "${clan-core.packages.${hostPkgs.system}.clan-cli}/bin/clan"
+            clan_args = ["--flake", "${config.clan.test.flakeForSandbox}"]
+            return subprocess.run(
+                ["${hostPkgs.util-linux}/bin/unshare", "--user", "--map-user", "1000", "--map-group", "1000", clan, *cmd, *clan_args],
+                **kwargs,
+                check=True,
+            ).stdout
+
         start_all()
         admin1.wait_for_unit("multi-user.target")
         peer1.wait_for_unit("multi-user.target")
@@ -48,13 +56,7 @@ nixosLib.runTest (
         # Check that the file is in the '0644' mode
         assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}"

-        # Run clan command
-        result = subprocess.run(
-            ["${
-              clan-core.packages.${hostPkgs.system}.clan-cli
-            }/bin/clan", "machines", "list", "--flake", "${config.clan.test.flakeForSandbox}"],
-            check=True
-        )
+        run_clan(["machines", "list"])
       '';
   }
 )
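Stripped of the Nix interpolations, the `run_clan` helper introduced above reduces to this shape (paths illustrative; `--map-user`/`--map-group` require util-linux 2.38 or newer):

# Run clan unprivileged inside a fresh user namespace, mapped to uid/gid 1000:
unshare --user --map-user 1000 --map-group 1000 \
  clan machines list --flake /path/to/flake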
@@ -1,307 +0,0 @@
{ self, ... }:
{
  # Machine for update test
  clan.machines.test-update-machine = {
    imports = [
      self.nixosModules.test-update-machine
      # Import the configuration file that will be created/updated during the test
      ./test-update-machine/configuration.nix
    ];
  };
  flake.nixosModules.test-update-machine =
    { lib, modulesPath, ... }:
    {
      imports = [
        (modulesPath + "/testing/test-instrumentation.nix")
        (modulesPath + "/profiles/qemu-guest.nix")
        self.clanLib.test.minifyModule
        ../../lib/test/container-test-driver/nixos-module.nix
      ];

      # Apply patch to fix x-initrd.mount filesystem handling in switch-to-configuration-ng
      nixpkgs.overlays = [
        (_final: prev: {
          switch-to-configuration-ng = prev.switch-to-configuration-ng.overrideAttrs (old: {
            patches = (old.patches or [ ]) ++ [ ./switch-to-configuration-initrd-mount-fix.patch ];
          });
        })
      ];

      networking.hostName = "update-machine";

      environment.etc."install-successful".text = "ok";

      # Enable SSH and add authorized key for testing
      services.openssh.enable = true;
      services.openssh.settings.PasswordAuthentication = false;
      users.users.root.openssh.authorizedKeys.keys = [ (builtins.readFile ../assets/ssh/pubkey) ];
      services.openssh.knownHosts.localhost.publicKeyFile = ../assets/ssh/pubkey;
      services.openssh.hostKeys = [
        {
          path = ../assets/ssh/privkey;
          type = "ed25519";
        }
      ];
      security.sudo.wheelNeedsPassword = false;

      boot.consoleLogLevel = lib.mkForce 100;
      boot.kernelParams = [ "boot.shell_on_fail" ];

      boot.isContainer = true;
      nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
      # Preserve the IP addresses assigned by the test framework
      # (based on virtualisation.vlans = [1] and node number 1)
      networking.interfaces.eth1 = {
        useDHCP = false;
        ipv4.addresses = [
          {
            address = "192.168.1.1";
            prefixLength = 24;
          }
        ];
        ipv6.addresses = [
          {
            address = "2001:db8:1::1";
            prefixLength = 64;
          }
        ];
      };

      # Define the mounts that exist in the container to prevent them from being stopped
      fileSystems = {
        "/" = {
          device = "/dev/disk/by-label/nixos";
          fsType = "ext4";
          options = [ "x-initrd.mount" ];
        };
        "/nix/.rw-store" = {
          device = "tmpfs";
          fsType = "tmpfs";
          options = [
            "mode=0755"
          ];
        };
        "/nix/store" = {
          device = "overlay";
          fsType = "overlay";
          options = [
            "lowerdir=/nix/.ro-store"
            "upperdir=/nix/.rw-store/upper"
            "workdir=/nix/.rw-store/work"
          ];
        };
      };
    };

  perSystem =
    {
      pkgs,
      ...
    }:
    {
      checks =
        pkgs.lib.optionalAttrs (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system == "x86_64-linux")
          {
            nixos-test-update =
              let
                closureInfo = pkgs.closureInfo {
                  rootPaths = [
                    self.packages.${pkgs.system}.clan-cli
                    self.checks.${pkgs.system}.clan-core-for-checks
                    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-update-machine.config.system.build.toplevel
                    pkgs.stdenv.drvPath
                    pkgs.bash.drvPath
                    pkgs.buildPackages.xorg.lndir
                  ]
                  ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
                };
              in
              self.clanLib.test.containerTest {
                name = "update";
                nodes.machine = {
                  imports = [ self.nixosModules.test-update-machine ];
                };
                extraPythonPackages = _p: [
                  self.legacyPackages.${pkgs.system}.nixosTestLib
                ];

                testScript = ''
                  import tempfile
                  import os
                  import subprocess
                  from nixos_test_lib.ssh import setup_ssh_connection  # type: ignore[import-untyped]
                  from nixos_test_lib.nix_setup import prepare_test_flake  # type: ignore[import-untyped]

                  start_all()
                  machine.wait_for_unit("multi-user.target")

                  # Verify initial state
                  machine.succeed("test -f /etc/install-successful")
                  machine.fail("test -f /etc/update-successful")

                  # Set up test environment
                  with tempfile.TemporaryDirectory() as temp_dir:
                      # Prepare test flake and Nix store
                      flake_dir = prepare_test_flake(
                          temp_dir,
                          "${self.checks.x86_64-linux.clan-core-for-checks}",
                          "${closureInfo}"
                      )
                      (flake_dir / ".clan-flake").write_text("")  # Ensure .clan-flake exists

                      # Set up SSH connection
                      ssh_conn = setup_ssh_connection(
                          machine,
                          temp_dir,
                          "${../assets/ssh/privkey}"
                      )

                      # Update the machine configuration to add a new file
                      machine_config_path = os.path.join(flake_dir, "machines", "test-update-machine", "configuration.nix")
                      os.makedirs(os.path.dirname(machine_config_path), exist_ok=True)

                      # Note: update command doesn't accept -i flag, SSH key must be in ssh-agent
                      # Start ssh-agent and add the key
                      agent_output = subprocess.check_output(["${pkgs.openssh}/bin/ssh-agent", "-s"], text=True)
                      for line in agent_output.splitlines():
                          if line.startswith("SSH_AUTH_SOCK="):
                              os.environ["SSH_AUTH_SOCK"] = line.split("=", 1)[1].split(";")[0]
                          elif line.startswith("SSH_AGENT_PID="):
                              os.environ["SSH_AGENT_PID"] = line.split("=", 1)[1].split(";")[0]

                      # Add the SSH key to the agent
                      subprocess.run(["${pkgs.openssh}/bin/ssh-add", ssh_conn.ssh_key], check=True)

                      ##############
                      print("TEST: update with --build-host local")
                      with open(machine_config_path, "w") as f:
                          f.write("""
                      {
                        environment.etc."update-build-local-successful".text = "ok";
                      }
                      """)

                      # rsync the flake into the container
                      os.environ["PATH"] = f"{os.environ['PATH']}:${pkgs.openssh}/bin"
                      subprocess.run(
                          [
                              "${pkgs.rsync}/bin/rsync",
                              "-a",
                              "--delete",
                              "-e",
                              "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no",
                              f"{str(flake_dir)}/",
                              f"root@192.168.1.1:/flake",
                          ],
                          check=True
                      )

                      # allow machine to ssh into itself
                      subprocess.run([
                          "ssh",
                          "-o", "UserKnownHostsFile=/dev/null",
                          "-o", "StrictHostKeyChecking=no",
                          f"root@192.168.1.1",
                          "mkdir -p /root/.ssh && chmod 700 /root/.ssh && echo \"$(cat \"${../assets/ssh/privkey}\")\" > /root/.ssh/id_ed25519 && chmod 600 /root/.ssh/id_ed25519",
                      ], check=True)

                      # install the clan-cli package into the container's Nix store
                      subprocess.run(
                          [
                              "${pkgs.nix}/bin/nix",
                              "copy",
                              "--to",
                              "ssh://root@192.168.1.1",
                              "--no-check-sigs",
                              f"${self.packages.${pkgs.system}.clan-cli}",
                              "--extra-experimental-features", "nix-command flakes",
                              "--from", f"{os.environ["TMPDIR"]}/store"
                          ],
                          check=True,
                          env={
                              **os.environ,
                              "NIX_SSHOPTS": "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no",
                          },
                      )

                      # Run ssh on the host to run the clan update command via --build-host local
                      subprocess.run([
                          "ssh",
                          "-o", "UserKnownHostsFile=/dev/null",
                          "-o", "StrictHostKeyChecking=no",
                          f"root@192.168.1.1",
                          "${self.packages.${pkgs.system}.clan-cli}/bin/clan",
                          "machines",
                          "update",
                          "--debug",
                          "--flake", "/flake",
                          "--host-key-check", "none",
                          "--upload-inputs",  # Use local store instead of fetching from network
                          "--build-host", "localhost",
                          "test-update-machine",
                          "--target-host", f"root@localhost",
                      ], check=True)

                      # Verify the update was successful
                      machine.succeed("test -f /etc/update-build-local-successful")

                      ##############
                      print("TEST: update with --target-host")

                      with open(machine_config_path, "w") as f:
                          f.write("""
                      {
                        environment.etc."target-host-update-successful".text = "ok";
                      }
                      """)

                      # Run clan update command
                      subprocess.run([
                          "${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
                          "machines",
                          "update",
                          "--debug",
                          "--flake", flake_dir,
                          "--host-key-check", "none",
                          "--upload-inputs",  # Use local store instead of fetching from network
                          "test-update-machine",
                          "--target-host", f"root@192.168.1.1:{ssh_conn.host_port}",
                      ], check=True)

                      # Verify the update was successful
                      machine.succeed("test -f /etc/target-host-update-successful")

                      ##############
                      print("TEST: update with --build-host")
                      # Update configuration again
                      with open(machine_config_path, "w") as f:
                          f.write("""
                      {
                        environment.etc."build-host-update-successful".text = "ok";
                      }
                      """)

                      # Run clan update command with --build-host
                      subprocess.run([
                          "${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
                          "machines",
                          "update",
                          "--debug",
                          "--flake", flake_dir,
                          "--host-key-check", "none",
                          "--upload-inputs",  # Use local store instead of fetching from network
                          "--build-host", f"root@192.168.1.1:{ssh_conn.host_port}",
                          "test-update-machine",
                          "--target-host", f"root@192.168.1.1:{ssh_conn.host_port}",
                      ], check=True)

                      # Verify the second update was successful
                      machine.succeed("test -f /etc/build-host-update-successful")
                '';
              } { inherit pkgs self; };
          };
    };
}
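The deleted test parsed `ssh-agent -s` output by hand and pushed the variables into `os.environ`. In plain shell the same setup is usually two lines — shown here only to clarify what the Python block was doing:

# ssh-agent -s prints shell assignments; eval exports SSH_AUTH_SOCK and
# SSH_AGENT_PID into the current environment.
eval "$(ssh-agent -s)"
ssh-add /path/to/privkey   # key path illustrative

# Kill the agent when the script exits:
trap 'kill "$SSH_AGENT_PID"' EXIT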
@@ -1,17 +0,0 @@
diff --git a/src/main.rs b/src/main.rs
index 8baf5924a7db..1234567890ab 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1295,6 +1295,12 @@ won't take effect until you reboot the system.

     for (mountpoint, current_filesystem) in current_filesystems {
         // Use current version of systemctl binary before daemon is reexeced.
+
+        // Skip filesystem comparison if x-initrd.mount is present in options
+        if current_filesystem.options.contains("x-initrd.mount") {
+            continue;
+        }
+
         let unit = path_to_unit_name(&current_system_bin, &mountpoint);
         if let Some(new_filesystem) = new_filesystems.get(&mountpoint) {
             if current_filesystem.fs_type != new_filesystem.fs_type

@@ -1,3 +0,0 @@
{
  # Initial empty configuration
}
@@ -1,115 +0,0 @@
{
  pkgs,
  nixosLib,
  clan-core,
  lib,
  ...
}:
nixosLib.runTest (
  { ... }:

  let
    machines = [
      "controller1"
      "controller2"
      "peer1"
      "peer2"
      "peer3"
    ];
  in
  {
    imports = [
      clan-core.modules.nixosTest.clanTest
    ];

    hostPkgs = pkgs;

    name = "wireguard";

    clan = {
      directory = ./.;
      modules."@clan/wireguard" = import ../../clanServices/wireguard/default.nix;
      inventory = {

        machines = lib.genAttrs machines (_: { });

        instances = {

          /*
            wg-test-one
            ┌───────────────────────────────┐
            │            ◄─────────────     │
            │   controller2         controller1
            │     ▲    ─────────────►  ▲  ▲
            │   │ │ │ │             │ │ │ │ │
            │   │ │ │ │             │ │ │ │ │
            │   │ │ │ │             │ │ │ │ │
            │   │ │ │ └───────────────┐ │ │ │ │
            │   │ │ └──────────────┐  │ │ │ │ │
            │   ▼ │                ▼  ▼      ▼
            └─► peer2  │         peer1      peer3
                       │           ▲
                       └──────────┘
          */

          wg-test-one = {

            module.name = "@clan/wireguard";
            module.input = "self";

            roles.controller.machines."controller1".settings = {
              endpoint = "192.168.1.1";
            };

            roles.controller.machines."controller2".settings = {
              endpoint = "192.168.1.2";
            };

            roles.peer.machines = {
              peer1.settings.controller = "controller1";
              peer2.settings.controller = "controller2";
              peer3.settings.controller = "controller1";
            };
          };

          # TODO: Will this actually work with conflicting ports? Can we re-use interfaces?
          #wg-test-two = {
          #  module.name = "@clan/wireguard";

          #  roles.controller.machines."controller1".settings = {
          #    endpoint = "192.168.1.1";
          #    port = 51922;
          #  };

          #  roles.peer.machines = {
          #    peer1 = { };
          #  };
          #};
        };
      };
    };

    testScript = ''
      start_all()

      # Show all addresses
      machines = [peer1, peer2, peer3, controller1, controller2]
      for m in machines:
          m.systemctl("start network-online.target")

      for m in machines:
          m.wait_for_unit("network-online.target")
          m.wait_for_unit("systemd-networkd.service")

      print("\n\n" + "="*60)
      print("STARTING PING TESTS")
      print("="*60)

      for m1 in machines:
          for m2 in machines:
              if m1 != m2:
                  print(f"\n--- Pinging from {m1.name} to {m2.name}.wg-test-one ---")
                  m1.wait_until_succeeds(f"ping -c1 {m2.name}.wg-test-one >&2")
    '';
  }
)
@@ -1,6 +0,0 @@
|
||||
[
|
||||
{
|
||||
"publickey": "age1rnkc2vmrupy9234clyu7fpur5kephuqs3v7qauaw5zeg00jqjdasefn3cc",
|
||||
"type": "age"
|
||||
}
|
||||
]
|
||||
@@ -1,6 +0,0 @@
|
||||
[
|
||||
{
|
||||
"publickey": "age1t2hhg99d4p2yymuhngcy5ccutp8mvu7qwvg5cdhck303h9e7ha9qnlt635",
|
||||
"type": "age"
|
||||
}
|
||||
]
|
||||
@@ -1,6 +0,0 @@
|
||||
[
|
||||
{
|
||||
"publickey": "age1jts52rzlqcwjc36jkp56a7fmjn3czr7kl9ta2spkfzhvfama33sqacrzzd",
|
||||
"type": "age"
|
||||
}
|
||||
]
|
||||
@@ -1,6 +0,0 @@
|
||||
[
|
||||
{
|
||||
"publickey": "age12nqnp0zd435ckp5p0v2fv4p2x4cvur2mnxe8use2sx3fgy883vaq4ae75e",
|
||||
"type": "age"
|
||||
}
|
||||
]
|
||||
@@ -1,6 +0,0 @@
|
||||
[
|
||||
{
|
||||
"publickey": "age1sglr4zp34drjfydzeweq43fz3uwpul3hkh53lsfa9drhuzwmkqyqn5jegp",
|
||||
"type": "age"
|
||||
}
|
||||
]
|
||||
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:zDF0RiBqaawpg+GaFkuLPomJ01Xu+lgY5JfUzaIk2j03XkCzIf8EMrmn6pRtBP3iUjPBm+gQSTQk6GHTONrixA5hRNyETV+UgQw=,iv:zUUCAGZ0cz4Tc2t/HOjVYNsdnrAOtid/Ns5ak7rnyCk=,tag:z43WtNSue4Ddf7AVu21IKA==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlY1NEdjAzQm5RMFZWY3BJ\nclp6c01FdlZFK3dOSDB4cHc1NTdwMXErMFJFCnIrRVFNZEFYOG1rVUhFd2xsbTJ2\nVkJHNmdOWXlOcHJoQ0QzM1VyZmxmcGcKLS0tIFk1cEx4dFdvNGRwK1FWdDZsb1lR\nV2d1RFZtNzZqVFdtQ1FzNStEcEgyUUkKx8tkxqJz/Ko3xgvhvd6IYiV/lRGmrY13\nUZpYWR9tsQwZAR9dLjCyVU3JRuXeGB1unXC1CO0Ff3R0A/PuuRHh+g==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-08-13T09:19:37Z",
|
||||
"mac": "ENC[AES256_GCM,data:8RGOUhZ2LGmC9ugULwHDgdMrtdo9vzBm3BJmL4XTuNJKm0NlKfgNLi1E4n9DMQ+kD4hKvcwbiUcwSGE8jZD6sm7Sh3bJi/HZCoiWm/O/OIzstli2NNDBGvQBgyWZA5H+kDjZ6aEi6icNWIlm5gsty7KduABnf5B3p0Bn5Uf5Bio=,iv:sGZp0XF+mgocVzAfHF8ATdlSE/5zyz5WUSRMJqNeDQs=,tag:ymYVBRwF5BOSAu5ONU2qKw==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../users/admin
|
||||
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:dHM7zWzqnC1QLRKYpbI2t63kOFnSaQy6ur9zlkLQf17Q03CNrqUsZtdEbwMnLR3llu7eVMhtvVRkXjEkvn3leb9HsNFmtk/DP70=,iv:roEZsBFqRypM106O5sehTzo7SySOJUJgAR738rTtOo8=,tag:VDd9/6uU0SAM7pWRLIUhUQ==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBKTEVYUmVGbUtOcHZ4cnc3\nKzNETnlxaVRKYTI3eWVHdEoyc3l2SnhsZ1J3CnB2RnZrOXM5Uml6TThDUlZjY25J\nbkJ6eUZ2ckN1NWpNUU9IaE93UDJQdlEKLS0tIC95ZDhkU0R1VHhCdldxdW4zSmps\nN3NqL1cvd05hRTRPdDA3R2pzNUFFajgKS+DJH14fH9AvEAa3PoUC1jEqKAzTmExN\nl32FeHTHbGMo1PKeaFm+Eg0WSpAmFE7beBunc5B73SW30ok6x4FcQw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-08-13T09:19:47Z",
|
||||
"mac": "ENC[AES256_GCM,data:77EnuBQyguvkCtobUg8/6zoLHjmeGDrSBZuIXOZBMxdbJjzhRg++qxQjuu6t0FoWATtz7u4Y3/jzUMGffr/N5HegqSq0D2bhv7AqJwBiVaOwd80fRTtM+YiP/zXsCk52Pj/Gadapg208bDPQ1BBDOyz/DrqZ7w//j+ARJjAnugI=,iv:IuTDmJKZEuHXJXjxrBw0gP2t6vpxAYEqbtpnVbavVCY=,tag:4EnpX6rOamtg1O+AaEQahQ==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../users/admin
|
||||
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:wcSsqxTKiMAnzPwxs5DNjcSdLyjVQ9UOrZxfSbOkVfniwx6F7xz6dLNhaDq7MHQ0vRWpg28yNs7NHrp52bYFnb/+eZsis46WiCw=,iv:B4t1lvS2gC601MtsmZfEiEulLWvSGei3/LSajwFS9Vs=,tag:hnRXlZyYEFfLJUrw1SqbSQ==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAybUgya2VEdzMvRG1hdkpu\nM2pGNmcyVmcvYVZ1ZjJlY3A1bXFUUUtkMTI0CmJoRFZmejZjN2UxUXNuc1k5WnE2\nNmxIcnpNQ1lJZ3ZKSmhtSlVURXJTSUUKLS0tIGU4Wi9yZ3VYekJkVW9pNWFHblFk\na0gzbTVKUWdSam1sVjRUaUlTdVd5YWMKntRc9yb9VPOTMibp8QM5m57DilP01N/X\nPTQaw8oI40znnHdctTZz7S+W/3Te6sRnkOhFyalWmsKY0CWg/FELlA==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-08-13T09:19:58Z",
|
||||
"mac": "ENC[AES256_GCM,data:8nq+ugkUJxE24lUIySySs/cAF8vnfqr936L/5F0O1QFwNrbpPmKRXkuwa6u0V+187L2952Id20Fym4ke59f3fJJsF840NCKDwDDZhBZ20q9GfOqIKImEom/Nzw6D0WXQLUT3w8EMyJ/F+UaJxnBNPR6f6+Kx4YgStYzCcA6Ahzg=,iv:VBPktEz7qwWBBnXE+xOP/EUVy7/AmNCHPoK56Yt/ZNc=,tag:qXONwOLFAlopymBEf5p4Sw==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../users/admin
|
||||
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:4d3ri0EsDmWRtA8vzvpPRLMsSp4MIMKwvtn0n0pRY05uBPXs3KcjnweMPIeTE1nIhqnMR2o2MfLah5TCPpaFax9+wxIt74uacbg=,iv:0LBAldTC/hN4QLCxgXTl6d9UB8WmUTnj4sD2zHQuG2w=,tag:zr/RhG/AU4g9xj9l2BprKw==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvV0JnZDhlU1piU1g2cng0\ncytKOEZ6WlZlNGRGUjV3MmVMd2Nzc0ZwelgwCjBGdThCUGlXbVFYdnNoZWpJZ3Vm\nc2xkRXhxS09vdzltSVoxLzhFSVduak0KLS0tIE5DRjJ6cGxiVlB1eElHWXhxN1pJ\nYWtIMDMvb0Z6akJjUzlqeEFsNHkxL2cKpghv/QegnXimeqd9OPFouGM//jYvoVmw\n2d4mLT2JSMkEhpfGcqb6vswhdJfCiKuqr2B4bqwAnPMaykhsm8DFRQ==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-08-13T09:20:08Z",
|
||||
"mac": "ENC[AES256_GCM,data:BzlQVAJ7HzcxNPKB3JhabqRX/uU0EElj172YecjmOflHnzz/s9xgfdAfJK/c53hXlX4LtGPnubH7a8jOolRq98zmZeBYE27+WLs2aN7Ufld6mYk90/i7u4CqR+Fh2Kfht04SlUJCjnS5A9bTPwU9XGRHJ0BiOhzTuSMUJTRaPRM=,iv:L50K5zc1o99Ix9nP0pb9PRH+VIN2yvq7JqKeVHxVXmc=,tag:XFLkSCsdbTPxbasDYYxcFQ==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../users/admin
|
||||
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:qfLm6+g1vYnESCik9uyBeKsY6Ju2Gq3arnn2I8HHNO67Ri5BWbOQTvtz7WT8/q94RwVjv8SGeJ/fsJSpwLSrJSbqTZCPAnYwzzQ=,iv:PnA9Ao8RRELNhNQYbaorstc0KaIXRU7h3+lgDCXZFHk=,tag:VeLgYQYwqthYihIoQTwYiA==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBNWVVQaDJFd0N3WHptRC9Z\nZTgxTWh5bnU1SkpqRWRXZnhPaFhpSVJmVEhrCjFvdHFYenNWaFNrdXlha09iS2xj\nOTZDcUNkcHkvTDUwNjM4Z3gxUkxreUEKLS0tIE5oY3Q2bWhsb2FSQTVGTWVSclJw\nWllrelRwT3duYjJJbTV0d3FwU1VuNlkK2eN3fHFX/sVUWom8TeZC9fddqnSCsC1+\nJRCZsG46uHDxqLcKIfdFWh++2t16XupQYk3kn+NUR/aMc3fR32Uwjw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-08-13T09:20:18Z",
|
||||
"mac": "ENC[AES256_GCM,data:nUwsPcP1bsDjAHFjQ1NlVkTwyZY4B+BpzNkMx9gl0rE14j425HVLtlhlLndhRp+XMpnDldQppLAAtSdzMsrw8r5efNgTRl7cu4Fy/b9cHt84k7m0aou5lrGus9SV1bM7/fzC9Xm7CSXBcRzyDGVsKC6UBl1rx+ybh7HyAN05XSo=,iv:It57H+zUUNPkoN1D8sYwyZx5zIFIga7mydhGUHYBCGE=,tag:mBQdYqUpjPknbYa13qESyw==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../users/admin
|
||||
@@ -1,4 +0,0 @@
|
||||
{
|
||||
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"type": "age"
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../../sops/machines/controller1
|
||||
@@ -1,19 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:noe913+28JWkoDkGGMu++cc1+j5NPDoyIhWixdsowoiVO3cTWGkZ88SUGO5D,iv:ynYMljwqMcBdk8RpVcw/2Jflg2RCF28r4fKUgIAF8B4=,tag:+TsXDJgfUhKgg4iQVXKKlQ==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBhYVRReTZBQ05GYmVBVjhS\nNXM5aFlhVzZRaVl6UHl6S3JnMC9Sb1dwZ1ZjCmVuS2dEVExYZWROVklUZWFCSnM2\nZnlxbVNseTM2c0Q0TjhsT3NzYmtqREUKLS0tIHBRTFpvVGt6d1cxZ2lFclRsUVhZ\nZDlWaG9PcXVrNUZKaEgxWndjUDVpYjgKt0eOhAgcYdkg9JSEakx4FjChLTn3pis+\njOkuGd4JfXMKcwC7vJV5ygQBxzVJSBw+RucP7sYCBPK0m8Voj94ntw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1rnkc2vmrupy9234clyu7fpur5kephuqs3v7qauaw5zeg00jqjdasefn3cc",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6MFJqNHNraG9DSnJZMFdz\ndU8zVXNTamxROFd1dWtuK2RiekhPdHhleVhFCi8zNWJDNXJMRUlDdjc4Q0UycTIz\nSGFGSmdnNU0wZWlDaTEwTzBqWjh6SFkKLS0tIEJOdjhOMDY2TUFLb3RPczNvMERx\nYkpSeW5VOXZvMlEvdm53MDE3aUFTNjgKyelSTjrTIR9I3rJd3krvzpsrKF1uGs4J\n4MtmQj0/3G+zPYZVBx7b3HF6B3f1Z7LYh05+z7nCnN/duXyPnDjNcg==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-08-13T09:19:37Z",
|
||||
"mac": "ENC[AES256_GCM,data:+DmIkPG/H6tCtf8CvB98E1QFXv08QfTcCB3CRsi+XWnIRBkryRd/Au9JahViHMdK7MED8WNf84NWTjY2yH4y824/DjI8XXNMF1iVMo0CqY42xbVHtUuhXrYeT+c8CyEw+M6zfy1jC0+Bm3WQWgagz1G6A9SZk3D2ycu0N08+axA=,iv:kwBjTYebIy5i2hagAajSwwuKnSkrM9GyrnbeQXB2e/w=,tag:EgKJ5gVGYj1NGFUduxLGfg==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../../sops/users/admin
|
||||
@@ -1 +0,0 @@
|
||||
lQfR7GhivN87XoXruTGOPjVPhNu1Brt//wyc3pdwE20=
|
||||
@@ -1 +0,0 @@
|
||||
7470bb5c79df224a9b7f5a2259acd2e46db763c27e24cb3416c8b591cb328077
|
||||
@@ -1 +0,0 @@
|
||||
fd51:19c1:3b:f700
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../../sops/machines/controller2
|
||||
@@ -1,19 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:2kehACgvNgoYGPwnW7p86BR0yUu689Chth6qZf9zoJtuTY9ATS68dxDyBc5S,iv:qb2iDUtExegTeN3jt6SA8RnU61W5GDDhn56QXiQT4gw=,tag:pSGPICX5p6qlZ1WMVoIEYQ==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBSTTR5TDY4RE9VYmlCK1dL\nWkVRcVZqVDlsbmQvUlJmdzF2b1Z1S0k3NngwCkFWNzRVaERtSmFsd0o2aFJOb0ZX\nSU9yUnVaNi9IUjJWeGRFcEpDUXo5WkEKLS0tIEczNkxiYnJsTWRoLzFhQVF1M21n\nWnZEdGV1N2N5d1FZQkJUQ1IrdGFLblkKPTpha2bxS8CCAMXWTDKX/WOcdvggaP3Y\nqewyahDNzb4ggP+LNKp55BtwFjdvoPoq4BpYOOgMRbQMMk+H1o9WFw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1t2hhg99d4p2yymuhngcy5ccutp8mvu7qwvg5cdhck303h9e7ha9qnlt635",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBYcEZ6Tzk3M0pkV0tOdTBj\nenF2a0tHNnhBa0NrazMwV1VBbXBZR3pzSHpvCnBZOEU0VlFHS1FHcVpTTDdPczVV\nV0RFSlZ0VmIzWGoydEdKVXlIUE9OOEkKLS0tIFZ0cWVBR1loeVlWa2c4U3oweXE2\ncm1ja0JCS3U5Nk41dlAzV2NabDc2bDQKdgCDNnpRZlFPnEGlX6fo0SQX4yOB+E6r\ntnSwofR3xxZvkyme/6JJU5qBZXyCXEAhKMRkFyvJANXzMJAUo/Osow==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-08-13T09:19:48Z",
|
||||
"mac": "ENC[AES256_GCM,data:e3EkL8vwRhLsec83Zi9DE3PKT+4RwgiffpN4QHcJKTgmDW6hzizWc5kAxbNWGJ9Qqe6sso2KY7tc+hg1lHEsmzjCbg153p8h+7lVI2XT6adi/CS8WZ2VpeL+0X9zDQCjqHmrESZAYFBdkLqO4jucdf0Pc3CKKD+N3BDDTwSUvHM=,iv:xvR7dJL8sdYen00ovrYT8PNxhB9XxSWDSRz1IK23I/o=,tag:OyhAvllBgfAp3eGeNpR/Nw==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../../sops/users/admin
|
||||
@@ -1 +0,0 @@
|
||||
5Z7gbLFbXpEFfomW2pKyZBpZN5xvUtiqrIL0GVfNtQ8=
|
||||
@@ -1 +0,0 @@
|
||||
c3672fdb9fb31ddaf6572fc813cf7a8fe50488ef4e9d534c62d4f29da60a1a99
|
||||
@@ -1 +0,0 @@
|
||||
fd51:19c1:c1:aa00
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../../sops/machines/peer1
|
||||
@@ -1,19 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:b+akw85T3D9xc75CPLHucR//k7inpxKDvgpR8tCNKwNDRVjVHjcABhfZNLXW,iv:g11fZE8UI0MVh9GKdjR6leBlxa4wN7ZubozXG/VlBbw=,tag:0YkzWCW3zJ3Mt3br/jmTYw==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1jts52rzlqcwjc36jkp56a7fmjn3czr7kl9ta2spkfzhvfama33sqacrzzd",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBXWkJUR0pIa2xOSEw2dThm\nYlNuOHZCVW93Wkc5LzE4YmpUTHRkZlk3ckc4CnN4M3ZRMWNFVitCT3FyWkxaR0di\nb0NmSXFhRHJmTWg0d05OcWx1LytscEEKLS0tIEtleTFqU3JrRjVsdHpJeTNuVUhF\nWEtnOVlXVXRFamFSak5ia2F2b0JiTzAKlhOBZvZ4AN+QqAYQXvd6YNmgVS4gtkWT\nbV3bLNTgwtrDtet9NDHM8vdF+cn5RZxwFfgmTbDEow6Zm8EXfpxj/g==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6YVYyQkZqMTJYQTlyRG5Y\nbnJ2UkE1TS9FZkpSa2tQbk1hQjViMi9OcGk0CjFaZUdjU3JtNzh0bDFXdTdUVW4x\nanFqZHZjZjdzKzA2MC8vTWh3Uy82UGcKLS0tIDhyOFl3UGs3czdoMlpza3UvMlB1\nSE90MnpGc05sSCtmVWg0UVNVdmRvN2MKHlCr4U+7bsoYb+2fgT4mEseZCEjxrtLu\n55sR/4YH0vqMnIBnLTSA0e+WMrs3tQfseeJM5jY/ZNnpec1LbxkGTg==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-08-13T09:19:58Z",
|
||||
"mac": "ENC[AES256_GCM,data:gEoEC9D2Z7k5F8egaY1qPXT5/96FFVsyofSBivQ28Ir/9xHX2j40PAQrYRJUWsk/GAUMOyi52Wm7kPuacw+bBcdtQ0+MCDEmjkEnh1V83eZ/baey7iMmg05uO92MYY5o4e7ZkwzXoAeMCMcfO0GqjNvsYJHF1pSNa+UNDj+eflw=,iv:dnIYpvhAdvUDe9md53ll42krb0sxcHy/toqGc7JFxNA=,tag:0WkZU7GeKMD1DQTYaI+1dg==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}
@@ -1 +0,0 @@
../../../../../../sops/users/admin
@@ -1 +0,0 @@
juK7P/92N2t2t680aLIRobHc3ts49CsZBvfZOyIKpUc=
@@ -1 +0,0 @@
b36142569a74a0de0f9b229f2a040ae33a22d53bef5e62aa6939912d0cda05ba
@@ -1 +0,0 @@
6987:50a0:9b93:4337
@@ -1 +0,0 @@
../../../../../../sops/machines/peer2
@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:apX2sLwtq6iQgLJslFwiRMNBUe0XLzLQbhKfmb2pKiJG7jGNHUgHJz3Ls4Ca,iv:HTDatm3iD5wACTkkd3LdRNvJfnfg75RMtn9G6Q7Fqd4=,tag:Mfehlljnes5CFD1NJdk27A==,type:str]",
"sops": {
"age": [
{
"recipient": "age12nqnp0zd435ckp5p0v2fv4p2x4cvur2mnxe8use2sx3fgy883vaq4ae75e",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBVZzFyMUZsd2V2VWxOUmhP\nZE8yZTc4Q0RkZisxR25NemR1TzVDWmJZVjBVClA1MWhsU0xzSG16aUx3cWFWKzlG\nSkxrT09OTkVqLzlWejVESE1QWHVJaFkKLS0tIGxlaGVuWU43RXErNTB3c3FaUnM3\nT0N5M253anZkbnFkZWw2VHA0eWhxQW8Kd1PMtEX1h0Hd3fDLMi++gKJkzPi9FXUm\n+uYhx+pb+pJM+iLkPwP/q6AWC7T0T4bHfekkdzxrbsKMi73x/GrOiw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBqVzRIMWdlNjVwTURyMFkv\nSUhiajZkZVNuWklRYit6cno4UzNDa2szOFN3CkQ2TWhHb25pbmR1MlBsRXNLL2lx\ncVZ3c3BsWXN2aS9UUVYvN3I4S0xUSmMKLS0tIE5FV0U5aXVUZk9XL0U0Z2ZSNGd5\nbU9zY3IvMlpSNVFLYkRNQUpUYVZOWFUK7j4Otzb8CJTcT7aAj9/irxHEDXh1HkTg\nzz7Ho8/ZncNtaCVHlHxjTgVW9d5aIx8fSsV9LRCFwHMtNzvwj1Nshg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:20:08Z",
"mac": "ENC[AES256_GCM,data:e7WNVEz78noHBiz6S3A6qNfop+yBXB3rYN0k4GvaQKz3b99naEHuqIF8Smzzt4XrbbiPKu2iLa5ddLBlqqsi32UQUB8JS9TY7hvW8ol+jpn0VxusGCXW9ThdDEsM/hXiPyr331C73zTvbOYI1hmcGMlJL9cunVRO9rkMtEqhEfo=,iv:6zt7wjIs1y5xDHNK+yLOwoOuUpY7/dOGJGT6UWAFeOg=,tag:gzFTgoxhoLzUV0lvzOhhfg==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}
@@ -1 +0,0 @@
../../../../../../sops/users/admin
@@ -1 +0,0 @@
XI9uSaQRDBCb82cMnGzGJcbqRfDG/IXZobyeL+kV03k=
@@ -1 +0,0 @@
360f9fce4a984eb87ce2a673eb5341ecb89c0f62126548d45ef25ff5243dd646
@@ -1 +0,0 @@
3b21:3ced:003e:89b3
@@ -1 +0,0 @@
../../../../../../sops/machines/peer3
@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:Gluvjes/3oH5YsDq00JDJyJgoEFcj56smioMArPSt309MDGExYX2QsCzeO1q,iv:oBBJRDdTj/1dWEvzhdFKQ2WfeCKyavKMLmnMbqnU5PM=,tag:2WNFxKz2dWyVcybpm5N4iw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBtQWpjRmhZTFdPa2VSZkFN\nbUczMlY5bDBmMTdoMy8xcWxMaXpWVitMZGdjCnRWb2Y3eGpHU1hmNHRJVFBqbU5w\nVEZGdUIrQXk0U0dUUEZ6bE5EMFpTRHMKLS0tIGpYSmZmQThJUTlvTHpjc05ZVlM4\nQWhTOWxnUHZnYlJ3czE3ZUJ0L3ozWTQK3a7N0Zpzo4sUezYveqvKR49RUdJL23eD\n+cK5lk2xbtj+YHkeG+dg7UlHfDaicj0wnFH1KLuWmNd1ONa6eQp3BQ==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1sglr4zp34drjfydzeweq43fz3uwpul3hkh53lsfa9drhuzwmkqyqn5jegp",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA3a2FOWlVsSkdnendrYmUz\ndEpuL1hZSWNFTUtDYm14S3V1aW9KS3hsazJRCkp2SkFFbi9hbGJpNks1MlNTL0s5\nTk5pcUMxaEJobkcvWmRGeU9jMkdNdzAKLS0tIDR6M0Y5eE1ETHJJejAzVW1EYy9v\nZCtPWHJPUkhuWnRzSGhMUUtTa280UmMKXvtnxyop7PmRvTOFkV80LziDjhGh93Pf\nYwhD/ByD/vMmr21Fd6PVHOX70FFT30BdnMc1/wt7c/0iAw4w4GoQsA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:20:18Z",
"mac": "ENC[AES256_GCM,data:3nXMTma0UYXCco+EM8UW45cth7DVMboFBKyesL86GmaG6OlTkA2/25AeDrtSVO13a5c2jC6yNFK5dE6pSe5R9f0BoDF7d41mgc85zyn+LGECNWKC6hy6gADNSDD6RRuV1S3FisFQl1F1LD8LiSWmg/XNMZzChNlHYsCS8M+I84g=,iv:pu5VVXAVPmVoXy0BJ+hq5Ar8R0pZttKSYa4YS+dhDNc=,tag:xp1S/4qExnxMTGwhfLJrkA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}
@@ -1 +0,0 @@
../../../../../../sops/users/admin
@@ -1 +0,0 @@
t6qN4VGLR+VMhrBDNKQEXZVyRsEXs1/nGFRs5DI82F8=
@@ -1 +0,0 @@
e3facc99b73fe029d4c295f71829a83f421f38d82361cf412326398175da162a
@@ -1 +0,0 @@
e42b:bf85:33f4:f0b1
24
checks/zt-tcp-relay/default.nix
Normal file
@@ -0,0 +1,24 @@
(
  { pkgs, ... }:
  {
    name = "zt-tcp-relay";

    nodes.machine =
      { self, ... }:
      {
        imports = [
          self.nixosModules.clanCore
          self.clanModules.zt-tcp-relay
          {
            clan.core.settings.directory = ./.;
          }
        ];
      };
    testScript = ''
      start_all()
      machine.wait_for_unit("zt-tcp-relay.service")
      out = machine.succeed("${pkgs.netcat}/bin/nc -z -v localhost 4443")
      print(out)
    '';
  }
)
210
clanModules/borgbackup/roles/client.nix
Normal file
@@ -0,0 +1,210 @@
{
  config,
  lib,
  pkgs,
  ...
}:
let
  # Instances might be empty, if the module is not used via the inventory
  instances = config.clan.inventory.services.borgbackup or { };
  # roles = { ${role_name} :: { machines :: [string] } }
  allServers = lib.foldlAttrs (
    acc: _instanceName: instanceConfig:
    acc
    ++ (
      if builtins.elem machineName instanceConfig.roles.client.machines then
        instanceConfig.roles.server.machines
      else
        [ ]
    )
  ) [ ] instances;

  machineName = config.clan.core.settings.machine.name;

  cfg = config.clan.borgbackup;
  preBackupScript = ''
    declare -A preCommandErrors

    ${lib.concatMapStringsSep "\n" (
      state:
      lib.optionalString (state.preBackupCommand != null) ''
        echo "Running pre-backup command for ${state.name}"
        if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
          preCommandErrors["${state.name}"]=1
        fi
      ''
    ) (lib.attrValues config.clan.core.state)}

    if [[ ''${#preCommandErrors[@]} -gt 0 ]]; then
      echo "pre-backup commands failed for the following services:"
      for state in "''${!preCommandErrors[@]}"; do
        echo "  $state"
      done
      exit 1
    fi
  '';
in
{
  options.clan.borgbackup.destinations = lib.mkOption {
    type = lib.types.attrsOf (
      lib.types.submodule (
        { name, ... }:
        {
          options = {
            name = lib.mkOption {
              type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
              default = name;
              description = "the name of the backup job";
            };
            repo = lib.mkOption {
              type = lib.types.str;
              description = "the borgbackup repository to backup to";
            };
            rsh = lib.mkOption {
              type = lib.types.str;
              default = "ssh -i ${
                config.clan.core.vars.generators.borgbackup.files."borgbackup.ssh".path
              } -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
              defaultText = "ssh -i \${config.clan.core.vars.generators.borgbackup.files.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
              description = "the rsh to use for the backup";
            };
          };
        }
      )
    );
    default = { };
    description = ''
      destinations where the machine should be backed up to
    '';
  };

  options.clan.borgbackup.exclude = lib.mkOption {
    type = lib.types.listOf lib.types.str;
    example = [ "*.pyc" ];
    default = [ ];
    description = ''
      Directories/Files to exclude from the backup.
      Use * as a wildcard.
    '';
  };

  config = {

    warnings = [
      "The clan.borgbackup module is deprecated and will be removed on 2025-07-15.
      Please migrate to user-maintained configuration or the new equivalent clan services
      (https://docs.clan.lol/reference/clanServices)."
    ];

    # Destinations
    clan.borgbackup.destinations =
      let
        destinations = builtins.map (serverName: {
          name = serverName;
          value = {
            repo = "borg@${serverName}:/var/lib/borgbackup/${machineName}";
          };
        }) allServers;
      in
      (builtins.listToAttrs destinations);

    # Derived from the destinations
    systemd.services = lib.mapAttrs' (
      _: dest:
      lib.nameValuePair "borgbackup-job-${dest.name}" {
        # since borgbackup mounts the system read-only, we need to run in a
        # ExecStartPre script, so we can generate additional files.
        serviceConfig.ExecStartPre = [
          ''+${pkgs.writeShellScript "borgbackup-job-${dest.name}-pre-backup-commands" preBackupScript}''
        ];
      }
    ) cfg.destinations;

    services.borgbackup.jobs = lib.mapAttrs (_: dest: {
      paths = lib.unique (
        lib.flatten (map (state: state.folders) (lib.attrValues config.clan.core.state))
      );
      exclude = cfg.exclude;
      repo = dest.repo;
      environment.BORG_RSH = dest.rsh;
      compression = "auto,zstd";
      startAt = "*-*-* 01:00:00";
      persistentTimer = true;

      encryption = {
        mode = "repokey";
        passCommand = "cat ${config.clan.core.vars.generators.borgbackup.files."borgbackup.repokey".path}";
      };

      prune.keep = {
        within = "1d"; # Keep all archives from the last day
        daily = 7;
        weekly = 4;
        monthly = 0;
      };
    }) cfg.destinations;

    environment.systemPackages = [
      (pkgs.writeShellApplication {
        name = "borgbackup-create";
        runtimeInputs = [ config.systemd.package ];
        text = ''
          ${lib.concatMapStringsSep "\n" (dest: ''
            systemctl start borgbackup-job-${dest.name}
          '') (lib.attrValues cfg.destinations)}
        '';
      })
      (pkgs.writeShellApplication {
        name = "borgbackup-list";
        runtimeInputs = [ pkgs.jq ];
        text = ''
          (${
            lib.concatMapStringsSep "\n" (
              dest:
              # we need yes here to skip the changed url verification
              ''echo y | /run/current-system/sw/bin/borg-job-${dest.name} list --json | jq '[.archives[] | {"name": ("${dest.name}::${dest.repo}::" + .name)}]' ''
            ) (lib.attrValues cfg.destinations)
          }) | jq -s 'add // []'
        '';
      })
      (pkgs.writeShellApplication {
        name = "borgbackup-restore";
        runtimeInputs = [ pkgs.gawk ];
        text = ''
          cd /
          IFS=':' read -ra FOLDER <<< "''${FOLDERS-}"
          job_name=$(echo "$NAME" | awk -F'::' '{print $1}')
          backup_name=''${NAME#"$job_name"::}
          if [[ ! -x /run/current-system/sw/bin/borg-job-"$job_name" ]]; then
            echo "borg-job-$job_name not found: Backup name is invalid" >&2
            exit 1
          fi
          echo y | /run/current-system/sw/bin/borg-job-"$job_name" extract "$backup_name" "''${FOLDER[@]}"
        '';
      })
    ];

    clan.core.vars.generators.borgbackup = {
      files."borgbackup.ssh.pub".secret = false;
      files."borgbackup.ssh" = { };
      files."borgbackup.repokey" = { };

      migrateFact = "borgbackup";
      runtimeInputs = [
        pkgs.coreutils
        pkgs.openssh
        pkgs.xkcdpass
      ];
      script = ''
        ssh-keygen -t ed25519 -N "" -C "" -f "$out"/borgbackup.ssh
        xkcdpass -n 4 -d - > "$out"/borgbackup.repokey
      '';
    };

    clan.core.backups.providers.borgbackup = {
      list = "borgbackup-list";
      create = "borgbackup-create";
      restore = "borgbackup-restore";
    };
  };
}
36
clanModules/disk-id/roles/default.nix
Normal file
@@ -0,0 +1,36 @@
{
  pkgs,
  ...
}:

{

  config = {

    warnings = [
      ''
        The clan.disk-id module is deprecated and will be removed on 2025-07-15.
        For migration see: https://docs.clan.lol/guides/migrations/disk-id/

        !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        !!! Please migrate. Otherwise you may not be able to boot your system after that date. !!!
        !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      ''
    ];
    clan.core.vars.generators.disk-id = {
      files.diskId.secret = false;
      runtimeInputs = [
        pkgs.coreutils
        pkgs.bash
      ];
      script = ''
        uuid=$(bash ${./uuid4.sh})

        # Remove the hyphens from the UUID
        uuid_no_hyphens=$(echo -n "$uuid" | tr -d '-')

        echo -n "$uuid_no_hyphens" > "$out/diskId"
      '';
    };
  };
}
@@ -2,61 +2,22 @@

let
error = builtins.throw ''
clanModules have been removed!

###############################################################################
#                                                                             #
# Clan modules (clanModules) have been deprecated and removed in favor of     #
# Clan services!                                                              #
#                                                                             #
# Refer to https://docs.clan.lol/guides/migrations/migrate-inventory-services #
# for migration instructions.                                                 #
#                                                                             #
###############################################################################

Refer to https://docs.clan.lol/guides/migrations/migrate-inventory-services for migration.
'';

modnames = [
"admin"
"borgbackup"
"borgbackup-static"
"deltachat"
"disk-id"
"dyndns"
"ergochat"
"garage"
"heisenbridge"
"iwd"
"localbackup"
"localsend"
"matrix-synapse"
"moonlight"
"mumble"
"nginx"
"packages"
"postgresql"
"root-password"
"single-disk"
"sshd"
"state-version"
"static-hosts"
"sunshine"
"syncthing"
"syncthing-static-peers"
"thelounge"
"trusted-nix-caches"
"user-password"
"vaultwarden"
"xfce"
"zerotier-static-peers"
"zt-tcp-relay"
];
in

{
flake.clanModules = builtins.listToAttrs (
map (name: {
inherit name;
value = error;
}) modnames
);
flake.clanModules = {
outPath = "removed-clan-modules";
value = error;
};

# builtins.listToAttrs (
#   map (name: {
#     inherit name;
#     value = error;
#   }) modnames
# );
}
27
clanModules/importer/README.md
Normal file
@@ -0,0 +1,27 @@
---
description = "Convenient, structured module imports for hosts."
categories = ["Utility"]
features = [ "inventory" ]
---
The importer module allows users to configure importing modules in a flexible and structured way.

It exposes the `extraModules` functionality of the inventory, without any added configuration.

## Usage

```nix
inventory.services = {
  importer.base = {
    roles.default.tags = [ "all" ];
    roles.default.extraModules = [ "modules/base.nix" ];
  };
  importer.zone1 = {
    roles.default.tags = [ "zone1" ];
    roles.default.extraModules = [ "modules/zone1.nix" ];
  };
};
```

This imports the module `modules/base.nix` on every machine that has the `all` tag,
which by default is every machine managed by the clan,
and the module `modules/zone1.nix` on every machine tagged with `zone1`.
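
For illustration only: the files referenced by `extraModules` are ordinary NixOS modules. A minimal, hypothetical `modules/base.nix` (not part of this change) might look like:

```nix
# modules/base.nix (hypothetical example)
{ pkgs, ... }:
{
  # Anything a normal NixOS module can do is allowed here.
  environment.systemPackages = [ pkgs.git ];
  services.openssh.enable = true;
}
```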
106
clanModules/sshd/roles/server.nix
Normal file
@@ -0,0 +1,106 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  stringSet = list: builtins.attrNames (builtins.groupBy lib.id list);

  domains = stringSet config.clan.sshd.certificate.searchDomains;

  cfg = config.clan.sshd;
in
{
  imports = [ ../shared.nix ];
  options = {
    clan.sshd.hostKeys.rsa.enable = lib.mkEnableOption "Generate RSA host key";
  };
  config = {

    warnings = [
      "The clan.sshd module is deprecated and will be removed on 2025-07-15.
      Please migrate to user-maintained configuration or the new equivalent clan services
      (https://docs.clan.lol/reference/clanServices)."
    ];

    services.openssh = {
      enable = true;
      settings.PasswordAuthentication = false;

      settings.HostCertificate = lib.mkIf (
        cfg.certificate.searchDomains != [ ]
      ) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path;

      hostKeys =
        [
          {
            path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
            type = "ed25519";
          }
        ]
        ++ lib.optional cfg.hostKeys.rsa.enable {
          path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
          type = "rsa";
        };
    };

    clan.core.vars.generators.openssh = {
      files."ssh.id_ed25519" = { };
      files."ssh.id_ed25519.pub".secret = false;
      migrateFact = "openssh";
      runtimeInputs = [
        pkgs.coreutils
        pkgs.openssh
      ];
      script = ''
        ssh-keygen -t ed25519 -N "" -C "" -f "$out"/ssh.id_ed25519
      '';
    };

    programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
      hostNames = [
        "localhost"
        config.networking.hostName
      ] ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
      publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
    };

    clan.core.vars.generators.openssh-rsa = lib.mkIf config.clan.sshd.hostKeys.rsa.enable {
      files."ssh.id_rsa" = { };
      files."ssh.id_rsa.pub".secret = false;
      runtimeInputs = [
        pkgs.coreutils
        pkgs.openssh
      ];
      script = ''
        ssh-keygen -t rsa -b 4096 -N "" -C "" -f "$out"/ssh.id_rsa
      '';
    };

    clan.core.vars.generators.openssh-cert = lib.mkIf (cfg.certificate.searchDomains != [ ]) {
      files."ssh.id_ed25519-cert.pub".secret = false;
      dependencies = [
        "openssh"
        "openssh-ca"
      ];
      validation = {
        name = config.clan.core.settings.machine.name;
        domains = lib.genAttrs config.clan.sshd.certificate.searchDomains lib.id;
      };
      runtimeInputs = [
        pkgs.openssh
        pkgs.jq
      ];
      script = ''
        ssh-keygen \
          -s $in/openssh-ca/id_ed25519 \
          -I ${config.clan.core.settings.machine.name} \
          -h \
          -n ${lib.concatMapStringsSep "," (d: "${config.clan.core.settings.machine.name}.${d}") domains} \
          $in/openssh/ssh.id_ed25519.pub
        mv $in/openssh/ssh.id_ed25519-cert.pub "$out"/ssh.id_ed25519-cert.pub
      '';
    };
  };
}
49
clanModules/sshd/shared.nix
Normal file
@@ -0,0 +1,49 @@
{
  config,
  lib,
  pkgs,
  ...
}:
{
  options = {
    clan.sshd.certificate = {
      # TODO: allow per-server domains that we then collect in the inventory
      #domains = lib.mkOption {
      # type = lib.types.listOf lib.types.str;
      # default = [ ];
      # example = [ "git.mydomain.com" ];
      # description = "List of domains to include in the certificate. This option will not prepend the machine name in front of each domain.";
      #};
      searchDomains = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        default = [ ];
        example = [ "mydomain.com" ];
        description = "List of domains to include in the certificate. This option will prepend the machine name in front of each domain before adding it to the certificate.";
      };
    };
  };
  config = {
    clan.core.vars.generators.openssh-ca =
      lib.mkIf (config.clan.sshd.certificate.searchDomains != [ ])
        {
          share = true;
          files.id_ed25519.deploy = false;
          files."id_ed25519.pub" = {
            deploy = false;
            secret = false;
          };
          runtimeInputs = [
            pkgs.openssh
          ];
          script = ''
            ssh-keygen -t ed25519 -N "" -C "" -f "$out"/id_ed25519
          '';
        };

    programs.ssh.knownHosts.ssh-ca = lib.mkIf (config.clan.sshd.certificate.searchDomains != [ ]) {
      certAuthority = true;
      extraHostNames = builtins.map (domain: "*.${domain}") config.clan.sshd.certificate.searchDomains;
      publicKey = config.clan.core.vars.generators.openssh-ca.files."id_ed25519.pub".value;
    };
  };
}
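As a sketch of how a machine opts into the certificate machinery defined above (the domain value is a placeholder, not taken from this change): setting `searchDomains` non-empty enables the `openssh-ca` and `openssh-cert` generators, and `hostKeys.rsa.enable` additionally generates an RSA host key.

```nix
# Hypothetical machine configuration using the options above.
{
  clan.sshd.certificate.searchDomains = [ "mydomain.com" ];
  clan.sshd.hostKeys.rsa.enable = true; # optional RSA host key
}
```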
@@ -41,13 +41,25 @@
};
};
};
};

# We don't have a good way to specify dependencies between
# clanServices for now. When it gets implemented, we should just
# use the ssh and users modules here.
imports = [
./ssh.nix
./root-password.nix
];
perInstance =
{ settings, ... }:
{
nixosModule =
{ ... }:
{
imports = [
# We don't have a good way to specify dependencies between
# clanServices for now. When it gets implemented, we should just
# use the ssh and users modules here.
./ssh.nix
./root-password.nix
];

_module.args = { inherit settings; };

users.users.root.openssh.authorizedKeys.keys = builtins.attrValues settings.allowedKeys;
};
};
};
}
@@ -1,55 +1,39 @@
# We don't have a way of specifying dependencies between clanServices for now.
# When it gets added this file should be removed and the users module used instead.
{
roles.default.perInstance =
{ ... }:
{
nixosModule =
{
config,
pkgs,
...
}:
{
config,
pkgs,
...
}:
{

users.mutableUsers = false;
users.users.root.hashedPasswordFile =
config.clan.core.vars.generators.root-password.files.password-hash.path;
users.mutableUsers = false;
users.users.root.hashedPasswordFile =
config.clan.core.vars.generators.root-password.files.password-hash.path;

clan.core.vars.generators.root-password = {
files.password-hash.neededFor = "users";
clan.core.vars.generators.root-password = {
files.password-hash.neededFor = "users";

files.password.deploy = false;
files.password.deploy = false;

runtimeInputs = [
pkgs.coreutils
pkgs.mkpasswd
pkgs.xkcdpass
];
runtimeInputs = [
pkgs.coreutils
pkgs.mkpasswd
pkgs.xkcdpass
];

prompts.password.display = {
group = "Root User";
label = "Password";
required = false;
helperText = ''
Your password will be encrypted and stored securely using the secret store you've configured.
'';
};
prompts.password.type = "hidden";
prompts.password.persist = true;
prompts.password.description = "You can autogenerate a password, if you leave this prompt blank.";

prompts.password.type = "hidden";
prompts.password.persist = true;
prompts.password.description = "Leave empty to generate automatically";

script = ''
prompt_value="$(cat "$prompts"/password)"
if [[ -n "''${prompt_value-}" ]]; then
echo "$prompt_value" | tr -d "\n" > "$out"/password
else
xkcdpass --numwords 5 --delimiter - --count 1 | tr -d "\n" > "$out"/password
fi
mkpasswd -s -m sha-512 < "$out"/password | tr -d "\n" > "$out"/password-hash
'';
};
};
};
script = ''
prompt_value="$(cat "$prompts"/password)"
if [[ -n "''${prompt_value-}" ]]; then
echo "$prompt_value" | tr -d "\n" > "$out"/password
else
xkcdpass --numwords 5 --delimiter - --count 1 | tr -d "\n" > "$out"/password
fi
mkpasswd -s -m sha-512 < "$out"/password | tr -d "\n" > "$out"/password-hash
'';
};
}
@@ -1,124 +1,115 @@
{
roles.default.perInstance =
{ settings, ... }:
{
nixosModule =
config,
pkgs,
lib,
settings,
...
}:
let
stringSet = list: builtins.attrNames (builtins.groupBy lib.id list);

domains = stringSet settings.certificateSearchDomains;

in
{

services.openssh = {
enable = true;
settings.PasswordAuthentication = false;

settings.HostCertificate = lib.mkIf (
settings.certificateSearchDomains != [ ]
) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path;

hostKeys =
[
{
config,
pkgs,
lib,
...
}:
let
stringSet = list: builtins.attrNames (builtins.groupBy lib.id list);
path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
type = "ed25519";
}
]
++ lib.optional settings.rsaHostKey.enable {
path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
type = "rsa";
};
};

domains = stringSet settings.certificateSearchDomains;
clan.core.vars.generators.openssh = {
files."ssh.id_ed25519" = { };
files."ssh.id_ed25519.pub".secret = false;
migrateFact = "openssh";
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/ssh.id_ed25519
'';
};

in
{
programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
hostNames = [
"localhost"
config.networking.hostName
] ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
};

users.users.root.openssh.authorizedKeys.keys = builtins.attrValues settings.allowedKeys;
clan.core.vars.generators.openssh-rsa = lib.mkIf settings.rsaHostKey.enable {
files."ssh.id_rsa" = { };
files."ssh.id_rsa.pub".secret = false;
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t rsa -b 4096 -N "" -C "" -f "$out"/ssh.id_rsa
'';
};

services.openssh = {
enable = true;
settings.PasswordAuthentication = false;

settings.HostCertificate = lib.mkIf (
settings.certificateSearchDomains != [ ]
) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path;

hostKeys = [
{
path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
type = "ed25519";
}
]
++ lib.optional settings.rsaHostKey.enable {
path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
type = "rsa";
};
};

clan.core.vars.generators.openssh = {
files."ssh.id_ed25519" = { };
files."ssh.id_ed25519.pub".secret = false;
migrateFact = "openssh";
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/ssh.id_ed25519
'';
};

programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
hostNames = [
"localhost"
config.networking.hostName
]
++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
};

clan.core.vars.generators.openssh-rsa = lib.mkIf settings.rsaHostKey.enable {
files."ssh.id_rsa" = { };
files."ssh.id_rsa.pub".secret = false;
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t rsa -b 4096 -N "" -C "" -f "$out"/ssh.id_rsa
'';
};

clan.core.vars.generators.openssh-cert = lib.mkIf (settings.certificateSearchDomains != [ ]) {
files."ssh.id_ed25519-cert.pub".secret = false;
dependencies = [
"openssh"
"openssh-ca"
];
validation = {
name = config.clan.core.settings.machine.name;
domains = lib.genAttrs settings.certificateSearchDomains lib.id;
};
runtimeInputs = [
pkgs.openssh
pkgs.jq
];
script = ''
ssh-keygen \
-s $in/openssh-ca/id_ed25519 \
-I ${config.clan.core.settings.machine.name} \
-h \
-n ${lib.concatMapStringsSep "," (d: "${config.clan.core.settings.machine.name}.${d}") domains} \
$in/openssh/ssh.id_ed25519.pub
mv $in/openssh/ssh.id_ed25519-cert.pub "$out"/ssh.id_ed25519-cert.pub
'';
};

clan.core.vars.generators.openssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
share = true;
files.id_ed25519.deploy = false;
files."id_ed25519.pub" = {
deploy = false;
secret = false;
};
runtimeInputs = [
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/id_ed25519
'';
};

programs.ssh.knownHosts.ssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
certAuthority = true;
extraHostNames = builtins.map (domain: "*.${domain}") settings.certificateSearchDomains;
publicKey = config.clan.core.vars.generators.openssh-ca.files."id_ed25519.pub".value;
};
};
clan.core.vars.generators.openssh-cert = lib.mkIf (settings.certificateSearchDomains != [ ]) {
files."ssh.id_ed25519-cert.pub".secret = false;
dependencies = [
"openssh"
"openssh-ca"
];
validation = {
name = config.clan.core.settings.machine.name;
domains = lib.genAttrs settings.certificateSearchDomains lib.id;
};
runtimeInputs = [
pkgs.openssh
pkgs.jq
];
script = ''
ssh-keygen \
-s $in/openssh-ca/id_ed25519 \
-I ${config.clan.core.settings.machine.name} \
-h \
-n ${lib.concatMapStringsSep "," (d: "${config.clan.core.settings.machine.name}.${d}") domains} \
$in/openssh/ssh.id_ed25519.pub
mv $in/openssh/ssh.id_ed25519-cert.pub "$out"/ssh.id_ed25519-cert.pub
'';
};

clan.core.vars.generators.openssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
share = true;
files.id_ed25519.deploy = false;
files."id_ed25519.pub" = {
deploy = false;
secret = false;
};
runtimeInputs = [
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/id_ed25519
'';
};

programs.ssh.knownHosts.ssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
certAuthority = true;
extraHostNames = builtins.map (domain: "*.${domain}") settings.certificateSearchDomains;
publicKey = config.clan.core.vars.generators.openssh-ca.files."id_ed25519.pub".value;
};
}
@@ -1,59 +1,9 @@
## Usage
BorgBackup (short: Borg) gives you:

```nix
inventory.instances = {
  borgbackup = {
    module = {
      name = "borgbackup";
      input = "clan";
    };
    roles.client.machines."jon".settings = {
      destinations."storagebox" = {
        repo = "username@$hostname:/./borgbackup";
        rsh = ''ssh -oPort=23 -i /run/secrets/vars/borgbackup/borgbackup.ssh'';
      };
    };
    roles.server.machines = { };
  };
};
```

The input should be named according to your flake input. Jon is configured as a
client machine with a destination pointing to a Hetzner Storage Box.

## Overview

This guide explains how to set up and manage
[BorgBackup](https://borgbackup.readthedocs.io/) for secure, efficient backups
in a clan network. BorgBackup provides:

- Space efficient storage of backups with deduplication
- Secure, authenticated encryption
- Compression: lz4, zstd, zlib, lzma or none
- Mountable backups with FUSE
- Space efficient storage of backups.
- Secure, authenticated encryption.
- Compression: lz4, zstd, zlib, lzma or none.
- Mountable backups with FUSE.
- Easy installation on multiple platforms: Linux, macOS, BSD, …
- Free software (BSD license).
- Backed by a large and active open-source community.

## Roles

### 1. Client

Clients are machines that create and send backups to various destinations. Each
client can have multiple backup destinations configured.

### 2. Server

Servers act as backup repositories, receiving and storing backups from client
machines. They can be dedicated backup servers within your clan network.

## Backup destinations

This service allows you to perform backups to multiple `destinations`.
Destinations can be:

- **Local**: Local disk storage
- **Server**: Your own borgbackup server (using the `server` role)
- **Third-party services**: Such as Hetzner's Storage Box

For a more comprehensive guide on backups, look into the guide section.
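To illustrate the local-disk case from the list above: a destination's `repo` can be a plain filesystem path instead of an SSH target. This is a minimal sketch; the mount point `/mnt/backup` is an assumption, not taken from this change.

```nix
roles.client.machines."jon".settings = {
  destinations."usb-disk" = {
    # A local path is a valid borg repository; no rsh is needed here.
    repo = "/mnt/backup/borg";
  };
};
```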
29
clanServices/data-mesher/admin.nix
Normal file
@@ -0,0 +1,29 @@
{
  lib,
  config,
  settings,
  ...
}:
{

  services.data-mesher.initNetwork =
    let
      # for a given machine, read its public key and remove any new lines
      readHostKey =
        machine:
        let
          path = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/data-mesher-host-key/public_key/value";
        in
        builtins.elemAt (lib.splitString "\n" (builtins.readFile path)) 1;
    in
    {
      enable = true;
      keyPath = config.clan.core.vars.generators.data-mesher-network-key.files.private_key.path;

      tld = settings.network.tld;
      hostTTL = settings.network.hostTTL;

      # admin and signer host public keys
      signingKeys = builtins.map readHostKey (builtins.attrNames settings.bootstrapNodes);
    };
}
@@ -5,15 +5,31 @@ let
{
options = {
bootstrapNodes = lib.mkOption {
type = lib.types.nullOr (lib.types.listOf lib.types.str);
type = lib.types.nullOr (lib.types.attrsOf lib.types.str);
# the default bootstrap nodes are any machines with the admin or signers role
# we iterate through those machines, determining an IP address for them based on their VPN
# currently only supports zerotier
# default = builtins.foldl' (
# urls: name:
# let
# ipPath = "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value";
# in
# if builtins.pathExists ipPath then
# let
# ip = builtins.readFile ipPath;
# in
# urls ++ [ "[${ip}]:${builtins.toString settings.network.port}" ]
# else
# urls
# ) [ ] (dmLib.machines config).bootstrap;
description = ''
A list of bootstrap nodes that act as an initial gateway when joining
the cluster.
'';
example = [
"192.168.1.1:7946"
"192.168.1.2:7946"
];
example = {
"node1" = "192.168.1.1:7946";
"node2" = "192.168.1.2:7946";
};
};

network = {
@@ -39,59 +55,6 @@ let
};
};
};

mkBootstrapNodes =
{
config,
lib,
roles,
settings,
}:
lib.mkDefault (
builtins.foldl' (
urls: name:
let
ipPath = "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value";
in
if builtins.pathExists ipPath then
let
ip = builtins.readFile ipPath;
in
urls ++ [ "[${ip}]:${builtins.toString settings.network.port}" ]
else
urls
) [ ] (builtins.attrNames ((roles.admin.machines or { }) // (roles.signer.machines or { })))
);

mkDmService = dmSettings: config: {
enable = true;
openFirewall = true;

settings = {
log_level = "warn";
state_dir = "/var/lib/data-mesher";

# read network id from vars
network.id = config.clan.core.vars.generators.data-mesher-network-key.files.public_key.value;

host = {
names = [ config.networking.hostName ];
key_path = config.clan.core.vars.generators.data-mesher-host-key.files.private_key.path;
};

cluster = {
port = dmSettings.network.port;
join_interval = "30s";
push_pull_interval = "30s";
interface = dmSettings.network.interface;
bootstrap_nodes = dmSettings.bootstrapNodes;
};

http.port = 7331;
http.interface = "lo";
};
};

in
{
_class = "clan.service";
@@ -104,9 +67,11 @@ in
interface =
{ lib, ... }:
{

imports = [ sharedInterface ];

options = {

network = {
tld = lib.mkOption {
type = lib.types.str;
@@ -124,117 +89,54 @@ in
};
};
perInstance =
{ settings, roles, ... }:
{
extendSettings,
roles,
lib,
...
}:
{
nixosModule =
{ config, ... }:
let
settings = extendSettings {
bootstrapNodes = mkBootstrapNodes {
inherit
config
lib
roles
settings
;
};
};
in
{
imports = [ ./shared.nix ];

services.data-mesher = (mkDmService settings config) // {
initNetwork =
let
# for a given machine, read its public key and remove any new lines
readHostKey =
machine:
let
path = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/data-mesher-host-key/public_key/value";
in
builtins.elemAt (lib.splitString "\n" (builtins.readFile path)) 1;
in
{
enable = true;
keyPath = config.clan.core.vars.generators.data-mesher-network-key.files.private_key.path;

tld = settings.network.tld;
hostTTL = settings.network.hostTTL;

# admin and signer host public keys
signingKeys = builtins.map readHostKey (
builtins.attrNames ((roles.admin.machines or { }) // (roles.signer.machines or { }))
);
};
};
};
nixosModule = {
imports = [
./admin.nix
./shared.nix
];
_module.args = { inherit settings roles; };
};
};
};

roles.signer = {
interface = sharedInterface;
interface =
{ ... }:
{
imports = [ sharedInterface ];
};
perInstance =
{ settings, roles, ... }:
{
extendSettings,
lib,
roles,
...
}:
{
nixosModule =
{ config, ... }:
let
settings = extendSettings {
bootstrapNodes = mkBootstrapNodes {
inherit
config
lib
roles
settings
;
};
};
in
{
imports = [ ./shared.nix ];
services.data-mesher = (mkDmService settings config);
};
nixosModule = {
imports = [
./signer.nix
./shared.nix
];
_module.args = { inherit settings roles; };
};
};
};

roles.peer = {
interface = sharedInterface;
interface =
{ ... }:
{
imports = [ sharedInterface ];
};
perInstance =
{ settings, roles, ... }:
{
extendSettings,
lib,
roles,
...
}:
{
nixosModule =
{ config, ... }:
let
settings = extendSettings {
bootstrapNodes = mkBootstrapNodes {
inherit
config
lib
roles
settings
;
};
};
in
{
imports = [ ./shared.nix ];
services.data-mesher = (mkDmService settings config);
};
nixosModule = {
imports = [
./peer.nix
./shared.nix
];
_module.args = { inherit settings roles; };
};
};
};

}
@@ -1,9 +1,39 @@
{
config,
settings,
...
}:
{

services.data-mesher = {
enable = true;
openFirewall = true;

settings = {
log_level = "warn";
state_dir = "/var/lib/data-mesher";

# read network id from vars
network.id = config.clan.core.vars.generators.data-mesher-network-key.files.public_key.value;

host = {
names = [ config.networking.hostName ];
key_path = config.clan.core.vars.generators.data-mesher-host-key.files.private_key.path;
};

cluster = {
port = settings.network.port;
join_interval = "30s";
push_pull_interval = "30s";
interface = settings.network.interface;
bootstrap_nodes = (builtins.attrValues settings.bootstrapNodes);
};

http.port = 7331;
http.interface = "lo";
};
};

# Generate host key.
clan.core.vars.generators.data-mesher-host-key = {
files =
@@ -16,11 +16,11 @@
instances = {
data-mesher =
let
bootstrapNodes = [
"[2001:db8:1::1]:7946" # admin
"[2001:db8:1::2]:7946" # peer
# "2001:db8:1::3:7946" #signer
];
bootstrapNodes = {
admin = "[2001:db8:1::1]:7946";
peer = "[2001:db8:1::2]:7946";
# signer = "2001:db8:1::3:7946";
};
in
{
roles.peer.machines.peer.settings = {
@@ -1,86 +0,0 @@

A Dynamic-DNS (DDNS) service continuously keeps one or more DNS records in sync with the current public IP address of your machine.
In *clan* this service is backed by [qdm12/ddns-updater](https://github.com/qdm12/ddns-updater).

> Info
> ddns-updater itself is **heavily opinionated and version-specific**. Whenever you need the exhaustive list of flags or
> provider-specific fields, refer to its *versioned* documentation – **not** the GitHub README.

---

# 1. Configuration model

Internally ddns-updater consumes a single file named `config.json`.
A minimal configuration for the registrar *Namecheap* looks like:

```json
{
  "settings": [
    {
      "provider": "namecheap",
      "domain": "sub.example.com",
      "password": "e5322165c1d74692bfa6d807100c0310"
    }
  ]
}
```

Another example for *Porkbun*:

```json
{
  "settings": [
    {
      "provider": "porkbun",
      "domain": "domain.com",
      "api_key": "sk1_…",
      "secret_api_key": "pk1_…",
      "ip_version": "ipv4",
      "ipv6_suffix": ""
    }
  ]
}
```

When you write a `clan.nix` the **common** fields (`provider`, `domain`, `period`, …) are already exposed as typed
*Nix options*.
Registrar-specific or very new keys can be passed through an open attribute set called **extraSettings**.

---

# 2. Full Porkbun example

Manage three records – `@`, `home` and `test` – of the domain
`jon.blog` and refresh them every 15 minutes:

```nix title="clan.nix" hl_lines="10-11"
inventory.instances = {
  dyndns = {
    roles.default.machines."jon" = { };
    roles.default.settings = {
      period = 15; # minutes
      settings = {
        "all-jon-blog" = {
          provider = "porkbun";
          domain = "jon.blog";

          # (1) tell the secret-manager which key we are going to store
          secret_field_name = "secret_api_key";

          # everything below is copied verbatim into config.json
          extraSettings = {
            host = "@,home,test"; # (2) comma-separated list of sub-domains
            ip_version = "ipv4";
            ipv6_suffix = "";
            api_key = "pk1_4bb2b231275a02fdc23b7e6f3552s01S213S"; # (3) public – safe to commit
          };
        };
      };
    };
  };
};
```

1. `secret_field_name` tells the *vars-generator* to store the entered secret under the specified JSON field name in the configuration.
2. ddns-updater allows multiple hosts by separating them with a comma.
3. The `api_key` above is *public*; the corresponding **private key** is retrieved through `secret_field_name`.
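Putting the pieces together, the `config.json` that ddns-updater ends up consuming for the example above would plausibly look like the following. This is an illustrative reconstruction, not captured output; the secret value is injected at service start from the vars store.

```json
{
  "settings": [
    {
      "provider": "porkbun",
      "domain": "jon.blog",
      "host": "@,home,test",
      "ip_version": "ipv4",
      "ipv6_suffix": "",
      "api_key": "pk1_4bb2b231275a02fdc23b7e6f3552s01S213S",
      "secret_api_key": "<secret injected from the vars store>"
    }
  ]
}
```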
@@ -1,277 +0,0 @@
{ ... }:
{
  _class = "clan.service";
  manifest.name = "clan-core/dyndns";
  manifest.description = "A dynamic DNS service to update domain IPs";
  manifest.categories = [ "Network" ];
  manifest.readme = builtins.readFile ./README.md;

  roles.default = {
    interface =
      { lib, ... }:
      {
        options = {
          server = {
            enable = lib.mkEnableOption "dyndns webserver";
            domain = lib.mkOption {
              type = lib.types.str;
              description = "Domain to serve the webservice on";
            };
            port = lib.mkOption {
              type = lib.types.int;
              default = 54805;
              description = "Port to listen on";
            };
            acmeEmail = lib.mkOption {
              type = lib.types.str;
              description = ''
                Email address for account creation and correspondence from the CA.
                It is recommended to use the same email for all certs to avoid account
                creation limits.
              '';
            };
          };

          period = lib.mkOption {
            type = lib.types.int;
            default = 5;
            description = "Domain update period in minutes";
          };

          settings = lib.mkOption {
            type = lib.types.attrsOf (
              lib.types.submodule (
                { ... }:
                {
                  options = {
                    provider = lib.mkOption {
                      example = "namecheap";
                      type = lib.types.str;
                      description = "The dyndns provider to use";
                    };
                    domain = lib.mkOption {
                      type = lib.types.str;
                      example = "example.com";
                      description = "The top level domain to update.";
                    };
                    secret_field_name = lib.mkOption {
                      example = "api_key";

                      type = lib.types.enum [
                        "password"
                        "token"
                        "api_key"
                        "secret_api_key"
                      ];
                      default = "password";
                      description = "The field name for the secret";
                    };
                    extraSettings = lib.mkOption {
                      type = lib.types.attrsOf lib.types.str;
                      default = { };
                      description = ''
                        Extra settings for the provider.
                        Provider specific settings: https://github.com/qdm12/ddns-updater#configuration
                      '';
                    };
                  };
                }
              )
            );
            default = { };
            description = "Configuration for which domains to update";
          };
        };
      };

    perInstance =
      { settings, ... }:
      {
        nixosModule =
          {
            config,
            lib,
            pkgs,
            ...
          }:
          let
            name = "dyndns";
            cfg = settings;

            # We dedup secrets if they have the same provider + base domain
            secret_id = opt: "${name}-${opt.provider}-${opt.domain}";
            secret_path =
              opt: config.clan.core.vars.generators."${secret_id opt}".files."${secret_id opt}".path;

            # We check that a secret has not been set in extraSettings.
            extraSettingsSafe =
              opt:
              if (builtins.hasAttr opt.secret_field_name opt.extraSettings) then
                throw "Please do not set ${opt.secret_field_name} in extraSettings, it is automatically set by the dyndns module."
              else
                opt.extraSettings;

            service_config = {
              settings = builtins.catAttrs "value" (
                builtins.attrValues (
                  lib.mapAttrs (_: opt: {
                    value =
                      (extraSettingsSafe opt)
                      // {
                        domain = opt.domain;
                        provider = opt.provider;
                      }
                      // {
                        "${opt.secret_field_name}" = secret_id opt;
                      };
                  }) cfg.settings
                )
              );
            };

            secret_generator = _: opt: {
              name = secret_id opt;
              value = {
                share = true;
                migrateFact = "${secret_id opt}";
                prompts.${secret_id opt} = {
                  type = "hidden";
                  persist = true;
                };
              };
            };
          in
          {
            imports = lib.optional cfg.server.enable (
              lib.modules.importApply ./nginx.nix {
                inherit config;
                inherit settings;
                inherit lib;
              }
            );

            clan.core.vars.generators = lib.mkIf (cfg.settings != { }) (
              lib.mapAttrs' secret_generator cfg.settings
            );

            users.groups.${name} = lib.mkIf (cfg.settings != { }) { };
            users.users.${name} = lib.mkIf (cfg.settings != { }) {
              group = name;
              isSystemUser = true;
              description = "User for ${name} service";
              home = "/var/lib/${name}";
              createHome = true;
            };

            services.nginx = lib.mkIf cfg.server.enable {
              virtualHosts = {
                "${cfg.server.domain}" = {
                  forceSSL = true;
                  enableACME = true;
                  locations."/" = {
                    proxyPass = "http://localhost:${toString cfg.server.port}";
                  };
                };
              };
            };

            systemd.services.${name} = lib.mkIf (cfg.settings != { }) {
              path = [ ];
              description = "Dynamic DNS updater";
              after = [ "network.target" ];
              wantedBy = [ "multi-user.target" ];
              environment = {
                MYCONFIG = "${builtins.toJSON service_config}";
                SERVER_ENABLED = if cfg.server.enable then "yes" else "no";
                PERIOD = "${toString cfg.period}m";
                LISTENING_ADDRESS = ":${toString cfg.server.port}";
                GODEBUG = "netdns=go"; # We need to set this until this has been merged. https://github.com/NixOS/nixpkgs/pull/432758
              };

              serviceConfig =
                let
                  pyscript =
                    pkgs.writers.writePython3Bin "generate_secret_config.py"
                      {
                        libraries = [ ];
                        doCheck = false;
                      }
                      ''
                        import json
                        from pathlib import Path
                        import os

                        cred_dir = Path(os.getenv("CREDENTIALS_DIRECTORY"))
                        config_str = os.getenv("MYCONFIG")


                        def get_credential(name):
                            secret_p = cred_dir / name
                            with open(secret_p, 'r') as f:
                                return f.read().strip()


                        config = json.loads(config_str)
                        print(f"Config: {config}")
                        for attrset in config["settings"]:
                            if "password" in attrset:
                                attrset['password'] = get_credential(attrset['password'])
                            elif "token" in attrset:
                                attrset['token'] = get_credential(attrset['token'])
                            elif "secret_api_key" in attrset:
                                attrset['secret_api_key'] = get_credential(attrset['secret_api_key'])
                            elif "api_key" in attrset:
                                attrset['api_key'] = get_credential(attrset['api_key'])
                            else:
                                raise ValueError(f"Missing secret field in {attrset}")

                        # create directory data if it does not exist
                        data_dir = Path('data')
                        data_dir.mkdir(mode=0o770, exist_ok=True)

                        # Create a temporary config file
                        # with appropriate permissions
                        tmp_config_path = data_dir / '.config.json'
                        tmp_config_path.touch(mode=0o660, exist_ok=False)

                        # Write the config with secrets back
                        with open(tmp_config_path, 'w') as f:
                            f.write(json.dumps(config, indent=4))

                        # Move config into place
                        config_path = data_dir / 'config.json'
                        tmp_config_path.rename(config_path)

                        # Set file permissions to read
                        # and write only by the user and group
                        for file in data_dir.iterdir():
                            file.chmod(0o660)
                      '';
                in
                {
                  ExecStartPre = lib.getExe pyscript;
                  ExecStart = lib.getExe pkgs.ddns-updater;
                  LoadCredential = lib.mapAttrsToList (_: opt: "${secret_id opt}:${secret_path opt}") cfg.settings;
                  User = name;
                  Group = name;
                  NoNewPrivileges = true;
                  PrivateTmp = true;
                  ProtectSystem = "strict";
                  ReadOnlyPaths = "/";
                  PrivateDevices = "yes";
                  ProtectKernelModules = "yes";
                  ProtectKernelTunables = "yes";
                  WorkingDirectory = "/var/lib/${name}";
                  ReadWritePaths = [
                    "/proc/self"
                    "/var/lib/${name}"
                  ];

                  Restart = "always";
                  RestartSec = 60;
                };
            };
          };
      };
  };
}
@@ -1,19 +0,0 @@
{ lib, ... }:
let
  module = lib.modules.importApply ./default.nix { };
in
{
  clan.modules = {
    dyndns = module;
  };

  perSystem =
    { ... }:
    {
      clan.nixosTests.dyndns = {
        imports = [ ./tests/vm/default.nix ];

        clan.modules."@clan/dyndns" = module;
      };
    };
}
@@ -1,50 +0,0 @@
{
  config,
  lib,
  settings,
  ...
}:
{
  security.acme.acceptTerms = true;
  security.acme.defaults.email = settings.server.acmeEmail;

  networking.firewall.allowedTCPPorts = [
    443
    80
  ];

  services.nginx = {
    enable = true;

    statusPage = lib.mkDefault true;
    recommendedBrotliSettings = lib.mkDefault true;
    recommendedGzipSettings = lib.mkDefault true;
    recommendedOptimisation = lib.mkDefault true;
    recommendedProxySettings = lib.mkDefault true;
    recommendedTlsSettings = lib.mkDefault true;

    # Nginx sends all access logs to /var/log/nginx/access.log by default,
    # instead of sending them to the journal!
    commonHttpConfig = "access_log syslog:server=unix:/dev/log;";

    resolver.addresses =
      let
        isIPv6 = addr: builtins.match ".*:.*:.*" addr != null;
        escapeIPv6 = addr: if isIPv6 addr then "[${addr}]" else addr;
        cloudflare = [
          "1.1.1.1"
          "2606:4700:4700::1111"
        ];
        resolvers =
          if config.networking.nameservers == [ ] then cloudflare else config.networking.nameservers;
      in
      map escapeIPv6 resolvers;

    sslDhparam = config.security.dhparams.params.nginx.path;
  };

  security.dhparams = {
    enable = true;
    params.nginx = { };
  };
}
@@ -1,77 +0,0 @@
{
  pkgs,
  ...
}:
{
  name = "service-dyndns";

  clan = {
    directory = ./.;
    inventory = {
      machines.server = { };

      instances = {
        dyndns-test = {
          module.name = "@clan/dyndns";
          module.input = "self";
          roles.default.machines."server".settings = {
            server = {
              enable = true;
              domain = "test.example.com";
              port = 54805;
              acmeEmail = "test@example.com";
            };
            period = 1;
            settings = {
              "test.example.com" = {
                provider = "namecheap";
                domain = "example.com";
                secret_field_name = "password";
                extraSettings = {
                  host = "test";
                  server = "dynamicdns.park-your-domain.com";
                };
              };
            };
          };
        };
      };
    };
  };

  nodes = {
    server = {
      # Disable firewall for testing
      networking.firewall.enable = false;

      # Mock ACME for testing (avoid real certificate requests)
      security.acme.defaults.server = "https://localhost:14000/dir";
    };
  };

  testScript = ''
    start_all()

    # Test that dyndns service starts (will fail without secrets, but that's expected)
    server.wait_for_unit("multi-user.target")

    # Test that nginx service is running
    server.wait_for_unit("nginx.service")

    # Test that nginx is listening on expected ports
    server.wait_for_open_port(80)
    server.wait_for_open_port(443)

    # Test that the dyndns user was created
    # server.succeed("getent passwd dyndns")
    # server.succeed("getent group dyndns")
    #
    # Test that the home directory was created
    server.succeed("test -d /var/lib/dyndns")

    # Test that the generated nginx configuration is valid
    server.succeed("${pkgs.nginx}/bin/nginx -t")

    print("All tests passed!")
  '';
}
@@ -1,9 +1,3 @@
# Example clan service. See https://docs.clan.lol/guides/services/community/
# for more details

# The test for this module in ./tests/vm/default.nix shows an example of how
# the service is used.

{ packages }:
{ ... }:
{
@@ -11,94 +5,30 @@
  manifest.name = "clan-core/hello-world";
  manifest.description = "This is a test";

  # This service provides two roles: "morning" and "evening". Roles can be
  # defined in this file directly (e.g. the "morning" role) or split up into a
  # separate file (e.g. the "evening" role)
  roles.morning = {
  roles.peer = {
    interface =
      { lib, ... }:
      {
        # Here we define the settings for this role. They will be accessible
        # via `roles.morning.settings` in the role

        options.greeting = lib.mkOption {
        options.foo = lib.mkOption {
          type = lib.types.str;
          default = "Good morning";
          description = "The greeting to use";
        };
      };
    # Maps over all instances and produces one result per instance.
    perInstance =
      {
        # Role settings for this machine/instance
        settings,

        # The name of this instance of the service
        instanceName,

        # The current machine
        machine,

        # All roles of this service, with their assigned machines
        roles,
        ...
      }:
      {
        # Analogous to 'perSystem' of flake-parts.
        # For every instance of this service we will add a nixosModule to a morning machine
        nixosModule =
          { config, ... }:
          {
            # Examples of what you could do here:
            # - Get some settings of this machine
            #   settings.ipRanges
            #
            # - Get all evening names:
            #   allEveningNames = lib.attrNames roles.evening.machines
            #
            # - Get all roles of the machine:
            #   machine.roles
            #
            # - Get the settings that were applied to a specific evening machine:
            #   roles.evening.machines.peer1.settings
            imports = [ ];
            environment.etc.hello.text = "${settings.greeting} World!";
          };
      };
  };

  # The implementation of the evening role is in a separate file. We have kept
  # the interface here, so we can see all settings of the service in one place,
  # but you can also move it to the respective file
  roles.evening = {
    interface =
      { lib, ... }:
      {
        options.greeting = lib.mkOption {
          type = lib.types.str;
          default = "Good evening";
          description = "The greeting to use";
          # default = "";
          description = "Some option";
        };
      };
  };
  imports = [ ./evening.nix ];

  # This part gets applied to all machines, regardless of their role.
  perMachine =
    { machine, ... }:
    {
      nixosModule =
        { pkgs, ... }:
        {
          environment.systemPackages = [
            (pkgs.writeShellScriptBin "greet-world" ''
              #!${pkgs.bash}/bin/bash
              set -euo pipefail

              cat /etc/hello
              echo " I'm ${machine.name}"
            '')
          ];
          nixosModule = {
            clan.core.vars.generators.hello = {
              files.hello = {
                secret = false;
              };
              script = ''
                echo "Hello world from ${machine.name}" > $out/hello
              '';
            };
          };
        };
    };
}

@@ -1,12 +0,0 @@
{
  roles.evening.perInstance =
    { settings, ... }:
    {
      nixosModule =
        { ... }:
        {
          imports = [ ];
          environment.etc.hello.text = "${settings.greeting} World!";
        };
    };
}
@@ -27,10 +27,20 @@ let
        module.name = "hello-world";
        module.input = "self";

        roles.evening.machines.jon = { };
        roles.peer.machines.jon = { };
      };
    };
  };
  # NOTE:
  # If you wonder why 'self-hello-world':
  # A local module gets the prefix 'self'; otherwise the prefix is the name of the 'input'.
  # The rest is the name of the service, as set in the instance via 'module.name':
  #
  # -> ${module.input}-${module.name}
  # In this case it is 'self-hello-world'
  # This is usually only used internally, but we can use it to test the evaluation of the service module in isolation
  # evaluatedService =
  #   testFlake.clanInternals.inventoryClass.distributedServices.importedModulesEvaluated.self-hello-world.config;
in
{
  test_simple = {

@@ -5,35 +5,22 @@
    directory = ./.;
    inventory = {
      machines.peer1 = { };
      machines.peer2 = { };

      instances."test" = {
        module.name = "hello-service";
        module.input = "self";

        # Assign the roles to the two machines
        roles.morning.machines.peer1 = { };

        roles.evening.machines.peer2 = {
          # Set role settings for the peers where we want to differ from
          # the role defaults
          settings = {
            greeting = "Good night";
          };
        };
        roles.peer.machines.peer1 = { };
      };
    };
  };

  testScript =
    { ... }:
    { nodes, ... }:
    ''
      start_all()

      value = peer1.succeed("greet-world")
      assert value.strip() == "Good morning World! I'm peer1", value

      value = peer2.succeed("greet-world")
      assert value.strip() == "Good night World! I'm peer2", value
      # peer1 should have the 'hello' file
      value = peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.hello.files.hello.path}")
      assert value.strip() == "Hello world from peer1", value
    '';
}

@@ -1,47 +0,0 @@
{ ... }:
{
  _class = "clan.service";
  manifest.name = "clan-core/internet";
  manifest.description = "direct access (or via ssh jumphost) to machines";
  manifest.categories = [
    "System"
    "Network"
  ];
  roles.default = {
    interface =
      { lib, ... }:
      {
        options = {
          host = lib.mkOption {
            type = lib.types.str;
            description = ''
              ip address or hostname (domain) of the machine
            '';
          };
          jumphosts = lib.mkOption {
            type = lib.types.listOf lib.types.str;
            default = [ ];
            description = ''
              optional list of jumphosts to use to connect to the machine
            '';
          };
        };
      };
    perInstance =
      {
        roles,
        lib,
        settings,
        ...
      }:
      {
        exports.networking = {
          # TODO add user space network support to clan-cli
          peers = lib.mapAttrs (_name: machine: {
            host.plain = machine.settings.host;
            SSHOptions = map (host: "-J ${host}") machine.settings.jumphosts;
          }) roles.default.machines;
        };
      };
  };
}
@@ -1,9 +0,0 @@
{ lib, ... }:
let
  module = lib.modules.importApply ./default.nix { };
in
{
  clan.modules = {
    internet = module;
  };
}
@@ -1,35 +0,0 @@
## Features

- Creates incremental snapshots using rsnapshot
- Supports multiple backup targets
- Mount/unmount hooks for external storage
- Pre/post backup hooks for custom scripts
- Configurable snapshot retention
- Automatic state folder detection

## Usage

Enable the localbackup service and configure backup targets:

```nix
instances = {
  localbackup = {
    module.name = "@clan/localbackup";
    module.input = "self";
    roles.default.machines."machine".settings = {
      targets.external = {
        directory = "/mnt/backup";
        mountpoint = "/mnt/backup";
      };
    };
  };
};
```

## Commands

The service provides these commands:

- `localbackup-create`: Create a new backup
- `localbackup-list`: List available backups
- `localbackup-restore`: Restore from backup (requires the NAME and FOLDERS environment variables; see the example below)
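
A minimal sketch of a restore invocation (the backup name and folder path below are illustrative; `localbackup-list` prints the names that actually exist on your machine):

```sh
# NAME has the form "<target>::<snapshot directory>", as printed by localbackup-list.
# FOLDERS is a colon-separated list of folders to restore into place.
export NAME="external::/mnt/backup/snapshot.0"   # hypothetical snapshot name
export FOLDERS="/var/lib/myservice"              # hypothetical state folder
localbackup-restore
```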
@@ -1,267 +0,0 @@
{ ... }:
{
  _class = "clan.service";
  manifest.name = "localbackup";
  manifest.description = "Automatically backs up the current machine to a local directory.";
  manifest.categories = [ "System" ];
  manifest.readme = builtins.readFile ./README.md;

  roles.default = {
    interface =
      { lib, ... }:
      {

        options = {

          targets = lib.mkOption {
            type = lib.types.attrsOf (
              lib.types.submodule (
                { name, ... }:
                {
                  options = {
                    name = lib.mkOption {
                      type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
                      default = name;
                      description = "the name of the backup job";
                    };
                    directory = lib.mkOption {
                      type = lib.types.str;
                      description = "the directory to backup";
                    };
                    mountpoint = lib.mkOption {
                      type = lib.types.nullOr lib.types.str;
                      default = null;
                      description = "mountpoint of the directory to backup. If set, the directory will be mounted before the backup and unmounted afterwards";
                    };
                    preMountHook = lib.mkOption {
                      type = lib.types.nullOr lib.types.lines;
                      default = null;
                      description = "Shell commands to run before the directory is mounted";
                    };
                    postMountHook = lib.mkOption {
                      type = lib.types.nullOr lib.types.lines;
                      default = null;
                      description = "Shell commands to run after the directory is mounted";
                    };
                    preUnmountHook = lib.mkOption {
                      type = lib.types.nullOr lib.types.lines;
                      default = null;
                      description = "Shell commands to run before the directory is unmounted";
                    };
                    postUnmountHook = lib.mkOption {
                      type = lib.types.nullOr lib.types.lines;
                      default = null;
                      description = "Shell commands to run after the directory is unmounted";
                    };
                    preBackupHook = lib.mkOption {
                      type = lib.types.nullOr lib.types.lines;
                      default = null;
                      description = "Shell commands to run before the backup";
                    };
                    postBackupHook = lib.mkOption {
                      type = lib.types.nullOr lib.types.lines;
                      default = null;
                      description = "Shell commands to run after the backup";
                    };
                  };
                }
              )
            );
            # default = { };
            description = "List of directories where backups are stored";
          };

          snapshots = lib.mkOption {
            type = lib.types.int;
            default = 20;
            description = "Number of snapshots to keep";
          };
        };

      };

    perInstance =
      {
        settings,
        ...
      }:
      {
        nixosModule =
          {
            config,
            lib,
            pkgs,
            ...
          }:

          let
            mountHook = target: ''
              if [[ -x /run/current-system/sw/bin/localbackup-mount-${target.name} ]]; then
                /run/current-system/sw/bin/localbackup-mount-${target.name}
              fi
              if [[ -x /run/current-system/sw/bin/localbackup-unmount-${target.name} ]]; then
                trap "/run/current-system/sw/bin/localbackup-unmount-${target.name}" EXIT
              fi
            '';

            uniqueFolders = lib.unique (
              lib.flatten (lib.mapAttrsToList (_name: state: state.folders) config.clan.core.state)
            );

            rsnapshotConfig = target: ''
              config_version 1.2
              snapshot_root ${target.directory}
              sync_first 1
              cmd_cp ${pkgs.coreutils}/bin/cp
              cmd_rm ${pkgs.coreutils}/bin/rm
              cmd_rsync ${pkgs.rsync}/bin/rsync
              cmd_ssh ${pkgs.openssh}/bin/ssh
              cmd_logger ${pkgs.inetutils}/bin/logger
              cmd_du ${pkgs.coreutils}/bin/du
              cmd_rsnapshot_diff ${pkgs.rsnapshot}/bin/rsnapshot-diff

              ${lib.optionalString (target.postBackupHook != null) ''
                cmd_postexec ${pkgs.writeShellScript "postexec.sh" ''
                  set -efu -o pipefail
                  ${target.postBackupHook}
                ''}
              ''}
              retain snapshot ${builtins.toString settings.snapshots}
              ${lib.concatMapStringsSep "\n" (folder: ''
                backup ${folder} ${config.networking.hostName}/
              '') uniqueFolders}
            '';
          in

          {

            environment.systemPackages = [
              (pkgs.writeShellScriptBin "localbackup-create" ''
                set -efu -o pipefail
                export PATH=${
                  lib.makeBinPath [
                    pkgs.rsnapshot
                    pkgs.coreutils
                    pkgs.util-linux
                  ]
                }
                ${lib.concatMapStringsSep "\n" (target: ''
                  ${mountHook target}
                  echo "Creating backup '${target.name}'"

                  ${lib.optionalString (target.preBackupHook != null) ''
                    (
                      ${target.preBackupHook}
                    )
                  ''}

                  declare -A preCommandErrors
                  ${lib.concatMapStringsSep "\n" (
                    state:
                    lib.optionalString (state.preBackupCommand != null) ''
                      echo "Running pre-backup command for ${state.name}"
                      if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
                        preCommandErrors["${state.name}"]=1
                      fi
                    ''
                  ) (builtins.attrValues config.clan.core.state)}

                  rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" sync
                  rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" snapshot
                '') (builtins.attrValues settings.targets)}'')
              (pkgs.writeShellScriptBin "localbackup-list" ''
                set -efu -o pipefail
                export PATH=${
                  lib.makeBinPath [
                    pkgs.jq
                    pkgs.findutils
                    pkgs.coreutils
                    pkgs.util-linux
                  ]
                }
                (${
                  lib.concatMapStringsSep "\n" (target: ''
                    (
                      ${mountHook target}
                      find ${lib.escapeShellArg target.directory} -mindepth 1 -maxdepth 1 -name "snapshot.*" -type d -print0 \
                        | jq -Rs 'split("\u0000") | .[] | select(. != "") | { "name": ("${target.name}::" + .)}'
                    )
                  '') (builtins.attrValues settings.targets)
                }) | jq -s .
              '')
              (pkgs.writeShellScriptBin "localbackup-restore" ''
                set -efu -o pipefail
                export PATH=${
                  lib.makeBinPath [
                    pkgs.rsync
                    pkgs.coreutils
                    pkgs.util-linux
                    pkgs.gawk
                  ]
                }
                if [[ "''${NAME:-}" == "" ]]; then
                  echo "No backup name given via NAME environment variable"
                  exit 1
                fi
                if [[ "''${FOLDERS:-}" == "" ]]; then
                  echo "No folders given via FOLDERS environment variable"
                  exit 1
                fi
                name=$(awk -F'::' '{print $1}' <<< "$NAME")
                backupname=''${NAME#$name::}

                if command -v localbackup-mount-$name; then
                  localbackup-mount-$name
                fi
                if command -v localbackup-unmount-$name; then
                  trap "localbackup-unmount-$name" EXIT
                fi

                if [[ ! -d $backupname ]]; then
                  echo "No backup found $backupname"
                  exit 1
                fi

                IFS=':' read -ra FOLDER <<< "''$FOLDERS"
                for folder in "''${FOLDER[@]}"; do
                  mkdir -p "$folder"
                  rsync -a "$backupname/${config.networking.hostName}$folder/" "$folder"
                done
              '')
            ]
            ++ (lib.mapAttrsToList (
              name: target:
              pkgs.writeShellScriptBin ("localbackup-mount-" + name) ''
                set -efu -o pipefail
                ${lib.optionalString (target.preMountHook != null) target.preMountHook}
                ${lib.optionalString (target.mountpoint != null) ''
                  if ! ${pkgs.util-linux}/bin/mountpoint -q ${lib.escapeShellArg target.mountpoint}; then
                    ${pkgs.util-linux}/bin/mount -o X-mount.mkdir ${lib.escapeShellArg target.mountpoint}
                  fi
                ''}
                ${lib.optionalString (target.postMountHook != null) target.postMountHook}
              ''
            ) settings.targets)
            ++ lib.mapAttrsToList (
              name: target:
              pkgs.writeShellScriptBin ("localbackup-unmount-" + name) ''
                set -efu -o pipefail
                ${lib.optionalString (target.preUnmountHook != null) target.preUnmountHook}
                ${lib.optionalString (
                  target.mountpoint != null
                ) "${pkgs.util-linux}/bin/umount ${lib.escapeShellArg target.mountpoint}"}
                ${lib.optionalString (target.postUnmountHook != null) target.postUnmountHook}
              ''
            ) settings.targets;

            clan.core.backups.providers.localbackup = {
              # TODO list needs to run locally or on the remote machine
              list = "localbackup-list";
              create = "localbackup-create";
              restore = "localbackup-restore";
            };

          };
      };
  };
}
@@ -1,16 +0,0 @@
{ lib, ... }:
let
  module = lib.modules.importApply ./default.nix { };
in
{
  clan.modules.localbackup = module;
  perSystem =
    { ... }:
    {
      clan.nixosTests.localbackup = {
        imports = [ ./tests/vm/default.nix ];

        clan.modules."@clan/localbackup" = module;
      };
    };
}
Some files were not shown because too many files have changed in this diff