Compare commits
1 Commits
ke-qa-nixp
...
vm-mamange
| Author | SHA1 | Date | |
|---|---|---|---|
| | 5859eeac5a | | |

9  .gitea/workflows/checks.yaml  Normal file
@@ -0,0 +1,9 @@
name: checks
on:
  pull_request:
jobs:
  checks-impure:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4
      - run: nix run .#impure-checks
@@ -8,6 +8,6 @@ jobs:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4
      - run: nix run --print-build-logs .#deploy-docs
      - run: nix run .#deploy-docs
        env:
          SSH_HOMEPAGE_KEY: ${{ secrets.SSH_HOMEPAGE_KEY }}
@@ -21,9 +21,5 @@ jobs:
          # Exclude private flakes and update-clan-core checks flake
          exclude-patterns: "checks/impure/flake.nix"
          auto-merge: true
          git-author-name: "clan-bot"
          git-committer-name: "clan-bot"
          git-author-email: "clan-bot@clan.lol"
          git-committer-email: "clan-bot@clan.lol"
          gitea-token: ${{ secrets.CI_BOT_TOKEN }}
          github-token: ${{ secrets.CI_BOT_GITHUB_TOKEN }}
2  .github/workflows/repo-sync.yml  vendored
@@ -10,7 +10,7 @@ jobs:
    if: github.repository_owner == 'clan-lol'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4
        with:
          persist-credentials: false
      - uses: actions/create-github-app-token@v2
3  .gitignore  vendored
@@ -39,6 +39,7 @@ select
# Generated files
pkgs/clan-app/ui/api/API.json
pkgs/clan-app/ui/api/API.ts
pkgs/clan-app/ui/api/Inventory.ts
pkgs/clan-app/ui/api/modules_schemas.json
pkgs/clan-app/ui/api/schema.json
pkgs/clan-app/ui/.fonts
@@ -52,5 +53,3 @@ pkgs/clan-app/ui/.fonts
*.gif
*.mp4
*.mkv

.jj
20  CODEOWNERS
@@ -1,20 +0,0 @@
clanServices/.* @pinpox @kenji

lib/test/container-test-driver/.* @DavHau @mic92
lib/modules/inventory/.* @hsjobeki
lib/modules/inventoryClass/.* @hsjobeki

pkgs/clan-app/ui/.* @hsjobeki @brianmcgee
pkgs/clan-app/clan_app/.* @qubasa @hsjobeki

pkgs/clan-cli/clan_cli/.* @lassulus @mic92 @kenji
pkgs/clan-cli/clan_cli/(secrets|vars)/.* @DavHau @lassulus

pkgs/clan-cli/clan_lib/log_machines/.* @Qubasa
pkgs/clan-cli/clan_lib/ssh/.* @Qubasa @Mic92 @lassulus
pkgs/clan-cli/clan_lib/tags/.* @hsjobeki
pkgs/clan-cli/clan_lib/persist/.* @hsjobeki
pkgs/clan-cli/clan_lib/flake/.* @lassulus

pkgs/clan-cli/api.py @hsjobeki
pkgs/clan-cli/openapi.py @hsjobeki
4  CONTRIBUTING.md  Normal file
@@ -0,0 +1,4 @@
# Contributing to Clan

<!-- Local file: docs/CONTRIBUTING.md -->
Go to the Contributing guide at https://docs.clan.lol/guides/contributing/CONTRIBUTING
@@ -8,7 +8,7 @@ Our mission is simple: to democratize computing by providing tools that empower

## Features of Clan

- **Full-Stack System Deployment:** Utilize Clan's toolkit alongside Nix's reliability to build and manage systems effortlessly.
- **Full-Stack System Deployment:** Utilize Clan’s toolkit alongside Nix's reliability to build and manage systems effortlessly.
- **Overlay Networks:** Secure, private communication channels between devices.
- **Virtual Machine Integration:** Seamless operation of VM applications within the main operating system.
- **Robust Backup Management:** Long-term, self-hosted data preservation.
@@ -30,7 +30,7 @@ In the Clan ecosystem, security is paramount. Learn how to handle secrets effect

The Clan project thrives on community contributions. We welcome everyone to contribute and collaborate:

- **Contribution Guidelines**: Make a meaningful impact by following the steps in [contributing](https://docs.clan.lol/guides/contributing/CONTRIBUTING/)<!-- [contributing.md](docs/CONTRIBUTING.md) -->.
- **Contribution Guidelines**: Make a meaningful impact by following the steps in [contributing](https://docs.clan.lol/contributing/contributing/)<!-- [contributing.md](docs/CONTRIBUTING.md) -->.

## Join the revolution

51  checks/borgbackup-legacy/default.nix  Normal file
@@ -0,0 +1,51 @@
(
  { ... }:
  {
    name = "borgbackup";

    nodes.machine =
      { self, pkgs, ... }:
      {
        imports = [
          self.clanModules.borgbackup
          self.nixosModules.clanCore
          {
            services.openssh.enable = true;
            services.borgbackup.repos.testrepo = {
              authorizedKeys = [ (builtins.readFile ../assets/ssh/pubkey) ];
            };
          }
          {
            clan.core.settings.directory = ./.;
            clan.core.state.testState.folders = [ "/etc/state" ];
            environment.etc.state.text = "hello world";
            systemd.tmpfiles.settings."vmsecrets" = {
              "/etc/secrets/borgbackup/borgbackup.ssh" = {
                C.argument = "${../assets/ssh/privkey}";
                z = {
                  mode = "0400";
                  user = "root";
                };
              };
              "/etc/secrets/borgbackup/borgbackup.repokey" = {
                C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
                z = {
                  mode = "0400";
                  user = "root";
                };
              };
            };
            # clan.core.facts.secretStore = "vm";
            clan.core.vars.settings.secretStore = "vm";

            clan.borgbackup.destinations.test.repo = "borg@localhost:.";
          }
        ];
      };
    testScript = ''
      start_all()
      machine.systemctl("start --wait borgbackup-job-test.service")
      assert "machine-test" in machine.succeed("BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes /run/current-system/sw/bin/borg-job-test list")
    '';
  }
)
6  checks/clan-core-for-checks.nix  Normal file
@@ -0,0 +1,6 @@
{ fetchgit }:
fetchgit {
  url = "https://git.clan.lol/clan/clan-core.git";
  rev = "5d884cecc2585a29b6a3596681839d081b4de192";
  sha256 = "09is1afmncamavb2q88qac37vmsijxzsy1iz1vr6gsyjq2rixaxc";
}
@@ -12,6 +12,7 @@ let
|
||||
elem
|
||||
filter
|
||||
filterAttrs
|
||||
flip
|
||||
genAttrs
|
||||
hasPrefix
|
||||
pathExists
|
||||
@@ -35,6 +36,7 @@ in
|
||||
++ filter pathExists [
|
||||
./devshell/flake-module.nix
|
||||
./flash/flake-module.nix
|
||||
./impure/flake-module.nix
|
||||
./installation/flake-module.nix
|
||||
./update/flake-module.nix
|
||||
./morph/flake-module.nix
|
||||
@@ -44,7 +46,7 @@ in
|
||||
flake.check = genAttrs [ "x86_64-linux" "aarch64-darwin" ] (
|
||||
system:
|
||||
let
|
||||
checks = filterAttrs (
|
||||
checks = flip filterAttrs self.checks.${system} (
|
||||
name: _check:
|
||||
!(hasPrefix "nixos-test-" name)
|
||||
&& !(hasPrefix "nixos-" name)
|
||||
@@ -56,7 +58,7 @@ in
|
||||
"clan-core-for-checks"
|
||||
"clan-deps"
|
||||
])
|
||||
) self.checks.${system};
|
||||
);
|
||||
in
|
||||
inputs.nixpkgs.legacyPackages.${system}.runCommand "fast-flake-checks-${system}"
|
||||
{ passthru.checks = checks; }
|
||||
@@ -91,10 +93,13 @@ in
|
||||
|
||||
# Base Tests
|
||||
nixos-test-secrets = self.clanLib.test.baseTest ./secrets nixosTestArgs;
|
||||
nixos-test-borgbackup-legacy = self.clanLib.test.baseTest ./borgbackup-legacy nixosTestArgs;
|
||||
nixos-test-wayland-proxy-virtwl = self.clanLib.test.baseTest ./wayland-proxy-virtwl nixosTestArgs;
|
||||
|
||||
# Container Tests
|
||||
nixos-test-container = self.clanLib.test.containerTest ./container nixosTestArgs;
|
||||
nixos-test-zt-tcp-relay = self.clanLib.test.containerTest ./zt-tcp-relay nixosTestArgs;
|
||||
nixos-test-matrix-synapse = self.clanLib.test.containerTest ./matrix-synapse nixosTestArgs;
|
||||
nixos-test-user-firewall-iptables = self.clanLib.test.containerTest ./user-firewall/iptables.nix nixosTestArgs;
|
||||
nixos-test-user-firewall-nftables = self.clanLib.test.containerTest ./user-firewall/nftables.nix nixosTestArgs;
|
||||
|
||||
@@ -109,8 +114,6 @@ in
|
||||
"dont-depend-on-repo-root"
|
||||
];
|
||||
|
||||
# Temporary workaround: Filter out docs package and devshell for aarch64-darwin due to CI builder hangs
|
||||
# TODO: Remove this filter once macOS CI builder is updated
|
||||
flakeOutputs =
|
||||
lib.mapAttrs' (
|
||||
name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
|
||||
@@ -118,18 +121,8 @@ in
|
||||
// lib.mapAttrs' (
|
||||
name: config: lib.nameValuePair "darwin-${name}" config.config.system.build.toplevel
|
||||
) (self.darwinConfigurations or { })
|
||||
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") (
|
||||
if system == "aarch64-darwin" then
|
||||
lib.filterAttrs (n: _: n != "docs" && n != "deploy-docs" && n != "docs-options") packagesToBuild
|
||||
else
|
||||
packagesToBuild
|
||||
)
|
||||
// lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") (
|
||||
if system == "aarch64-darwin" then
|
||||
lib.filterAttrs (n: _: n != "docs") self'.devShells
|
||||
else
|
||||
self'.devShells
|
||||
)
|
||||
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") packagesToBuild
|
||||
// lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
|
||||
// lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (
|
||||
self'.legacyPackages.homeConfigurations or { }
|
||||
);
|
||||
@@ -137,6 +130,33 @@ in
|
||||
nixosTests
|
||||
// flakeOutputs
|
||||
// {
|
||||
# TODO: Automatically provide this check to downstream users to check their modules
|
||||
clan-modules-json-compatible =
|
||||
let
|
||||
allSchemas = lib.mapAttrs (
|
||||
_n: m:
|
||||
let
|
||||
schema =
|
||||
(self.clanLib.evalService {
|
||||
modules = [ m ];
|
||||
prefix = [
|
||||
"checks"
|
||||
system
|
||||
];
|
||||
}).config.result.api.schema;
|
||||
in
|
||||
schema
|
||||
) self.clan.modules;
|
||||
in
|
||||
pkgs.runCommand "combined-result"
|
||||
{
|
||||
schemaFile = builtins.toFile "schemas.json" (builtins.toJSON allSchemas);
|
||||
}
|
||||
''
|
||||
mkdir -p $out
|
||||
cat $schemaFile > $out/allSchemas.json
|
||||
'';
|
||||
|
||||
clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
|
||||
cp -r ${privateInputs.clan-core-for-checks} $out
|
||||
chmod -R +w $out
|
||||
|
||||
@@ -13,6 +13,8 @@
|
||||
fileSystems."/".device = lib.mkDefault "/dev/vda";
|
||||
boot.loader.grub.device = lib.mkDefault "/dev/vda";
|
||||
|
||||
# We need to use `mkForce` because we inherit from `test-install-machine`
|
||||
# which currently hardcodes `nixpkgs.hostPlatform`
|
||||
nixpkgs.hostPlatform = lib.mkForce system;
|
||||
|
||||
imports = [ self.nixosModules.test-flash-machine ];
|
||||
@@ -26,24 +28,10 @@
|
||||
{
|
||||
imports = [ self.nixosModules.test-install-machine-without-system ];
|
||||
|
||||
# We don't want our system to define any `vars` generators as these can't
|
||||
# be generated as the flake is inside `/nix/store`.
|
||||
clan.core.settings.state-version.enable = false;
|
||||
clan.core.vars.generators.test = lib.mkForce { };
|
||||
|
||||
disko.devices.disk.main.preCreateHook = lib.mkForce "";
|
||||
|
||||
# Every option here should match the options set through `clan flash write`
|
||||
# if you get a mass rebuild on the disko derivation, this means you need to
|
||||
# adjust something here. Also make sure that the injected json in clan flash write
|
||||
# is up to date.
|
||||
i18n.defaultLocale = "de_DE.UTF-8";
|
||||
console.keyMap = "de";
|
||||
services.xserver.xkb.layout = "de";
|
||||
users.users.root.openssh.authorizedKeys.keys = [
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIRWUusawhlIorx7VFeQJHmMkhl9X3QpnvOdhnV/bQNG root@target\n"
|
||||
];
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
perSystem =
|
||||
@@ -56,11 +44,8 @@
|
||||
dependencies = [
|
||||
pkgs.disko
|
||||
pkgs.buildPackages.xorg.lndir
|
||||
pkgs.glibcLocales
|
||||
pkgs.kbd.out
|
||||
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.ConfigIniFiles
|
||||
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.FileSlurp
|
||||
pkgs.bubblewrap
|
||||
|
||||
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
|
||||
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
|
||||
@@ -70,8 +55,7 @@
|
||||
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
|
||||
in
|
||||
{
|
||||
# Skip flash test on aarch64-linux for now as it's too slow
|
||||
checks = lib.optionalAttrs (pkgs.stdenv.isLinux && pkgs.hostPlatform.system != "aarch64-linux") {
|
||||
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
|
||||
nixos-test-flash = self.clanLib.test.baseTest {
|
||||
name = "flash";
|
||||
nodes.target = {
|
||||
@@ -88,7 +72,7 @@
|
||||
substituters = lib.mkForce [ ];
|
||||
hashed-mirrors = null;
|
||||
connect-timeout = lib.mkForce 3;
|
||||
flake-registry = "";
|
||||
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
|
||||
experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
@@ -97,10 +81,10 @@
|
||||
};
|
||||
testScript = ''
|
||||
start_all()
|
||||
machine.succeed("echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIRWUusawhlIorx7VFeQJHmMkhl9X3QpnvOdhnV/bQNG root@target' > ./test_id_ed25519.pub")
|
||||
|
||||
# Some distros like to automount disks with spaces
|
||||
machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdc && mount /dev/vdc "/mnt/with spaces"')
|
||||
machine.succeed("clan flash write --ssh-pubkey ./test_id_ed25519.pub --keymap de --language de_DE.UTF-8 --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.hostPlatform.system}")
|
||||
machine.succeed("clan flash write --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.hostPlatform.system}")
|
||||
'';
|
||||
} { inherit pkgs self; };
|
||||
};
|
||||
|
||||
51  checks/impure/flake-module.nix  Normal file
@@ -0,0 +1,51 @@
{
  perSystem =
    {
      pkgs,
      lib,
      self',
      ...
    }:
    {
      # a script that executes all other checks
      packages.impure-checks = pkgs.writeShellScriptBin "impure-checks" ''
        #!${pkgs.bash}/bin/bash
        set -euo pipefail

        unset CLAN_DIR

        export PATH="${
          lib.makeBinPath (
            [
              pkgs.gitMinimal
              pkgs.nix
              pkgs.coreutils
              pkgs.rsync # needed to have rsync installed on the dummy ssh server
            ]
            ++ self'.packages.clan-cli-full.runtimeDependencies
          )
        }"
        ROOT=$(git rev-parse --show-toplevel)
        cd "$ROOT/pkgs/clan-cli"

        # Set up custom git configuration for tests
        export GIT_CONFIG_GLOBAL=$(mktemp)
        git config --file "$GIT_CONFIG_GLOBAL" user.name "Test User"
        git config --file "$GIT_CONFIG_GLOBAL" user.email "test@example.com"
        export GIT_CONFIG_SYSTEM=/dev/null

        # this disables dynamic dependency loading in clan-cli
        export CLAN_NO_DYNAMIC_DEPS=1

        jobs=$(nproc)
        # Spawning worker in pytest is relatively slow, so we limit the number of jobs to 13
        # (current number of impure tests)
        jobs="$((jobs > 13 ? 13 : jobs))"

        nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -n $jobs -m impure ./clan_cli $@"

        # Clean up temporary git config
        rm -f "$GIT_CONFIG_GLOBAL"
      '';
    };
}
@@ -1,8 +1,8 @@
|
||||
{
|
||||
config,
|
||||
self,
|
||||
lib,
|
||||
privateInputs,
|
||||
|
||||
...
|
||||
}:
|
||||
{
|
||||
@@ -14,38 +14,31 @@
|
||||
# you can get a new one by adding
|
||||
# client.fail("cat test-flake/machines/test-install-machine/facter.json >&2")
|
||||
# to the installation test.
|
||||
clan.machines = {
|
||||
test-install-machine-without-system = {
|
||||
clan.machines.test-install-machine-without-system = {
|
||||
fileSystems."/".device = lib.mkDefault "/dev/vda";
|
||||
boot.loader.grub.device = lib.mkDefault "/dev/vda";
|
||||
|
||||
imports = [
|
||||
self.nixosModules.test-install-machine-without-system
|
||||
];
|
||||
imports = [ self.nixosModules.test-install-machine-without-system ];
|
||||
};
|
||||
}
|
||||
// (lib.listToAttrs (
|
||||
lib.map (
|
||||
system:
|
||||
lib.nameValuePair "test-install-machine-${system}" {
|
||||
imports = [
|
||||
self.nixosModules.test-install-machine-without-system
|
||||
(
|
||||
if privateInputs ? test-fixtures then
|
||||
clan.machines.test-install-machine-with-system =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
facter.reportPath = privateInputs.test-fixtures + /nixos-vm-facter-json/${system}.json;
|
||||
# https://git.clan.lol/clan/test-fixtures
|
||||
facter.reportPath = builtins.fetchurl {
|
||||
url = "https://git.clan.lol/clan/test-fixtures/raw/commit/4a2bc56d886578124b05060d3fb7eddc38c019f8/nixos-vm-facter-json/${pkgs.hostPlatform.system}.json";
|
||||
sha256 =
|
||||
{
|
||||
aarch64-linux = "sha256:1rlfymk03rmfkm2qgrc8l5kj5i20srx79n1y1h4nzlpwaz0j7hh2";
|
||||
x86_64-linux = "sha256:16myh0ll2gdwsiwkjw5ba4dl23ppwbsanxx214863j7nvzx42pws";
|
||||
}
|
||||
else
|
||||
{ nixpkgs.hostPlatform = system; }
|
||||
)
|
||||
];
|
||||
.${pkgs.hostPlatform.system};
|
||||
};
|
||||
|
||||
fileSystems."/".device = lib.mkDefault "/dev/vda";
|
||||
boot.loader.grub.device = lib.mkDefault "/dev/vda";
|
||||
}
|
||||
) (lib.filter (lib.hasSuffix "linux") config.systems)
|
||||
));
|
||||
|
||||
imports = [ self.nixosModules.test-install-machine-without-system ];
|
||||
};
|
||||
flake.nixosModules = {
|
||||
test-install-machine-without-system =
|
||||
{ lib, modulesPath, ... }:
|
||||
@@ -160,9 +153,9 @@
|
||||
closureInfo = pkgs.closureInfo {
|
||||
rootPaths = [
|
||||
privateInputs.clan-core-for-checks
|
||||
self.nixosConfigurations."test-install-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
|
||||
self.nixosConfigurations."test-install-machine-${pkgs.hostPlatform.system}".config.system.build.initialRamdisk
|
||||
self.nixosConfigurations."test-install-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
|
||||
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
|
||||
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.initialRamdisk
|
||||
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
|
||||
pkgs.stdenv.drvPath
|
||||
pkgs.bash.drvPath
|
||||
pkgs.buildPackages.xorg.lndir
|
||||
@@ -215,7 +208,7 @@
|
||||
# Prepare test flake and Nix store
|
||||
flake_dir = prepare_test_flake(
|
||||
temp_dir,
|
||||
"${self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks}",
|
||||
"${self.checks.x86_64-linux.clan-core-for-checks}",
|
||||
"${closureInfo}"
|
||||
)
|
||||
|
||||
@@ -226,22 +219,6 @@
|
||||
"${../assets/ssh/privkey}"
|
||||
)
|
||||
|
||||
# Run clan install from host using port forwarding
|
||||
clan_cmd = [
|
||||
"${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
|
||||
"machines",
|
||||
"init-hardware-config",
|
||||
"--debug",
|
||||
"--flake", str(flake_dir),
|
||||
"--yes", "test-install-machine-without-system",
|
||||
"--host-key-check", "none",
|
||||
"--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
|
||||
"-i", ssh_conn.ssh_key,
|
||||
"--option", "store", os.environ['CLAN_TEST_STORE']
|
||||
]
|
||||
subprocess.run(clan_cmd, check=True)
|
||||
|
||||
|
||||
# Run clan install from host using port forwarding
|
||||
clan_cmd = [
|
||||
"${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
|
||||
@@ -255,7 +232,6 @@
|
||||
"-i", ssh_conn.ssh_key,
|
||||
"--option", "store", os.environ['CLAN_TEST_STORE'],
|
||||
"--update-hardware-config", "nixos-facter",
|
||||
"--no-persist-state",
|
||||
]
|
||||
|
||||
subprocess.run(clan_cmd, check=True)
|
||||
@@ -265,7 +241,7 @@
|
||||
target.shutdown()
|
||||
except BrokenPipeError:
|
||||
# qemu has already exited
|
||||
target.connected = False
|
||||
pass
|
||||
|
||||
# Create a new machine instance that boots from the installed system
|
||||
installed_machine = create_test_machine(target, "${pkgs.qemu_test}", name="after_install")
|
||||
@@ -296,7 +272,7 @@
|
||||
# Prepare test flake and Nix store
|
||||
flake_dir = prepare_test_flake(
|
||||
temp_dir,
|
||||
"${self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks}",
|
||||
"${self.checks.x86_64-linux.clan-core-for-checks}",
|
||||
"${closureInfo}"
|
||||
)
|
||||
|
||||
@@ -325,8 +301,7 @@
|
||||
"test-install-machine-without-system",
|
||||
"-i", ssh_conn.ssh_key,
|
||||
"--option", "store", os.environ['CLAN_TEST_STORE'],
|
||||
"--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
|
||||
"--yes"
|
||||
f"nonrootuser@localhost:{ssh_conn.host_port}"
|
||||
]
|
||||
|
||||
result = subprocess.run(clan_cmd, capture_output=True, cwd=flake_dir)
|
||||
@@ -350,9 +325,7 @@
|
||||
"test-install-machine-without-system",
|
||||
"-i", ssh_conn.ssh_key,
|
||||
"--option", "store", os.environ['CLAN_TEST_STORE'],
|
||||
"--target-host",
|
||||
f"nonrootuser@localhost:{ssh_conn.host_port}",
|
||||
"--yes"
|
||||
f"nonrootuser@localhost:{ssh_conn.host_port}"
|
||||
]
|
||||
|
||||
result = subprocess.run(clan_cmd, capture_output=True, cwd=flake_dir)
|
||||
|
||||
@@ -147,11 +147,28 @@ let
|
||||
];
|
||||
doCheck = false;
|
||||
};
|
||||
|
||||
# Common closure info
|
||||
closureInfo = pkgs.closureInfo {
|
||||
rootPaths = [
|
||||
self.checks.x86_64-linux.clan-core-for-checks
|
||||
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
|
||||
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.initialRamdisk
|
||||
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
|
||||
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.clan.deployment.file
|
||||
pkgs.stdenv.drvPath
|
||||
pkgs.bash.drvPath
|
||||
pkgs.buildPackages.xorg.lndir
|
||||
]
|
||||
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
|
||||
};
|
||||
|
||||
in
|
||||
{
|
||||
inherit
|
||||
target
|
||||
baseTestMachine
|
||||
nixosTestLib
|
||||
closureInfo
|
||||
;
|
||||
}
|
||||
|
||||
83
checks/matrix-synapse/default.nix
Normal file
83
checks/matrix-synapse/default.nix
Normal file
@@ -0,0 +1,83 @@
|
||||
(
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
name = "matrix-synapse";
|
||||
|
||||
nodes.machine =
|
||||
{
|
||||
config,
|
||||
self,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [
|
||||
self.clanModules.matrix-synapse
|
||||
self.nixosModules.clanCore
|
||||
{
|
||||
clan.core.settings.directory = ./.;
|
||||
|
||||
services.nginx.virtualHosts."matrix.clan.test" = {
|
||||
enableACME = lib.mkForce false;
|
||||
forceSSL = lib.mkForce false;
|
||||
};
|
||||
clan.nginx.acme.email = "admins@clan.lol";
|
||||
clan.matrix-synapse = {
|
||||
server_tld = "clan.test";
|
||||
app_domain = "matrix.clan.test";
|
||||
};
|
||||
clan.matrix-synapse.users.admin.admin = true;
|
||||
clan.matrix-synapse.users.someuser = { };
|
||||
|
||||
clan.core.facts.secretStore = "vm";
|
||||
clan.core.vars.settings.secretStore = "vm";
|
||||
clan.core.vars.settings.publicStore = "in_repo";
|
||||
|
||||
# because we use systemd-tmpfiles to copy the secrets, we need to a separate systemd-tmpfiles call to provision them.
|
||||
boot.postBootCommands = "${config.systemd.package}/bin/systemd-tmpfiles --create /etc/tmpfiles.d/00-vmsecrets.conf";
|
||||
|
||||
systemd.tmpfiles.settings."00-vmsecrets" = {
|
||||
# run before 00-nixos.conf
|
||||
"/etc/secrets" = {
|
||||
d.mode = "0700";
|
||||
z.mode = "0700";
|
||||
};
|
||||
"/etc/secrets/matrix-synapse/synapse-registration_shared_secret" = {
|
||||
f.argument = "supersecret";
|
||||
z = {
|
||||
mode = "0400";
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
"/etc/secrets/matrix-password-admin/matrix-password-admin" = {
|
||||
f.argument = "matrix-password1";
|
||||
z = {
|
||||
mode = "0400";
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
"/etc/secrets/matrix-password-someuser/matrix-password-someuser" = {
|
||||
f.argument = "matrix-password2";
|
||||
z = {
|
||||
mode = "0400";
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
];
|
||||
};
|
||||
testScript = ''
|
||||
start_all()
|
||||
machine.wait_for_unit("matrix-synapse")
|
||||
machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 8008")
|
||||
machine.wait_until_succeeds("${pkgs.curl}/bin/curl -Ssf -L http://localhost/_matrix/static/ -H 'Host: matrix.clan.test'")
|
||||
|
||||
machine.systemctl("restart matrix-synapse >&2") # check if user creation is idempotent
|
||||
machine.execute("journalctl -u matrix-synapse --no-pager >&2")
|
||||
machine.wait_for_unit("matrix-synapse")
|
||||
machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 8008")
|
||||
machine.succeed("${pkgs.curl}/bin/curl -Ssf -L http://localhost/_matrix/static/ -H 'Host: matrix.clan.test'")
|
||||
'';
|
||||
}
|
||||
)
|
||||
1  checks/matrix-synapse/synapse-registration_shared_secret  Normal file
@@ -0,0 +1 @@
registration_shared_secret: supersecret
@@ -16,6 +16,7 @@ nixosLib.runTest (
|
||||
|
||||
# This tests the compatibility of the inventory
|
||||
# With the test framework
|
||||
# - legacy-modules
|
||||
# - clan.service modules
|
||||
name = "service-dummy-test-from-flake";
|
||||
|
||||
@@ -29,15 +30,16 @@ nixosLib.runTest (
|
||||
{ nodes, ... }:
|
||||
''
|
||||
import subprocess
|
||||
import tempfile
|
||||
from nixos_test_lib.nix_setup import setup_nix_in_nix
|
||||
from nixos_test_lib.nix_setup import setup_nix_in_nix # type: ignore[import-untyped]
|
||||
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
setup_nix_in_nix(temp_dir, None) # No closure info for this test
|
||||
setup_nix_in_nix(None) # No closure info for this test
|
||||
|
||||
start_all()
|
||||
admin1.wait_for_unit("multi-user.target")
|
||||
peer1.wait_for_unit("multi-user.target")
|
||||
# Provided by the legacy module
|
||||
print(admin1.succeed("systemctl status dummy-service"))
|
||||
print(peer1.succeed("systemctl status dummy-service"))
|
||||
|
||||
# peer1 should have the 'hello' file
|
||||
peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")
|
||||
|
||||
@@ -15,6 +15,12 @@
|
||||
meta.name = "foo";
|
||||
machines.peer1 = { };
|
||||
machines.admin1 = { };
|
||||
services = {
|
||||
legacy-module.default = {
|
||||
roles.peer.machines = [ "peer1" ];
|
||||
roles.admin.machines = [ "admin1" ];
|
||||
};
|
||||
};
|
||||
|
||||
instances."test" = {
|
||||
module.name = "new-service";
|
||||
@@ -22,14 +28,15 @@
|
||||
roles.peer.machines.peer1 = { };
|
||||
};
|
||||
|
||||
modules = {
|
||||
legacy-module = ./legacy-module;
|
||||
};
|
||||
};
|
||||
|
||||
modules.new-service = {
|
||||
_class = "clan.service";
|
||||
manifest.name = "new-service";
|
||||
roles.peer = {
|
||||
description = "A peer that uses the new-service to generate some files.";
|
||||
};
|
||||
roles.peer = { };
|
||||
perMachine = {
|
||||
nixosModule = {
|
||||
# This should be generated by:
|
||||
|
||||
10
checks/service-dummy-test-from-flake/legacy-module/README.md
Normal file
10
checks/service-dummy-test-from-flake/legacy-module/README.md
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
description = "Set up dummy-module"
|
||||
categories = ["System"]
|
||||
features = [ "inventory" ]
|
||||
|
||||
[constraints]
|
||||
roles.admin.min = 1
|
||||
roles.admin.max = 1
|
||||
---
|
||||
|
||||
@@ -0,0 +1,5 @@
|
||||
{
|
||||
imports = [
|
||||
../shared.nix
|
||||
];
|
||||
}
|
||||
@@ -0,0 +1,5 @@
|
||||
{
|
||||
imports = [
|
||||
../shared.nix
|
||||
];
|
||||
}
|
||||
@@ -0,0 +1,34 @@
|
||||
{ config, ... }:
|
||||
{
|
||||
systemd.services.dummy-service = {
|
||||
enable = true;
|
||||
description = "Dummy service";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
};
|
||||
script = ''
|
||||
generated_password_path="${config.clan.core.vars.generators.dummy-generator.files.generated-password.path}"
|
||||
if [ ! -f "$generated_password_path" ]; then
|
||||
echo "Generated password file not found: $generated_password_path"
|
||||
exit 1
|
||||
fi
|
||||
host_id_path="${config.clan.core.vars.generators.dummy-generator.files.host-id.path}"
|
||||
if [ ! -e "$host_id_path" ]; then
|
||||
echo "Host ID file not found: $host_id_path"
|
||||
exit 1
|
||||
fi
|
||||
'';
|
||||
};
|
||||
|
||||
# TODO: add and prompt and make it work in the test framework
|
||||
clan.core.vars.generators.dummy-generator = {
|
||||
files.host-id.secret = false;
|
||||
files.generated-password.secret = true;
|
||||
script = ''
|
||||
echo $RANDOM > "$out"/host-id
|
||||
echo $RANDOM > "$out"/generated-password
|
||||
'';
|
||||
};
|
||||
}
|
||||
@@ -15,6 +15,7 @@ nixosLib.runTest (
|
||||
|
||||
# This tests the compatibility of the inventory
|
||||
# With the test framework
|
||||
# - legacy-modules
|
||||
# - clan.service modules
|
||||
name = "service-dummy-test";
|
||||
|
||||
@@ -23,6 +24,12 @@ nixosLib.runTest (
|
||||
inventory = {
|
||||
machines.peer1 = { };
|
||||
machines.admin1 = { };
|
||||
services = {
|
||||
legacy-module.default = {
|
||||
roles.peer.machines = [ "peer1" ];
|
||||
roles.admin.machines = [ "admin1" ];
|
||||
};
|
||||
};
|
||||
|
||||
instances."test" = {
|
||||
module.name = "new-service";
|
||||
@@ -30,13 +37,14 @@ nixosLib.runTest (
|
||||
roles.peer.machines.peer1 = { };
|
||||
};
|
||||
|
||||
modules = {
|
||||
legacy-module = ./legacy-module;
|
||||
};
|
||||
};
|
||||
modules.new-service = {
|
||||
_class = "clan.service";
|
||||
manifest.name = "new-service";
|
||||
roles.peer = {
|
||||
description = "A peer that uses the new-service to generate some files.";
|
||||
};
|
||||
roles.peer = { };
|
||||
perMachine = {
|
||||
nixosModule = {
|
||||
# This should be generated by:
|
||||
@@ -70,6 +78,9 @@ nixosLib.runTest (
|
||||
start_all()
|
||||
admin1.wait_for_unit("multi-user.target")
|
||||
peer1.wait_for_unit("multi-user.target")
|
||||
# Provided by the legacy module
|
||||
print(admin1.succeed("systemctl status dummy-service"))
|
||||
print(peer1.succeed("systemctl status dummy-service"))
|
||||
|
||||
# peer1 should have the 'hello' file
|
||||
peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")
|
||||
|
||||
@@ -67,15 +67,6 @@
|
||||
];
|
||||
};
|
||||
|
||||
nix.settings = {
|
||||
flake-registry = "";
|
||||
# required for setting the `flake-registry`
|
||||
experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
};
|
||||
|
||||
# Define the mounts that exist in the container to prevent them from being stopped
|
||||
fileSystems = {
|
||||
"/" = {
|
||||
@@ -115,13 +106,12 @@
|
||||
let
|
||||
closureInfo = pkgs.closureInfo {
|
||||
rootPaths = [
|
||||
self.packages.${pkgs.hostPlatform.system}.clan-cli
|
||||
self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks
|
||||
self.packages.${pkgs.system}.clan-cli
|
||||
self.checks.${pkgs.system}.clan-core-for-checks
|
||||
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-update-machine.config.system.build.toplevel
|
||||
pkgs.stdenv.drvPath
|
||||
pkgs.bash.drvPath
|
||||
pkgs.buildPackages.xorg.lndir
|
||||
pkgs.bubblewrap
|
||||
]
|
||||
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
|
||||
};
|
||||
@@ -132,7 +122,7 @@
|
||||
imports = [ self.nixosModules.test-update-machine ];
|
||||
};
|
||||
extraPythonPackages = _p: [
|
||||
self.legacyPackages.${pkgs.hostPlatform.system}.nixosTestLib
|
||||
self.legacyPackages.${pkgs.system}.nixosTestLib
|
||||
];
|
||||
|
||||
testScript = ''
|
||||
@@ -154,7 +144,7 @@
|
||||
# Prepare test flake and Nix store
|
||||
flake_dir = prepare_test_flake(
|
||||
temp_dir,
|
||||
"${self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks}",
|
||||
"${self.checks.x86_64-linux.clan-core-for-checks}",
|
||||
"${closureInfo}"
|
||||
)
|
||||
(flake_dir / ".clan-flake").write_text("") # Ensure .clan-flake exists
|
||||
@@ -221,13 +211,12 @@
|
||||
[
|
||||
"${pkgs.nix}/bin/nix",
|
||||
"copy",
|
||||
"--from",
|
||||
f"{temp_dir}/store",
|
||||
"--to",
|
||||
"ssh://root@192.168.1.1",
|
||||
"--no-check-sigs",
|
||||
f"${self.packages.${pkgs.hostPlatform.system}.clan-cli}",
|
||||
f"${self.packages.${pkgs.system}.clan-cli}",
|
||||
"--extra-experimental-features", "nix-command flakes",
|
||||
"--from", f"{os.environ["TMPDIR"]}/store"
|
||||
],
|
||||
check=True,
|
||||
env={
|
||||
@@ -242,7 +231,7 @@
|
||||
"-o", "UserKnownHostsFile=/dev/null",
|
||||
"-o", "StrictHostKeyChecking=no",
|
||||
f"root@192.168.1.1",
|
||||
"${self.packages.${pkgs.hostPlatform.system}.clan-cli}/bin/clan",
|
||||
"${self.packages.${pkgs.system}.clan-cli}/bin/clan",
|
||||
"machines",
|
||||
"update",
|
||||
"--debug",
|
||||
@@ -270,7 +259,7 @@
|
||||
|
||||
# Run clan update command
|
||||
subprocess.run([
|
||||
"${self.packages.${pkgs.hostPlatform.system}.clan-cli-full}/bin/clan",
|
||||
"${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
|
||||
"machines",
|
||||
"update",
|
||||
"--debug",
|
||||
@@ -297,7 +286,7 @@
|
||||
|
||||
# Run clan update command with --build-host
|
||||
subprocess.run([
|
||||
"${self.packages.${pkgs.hostPlatform.system}.clan-cli-full}/bin/clan",
|
||||
"${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
|
||||
"machines",
|
||||
"update",
|
||||
"--debug",
|
||||
|
||||
24  checks/zt-tcp-relay/default.nix  Normal file
@@ -0,0 +1,24 @@
(
  { pkgs, ... }:
  {
    name = "zt-tcp-relay";

    nodes.machine =
      { self, ... }:
      {
        imports = [
          self.nixosModules.clanCore
          self.clanModules.zt-tcp-relay
          {
            clan.core.settings.directory = ./.;
          }
        ];
      };
    testScript = ''
      start_all()
      machine.wait_for_unit("zt-tcp-relay.service")
      out = machine.succeed("${pkgs.netcat}/bin/nc -z -v localhost 4443")
      print(out)
    '';
  }
)
5  clanModules/admin/README.md  Normal file
@@ -0,0 +1,5 @@
---
description = "Convenient Administration for the Clan App"
categories = ["Utility"]
features = [ "inventory", "deprecated" ]
---
3  clanModules/admin/default.nix  Normal file
@@ -0,0 +1,3 @@
{
  imports = [ ./roles/default.nix ];
}
30  clanModules/admin/roles/default.nix  Normal file
@@ -0,0 +1,30 @@
{ lib, config, ... }:
{

  options.clan.admin = {
    allowedKeys = lib.mkOption {
      default = { };
      type = lib.types.attrsOf lib.types.str;
      description = "The allowed public keys for ssh access to the admin user";
      example = {
        "key_1" = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD...";
      };
    };
  };
  # Bad practice.
  # Should we add 'clanModules' to specialArgs?
  imports = [
    ../../sshd
    ../../root-password
  ];
  config = {

    warnings = [
      "The clan.admin module is deprecated and will be removed on 2025-07-15.
      Please migrate to user-maintained configuration or the new equivalent clan services
      (https://docs.clan.lol/reference/clanServices)."
    ];

    users.users.root.openssh.authorizedKeys.keys = builtins.attrValues config.clan.admin.allowedKeys;
  };
}
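A minimal usage sketch of this (deprecated) admin module from a machine configuration, assuming clan-core's clanModules are in scope; the attribute name and SSH key below are placeholders, not part of this change:

    {
      imports = [ clan-core.clanModules.admin ];
      # every value in allowedKeys ends up in root's authorized_keys
      clan.admin.allowedKeys."alice" = "ssh-ed25519 AAAAC3Nza... alice@example";
    }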
8  clanModules/auto-upgrade/README.md  Normal file
@@ -0,0 +1,8 @@
---
description = "Set up automatic upgrades"
categories = ["System"]
features = [ "inventory", "deprecated" ]
---

Whether to periodically upgrade NixOS to the latest version. If enabled, a
systemd timer will run `nixos-rebuild switch --upgrade` once a day.
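A hedged sketch of how a machine might enable this (deprecated) module, assuming clan-core's clanModules are in scope; the flake URL is a placeholder:

    {
      imports = [ clan-core.clanModules.auto-upgrade ];
      # flake reference consumed by system.autoUpgrade (see roles/default.nix below)
      clan.auto-upgrade.flake = "git+https://git.example.com/my-clan";
    }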
32  clanModules/auto-upgrade/roles/default.nix  Normal file
@@ -0,0 +1,32 @@
{
  config,
  lib,
  ...
}:
let
  cfg = config.clan.auto-upgrade;
in
{
  options.clan.auto-upgrade = {
    flake = lib.mkOption {
      type = lib.types.str;
      description = "Flake reference";
    };
  };

  config = {

    warnings = [
      "The clan.auto-upgrade module is deprecated and will be removed on 2025-07-15.
      Please migrate to user-maintained configuration or the new equivalent clan services
      (https://docs.clan.lol/reference/clanServices)."
    ];

    system.autoUpgrade = {
      inherit (cfg) flake;
      enable = true;
      dates = "02:00";
      randomizedDelaySec = "45min";
    };
  };
}
16  clanModules/borgbackup-static/README.md  Normal file
@@ -0,0 +1,16 @@
---
description = "Statically configure borgbackup with sane defaults."
---
!!! Danger "Deprecated"
    Use [borgbackup](borgbackup.md) instead.

Don't use borgbackup-static through [inventory](../../concepts/inventory.md).

This module implements the `borgbackup` backend and provides sane defaults
for backup management through `borgbackup` for members of the clan.

Configure the target machines that backups should be sent to through `targets`.

Configure the machines that should be backed up either through `includeMachines`,
which exclusively adds the included machines to the backup, or through
`excludeMachines`, which adds every machine except the excluded ones to the backup.
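A minimal sketch of the configuration described above, with placeholder machine names: every clan machine except "laptop" is backed up to the target machine "backup-host".

    {
      clan.borgbackup-static = {
        targets = [ "backup-host" ];
        excludeMachines = [ "laptop" ];
      };
    }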
104
clanModules/borgbackup-static/default.nix
Normal file
104
clanModules/borgbackup-static/default.nix
Normal file
@@ -0,0 +1,104 @@
|
||||
{ lib, config, ... }:
|
||||
let
|
||||
dir = config.clan.core.settings.directory;
|
||||
machineDir = dir + "/machines/";
|
||||
in
|
||||
{
|
||||
imports = [ ../borgbackup ];
|
||||
|
||||
options.clan.borgbackup-static = {
|
||||
excludeMachines = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
example = lib.literalExpression "[ config.clan.core.settings.machine.name ]";
|
||||
default = [ ];
|
||||
description = ''
|
||||
Machines that should not be backuped.
|
||||
Mutually exclusive with includeMachines.
|
||||
If this is not empty, every other machine except the targets in the clan will be backuped by this module.
|
||||
If includeMachines is set, only the included machines will be backuped.
|
||||
'';
|
||||
};
|
||||
includeMachines = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
example = lib.literalExpression "[ config.clan.core.settings.machine.name ]";
|
||||
default = [ ];
|
||||
description = ''
|
||||
Machines that should be backuped.
|
||||
Mutually exclusive with excludeMachines.
|
||||
'';
|
||||
};
|
||||
targets = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = [ ];
|
||||
description = ''
|
||||
Machines that should act as target machines for backups.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config.services.borgbackup.repos =
|
||||
let
|
||||
machines = builtins.readDir machineDir;
|
||||
borgbackupIpMachinePath = machines: machineDir + machines + "/facts/borgbackup.ssh.pub";
|
||||
filteredMachines =
|
||||
if ((builtins.length config.clan.borgbackup-static.includeMachines) != 0) then
|
||||
lib.filterAttrs (name: _: (lib.elem name config.clan.borgbackup-static.includeMachines)) machines
|
||||
else
|
||||
lib.filterAttrs (name: _: !(lib.elem name config.clan.borgbackup-static.excludeMachines)) machines;
|
||||
machinesMaybeKey = lib.mapAttrsToList (
|
||||
machine: _:
|
||||
let
|
||||
fullPath = borgbackupIpMachinePath machine;
|
||||
in
|
||||
if builtins.pathExists fullPath then machine else null
|
||||
) filteredMachines;
|
||||
machinesWithKey = lib.filter (x: x != null) machinesMaybeKey;
|
||||
hosts = builtins.map (machine: {
|
||||
name = machine;
|
||||
value = {
|
||||
path = "/var/lib/borgbackup/${machine}";
|
||||
authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machine)) ];
|
||||
};
|
||||
}) machinesWithKey;
|
||||
in
|
||||
lib.mkIf
|
||||
(builtins.any (
|
||||
target: target == config.clan.core.settings.machine.name
|
||||
) config.clan.borgbackup-static.targets)
|
||||
(if (builtins.listToAttrs hosts) != null then builtins.listToAttrs hosts else { });
|
||||
|
||||
config.clan.borgbackup.destinations =
|
||||
let
|
||||
destinations = builtins.map (d: {
|
||||
name = d;
|
||||
value = {
|
||||
repo = "borg@${d}:/var/lib/borgbackup/${config.clan.core.settings.machine.name}";
|
||||
};
|
||||
}) config.clan.borgbackup-static.targets;
|
||||
in
|
||||
lib.mkIf (builtins.any (
|
||||
target: target == config.clan.core.settings.machine.name
|
||||
) config.clan.borgbackup-static.includeMachines) (builtins.listToAttrs destinations);
|
||||
|
||||
config.assertions = [
|
||||
{
|
||||
assertion =
|
||||
!(
|
||||
((builtins.length config.clan.borgbackup-static.excludeMachines) != 0)
|
||||
&& ((builtins.length config.clan.borgbackup-static.includeMachines) != 0)
|
||||
);
|
||||
message = ''
|
||||
The options:
|
||||
config.clan.borgbackup-static.excludeMachines = [${builtins.toString config.clan.borgbackup-static.excludeMachines}]
|
||||
and
|
||||
config.clan.borgbackup-static.includeMachines = [${builtins.toString config.clan.borgbackup-static.includeMachines}]
|
||||
are mutually exclusive.
|
||||
Use excludeMachines to exclude certain machines and backup the other clan machines.
|
||||
Use include machines to only backup certain machines.
|
||||
'';
|
||||
}
|
||||
];
|
||||
config.warnings = lib.optional (
|
||||
builtins.length config.clan.borgbackup-static.targets > 0
|
||||
) "The borgbackup-static module is deprecated use the service via the inventory interface instead.";
|
||||
}
|
||||
14  clanModules/borgbackup/README.md  Normal file
@@ -0,0 +1,14 @@
---
description = "Efficient, deduplicating backup program with optional compression and secure encryption."
categories = ["System"]
features = [ "inventory", "deprecated" ]
---
BorgBackup (short: Borg) gives you:

- Space efficient storage of backups.
- Secure, authenticated encryption.
- Compression: lz4, zstd, zlib, lzma or none.
- Mountable backups with FUSE.
- Easy installation on multiple platforms: Linux, macOS, BSD, …
- Free software (BSD license).
- Backed by a large and active open-source community.
6  clanModules/borgbackup/default.nix  Normal file
@@ -0,0 +1,6 @@
# Don't import this file.
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
  imports = [ ./roles/client.nix ];
}
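For orientation, a hedged example of setting the `clan.borgbackup.destinations` option defined in roles/client.nix below; the host and path are placeholders:

    {
      clan.borgbackup.destinations.offsite.repo =
        "borg@backup.example.com:/var/lib/borgbackup/my-machine";
    }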
210
clanModules/borgbackup/roles/client.nix
Normal file
210
clanModules/borgbackup/roles/client.nix
Normal file
@@ -0,0 +1,210 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
# Instances might be empty, if the module is not used via the inventory
|
||||
instances = config.clan.inventory.services.borgbackup or { };
|
||||
# roles = { ${role_name} :: { machines :: [string] } }
|
||||
allServers = lib.foldlAttrs (
|
||||
acc: _instanceName: instanceConfig:
|
||||
acc
|
||||
++ (
|
||||
if builtins.elem machineName instanceConfig.roles.client.machines then
|
||||
instanceConfig.roles.server.machines
|
||||
else
|
||||
[ ]
|
||||
)
|
||||
) [ ] instances;
|
||||
|
||||
machineName = config.clan.core.settings.machine.name;
|
||||
|
||||
cfg = config.clan.borgbackup;
|
||||
preBackupScript = ''
|
||||
declare -A preCommandErrors
|
||||
|
||||
${lib.concatMapStringsSep "\n" (
|
||||
state:
|
||||
lib.optionalString (state.preBackupCommand != null) ''
|
||||
echo "Running pre-backup command for ${state.name}"
|
||||
if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
|
||||
preCommandErrors["${state.name}"]=1
|
||||
fi
|
||||
''
|
||||
) (lib.attrValues config.clan.core.state)}
|
||||
|
||||
if [[ ''${#preCommandErrors[@]} -gt 0 ]]; then
|
||||
echo "pre-backup commands failed for the following services:"
|
||||
for state in "''${!preCommandErrors[@]}"; do
|
||||
echo " $state"
|
||||
done
|
||||
exit 1
|
||||
fi
|
||||
'';
|
||||
in
|
||||
{
|
||||
options.clan.borgbackup.destinations = lib.mkOption {
|
||||
type = lib.types.attrsOf (
|
||||
lib.types.submodule (
|
||||
{ name, ... }:
|
||||
{
|
||||
options = {
|
||||
name = lib.mkOption {
|
||||
type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
|
||||
default = name;
|
||||
description = "the name of the backup job";
|
||||
};
|
||||
repo = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "the borgbackup repository to backup to";
|
||||
};
|
||||
rsh = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "ssh -i ${
|
||||
config.clan.core.vars.generators.borgbackup.files."borgbackup.ssh".path
|
||||
} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
|
||||
defaultText = "ssh -i \${config.clan.core.vars.generators.borgbackup.files.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
|
||||
description = "the rsh to use for the backup";
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
);
|
||||
default = { };
|
||||
description = ''
|
||||
destinations where the machine should be backuped to
|
||||
'';
|
||||
};
|
||||
|
||||
options.clan.borgbackup.exclude = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
example = [ "*.pyc" ];
|
||||
default = [ ];
|
||||
description = ''
|
||||
Directories/Files to exclude from the backup.
|
||||
Use * as a wildcard.
|
||||
'';
|
||||
};
|
||||
|
||||
config = {
|
||||
|
||||
warnings = [
|
||||
"The clan.borgbackup module is deprecated and will be removed on 2025-07-15.
|
||||
Please migrate to user-maintained configuration or the new equivalent clan services
|
||||
(https://docs.clan.lol/reference/clanServices)."
|
||||
];
|
||||
|
||||
# Destinations
|
||||
clan.borgbackup.destinations =
|
||||
let
|
||||
destinations = builtins.map (serverName: {
|
||||
name = serverName;
|
||||
value = {
|
||||
repo = "borg@${serverName}:/var/lib/borgbackup/${machineName}";
|
||||
};
|
||||
}) allServers;
|
||||
in
|
||||
(builtins.listToAttrs destinations);
|
||||
|
||||
# Derived from the destinations
|
||||
systemd.services = lib.mapAttrs' (
|
||||
_: dest:
|
||||
lib.nameValuePair "borgbackup-job-${dest.name}" {
|
||||
# since borgbackup mounts the system read-only, we need to run in a
|
||||
# ExecStartPre script, so we can generate additional files.
|
||||
serviceConfig.ExecStartPre = [
|
||||
''+${pkgs.writeShellScript "borgbackup-job-${dest.name}-pre-backup-commands" preBackupScript}''
|
||||
];
|
||||
}
|
||||
) cfg.destinations;
|
||||
|
||||
services.borgbackup.jobs = lib.mapAttrs (_: dest: {
|
||||
paths = lib.unique (
|
||||
lib.flatten (map (state: state.folders) (lib.attrValues config.clan.core.state))
|
||||
);
|
||||
exclude = cfg.exclude;
|
||||
repo = dest.repo;
|
||||
environment.BORG_RSH = dest.rsh;
|
||||
compression = "auto,zstd";
|
||||
startAt = "*-*-* 01:00:00";
|
||||
persistentTimer = true;
|
||||
|
||||
encryption = {
|
||||
mode = "repokey";
|
||||
passCommand = "cat ${config.clan.core.vars.generators.borgbackup.files."borgbackup.repokey".path}";
|
||||
};
|
||||
|
||||
prune.keep = {
|
||||
within = "1d"; # Keep all archives from the last day
|
||||
daily = 7;
|
||||
weekly = 4;
|
||||
monthly = 0;
|
||||
};
|
||||
}) cfg.destinations;
|
||||
|
||||
environment.systemPackages = [
|
||||
(pkgs.writeShellApplication {
|
||||
name = "borgbackup-create";
|
||||
runtimeInputs = [ config.systemd.package ];
|
||||
text = ''
|
||||
${lib.concatMapStringsSep "\n" (dest: ''
|
||||
systemctl start borgbackup-job-${dest.name}
|
||||
'') (lib.attrValues cfg.destinations)}
|
||||
'';
|
||||
})
|
||||
(pkgs.writeShellApplication {
|
||||
name = "borgbackup-list";
|
||||
runtimeInputs = [ pkgs.jq ];
|
||||
text = ''
|
||||
(${
|
||||
lib.concatMapStringsSep "\n" (
|
||||
dest:
|
||||
# we need yes here to skip the changed url verification
|
||||
''echo y | /run/current-system/sw/bin/borg-job-${dest.name} list --json | jq '[.archives[] | {"name": ("${dest.name}::${dest.repo}::" + .name)}]' ''
|
||||
) (lib.attrValues cfg.destinations)
|
||||
}) | jq -s 'add // []'
|
||||
'';
|
||||
})
|
||||
(pkgs.writeShellApplication {
|
||||
name = "borgbackup-restore";
|
||||
runtimeInputs = [ pkgs.gawk ];
|
||||
text = ''
|
||||
cd /
|
||||
IFS=':' read -ra FOLDER <<< "''${FOLDERS-}"
|
||||
job_name=$(echo "$NAME" | awk -F'::' '{print $1}')
|
||||
backup_name=''${NAME#"$job_name"::}
|
||||
if [[ ! -x /run/current-system/sw/bin/borg-job-"$job_name" ]]; then
|
||||
echo "borg-job-$job_name not found: Backup name is invalid" >&2
|
||||
exit 1
|
||||
fi
|
||||
echo y | /run/current-system/sw/bin/borg-job-"$job_name" extract "$backup_name" "''${FOLDER[@]}"
|
||||
'';
|
||||
})
|
||||
];
|
||||
|
||||
clan.core.vars.generators.borgbackup = {
|
||||
files."borgbackup.ssh.pub".secret = false;
|
||||
files."borgbackup.ssh" = { };
|
||||
files."borgbackup.repokey" = { };
|
||||
|
||||
migrateFact = "borgbackup";
|
||||
runtimeInputs = [
|
||||
pkgs.coreutils
|
||||
pkgs.openssh
|
||||
pkgs.xkcdpass
|
||||
];
|
||||
script = ''
|
||||
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/borgbackup.ssh
|
||||
xkcdpass -n 4 -d - > "$out"/borgbackup.repokey
|
||||
'';
|
||||
};
|
||||
|
||||
clan.core.backups.providers.borgbackup = {
|
||||
list = "borgbackup-list";
|
||||
create = "borgbackup-create";
|
||||
restore = "borgbackup-restore";
|
||||
};
|
||||
};
|
||||
}
|
||||
63
clanModules/borgbackup/roles/server.nix
Normal file
63
clanModules/borgbackup/roles/server.nix
Normal file
@@ -0,0 +1,63 @@
|
||||
{ config, lib, ... }:
|
||||
let
|
||||
dir = config.clan.core.settings.directory;
|
||||
machineDir = dir + "/vars/per-machine/";
|
||||
machineName = config.clan.core.settings.machine.name;
|
||||
|
||||
# Instances might be empty, if the module is not used via the inventory
|
||||
#
|
||||
# Type: { ${instanceName} :: { roles :: Roles } }
|
||||
# Roles :: { ${role_name} :: { machines :: [string] } }
|
||||
instances = config.clan.inventory.services.borgbackup or { };
|
||||
|
||||
allClients = lib.foldlAttrs (
|
||||
acc: _instanceName: instanceConfig:
|
||||
acc
|
||||
++ (
|
||||
if (builtins.elem machineName instanceConfig.roles.server.machines) then
|
||||
instanceConfig.roles.client.machines
|
||||
else
|
||||
[ ]
|
||||
)
|
||||
) [ ] instances;
|
||||
in
|
||||
{
|
||||
options = {
|
||||
clan.borgbackup.directory = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "/var/lib/borgbackup";
|
||||
description = ''
|
||||
The directory where the borgbackup repositories are stored.
|
||||
'';
|
||||
};
|
||||
};
|
||||
config.services.borgbackup.repos =
|
||||
let
|
||||
borgbackupIpMachinePath = machine: machineDir + machine + "/borgbackup/borgbackup.ssh.pub/value";
|
||||
|
||||
machinesMaybeKey = builtins.map (
|
||||
machine:
|
||||
let
|
||||
fullPath = borgbackupIpMachinePath machine;
|
||||
in
|
||||
if builtins.pathExists fullPath then
|
||||
machine
|
||||
else
|
||||
lib.warn ''
|
||||
Machine ${machine} does not have a borgbackup key at ${fullPath},
|
||||
run `clan vars generate ${machine}` to generate it.
|
||||
'' null
|
||||
) allClients;
|
||||
|
||||
machinesWithKey = lib.filter (x: x != null) machinesMaybeKey;
|
||||
|
||||
hosts = builtins.map (machine: {
|
||||
name = machine;
|
||||
value = {
|
||||
path = "${config.clan.borgbackup.directory}/${machine}";
|
||||
authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machine)) ];
|
||||
};
|
||||
}) machinesWithKey;
|
||||
in
|
||||
if (builtins.listToAttrs hosts) != [ ] then builtins.listToAttrs hosts else { };
|
||||
}
|
||||
10  clanModules/data-mesher/README.md  Normal file
@@ -0,0 +1,10 @@
---
description = "Set up data-mesher"
categories = ["System"]
features = [ "inventory" ]

[constraints]
roles.admin.min = 1
roles.admin.max = 1
---
19
clanModules/data-mesher/lib.nix
Normal file
19
clanModules/data-mesher/lib.nix
Normal file
@@ -0,0 +1,19 @@
|
||||
lib: {
|
||||
|
||||
machines =
|
||||
config:
|
||||
let
|
||||
instanceNames = builtins.attrNames config.clan.inventory.services.data-mesher;
|
||||
instanceName = builtins.head instanceNames;
|
||||
dataMesherInstances = config.clan.inventory.services.data-mesher.${instanceName};
|
||||
|
||||
uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list);
|
||||
in
|
||||
rec {
|
||||
admins = dataMesherInstances.roles.admin.machines or [ ];
|
||||
signers = dataMesherInstances.roles.signer.machines or [ ];
|
||||
peers = dataMesherInstances.roles.peer.machines or [ ];
|
||||
bootstrap = uniqueStrings (admins ++ signers);
|
||||
};
|
||||
|
||||
}
|
||||
58
clanModules/data-mesher/roles/admin.nix
Normal file
58
clanModules/data-mesher/roles/admin.nix
Normal file
@@ -0,0 +1,58 @@
|
||||
{ lib, config, ... }:
|
||||
let
|
||||
cfg = config.clan.data-mesher;
|
||||
|
||||
dmLib = import ../lib.nix lib;
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
../shared.nix
|
||||
];
|
||||
|
||||
options.clan.data-mesher = {
|
||||
network = {
|
||||
tld = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = (config.networking.domain or "clan");
|
||||
description = "Top level domain to use for the network";
|
||||
};
|
||||
|
||||
hostTTL = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "672h"; # 28 days
|
||||
example = "24h";
|
||||
description = "The TTL for hosts in the network, in the form of a Go time.Duration";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
|
||||
warnings = [
|
||||
"The clan.admin module is deprecated and will be removed on 2025-07-15.
|
||||
Please migrate to user-maintained configuration or the new equivalent clan services
|
||||
(https://docs.clan.lol/reference/clanServices)."
|
||||
];
|
||||
|
||||
services.data-mesher.initNetwork =
|
||||
let
|
||||
# for a given machine, read it's public key and remove any new lines
|
||||
readHostKey =
|
||||
machine:
|
||||
let
|
||||
path = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/data-mesher-host-key/public_key/value";
|
||||
in
|
||||
builtins.elemAt (lib.splitString "\n" (builtins.readFile path)) 1;
|
||||
in
|
||||
{
|
||||
enable = true;
|
||||
keyPath = config.clan.core.vars.generators.data-mesher-network-key.files.private_key.path;
|
||||
|
||||
tld = cfg.network.tld;
|
||||
hostTTL = cfg.network.hostTTL;
|
||||
|
||||
# admin and signer host public keys
|
||||
signingKeys = builtins.map readHostKey (dmLib.machines config).bootstrap;
|
||||
};
|
||||
};
|
||||
}
|
||||
5
clanModules/data-mesher/roles/peer.nix
Normal file
5
clanModules/data-mesher/roles/peer.nix
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
imports = [
|
||||
../shared.nix
|
||||
];
|
||||
}
|
||||
5
clanModules/data-mesher/roles/signer.nix
Normal file
5
clanModules/data-mesher/roles/signer.nix
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
imports = [
|
||||
../shared.nix
|
||||
];
|
||||
}
|
||||
152
clanModules/data-mesher/shared.nix
Normal file
152
clanModules/data-mesher/shared.nix
Normal file
@@ -0,0 +1,152 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
cfg = config.clan.data-mesher;
|
||||
dmLib = import ./lib.nix lib;
|
||||
|
||||
# the default bootstrap nodes are any machines with the admin or signers role
|
||||
# we iterate through those machines, determining an IP address for them based on their VPN
|
||||
# currently only supports zerotier
|
||||
defaultBootstrapNodes = builtins.foldl' (
|
||||
urls: name:
|
||||
let
|
||||
|
||||
ipPath = "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value";
|
||||
|
||||
in
|
||||
if builtins.pathExists ipPath then
|
||||
let
|
||||
ip = builtins.readFile ipPath;
|
||||
in
|
||||
urls ++ [ "[${ip}]:${builtins.toString cfg.network.port}" ]
|
||||
else
|
||||
urls
|
||||
) [ ] (dmLib.machines config).bootstrap;
|
||||
in
|
||||
{
|
||||
options.clan.data-mesher = {
|
||||
|
||||
bootstrapNodes = lib.mkOption {
|
||||
type = lib.types.nullOr (lib.types.listOf lib.types.str);
|
||||
default = null;
|
||||
description = ''
|
||||
A list of bootstrap nodes that act as an initial gateway when joining
|
||||
the cluster.
|
||||
'';
|
||||
example = [
|
||||
"192.168.1.1:7946"
|
||||
"192.168.1.2:7946"
|
||||
];
|
||||
};
|
||||
|
||||
network = {
|
||||
|
||||
interface = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
The interface over which cluster communication should be performed.
|
||||
All the IP addresses associated with this interface will be part of
|
||||
our host claim, including both IPv4 and IPv6.
|
||||
|
||||
This should be set to an internal/VPN interface.
|
||||
'';
|
||||
example = "tailscale0";
|
||||
};
|
||||
|
||||
port = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
default = 7946;
|
||||
description = ''
|
||||
Port to listen on for cluster communication.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
|
||||
services.data-mesher = {
|
||||
enable = true;
|
||||
openFirewall = true;
|
||||
|
||||
settings = {
|
||||
log_level = "warn";
|
||||
state_dir = "/var/lib/data-mesher";
|
||||
|
||||
# read network id from vars
|
||||
network.id = config.clan.core.vars.generators.data-mesher-network-key.files.public_key.value;
|
||||
|
||||
host = {
|
||||
names = [ config.networking.hostName ];
|
||||
key_path = config.clan.core.vars.generators.data-mesher-host-key.files.private_key.path;
|
||||
};
|
||||
|
||||
cluster = {
|
||||
port = cfg.network.port;
|
||||
join_interval = "30s";
|
||||
push_pull_interval = "30s";
|
||||
|
||||
interface = cfg.network.interface;
|
||||
|
||||
bootstrap_nodes = if cfg.bootstrapNodes == null then defaultBootstrapNodes else cfg.bootstrapNodes;
|
||||
};
|
||||
|
||||
http.port = 7331;
|
||||
http.interface = "lo";
|
||||
};
|
||||
};
|
||||
|
||||
# Generate host key.
|
||||
clan.core.vars.generators.data-mesher-host-key = {
|
||||
files =
|
||||
let
|
||||
owner = config.users.users.data-mesher.name;
|
||||
in
|
||||
{
|
||||
private_key = {
|
||||
inherit owner;
|
||||
};
|
||||
public_key.secret = false;
|
||||
};
|
||||
|
||||
runtimeInputs = [
|
||||
config.services.data-mesher.package
|
||||
];
|
||||
|
||||
script = ''
|
||||
data-mesher generate keypair \
|
||||
--public-key-path "$out"/public_key \
|
||||
--private-key-path "$out"/private_key
|
||||
'';
|
||||
};
|
||||
|
||||
clan.core.vars.generators.data-mesher-network-key = {
|
||||
# generated once per clan
|
||||
share = true;
|
||||
|
||||
files =
|
||||
let
|
||||
owner = config.users.users.data-mesher.name;
|
||||
in
|
||||
{
|
||||
private_key = {
|
||||
inherit owner;
|
||||
};
|
||||
public_key.secret = false;
|
||||
};
|
||||
|
||||
runtimeInputs = [
|
||||
config.services.data-mesher.package
|
||||
];
|
||||
|
||||
script = ''
|
||||
data-mesher generate keypair \
|
||||
--public-key-path "$out"/public_key \
|
||||
--private-key-path "$out"/private_key
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
17
clanModules/deltachat/README.md
Normal file
17
clanModules/deltachat/README.md
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
description = "Email-based instant messaging for Desktop."
|
||||
categories = ["Social"]
|
||||
features = [ "inventory", "deprecated" ]
|
||||
---
|
||||
|
||||
!!! info
|
||||
This module will automatically configure an email server on the machine to handle email messaging seamlessly.
|
||||
|
||||
## Features
|
||||
|
||||
- [x] **Email-based**: Uses any email account as its backend.
|
||||
- [x] **End-to-End Encryption**: Supports Autocrypt to automatically encrypt messages.
|
||||
- [x] **No Phone Number Required**: Uses your email address instead of a phone number.
|
||||
- [x] **Cross-Platform**: Available on desktop and mobile platforms.
|
||||
- [x] **Automatic Server Setup**: Includes your own DeltaChat server for enhanced control and privacy.
|
||||
- [ ] **Bake a cake**: This module cannot bake a cake.
|
||||
3
clanModules/deltachat/default.nix
Normal file
3
clanModules/deltachat/default.nix
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
imports = [ ./roles/default.nix ];
|
||||
}
|
||||
153
clanModules/deltachat/roles/default.nix
Normal file
153
clanModules/deltachat/roles/default.nix
Normal file
@@ -0,0 +1,153 @@
|
||||
{
|
||||
config,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{
|
||||
warnings = [
|
||||
"The clan.deltachat module is deprecated and will be removed on 2025-07-15.
|
||||
Please migrate to user-maintained configuration or the new equivalent clan services
|
||||
(https://docs.clan.lol/reference/clanServices)."
|
||||
];
|
||||
|
||||
networking.firewall.interfaces."zt+".allowedTCPPorts = [ 25 ]; # smtp with other hosts
|
||||
environment.systemPackages = [ pkgs.deltachat-desktop ];
|
||||
|
||||
services.maddy =
|
||||
let
|
||||
domain = "${config.clan.core.settings.machine.name}.local";
|
||||
in
|
||||
{
|
||||
enable = true;
|
||||
primaryDomain = domain;
|
||||
config = ''
|
||||
# Minimal configuration with TLS disabled, adapted from upstream example
|
||||
# configuration here https://github.com/foxcpp/maddy/blob/master/maddy.conf
|
||||
# Do not use this in unencrypted networks!
|
||||
|
||||
auth.pass_table local_authdb {
|
||||
table sql_table {
|
||||
driver sqlite3
|
||||
dsn credentials.db
|
||||
table_name passwords
|
||||
}
|
||||
}
|
||||
|
||||
storage.imapsql local_mailboxes {
|
||||
driver sqlite3
|
||||
dsn imapsql.db
|
||||
}
|
||||
|
||||
table.chain local_rewrites {
|
||||
optional_step regexp "(.+)\+(.+)@(.+)" "$1@$3"
|
||||
optional_step static {
|
||||
entry postmaster postmaster@$(primary_domain)
|
||||
}
|
||||
optional_step file /etc/maddy/aliases
|
||||
}
|
||||
|
||||
msgpipeline local_routing {
|
||||
destination postmaster $(local_domains) {
|
||||
modify {
|
||||
replace_rcpt &local_rewrites
|
||||
}
|
||||
deliver_to &local_mailboxes
|
||||
}
|
||||
default_destination {
|
||||
reject 550 5.1.1 "User doesn't exist"
|
||||
}
|
||||
}
|
||||
|
||||
smtp tcp://[::]:25 {
|
||||
limits {
|
||||
all rate 20 1s
|
||||
all concurrency 10
|
||||
}
|
||||
dmarc yes
|
||||
check {
|
||||
require_mx_record
|
||||
dkim
|
||||
spf
|
||||
}
|
||||
source $(local_domains) {
|
||||
reject 501 5.1.8 "Use Submission for outgoing SMTP"
|
||||
}
|
||||
default_source {
|
||||
destination postmaster $(local_domains) {
|
||||
deliver_to &local_routing
|
||||
}
|
||||
default_destination {
|
||||
reject 550 5.1.1 "User doesn't exist"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
submission tcp://[::1]:587 {
|
||||
limits {
|
||||
all rate 50 1s
|
||||
}
|
||||
auth &local_authdb
|
||||
source $(local_domains) {
|
||||
check {
|
||||
authorize_sender {
|
||||
prepare_email &local_rewrites
|
||||
user_to_email identity
|
||||
}
|
||||
}
|
||||
destination postmaster $(local_domains) {
|
||||
deliver_to &local_routing
|
||||
}
|
||||
default_destination {
|
||||
modify {
|
||||
dkim $(primary_domain) $(local_domains) default
|
||||
}
|
||||
deliver_to &remote_queue
|
||||
}
|
||||
}
|
||||
default_source {
|
||||
reject 501 5.1.8 "Non-local sender domain"
|
||||
}
|
||||
}
|
||||
|
||||
target.remote outbound_delivery {
|
||||
limits {
|
||||
destination rate 20 1s
|
||||
destination concurrency 10
|
||||
}
|
||||
mx_auth {
|
||||
dane
|
||||
mtasts {
|
||||
cache fs
|
||||
fs_dir mtasts_cache/
|
||||
}
|
||||
local_policy {
|
||||
min_tls_level encrypted
|
||||
min_mx_level none
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
target.queue remote_queue {
|
||||
target &outbound_delivery
|
||||
autogenerated_msg_domain $(primary_domain)
|
||||
bounce {
|
||||
destination postmaster $(local_domains) {
|
||||
deliver_to &local_routing
|
||||
}
|
||||
default_destination {
|
||||
reject 550 5.0.0 "Refusing to send DSNs to non-local addresses"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
imap tcp://[::1]:143 {
|
||||
auth &local_authdb
|
||||
storage &local_mailboxes
|
||||
}
|
||||
'';
|
||||
ensureAccounts = [ "user@${domain}" ];
|
||||
ensureCredentials = {
|
||||
"user@${domain}".passwordFile = pkgs.writeText "dummy" "foobar";
|
||||
};
|
||||
};
|
||||
}
|
||||
5
clanModules/disk-id/README.md
Normal file
5
clanModules/disk-id/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
description = "Generates a uuid for use in disk device naming"
|
||||
features = [ "inventory" ]
|
||||
categories = [ "System" ]
|
||||
---
|
||||
6
clanModules/disk-id/default.nix
Normal file
6
clanModules/disk-id/default.nix
Normal file
@@ -0,0 +1,6 @@
|
||||
# Don't import this file
|
||||
# It is only here for backwards compatibility.
|
||||
# Don't author new modules with this file.
|
||||
{
|
||||
imports = [ ./roles/default.nix ];
|
||||
}
|
||||
36
clanModules/disk-id/roles/default.nix
Normal file
36
clanModules/disk-id/roles/default.nix
Normal file
@@ -0,0 +1,36 @@
|
||||
{
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
|
||||
config = {
|
||||
|
||||
warnings = [
|
||||
''
|
||||
The clan.disk-id module is deprecated and will be removed on 2025-07-15.
|
||||
For migration see: https://docs.clan.lol/guides/migrations/disk-id/
|
||||
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
!!! Please migrate. Otherwise you may not be able to boot your system after that date. !!!
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
''
|
||||
];
|
||||
clan.core.vars.generators.disk-id = {
|
||||
files.diskId.secret = false;
|
||||
runtimeInputs = [
|
||||
pkgs.coreutils
|
||||
pkgs.bash
|
||||
];
|
||||
script = ''
|
||||
uuid=$(bash ${./uuid4.sh})
|
||||
|
||||
# Remove the hyphens from the UUID
|
||||
uuid_no_hyphens=$(echo -n "$uuid" | tr -d '-')
|
||||
|
||||
echo -n "$uuid_no_hyphens" > "$out/diskId"
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
20
clanModules/disk-id/roles/uuid4.sh
Normal file
20
clanModules/disk-id/roles/uuid4.sh
Normal file
@@ -0,0 +1,20 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Read 16 bytes from /dev/urandom
|
||||
uuid=$(dd if=/dev/urandom bs=1 count=16 2>/dev/null | od -An -tx1 | tr -d ' \n')
|
||||
|
||||
# Break the UUID into pieces and apply the required modifications
|
||||
byte6=${uuid:12:2}
|
||||
byte8=${uuid:16:2}
|
||||
|
||||
# Construct the correct version and variant
|
||||
hex_byte6=$(printf "%x" $((0x$byte6 & 0x0F | 0x40)))
|
||||
hex_byte8=$(printf "%x" $((0x$byte8 & 0x3F | 0x80)))
|
||||
|
||||
# Rebuild the UUID with the correct fields
|
||||
uuid_v4="${uuid:0:12}${hex_byte6}${uuid:14:2}${hex_byte8}${uuid:18:14}"
|
||||
|
||||
# Format the UUID correctly 8-4-4-4-12
|
||||
uuid_formatted="${uuid_v4:0:8}-${uuid_v4:8:4}-${uuid_v4:12:4}-${uuid_v4:16:4}-${uuid_v4:20:12}"
|
||||
|
||||
echo -n "$uuid_formatted"
|
||||
6
clanModules/dyndns/README.md
Normal file
6
clanModules/dyndns/README.md
Normal file
@@ -0,0 +1,6 @@
|
||||
---
|
||||
description = "A dynamic DNS service to update domain IPs"
|
||||
---
|
||||
|
||||
To understand the possible options that can be set, visit the documentation of [ddns-updater](https://github.com/qdm12/ddns-updater?tab=readme-ov-file#versioned-documentation)
|
||||
|
||||
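A minimal usage sketch, based on the options this module defines (the instance name, domain, provider, and `extraSettings` values below are placeholders taken from the option examples):

```nix
clan.dyndns = {
  # update interval in minutes
  period = 5;
  settings."home.example.com" = {
    provider = "namecheap";
    domain = "example.com";
    # provider-specific keys go into extraSettings,
    # see the ddns-updater documentation linked above
    extraSettings.host = "home";
  };
};
```

The secret itself (password, token or API key) is not set here; it is prompted for and stored via the module's vars generator.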
257
clanModules/dyndns/default.nix
Normal file
257
clanModules/dyndns/default.nix
Normal file
@@ -0,0 +1,257 @@
|
||||
{
|
||||
config,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
name = "dyndns";
|
||||
cfg = config.clan.${name};
|
||||
|
||||
# We dedup secrets if they have the same provider + base domain
|
||||
secret_id = opt: "${name}-${opt.provider}-${opt.domain}";
|
||||
secret_path =
|
||||
opt: config.clan.core.vars.generators."${secret_id opt}".files."${secret_id opt}".path;
|
||||
|
||||
# We check that a secret has not been set in extraSettings.
|
||||
extraSettingsSafe =
|
||||
opt:
|
||||
if (builtins.hasAttr opt.secret_field_name opt.extraSettings) then
|
||||
throw "Please do not set ${opt.secret_field_name} in extraSettings, it is automatically set by the dyndns module."
|
||||
else
|
||||
opt.extraSettings;
|
||||
/*
|
||||
We go from:
|
||||
{home.example.com:{value:{domain:example.com,host:home, provider:namecheap}}}
|
||||
To:
|
||||
{settings: [{domain: example.com, host: home, provider: namecheap, password: dyndns-namecheap-example.com}]}
|
||||
*/
|
||||
service_config = {
|
||||
settings = builtins.catAttrs "value" (
|
||||
builtins.attrValues (
|
||||
lib.mapAttrs (_: opt: {
|
||||
value =
|
||||
(extraSettingsSafe opt)
|
||||
// {
|
||||
domain = opt.domain;
|
||||
provider = opt.provider;
|
||||
}
|
||||
// {
|
||||
"${opt.secret_field_name}" = secret_id opt;
|
||||
};
|
||||
}) cfg.settings
|
||||
)
|
||||
);
|
||||
};
|
||||
|
||||
secret_generator = _: opt: {
|
||||
name = secret_id opt;
|
||||
value = {
|
||||
share = true;
|
||||
migrateFact = "${secret_id opt}";
|
||||
prompts.${secret_id opt} = {
|
||||
type = "hidden";
|
||||
persist = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
options.clan.${name} = {
|
||||
server = {
|
||||
enable = lib.mkEnableOption "dyndns webserver";
|
||||
domain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "Domain to serve the webservice on";
|
||||
};
|
||||
port = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 54805;
|
||||
description = "Port to listen on";
|
||||
};
|
||||
};
|
||||
|
||||
period = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 5;
|
||||
description = "Domain update period in minutes";
|
||||
};
|
||||
|
||||
settings = lib.mkOption {
|
||||
type = lib.types.attrsOf (
|
||||
lib.types.submodule (
|
||||
{ ... }:
|
||||
{
|
||||
options = {
|
||||
provider = lib.mkOption {
|
||||
example = "namecheap";
|
||||
type = lib.types.str;
|
||||
description = "The dyndns provider to use";
|
||||
};
|
||||
domain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
example = "example.com";
|
||||
description = "The top level domain to update.";
|
||||
};
|
||||
secret_field_name = lib.mkOption {
|
||||
example = [
|
||||
"password"
|
||||
"api_key"
|
||||
];
|
||||
type = lib.types.enum [
|
||||
"password"
|
||||
"token"
|
||||
"api_key"
|
||||
"secret_api_key"
|
||||
];
|
||||
default = "password";
|
||||
description = "The field name for the secret";
|
||||
};
|
||||
# TODO: Ideally we would create a gigantic list of all possible settings / types
|
||||
# optimally we would have a way to generate the options from the source code
|
||||
extraSettings = lib.mkOption {
|
||||
type = lib.types.attrsOf lib.types.str;
|
||||
default = { };
|
||||
description = ''
|
||||
Extra settings for the provider.
|
||||
Provider specific settings: https://github.com/qdm12/ddns-updater#configuration
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
);
|
||||
default = { };
|
||||
description = "Configuration for which domains to update";
|
||||
};
|
||||
};
|
||||
|
||||
imports = [
|
||||
../nginx
|
||||
];
|
||||
|
||||
config = lib.mkMerge [
|
||||
(lib.mkIf (cfg.settings != { }) {
|
||||
clan.core.vars.generators = lib.mapAttrs' secret_generator cfg.settings;
|
||||
|
||||
users.groups.${name} = { };
|
||||
users.users.${name} = {
|
||||
group = name;
|
||||
isSystemUser = true;
|
||||
description = "User for ${name} service";
|
||||
home = "/var/lib/${name}";
|
||||
createHome = true;
|
||||
};
|
||||
|
||||
services.nginx = lib.mkIf cfg.server.enable {
|
||||
enable = true;
|
||||
virtualHosts = {
|
||||
"${cfg.server.domain}" = {
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://localhost:${toString cfg.server.port}";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.${name} = {
|
||||
path = [ ];
|
||||
description = "Dynamic DNS updater";
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
environment = {
|
||||
MYCONFIG = "${builtins.toJSON service_config}";
|
||||
SERVER_ENABLED = if cfg.server.enable then "yes" else "no";
|
||||
PERIOD = "${toString cfg.period}m";
|
||||
LISTENING_ADDRESS = ":${toString cfg.server.port}";
|
||||
};
|
||||
|
||||
serviceConfig =
|
||||
let
|
||||
pyscript =
|
||||
pkgs.writers.writePython3Bin "generate_secret_config.py"
|
||||
{
|
||||
libraries = [ ];
|
||||
doCheck = false;
|
||||
}
|
||||
''
|
||||
import json
|
||||
from pathlib import Path
|
||||
import os
|
||||
|
||||
cred_dir = Path(os.getenv("CREDENTIALS_DIRECTORY"))
|
||||
config_str = os.getenv("MYCONFIG")
|
||||
|
||||
|
||||
def get_credential(name):
|
||||
secret_p = cred_dir / name
|
||||
with open(secret_p, 'r') as f:
|
||||
return f.read().strip()
|
||||
|
||||
|
||||
config = json.loads(config_str)
|
||||
print(f"Config: {config}")
|
||||
for attrset in config["settings"]:
|
||||
if "password" in attrset:
|
||||
attrset['password'] = get_credential(attrset['password'])
|
||||
elif "token" in attrset:
|
||||
attrset['token'] = get_credential(attrset['token'])
|
||||
elif "secret_api_key" in attrset:
|
||||
attrset['secret_api_key'] = get_credential(attrset['secret_api_key'])
|
||||
elif "api_key" in attrset:
|
||||
attrset['api_key'] = get_credential(attrset['api_key'])
|
||||
else:
|
||||
raise ValueError(f"Missing secret field in {attrset}")
|
||||
|
||||
# create directory data if it does not exist
|
||||
data_dir = Path('data')
|
||||
data_dir.mkdir(mode=0o770, exist_ok=True)
|
||||
|
||||
# Create a temporary config file
|
||||
# with appropriate permissions
|
||||
tmp_config_path = data_dir / '.config.json'
|
||||
tmp_config_path.touch(mode=0o660, exist_ok=False)
|
||||
|
||||
# Write the config with secrets back
|
||||
with open(tmp_config_path, 'w') as f:
|
||||
f.write(json.dumps(config, indent=4))
|
||||
|
||||
# Move config into place
|
||||
config_path = data_dir / 'config.json'
|
||||
tmp_config_path.rename(config_path)
|
||||
|
||||
# Set file permissions to read
|
||||
# and write only by the user and group
|
||||
for file in data_dir.iterdir():
|
||||
file.chmod(0o660)
|
||||
'';
|
||||
in
|
||||
{
|
||||
ExecStartPre = lib.getExe pyscript;
|
||||
ExecStart = lib.getExe pkgs.ddns-updater;
|
||||
LoadCredential = lib.mapAttrsToList (_: opt: "${secret_id opt}:${secret_path opt}") cfg.settings;
|
||||
User = name;
|
||||
Group = name;
|
||||
NoNewPrivileges = true;
|
||||
PrivateTmp = true;
|
||||
ProtectSystem = "strict";
|
||||
ReadOnlyPaths = "/";
|
||||
PrivateDevices = "yes";
|
||||
ProtectKernelModules = "yes";
|
||||
ProtectKernelTunables = "yes";
|
||||
WorkingDirectory = "/var/lib/${name}";
|
||||
ReadWritePaths = [
|
||||
"/proc/self"
|
||||
"/var/lib/${name}"
|
||||
];
|
||||
|
||||
Restart = "always";
|
||||
RestartSec = 60;
|
||||
};
|
||||
};
|
||||
})
|
||||
];
|
||||
}
|
||||
5
clanModules/ergochat/README.md
Normal file
5
clanModules/ergochat/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
description = "A modern IRC server"
|
||||
categories = ["Social"]
|
||||
features = [ "inventory", "deprecated" ]
|
||||
---
|
||||
3
clanModules/ergochat/default.nix
Normal file
3
clanModules/ergochat/default.nix
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
imports = [ ./roles/default.nix ];
|
||||
}
|
||||
21
clanModules/ergochat/roles/default.nix
Normal file
21
clanModules/ergochat/roles/default.nix
Normal file
@@ -0,0 +1,21 @@
|
||||
_: {
|
||||
|
||||
warnings = [
|
||||
"The clan.ergochat module is deprecated and will be removed on 2025-07-15.
|
||||
Please migrate to user-maintained configuration or the new equivalent clan services
|
||||
(https://docs.clan.lol/reference/clanServices)."
|
||||
];
|
||||
|
||||
services.ergochat = {
|
||||
enable = true;
|
||||
|
||||
settings = {
|
||||
datastore = {
|
||||
autoupgrade = true;
|
||||
path = "/var/lib/ergo/ircd.db";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
clan.core.state.ergochat.folders = [ "/var/lib/ergo" ];
|
||||
}
|
||||
@@ -1,62 +1,51 @@
|
||||
{ ... }:
|
||||
|
||||
{ lib, ... }:
|
||||
let
|
||||
error = builtins.throw ''
|
||||
|
||||
###############################################################################
|
||||
# #
|
||||
# Clan modules (clanModules) have been deprecated and removed in favor of #
|
||||
# Clan services! #
|
||||
# #
|
||||
# Refer to https://docs.clan.lol/guides/migrations/migrate-inventory-services #
|
||||
# for migration instructions. #
|
||||
# #
|
||||
###############################################################################
|
||||
|
||||
'';
|
||||
|
||||
modnames = [
|
||||
"admin"
|
||||
"borgbackup"
|
||||
"borgbackup-static"
|
||||
"deltachat"
|
||||
"disk-id"
|
||||
"dyndns"
|
||||
"ergochat"
|
||||
"garage"
|
||||
"heisenbridge"
|
||||
"iwd"
|
||||
"localbackup"
|
||||
"localsend"
|
||||
"matrix-synapse"
|
||||
"moonlight"
|
||||
"mumble"
|
||||
"nginx"
|
||||
"packages"
|
||||
"postgresql"
|
||||
"root-password"
|
||||
"single-disk"
|
||||
"sshd"
|
||||
"state-version"
|
||||
"static-hosts"
|
||||
"sunshine"
|
||||
"syncthing"
|
||||
"syncthing-static-peers"
|
||||
"thelounge"
|
||||
"trusted-nix-caches"
|
||||
"user-password"
|
||||
"vaultwarden"
|
||||
"xfce"
|
||||
"zerotier-static-peers"
|
||||
"zt-tcp-relay"
|
||||
];
|
||||
inherit (lib)
|
||||
filterAttrs
|
||||
pathExists
|
||||
;
|
||||
in
|
||||
|
||||
{
|
||||
flake.clanModules = builtins.listToAttrs (
|
||||
map (name: {
|
||||
inherit name;
|
||||
value = error;
|
||||
}) modnames
|
||||
);
|
||||
# only import available files, as this allows filtering the files for tests.
|
||||
flake.clanModules = filterAttrs (_name: pathExists) {
|
||||
auto-upgrade = ./auto-upgrade;
|
||||
admin = ./admin;
|
||||
borgbackup = ./borgbackup;
|
||||
borgbackup-static = ./borgbackup-static;
|
||||
deltachat = ./deltachat;
|
||||
data-mesher = ./data-mesher;
|
||||
disk-id = ./disk-id;
|
||||
dyndns = ./dyndns;
|
||||
ergochat = ./ergochat;
|
||||
garage = ./garage;
|
||||
heisenbridge = ./heisenbridge;
|
||||
importer = ./importer;
|
||||
iwd = ./iwd;
|
||||
localbackup = ./localbackup;
|
||||
localsend = ./localsend;
|
||||
matrix-synapse = ./matrix-synapse;
|
||||
moonlight = ./moonlight;
|
||||
mumble = ./mumble;
|
||||
mycelium = ./mycelium;
|
||||
nginx = ./nginx;
|
||||
packages = ./packages;
|
||||
postgresql = ./postgresql;
|
||||
root-password = ./root-password;
|
||||
single-disk = ./single-disk;
|
||||
sshd = ./sshd;
|
||||
state-version = ./state-version;
|
||||
static-hosts = ./static-hosts;
|
||||
sunshine = ./sunshine;
|
||||
syncthing = ./syncthing;
|
||||
syncthing-static-peers = ./syncthing-static-peers;
|
||||
thelounge = ./thelounge;
|
||||
trusted-nix-caches = ./trusted-nix-caches;
|
||||
user-password = ./user-password;
|
||||
vaultwarden = ./vaultwarden;
|
||||
wifi = ./wifi;
|
||||
xfce = ./xfce;
|
||||
zerotier = ./zerotier;
|
||||
zerotier-static-peers = ./zerotier-static-peers;
|
||||
zt-tcp-relay = ./zt-tcp-relay;
|
||||
};
|
||||
}
|
||||
|
||||
11
clanModules/garage/README.md
Normal file
11
clanModules/garage/README.md
Normal file
@@ -0,0 +1,11 @@
|
||||
---
|
||||
description = "S3-compatible object store for small self-hosted geo-distributed deployments"
|
||||
categories = ["System"]
|
||||
features = [ "inventory", "deprecated" ]
|
||||
---
|
||||
|
||||
This module generates garage-specific keys automatically.
|
||||
It also shares the `rpc_secret` between instances.
|
||||
|
||||
Options: [NixosModuleOptions](https://search.nixos.org/options?channel=unstable&size=50&sort=relevance&type=packages&query=garage)
|
||||
Documentation: https://garagehq.deuxfleurs.fr/
|
||||
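A minimal sketch of the remaining configuration; this module only provides the keys and the shared `rpc_secret`, so the regular NixOS options linked above still have to be set. The paths and the replication setting below are illustrative only and may differ between garage versions:

```nix
services.garage.settings = {
  metadata_dir = "/var/lib/garage/meta";
  data_dir = "/var/lib/garage/data";
  # key name is version dependent (newer releases use replication_factor)
  replication_mode = "none";
};
```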
3
clanModules/garage/default.nix
Normal file
3
clanModules/garage/default.nix
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
imports = [ ./roles/default.nix ];
|
||||
}
|
||||
50
clanModules/garage/roles/default.nix
Normal file
50
clanModules/garage/roles/default.nix
Normal file
@@ -0,0 +1,50 @@
|
||||
{ config, pkgs, ... }:
|
||||
{
|
||||
|
||||
warnings = [
|
||||
"The clan.ergochat module is deprecated and will be removed on 2025-07-15.
|
||||
Please migrate to user-maintained configuration or the new equivalent clan services
|
||||
(https://docs.clan.lol/reference/clanServices)."
|
||||
];
|
||||
|
||||
systemd.services.garage.serviceConfig = {
|
||||
LoadCredential = [
|
||||
"rpc_secret_path:${config.clan.core.vars.generators.garage-shared.files.rpc_secret.path}"
|
||||
"admin_token_path:${config.clan.core.vars.generators.garage.files.admin_token.path}"
|
||||
"metrics_token_path:${config.clan.core.vars.generators.garage.files.metrics_token.path}"
|
||||
];
|
||||
Environment = [
|
||||
"GARAGE_ALLOW_WORLD_READABLE_SECRETS=true"
|
||||
"GARAGE_RPC_SECRET_FILE=%d/rpc_secret_path"
|
||||
"GARAGE_ADMIN_TOKEN_FILE=%d/admin_token_path"
|
||||
"GARAGE_METRICS_TOKEN_FILE=%d/metrics_token_path"
|
||||
];
|
||||
};
|
||||
|
||||
clan.core.vars.generators.garage = {
|
||||
files.admin_token = { };
|
||||
files.metrics_token = { };
|
||||
runtimeInputs = [
|
||||
pkgs.coreutils
|
||||
pkgs.openssl
|
||||
];
|
||||
script = ''
|
||||
openssl rand -base64 -out "$out"/admin_token 32
|
||||
openssl rand -base64 -out "$out"/metrics_token 32
|
||||
'';
|
||||
};
|
||||
|
||||
clan.core.vars.generators.garage-shared = {
|
||||
share = true;
|
||||
files.rpc_secret = { };
|
||||
runtimeInputs = [
|
||||
pkgs.coreutils
|
||||
pkgs.openssl
|
||||
];
|
||||
script = ''
|
||||
openssl rand -hex -out "$out"/rpc_secret 32
|
||||
'';
|
||||
};
|
||||
|
||||
clan.core.state.garage.folders = [ config.services.garage.settings.metadata_dir ];
|
||||
}
|
||||
5
clanModules/heisenbridge/README.md
Normal file
5
clanModules/heisenbridge/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
description = "A matrix bridge to communicate with IRC"
|
||||
categories = ["Social"]
|
||||
features = [ "inventory", "deprecated" ]
|
||||
---
|
||||
3
clanModules/heisenbridge/default.nix
Normal file
3
clanModules/heisenbridge/default.nix
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
imports = [ ./roles/default.nix ];
|
||||
}
|
||||
27
clanModules/heisenbridge/roles/default.nix
Normal file
27
clanModules/heisenbridge/roles/default.nix
Normal file
@@ -0,0 +1,27 @@
|
||||
{
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [
|
||||
(lib.mkRemovedOptionModule [
|
||||
"clan"
|
||||
"heisenbridge"
|
||||
"enable"
|
||||
] "Importing the module will already enable the service.")
|
||||
];
|
||||
config = {
|
||||
warnings = [
|
||||
"The clan.heisenbridge module is deprecated and will be removed on 2025-07-15.
|
||||
Please migrate to user-maintained configuration or the new equivalent clan services
|
||||
(https://docs.clan.lol/reference/clanServices)."
|
||||
];
|
||||
services.heisenbridge = {
|
||||
enable = true;
|
||||
homeserver = "http://localhost:8008"; # TODO: Sync with matrix-synapse
|
||||
};
|
||||
services.matrix-synapse.settings.app_service_config_files = [
|
||||
"/var/lib/heisenbridge/registration.yml"
|
||||
];
|
||||
};
|
||||
}
|
||||
27
clanModules/importer/README.md
Normal file
27
clanModules/importer/README.md
Normal file
@@ -0,0 +1,27 @@
|
||||
---
|
||||
description = "Convenient, structured module imports for hosts."
|
||||
categories = ["Utility"]
|
||||
features = [ "inventory" ]
|
||||
---
|
||||
The importer module allows users to configure the import of modules in a flexible and structured way.
|
||||
|
||||
It exposes the `extraModules` functionality of the inventory, without any added configuration.
|
||||
|
||||
## Usage
|
||||
|
||||
```nix
|
||||
inventory.services = {
|
||||
importer.base = {
|
||||
roles.default.tags = [ "all" ];
|
||||
roles.default.extraModules = [ "modules/base.nix" ];
|
||||
};
|
||||
importer.zone1 = {
|
||||
roles.default.tags = [ "zone1" ];
|
||||
roles.default.extraModules = [ "modules/zone1.nix" ];
|
||||
};
|
||||
};
|
||||
```
|
||||
|
||||
This will import the module `modules/base.nix` on all machines that have the `all` tag,
|
||||
which by default is every machine managed by the clan.
|
||||
It will also import the module `modules/zone1.nix` on all machines tagged with `zone1`.
|
||||
1
clanModules/importer/roles/default.nix
Normal file
1
clanModules/importer/roles/default.nix
Normal file
@@ -0,0 +1 @@
|
||||
{ }
|
||||
9
clanModules/iwd/README.md
Normal file
9
clanModules/iwd/README.md
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
description = "Automatically provisions wifi credentials"
|
||||
features = [ "inventory", "deprecated" ]
|
||||
categories = [ "Network" ]
|
||||
---
|
||||
|
||||
!!! Warning
|
||||
If you've been using NetworkManager + wpa_supplicant and are now switching to IWD, read this migration guide:
|
||||
https://archive.kernel.org/oldwiki/iwd.wiki.kernel.org/networkmanager.html#converting_network_profiles
|
||||
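A minimal usage sketch based on the `clan.iwd.networks` option defined by this module (the network name below is a placeholder; the passphrase is prompted for and stored as a var):

```nix
clan.iwd.networks."home-wifi" = {
  # ssid defaults to the attribute name
  AutoConnect = true;
};
```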
6
clanModules/iwd/default.nix
Normal file
6
clanModules/iwd/default.nix
Normal file
@@ -0,0 +1,6 @@
|
||||
# Don't import this file
|
||||
# It is only here for backwards compatibility.
|
||||
# Don't author new modules with this file.
|
||||
{
|
||||
imports = [ ./roles/default.nix ];
|
||||
}
|
||||
106
clanModules/iwd/roles/default.nix
Normal file
106
clanModules/iwd/roles/default.nix
Normal file
@@ -0,0 +1,106 @@
|
||||
{
|
||||
lib,
|
||||
config,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
cfg = config.clan.iwd;
|
||||
secret_path = ssid: config.clan.core.vars.generators."iwd.${ssid}".files."iwd.${ssid}".path;
|
||||
secret_generator = name: value: {
|
||||
name = "iwd.${value.ssid}";
|
||||
value =
|
||||
let
|
||||
secret_name = "iwd.${value.ssid}";
|
||||
in
|
||||
{
|
||||
prompts.${secret_name} = {
|
||||
description = "Wifi password for '${value.ssid}'";
|
||||
persist = true;
|
||||
};
|
||||
migrateFact = secret_name;
|
||||
# ref. man iwd.network
|
||||
script = ''
|
||||
config="
|
||||
[Settings]
|
||||
AutoConnect=${if value.AutoConnect then "true" else "false"}
|
||||
[Security]
|
||||
Passphrase=$(echo -e "$prompt_value/${secret_name}" | ${lib.getExe pkgs.gnused} "s=\\\=\\\\\\\=g;s=\t=\\\t=g;s=\r=\\\r=g;s=^ =\\\s=")
|
||||
"
|
||||
echo "$config" > "$out/${secret_name}"
|
||||
'';
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
options.clan.iwd = {
|
||||
networks = lib.mkOption {
|
||||
type = lib.types.attrsOf (
|
||||
lib.types.submodule (
|
||||
{ name, ... }:
|
||||
{
|
||||
options = {
|
||||
ssid = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = name;
|
||||
description = "The name of the wifi network";
|
||||
};
|
||||
AutoConnect = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = "Automatically try to join this wifi network";
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
);
|
||||
default = { };
|
||||
description = "Wifi networks to predefine";
|
||||
};
|
||||
};
|
||||
|
||||
imports = [
|
||||
(lib.mkRemovedOptionModule [
|
||||
"clan"
|
||||
"iwd"
|
||||
"enable"
|
||||
] "Just define clan.iwd.networks to enable it")
|
||||
];
|
||||
|
||||
config = lib.mkMerge [
|
||||
(lib.mkIf (cfg.networks != { }) {
|
||||
# Systemd tmpfiles rule to create /var/lib/iwd/example.psk file
|
||||
systemd.tmpfiles.rules = lib.mapAttrsToList (
|
||||
_: value: "C /var/lib/iwd/${value.ssid}.psk 0600 root root - ${secret_path value.ssid}"
|
||||
) cfg.networks;
|
||||
|
||||
clan.core.vars.generators = lib.mapAttrs' secret_generator cfg.networks;
|
||||
|
||||
# TODO: restart the iwd.service if something changes
|
||||
})
|
||||
{
|
||||
warnings = [
|
||||
"The clan.iwd module is deprecated and will be removed on 2025-07-15. Please migrate to a user-maintained configuration or use the wifi service."
|
||||
];
|
||||
|
||||
# disable wpa supplicant
|
||||
networking.wireless.enable = false;
|
||||
|
||||
# Set the network manager backend to iwd
|
||||
networking.networkmanager.wifi.backend = "iwd";
|
||||
|
||||
# Use iwd instead of wpa_supplicant. It has a user friendly CLI
|
||||
networking.wireless.iwd = {
|
||||
enable = true;
|
||||
settings = {
|
||||
Network = {
|
||||
EnableIPv6 = true;
|
||||
RoutePriorityOffset = 300;
|
||||
};
|
||||
Settings.AutoConnect = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
];
|
||||
}
|
||||
3
clanModules/localbackup/README.md
Normal file
3
clanModules/localbackup/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
---
|
||||
description = "Automatically backups current machine to local directory."
|
||||
---
|
||||
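A minimal usage sketch based on the options this module defines (the target name, directory, and mountpoint below are placeholders):

```nix
clan.localbackup = {
  targets.external-hdd = {
    directory = "/mnt/backup-disk/snapshots";
    # optional: mounted before the backup and unmounted afterwards
    mountpoint = "/mnt/backup-disk";
  };
  # number of snapshots to keep
  snapshots = 20;
};
```

Backups can then be created with the `localbackup-create` command that the module installs.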
241
clanModules/localbackup/default.nix
Normal file
241
clanModules/localbackup/default.nix
Normal file
@@ -0,0 +1,241 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
cfg = config.clan.localbackup;
|
||||
uniqueFolders = lib.unique (
|
||||
lib.flatten (lib.mapAttrsToList (_name: state: state.folders) config.clan.core.state)
|
||||
);
|
||||
rsnapshotConfig = target: ''
|
||||
config_version 1.2
|
||||
snapshot_root ${target.directory}
|
||||
sync_first 1
|
||||
cmd_cp ${pkgs.coreutils}/bin/cp
|
||||
cmd_rm ${pkgs.coreutils}/bin/rm
|
||||
cmd_rsync ${pkgs.rsync}/bin/rsync
|
||||
cmd_ssh ${pkgs.openssh}/bin/ssh
|
||||
cmd_logger ${pkgs.inetutils}/bin/logger
|
||||
cmd_du ${pkgs.coreutils}/bin/du
|
||||
cmd_rsnapshot_diff ${pkgs.rsnapshot}/bin/rsnapshot-diff
|
||||
|
||||
${lib.optionalString (target.postBackupHook != null) ''
|
||||
cmd_postexec ${pkgs.writeShellScript "postexec.sh" ''
|
||||
set -efu -o pipefail
|
||||
${target.postBackupHook}
|
||||
''}
|
||||
''}
|
||||
retain snapshot ${builtins.toString config.clan.localbackup.snapshots}
|
||||
${lib.concatMapStringsSep "\n" (folder: ''
|
||||
backup ${folder} ${config.networking.hostName}/
|
||||
'') uniqueFolders}
|
||||
'';
|
||||
in
|
||||
{
|
||||
options.clan.localbackup = {
|
||||
targets = lib.mkOption {
|
||||
type = lib.types.attrsOf (
|
||||
lib.types.submodule (
|
||||
{ name, ... }:
|
||||
{
|
||||
options = {
|
||||
name = lib.mkOption {
|
||||
type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
|
||||
default = name;
|
||||
description = "the name of the backup job";
|
||||
};
|
||||
directory = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "the directory to backup";
|
||||
};
|
||||
mountpoint = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
description = "mountpoint of the directory to backup. If set, the directory will be mounted before the backup and unmounted afterwards";
|
||||
};
|
||||
preMountHook = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.lines;
|
||||
default = null;
|
||||
description = "Shell commands to run before the directory is mounted";
|
||||
};
|
||||
postMountHook = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.lines;
|
||||
default = null;
|
||||
description = "Shell commands to run after the directory is mounted";
|
||||
};
|
||||
preUnmountHook = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.lines;
|
||||
default = null;
|
||||
description = "Shell commands to run before the directory is unmounted";
|
||||
};
|
||||
postUnmountHook = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.lines;
|
||||
default = null;
|
||||
description = "Shell commands to run after the directory is unmounted";
|
||||
};
|
||||
preBackupHook = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.lines;
|
||||
default = null;
|
||||
description = "Shell commands to run before the backup";
|
||||
};
|
||||
postBackupHook = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.lines;
|
||||
default = null;
|
||||
description = "Shell commands to run after the backup";
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
);
|
||||
default = { };
|
||||
description = "List of directories where backups are stored";
|
||||
};
|
||||
|
||||
snapshots = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 20;
|
||||
description = "Number of snapshots to keep";
|
||||
};
|
||||
};
|
||||
|
||||
config =
|
||||
let
|
||||
mountHook = target: ''
|
||||
if [[ -x /run/current-system/sw/bin/localbackup-mount-${target.name} ]]; then
|
||||
/run/current-system/sw/bin/localbackup-mount-${target.name}
|
||||
fi
|
||||
if [[ -x /run/current-system/sw/bin/localbackup-unmount-${target.name} ]]; then
|
||||
trap "/run/current-system/sw/bin/localbackup-unmount-${target.name}" EXIT
|
||||
fi
|
||||
'';
|
||||
in
|
||||
lib.mkIf (cfg.targets != { }) {
|
||||
environment.systemPackages = [
|
||||
(pkgs.writeShellScriptBin "localbackup-create" ''
|
||||
set -efu -o pipefail
|
||||
export PATH=${
|
||||
lib.makeBinPath [
|
||||
pkgs.rsnapshot
|
||||
pkgs.coreutils
|
||||
pkgs.util-linux
|
||||
]
|
||||
}
|
||||
${lib.concatMapStringsSep "\n" (target: ''
|
||||
${mountHook target}
|
||||
echo "Creating backup '${target.name}'"
|
||||
|
||||
${lib.optionalString (target.preBackupHook != null) ''
|
||||
(
|
||||
${target.preBackupHook}
|
||||
)
|
||||
''}
|
||||
|
||||
declare -A preCommandErrors
|
||||
${lib.concatMapStringsSep "\n" (
|
||||
state:
|
||||
lib.optionalString (state.preBackupCommand != null) ''
|
||||
echo "Running pre-backup command for ${state.name}"
|
||||
if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
|
||||
preCommandErrors["${state.name}"]=1
|
||||
fi
|
||||
''
|
||||
) (builtins.attrValues config.clan.core.state)}
|
||||
|
||||
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" sync
|
||||
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" snapshot
|
||||
'') (builtins.attrValues cfg.targets)}'')
|
||||
(pkgs.writeShellScriptBin "localbackup-list" ''
|
||||
set -efu -o pipefail
|
||||
export PATH=${
|
||||
lib.makeBinPath [
|
||||
pkgs.jq
|
||||
pkgs.findutils
|
||||
pkgs.coreutils
|
||||
pkgs.util-linux
|
||||
]
|
||||
}
|
||||
(${
|
||||
lib.concatMapStringsSep "\n" (target: ''
|
||||
(
|
||||
${mountHook target}
|
||||
find ${lib.escapeShellArg target.directory} -mindepth 1 -maxdepth 1 -name "snapshot.*" -print0 -type d \
|
||||
| jq -Rs 'split("\u0000") | .[] | select(. != "") | { "name": ("${target.name}::" + .)}'
|
||||
)
|
||||
'') (builtins.attrValues cfg.targets)
|
||||
}) | jq -s .
|
||||
'')
|
||||
(pkgs.writeShellScriptBin "localbackup-restore" ''
|
||||
set -efu -o pipefail
|
||||
export PATH=${
|
||||
lib.makeBinPath [
|
||||
pkgs.rsync
|
||||
pkgs.coreutils
|
||||
pkgs.util-linux
|
||||
pkgs.gawk
|
||||
]
|
||||
}
|
||||
if [[ "''${NAME:-}" == "" ]]; then
|
||||
echo "No backup name given via NAME environment variable"
|
||||
exit 1
|
||||
fi
|
||||
if [[ "''${FOLDERS:-}" == "" ]]; then
|
||||
echo "No folders given via FOLDERS environment variable"
|
||||
exit 1
|
||||
fi
|
||||
name=$(awk -F'::' '{print $1}' <<< $NAME)
|
||||
backupname=''${NAME#$name::}
|
||||
|
||||
if command -v localbackup-mount-$name; then
|
||||
localbackup-mount-$name
|
||||
fi
|
||||
if command -v localbackup-unmount-$name; then
|
||||
trap "localbackup-unmount-$name" EXIT
|
||||
fi
|
||||
|
||||
if [[ ! -d $backupname ]]; then
|
||||
echo "No backup found $backupname"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
IFS=':' read -ra FOLDER <<< "''$FOLDERS"
|
||||
for folder in "''${FOLDER[@]}"; do
|
||||
mkdir -p "$folder"
|
||||
rsync -a "$backupname/${config.networking.hostName}$folder/" "$folder"
|
||||
done
|
||||
'')
|
||||
]
|
||||
++ (lib.mapAttrsToList (
|
||||
name: target:
|
||||
pkgs.writeShellScriptBin ("localbackup-mount-" + name) ''
|
||||
set -efu -o pipefail
|
||||
${lib.optionalString (target.preMountHook != null) target.preMountHook}
|
||||
${lib.optionalString (target.mountpoint != null) ''
|
||||
if ! ${pkgs.util-linux}/bin/mountpoint -q ${lib.escapeShellArg target.mountpoint}; then
|
||||
${pkgs.util-linux}/bin/mount -o X-mount.mkdir ${lib.escapeShellArg target.mountpoint}
|
||||
fi
|
||||
''}
|
||||
${lib.optionalString (target.postMountHook != null) target.postMountHook}
|
||||
''
|
||||
) cfg.targets)
|
||||
++ lib.mapAttrsToList (
|
||||
name: target:
|
||||
pkgs.writeShellScriptBin ("localbackup-unmount-" + name) ''
|
||||
set -efu -o pipefail
|
||||
${lib.optionalString (target.preUnmountHook != null) target.preUnmountHook}
|
||||
${lib.optionalString (
|
||||
target.mountpoint != null
|
||||
) "${pkgs.util-linux}/bin/umount ${lib.escapeShellArg target.mountpoint}"}
|
||||
${lib.optionalString (target.postUnmountHook != null) target.postUnmountHook}
|
||||
''
|
||||
) cfg.targets;
|
||||
|
||||
clan.core.backups.providers.localbackup = {
|
||||
# TODO list needs to run locally or on the remote machine
|
||||
list = "localbackup-list";
|
||||
create = "localbackup-create";
|
||||
restore = "localbackup-restore";
|
||||
};
|
||||
};
|
||||
}
|
||||
5
clanModules/localsend/README.md
Normal file
5
clanModules/localsend/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
description = "Securely sharing files and messages over a local network without internet connectivity."
|
||||
categories = ["Utility"]
|
||||
features = [ "inventory", "deprecated" ]
|
||||
---
|
||||
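A minimal usage sketch based on the options this module defines (the display name and ZeroTier IPv4 address below are placeholders; `ipv4Addr` is only needed because multicast discovery currently requires IPv4):

```nix
clan.localsend = {
  displayName = "my-laptop";
  ipv4Addr = "192.168.56.2/24";
};
```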
3
clanModules/localsend/default.nix
Normal file
3
clanModules/localsend/default.nix
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
imports = [ ./roles/default.nix ];
|
||||
}
|
||||
22
clanModules/localsend/localsend-ensure-config/default.nix
Normal file
22
clanModules/localsend/localsend-ensure-config/default.nix
Normal file
@@ -0,0 +1,22 @@
|
||||
{
|
||||
lib,
|
||||
writers,
|
||||
writeShellScriptBin,
|
||||
localsend,
|
||||
alias ? null,
|
||||
}:
|
||||
let
|
||||
localsend-ensure-config = writers.writePython3 "localsend-ensure-config" {
|
||||
flakeIgnore = [
|
||||
# We don't live in the dark ages anymore.
|
||||
# Languages like Python that are whitespace heavy will overrun
|
||||
# 79 characters..
|
||||
"E501"
|
||||
];
|
||||
} (builtins.readFile ./localsend-ensure-config.py);
|
||||
in
|
||||
writeShellScriptBin "localsend" ''
|
||||
set -xeu
|
||||
${localsend-ensure-config} ${lib.optionalString (alias != null) alias}
|
||||
${lib.getExe localsend}
|
||||
''
|
||||
@@ -0,0 +1,64 @@
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def load_json(file_path: Path) -> dict[str, any]:
|
||||
try:
|
||||
with file_path.open("r") as file:
|
||||
return json.load(file)
|
||||
except FileNotFoundError:
|
||||
return {}
|
||||
|
||||
|
||||
def save_json(file_path: Path, data: dict[str, any]) -> None:
|
||||
with file_path.open("w") as file:
|
||||
json.dump(data, file, indent=4)
|
||||
|
||||
|
||||
def update_json(file_path: Path, updates: dict[str, any]) -> None:
|
||||
data = load_json(file_path)
|
||||
data.update(updates)
|
||||
save_json(file_path, data)
|
||||
|
||||
|
||||
def config_location() -> Path:
|
||||
config_file = "shared_preferences.json"
|
||||
config_directory = ".local/share/org.localsend.localsend_app"
|
||||
config_path = Path.home() / Path(config_directory) / Path(config_file)
|
||||
return config_path
|
||||
|
||||
|
||||
def ensure_config_directory() -> None:
|
||||
config_directory = Path(config_location()).parent
|
||||
config_directory.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
def load_config() -> dict[str, any]:
|
||||
return load_json(config_location())
|
||||
|
||||
|
||||
def save_config(data: dict[str, any]) -> None:
|
||||
save_json(config_location(), data)
|
||||
|
||||
|
||||
def update_username(username: str, data: dict[str, any]) -> dict[str, any]:
|
||||
data["flutter.ls_alias"] = username
|
||||
return data
|
||||
|
||||
|
||||
def main(argv: list[str]) -> None:
|
||||
try:
|
||||
display_name = argv[1]
|
||||
except IndexError:
|
||||
# This is not an error, just don't update the name
|
||||
print("No display name provided.")
|
||||
sys.exit(0)
|
||||
|
||||
ensure_config_directory()
|
||||
updated_data = update_username(display_name, load_config())
|
||||
save_config(updated_data)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv[:2])
|
||||
69
clanModules/localsend/roles/default.nix
Normal file
69
clanModules/localsend/roles/default.nix
Normal file
@@ -0,0 +1,69 @@
|
||||
{
|
||||
config,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
cfg = config.clan.localsend;
|
||||
in
|
||||
{
|
||||
# Integration can be improved, if the following issues get implemented:
|
||||
# - cli frontend: https://github.com/localsend/localsend/issues/11
|
||||
# - ipv6 support: https://github.com/localsend/localsend/issues/549
|
||||
options.clan.localsend = {
|
||||
|
||||
displayName = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
description = "The name that localsend will use to display your instance.";
|
||||
};
|
||||
|
||||
package = lib.mkPackageOption pkgs "localsend" { };
|
||||
|
||||
ipv4Addr = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
example = "192.168.56.2/24";
|
||||
description = "Optional IPv4 address for ZeroTier network.";
|
||||
};
|
||||
};
|
||||
|
||||
imports = [
|
||||
(lib.mkRemovedOptionModule [
|
||||
"clan"
|
||||
"localsend"
|
||||
"enable"
|
||||
] "Importing the module will already enable the service.")
|
||||
];
|
||||
config = {
|
||||
warnings = [
|
||||
"The clan.localsend module is deprecated and will be removed on 2025-07-15.
|
||||
Please migrate to user-maintained configuration or the new equivalent clan services
|
||||
(https://docs.clan.lol/reference/clanServices)."
|
||||
];
|
||||
|
||||
clan.core.state.localsend.folders = [
|
||||
"/var/localsend"
|
||||
];
|
||||
environment.systemPackages = [
|
||||
(pkgs.callPackage ./localsend-ensure-config {
|
||||
localsend = config.clan.localsend.package;
|
||||
alias = config.clan.localsend.displayName;
|
||||
})
|
||||
];
|
||||
|
||||
networking.firewall.interfaces."zt+".allowedTCPPorts = [ 53317 ];
|
||||
networking.firewall.interfaces."zt+".allowedUDPPorts = [ 53317 ];
|
||||
|
||||
#TODO: This is currently needed because there is no ipv6 multicasting support yet
|
||||
systemd.network.networks = lib.mkIf (cfg.ipv4Addr != null) {
|
||||
"09-zerotier" = {
|
||||
networkConfig = {
|
||||
Address = cfg.ipv4Addr;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
3
clanModules/matrix-synapse/README.md
Normal file
3
clanModules/matrix-synapse/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
---
|
||||
description = "A federated messaging server with end-to-end encryption."
|
||||
---
|
||||
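A minimal usage sketch based on the `clan.matrix-synapse` options defined by this module (the domains and user name below are placeholders taken from the option examples):

```nix
clan.matrix-synapse = {
  server_tld = "example.com";
  app_domain = "matrix.example.com";
  users.alice = {
    admin = true;
  };
};
```

The per-user passwords and the registration shared secret are generated as vars by the module.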
207
clanModules/matrix-synapse/default.nix
Normal file
207
clanModules/matrix-synapse/default.nix
Normal file
@@ -0,0 +1,207 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
cfg = config.clan.matrix-synapse;
|
||||
element-web =
|
||||
pkgs.runCommand "element-web-with-config" { nativeBuildInputs = [ pkgs.buildPackages.jq ]; }
|
||||
''
|
||||
cp -r ${pkgs.element-web} $out
|
||||
chmod -R u+w $out
|
||||
jq '."default_server_config"."m.homeserver" = { "base_url": "https://${cfg.app_domain}:443", "server_name": "${cfg.server_tld}" }' \
|
||||
> $out/config.json < ${pkgs.element-web}/config.json
|
||||
ln -s $out/config.json $out/config.${cfg.app_domain}.json
|
||||
'';
|
||||
in
|
||||
# FIXME: This was taken from upstream. Drop this when our patch is upstream
|
||||
{
|
||||
options.services.matrix-synapse.package = lib.mkOption { readOnly = false; };
|
||||
options.clan.matrix-synapse = {
|
||||
server_tld = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "The address that is suffixed after your username i.e @alice:example.com";
|
||||
example = "example.com";
|
||||
};
|
||||
|
||||
app_domain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "The matrix server hostname also serves the element client";
|
||||
example = "matrix.example.com";
|
||||
};
|
||||
|
||||
users = lib.mkOption {
|
||||
default = { };
|
||||
type = lib.types.attrsOf (
|
||||
lib.types.submodule (
|
||||
{ name, ... }:
|
||||
{
|
||||
options = {
|
||||
name = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = name;
|
||||
description = "The name of the user";
|
||||
};
|
||||
|
||||
admin = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = "Whether the user should be an admin";
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
);
|
||||
description = "A list of users. Not that only new users will be created and existing ones are not modified.";
|
||||
example.alice = {
|
||||
admin = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
imports = [
|
||||
(lib.mkRemovedOptionModule [
|
||||
"clan"
|
||||
"matrix-synapse"
|
||||
"enable"
|
||||
] "Importing the module will already enable the service.")
|
||||
../nginx
|
||||
];
|
||||
config = {
|
||||
services.matrix-synapse = {
|
||||
enable = true;
|
||||
settings = {
|
||||
server_name = cfg.server_tld;
|
||||
database = {
|
||||
args.user = "matrix-synapse";
|
||||
args.database = "matrix-synapse";
|
||||
name = "psycopg2";
|
||||
};
|
||||
turn_uris = [
|
||||
"turn:turn.matrix.org?transport=udp"
|
||||
"turn:turn.matrix.org?transport=tcp"
|
||||
];
|
||||
registration_shared_secret_path = "/run/synapse-registration-shared-secret";
|
||||
listeners = [
|
||||
{
|
||||
port = 8008;
|
||||
bind_addresses = [ "::1" ];
|
||||
type = "http";
|
||||
tls = false;
|
||||
x_forwarded = true;
|
||||
resources = [
|
||||
{
|
||||
names = [ "client" ];
|
||||
compress = true;
|
||||
}
|
||||
{
|
||||
names = [ "federation" ];
|
||||
compress = false;
|
||||
}
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
clan.core.postgresql.enable = true;
|
||||
clan.core.postgresql.users.matrix-synapse = { };
|
||||
clan.core.postgresql.databases.matrix-synapse.create.options = {
|
||||
TEMPLATE = "template0";
|
||||
LC_COLLATE = "C";
|
||||
LC_CTYPE = "C";
|
||||
ENCODING = "UTF8";
|
||||
OWNER = "matrix-synapse";
|
||||
};
|
||||
clan.core.postgresql.databases.matrix-synapse.restore.stopOnRestore = [ "matrix-synapse" ];
|
||||
|
||||
clan.core.vars.generators = {
|
||||
"matrix-synapse" = {
|
||||
files."synapse-registration_shared_secret" = { };
|
||||
runtimeInputs = with pkgs; [
|
||||
coreutils
|
||||
pwgen
|
||||
];
|
||||
migrateFact = "matrix-synapse";
|
||||
script = ''
|
||||
echo -n "$(pwgen -s 32 1)" > "$out"/synapse-registration_shared_secret
|
||||
'';
|
||||
};
|
||||
}
|
||||
// lib.mapAttrs' (
|
||||
name: user:
|
||||
lib.nameValuePair "matrix-password-${user.name}" {
|
||||
files."matrix-password-${user.name}" = { };
|
||||
migrateFact = "matrix-password-${user.name}";
|
||||
runtimeInputs = with pkgs; [ xkcdpass ];
|
||||
script = ''
|
||||
xkcdpass -n 4 -d - > "$out"/${lib.escapeShellArg "matrix-password-${user.name}"}
|
||||
'';
|
||||
}
|
||||
) cfg.users;
|
||||
|
||||
systemd.services.matrix-synapse =
|
||||
let
|
||||
usersScript = ''
|
||||
while ! ${pkgs.netcat}/bin/nc -z -v ::1 8008; do
|
||||
if ! kill -0 "$MAINPID"; then exit 1; fi
|
||||
sleep 1;
|
||||
done
|
||||
''
|
||||
+ lib.concatMapStringsSep "\n" (user: ''
|
||||
# only create user if it doesn't exist
|
||||
/run/current-system/sw/bin/matrix-synapse-register_new_matrix_user --exists-ok --password-file ${
|
||||
config.clan.core.vars.generators."matrix-password-${user.name}".files."matrix-password-${user.name}".path
|
||||
} --user "${user.name}" ${if user.admin then "--admin" else "--no-admin"}
|
||||
'') (lib.attrValues cfg.users);
|
||||
in
|
||||
{
|
||||
path = [ pkgs.curl ];
|
||||
serviceConfig.ExecStartPre = lib.mkBefore [
|
||||
"+${pkgs.coreutils}/bin/install -o matrix-synapse -g matrix-synapse ${
|
||||
lib.escapeShellArg
|
||||
config.clan.core.vars.generators.matrix-synapse.files."synapse-registration_shared_secret".path
|
||||
} /run/synapse-registration-shared-secret"
|
||||
];
|
||||
serviceConfig.ExecStartPost = [
|
||||
''+${pkgs.writeShellScript "matrix-synapse-create-users" usersScript}''
|
||||
];
|
||||
};
|
||||
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
virtualHosts = {
|
||||
"${cfg.server_tld}" = {
|
||||
locations."= /.well-known/matrix/server".extraConfig = ''
|
||||
add_header Content-Type application/json;
|
||||
return 200 '${builtins.toJSON { "m.server" = "${cfg.app_domain}:443"; }}';
|
||||
'';
|
||||
locations."= /.well-known/matrix/client".extraConfig = ''
|
||||
add_header Content-Type application/json;
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
return 200 '${
|
||||
builtins.toJSON {
|
||||
"m.homeserver" = {
|
||||
"base_url" = "https://${cfg.app_domain}";
|
||||
};
|
||||
"m.identity_server" = {
|
||||
"base_url" = "https://vector.im";
|
||||
};
|
||||
}
|
||||
}';
|
||||
'';
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
};
|
||||
"${cfg.app_domain}" = {
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
locations."/".root = element-web;
|
||||
locations."/_matrix".proxyPass = "http://localhost:8008"; # TODO: We should make the port configurable
|
||||
locations."/_synapse".proxyPass = "http://localhost:8008";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
5
clanModules/moonlight/README.md
Normal file
5
clanModules/moonlight/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
description = "A desktop streaming client optimized for remote gaming and synchronized movie viewing."
|
||||
---
|
||||
|
||||
**Warning**: This module was written with our VM integration in mind and likely won't work outside of this context. It will be generalized in the future.
|
||||
91
clanModules/moonlight/default.nix
Normal file
91
clanModules/moonlight/default.nix
Normal file
@@ -0,0 +1,91 @@
|
||||
{ pkgs, config, ... }:
|
||||
let
|
||||
ms-accept = pkgs.callPackage ../../pkgs/moonlight-sunshine-accept { };
|
||||
defaultPort = 48011;
|
||||
in
|
||||
{
|
||||
warnings = [
|
||||
"The clan.moonlight module is deprecated and will be removed on 2025-07-15. Please migrate to user-maintained configuration."
|
||||
];
|
||||
|
||||
hardware.opengl.enable = true;
|
||||
environment.systemPackages = [
|
||||
pkgs.moonlight-qt
|
||||
ms-accept
|
||||
];
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d '/var/lib/moonlight' 0770 'user' 'users' - -"
|
||||
"C '/var/lib/moonlight/moonlight.cert' 0644 'user' 'users' - ${
|
||||
config.clan.core.vars.generators.moonlight.files."moonlight.cert".path or ""
|
||||
}"
|
||||
"C '/var/lib/moonlight/moonlight.key' 0644 'user' 'users' - ${
|
||||
config.clan.core.vars.generators.moonlight.files."moonlight.key".path or ""
|
||||
}"
|
||||
];
|
||||
|
||||
systemd.user.services.init-moonlight = {
|
||||
enable = false;
|
||||
description = "Initializes moonlight";
|
||||
wantedBy = [ "graphical-session.target" ];
|
||||
script = ''
|
||||
${ms-accept}/bin/moonlight-sunshine-accept moonlight init-config --key /var/lib/moonlight/moonlight.key --cert /var/lib/moonlight/moonlight.cert
|
||||
'';
|
||||
serviceConfig = {
|
||||
user = "user";
|
||||
Type = "oneshot";
|
||||
WorkingDirectory = "/home/user/";
|
||||
RuntimeDirectory = "moonlight";
|
||||
TimeoutSec = "infinity";
|
||||
Restart = "on-failure";
|
||||
RemainAfterExit = true;
|
||||
ReadOnlyPaths = [
|
||||
"/var/lib/moonlight/moonlight.key"
|
||||
"/var/lib/moonlight/moonlight.cert"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
systemd.user.services.moonlight-join = {
|
||||
description = "Join sunshine hosts";
|
||||
script = ''${ms-accept}/bin/moonlight-sunshine-accept moonlight join --port ${builtins.toString defaultPort} --cert '${
|
||||
config.clan.core.vars.generators.moonlight.files."moonlight.cert".value or ""
|
||||
}' --host fd2e:25da:6035:c98f:cd99:93e0:b9b8:9ca1'';
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
TimeoutSec = "infinity";
|
||||
Restart = "on-failure";
|
||||
ReadOnlyPaths = [
|
||||
"/var/lib/moonlight/moonlight.key"
|
||||
"/var/lib/moonlight/moonlight.cert"
|
||||
];
|
||||
};
|
||||
};
|
||||
systemd.user.timers.moonlight-join = {
|
||||
description = "Join sunshine hosts";
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnUnitActiveSec = "5min";
|
||||
OnBootSec = "0min";
|
||||
Persistent = true;
|
||||
Unit = "moonlight-join.service";
|
||||
};
|
||||
};
|
||||
|
||||
clan.core.vars.generators.moonlight = {
|
||||
migrateFact = "moonlight";
|
||||
files."moonlight.key" = { };
|
||||
files."moonlight.cert" = { };
|
||||
files."moonlight.cert".secret = false;
|
||||
runtimeInputs = [
|
||||
pkgs.coreutils
|
||||
ms-accept
|
||||
];
|
||||
script = ''
|
||||
moonlight-sunshine-accept moonlight init
|
||||
mv credentials/cakey.pem "$out"/moonlight.key
|
||||
|
||||
mv credentials/cacert.pem "$out"/moonlight.cert
|
||||
'';
|
||||
};
|
||||
}
|
||||
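A minimal usage sketch, not part of this changeset: a machine configuration could import this (deprecated) module directly. The relative import path is an assumption and depends on where your configuration lives.

```nix
{
  # Hypothetical: import the deprecated moonlight module into a machine config
  imports = [ ../../clanModules/moonlight ];
}
```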
19
clanModules/mumble/README.md
Normal file
@@ -0,0 +1,19 @@
---
description = "Open Source, Low Latency, High Quality Voice Chat."
categories = ["Audio", "Social"]
features = [ "inventory" ]

[constraints]
roles.server.min = 1
---

The mumble clan module gives you:

- True low latency voice communication.
- Secure, authenticated encryption.
- Free software.
- Backed by a large and active open-source community.

This is all set up in a way that allows peer-to-peer hosting:
every machine inside the clan can be a host for mumble,
so it doesn't matter who in the network is online - as long as two people are online, they are able to chat with each other. An example inventory configuration is sketched below.
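A minimal sketch of enabling this module through the inventory, following the same pattern the mycelium README in this diff uses; the instance name `default` and the machine names `jon` and `sara` are placeholders:

```nix
mumble.default = {
  roles.server.machines = [
    "jon"
    "sara"
  ];
};
```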
6
clanModules/mumble/default.nix
Normal file
@@ -0,0 +1,6 @@
# Don't import this file.
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
  imports = [ ./roles/server.nix ];
}
247
clanModules/mumble/roles/mumble-populate-channels.py
Normal file
@@ -0,0 +1,247 @@
import argparse
import json
import sqlite3
from pathlib import Path


def ensure_config(path: Path, db_path: Path) -> None:
    # Default JSON structure if the file doesn't exist
    default_json = {
        "misc": {
            "audio_wizard_has_been_shown": True,
            "database_location": str(db_path),
            "viewed_server_ping_consent_message": True,
        },
        "settings_version": 1,
    }

    # Check if the file exists
    if path.exists():
        data = json.loads(path.read_text())
    else:
        data = default_json
        # Create the file with default JSON structure
        with path.open("w") as file:
            json.dump(data, file, indent=4)

    # TODO: make sure to only update the diff
    updated_data = {**default_json, **data}

    # Write the modified JSON object back to the file
    with path.open("w") as file:
        json.dump(updated_data, file, indent=4)


def initialize_database(db_location: str) -> None:
    """
    Initializes the database. If the database or the servers table does not exist, it creates them.

    :param db_location: The path to the SQLite database
    """
    conn = sqlite3.connect(db_location)
    try:
        cursor = conn.cursor()

        # Create the servers table if it doesn't exist
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS servers (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT NOT NULL,
                hostname TEXT NOT NULL,
                port INTEGER NOT NULL,
                username TEXT NOT NULL,
                password TEXT NOT NULL,
                url TEXT
            )
        """)

        # Commit the changes
        conn.commit()

    except sqlite3.Error as e:
        print(f"An error occurred while initializing the database: {e}")
    finally:
        conn.close()


def initialize_certificates(
    db_location: str, hostname: str, port: int, digest: str
) -> None:
    # Connect to the SQLite database
    conn = sqlite3.connect(db_location)

    try:
        # Create a cursor object
        cursor = conn.cursor()

        # TODO: check if cert already there
        # if server_check(cursor, name, hostname):
        #     print(
        #         f"Server with name '{name}' and hostname '{hostname}' already exists."
        #     )
        #     return

        # SQL command to insert data into the cert table
        insert_query = """
            INSERT INTO cert (hostname, port, digest)
            VALUES (?, ?, ?)
        """

        # Data to be inserted
        data = (hostname, port, digest)

        # Execute the insert command with the provided data
        cursor.execute(insert_query, data)

        # Commit the changes
        conn.commit()

        print("Data has been successfully inserted.")
    except sqlite3.Error as e:
        print(f"An error occurred: {e}")
    finally:
        # Close the connection
        conn.close()


def calculate_digest(cert: str) -> str:
    from cryptography import x509
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes

    cert_bytes = cert.strip().encode("utf-8")
    parsed_cert = x509.load_pem_x509_certificate(cert_bytes, default_backend())
    digest = parsed_cert.fingerprint(hashes.SHA1()).hex()
    return digest


def server_check(cursor: sqlite3.Cursor, name: str, hostname: str) -> bool:
    """
    Check if a server with the given name and hostname already exists.

    :param cursor: The database cursor
    :param name: The name of the server
    :param hostname: The hostname of the server
    :return: True if the server exists, False otherwise
    """
    check_query = """
        SELECT 1 FROM servers WHERE name = ? AND hostname = ?
    """
    cursor.execute(check_query, (name, hostname))
    return cursor.fetchone() is not None


def insert_server(
    name: str,
    hostname: str,
    port: int,
    username: str,
    password: str,
    url: str,
    db_location: str,
) -> None:
    """
    Inserts a new server record into the servers table.

    :param name: The name of the server
    :param hostname: The hostname of the server
    :param port: The port number
    :param username: The username
    :param password: The password
    :param url: The URL
    """
    # Connect to the SQLite database
    conn = sqlite3.connect(db_location)

    try:
        # Create a cursor object
        cursor = conn.cursor()

        if server_check(cursor, name, hostname):
            print(
                f"Server with name '{name}' and hostname '{hostname}' already exists."
            )
            return

        # SQL command to insert data into the servers table
        insert_query = """
            INSERT INTO servers (name, hostname, port, username, password, url)
            VALUES (?, ?, ?, ?, ?, ?)
        """

        # Data to be inserted
        data = (name, hostname, port, username, password, url)

        # Execute the insert command with the provided data
        cursor.execute(insert_query, data)

        # Commit the changes
        conn.commit()

        print("Data has been successfully inserted.")
    except sqlite3.Error as e:
        print(f"An error occurred: {e}")
    finally:
        # Close the connection
        conn.close()


if __name__ == "__main__":
    port = 64738
    password = ""
    url = None

    parser = argparse.ArgumentParser(
        prog="initialize_mumble",
    )

    subparser = parser.add_subparsers(dest="certificates")
    # cert_parser = subparser.add_parser("certificates")

    parser.add_argument("--cert")
    parser.add_argument("--digest")
    parser.add_argument("--machines")
    parser.add_argument("--servers")
    parser.add_argument("--username")
    parser.add_argument("--db-location")
    parser.add_argument("--ensure-config", type=Path)
    args = parser.parse_args()

    print(args)

    if args.ensure_config:
        ensure_config(args.ensure_config, args.db_location)
        print("Initialized config")
        exit(0)

    if args.servers:
        print(args.servers)
        servers = json.loads(f"{args.servers}")
        db_location = args.db_location
        for server in servers:
            digest = calculate_digest(server.get("value"))
            name = server.get("name")
            initialize_certificates(db_location, name, port, digest)
        print("Initialized certificates")
        exit(0)

    initialize_database(args.db_location)

    # Insert the server into the database
    print(args.machines)
    machines = json.loads(f"{args.machines}")
    print(machines)
    print(list(machines))

    for machine in list(machines):
        print(f"Inserting {machine}.")
        insert_server(
            machine,
            machine,
            port,
            args.username,
            password,
            url,
            args.db_location,
        )
150
clanModules/mumble/roles/server.nix
Normal file
@@ -0,0 +1,150 @@
{
  lib,
  config,
  pkgs,
  ...
}:
let
  dir = config.clan.core.settings.directory;
  # TODO: this should actually use the inventory to figure out which machines to use.
  machineDir = dir + "/vars/per-machine";
  machinesFileSet = builtins.readDir machineDir;
  machines = lib.mapAttrsToList (name: _: name) machinesFileSet;
  machineJson = builtins.toJSON machines;
  certificateMachinePath = machine: machineDir + "/${machine}" + "/mumble/mumble-cert/value";
  certificatesUnchecked = builtins.map (
    machine:
    let
      fullPath = certificateMachinePath machine;
    in
    if builtins.pathExists fullPath then machine else null
  ) machines;
  certificate = lib.filter (machine: machine != null) certificatesUnchecked;
  machineCert = builtins.map (
    machine: (lib.nameValuePair machine (builtins.readFile (certificateMachinePath machine)))
  ) certificate;
  machineCertJson = builtins.toJSON machineCert;

in
{
  options.clan.services.mumble = {
    user = lib.mkOption {
      type = lib.types.nullOr lib.types.str;
      default = null;
      example = "alice";
      description = "The user mumble should be set up for.";
    };
  };

  config = {

    warnings = [
      "The clan.mumble module is deprecated and will be removed on 2025-07-15.
      Please migrate to user-maintained configuration or the new equivalent clan services
      (https://docs.clan.lol/reference/clanServices)."
    ];

    services.murmur = {
      enable = true;
      logDays = -1;
      registerName = config.clan.core.settings.machine.name;
      openFirewall = true;
      bonjour = true;
      sslKey = "/var/lib/murmur/sslKey";
      sslCert = "/var/lib/murmur/sslCert";
    };

    clan.core.state.mumble.folders = [
      "/var/lib/mumble"
      "/var/lib/murmur"
    ];

    systemd.tmpfiles.rules = [
      "d '/var/lib/mumble' 0770 '${config.clan.services.mumble.user}' 'users' - -"
    ];

    systemd.tmpfiles.settings."murmur" = {
      "/var/lib/murmur/sslKey" = {
        C.argument = config.clan.core.vars.generators.mumble.files.mumble-key.path;
        Z = {
          mode = "0400";
          user = "murmur";
        };
      };
      "/var/lib/murmur/sslCert" = {
        C.argument = config.clan.core.vars.generators.mumble.files.mumble-cert.path;
        Z = {
          mode = "0400";
          user = "murmur";
        };
      };
    };

    environment.systemPackages =
      let
        mumbleCfgDir = "/var/lib/mumble";
        mumbleDatabasePath = "${mumbleCfgDir}/mumble.sqlite";
        mumbleCfgPath = "/var/lib/mumble/mumble_settings.json";
        populate-channels = pkgs.writers.writePython3 "mumble-populate-channels" {
          libraries = [
            pkgs.python3Packages.cryptography
            pkgs.python3Packages.pyopenssl
          ];
          flakeIgnore = [
            # We don't live in the dark ages anymore.
            # Languages like Python that are whitespace heavy will overrun
            # 79 characters.
            "E501"
          ];
        } (builtins.readFile ./mumble-populate-channels.py);
        mumble = pkgs.writeShellScriptBin "mumble" ''
          set -xeu
          mkdir -p ${mumbleCfgDir}
          pushd "${mumbleCfgDir}"
          XDG_DATA_HOME=${mumbleCfgDir}
          XDG_DATA_DIR=${mumbleCfgDir}
          ${populate-channels} --ensure-config '${mumbleCfgPath}' --db-location ${mumbleDatabasePath}
          ${populate-channels} --machines '${machineJson}' --username ${config.clan.core.settings.machine.name} --db-location ${mumbleDatabasePath}
          ${populate-channels} --servers '${machineCertJson}' --username ${config.clan.core.settings.machine.name} --db-location ${mumbleDatabasePath} --cert True
          ${pkgs.mumble}/bin/mumble --config ${mumbleCfgPath} "$@"
          popd
        '';
      in
      [ mumble ];

    clan.core.vars.generators.mumble = {
      migrateFact = "mumble";
      files.mumble-key = { };
      files.mumble-cert.secret = false;
      runtimeInputs = [
        pkgs.coreutils
        pkgs.openssl
      ];
      script = ''
        openssl genrsa -out "$out/mumble-key" 2048

        cat > mumble-cert.conf <<EOF
        [ req ]
        default_bits = 2048
        distinguished_name = req_distinguished_name
        req_extensions = req_ext
        prompt = no
        [ req_distinguished_name ]
        C = "US"
        ST = "California"
        L = "San Francisco"
        O = "Clan"
        OU = "Clan"
        CN = "${config.clan.core.settings.machine.name}"
        [ req_ext ]
        subjectAltName = @alt_names
        [ alt_names ]
        DNS.1 = "${config.clan.core.settings.machine.name}"
        EOF

        openssl req -new -x509 -config mumble-cert.conf -key "$out/mumble-key" -out "$out/mumble-cert" < /dev/null
      '';
    };
  };

}
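A brief usage sketch, not taken from this changeset: the `user` option defined above could be set in a machine configuration like the following. The user name `alice` is just the placeholder from the option's own example.

```nix
{
  # Hypothetical: the user that mumble state directories are owned by
  clan.services.mumble.user = "alice";
}
```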
30
clanModules/mycelium/README.md
Normal file
@@ -0,0 +1,30 @@
---
description = "End-2-end encrypted IPv6 overlay network"
categories = ["System", "Network"]
features = [ "inventory", "deprecated" ]
---
Mycelium is an IPv6 overlay network written in Rust. Each node that joins the overlay network will receive an overlay network IP in the 400::/7 range.

Features:
- Mycelium is locality aware: it will look for the shortest path between nodes
- All traffic between the nodes is end-to-end encrypted
- Traffic can be routed over nodes of friends, in a location-aware way
- If a physical link goes down, Mycelium will automatically reroute your traffic
- The IP address is IPv6 and linked to the private key
- A simple, reliable message bus is implemented on top of Mycelium
- Mycelium supports multiple transports (QUIC, TCP, ...), and we are working on hole punching for QUIC, which means P2P traffic without middlemen for NATted networks (e.g. most homes)
- Scalability is very important for us; we tried many overlay networks before and got stuck on all of them, so we are designing a network that scales to a planetary level
- You can run mycelium without TUN and only use it as a reliable message bus.


An example configuration might look like this in the inventory:
```nix
mycelium.default = {
  roles.peer.machines = [
    "berlin"
    "munich"
  ];
};
```

This will add the machines named `berlin` and `munich` to the `mycelium` VPN.
51
clanModules/mycelium/roles/peer.nix
Normal file
@@ -0,0 +1,51 @@
{
  pkgs,
  config,
  lib,
  ...
}:
{
  options = {
    clan.mycelium.openFirewall = lib.mkOption {
      type = lib.types.bool;
      default = true;
      description = "Open the firewall for mycelium";
    };

    clan.mycelium.addHostedPublicNodes = lib.mkOption {
      type = lib.types.bool;
      default = true;
      description = "Add hosted public nodes";
    };
  };

  config.warnings = [
    "The clan.mycelium module is deprecated and will be removed on 2025-07-15.
    Please migrate to user-maintained configuration or the new equivalent clan services
    (https://docs.clan.lol/reference/clanServices)."
  ];

  config.services.mycelium = {
    enable = true;
    addHostedPublicNodes = lib.mkDefault config.clan.mycelium.addHostedPublicNodes;
    openFirewall = lib.mkDefault config.clan.mycelium.openFirewall;
    keyFile = config.clan.core.vars.generators.mycelium.files.key.path;
  };

  config.clan.core.vars.generators.mycelium = {
    files."key" = { };
    files."ip".secret = false;
    files."pubkey".secret = false;
    runtimeInputs = [
      pkgs.mycelium
      pkgs.coreutils
      pkgs.jq
    ];
    script = ''
      timeout 5 mycelium --key-file "$out"/key || :
      mycelium inspect --key-file "$out"/key --json | jq -r .publicKey > "$out"/pubkey
      mycelium inspect --key-file "$out"/key --json | jq -r .address > "$out"/ip
    '';
  };

}
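Both options above default to `true`. A minimal sketch of overriding them in a machine configuration; the chosen values are only illustrative:

```nix
{
  # Keep the firewall open for mycelium, but do not add the hosted public nodes
  clan.mycelium.openFirewall = true;
  clan.mycelium.addHostedPublicNodes = false;
}
```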
3
clanModules/nginx/README.md
Normal file
@@ -0,0 +1,3 @@
---
description = "Good defaults for the nginx webserver"
---
68
clanModules/nginx/default.nix
Normal file
@@ -0,0 +1,68 @@
{ config, lib, ... }:

{

  imports = [
    (lib.mkRemovedOptionModule [
      "clan"
      "nginx"
      "enable"
    ] "Importing the module will already enable the service.")

  ];
  options = {
    clan.nginx.acme.email = lib.mkOption {
      type = lib.types.str;
      description = ''
        Email address for account creation and correspondence from the CA.
        It is recommended to use the same email for all certs to avoid account
        creation limits.
      '';
    };
  };
  config = {
    security.acme.acceptTerms = true;
    security.acme.defaults.email = config.clan.nginx.acme.email;

    networking.firewall.allowedTCPPorts = [
      443
      80
    ];

    services.nginx = {
      enable = true;

      statusPage = lib.mkDefault true;
      recommendedBrotliSettings = lib.mkDefault true;
      recommendedGzipSettings = lib.mkDefault true;
      recommendedOptimisation = lib.mkDefault true;
      recommendedProxySettings = lib.mkDefault true;
      recommendedTlsSettings = lib.mkDefault true;

      # By default nginx sends all access logs to /var/log/nginx/access.log
      # instead of the journal, so route them to syslog explicitly.
      commonHttpConfig = "access_log syslog:server=unix:/dev/log;";

      resolver.addresses =
        let
          isIPv6 = addr: builtins.match ".*:.*:.*" addr != null;
          escapeIPv6 = addr: if isIPv6 addr then "[${addr}]" else addr;
          cloudflare = [
            "1.1.1.1"
            "2606:4700:4700::1111"
          ];
          resolvers =
            if config.networking.nameservers == [ ] then cloudflare else config.networking.nameservers;
        in
        map escapeIPv6 resolvers;

      sslDhparam = config.security.dhparams.params.nginx.path;
    };

    security.dhparams = {
      enable = true;
      params.nginx = { };
    };
  };

}
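A minimal usage sketch, not part of this changeset: importing the module and setting the required `clan.nginx.acme.email` option in a machine configuration. The relative import path and the e-mail address are placeholders.

```nix
{
  # Hypothetical machine configuration using the nginx defaults module
  imports = [ ../../clanModules/nginx ];
  clan.nginx.acme.email = "admin@example.com";
}
```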
5
clanModules/packages/README.md
Normal file
@@ -0,0 +1,5 @@
---
description = "Define package sets from nixpkgs and install them on one or more machines"
categories = ["System"]
features = [ "inventory", "deprecated" ]
---
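A usage sketch, not part of this changeset: an inventory entry for this module could look like the following, mirroring the shape of the single-disk example further down in this diff. The machine name `jon` and the package names are placeholders.

```nix
packages.default = {
  roles.default.machines = [ "jon" ];
  machines.jon.config.packages = [
    "htop"
    "python3Packages.requests"
  ];
};
```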
6
clanModules/packages/default.nix
Normal file
@@ -0,0 +1,6 @@
# Don't import this file.
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
  imports = [ ./roles/default.nix ];
}
25
clanModules/packages/roles/default.nix
Normal file
@@ -0,0 +1,25 @@
{
  config,
  lib,
  pkgs,
  ...
}:
{
  options.clan.packages = {
    packages = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      description = "The packages to install on the machine";
    };
  };
  config = {

    warnings = [
      "The clan.packages module is deprecated and will be removed on 2025-07-15.
      Please migrate to user-maintained configuration or the new equivalent clan services
      (https://docs.clan.lol/reference/clanServices)."
    ];
    environment.systemPackages = map (
      pName: lib.getAttrFromPath (lib.splitString "." pName) pkgs
    ) config.clan.packages.packages;
  };
}
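Because each entry is split on `.` and resolved with `lib.getAttrFromPath`, nested attribute paths from nixpkgs work as well as top-level package names. A small sketch; the package names are only illustrative:

```nix
{
  # "htop" resolves to pkgs.htop,
  # "python3Packages.requests" resolves to pkgs.python3Packages.requests
  clan.packages.packages = [
    "htop"
    "python3Packages.requests"
  ];
}
```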
3
clanModules/postgresql/README.md
Normal file
@@ -0,0 +1,3 @@
---
description = "A free and open-source relational database management system (RDBMS) emphasizing extensibility and SQL compliance."
---
9
clanModules/postgresql/default.nix
Normal file
@@ -0,0 +1,9 @@
{ lib, ... }:
{
  imports = [
    (lib.mkRemovedOptionModule [
      "clan"
      "postgresql"
    ] "The postgresql module has been migrated to a clan core option. Use clan.core.postgresql instead")
  ];
}
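A minimal sketch of the migration hinted at by the removal message above. Whether `clan.core.postgresql` exposes an `enable` flag is an assumption here, not something this changeset shows:

```nix
{
  # Assumed replacement for the removed clanModules.postgresql import
  clan.core.postgresql.enable = true;
}
```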
20
clanModules/root-password/README.md
Normal file
@@ -0,0 +1,20 @@
---
description = "Automatically generates and configures a password for the root user."
categories = ["System"]
features = ["inventory", "deprecated"]
---

This module is deprecated and will be removed in a future release. Its functionality has been replaced by the user-password service.

After the system has been installed/deployed, the following command can be used to display the root password:

```bash
clan vars get [machine_name] root-password/root-password
```

See also: [Vars](../../concepts/generators.md)

To regenerate the password run:
```bash
clan vars generate --regenerate [machine_name] --generator root-password
```
6
clanModules/root-password/default.nix
Normal file
@@ -0,0 +1,6 @@
# Don't import this file.
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
  imports = [ ./roles/default.nix ];
}
50
clanModules/root-password/roles/default.nix
Normal file
@@ -0,0 +1,50 @@
{
  _class,
  pkgs,
  config,
  lib,
  ...
}:
{

  warnings = [
    "The clan.root-password module is deprecated and will be removed on 2025-07-15.
    Please migrate to user-maintained configuration or the new equivalent clan services
    (https://docs.clan.lol/reference/clanServices)."
  ];

  users.mutableUsers = false;
  users.users.root.hashedPasswordFile =
    config.clan.core.vars.generators.root-password.files.password-hash.path;

  clan.core.vars.generators.root-password = {
    files.password-hash = {
      neededFor = "users";
    }
    // (lib.optionalAttrs (_class == "nixos") {
      restartUnits = lib.optional (config.services.userborn.enable) "userborn.service";
    });
    files.password = {
      deploy = false;
    };
    migrateFact = "root-password";
    runtimeInputs = [
      pkgs.coreutils
      pkgs.mkpasswd
      pkgs.xkcdpass
    ];
    prompts.password.type = "hidden";
    prompts.password.persist = true;
    prompts.password.description = "You can autogenerate a password, if you leave this prompt blank.";

    script = ''
      prompt_value="$(cat "$prompts"/password)"
      if [[ -n "''${prompt_value-}" ]]; then
        echo "$prompt_value" | tr -d "\n" > "$out"/password
      else
        xkcdpass --numwords 4 --delimiter - --count 1 | tr -d "\n" > "$out"/password
      fi
      mkpasswd -s -m sha-512 < "$out"/password | tr -d "\n" > "$out"/password-hash
    '';
  };
}
43
clanModules/single-disk/README.md
Normal file
@@ -0,0 +1,43 @@
---
description = "Configures partitioning of the main disk"
categories = ["System"]
features = [ "inventory" ]
---
# Primary Disk Layout

A module for the "disk-layout" category MUST be chosen.

There is exactly one slot for this type of module in the UI. If you don't fill the slot, your machine cannot boot.

This module is a good choice for most machines. In the future clan will offer a broader choice of disk layouts.

The UI will ask for the options of this module:

`device: "/dev/null"`

# Usage example

`inventory.json`
```json
"services": {
  "single-disk": {
    "default": {
      "meta": {
        "name": "single-disk"
      },
      "roles": {
        "default": {
          "machines": ["jon"]
        }
      },
      "machines": {
        "jon": {
          "config": {
            "device": "/dev/null"
          }
        }
      }
    }
  }
}
```
3
clanModules/single-disk/default.nix
Normal file
@@ -0,0 +1,3 @@
{
  imports = [ ./roles/default.nix ];
}
56
clanModules/single-disk/roles/default.nix
Normal file
@@ -0,0 +1,56 @@
{ lib, config, ... }:
{
  options.clan.single-disk = {
    device = lib.mkOption {
      default = null;
      type = lib.types.nullOr lib.types.str;
      description = "The primary disk device to install the system on";
    };
  };
  config = {
    warnings = [
      "clanModules.single-disk is deprecated. Please copy the disko config from the module into your machine config."
    ];

    boot.loader.grub.efiSupport = lib.mkDefault true;
    boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
    disko.devices = {
      disk = {
        main = {
          type = "disk";
          # This is set through the UI
          device = config.clan.single-disk.device;

          content = {
            type = "gpt";
            partitions = {
              boot = {
                size = "1M";
                type = "EF02"; # for grub MBR
                priority = 1;
              };
              ESP = {
                size = "512M";
                type = "EF00";
                content = {
                  type = "filesystem";
                  format = "vfat";
                  mountpoint = "/boot";
                  mountOptions = [ "umask=0077" ];
                };
              };
              root = {
                size = "100%";
                content = {
                  type = "filesystem";
                  format = "ext4";
                  mountpoint = "/";
                };
              };
            };
          };
        };
      };
    };
  };
}
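A minimal sketch of setting the `device` option directly in a machine configuration instead of through the UI; the disk path shown is only illustrative:

```nix
{
  # Hypothetical: point the single-disk layout at a concrete disk
  clan.single-disk.device = "/dev/disk/by-id/nvme-example";
}
```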
11
clanModules/sshd/README.md
Normal file
@@ -0,0 +1,11 @@
---
description = "Enables secure remote access to the machine over ssh."
categories = ["System", "Network"]
features = [ "inventory", "deprecated" ]
---

This module will set up the OpenSSH server (sshd) service.
It will generate a host key for each machine.


## Roles
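An inventory sketch assigning machines to the roles this module provides (server and client, per the role files in this changeset); the instance name and machine names are placeholders:

```nix
sshd.default = {
  roles.server.machines = [ "jon" ];
  roles.client.machines = [ "sara" ];
};
```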
6
clanModules/sshd/default.nix
Normal file
@@ -0,0 +1,6 @@
# Don't import this file.
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
  imports = [ ./roles/server.nix ];
}
6
clanModules/sshd/roles/client.nix
Normal file
@@ -0,0 +1,6 @@
{ ... }:
{
  imports = [
    ../shared.nix
  ];
}
Some files were not shown because too many files have changed in this diff.