diff --git a/checks/backups/flake-module.nix b/checks/backups/flake-module.nix new file mode 100644 index 000000000..67e744718 --- /dev/null +++ b/checks/backups/flake-module.nix @@ -0,0 +1,210 @@ +{ self, ... }: +{ + clan.machines.test-backup = { + imports = [ self.nixosModules.test-backup ]; + fileSystems."/".device = "/dev/null"; + boot.loader.grub.device = "/dev/null"; + }; + clan.inventory.services = { + borgbackup.test-backup = { + roles.client.machines = [ "test-backup" ]; + roles.server.machines = [ "test-backup" ]; + }; + }; + flake.nixosModules = { + test-backup = + { + pkgs, + lib, + ... + }: + let + dependencies = + [ + pkgs.stdenv.drvPath + ] + ++ builtins.map (i: i.outPath) (builtins.attrValues (builtins.removeAttrs self.inputs [ "self" ])); + closureInfo = pkgs.closureInfo { rootPaths = dependencies; }; + in + { + imports = [ + # Do not import inventory modules. They should be configured via 'clan.inventory' + # + # TODO: Configure localbackup via inventory + self.clanModules.localbackup + ]; + # Borgbackup overrides + services.borgbackup.repos.test-backups = { + path = "/var/lib/borgbackup/test-backups"; + authorizedKeys = [ (builtins.readFile ../assets/ssh/pubkey) ]; + }; + clan.borgbackup.destinations.test-backup.repo = lib.mkForce "borg@machine:."; + + clan.core.networking.targetHost = "machine"; + networking.hostName = "machine"; + + programs.ssh.knownHosts = { + machine.hostNames = [ "machine" ]; + machine.publicKey = builtins.readFile ../assets/ssh/pubkey; + }; + + services.openssh = { + enable = true; + settings.UsePAM = false; + settings.UseDns = false; + hostKeys = [ + { + path = "/root/.ssh/id_ed25519"; + type = "ed25519"; + } + ]; + }; + + users.users.root.openssh.authorizedKeys.keyFiles = [ ../assets/ssh/pubkey ]; + + # This is needed to unlock the user for sshd + # Because we use sshd without setuid binaries + users.users.borg.initialPassword = "hello"; + + systemd.tmpfiles.settings."vmsecrets" = { + "/root/.ssh/id_ed25519" = { + 
C.argument = "${../assets/ssh/privkey}"; + z = { + mode = "0400"; + user = "root"; + }; + }; + "/etc/secrets/ssh.id_ed25519" = { + C.argument = "${../assets/ssh/privkey}"; + z = { + mode = "0400"; + user = "root"; + }; + }; + "/etc/secrets/borgbackup/borgbackup.ssh" = { + C.argument = "${../assets/ssh/privkey}"; + z = { + mode = "0400"; + user = "root"; + }; + }; + "/etc/secrets/borgbackup/borgbackup.repokey" = { + C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345"); + z = { + mode = "0400"; + user = "root"; + }; + }; + }; + clan.core.facts.secretStore = "vm"; + clan.core.vars.settings.secretStore = "vm"; + + environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ]; + environment.etc.install-closure.source = "${closureInfo}/store-paths"; + nix.settings = { + substituters = lib.mkForce [ ]; + hashed-mirrors = null; + connect-timeout = lib.mkForce 3; + flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}''; + }; + system.extraDependencies = dependencies; + clan.core.state.test-backups.folders = [ "/var/test-backups" ]; + + clan.core.state.test-service = { + preBackupScript = '' + touch /var/test-service/pre-backup-command + ''; + preRestoreScript = '' + touch /var/test-service/pre-restore-command + ''; + postRestoreScript = '' + touch /var/test-service/post-restore-command + ''; + folders = [ "/var/test-service" ]; + }; + + fileSystems."/mnt/external-disk" = { + device = "/dev/vdb"; # created in tests with virtualisation.emptyDisks + autoFormat = true; + fsType = "ext4"; + options = [ + "defaults" + "noauto" + ]; + }; + + clan.localbackup.targets.hdd = { + directory = "/mnt/external-disk"; + preMountHook = '' + touch /run/mount-external-disk + ''; + postUnmountHook = '' + touch /run/unmount-external-disk + ''; + }; + }; + }; + perSystem = + { pkgs, ... 
}: + let + clanCore = self.checks.x86_64-linux.clan-core-for-checks; + in + { + checks = pkgs.lib.mkIf pkgs.stdenv.isLinux { + nixos-test-backups = self.clanLib.test.containerTest { + name = "nixos-test-backups"; + nodes.machine = { + imports = + [ + self.nixosModules.clanCore + # Some custom overrides for the backup tests + self.nixosModules.test-backup + ] + ++ + # import the inventory generated nixosModules + self.clan.clanInternals.inventoryClass.machines.test-backup.machineImports; + clan.core.settings.directory = ./.; + }; + + testScript = '' + import json + start_all() + + # dummy data + machine.succeed("mkdir -p /var/test-backups /var/test-service") + machine.succeed("echo testing > /var/test-backups/somefile") + + # create + machine.succeed("clan backups create --debug --flake ${clanCore} test-backup") + machine.wait_until_succeeds("! systemctl is-active borgbackup-job-test-backup >&2") + machine.succeed("test -f /run/mount-external-disk") + machine.succeed("test -f /run/unmount-external-disk") + + # list + backup_id = json.loads(machine.succeed("borg-job-test-backup list --json"))["archives"][0]["archive"] + out = machine.succeed("clan backups list --debug --flake ${clanCore} test-backup").strip() + print(out) + assert backup_id in out, f"backup {backup_id} not found in {out}" + localbackup_id = "hdd::/mnt/external-disk/snapshot.0" + assert localbackup_id in out, f"localbackup not found in {out}" + + ## borgbackup restore + machine.succeed("rm -f /var/test-backups/somefile") + machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup borgbackup 'test-backup::borg@machine:.::{backup_id}' >&2") + assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed" + machine.succeed("test -f /var/test-service/pre-restore-command") + machine.succeed("test -f /var/test-service/post-restore-command") + machine.succeed("test -f /var/test-service/pre-backup-command") + + ## localbackup restore + machine.succeed("rm 
-rf /var/test-backups/somefile /var/test-service/ && mkdir -p /var/test-service") + machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup localbackup '{localbackup_id}' >&2") + assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed" + machine.succeed("test -f /var/test-service/pre-restore-command") + machine.succeed("test -f /var/test-service/post-restore-command") + machine.succeed("test -f /var/test-service/pre-backup-command") + ''; + } { inherit pkgs self; }; + }; + }; +} diff --git a/checks/flake-module.nix b/checks/flake-module.nix index dcd44ad92..eddd8e4b2 100644 --- a/checks/flake-module.nix +++ b/checks/flake-module.nix @@ -81,13 +81,14 @@ in # Base Tests nixos-test-secrets = self.clanLib.test.baseTest ./secrets nixosTestArgs; + nixos-test-borgbackup-legacy = self.clanLib.test.baseTest ./borgbackup-legacy nixosTestArgs; nixos-test-wayland-proxy-virtwl = self.clanLib.test.baseTest ./wayland-proxy-virtwl nixosTestArgs; # Container Tests nixos-test-container = self.clanLib.test.containerTest ./container nixosTestArgs; - # nixos-test-zt-tcp-relay = self.clanLib.test.containerTest ./zt-tcp-relay nixosTestArgs; - # nixos-test-matrix-synapse = self.clanLib.test.containerTest ./matrix-synapse nixosTestArgs; - # nixos-test-postgresql = self.clanLib.test.containerTest ./postgresql nixosTestArgs; + nixos-test-zt-tcp-relay = self.clanLib.test.containerTest ./zt-tcp-relay nixosTestArgs; + nixos-test-matrix-synapse = self.clanLib.test.containerTest ./matrix-synapse nixosTestArgs; + nixos-test-postgresql = self.clanLib.test.containerTest ./postgresql nixosTestArgs; nixos-test-user-firewall-iptables = self.clanLib.test.containerTest ./user-firewall/iptables.nix nixosTestArgs; nixos-test-user-firewall-nftables = self.clanLib.test.containerTest ./user-firewall/nftables.nix nixosTestArgs; diff --git a/checks/service-dummy-test-from-flake/default.nix b/checks/service-dummy-test-from-flake/default.nix index 
afdf04395..324e2f087 100644 --- a/checks/service-dummy-test-from-flake/default.nix +++ b/checks/service-dummy-test-from-flake/default.nix @@ -16,6 +16,7 @@ nixosLib.runTest ( # This tests the compatibility of the inventory # With the test framework + # - legacy-modules # - clan.service modules name = "service-dummy-test-from-flake"; @@ -44,6 +45,9 @@ nixosLib.runTest ( start_all() admin1.wait_for_unit("multi-user.target") peer1.wait_for_unit("multi-user.target") + # Provided by the legacy module + print(admin1.succeed("systemctl status dummy-service")) + print(peer1.succeed("systemctl status dummy-service")) # peer1 should have the 'hello' file peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}") diff --git a/checks/service-dummy-test-from-flake/flake.nix b/checks/service-dummy-test-from-flake/flake.nix index c6d959822..15294917b 100644 --- a/checks/service-dummy-test-from-flake/flake.nix +++ b/checks/service-dummy-test-from-flake/flake.nix @@ -15,6 +15,12 @@ meta.name = "foo"; machines.peer1 = { }; machines.admin1 = { }; + services = { + legacy-module.default = { + roles.peer.machines = [ "peer1" ]; + roles.admin.machines = [ "admin1" ]; + }; + }; instances."test" = { module.name = "new-service"; @@ -22,6 +28,9 @@ roles.peer.machines.peer1 = { }; }; + modules = { + legacy-module = ./legacy-module; + }; }; modules.new-service = { diff --git a/checks/service-dummy-test-from-flake/legacy-module/README.md b/checks/service-dummy-test-from-flake/legacy-module/README.md new file mode 100644 index 000000000..2a72080ce --- /dev/null +++ b/checks/service-dummy-test-from-flake/legacy-module/README.md @@ -0,0 +1,10 @@ +--- +description = "Set up dummy-module" +categories = ["System"] +features = [ "inventory" ] + +[constraints] +roles.admin.min = 1 +roles.admin.max = 1 +--- + diff --git a/checks/service-dummy-test-from-flake/legacy-module/roles/admin.nix b/checks/service-dummy-test-from-flake/legacy-module/roles/admin.nix new file 
mode 100644 index 000000000..a56780406 --- /dev/null +++ b/checks/service-dummy-test-from-flake/legacy-module/roles/admin.nix @@ -0,0 +1,5 @@ +{ + imports = [ + ../shared.nix + ]; +} diff --git a/checks/service-dummy-test-from-flake/legacy-module/roles/peer.nix b/checks/service-dummy-test-from-flake/legacy-module/roles/peer.nix new file mode 100644 index 000000000..a56780406 --- /dev/null +++ b/checks/service-dummy-test-from-flake/legacy-module/roles/peer.nix @@ -0,0 +1,5 @@ +{ + imports = [ + ../shared.nix + ]; +} diff --git a/checks/service-dummy-test-from-flake/legacy-module/shared.nix b/checks/service-dummy-test-from-flake/legacy-module/shared.nix new file mode 100644 index 000000000..92b7418ca --- /dev/null +++ b/checks/service-dummy-test-from-flake/legacy-module/shared.nix @@ -0,0 +1,34 @@ +{ config, ... }: +{ + systemd.services.dummy-service = { + enable = true; + description = "Dummy service"; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + Type = "oneshot"; + RemainAfterExit = true; + }; + script = '' + generated_password_path="${config.clan.core.vars.generators.dummy-generator.files.generated-password.path}" + if [ ! -f "$generated_password_path" ]; then + echo "Generated password file not found: $generated_password_path" + exit 1 + fi + host_id_path="${config.clan.core.vars.generators.dummy-generator.files.host-id.path}" + if [ ! 
-e "$host_id_path" ]; then + echo "Host ID file not found: $host_id_path" + exit 1 + fi + ''; + }; + + # TODO: add and prompt and make it work in the test framework + clan.core.vars.generators.dummy-generator = { + files.host-id.secret = false; + files.generated-password.secret = true; + script = '' + echo $RANDOM > "$out"/host-id + echo $RANDOM > "$out"/generated-password + ''; + }; +} diff --git a/checks/service-dummy-test/default.nix b/checks/service-dummy-test/default.nix index 263133f3a..a771cb541 100644 --- a/checks/service-dummy-test/default.nix +++ b/checks/service-dummy-test/default.nix @@ -15,6 +15,7 @@ nixosLib.runTest ( # This tests the compatibility of the inventory # With the test framework + # - legacy-modules # - clan.service modules name = "service-dummy-test"; @@ -23,6 +24,12 @@ nixosLib.runTest ( inventory = { machines.peer1 = { }; machines.admin1 = { }; + services = { + legacy-module.default = { + roles.peer.machines = [ "peer1" ]; + roles.admin.machines = [ "admin1" ]; + }; + }; instances."test" = { module.name = "new-service"; @@ -30,6 +37,9 @@ nixosLib.runTest ( roles.peer.machines.peer1 = { }; }; + modules = { + legacy-module = ./legacy-module; + }; }; modules.new-service = { _class = "clan.service"; @@ -68,6 +78,9 @@ nixosLib.runTest ( start_all() admin1.wait_for_unit("multi-user.target") peer1.wait_for_unit("multi-user.target") + # Provided by the legacy module + print(admin1.succeed("systemctl status dummy-service")) + print(peer1.succeed("systemctl status dummy-service")) # peer1 should have the 'hello' file peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}") diff --git a/clanModules/admin/README.md b/clanModules/admin/README.md new file mode 100644 index 000000000..c27c8d19b --- /dev/null +++ b/clanModules/admin/README.md @@ -0,0 +1,5 @@ +--- +description = "Convenient Administration for the Clan App" +categories = ["Utility"] +features = [ "inventory", "deprecated" ] +--- diff --git 
a/clanModules/admin/default.nix b/clanModules/admin/default.nix new file mode 100644 index 000000000..8fdf356eb --- /dev/null +++ b/clanModules/admin/default.nix @@ -0,0 +1,3 @@ +{ + imports = [ ./roles/default.nix ]; +} diff --git a/clanModules/admin/roles/default.nix b/clanModules/admin/roles/default.nix new file mode 100644 index 000000000..067d47c76 --- /dev/null +++ b/clanModules/admin/roles/default.nix @@ -0,0 +1,30 @@ +{ lib, config, ... }: +{ + + options.clan.admin = { + allowedKeys = lib.mkOption { + default = { }; + type = lib.types.attrsOf lib.types.str; + description = "The allowed public keys for ssh access to the admin user"; + example = { + "key_1" = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD..."; + }; + }; + }; + # Bad practice. + # Should we add 'clanModules' to specialArgs? + imports = [ + ../../sshd + ../../root-password + ]; + config = { + + warnings = [ + "The clan.admin module is deprecated and will be removed on 2025-07-15. + Please migrate to user-maintained configuration or the new equivalent clan services + (https://docs.clan.lol/reference/clanServices)." + ]; + + users.users.root.openssh.authorizedKeys.keys = builtins.attrValues config.clan.admin.allowedKeys; + }; +} diff --git a/clanModules/auto-upgrade/README.md b/clanModules/auto-upgrade/README.md new file mode 100644 index 000000000..238495cde --- /dev/null +++ b/clanModules/auto-upgrade/README.md @@ -0,0 +1,8 @@ +--- +description = "Set up automatic upgrades" +categories = ["System"] +features = [ "inventory", "deprecated" ] +--- + +Whether to periodically upgrade NixOS to the latest version. If enabled, a +systemd timer will run `nixos-rebuild switch --upgrade` once a day. diff --git a/clanModules/auto-upgrade/roles/default.nix b/clanModules/auto-upgrade/roles/default.nix new file mode 100644 index 000000000..898a358dc --- /dev/null +++ b/clanModules/auto-upgrade/roles/default.nix @@ -0,0 +1,32 @@ +{ + config, + lib, + ... 
+}: +let + cfg = config.clan.auto-upgrade; +in +{ + options.clan.auto-upgrade = { + flake = lib.mkOption { + type = lib.types.str; + description = "Flake reference"; + }; + }; + + config = { + + warnings = [ + "The clan.auto-upgrade module is deprecated and will be removed on 2025-07-15. + Please migrate to user-maintained configuration or the new equivalent clan services + (https://docs.clan.lol/reference/clanServices)." + ]; + + system.autoUpgrade = { + inherit (cfg) flake; + enable = true; + dates = "02:00"; + randomizedDelaySec = "45min"; + }; + }; +} diff --git a/clanModules/borgbackup-static/README.md b/clanModules/borgbackup-static/README.md new file mode 100644 index 000000000..a0dda99da --- /dev/null +++ b/clanModules/borgbackup-static/README.md @@ -0,0 +1,16 @@ +--- +description = "Statically configure borgbackup with sane defaults." +--- +!!! Danger "Deprecated" + Use [borgbackup](borgbackup.md) instead. + + Don't use borgbackup-static through [inventory](../../guides/inventory.md). + +This module implements the `borgbackup` backend and implements sane defaults +for backup management through `borgbackup` for members of the clan. + +Configure target machines where the backups should be sent to through `targets`. + +Configure machines that should be backuped either through `includeMachines` +which will exclusively add the included machines to be backuped, or through +`excludeMachines`, which will add every machine except the excluded machine to the backup. diff --git a/clanModules/borgbackup-static/default.nix b/clanModules/borgbackup-static/default.nix new file mode 100644 index 000000000..64dae529e --- /dev/null +++ b/clanModules/borgbackup-static/default.nix @@ -0,0 +1,104 @@ +{ lib, config, ... 
}: +let + dir = config.clan.core.settings.directory; + machineDir = dir + "/machines/"; +in +{ + imports = [ ../borgbackup ]; + + options.clan.borgbackup-static = { + excludeMachines = lib.mkOption { + type = lib.types.listOf lib.types.str; + example = lib.literalExpression "[ config.clan.core.settings.machine.name ]"; + default = [ ]; + description = '' + Machines that should not be backuped. + Mutually exclusive with includeMachines. + If this is not empty, every other machine except the targets in the clan will be backuped by this module. + If includeMachines is set, only the included machines will be backuped. + ''; + }; + includeMachines = lib.mkOption { + type = lib.types.listOf lib.types.str; + example = lib.literalExpression "[ config.clan.core.settings.machine.name ]"; + default = [ ]; + description = '' + Machines that should be backuped. + Mutually exclusive with excludeMachines. + ''; + }; + targets = lib.mkOption { + type = lib.types.listOf lib.types.str; + default = [ ]; + description = '' + Machines that should act as target machines for backups. 
+ ''; + }; + }; + + config.services.borgbackup.repos = + let + machines = builtins.readDir machineDir; + borgbackupIpMachinePath = machines: machineDir + machines + "/facts/borgbackup.ssh.pub"; + filteredMachines = + if ((builtins.length config.clan.borgbackup-static.includeMachines) != 0) then + lib.filterAttrs (name: _: (lib.elem name config.clan.borgbackup-static.includeMachines)) machines + else + lib.filterAttrs (name: _: !(lib.elem name config.clan.borgbackup-static.excludeMachines)) machines; + machinesMaybeKey = lib.mapAttrsToList ( + machine: _: + let + fullPath = borgbackupIpMachinePath machine; + in + if builtins.pathExists fullPath then machine else null + ) filteredMachines; + machinesWithKey = lib.filter (x: x != null) machinesMaybeKey; + hosts = builtins.map (machine: { + name = machine; + value = { + path = "/var/lib/borgbackup/${machine}"; + authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machine)) ]; + }; + }) machinesWithKey; + in + lib.mkIf + (builtins.any ( + target: target == config.clan.core.settings.machine.name + ) config.clan.borgbackup-static.targets) + (if (builtins.listToAttrs hosts) != null then builtins.listToAttrs hosts else { }); + + config.clan.borgbackup.destinations = + let + destinations = builtins.map (d: { + name = d; + value = { + repo = "borg@${d}:/var/lib/borgbackup/${config.clan.core.settings.machine.name}"; + }; + }) config.clan.borgbackup-static.targets; + in + lib.mkIf (builtins.any ( + target: target == config.clan.core.settings.machine.name + ) config.clan.borgbackup-static.includeMachines) (builtins.listToAttrs destinations); + + config.assertions = [ + { + assertion = + !( + ((builtins.length config.clan.borgbackup-static.excludeMachines) != 0) + && ((builtins.length config.clan.borgbackup-static.includeMachines) != 0) + ); + message = '' + The options: + config.clan.borgbackup-static.excludeMachines = [${builtins.toString config.clan.borgbackup-static.excludeMachines}] + and + 
config.clan.borgbackup-static.includeMachines = [${builtins.toString config.clan.borgbackup-static.includeMachines}] + are mutually exclusive. + Use excludeMachines to exclude certain machines and backup the other clan machines. + Use include machines to only backup certain machines. + ''; + } + ]; + config.warnings = lib.optional ( + builtins.length config.clan.borgbackup-static.targets > 0 + ) "The borgbackup-static module is deprecated use the service via the inventory interface instead."; +} diff --git a/clanModules/borgbackup/README.md b/clanModules/borgbackup/README.md new file mode 100644 index 000000000..a5d03d83e --- /dev/null +++ b/clanModules/borgbackup/README.md @@ -0,0 +1,14 @@ +--- +description = "Efficient, deduplicating backup program with optional compression and secure encryption." +categories = ["System"] +features = [ "inventory", "deprecated" ] +--- +BorgBackup (short: Borg) gives you: + +- Space efficient storage of backups. +- Secure, authenticated encryption. +- Compression: lz4, zstd, zlib, lzma or none. +- Mountable backups with FUSE. +- Easy installation on multiple platforms: Linux, macOS, BSD, … +- Free software (BSD license). +- Backed by a large and active open-source community. diff --git a/clanModules/borgbackup/default.nix b/clanModules/borgbackup/default.nix new file mode 100644 index 000000000..d4b634d00 --- /dev/null +++ b/clanModules/borgbackup/default.nix @@ -0,0 +1,6 @@ +# Dont import this file +# It is only here for backwards compatibility. +# Dont author new modules with this file. +{ + imports = [ ./roles/client.nix ]; +} diff --git a/clanModules/borgbackup/roles/server.nix b/clanModules/borgbackup/roles/server.nix new file mode 100644 index 000000000..c4f531b61 --- /dev/null +++ b/clanModules/borgbackup/roles/server.nix @@ -0,0 +1,63 @@ +{ config, lib, ... 
}: +let + dir = config.clan.core.settings.directory; + machineDir = dir + "/vars/per-machine/"; + machineName = config.clan.core.settings.machine.name; + + # Instances might be empty, if the module is not used via the inventory + # + # Type: { ${instanceName} :: { roles :: Roles } } + # Roles :: { ${role_name} :: { machines :: [string] } } + instances = config.clan.inventory.services.borgbackup or { }; + + allClients = lib.foldlAttrs ( + acc: _instanceName: instanceConfig: + acc + ++ ( + if (builtins.elem machineName instanceConfig.roles.server.machines) then + instanceConfig.roles.client.machines + else + [ ] + ) + ) [ ] instances; +in +{ + options = { + clan.borgbackup.directory = lib.mkOption { + type = lib.types.str; + default = "/var/lib/borgbackup"; + description = '' + The directory where the borgbackup repositories are stored. + ''; + }; + }; + config.services.borgbackup.repos = + let + borgbackupIpMachinePath = machine: machineDir + machine + "/borgbackup/borgbackup.ssh.pub/value"; + + machinesMaybeKey = builtins.map ( + machine: + let + fullPath = borgbackupIpMachinePath machine; + in + if builtins.pathExists fullPath then + machine + else + lib.warn '' + Machine ${machine} does not have a borgbackup key at ${fullPath}, + run `clan vars generate ${machine}` to generate it. 
+ '' null + ) allClients; + + machinesWithKey = lib.filter (x: x != null) machinesMaybeKey; + + hosts = builtins.map (machine: { + name = machine; + value = { + path = "${config.clan.borgbackup.directory}/${machine}"; + authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machine)) ]; + }; + }) machinesWithKey; + in + if (builtins.listToAttrs hosts) != [ ] then builtins.listToAttrs hosts else { }; +} diff --git a/clanModules/data-mesher/README.md b/clanModules/data-mesher/README.md new file mode 100644 index 000000000..172430861 --- /dev/null +++ b/clanModules/data-mesher/README.md @@ -0,0 +1,10 @@ +--- +description = "Set up data-mesher" +categories = ["System"] +features = [ "inventory" ] + +[constraints] +roles.admin.min = 1 +roles.admin.max = 1 +--- + diff --git a/clanModules/data-mesher/lib.nix b/clanModules/data-mesher/lib.nix new file mode 100644 index 000000000..80a284ab4 --- /dev/null +++ b/clanModules/data-mesher/lib.nix @@ -0,0 +1,19 @@ +lib: { + + machines = + config: + let + instanceNames = builtins.attrNames config.clan.inventory.services.data-mesher; + instanceName = builtins.head instanceNames; + dataMesherInstances = config.clan.inventory.services.data-mesher.${instanceName}; + + uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list); + in + rec { + admins = dataMesherInstances.roles.admin.machines or [ ]; + signers = dataMesherInstances.roles.signer.machines or [ ]; + peers = dataMesherInstances.roles.peer.machines or [ ]; + bootstrap = uniqueStrings (admins ++ signers); + }; + +} diff --git a/clanModules/data-mesher/roles/admin.nix b/clanModules/data-mesher/roles/admin.nix new file mode 100644 index 000000000..caf80d6f2 --- /dev/null +++ b/clanModules/data-mesher/roles/admin.nix @@ -0,0 +1,58 @@ +{ lib, config, ... 
}: +let + cfg = config.clan.data-mesher; + + dmLib = import ../lib.nix lib; +in +{ + imports = [ + ../shared.nix + ]; + + options.clan.data-mesher = { + network = { + tld = lib.mkOption { + type = lib.types.str; + default = (config.networking.domain or "clan"); + description = "Top level domain to use for the network"; + }; + + hostTTL = lib.mkOption { + type = lib.types.str; + default = "672h"; # 28 days + example = "24h"; + description = "The TTL for hosts in the network, in the form of a Go time.Duration"; + }; + }; + }; + + config = { + + warnings = [ + "The clan.data-mesher module is deprecated and will be removed on 2025-07-15. + Please migrate to user-maintained configuration or the new equivalent clan services + (https://docs.clan.lol/reference/clanServices)." + ]; + + services.data-mesher.initNetwork = + let + # for a given machine, read its public key and remove any new lines + readHostKey = + machine: + let + path = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/data-mesher-host-key/public_key/value"; + in + builtins.elemAt (lib.splitString "\n" (builtins.readFile path)) 1; + in + { + enable = true; + keyPath = config.clan.core.vars.generators.data-mesher-network-key.files.private_key.path; + + tld = cfg.network.tld; + hostTTL = cfg.network.hostTTL; + + # admin and signer host public keys + signingKeys = builtins.map readHostKey (dmLib.machines config).bootstrap; + }; + }; +} diff --git a/clanModules/data-mesher/roles/peer.nix b/clanModules/data-mesher/roles/peer.nix new file mode 100644 index 000000000..a56780406 --- /dev/null +++ b/clanModules/data-mesher/roles/peer.nix @@ -0,0 +1,5 @@ +{ + imports = [ + ../shared.nix + ]; +} diff --git a/clanModules/data-mesher/roles/signer.nix b/clanModules/data-mesher/roles/signer.nix new file mode 100644 index 000000000..a56780406 --- /dev/null +++ b/clanModules/data-mesher/roles/signer.nix @@ -0,0 +1,5 @@ +{ + imports = [ + ../shared.nix + ]; +} diff --git a/clanModules/data-mesher/shared.nix 
b/clanModules/data-mesher/shared.nix new file mode 100644 index 000000000..7f14d876f --- /dev/null +++ b/clanModules/data-mesher/shared.nix @@ -0,0 +1,152 @@ +{ + config, + lib, + ... +}: +let + cfg = config.clan.data-mesher; + dmLib = import ./lib.nix lib; + + # the default bootstrap nodes are any machines with the admin or signers role + # we iterate through those machines, determining an IP address for them based on their VPN + # currently only supports zerotier + defaultBootstrapNodes = builtins.foldl' ( + urls: name: + let + + ipPath = "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value"; + + in + if builtins.pathExists ipPath then + let + ip = builtins.readFile ipPath; + in + urls ++ [ "[${ip}]:${builtins.toString cfg.network.port}" ] + else + urls + ) [ ] (dmLib.machines config).bootstrap; +in +{ + options.clan.data-mesher = { + + bootstrapNodes = lib.mkOption { + type = lib.types.nullOr (lib.types.listOf lib.types.str); + default = null; + description = '' + A list of bootstrap nodes that act as an initial gateway when joining + the cluster. + ''; + example = [ + "192.168.1.1:7946" + "192.168.1.2:7946" + ]; + }; + + network = { + + interface = lib.mkOption { + type = lib.types.str; + description = '' + The interface over which cluster communication should be performed. + All the ip addresses associate with this interface will be part of + our host claim, including both ipv4 and ipv6. + + This should be set to an internal/VPN interface. + ''; + example = "tailscale0"; + }; + + port = lib.mkOption { + type = lib.types.port; + default = 7946; + description = '' + Port to listen on for cluster communication. 
+ ''; + }; + }; + }; + + config = { + + services.data-mesher = { + enable = true; + openFirewall = true; + + settings = { + log_level = "warn"; + state_dir = "/var/lib/data-mesher"; + + # read network id from vars + network.id = config.clan.core.vars.generators.data-mesher-network-key.files.public_key.value; + + host = { + names = [ config.networking.hostName ]; + key_path = config.clan.core.vars.generators.data-mesher-host-key.files.private_key.path; + }; + + cluster = { + port = cfg.network.port; + join_interval = "30s"; + push_pull_interval = "30s"; + + interface = cfg.network.interface; + + bootstrap_nodes = if cfg.bootstrapNodes == null then defaultBootstrapNodes else cfg.bootstrapNodes; + }; + + http.port = 7331; + http.interface = "lo"; + }; + }; + + # Generate host key. + clan.core.vars.generators.data-mesher-host-key = { + files = + let + owner = config.users.users.data-mesher.name; + in + { + private_key = { + inherit owner; + }; + public_key.secret = false; + }; + + runtimeInputs = [ + config.services.data-mesher.package + ]; + + script = '' + data-mesher generate keypair \ + --public-key-path "$out"/public_key \ + --private-key-path "$out"/private_key + ''; + }; + + clan.core.vars.generators.data-mesher-network-key = { + # generated once per clan + share = true; + + files = + let + owner = config.users.users.data-mesher.name; + in + { + private_key = { + inherit owner; + }; + public_key.secret = false; + }; + + runtimeInputs = [ + config.services.data-mesher.package + ]; + + script = '' + data-mesher generate keypair \ + --public-key-path "$out"/public_key \ + --private-key-path "$out"/private_key + ''; + }; + }; +} diff --git a/clanModules/deltachat/README.md b/clanModules/deltachat/README.md new file mode 100644 index 000000000..e3822c310 --- /dev/null +++ b/clanModules/deltachat/README.md @@ -0,0 +1,17 @@ +--- +description = "Email-based instant messaging for Desktop." +categories = ["Social"] +features = [ "inventory", "deprecated" ] +--- + +!!! 
info + This module will automatically configure an email server on the machine for handling the e-mail messaging seamlessly. + +## Features + +- [x] **Email-based**: Uses any email account as its backend. +- [x] **End-to-End Encryption**: Supports Autocrypt to automatically encrypt messages. +- [x] **No Phone Number Required**: Uses your email address instead of a phone number. +- [x] **Cross-Platform**: Available on desktop and mobile platforms. +- [x] **Automatic Server Setup**: Includes your own DeltaChat server for enhanced control and privacy. +- [ ] **Bake a cake**: This module cannot cake a bake. diff --git a/clanModules/deltachat/default.nix b/clanModules/deltachat/default.nix new file mode 100644 index 000000000..8fdf356eb --- /dev/null +++ b/clanModules/deltachat/default.nix @@ -0,0 +1,3 @@ +{ + imports = [ ./roles/default.nix ]; +} diff --git a/clanModules/deltachat/roles/default.nix b/clanModules/deltachat/roles/default.nix new file mode 100644 index 000000000..b48d60a8c --- /dev/null +++ b/clanModules/deltachat/roles/default.nix @@ -0,0 +1,153 @@ +{ + config, + pkgs, + ... +}: +{ + warnings = [ + "The clan.deltachat module is deprecated and will be removed on 2025-07-15. + Please migrate to user-maintained configuration or the new equivalent clan services + (https://docs.clan.lol/reference/clanServices)." + ]; + + networking.firewall.interfaces."zt+".allowedTCPPorts = [ 25 ]; # smtp with other hosts + environment.systemPackages = [ pkgs.deltachat-desktop ]; + + services.maddy = + let + domain = "${config.clan.core.settings.machine.name}.local"; + in + { + enable = true; + primaryDomain = domain; + config = '' + # Minimal configuration with TLS disabled, adapted from upstream example + # configuration here https://github.com/foxcpp/maddy/blob/master/maddy.conf + # Do not use this in unencrypted networks! 
+ + auth.pass_table local_authdb { + table sql_table { + driver sqlite3 + dsn credentials.db + table_name passwords + } + } + + storage.imapsql local_mailboxes { + driver sqlite3 + dsn imapsql.db + } + + table.chain local_rewrites { + optional_step regexp "(.+)\+(.+)@(.+)" "$1@$3" + optional_step static { + entry postmaster postmaster@$(primary_domain) + } + optional_step file /etc/maddy/aliases + } + + msgpipeline local_routing { + destination postmaster $(local_domains) { + modify { + replace_rcpt &local_rewrites + } + deliver_to &local_mailboxes + } + default_destination { + reject 550 5.1.1 "User doesn't exist" + } + } + + smtp tcp://[::]:25 { + limits { + all rate 20 1s + all concurrency 10 + } + dmarc yes + check { + require_mx_record + dkim + spf + } + source $(local_domains) { + reject 501 5.1.8 "Use Submission for outgoing SMTP" + } + default_source { + destination postmaster $(local_domains) { + deliver_to &local_routing + } + default_destination { + reject 550 5.1.1 "User doesn't exist" + } + } + } + + submission tcp://[::1]:587 { + limits { + all rate 50 1s + } + auth &local_authdb + source $(local_domains) { + check { + authorize_sender { + prepare_email &local_rewrites + user_to_email identity + } + } + destination postmaster $(local_domains) { + deliver_to &local_routing + } + default_destination { + modify { + dkim $(primary_domain) $(local_domains) default + } + deliver_to &remote_queue + } + } + default_source { + reject 501 5.1.8 "Non-local sender domain" + } + } + + target.remote outbound_delivery { + limits { + destination rate 20 1s + destination concurrency 10 + } + mx_auth { + dane + mtasts { + cache fs + fs_dir mtasts_cache/ + } + local_policy { + min_tls_level encrypted + min_mx_level none + } + } + } + + target.queue remote_queue { + target &outbound_delivery + autogenerated_msg_domain $(primary_domain) + bounce { + destination postmaster $(local_domains) { + deliver_to &local_routing + } + default_destination { + reject 550 5.0.0 
"Refusing to send DSNs to non-local addresses" + } + } + } + + imap tcp://[::1]:143 { + auth &local_authdb + storage &local_mailboxes + } + ''; + ensureAccounts = [ "user@${domain}" ]; + ensureCredentials = { + "user@${domain}".passwordFile = pkgs.writeText "dummy" "foobar"; + }; + }; +} diff --git a/clanModules/disk-id/README.md b/clanModules/disk-id/README.md new file mode 100644 index 000000000..aa83e976b --- /dev/null +++ b/clanModules/disk-id/README.md @@ -0,0 +1,5 @@ +--- +description = "Generates a uuid for use in disk device naming" +features = [ "inventory" ] +categories = [ "System" ] +--- diff --git a/clanModules/disk-id/default.nix b/clanModules/disk-id/default.nix new file mode 100644 index 000000000..ed6af3368 --- /dev/null +++ b/clanModules/disk-id/default.nix @@ -0,0 +1,6 @@ +# Dont import this file +# It is only here for backwards compatibility. +# Dont author new modules with this file. +{ + imports = [ ./roles/default.nix ]; +} diff --git a/clanModules/disk-id/roles/uuid4.sh b/clanModules/disk-id/roles/uuid4.sh new file mode 100644 index 000000000..ca3a7c47e --- /dev/null +++ b/clanModules/disk-id/roles/uuid4.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# Read 16 bytes from /dev/urandom +uuid=$(dd if=/dev/urandom bs=1 count=16 2>/dev/null | od -An -tx1 | tr -d ' \n') + +# Break the UUID into pieces and apply the required modifications +byte6=${uuid:12:2} +byte8=${uuid:16:2} + +# Construct the correct version and variant +hex_byte6=$(printf "%x" $((0x$byte6 & 0x0F | 0x40))) +hex_byte8=$(printf "%x" $((0x$byte8 & 0x3F | 0x80))) + +# Rebuild the UUID with the correct fields +uuid_v4="${uuid:0:12}${hex_byte6}${uuid:14:2}${hex_byte8}${uuid:18:14}" + +# Format the UUID correctly 8-4-4-4-12 +uuid_formatted="${uuid_v4:0:8}-${uuid_v4:8:4}-${uuid_v4:12:4}-${uuid_v4:16:4}-${uuid_v4:20:12}" + +echo -n "$uuid_formatted" \ No newline at end of file diff --git a/clanModules/dyndns/README.md b/clanModules/dyndns/README.md new file mode 100644 index 
000000000..603ad553d --- /dev/null +++ b/clanModules/dyndns/README.md @@ -0,0 +1,6 @@ +--- +description = "A dynamic DNS service to update domain IPs" +--- + +To understand the possible options that can be set visit the documentation of [ddns-updater](https://github.com/qdm12/ddns-updater?tab=readme-ov-file#versioned-documentation) + diff --git a/clanModules/dyndns/default.nix b/clanModules/dyndns/default.nix new file mode 100644 index 000000000..aea0f70cd --- /dev/null +++ b/clanModules/dyndns/default.nix @@ -0,0 +1,257 @@ +{ + config, + pkgs, + lib, + ... +}: + +let + name = "dyndns"; + cfg = config.clan.${name}; + + # We dedup secrets if they have the same provider + base domain + secret_id = opt: "${name}-${opt.provider}-${opt.domain}"; + secret_path = + opt: config.clan.core.vars.generators."${secret_id opt}".files."${secret_id opt}".path; + + # We check that a secret has not been set in extraSettings. + extraSettingsSafe = + opt: + if (builtins.hasAttr opt.secret_field_name opt.extraSettings) then + throw "Please do not set ${opt.secret_field_name} in extraSettings, it is automatically set by the dyndns module." 
+ else + opt.extraSettings; + /* + We go from: + {home.example.com:{value:{domain:example.com,host:home, provider:namecheap}}} + To: + {settings: [{domain: example.com, host: home, provider: namecheap, password: dyndns-namecheap-example.com}]} + */ + service_config = { + settings = builtins.catAttrs "value" ( + builtins.attrValues ( + lib.mapAttrs (_: opt: { + value = + (extraSettingsSafe opt) + // { + domain = opt.domain; + provider = opt.provider; + } + // { + "${opt.secret_field_name}" = secret_id opt; + }; + }) cfg.settings + ) + ); + }; + + secret_generator = _: opt: { + name = secret_id opt; + value = { + share = true; + migrateFact = "${secret_id opt}"; + prompts.${secret_id opt} = { + type = "hidden"; + persist = true; + }; + }; + }; +in +{ + options.clan.${name} = { + server = { + enable = lib.mkEnableOption "dyndns webserver"; + domain = lib.mkOption { + type = lib.types.str; + description = "Domain to serve the webservice on"; + }; + port = lib.mkOption { + type = lib.types.int; + default = 54805; + description = "Port to listen on"; + }; + }; + + period = lib.mkOption { + type = lib.types.int; + default = 5; + description = "Domain update period in minutes"; + }; + + settings = lib.mkOption { + type = lib.types.attrsOf ( + lib.types.submodule ( + { ... 
}: + { + options = { + provider = lib.mkOption { + example = "namecheap"; + type = lib.types.str; + description = "The dyndns provider to use"; + }; + domain = lib.mkOption { + type = lib.types.str; + example = "example.com"; + description = "The top level domain to update."; + }; + secret_field_name = lib.mkOption { + example = [ + "password" + "api_key" + ]; + type = lib.types.enum [ + "password" + "token" + "api_key" + "secret_api_key" + ]; + default = "password"; + description = "The field name for the secret"; + }; + # TODO: Ideally we would create a gigantic list of all possible settings / types + # optimally we would have a way to generate the options from the source code + extraSettings = lib.mkOption { + type = lib.types.attrsOf lib.types.str; + default = { }; + description = '' + Extra settings for the provider. + Provider specific settings: https://github.com/qdm12/ddns-updater#configuration + ''; + }; + }; + } + ) + ); + default = { }; + description = "Configuration for which domains to update"; + }; + }; + + imports = [ + ../nginx + ]; + + config = lib.mkMerge [ + (lib.mkIf (cfg.settings != { }) { + clan.core.vars.generators = lib.mapAttrs' secret_generator cfg.settings; + + users.groups.${name} = { }; + users.users.${name} = { + group = name; + isSystemUser = true; + description = "User for ${name} service"; + home = "/var/lib/${name}"; + createHome = true; + }; + + services.nginx = lib.mkIf cfg.server.enable { + enable = true; + virtualHosts = { + "${cfg.server.domain}" = { + forceSSL = true; + enableACME = true; + locations."/" = { + proxyPass = "http://localhost:${toString cfg.server.port}"; + }; + }; + }; + }; + + systemd.services.${name} = { + path = [ ]; + description = "Dynamic DNS updater"; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + environment = { + MYCONFIG = "${builtins.toJSON service_config}"; + SERVER_ENABLED = if cfg.server.enable then "yes" else "no"; + PERIOD = "${toString cfg.period}m"; + LISTENING_ADDRESS 
= ":${toString cfg.server.port}"; + }; + + serviceConfig = + let + pyscript = + pkgs.writers.writePython3Bin "generate_secret_config.py" + { + libraries = [ ]; + doCheck = false; + } + '' + import json + from pathlib import Path + import os + + cred_dir = Path(os.getenv("CREDENTIALS_DIRECTORY")) + config_str = os.getenv("MYCONFIG") + + + def get_credential(name): + secret_p = cred_dir / name + with open(secret_p, 'r') as f: + return f.read().strip() + + + config = json.loads(config_str) + print(f"Config: {config}") + for attrset in config["settings"]: + if "password" in attrset: + attrset['password'] = get_credential(attrset['password']) + elif "token" in attrset: + attrset['token'] = get_credential(attrset['token']) + elif "secret_api_key" in attrset: + attrset['secret_api_key'] = get_credential(attrset['secret_api_key']) + elif "api_key" in attrset: + attrset['api_key'] = get_credential(attrset['api_key']) + else: + raise ValueError(f"Missing secret field in {attrset}") + + # create directory data if it does not exist + data_dir = Path('data') + data_dir.mkdir(mode=0o770, exist_ok=True) + + # Create a temporary config file + # with appropriate permissions + tmp_config_path = data_dir / '.config.json' + tmp_config_path.touch(mode=0o660, exist_ok=False) + + # Write the config with secrets back + with open(tmp_config_path, 'w') as f: + f.write(json.dumps(config, indent=4)) + + # Move config into place + config_path = data_dir / 'config.json' + tmp_config_path.rename(config_path) + + # Set file permissions to read + # and write only by the user and group + for file in data_dir.iterdir(): + file.chmod(0o660) + ''; + in + { + ExecStartPre = lib.getExe pyscript; + ExecStart = lib.getExe pkgs.ddns-updater; + LoadCredential = lib.mapAttrsToList (_: opt: "${secret_id opt}:${secret_path opt}") cfg.settings; + User = name; + Group = name; + NoNewPrivileges = true; + PrivateTmp = true; + ProtectSystem = "strict"; + ReadOnlyPaths = "/"; + PrivateDevices = "yes"; + 
ProtectKernelModules = "yes"; + ProtectKernelTunables = "yes"; + WorkingDirectory = "/var/lib/${name}"; + ReadWritePaths = [ + "/proc/self" + "/var/lib/${name}" + ]; + + Restart = "always"; + RestartSec = 60; + }; + }; + }) + ]; +} diff --git a/clanModules/ergochat/README.md b/clanModules/ergochat/README.md new file mode 100644 index 000000000..b04f61a47 --- /dev/null +++ b/clanModules/ergochat/README.md @@ -0,0 +1,5 @@ +--- +description = "A modern IRC server" +categories = ["Social"] +features = [ "inventory", "deprecated" ] +--- diff --git a/clanModules/ergochat/default.nix b/clanModules/ergochat/default.nix new file mode 100644 index 000000000..8fdf356eb --- /dev/null +++ b/clanModules/ergochat/default.nix @@ -0,0 +1,3 @@ +{ + imports = [ ./roles/default.nix ]; +} diff --git a/clanModules/ergochat/roles/default.nix b/clanModules/ergochat/roles/default.nix new file mode 100644 index 000000000..2aa240359 --- /dev/null +++ b/clanModules/ergochat/roles/default.nix @@ -0,0 +1,21 @@ +_: { + + warnings = [ + "The clan.ergochat module is deprecated and will be removed on 2025-07-15. + Please migrate to user-maintained configuration or the new equivalent clan services + (https://docs.clan.lol/reference/clanServices)." + ]; + + services.ergochat = { + enable = true; + + settings = { + datastore = { + autoupgrade = true; + path = "/var/lib/ergo/ircd.db"; + }; + }; + }; + + clan.core.state.ergochat.folders = [ "/var/lib/ergo" ]; +} diff --git a/clanModules/flake-module.nix b/clanModules/flake-module.nix index d37aa27c8..54bcfc4d1 100644 --- a/clanModules/flake-module.nix +++ b/clanModules/flake-module.nix @@ -1,23 +1,51 @@ -{ ... }: - +{ lib, ... }: let - error = builtins.throw '' - clanModules have been removed! - - Refer to https://docs.clan.lol/guides/migrations/migrate-inventory-services for migration. 
- ''; + inherit (lib) + filterAttrs + pathExists + ; in - { - flake.clanModules = { - outPath = "removed-clan-modules"; - value = error; + # only import available files, as this allows to filter the files for tests. + flake.clanModules = filterAttrs (_name: pathExists) { + auto-upgrade = ./auto-upgrade; + admin = ./admin; + borgbackup = ./borgbackup; + borgbackup-static = ./borgbackup-static; + deltachat = ./deltachat; + data-mesher = ./data-mesher; + disk-id = ./disk-id; + dyndns = ./dyndns; + ergochat = ./ergochat; + garage = ./garage; + heisenbridge = ./heisenbridge; + importer = ./importer; + iwd = ./iwd; + localbackup = ./localbackup; + localsend = ./localsend; + matrix-synapse = ./matrix-synapse; + moonlight = ./moonlight; + mumble = ./mumble; + mycelium = ./mycelium; + nginx = ./nginx; + packages = ./packages; + postgresql = ./postgresql; + root-password = ./root-password; + single-disk = ./single-disk; + sshd = ./sshd; + state-version = ./state-version; + static-hosts = ./static-hosts; + sunshine = ./sunshine; + syncthing = ./syncthing; + syncthing-static-peers = ./syncthing-static-peers; + thelounge = ./thelounge; + trusted-nix-caches = ./trusted-nix-caches; + user-password = ./user-password; + vaultwarden = ./vaultwarden; + wifi = ./wifi; + xfce = ./xfce; + zerotier = ./zerotier; + zerotier-static-peers = ./zerotier-static-peers; + zt-tcp-relay = ./zt-tcp-relay; }; - - # builtins.listToAttrs ( - # map (name: { - # inherit name; - # value = error; - # }) modnames - # ); } diff --git a/clanModules/garage/README.md b/clanModules/garage/README.md new file mode 100644 index 000000000..0d7646ed3 --- /dev/null +++ b/clanModules/garage/README.md @@ -0,0 +1,11 @@ +--- +description = "S3-compatible object store for small self-hosted geo-distributed deployments" +categories = ["System"] +features = [ "inventory", "deprecated" ] +--- + +This module generates garage specific keys automatically. +Also shares the `rpc_secret` between instances. 
+ +Options: [NixosModuleOptions](https://search.nixos.org/options?channel=unstable&size=50&sort=relevance&type=packages&query=garage) +Documentation: https://garagehq.deuxfleurs.fr/ diff --git a/clanModules/garage/default.nix b/clanModules/garage/default.nix new file mode 100644 index 000000000..8fdf356eb --- /dev/null +++ b/clanModules/garage/default.nix @@ -0,0 +1,3 @@ +{ + imports = [ ./roles/default.nix ]; +} diff --git a/clanModules/garage/roles/default.nix b/clanModules/garage/roles/default.nix new file mode 100644 index 000000000..0cb3a37dc --- /dev/null +++ b/clanModules/garage/roles/default.nix @@ -0,0 +1,50 @@ +{ config, pkgs, ... }: +{ + + warnings = [ + "The clan.garage module is deprecated and will be removed on 2025-07-15. + Please migrate to user-maintained configuration or the new equivalent clan services + (https://docs.clan.lol/reference/clanServices)." + ]; + + systemd.services.garage.serviceConfig = { + LoadCredential = [ + "rpc_secret_path:${config.clan.core.vars.generators.garage-shared.files.rpc_secret.path}" + "admin_token_path:${config.clan.core.vars.generators.garage.files.admin_token.path}" + "metrics_token_path:${config.clan.core.vars.generators.garage.files.metrics_token.path}" + ]; + Environment = [ + "GARAGE_ALLOW_WORLD_READABLE_SECRETS=true" + "GARAGE_RPC_SECRET_FILE=%d/rpc_secret_path" + "GARAGE_ADMIN_TOKEN_FILE=%d/admin_token_path" + "GARAGE_METRICS_TOKEN_FILE=%d/metrics_token_path" + ]; + }; + + clan.core.vars.generators.garage = { + files.admin_token = { }; + files.metrics_token = { }; + runtimeInputs = [ + pkgs.coreutils + pkgs.openssl + ]; + script = '' + openssl rand -base64 -out "$out"/admin_token 32 + openssl rand -base64 -out "$out"/metrics_token 32 + ''; + }; + + clan.core.vars.generators.garage-shared = { + share = true; + files.rpc_secret = { }; + runtimeInputs = [ + pkgs.coreutils + pkgs.openssl + ]; + script = '' + openssl rand -hex -out "$out"/rpc_secret 32 + ''; + }; + + clan.core.state.garage.folders = [ 
config.services.garage.settings.metadata_dir ]; +} diff --git a/clanModules/heisenbridge/README.md b/clanModules/heisenbridge/README.md new file mode 100644 index 000000000..ffdfa6ca9 --- /dev/null +++ b/clanModules/heisenbridge/README.md @@ -0,0 +1,5 @@ +--- +description = "A matrix bridge to communicate with IRC" +categories = ["Social"] +features = [ "inventory", "deprecated" ] +--- diff --git a/clanModules/heisenbridge/default.nix b/clanModules/heisenbridge/default.nix new file mode 100644 index 000000000..8fdf356eb --- /dev/null +++ b/clanModules/heisenbridge/default.nix @@ -0,0 +1,3 @@ +{ + imports = [ ./roles/default.nix ]; +} diff --git a/clanModules/heisenbridge/roles/default.nix b/clanModules/heisenbridge/roles/default.nix new file mode 100644 index 000000000..ffa59c056 --- /dev/null +++ b/clanModules/heisenbridge/roles/default.nix @@ -0,0 +1,27 @@ +{ + lib, + ... +}: +{ + imports = [ + (lib.mkRemovedOptionModule [ + "clan" + "heisenbridge" + "enable" + ] "Importing the module will already enable the service.") + ]; + config = { + warnings = [ + "The clan.heisenbridge module is deprecated and will be removed on 2025-07-15. + Please migrate to user-maintained configuration or the new equivalent clan services + (https://docs.clan.lol/reference/clanServices)." 
+ ]; + services.heisenbridge = { + enable = true; + homeserver = "http://localhost:8008"; # TODO: Sync with matrix-synapse + }; + services.matrix-synapse.settings.app_service_config_files = [ + "/var/lib/heisenbridge/registration.yml" + ]; + }; +} diff --git a/clanModules/importer/roles/default.nix b/clanModules/importer/roles/default.nix new file mode 100644 index 000000000..ffcd4415b --- /dev/null +++ b/clanModules/importer/roles/default.nix @@ -0,0 +1 @@ +{ } diff --git a/clanModules/iwd/README.md b/clanModules/iwd/README.md new file mode 100644 index 000000000..40855c5a4 --- /dev/null +++ b/clanModules/iwd/README.md @@ -0,0 +1,9 @@ +--- +description = "Automatically provisions wifi credentials" +features = [ "inventory", "deprecated" ] +categories = [ "Network" ] +--- + +!!! Warning + If you've been using network manager + wpa_supplicant and now are switching to IWD read this migration guide: + https://archive.kernel.org/oldwiki/iwd.wiki.kernel.org/networkmanager.html#converting_network_profiles diff --git a/clanModules/iwd/default.nix b/clanModules/iwd/default.nix new file mode 100644 index 000000000..ed6af3368 --- /dev/null +++ b/clanModules/iwd/default.nix @@ -0,0 +1,6 @@ +# Dont import this file +# It is only here for backwards compatibility. +# Dont author new modules with this file. +{ + imports = [ ./roles/default.nix ]; +} diff --git a/clanModules/iwd/roles/default.nix b/clanModules/iwd/roles/default.nix new file mode 100644 index 000000000..ac836f384 --- /dev/null +++ b/clanModules/iwd/roles/default.nix @@ -0,0 +1,106 @@ +{ + lib, + config, + pkgs, + ... +}: + +let + cfg = config.clan.iwd; + secret_path = ssid: config.clan.core.vars.generators."iwd.${ssid}".files."iwd.${ssid}".path; + secret_generator = name: value: { + name = "iwd.${value.ssid}"; + value = + let + secret_name = "iwd.${value.ssid}"; + in + { + prompts.${secret_name} = { + description = "Wifi password for '${value.ssid}'"; + persist = true; + }; + migrateFact = secret_name; + # ref. 
man iwd.network + script = '' + config=" + [Settings] + AutoConnect=${if value.AutoConnect then "true" else "false"} + [Security] + Passphrase=$(echo -e "$prompt_value/${secret_name}" | ${lib.getExe pkgs.gnused} "s=\\\=\\\\\\\=g;s=\t=\\\t=g;s=\r=\\\r=g;s=^ =\\\s=") + " + echo "$config" > "$out/${secret_name}" + ''; + }; + }; +in +{ + options.clan.iwd = { + networks = lib.mkOption { + type = lib.types.attrsOf ( + lib.types.submodule ( + { name, ... }: + { + options = { + ssid = lib.mkOption { + type = lib.types.str; + default = name; + description = "The name of the wifi network"; + }; + AutoConnect = lib.mkOption { + type = lib.types.bool; + default = true; + description = "Automatically try to join this wifi network"; + }; + }; + } + ) + ); + default = { }; + description = "Wifi networks to predefine"; + }; + }; + + imports = [ + (lib.mkRemovedOptionModule [ + "clan" + "iwd" + "enable" + ] "Just define clan.iwd.networks to enable it") + ]; + + config = lib.mkMerge [ + (lib.mkIf (cfg.networks != { }) { + # Systemd tmpfiles rule to create /var/lib/iwd/example.psk file + systemd.tmpfiles.rules = lib.mapAttrsToList ( + _: value: "C /var/lib/iwd/${value.ssid}.psk 0600 root root - ${secret_path value.ssid}" + ) cfg.networks; + + clan.core.vars.generators = lib.mapAttrs' secret_generator cfg.networks; + + # TODO: restart the iwd.service if something changes + }) + { + warnings = [ + "The clan.iwd module is deprecated and will be removed on 2025-07-15. Please migrate to a user-maintained configuration or use the wifi service." + ]; + + # disable wpa supplicant + networking.wireless.enable = false; + + # Set the network manager backend to iwd + networking.networkmanager.wifi.backend = "iwd"; + + # Use iwd instead of wpa_supplicant. 
It has a user friendly CLI + networking.wireless.iwd = { + enable = true; + settings = { + Network = { + EnableIPv6 = true; + RoutePriorityOffset = 300; + }; + Settings.AutoConnect = true; + }; + }; + } + ]; +} diff --git a/clanModules/localbackup/README.md b/clanModules/localbackup/README.md new file mode 100644 index 000000000..1beeae757 --- /dev/null +++ b/clanModules/localbackup/README.md @@ -0,0 +1,3 @@ +--- +description = "Automatically backups current machine to local directory." +--- diff --git a/clanModules/localbackup/default.nix b/clanModules/localbackup/default.nix new file mode 100644 index 000000000..0adc46fde --- /dev/null +++ b/clanModules/localbackup/default.nix @@ -0,0 +1,242 @@ +{ + config, + lib, + pkgs, + ... +}: +let + cfg = config.clan.localbackup; + uniqueFolders = lib.unique ( + lib.flatten (lib.mapAttrsToList (_name: state: state.folders) config.clan.core.state) + ); + rsnapshotConfig = target: '' + config_version 1.2 + snapshot_root ${target.directory} + sync_first 1 + cmd_cp ${pkgs.coreutils}/bin/cp + cmd_rm ${pkgs.coreutils}/bin/rm + cmd_rsync ${pkgs.rsync}/bin/rsync + cmd_ssh ${pkgs.openssh}/bin/ssh + cmd_logger ${pkgs.inetutils}/bin/logger + cmd_du ${pkgs.coreutils}/bin/du + cmd_rsnapshot_diff ${pkgs.rsnapshot}/bin/rsnapshot-diff + + ${lib.optionalString (target.postBackupHook != null) '' + cmd_postexec ${pkgs.writeShellScript "postexec.sh" '' + set -efu -o pipefail + ${target.postBackupHook} + ''} + ''} + retain snapshot ${builtins.toString config.clan.localbackup.snapshots} + ${lib.concatMapStringsSep "\n" (folder: '' + backup ${folder} ${config.networking.hostName}/ + '') uniqueFolders} + ''; +in +{ + options.clan.localbackup = { + targets = lib.mkOption { + type = lib.types.attrsOf ( + lib.types.submodule ( + { name, ... 
}: + { + options = { + name = lib.mkOption { + type = lib.types.strMatching "^[a-zA-Z0-9._-]+$"; + default = name; + description = "the name of the backup job"; + }; + directory = lib.mkOption { + type = lib.types.str; + description = "the directory to backup"; + }; + mountpoint = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "mountpoint of the directory to backup. If set, the directory will be mounted before the backup and unmounted afterwards"; + }; + preMountHook = lib.mkOption { + type = lib.types.nullOr lib.types.lines; + default = null; + description = "Shell commands to run before the directory is mounted"; + }; + postMountHook = lib.mkOption { + type = lib.types.nullOr lib.types.lines; + default = null; + description = "Shell commands to run after the directory is mounted"; + }; + preUnmountHook = lib.mkOption { + type = lib.types.nullOr lib.types.lines; + default = null; + description = "Shell commands to run before the directory is unmounted"; + }; + postUnmountHook = lib.mkOption { + type = lib.types.nullOr lib.types.lines; + default = null; + description = "Shell commands to run after the directory is unmounted"; + }; + preBackupHook = lib.mkOption { + type = lib.types.nullOr lib.types.lines; + default = null; + description = "Shell commands to run before the backup"; + }; + postBackupHook = lib.mkOption { + type = lib.types.nullOr lib.types.lines; + default = null; + description = "Shell commands to run after the backup"; + }; + }; + } + ) + ); + default = { }; + description = "List of directories where backups are stored"; + }; + + snapshots = lib.mkOption { + type = lib.types.int; + default = 20; + description = "Number of snapshots to keep"; + }; + }; + + config = + let + mountHook = target: '' + if [[ -x /run/current-system/sw/bin/localbackup-mount-${target.name} ]]; then + /run/current-system/sw/bin/localbackup-mount-${target.name} + fi + if [[ -x 
/run/current-system/sw/bin/localbackup-unmount-${target.name} ]]; then + trap "/run/current-system/sw/bin/localbackup-unmount-${target.name}" EXIT + fi + ''; + in + lib.mkIf (cfg.targets != { }) { + environment.systemPackages = + [ + (pkgs.writeShellScriptBin "localbackup-create" '' + set -efu -o pipefail + export PATH=${ + lib.makeBinPath [ + pkgs.rsnapshot + pkgs.coreutils + pkgs.util-linux + ] + } + ${lib.concatMapStringsSep "\n" (target: '' + ${mountHook target} + echo "Creating backup '${target.name}'" + + ${lib.optionalString (target.preBackupHook != null) '' + ( + ${target.preBackupHook} + ) + ''} + + declare -A preCommandErrors + ${lib.concatMapStringsSep "\n" ( + state: + lib.optionalString (state.preBackupCommand != null) '' + echo "Running pre-backup command for ${state.name}" + if ! /run/current-system/sw/bin/${state.preBackupCommand}; then + preCommandErrors["${state.name}"]=1 + fi + '' + ) (builtins.attrValues config.clan.core.state)} + + rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" sync + rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" snapshot + '') (builtins.attrValues cfg.targets)}'') + (pkgs.writeShellScriptBin "localbackup-list" '' + set -efu -o pipefail + export PATH=${ + lib.makeBinPath [ + pkgs.jq + pkgs.findutils + pkgs.coreutils + pkgs.util-linux + ] + } + (${ + lib.concatMapStringsSep "\n" (target: '' + ( + ${mountHook target} + find ${lib.escapeShellArg target.directory} -mindepth 1 -maxdepth 1 -name "snapshot.*" -print0 -type d \ + | jq -Rs 'split("\u0000") | .[] | select(. != "") | { "name": ("${target.name}::" + .)}' + ) + '') (builtins.attrValues cfg.targets) + }) | jq -s . 
+ '') + (pkgs.writeShellScriptBin "localbackup-restore" '' + set -efu -o pipefail + export PATH=${ + lib.makeBinPath [ + pkgs.rsync + pkgs.coreutils + pkgs.util-linux + pkgs.gawk + ] + } + if [[ "''${NAME:-}" == "" ]]; then + echo "No backup name given via NAME environment variable" + exit 1 + fi + if [[ "''${FOLDERS:-}" == "" ]]; then + echo "No folders given via FOLDERS environment variable" + exit 1 + fi + name=$(awk -F'::' '{print $1}' <<< $NAME) + backupname=''${NAME#$name::} + + if command -v localbackup-mount-$name; then + localbackup-mount-$name + fi + if command -v localbackup-unmount-$name; then + trap "localbackup-unmount-$name" EXIT + fi + + if [[ ! -d $backupname ]]; then + echo "No backup found $backupname" + exit 1 + fi + + IFS=':' read -ra FOLDER <<< "''$FOLDERS" + for folder in "''${FOLDER[@]}"; do + mkdir -p "$folder" + rsync -a "$backupname/${config.networking.hostName}$folder/" "$folder" + done + '') + ] + ++ (lib.mapAttrsToList ( + name: target: + pkgs.writeShellScriptBin ("localbackup-mount-" + name) '' + set -efu -o pipefail + ${lib.optionalString (target.preMountHook != null) target.preMountHook} + ${lib.optionalString (target.mountpoint != null) '' + if ! 
${pkgs.util-linux}/bin/mountpoint -q ${lib.escapeShellArg target.mountpoint}; then + ${pkgs.util-linux}/bin/mount -o X-mount.mkdir ${lib.escapeShellArg target.mountpoint} + fi + ''} + ${lib.optionalString (target.postMountHook != null) target.postMountHook} + '' + ) cfg.targets) + ++ lib.mapAttrsToList ( + name: target: + pkgs.writeShellScriptBin ("localbackup-unmount-" + name) '' + set -efu -o pipefail + ${lib.optionalString (target.preUnmountHook != null) target.preUnmountHook} + ${lib.optionalString ( + target.mountpoint != null + ) "${pkgs.util-linux}/bin/umount ${lib.escapeShellArg target.mountpoint}"} + ${lib.optionalString (target.postUnmountHook != null) target.postUnmountHook} + '' + ) cfg.targets; + + clan.core.backups.providers.localbackup = { + # TODO list needs to run locally or on the remote machine + list = "localbackup-list"; + create = "localbackup-create"; + restore = "localbackup-restore"; + }; + }; +} diff --git a/clanModules/localsend/README.md b/clanModules/localsend/README.md new file mode 100644 index 000000000..a8165635e --- /dev/null +++ b/clanModules/localsend/README.md @@ -0,0 +1,5 @@ +--- +description = "Securely sharing files and messages over a local network without internet connectivity." +categories = ["Utility"] +features = [ "inventory", "deprecated" ] +--- diff --git a/clanModules/localsend/default.nix b/clanModules/localsend/default.nix new file mode 100644 index 000000000..8fdf356eb --- /dev/null +++ b/clanModules/localsend/default.nix @@ -0,0 +1,3 @@ +{ + imports = [ ./roles/default.nix ]; +} diff --git a/clanModules/localsend/localsend-ensure-config/default.nix b/clanModules/localsend/localsend-ensure-config/default.nix new file mode 100644 index 000000000..c310e9b62 --- /dev/null +++ b/clanModules/localsend/localsend-ensure-config/default.nix @@ -0,0 +1,22 @@ +{ + lib, + writers, + writeShellScriptBin, + localsend, + alias ? 
null, +}: +let + localsend-ensure-config = writers.writePython3 "localsend-ensure-config" { + flakeIgnore = [ + # We don't live in the dark ages anymore. + # Languages like Python that are whitespace heavy will overrun + # 79 characters.. + "E501" + ]; + } (builtins.readFile ./localsend-ensure-config.py); +in +writeShellScriptBin "localsend" '' + set -xeu + ${localsend-ensure-config} ${lib.optionalString (alias != null) alias} + ${lib.getExe localsend} +'' diff --git a/clanModules/localsend/localsend-ensure-config/localsend-ensure-config.py b/clanModules/localsend/localsend-ensure-config/localsend-ensure-config.py new file mode 100644 index 000000000..ba7d15613 --- /dev/null +++ b/clanModules/localsend/localsend-ensure-config/localsend-ensure-config.py @@ -0,0 +1,64 @@ +import json +import sys +from pathlib import Path + + +def load_json(file_path: Path) -> dict[str, any]: + try: + with file_path.open("r") as file: + return json.load(file) + except FileNotFoundError: + return {} + + +def save_json(file_path: Path, data: dict[str, any]) -> None: + with file_path.open("w") as file: + json.dump(data, file, indent=4) + + +def update_json(file_path: Path, updates: dict[str, any]) -> None: + data = load_json(file_path) + data.update(updates) + save_json(file_path, data) + + +def config_location() -> str: + config_file = "shared_preferences.json" + config_directory = ".local/share/org.localsend.localsend_app" + config_path = Path.home() / Path(config_directory) / Path(config_file) + return config_path + + +def ensure_config_directory() -> None: + config_directory = Path(config_location()).parent + config_directory.mkdir(parents=True, exist_ok=True) + + +def load_config() -> dict[str, any]: + return load_json(config_location()) + + +def save_config(data: dict[str, any]) -> None: + save_json(config_location(), data) + + +def update_username(username: str, data: dict[str, any]) -> dict[str, any]: + data["flutter.ls_alias"] = username + return data + + +def main(argv: 
list[str]) -> None: + try: + display_name = argv[1] + except IndexError: + # This is not an error, just don't update the name + print("No display name provided.") + sys.exit(0) + + ensure_config_directory() + updated_data = update_username(display_name, load_config()) + save_config(updated_data) + + +if __name__ == "__main__": + main(sys.argv[:2]) diff --git a/clanModules/localsend/roles/default.nix b/clanModules/localsend/roles/default.nix new file mode 100644 index 000000000..1da449c90 --- /dev/null +++ b/clanModules/localsend/roles/default.nix @@ -0,0 +1,69 @@ +{ + config, + pkgs, + lib, + ... +}: + +let + cfg = config.clan.localsend; +in +{ + # Integration can be improved, if the following issues get implemented: + # - cli frontend: https://github.com/localsend/localsend/issues/11 + # - ipv6 support: https://github.com/localsend/localsend/issues/549 + options.clan.localsend = { + + displayName = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "The name that localsend will use to display your instance."; + }; + + package = lib.mkPackageOption pkgs "localsend" { }; + + ipv4Addr = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + example = "192.168.56.2/24"; + description = "Optional IPv4 address for ZeroTier network."; + }; + }; + + imports = [ + (lib.mkRemovedOptionModule [ + "clan" + "localsend" + "enable" + ] "Importing the module will already enable the service.") + ]; + config = { + warnings = [ + "The clan.localsend module is deprecated and will be removed on 2025-07-15. + Please migrate to user-maintained configuration or the new equivalent clan services + (https://docs.clan.lol/reference/clanServices)." 
+ ]; + + clan.core.state.localsend.folders = [ + "/var/localsend" + ]; + environment.systemPackages = [ + (pkgs.callPackage ./localsend-ensure-config { + localsend = config.clan.localsend.package; + alias = config.clan.localsend.displayName; + }) + ]; + + networking.firewall.interfaces."zt+".allowedTCPPorts = [ 53317 ]; + networking.firewall.interfaces."zt+".allowedUDPPorts = [ 53317 ]; + + #TODO: This is currently needed because there is no ipv6 multicasting support yet + systemd.network.networks = lib.mkIf (cfg.ipv4Addr != null) { + "09-zerotier" = { + networkConfig = { + Address = cfg.ipv4Addr; + }; + }; + }; + }; +} diff --git a/clanModules/matrix-synapse/README.md b/clanModules/matrix-synapse/README.md new file mode 100644 index 000000000..70343a4e5 --- /dev/null +++ b/clanModules/matrix-synapse/README.md @@ -0,0 +1,3 @@ +--- +description = "A federated messaging server with end-to-end encryption." +--- diff --git a/clanModules/matrix-synapse/default.nix b/clanModules/matrix-synapse/default.nix new file mode 100644 index 000000000..a29b413f6 --- /dev/null +++ b/clanModules/matrix-synapse/default.nix @@ -0,0 +1,209 @@ +{ + config, + lib, + pkgs, + ... +}: +let + cfg = config.clan.matrix-synapse; + element-web = + pkgs.runCommand "element-web-with-config" { nativeBuildInputs = [ pkgs.buildPackages.jq ]; } + '' + cp -r ${pkgs.element-web} $out + chmod -R u+w $out + jq '."default_server_config"."m.homeserver" = { "base_url": "https://${cfg.app_domain}:443", "server_name": "${cfg.server_tld}" }' \ + > $out/config.json < ${pkgs.element-web}/config.json + ln -s $out/config.json $out/config.${cfg.app_domain}.json + ''; +in +# FIXME: This was taken from upstream. 
Drop this when our patch is upstream +{ + options.services.matrix-synapse.package = lib.mkOption { readOnly = false; }; + options.clan.matrix-synapse = { + server_tld = lib.mkOption { + type = lib.types.str; + description = "The address that is suffixed after your username i.e @alice:example.com"; + example = "example.com"; + }; + + app_domain = lib.mkOption { + type = lib.types.str; + description = "The matrix server hostname also serves the element client"; + example = "matrix.example.com"; + }; + + users = lib.mkOption { + default = { }; + type = lib.types.attrsOf ( + lib.types.submodule ( + { name, ... }: + { + options = { + name = lib.mkOption { + type = lib.types.str; + default = name; + description = "The name of the user"; + }; + + admin = lib.mkOption { + type = lib.types.bool; + default = false; + description = "Whether the user should be an admin"; + }; + }; + } + ) + ); + description = "A list of users. Not that only new users will be created and existing ones are not modified."; + example.alice = { + admin = true; + }; + }; + }; + imports = [ + ../postgresql + (lib.mkRemovedOptionModule [ + "clan" + "matrix-synapse" + "enable" + ] "Importing the module will already enable the service.") + ../nginx + ]; + config = { + services.matrix-synapse = { + enable = true; + settings = { + server_name = cfg.server_tld; + database = { + args.user = "matrix-synapse"; + args.database = "matrix-synapse"; + name = "psycopg2"; + }; + turn_uris = [ + "turn:turn.matrix.org?transport=udp" + "turn:turn.matrix.org?transport=tcp" + ]; + registration_shared_secret_path = "/run/synapse-registration-shared-secret"; + listeners = [ + { + port = 8008; + bind_addresses = [ "::1" ]; + type = "http"; + tls = false; + x_forwarded = true; + resources = [ + { + names = [ "client" ]; + compress = true; + } + { + names = [ "federation" ]; + compress = false; + } + ]; + } + ]; + }; + }; + + clan.postgresql.users.matrix-synapse = { }; + 
clan.postgresql.databases.matrix-synapse.create.options = { + TEMPLATE = "template0"; + LC_COLLATE = "C"; + LC_CTYPE = "C"; + ENCODING = "UTF8"; + OWNER = "matrix-synapse"; + }; + clan.postgresql.databases.matrix-synapse.restore.stopOnRestore = [ "matrix-synapse" ]; + + clan.core.vars.generators = + { + "matrix-synapse" = { + files."synapse-registration_shared_secret" = { }; + runtimeInputs = with pkgs; [ + coreutils + pwgen + ]; + migrateFact = "matrix-synapse"; + script = '' + echo -n "$(pwgen -s 32 1)" > "$out"/synapse-registration_shared_secret + ''; + }; + } + // lib.mapAttrs' ( + name: user: + lib.nameValuePair "matrix-password-${user.name}" { + files."matrix-password-${user.name}" = { }; + migrateFact = "matrix-password-${user.name}"; + runtimeInputs = with pkgs; [ xkcdpass ]; + script = '' + xkcdpass -n 4 -d - > "$out"/${lib.escapeShellArg "matrix-password-${user.name}"} + ''; + } + ) cfg.users; + + systemd.services.matrix-synapse = + let + usersScript = + '' + while ! ${pkgs.netcat}/bin/nc -z -v ::1 8008; do + if ! 
kill -0 "$MAINPID"; then exit 1; fi + sleep 1; + done + '' + + lib.concatMapStringsSep "\n" (user: '' + # only create user if it doesn't exist + /run/current-system/sw/bin/matrix-synapse-register_new_matrix_user --exists-ok --password-file ${ + config.clan.core.vars.generators."matrix-password-${user.name}".files."matrix-password-${user.name}".path + } --user "${user.name}" ${if user.admin then "--admin" else "--no-admin"} + '') (lib.attrValues cfg.users); + in + { + path = [ pkgs.curl ]; + serviceConfig.ExecStartPre = lib.mkBefore [ + "+${pkgs.coreutils}/bin/install -o matrix-synapse -g matrix-synapse ${ + lib.escapeShellArg + config.clan.core.vars.generators.matrix-synapse.files."synapse-registration_shared_secret".path + } /run/synapse-registration-shared-secret" + ]; + serviceConfig.ExecStartPost = [ + ''+${pkgs.writeShellScript "matrix-synapse-create-users" usersScript}'' + ]; + }; + + services.nginx = { + enable = true; + virtualHosts = { + "${cfg.server_tld}" = { + locations."= /.well-known/matrix/server".extraConfig = '' + add_header Content-Type application/json; + return 200 '${builtins.toJSON { "m.server" = "${cfg.app_domain}:443"; }}'; + ''; + locations."= /.well-known/matrix/client".extraConfig = '' + add_header Content-Type application/json; + add_header Access-Control-Allow-Origin *; + return 200 '${ + builtins.toJSON { + "m.homeserver" = { + "base_url" = "https://${cfg.app_domain}"; + }; + "m.identity_server" = { + "base_url" = "https://vector.im"; + }; + } + }'; + ''; + forceSSL = true; + enableACME = true; + }; + "${cfg.app_domain}" = { + forceSSL = true; + enableACME = true; + locations."/".root = element-web; + locations."/_matrix".proxyPass = "http://localhost:8008"; # TODO: We should make the port configurable + locations."/_synapse".proxyPass = "http://localhost:8008"; + }; + }; + }; + }; +} diff --git a/clanModules/moonlight/README.md b/clanModules/moonlight/README.md new file mode 100644 index 000000000..7e96ad597 --- /dev/null +++ 
b/clanModules/moonlight/README.md @@ -0,0 +1,5 @@ +--- +description = "A desktop streaming client optimized for remote gaming and synchronized movie viewing." +--- + +**Warning**: This module was written with our VM integration in mind and likely won't work outside of this context. It will be generalized in the future. diff --git a/clanModules/moonlight/default.nix b/clanModules/moonlight/default.nix new file mode 100644 index 000000000..5f6d96232 --- /dev/null +++ b/clanModules/moonlight/default.nix @@ -0,0 +1,91 @@ +{ pkgs, config, ... }: +let + ms-accept = pkgs.callPackage ../../pkgs/moonlight-sunshine-accept { }; + defaultPort = 48011; +in +{ + warnings = [ + "The clan.moonlight module is deprecated and will be removed on 2025-07-15. Please migrate to user-maintained configuration." + ]; + + hardware.opengl.enable = true; + environment.systemPackages = [ + pkgs.moonlight-qt + ms-accept + ]; + + systemd.tmpfiles.rules = [ + "d '/var/lib/moonlight' 0770 'user' 'users' - -" + "C '/var/lib/moonlight/moonlight.cert' 0644 'user' 'users' - ${
 config.clan.core.vars.generators.moonlight.files."moonlight.cert".path or "" + }" + "C '/var/lib/moonlight/moonlight.key' 0644 'user' 'users' - ${
 config.clan.core.vars.generators.moonlight.files."moonlight.key".path or "" + }" + ]; + + systemd.user.services.init-moonlight = { + enable = false; + description = "Initializes moonlight"; + wantedBy = [ "graphical-session.target" ]; + script = '' + ${ms-accept}/bin/moonlight-sunshine-accept moonlight init-config --key /var/lib/moonlight/moonlight.key --cert /var/lib/moonlight/moonlight.cert + ''; + serviceConfig = { + user = "user"; + Type = "oneshot"; + WorkingDirectory = "/home/user/"; + RunTimeDirectory = "moonlight"; + TimeoutSec = "infinity"; + Restart = "on-failure"; + RemainAfterExit = true; + ReadOnlyPaths = [ + "/var/lib/moonlight/moonlight.key" + "/var/lib/moonlight/moonlight.cert" + ]; + }; + }; + + systemd.user.services.moonlight-join = { + description = "Join sunshine
hosts"; + script = ''${ms-accept}/bin/moonlight-sunshine-accept moonlight join --port ${builtins.toString defaultPort} --cert '${ + config.clan.core.vars.generators.moonlight.files."moonlight.cert".value or "" + }' --host fd2e:25da:6035:c98f:cd99:93e0:b9b8:9ca1''; + serviceConfig = { + Type = "oneshot"; + TimeoutSec = "infinity"; + Restart = "on-failure"; + ReadOnlyPaths = [ + "/var/lib/moonlight/moonlight.key" + "/var/lib/moonlight/moonlight.cert" + ]; + }; + }; + systemd.user.timers.moonlight-join = { + description = "Join sunshine hosts"; + wantedBy = [ "timers.target" ]; + timerConfig = { + OnUnitActiveSec = "5min"; + OnBootSec = "0min"; + Persistent = true; + Unit = "moonlight-join.service"; + }; + }; + + clan.core.vars.generators.moonlight = { + migrateFact = "moonlight"; + files."moonlight.key" = { }; + files."moonlight.cert" = { }; + files."moonlight.cert".secret = false; + runtimeInputs = [ + pkgs.coreutils + ms-accept + ]; + script = '' + moonlight-sunshine-accept moonlight init + mv credentials/cakey.pem "$out"/moonlight.key + cp credentials/cacert.pem "$out"/moonlight.cert + mv credentials/cacert.pem "$out"/moonlight.cert + ''; + }; +} diff --git a/clanModules/mumble/README.md b/clanModules/mumble/README.md new file mode 100644 index 000000000..a33b99714 --- /dev/null +++ b/clanModules/mumble/README.md @@ -0,0 +1,19 @@ +--- +description = "Open Source, Low Latency, High Quality Voice Chat." +categories = ["Audio", "Social"] +features = [ "inventory" ] + +[constraints] +roles.server.min = 1 +--- + +The mumble clan module gives you: + +- True low latency voice communication. +- Secure, authenticated encryption. +- Free software. +- Backed by a large and active open-source community. + +This all set up in a way that allows peer-to-peer hosting. +Every machine inside the clan can be a host for mumble, +and thus it doesn't matter who in the network is online - as long as two people are online they are able to chat with each other. 
diff --git a/clanModules/mumble/default.nix b/clanModules/mumble/default.nix new file mode 100644 index 000000000..ab24e1e49 --- /dev/null +++ b/clanModules/mumble/default.nix @@ -0,0 +1,6 @@ +# Dont import this file +# It is only here for backwards compatibility. +# Dont author new modules with this file. +{ + imports = [ ./roles/server.nix ]; +} diff --git a/clanModules/mumble/roles/mumble-populate-channels.py b/clanModules/mumble/roles/mumble-populate-channels.py new file mode 100644 index 000000000..fe46448ae --- /dev/null +++ b/clanModules/mumble/roles/mumble-populate-channels.py @@ -0,0 +1,247 @@ +import argparse +import json +import sqlite3 +from pathlib import Path + + +def ensure_config(path: Path, db_path: Path) -> None: + # Default JSON structure if the file doesn't exist + default_json = { + "misc": { + "audio_wizard_has_been_shown": True, + "database_location": str(db_path), + "viewed_server_ping_consent_message": True, + }, + "settings_version": 1, + } + + # Check if the file exists + if path.exists(): + data = json.loads(path.read_text()) + else: + data = default_json + # Create the file with default JSON structure + with path.open("w") as file: + json.dump(data, file, indent=4) + + # TODO: make sure to only update the diff + updated_data = {**default_json, **data} + + # Write the modified JSON object back to the file + with path.open("w") as file: + json.dump(updated_data, file, indent=4) + + +def initialize_database(db_location: str) -> None: + """ + Initializes the database. If the database or the servers table does not exist, it creates them. 
+ + :param db_location: The path to the SQLite database + """ + conn = sqlite3.connect(db_location) + try: + cursor = conn.cursor() + + # Create the servers table if it doesn't exist + cursor.execute(""" + CREATE TABLE IF NOT EXISTS servers ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT NOT NULL, + hostname TEXT NOT NULL, + port INTEGER NOT NULL, + username TEXT NOT NULL, + password TEXT NOT NULL, + url TEXT + ) + """) + + # Commit the changes + conn.commit() + + except sqlite3.Error as e: + print(f"An error occurred while initializing the database: {e}") + finally: + conn.close() + + +def initialize_certificates( + db_location: str, hostname: str, port: str, digest: str +) -> None: + # Connect to the SQLite database + conn = sqlite3.connect(db_location) + + try: + # Create a cursor object + cursor = conn.cursor() + + # TODO: check if cert already there + # if server_check(cursor, name, hostname): + # print( + # f"Server with name '{name}' and hostname '{hostname}' already exists." + # ) + # return + + # SQL command to insert data into the servers table + insert_query = """ + INSERT INTO cert (hostname, port, digest) + VALUES (?, ?, ?) 
+ """ + + # Data to be inserted + data = (hostname, port, digest) + + # Execute the insert command with the provided data + cursor.execute(insert_query, data) + + # Commit the changes + conn.commit() + + print("Data has been successfully inserted.") + except sqlite3.Error as e: + print(f"An error occurred: {e}") + finally: + # Close the connection + conn.close() + + +def calculate_digest(cert: str) -> str: + from cryptography import x509 + from cryptography.hazmat.backends import default_backend + from cryptography.hazmat.primitives import hashes + + cert = cert.strip() + cert = cert.encode("utf-8") + cert = x509.load_pem_x509_certificate(cert, default_backend()) + digest = cert.fingerprint(hashes.SHA1()).hex() + return digest + + +def server_check(cursor: str, name: str, hostname: str) -> bool: + """ + Check if a server with the given name and hostname already exists. + + :param cursor: The database cursor + :param name: The name of the server + :param hostname: The hostname of the server + :return: True if the server exists, False otherwise + """ + check_query = """ + SELECT 1 FROM servers WHERE name = ? AND hostname = ? + """ + cursor.execute(check_query, (name, hostname)) + return cursor.fetchone() is not None + + +def insert_server( + name: str, + hostname: str, + port: str, + username: str, + password: str, + url: str, + db_location: str, +) -> None: + """ + Inserts a new server record into the servers table. + + :param name: The name of the server + :param hostname: The hostname of the server + :param port: The port number + :param username: The username + :param password: The password + :param url: The URL + """ + # Connect to the SQLite database + conn = sqlite3.connect(db_location) + + try: + # Create a cursor object + cursor = conn.cursor() + + if server_check(cursor, name, hostname): + print( + f"Server with name '{name}' and hostname '{hostname}' already exists." 
+ ) + return + + # SQL command to insert data into the servers table + insert_query = """ + INSERT INTO servers (name, hostname, port, username, password, url) + VALUES (?, ?, ?, ?, ?, ?) + """ + + # Data to be inserted + data = (name, hostname, port, username, password, url) + + # Execute the insert command with the provided data + cursor.execute(insert_query, data) + + # Commit the changes + conn.commit() + + print("Data has been successfully inserted.") + except sqlite3.Error as e: + print(f"An error occurred: {e}") + finally: + # Close the connection + conn.close() + + +if __name__ == "__main__": + port = 64738 + password = "" + url = None + + parser = argparse.ArgumentParser( + prog="initialize_mumble", + ) + + subparser = parser.add_subparsers(dest="certificates") + # cert_parser = subparser.add_parser("certificates") + + parser.add_argument("--cert") + parser.add_argument("--digest") + parser.add_argument("--machines") + parser.add_argument("--servers") + parser.add_argument("--username") + parser.add_argument("--db-location") + parser.add_argument("--ensure-config", type=Path) + args = parser.parse_args() + + print(args) + + if args.ensure_config: + ensure_config(args.ensure_config, args.db_location) + print("Initialized config") + exit(0) + + if args.servers: + print(args.servers) + servers = json.loads(f"{args.servers}") + db_location = args.db_location + for server in servers: + digest = calculate_digest(server.get("value")) + name = server.get("name") + initialize_certificates(db_location, name, port, digest) + print("Initialized certificates") + exit(0) + + initialize_database(args.db_location) + + # Insert the server into the database + print(args.machines) + machines = json.loads(f"{args.machines}") + print(machines) + print(list(machines)) + + for machine in list(machines): + print(f"Inserting {machine}.") + insert_server( + machine, + machine, + port, + args.username, + password, + url, + args.db_location, + ) diff --git 
a/clanModules/mumble/roles/server.nix b/clanModules/mumble/roles/server.nix new file mode 100644 index 000000000..704744eec --- /dev/null +++ b/clanModules/mumble/roles/server.nix @@ -0,0 +1,150 @@ +{ + lib, + config, + pkgs, + ... +}: +let + dir = config.clan.core.settings.directory; + # TODO: this should actually use the inventory to figure out which machines to use. + machineDir = dir + "/vars/per-machine"; + machinesFileSet = builtins.readDir machineDir; + machines = lib.mapAttrsToList (name: _: name) machinesFileSet; + machineJson = builtins.toJSON machines; + certificateMachinePath = machines: machineDir + "/${machines}" + "/mumble/mumble-cert/value"; + certificatesUnchecked = builtins.map ( + machine: + let + fullPath = certificateMachinePath machine; + in + if builtins.pathExists fullPath then machine else null + ) machines; + certificate = lib.filter (machine: machine != null) certificatesUnchecked; + machineCert = builtins.map ( + machine: (lib.nameValuePair machine (builtins.readFile (certificateMachinePath machine))) + ) certificate; + machineCertJson = builtins.toJSON machineCert; + +in +{ + options.clan.services.mumble = { + user = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + example = "alice"; + description = "The user mumble should be set up for."; + }; + }; + + config = { + + warnings = [ + "The clan.mumble module is deprecated and will be removed on 2025-07-15. + Please migrate to user-maintained configuration or the new equivalent clan services + (https://docs.clan.lol/reference/clanServices)." 
+ ]; + + services.murmur = { + enable = true; + logDays = -1; + registerName = config.clan.core.settings.machine.name; + openFirewall = true; + bonjour = true; + sslKey = "/var/lib/murmur/sslKey"; + sslCert = "/var/lib/murmur/sslCert"; + }; + + clan.core.state.mumble.folders = [ + "/var/lib/mumble" + "/var/lib/murmur" + ]; + + systemd.tmpfiles.rules = [ + "d '/var/lib/mumble' 0770 '${config.clan.services.mumble.user}' 'users' - -" + ]; + + systemd.tmpfiles.settings."murmur" = { + "/var/lib/murmur/sslKey" = { + C.argument = config.clan.core.vars.generators.mumble.files.mumble-key.path; + Z = { + mode = "0400"; + user = "murmur"; + }; + }; + "/var/lib/murmur/sslCert" = { + C.argument = config.clan.core.vars.generators.mumble.files.mumble-cert.path; + Z = { + mode = "0400"; + user = "murmur"; + }; + }; + }; + + environment.systemPackages = + let + mumbleCfgDir = "/var/lib/mumble"; + mumbleDatabasePath = "${mumbleCfgDir}/mumble.sqlite"; + mumbleCfgPath = "/var/lib/mumble/mumble_settings.json"; + populate-channels = pkgs.writers.writePython3 "mumble-populate-channels" { + libraries = [ + pkgs.python3Packages.cryptography + pkgs.python3Packages.pyopenssl + ]; + flakeIgnore = [ + # We don't live in the dark ages anymore. + # Languages like Python that are whitespace heavy will overrun + # 79 characters.. 
+ "E501" + ]; + } (builtins.readFile ./mumble-populate-channels.py); + mumble = pkgs.writeShellScriptBin "mumble" '' + set -xeu + mkdir -p ${mumbleCfgDir} + pushd "${mumbleCfgDir}" + XDG_DATA_HOME=${mumbleCfgDir} + XDG_DATA_DIR=${mumbleCfgDir} + ${populate-channels} --ensure-config '${mumbleCfgPath}' --db-location ${mumbleDatabasePath} + ${populate-channels} --machines '${machineJson}' --username ${config.clan.core.settings.machine.name} --db-location ${mumbleDatabasePath} + ${populate-channels} --servers '${machineCertJson}' --username ${config.clan.core.settings.machine.name} --db-location ${mumbleDatabasePath} --cert True + ${pkgs.mumble}/bin/mumble --config ${mumbleCfgPath} "$@" + popd + ''; + in + [ mumble ]; + + clan.core.vars.generators.mumble = { + migrateFact = "mumble"; + files.mumble-key = { }; + files.mumble-cert.secret = false; + runtimeInputs = [ + pkgs.coreutils + pkgs.openssl + ]; + script = '' + openssl genrsa -out "$out/mumble-key" 2048 + + cat > mumble-cert.conf < "$out"/pubkey + mycelium inspect --key-file "$out"/key --json | jq -r .address > "$out"/ip + ''; + }; + +} diff --git a/clanModules/nginx/README.md b/clanModules/nginx/README.md new file mode 100644 index 000000000..4c8ca5a36 --- /dev/null +++ b/clanModules/nginx/README.md @@ -0,0 +1,3 @@ +--- +description = "Good defaults for the nginx webserver" +--- diff --git a/clanModules/nginx/default.nix b/clanModules/nginx/default.nix new file mode 100644 index 000000000..b00a94f81 --- /dev/null +++ b/clanModules/nginx/default.nix @@ -0,0 +1,69 @@ +{ config, lib, ... }: + +{ + + imports = [ + (lib.mkRemovedOptionModule [ + "clan" + "nginx" + "enable" + ] "Importing the module will already enable the service.") + + ]; + options = { + clan.nginx.acme.email = lib.mkOption { + type = lib.types.str; + description = '' + Email address for account creation and correspondence from the CA. + It is recommended to use the same email for all certs to avoid account + creation limits. 
+ ''; + }; + }; + config = { + security.acme.acceptTerms = true; + security.acme.defaults.email = config.clan.nginx.acme.email; + + networking.firewall.allowedTCPPorts = [ + 443 + 80 + ]; + + services.nginx = { + enable = true; + + statusPage = lib.mkDefault true; + recommendedBrotliSettings = lib.mkDefault true; + recommendedGzipSettings = lib.mkDefault true; + recommendedOptimisation = lib.mkDefault true; + recommendedProxySettings = lib.mkDefault true; + recommendedTlsSettings = lib.mkDefault true; + recommendedZstdSettings = lib.mkDefault true; + + # Nginx sends all the access logs to /var/log/nginx/access.log by default. + # instead of going to the journal! + commonHttpConfig = "access_log syslog:server=unix:/dev/log;"; + + resolver.addresses = + let + isIPv6 = addr: builtins.match ".*:.*:.*" addr != null; + escapeIPv6 = addr: if isIPv6 addr then "[${addr}]" else addr; + cloudflare = [ + "1.1.1.1" + "2606:4700:4700::1111" + ]; + resolvers = + if config.networking.nameservers == [ ] then cloudflare else config.networking.nameservers; + in + map escapeIPv6 resolvers; + + sslDhparam = config.security.dhparams.params.nginx.path; + }; + + security.dhparams = { + enable = true; + params.nginx = { }; + }; + }; + +} diff --git a/clanModules/packages/README.md b/clanModules/packages/README.md new file mode 100644 index 000000000..3db13ef0e --- /dev/null +++ b/clanModules/packages/README.md @@ -0,0 +1,5 @@ +--- +description = "Define package sets from nixpkgs and install them on one or more machines" +categories = ["System"] +features = [ "inventory", "deprecated" ] +--- diff --git a/clanModules/packages/default.nix b/clanModules/packages/default.nix new file mode 100644 index 000000000..ed6af3368 --- /dev/null +++ b/clanModules/packages/default.nix @@ -0,0 +1,6 @@ +# Dont import this file +# It is only here for backwards compatibility. +# Dont author new modules with this file. 
+{ + imports = [ ./roles/default.nix ]; +} diff --git a/clanModules/packages/roles/default.nix b/clanModules/packages/roles/default.nix new file mode 100644 index 000000000..9aa2864fe --- /dev/null +++ b/clanModules/packages/roles/default.nix @@ -0,0 +1,25 @@ +{ + config, + lib, + pkgs, + ... +}: +{ + options.clan.packages = { + packages = lib.mkOption { + type = lib.types.listOf lib.types.str; + description = "The packages to install on the machine"; + }; + }; + config = { + + warnings = [ + "The clan.packages module is deprecated and will be removed on 2025-07-15. + Please migrate to user-maintained configuration or the new equivalent clan services + (https://docs.clan.lol/reference/clanServices)." + ]; + environment.systemPackages = map ( + pName: lib.getAttrFromPath (lib.splitString "." pName) pkgs + ) config.clan.packages.packages; + }; +} diff --git a/clanModules/postgresql/README.md b/clanModules/postgresql/README.md new file mode 100644 index 000000000..86108a33b --- /dev/null +++ b/clanModules/postgresql/README.md @@ -0,0 +1,3 @@ +--- +description = "A free and open-source relational database management system (RDBMS) emphasizing extensibility and SQL compliance." +--- diff --git a/clanModules/postgresql/default.nix b/clanModules/postgresql/default.nix new file mode 100644 index 000000000..49a98b37e --- /dev/null +++ b/clanModules/postgresql/default.nix @@ -0,0 +1,224 @@ +{ + pkgs, + lib, + config, + ... 
+}: +let + createDatabaseState = + db: + let + folder = "/var/backup/postgres/${db.name}"; + current = "${folder}/pg-dump"; + compression = lib.optionalString (lib.versionAtLeast config.services.postgresql.package.version "16") "--compress=zstd"; + in + { + folders = [ folder ]; + preBackupScript = '' + export PATH=${ + lib.makeBinPath [ + config.services.postgresql.package + config.systemd.package + pkgs.coreutils + pkgs.util-linux + pkgs.zstd + ] + } + while [[ "$(systemctl is-active postgresql)" == activating ]]; do + sleep 1 + done + + mkdir -p "${folder}" + runuser -u postgres -- pg_dump ${compression} --dbname=${db.name} -Fc -c > "${current}.tmp" + mv "${current}.tmp" ${current} + ''; + postRestoreScript = '' + export PATH=${ + lib.makeBinPath [ + config.services.postgresql.package + config.systemd.package + pkgs.coreutils + pkgs.util-linux + pkgs.zstd + pkgs.gnugrep + ] + } + while [[ "$(systemctl is-active postgresql)" == activating ]]; do + sleep 1 + done + echo "Waiting for postgres to be ready..." + while ! runuser -u postgres -- psql --port=${builtins.toString config.services.postgresql.settings.port} -d postgres -c "" ; do + if ! 
systemctl is-active postgresql; then exit 1; fi + sleep 0.1 + done + + if [[ -e "${current}" ]]; then + ( + systemctl stop ${lib.concatStringsSep " " db.restore.stopOnRestore} + trap "systemctl start ${lib.concatStringsSep " " db.restore.stopOnRestore}" EXIT + + mkdir -p "${folder}" + if runuser -u postgres -- psql -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${db.name}'" | grep -q 1; then + runuser -u postgres -- dropdb "${db.name}" + fi + runuser -u postgres -- pg_restore -C -d postgres "${current}" + ) + else + echo No database backup found, skipping restore + fi + ''; + }; + + createDatabase = db: '' + CREATE DATABASE "${db.name}" ${ + lib.concatStringsSep " " ( + lib.mapAttrsToList (name: value: "${name} = '${value}'") db.create.options + ) + } + ''; + cfg = config.clan.postgresql; + + userClauses = lib.mapAttrsToList ( + _: user: + ''$PSQL -tAc "SELECT 1 FROM pg_roles WHERE rolname='${user.name}'" | grep -q 1 || $PSQL -tAc 'CREATE USER "${user.name}"' '' + ) cfg.users; + databaseClauses = lib.mapAttrsToList ( + name: db: + lib.optionalString db.create.enable ''$PSQL -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${name}'" | grep -q 1 || $PSQL -d postgres -c ${lib.escapeShellArg (createDatabase db)} '' + ) cfg.databases; +in +{ + options.clan.postgresql = { + # we are reimplemeting ensureDatabase and ensureUser options here to allow to create databases with options + databases = lib.mkOption { + description = "Databases to create"; + default = { }; + type = lib.types.attrsOf ( + lib.types.submodule ( + { name, ... 
}: + { + options = { + name = lib.mkOption { + type = lib.types.str; + default = name; + description = "Database name."; + }; + service = lib.mkOption { + type = lib.types.str; + default = name; + description = "Service name that we associate with the database."; + }; + # set to false, in case the upstream module uses ensureDatabase option + create.enable = lib.mkOption { + type = lib.types.bool; + default = true; + description = "Create the database if it does not exist."; + }; + create.options = lib.mkOption { + description = "Options to pass to the CREATE DATABASE command."; + type = lib.types.lazyAttrsOf lib.types.str; + default = { }; + example = { + TEMPLATE = "template0"; + LC_COLLATE = "C"; + LC_CTYPE = "C"; + ENCODING = "UTF8"; + OWNER = "foo"; + }; + }; + restore.stopOnRestore = lib.mkOption { + type = lib.types.listOf lib.types.str; + default = [ ]; + description = "List of systemd services to stop before restoring the database."; + }; + }; + } + ) + ); + }; + users = lib.mkOption { + description = "Users to create"; + default = { }; + type = lib.types.attrsOf ( + lib.types.submodule ( + { name, ... }: + { + options.name = lib.mkOption { + description = "User name"; + type = lib.types.str; + default = name; + }; + } + ) + ); + }; + }; + config = { + services.postgresql.settings = { + wal_level = "replica"; + max_wal_senders = 3; + }; + + services.postgresql.enable = true; + # We are duplicating a bit the upstream module but allow to create databases with options + systemd.services.postgresql.postStart = '' + PSQL="psql --port=${builtins.toString config.services.postgresql.settings.port}" + + while ! $PSQL -d postgres -c "" 2> /dev/null; do + if ! 
kill -0 "$MAINPID"; then exit 1; fi + sleep 0.1 + done + ${lib.concatStringsSep "\n" userClauses} + ${lib.concatStringsSep "\n" databaseClauses} + ''; + + clan.core.state = lib.mapAttrs' ( + _: db: lib.nameValuePair db.service (createDatabaseState db) + ) config.clan.postgresql.databases; + + environment.systemPackages = builtins.map ( + db: + let + folder = "/var/backup/postgres/${db.name}"; + current = "${folder}/pg-dump"; + in + pkgs.writeShellScriptBin "postgres-db-restore-command-${db.name}" '' + export PATH=${ + lib.makeBinPath [ + config.services.postgresql.package + config.systemd.package + pkgs.coreutils + pkgs.util-linux + pkgs.zstd + pkgs.gnugrep + ] + } + while [[ "$(systemctl is-active postgresql)" == activating ]]; do + sleep 1 + done + echo "Waiting for postgres to be ready..." + while ! runuser -u postgres -- psql --port=${builtins.toString config.services.postgresql.settings.port} -d postgres -c "" ; do + if ! systemctl is-active postgresql; then exit 1; fi + sleep 0.1 + done + + if [[ -e "${current}" ]]; then + ( + ${lib.optionalString (db.restore.stopOnRestore != [ ]) '' + systemctl stop ${builtins.toString db.restore.stopOnRestore} + trap "systemctl start ${builtins.toString db.restore.stopOnRestore}" EXIT + ''} + + mkdir -p "${folder}" + if runuser -u postgres -- psql -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${db.name}'" | grep -q 1; then + runuser -u postgres -- dropdb "${db.name}" + fi + runuser -u postgres -- pg_restore -C -d postgres "${current}" + ) + else + echo No database backup found, skipping restore + fi + '' + ) (builtins.attrValues config.clan.postgresql.databases); + }; +} diff --git a/clanModules/root-password/README.md b/clanModules/root-password/README.md new file mode 100644 index 000000000..58e7cf467 --- /dev/null +++ b/clanModules/root-password/README.md @@ -0,0 +1,20 @@ +--- +description = "Automatically generates and configures a password for the root user." 
+categories = ["System"] +features = ["inventory", "deprecated"] +--- + +This module is deprecated and will be removed in a future release. Its functionality has been replaced by the user-password service. + +After the system was installed/deployed the following command can be used to display the root-password: + +```bash +clan vars get [machine_name] root-password/root-password +``` + +See also: [Vars](../../guides/vars-backend.md) + +To regenerate the password run: +``` +clan vars generate --regenerate [machine_name] --generator root-password +``` diff --git a/clanModules/root-password/default.nix b/clanModules/root-password/default.nix new file mode 100644 index 000000000..ed6af3368 --- /dev/null +++ b/clanModules/root-password/default.nix @@ -0,0 +1,6 @@ +# Don't import this file +# It is only here for backwards compatibility. +# Don't author new modules with this file. +{ + imports = [ ./roles/default.nix ]; +} diff --git a/clanModules/root-password/roles/default.nix b/clanModules/root-password/roles/default.nix new file mode 100644 index 000000000..e9d921d39 --- /dev/null +++ b/clanModules/root-password/roles/default.nix @@ -0,0 +1,51 @@ +{ + _class, + pkgs, + config, + lib, + ... +}: +{ + + warnings = [ + "The clan.root-password module is deprecated and will be removed on 2025-07-15. + Please migrate to user-maintained configuration or the new equivalent clan services + (https://docs.clan.lol/reference/clanServices)."
+ ]; + + users.mutableUsers = false; + users.users.root.hashedPasswordFile = + config.clan.core.vars.generators.root-password.files.password-hash.path; + + clan.core.vars.generators.root-password = { + files.password-hash = + { + neededFor = "users"; + } + // (lib.optionalAttrs (_class == "nixos") { + restartUnits = lib.optional (config.services.userborn.enable) "userborn.service"; + }); + files.password = { + deploy = false; + }; + migrateFact = "root-password"; + runtimeInputs = [ + pkgs.coreutils + pkgs.mkpasswd + pkgs.xkcdpass + ]; + prompts.password.type = "hidden"; + prompts.password.persist = true; + prompts.password.description = "You can autogenerate a password, if you leave this prompt blank."; + + script = '' + prompt_value="$(cat "$prompts"/password)" + if [[ -n "''${prompt_value-}" ]]; then + echo "$prompt_value" | tr -d "\n" > "$out"/password + else + xkcdpass --numwords 4 --delimiter - --count 1 | tr -d "\n" > "$out"/password + fi + mkpasswd -s -m sha-512 < "$out"/password | tr -d "\n" > "$out"/password-hash + ''; + }; +} diff --git a/clanModules/single-disk/README.md b/clanModules/single-disk/README.md new file mode 100644 index 000000000..a948c44bf --- /dev/null +++ b/clanModules/single-disk/README.md @@ -0,0 +1,43 @@ +--- +description = "Configures partitioning of the main disk" +categories = ["System"] +features = [ "inventory" ] +--- +# Primary Disk Layout + +A module for the "disk-layout" category MUST be chosen. + +There is exactly one slot for this type of module in the UI, if you don't fill the slot, your machine cannot boot + +This module is a good choice for most machines. 
In the future clan will offer a broader choice of disk-layouts + +The UI will ask for the options of this module: + +`device: "/dev/null"` + +# Usage example + +`inventory.json` +```json +"services": { + "single-disk": { + "default": { + "meta": { + "name": "single-disk" + }, + "roles": { + "default": { + "machines": ["jon"] + } + }, + "machines": { + "jon": { + "config": { + "device": "/dev/null" + } + } + } + } + } +} +``` diff --git a/clanModules/single-disk/default.nix b/clanModules/single-disk/default.nix new file mode 100644 index 000000000..8fdf356eb --- /dev/null +++ b/clanModules/single-disk/default.nix @@ -0,0 +1,3 @@ +{ + imports = [ ./roles/default.nix ]; +} diff --git a/clanModules/single-disk/roles/default.nix b/clanModules/single-disk/roles/default.nix new file mode 100644 index 000000000..0a6faf2bb --- /dev/null +++ b/clanModules/single-disk/roles/default.nix @@ -0,0 +1,56 @@ +{ lib, config, ... }: +{ + options.clan.single-disk = { + device = lib.mkOption { + default = null; + type = lib.types.nullOr lib.types.str; + description = "The primary disk device to install the system on"; + }; + }; + config = { + warnings = [ + "clanModules.single-disk is deprecated. Please copy the disko config from the module into your machine config." 
+ ]; + + boot.loader.grub.efiSupport = lib.mkDefault true; + boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true; + disko.devices = { + disk = { + main = { + type = "disk"; + # This is set through the UI + device = config.clan.single-disk.device; + + content = { + type = "gpt"; + partitions = { + boot = { + size = "1M"; + type = "EF02"; # for grub MBR + priority = 1; + }; + ESP = { + size = "512M"; + type = "EF00"; + content = { + type = "filesystem"; + format = "vfat"; + mountpoint = "/boot"; + mountOptions = [ "umask=0077" ]; + }; + }; + root = { + size = "100%"; + content = { + type = "filesystem"; + format = "ext4"; + mountpoint = "/"; + }; + }; + }; + }; + }; + }; + }; + }; +} diff --git a/clanModules/sshd/README.md b/clanModules/sshd/README.md new file mode 100644 index 000000000..3373af308 --- /dev/null +++ b/clanModules/sshd/README.md @@ -0,0 +1,11 @@ +--- +description = "Enables secure remote access to the machine over ssh." +categories = ["System", "Network"] +features = [ "inventory", "deprecated" ] +--- + +This module will setup the opensshd service. +It will generate a host key for each machine + + +## Roles diff --git a/clanModules/sshd/default.nix b/clanModules/sshd/default.nix new file mode 100644 index 000000000..ab24e1e49 --- /dev/null +++ b/clanModules/sshd/default.nix @@ -0,0 +1,6 @@ +# Dont import this file +# It is only here for backwards compatibility. +# Dont author new modules with this file. +{ + imports = [ ./roles/server.nix ]; +} diff --git a/clanModules/sshd/roles/client.nix b/clanModules/sshd/roles/client.nix new file mode 100644 index 000000000..bdd61dbe9 --- /dev/null +++ b/clanModules/sshd/roles/client.nix @@ -0,0 +1,6 @@ +{ ... 
}: +{ + imports = [ + ../shared.nix + ]; +} diff --git a/clanModules/state-version/README.md b/clanModules/state-version/README.md new file mode 100644 index 000000000..f8544f0de --- /dev/null +++ b/clanModules/state-version/README.md @@ -0,0 +1,18 @@ +--- +description = "Automatically generate the state version of the nixos installation." +features = [ "inventory", "deprecated" ] +--- + +This module generates the `system.stateVersion` of the nixos installation automatically. + +Options: [system.stateVersion](https://search.nixos.org/options?channel=unstable&show=system.stateVersion&from=0&size=50&sort=relevance&type=packages&query=stateVersion) + +Migration: +If you are already setting `system.stateVersion`, then import the module and then either let the automatic generation happen, or trigger the generation manually for the machine. The module will take the specified version, if one is already supplied through the config. +To manually generate the version for a specified machine run: + +``` +clan vars generate [MACHINE] +``` + +If the setting was already set you can then remove `system.stateVersion` from your machine configuration. For new machines, just import the module. diff --git a/clanModules/state-version/default.nix b/clanModules/state-version/default.nix new file mode 100644 index 000000000..ed6af3368 --- /dev/null +++ b/clanModules/state-version/default.nix @@ -0,0 +1,6 @@ +# Dont import this file +# It is only here for backwards compatibility. +# Dont author new modules with this file. +{ + imports = [ ./roles/default.nix ]; +} diff --git a/clanModules/state-version/roles/default.nix b/clanModules/state-version/roles/default.nix new file mode 100644 index 000000000..b164ca90a --- /dev/null +++ b/clanModules/state-version/roles/default.nix @@ -0,0 +1,28 @@ +{ config, lib, ... 
}: +let + var = config.clan.core.vars.generators.state-version.files.version or { }; +in +{ + + warnings = [ + '' + The clan.state-version service is deprecated and will be + removed on 2025-07-15 in favor of a nix option. + + Please migrate your configuration to use `clan.core.settings.state-version.enable = true` instead. + '' + ]; + + system.stateVersion = lib.mkDefault (lib.removeSuffix "\n" var.value); + + clan.core.vars.generators.state-version = { + files.version = { + secret = false; + value = lib.mkDefault config.system.nixos.release; + }; + runtimeInputs = [ ]; + script = '' + echo -n ${config.system.stateVersion} > "$out"/version + ''; + }; +} diff --git a/clanModules/static-hosts/README.md b/clanModules/static-hosts/README.md new file mode 100644 index 000000000..92a382202 --- /dev/null +++ b/clanModules/static-hosts/README.md @@ -0,0 +1,3 @@ +--- +description = "Statically configure the host names of machines based on their respective zerotier-ip." +--- diff --git a/clanModules/static-hosts/default.nix b/clanModules/static-hosts/default.nix new file mode 100644 index 000000000..e214d38d9 --- /dev/null +++ b/clanModules/static-hosts/default.nix @@ -0,0 +1,63 @@ +{ lib, config, ... 
}: +{ + options.clan.static-hosts = { + excludeHosts = lib.mkOption { + type = lib.types.listOf lib.types.str; + default = + if config.clan.static-hosts.topLevelDomain != "" then + [ ] + else + [ config.clan.core.settings.machine.name ]; + defaultText = lib.literalExpression '' + if config.clan.static-hosts.topLevelDomain != "" then + [ ] + else + [ config.clan.core.settings.machine.name ]; + ''; + description = "Hosts that should be excluded"; + }; + topLevelDomain = lib.mkOption { + type = lib.types.str; + default = ""; + description = "Top level domain to reach hosts"; + }; + }; + + config.networking.hosts = + let + dir = config.clan.core.settings.directory; + machineDir = "${dir}/vars/per-machine"; + zerotierIpMachinePath = machine: "${machineDir}/${machine}/zerotier/zerotier-ip/value"; + machinesFileSet = builtins.readDir machineDir; + machines = lib.mapAttrsToList (name: _: name) machinesFileSet; + networkIpsUnchecked = builtins.map ( + machine: + let + fullPath = zerotierIpMachinePath machine; + in + if builtins.pathExists fullPath then machine else null + ) machines; + networkIps = lib.filter (machine: machine != null) networkIpsUnchecked; + machinesWithIp = lib.filterAttrs (name: _: (lib.elem name networkIps)) machinesFileSet; + filteredMachines = lib.filterAttrs ( + name: _: !(lib.elem name config.clan.static-hosts.excludeHosts) + ) machinesWithIp; + in + lib.filterAttrs (_: value: value != null) ( + lib.mapAttrs' ( + machine: _: + let + path = zerotierIpMachinePath machine; + in + if builtins.pathExists path then + lib.nameValuePair (builtins.readFile path) ( + if (config.clan.static-hosts.topLevelDomain == "") then + [ machine ] + else + [ "${machine}.${config.clan.static-hosts.topLevelDomain}" ] + ) + else + { } + ) filteredMachines + ); +} diff --git a/clanModules/sunshine/README.md b/clanModules/sunshine/README.md new file mode 100644 index 000000000..a561d77a1 --- /dev/null +++ b/clanModules/sunshine/README.md @@ -0,0 +1,5 @@ +--- +description = "A 
desktop streaming server optimized for remote gaming and synchronized movie viewing." +--- + +**Warning**: This module was written with our VM integration in mind and likely won't work outside of this context. It will be generalized in the future. diff --git a/clanModules/sunshine/default.nix b/clanModules/sunshine/default.nix new file mode 100644 index 000000000..e9d664330 --- /dev/null +++ b/clanModules/sunshine/default.nix @@ -0,0 +1,203 @@ +{ + pkgs, + config, + lib, + ... +}: +let + ms-accept = pkgs.callPackage ../../pkgs/moonlight-sunshine-accept { }; + sunshineConfiguration = pkgs.writeText "sunshine.conf" '' + address_family = both + channels = 5 + pkey = /var/lib/sunshine/sunshine.key + cert = /var/lib/sunshine/sunshine.cert + file_state = /var/lib/sunshine/state.json + credentials_file = /var/lib/sunshine/credentials.json + ''; + listenPort = 48011; +in +{ + warnings = [ + "The clan.sunshine module is deprecated and will be removed on 2025-07-15. Please migrate to user-maintained configuration." + ]; + + networking.firewall = { + allowedTCPPorts = [ + 47984 + 47989 + 47990 + 48010 + 48011 + ]; + + allowedUDPPorts = [ + 47998 + 47999 + 48000 + 48002 + 48010 + ]; + }; + + networking.firewall.allowedTCPPortRanges = [ + { + from = 47984; + to = 48010; + } + ]; + networking.firewall.allowedUDPPortRanges = [ + { + from = 47998; + to = 48010; + } + ]; + + environment.systemPackages = [ + ms-accept + pkgs.sunshine + pkgs.avahi + # Convenience script, until we find a better UX + (pkgs.writers.writeDashBin "sun" '' + ${pkgs.sunshine}/bin/sunshine -0 ${sunshineConfiguration} "$@" + '') + # Create a dummy account, for easier setup, + # don't use this account in actual production yet.
+ (pkgs.writers.writeDashBin "init-sun" '' + ${pkgs.sunshine}/bin/sunshine \ + --creds "sunshine" "sunshine" + '') + ]; + + # Required to simulate input + boot.kernelModules = [ "uinput" ]; + + services.udev.extraRules = '' + KERNEL=="uinput", SUBSYSTEM=="misc", OPTIONS+="static_node=uinput", TAG+="uaccess" + ''; + + security = { + rtkit.enable = true; + wrappers.sunshine = { + owner = "root"; + group = "root"; + capabilities = "cap_sys_admin+p"; + source = "${pkgs.sunshine}/bin/sunshine"; + }; + }; + + systemd.tmpfiles.rules = [ + "d '/var/lib/sunshine' 0770 'user' 'users' - -" + "C '/var/lib/sunshine/sunshine.cert' 0644 'user' 'users' - ${ + config.clan.core.vars.generators.sunshine.files."sunshine.cert".path or "" + }" + "C '/var/lib/sunshine/sunshine.key' 0644 'user' 'users' - ${ + config.clan.core.vars.generators.sunshine.files."sunshine.key".path or "" + }" + ]; + + hardware.graphics.enable = true; + + systemd.user.services.sunshine = { + enable = true; + description = "Sunshine self-hosted game stream host for Moonlight"; + startLimitBurst = 5; + startLimitIntervalSec = 500; + script = "/run/current-system/sw/bin/env /run/wrappers/bin/sunshine ${sunshineConfiguration}"; + serviceConfig = { + Restart = "on-failure"; + RestartSec = "5s"; + ReadWritePaths = [ "/var/lib/sunshine" ]; + ReadOnlyPaths = [ + (config.clan.core.vars.services.sunshine.files."sunshine.key".path or "") + (config.clan.core.vars.services.sunshine.files."sunshine.cert".path or "") + ]; + }; + wantedBy = [ "graphical-session.target" ]; + partOf = [ "graphical-session.target" ]; + wants = [ "graphical-session.target" ]; + after = [ + "sunshine-init-state.service" + "sunshine-init-credentials.service" + ]; + }; + + systemd.user.services.sunshine-init-state = { + enable = true; + description = "Sunshine self-hosted game stream host for Moonlight"; + startLimitBurst = 5; + startLimitIntervalSec = 500; + script = '' + ${ms-accept}/bin/moonlight-sunshine-accept sunshine init-state \ + --uuid 
${config.clan.core.vars.generators.sunshine.files.sunshine-uuid.value} \ + --state-file /var/lib/sunshine/state.json + ''; + serviceConfig = { + Restart = "on-failure"; + RestartSec = "5s"; + Type = "oneshot"; + ReadWritePaths = [ "/var/lib/sunshine" ]; + }; + wantedBy = [ "graphical-session.target" ]; + }; + + systemd.user.services.sunshine-init-credentials = { + enable = true; + description = "Sunshine self-hosted game stream host for Moonlight"; + startLimitBurst = 5; + startLimitIntervalSec = 500; + script = '' + ${lib.getExe pkgs.sunshine} ${sunshineConfiguration} --creds sunshine sunshine + ''; + serviceConfig = { + Restart = "on-failure"; + RestartSec = "5s"; + Type = "oneshot"; + ReadWritePaths = [ "/var/lib/sunshine" ]; + }; + wantedBy = [ "graphical-session.target" ]; + }; + + systemd.user.services.sunshine-listener = { + enable = true; + description = "Sunshine self-hosted game stream host for Moonlight"; + startLimitBurst = 5; + startLimitIntervalSec = 500; + script = '' + ${ms-accept}/bin/moonlight-sunshine-accept sunshine listen --port ${builtins.toString listenPort} \ + --uuid ${config.clan.core.vars.generators.sunshine.files.sunshine-uuid.value} \ + --state /var/lib/sunshine/state.json --cert '${ + config.clan.core.vars.generators.sunshine.files."sunshine.cert".value + }' + ''; + serviceConfig = { + # ); + Restart = "on-failure"; + RestartSec = 5; + ReadWritePaths = [ "/var/lib/sunshine" ]; + }; + wantedBy = [ "graphical-session.target" ]; + }; + + clan.core.vars.generators.sunshine = { + # generator was named incorrectly in the past + migrateFact = "ergochat"; + + files."sunshine.key" = { }; + files."sunshine.cert" = { }; + files."sunshine-uuid".secret = false; + files."sunshine.cert".secret = false; + + runtimeInputs = [ + pkgs.coreutils + ms-accept + ]; + + script = '' + moonlight-sunshine-accept sunshine init + mv credentials/cakey.pem "$out"/sunshine.key + cp credentials/cacert.pem "$out"/sunshine.cert + mv credentials/cacert.pem 
"$out"/sunshine.cert + mv uuid "$out"/sunshine-uuid + ''; + }; +} diff --git a/clanModules/syncthing-static-peers/README.md b/clanModules/syncthing-static-peers/README.md new file mode 100644 index 000000000..63d78a1da --- /dev/null +++ b/clanModules/syncthing-static-peers/README.md @@ -0,0 +1,3 @@ +--- +description = "Statically configure syncthing peers through clan" +--- diff --git a/clanModules/syncthing-static-peers/default.nix b/clanModules/syncthing-static-peers/default.nix new file mode 100644 index 000000000..6a85b3900 --- /dev/null +++ b/clanModules/syncthing-static-peers/default.nix @@ -0,0 +1,110 @@ +{ + lib, + config, + pkgs, + ... +}: +let + dir = config.clan.core.settings.directory; + machineVarDir = "${dir}/vars/per-machine/"; + syncthingPublicKeyPath = machine: "${machineVarDir}/${machine}/syncthing/id/value"; + machinesFileSet = builtins.readDir machineVarDir; + machines = lib.mapAttrsToList (name: _: name) machinesFileSet; + syncthingPublicKeysUnchecked = builtins.map ( + machine: + let + fullPath = syncthingPublicKeyPath machine; + in + if builtins.pathExists fullPath then machine else null + ) machines; + syncthingPublicKeyMachines = lib.filter (machine: machine != null) syncthingPublicKeysUnchecked; + zerotierIpMachinePath = machine: "${machineVarDir}/${machine}/zerotier/zerotier-ip/value"; + networkIpsUnchecked = builtins.map ( + machine: + let + fullPath = zerotierIpMachinePath machine; + in + if builtins.pathExists fullPath then machine else null + ) machines; + networkIpMachines = lib.filter (machine: machine != null) networkIpsUnchecked; + devices = builtins.map (machine: { + name = machine; + value = { + name = machine; + id = (lib.removeSuffix "\n" (builtins.readFile (syncthingPublicKeyPath machine))); + addresses = + [ "dynamic" ] + ++ ( + if (lib.elem machine networkIpMachines) then + [ "tcp://[${(lib.removeSuffix "\n" (builtins.readFile (zerotierIpMachinePath machine)))}]:22000" ] + else + [ ] + ); + }; + }) 
syncthingPublicKeyMachines; +in +{ + options.clan.syncthing-static-peers = { + excludeMachines = lib.mkOption { + type = lib.types.listOf lib.types.str; + example = lib.literalExpression "[ config.clan.core.settings.machine.name ]"; + default = [ ]; + description = '' + Machines that should not be added. + ''; + }; + }; + + config.services.syncthing.settings.devices = (builtins.listToAttrs devices); + + imports = [ + { + # Syncthing ports: 8384 for remote access to GUI + # 22000 TCP and/or UDP for sync traffic + # 21027/UDP for discovery + # source: https://docs.syncthing.net/users/firewall.html + networking.firewall.interfaces."zt+".allowedTCPPorts = [ + 8384 + 22000 + ]; + networking.firewall.allowedTCPPorts = [ 8384 ]; + networking.firewall.interfaces."zt+".allowedUDPPorts = [ + 22000 + 21027 + ]; + + # Activates inotify compatibility on syncthing + # use mkOverride 900 here as it otherwise would collide with the default of the + # upstream nixos xserver.nix + boot.kernel.sysctl."fs.inotify.max_user_watches" = lib.mkOverride 900 524288; + + services.syncthing = { + enable = true; + configDir = "/var/lib/syncthing"; + group = "syncthing"; + + key = lib.mkDefault config.clan.core.vars.generators.syncthing.files.key.path or null; + cert = lib.mkDefault config.clan.core.vars.generators.syncthing.files.cert.path or null; + }; + + clan.core.vars.generators.syncthing = { + files.key = { }; + files.cert = { }; + files.api = { }; + files.id.secret = false; + runtimeInputs = [ + pkgs.coreutils + pkgs.gnugrep + pkgs.syncthing + ]; + script = '' + syncthing generate --config "$out" + mv "$out"/key.pem "$out"/key + mv "$out"/cert.pem "$out"/cert + cat "$out"/config.xml | grep -oP '(?<= "$out"/id + cat "$out"/config.xml | grep -oP '\K[^<]+' | uniq > "$out"/api + ''; + }; + } + ]; +} diff --git a/clanModules/syncthing/README.md b/clanModules/syncthing/README.md new file mode 100644 index 000000000..b3ef7c465 --- /dev/null +++ b/clanModules/syncthing/README.md @@ -0,0 +1,40 @@ 
+--- +description = "A secure, file synchronization app for devices over networks, offering a private alternative to cloud services." +features = [ "inventory" ] + +[constraints] +roles.introducer.min = 1 +roles.introducer.max = 1 +--- +**Warning**: This module was written with our VM integration in mind and likely won't work outside of this context. It will be generalized in the future. + +## Usage + +We recommend configuring this module as a sync-service through the provided options. Although it provides a Web GUI through which more usage scenarios are supported. + +## Features + +- **Private and Secure**: Syncthing uses TLS encryption to secure data transfer between devices, ensuring that only the intended devices can read your data. +- **Decentralized**: No central server is involved in the data transfer. Each device communicates directly with others. +- **Open Source**: The source code is openly available for audit and contribution, fostering trust and continuous improvement. +- **Cross-Platform**: Syncthing supports multiple platforms including Windows, macOS, Linux, BSD, and Android. +- **Real-time Synchronization**: Changes made to files are synchronized in real-time across all connected devices. +- **Web GUI**: It includes a user-friendly web interface for managing devices and configurations. (`127.0.0.1:8384`) + +## Configuration + +- **Share Folders**: Select folders to share with connected devices and configure permissions and synchronization parameters. + +!!! info + Clan automatically discovers other devices. Automatic discovery requires one machine to be an [introducer](#clan.syncthing.introducer) + + If that is not the case you can add the other device by its Device ID manually. + You can find and share Device IDs under the "Add Device" button in the Web GUI. (`127.0.0.1:8384`) + +## Troubleshooting + +- **Sync Conflicts**: Resolve synchronization conflicts manually by reviewing file versions and modification times in the Web GUI (`127.0.0.1:8384`).
+ +## Support + +- **Documentation**: Extensive documentation is available on the [Syncthing website](https://docs.syncthing.net/). diff --git a/clanModules/syncthing/default.nix b/clanModules/syncthing/default.nix new file mode 100644 index 000000000..6cf92de8c --- /dev/null +++ b/clanModules/syncthing/default.nix @@ -0,0 +1,6 @@ +# Dont import this file +# It is only here for backwards compatibility. +# Dont author new modules with this file. +{ + imports = [ ./roles/peer.nix ]; +} diff --git a/clanModules/syncthing/roles/introducer.nix b/clanModules/syncthing/roles/introducer.nix new file mode 100644 index 000000000..bdd61dbe9 --- /dev/null +++ b/clanModules/syncthing/roles/introducer.nix @@ -0,0 +1,6 @@ +{ ... }: +{ + imports = [ + ../shared.nix + ]; +} diff --git a/clanModules/syncthing/roles/peer.nix b/clanModules/syncthing/roles/peer.nix new file mode 100644 index 000000000..d8cf3448c --- /dev/null +++ b/clanModules/syncthing/roles/peer.nix @@ -0,0 +1,21 @@ +{ config, lib, ... }: +let + instanceNames = builtins.attrNames config.clan.inventory.services.syncthing; + instanceName = builtins.head instanceNames; + instance = config.clan.inventory.services.syncthing.${instanceName}; + introducer = builtins.head instance.roles.introducer.machines; + + introducerId = "${config.clan.core.settings.directory}/vars/per-machine/${introducer}/syncthing/id/value"; +in +{ + imports = [ + ../shared.nix + ]; + + clan.syncthing.introducer = lib.strings.removeSuffix "\n" ( + if builtins.pathExists introducerId then + builtins.readFile introducerId + else + throw "${introducerId} does not exists. Please run `clan vars generate ${introducer}` to generate the introducer device id" + ); +} diff --git a/clanModules/syncthing/shared.nix b/clanModules/syncthing/shared.nix new file mode 100644 index 000000000..05c28cd9a --- /dev/null +++ b/clanModules/syncthing/shared.nix @@ -0,0 +1,214 @@ +{ + config, + pkgs, + lib, + ... 
+}: +{ + options.clan.syncthing = { + id = lib.mkOption { + description = '' + The ID of the machine. + It is generated automatically by default. + ''; + type = lib.types.nullOr lib.types.str; + example = "BABNJY4-G2ICDLF-QQEG7DD-N3OBNGF-BCCOFK6-MV3K7QJ-2WUZHXS-7DTW4AS"; + default = config.clan.core.vars.generators.syncthing.files."id".value; + defaultText = "config.clan.core.vars.generators.syncthing.files.\"id\".value"; + }; + introducer = lib.mkOption { + description = '' + The introducer for the machine. + ''; + type = lib.types.nullOr lib.types.str; + default = null; + }; + autoAcceptDevices = lib.mkOption { + description = '' + Auto accept incoming device requests. + Should only be used on the introducer. + ''; + type = lib.types.bool; + default = false; + }; + autoShares = lib.mkOption { + description = '' + Auto share the following Folders by their ID's with introduced devices. + Should only be used on the introducer. + ''; + type = lib.types.listOf lib.types.str; + default = [ ]; + example = [ + "folder1" + "folder2" + ]; + }; + }; + + imports = [ + { + # Syncthing ports: 8384 for remote access to GUI + # 22000 TCP and/or UDP for sync traffic + # 21027/UDP for discovery + # source: https://docs.syncthing.net/users/firewall.html + networking.firewall.interfaces."zt+".allowedTCPPorts = [ + 8384 + 22000 + ]; + networking.firewall.allowedTCPPorts = [ 8384 ]; + networking.firewall.interfaces."zt+".allowedUDPPorts = [ + 22000 + 21027 + ]; + + assertions = [ + { + assertion = lib.all ( + attr: builtins.hasAttr attr config.services.syncthing.settings.folders + ) config.clan.syncthing.autoShares; + message = '' + Syncthing: If you want to AutoShare a folder, you need to have it configured on the sharing device. 
+ ''; + } + ]; + + # Activates inotify compatibility on syncthing + # use mkOverride 900 here as it otherwise would collide with the default of the + # upstream nixos xserver.nix + boot.kernel.sysctl."fs.inotify.max_user_watches" = lib.mkOverride 900 524288; + + services.syncthing = { + enable = true; + + overrideFolders = lib.mkDefault ( + if (config.clan.syncthing.introducer == null) then true else false + ); + overrideDevices = lib.mkDefault ( + if (config.clan.syncthing.introducer == null) then true else false + ); + + key = lib.mkDefault config.clan.core.vars.generators.syncthing.files."key".path or null; + cert = lib.mkDefault config.clan.core.vars.generators.syncthing.files."cert".path or null; + + settings = { + options = { + urAccepted = -1; + allowedNetworks = [ ]; + }; + devices = + { } + // ( + if (config.clan.syncthing.introducer == null) then + { } + else + { + "${config.clan.syncthing.introducer}" = { + name = "introducer"; + id = config.clan.syncthing.introducer; + introducer = true; + autoAcceptFolders = true; + }; + } + ); + }; + }; + systemd.services.syncthing-auto-accept = + let + baseAddress = "127.0.0.1:8384"; + getPendingDevices = "/rest/cluster/pending/devices"; + postNewDevice = "/rest/config/devices"; + SharedFolderById = "/rest/config/folders/"; + apiKey = config.clan.core.vars.generators.syncthing.files."apikey".path; + in + lib.mkIf config.clan.syncthing.autoAcceptDevices { + description = "Syncthing auto accept devices"; + requisite = [ "syncthing.service" ]; + after = [ "syncthing.service" ]; + wantedBy = [ "multi-user.target" ]; + + script = '' + set -x + # query pending deviceID's + APIKEY=$(cat ${apiKey}) + PENDING=$(${lib.getExe pkgs.curl} -X GET -H "X-API-Key: $APIKEY" ${baseAddress}${getPendingDevices}) + PENDING=$(echo $PENDING | ${lib.getExe pkgs.jq} keys[]) + + # accept pending deviceID's + for ID in $PENDING;do + ${lib.getExe pkgs.curl} -X POST -d "{\"deviceId\": $ID}" -H "Content-Type: application/json" -H "X-API-Key: 
$APIKEY" ${baseAddress}${postNewDevice} + + # get all shared folders by their ID + for folder in ${builtins.toString config.clan.syncthing.autoShares}; do + SHARED_IDS=$(${lib.getExe pkgs.curl} -X GET -H "X-API-Key: $APIKEY" ${baseAddress}${SharedFolderById}"$folder" | ${lib.getExe pkgs.jq} ."devices") + PATCHED_IDS=$(echo $SHARED_IDS | ${lib.getExe pkgs.jq} ".+= [{\"deviceID\": $ID, \"introducedBy\": \"\", \"encryptionPassword\": \"\"}]") + ${lib.getExe pkgs.curl} -X PATCH -d "{\"devices\": $PATCHED_IDS}" -H "X-API-Key: $APIKEY" ${baseAddress}${SharedFolderById}"$folder" + done + done + ''; + }; + + systemd.timers.syncthing-auto-accept = lib.mkIf config.clan.syncthing.autoAcceptDevices { + description = "Syncthing Auto Accept"; + + wantedBy = [ "syncthing-auto-accept.service" ]; + + timerConfig = { + OnActiveSec = lib.mkDefault 60; + OnUnitActiveSec = lib.mkDefault 60; + }; + }; + + systemd.services.syncthing-init-api-key = + let + apiKey = config.clan.core.vars.generators.syncthing.files."apikey".path; + in + lib.mkIf config.clan.syncthing.autoAcceptDevices { + description = "Set the api key"; + after = [ "syncthing-init.service" ]; + wantedBy = [ "multi-user.target" ]; + script = '' + # set -x + set -efu pipefail + + APIKEY=$(cat ${apiKey}) + ${lib.getExe pkgs.gnused} -i "s/.*<\/apikey>/$APIKEY<\/apikey>/" ${config.services.syncthing.configDir}/config.xml + # sudo systemctl restart syncthing.service + systemctl restart syncthing.service + ''; + serviceConfig = { + BindReadOnlyPaths = [ apiKey ]; + Type = "oneshot"; + }; + }; + + clan.core.vars.generators.syncthing = { + migrateFact = "syncthing"; + + files."key".group = config.services.syncthing.group; + files."key".owner = config.services.syncthing.user; + + files."cert".group = config.services.syncthing.group; + files."cert".owner = config.services.syncthing.user; + + files."apikey".group = config.services.syncthing.group; + files."apikey".owner = config.services.syncthing.user; + + files."id".secret = false; 
+ + runtimeInputs = [ + pkgs.coreutils + pkgs.gnugrep + pkgs.syncthing + ]; + + script = '' + syncthing generate --config "$out" + mv "$out"/key.pem "$out"/key + mv "$out"/cert.pem "$out"/cert + cat "$out"/config.xml | grep -oP '(?<= "$out"/id + cat "$out"/config.xml | grep -oP '\K[^<]+' | uniq > "$out"/apikey + ''; + }; + } + ]; +} diff --git a/clanModules/thelounge/README.md b/clanModules/thelounge/README.md new file mode 100644 index 000000000..fb929bcf0 --- /dev/null +++ b/clanModules/thelounge/README.md @@ -0,0 +1,3 @@ +--- +description = "Modern web IRC client" +--- diff --git a/clanModules/thelounge/default.nix b/clanModules/thelounge/default.nix new file mode 100644 index 000000000..024328082 --- /dev/null +++ b/clanModules/thelounge/default.nix @@ -0,0 +1,19 @@ +_: { + warnings = [ + "The clan.thelounge module is deprecated and will be removed on 2025-07-15. Please migrate to user-maintained configuration." + ]; + + services.thelounge = { + enable = true; + public = true; + extraConfig = { + prefetch = true; + defaults = { + port = 6667; + tls = false; + }; + }; + }; + + clan.core.state.thelounde.folders = [ "/var/lib/thelounge" ]; +} diff --git a/clanModules/trusted-nix-caches/README.md b/clanModules/trusted-nix-caches/README.md new file mode 100644 index 000000000..aebad59d8 --- /dev/null +++ b/clanModules/trusted-nix-caches/README.md @@ -0,0 +1,5 @@ +--- +description = "This module sets the `clan.lol` and `nix-community` cache up as a trusted cache." 
+categories = ["System", "Network"] +features = [ "deprecated" ] +--- diff --git a/clanModules/trusted-nix-caches/default.nix b/clanModules/trusted-nix-caches/default.nix new file mode 100644 index 000000000..03dfd1ae1 --- /dev/null +++ b/clanModules/trusted-nix-caches/default.nix @@ -0,0 +1,10 @@ +{ + nix.settings.trusted-substituters = [ + "https://cache.clan.lol" + "https://nix-community.cachix.org" + ]; + nix.settings.trusted-public-keys = [ + "nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" + "cache.clan.lol-1:3KztgSAB5R1M+Dz7vzkBGzXdodizbgLXGXKXlcQLA28=" + ]; +} diff --git a/clanModules/user-password/README.md b/clanModules/user-password/README.md new file mode 100644 index 000000000..049e1a780 --- /dev/null +++ b/clanModules/user-password/README.md @@ -0,0 +1,24 @@ +--- +description = "Automatically generates and configures a password for the specified user account." +categories = ["System"] +features = ["inventory", "deprecated"] +--- + +If setting the option prompt to true, the user will be prompted to type in their desired password. + +!!! Note + This module will set `mutableUsers` to `false`, meaning you can not manage user passwords through `passwd` anymore. + + +After the system was installed/deployed the following command can be used to display the user-password: + +```bash +clan vars get [machine_name] user-password/user-password +``` + +See also: [Vars](../../guides/vars-backend.md) + +To regenerate the password run: +``` +clan vars generate --regenerate [machine_name] --generator user-password +``` diff --git a/clanModules/user-password/default.nix b/clanModules/user-password/default.nix new file mode 100644 index 000000000..ed6af3368 --- /dev/null +++ b/clanModules/user-password/default.nix @@ -0,0 +1,6 @@ +# Dont import this file +# It is only here for backwards compatibility. +# Dont author new modules with this file.
+{ + imports = [ ./roles/default.nix ]; +} diff --git a/clanModules/user-password/roles/default.nix b/clanModules/user-password/roles/default.nix new file mode 100644 index 000000000..e0b20c8ce --- /dev/null +++ b/clanModules/user-password/roles/default.nix @@ -0,0 +1,68 @@ +{ + pkgs, + config, + lib, + ... +}: +let + cfg = config.clan.user-password; +in +{ + options.clan.user-password = { + user = lib.mkOption { + type = lib.types.str; + example = "alice"; + description = "The user the password should be generated for."; + }; + prompt = lib.mkOption { + type = lib.types.bool; + default = true; + example = false; + description = '' + Whether the user should be prompted + If disabled, will autogenerate the password without prompting. + ''; + }; + }; + + config = { + + warnings = [ + "The clan.user-password module is deprecated and will be removed on 2025-07-15. + Please migrate to user-maintained configuration or the new equivalent clan services + (https://docs.clan.lol/reference/clanServices)." 
+ ]; + + users.mutableUsers = false; + users.users.${cfg.user} = { + hashedPasswordFile = config.clan.core.vars.generators.user-password.files.user-password-hash.path; + isNormalUser = lib.mkDefault true; + }; + + clan.core.vars.generators.user-password = { + files.user-password-hash.neededFor = "users"; + files.user-password-hash.restartUnits = lib.optional (config.services.userborn.enable) "userborn.service"; + + prompts.user-password.type = "hidden"; + prompts.user-password.persist = true; + prompts.user-password.description = "You can autogenerate a password, if you leave this prompt blank."; + files.user-password.deploy = false; + + migrateFact = "user-password"; + runtimeInputs = [ + pkgs.coreutils + pkgs.xkcdpass + pkgs.mkpasswd + ]; + script = '' + prompt_value=$(cat "$prompts"/user-password) + if [[ -n "''${prompt_value-}" ]]; then + echo "$prompt_value" | tr -d "\n" > "$out"/user-password + else + xkcdpass --numwords 4 --delimiter - --count 1 | tr -d "\n" > "$out"/user-password + fi + mkpasswd -s -m sha-512 < "$out"/user-password | tr -d "\n" > "$out"/user-password-hash + ''; + }; + }; +} diff --git a/clanModules/vaultwarden/README.md b/clanModules/vaultwarden/README.md new file mode 100644 index 000000000..f13759616 --- /dev/null +++ b/clanModules/vaultwarden/README.md @@ -0,0 +1,15 @@ +--- +description = "The server for the centralized password manager bitwarden" +--- + +After enabling the clan module, user accounts have to be created manually in the webinterface. +This is done by visiting `vaultwarden.example.com/admin` and typing in the admin password. 
+You can get the admin password for vaultwarden by executing: +```bash +clan vars get [machine_name] vaultwarden-admin/vaultwarden-admin +``` +To see all secrets tied to vaultwarden execute: +```bash +clan vars get [machine_name] vaultwarden-admin/vaultwarden-admin +clan vars get [machine_name] vaultwarden-smtp/vaultwarden-smtp +``` diff --git a/clanModules/vaultwarden/default.nix b/clanModules/vaultwarden/default.nix new file mode 100644 index 000000000..e9eda304c --- /dev/null +++ b/clanModules/vaultwarden/default.nix @@ -0,0 +1,161 @@ +{ + config, + lib, + pkgs, + ... +}: +let + cfg = config.clan.vaultwarden; +in + +{ + imports = [ + ../postgresql + (lib.mkRemovedOptionModule [ + "clan" + "vaultwarden" + "enable" + ] "Importing the module will already enable the service.") + ../nginx + ]; + + options.clan.vaultwarden = { + domain = lib.mkOption { + type = lib.types.str; + example = "bitwarden.example.com"; + description = "The domain to use for Vaultwarden"; + }; + port = lib.mkOption { + type = lib.types.int; + default = 3011; + description = "The port to use for Vaultwarden"; + }; + allow_signups = lib.mkOption { + type = lib.types.bool; + default = false; + description = "Allow signups for new users"; + }; + + smtp = { + host = lib.mkOption { + type = lib.types.str; + example = "smtp.example.com"; + description = "The email server domain address"; + }; + from = lib.mkOption { + type = lib.types.str; + example = "foobar@example.com"; + description = "From whom the email is coming from"; + }; + username = lib.mkOption { + type = lib.types.str; + example = "foobar@example.com"; + description = "The email server username"; + }; + }; + }; + + config = { + + clan.postgresql.users.vaultwarden = { }; + clan.postgresql.databases.vaultwarden.create.options = { + TEMPLATE = "template0"; + LC_COLLATE = "C"; + LC_CTYPE = "C"; + ENCODING = "UTF8"; + OWNER = "vaultwarden"; + }; + clan.postgresql.databases.vaultwarden.restore.stopOnRestore = [ "vaultwarden" ]; + + services.nginx = { + enable = true; + virtualHosts = { 
+ "${cfg.domain}" = { + forceSSL = true; + enableACME = true; + extraConfig = '' + client_max_body_size 128M; + ''; + locations."/" = { + proxyPass = "http://localhost:${builtins.toString cfg.port}"; + proxyWebsockets = true; + }; + locations."/notifications/hub" = { + proxyPass = "http://localhost:${builtins.toString cfg.port}"; + proxyWebsockets = true; + }; + locations."/notifications/hub/negotiate" = { + proxyPass = "http://localhost:${builtins.toString cfg.port}"; + proxyWebsockets = true; + }; + }; + }; + }; + + clan.core.vars.generators = { + vaultwarden-admin = { + migrateFact = "vaultwarden-admin"; + files."vaultwarden-admin" = { }; + files."vaultwarden-admin-hash" = { }; + runtimeInputs = with pkgs; [ + coreutils + pwgen + libargon2 + openssl + ]; + script = '' + ADMIN_PWD=$(pwgen 16 -n1 | tr -d "\n") + ADMIN_HASH=$(echo -n "$ADMIN_PWD" | argon2 "$(openssl rand -base64 32)" -e -id -k 65540 -t 3 -p 4) + + config=" + ADMIN_TOKEN=\"$ADMIN_HASH\" + " + echo -n "$ADMIN_PWD" > "$out"/vaultwarden-admin + echo -n "$config" > "$out"/vaultwarden-admin-hash + ''; + }; + vaultwarden-smtp = { + migrateFact = "vaultwarden-smtp"; + prompts."vaultwarden-smtp".description = "${cfg.smtp.from} SMTP password"; + prompts."vaultwarden-smtp".persist = true; + runtimeInputs = with pkgs; [ coreutils ]; + script = '' + prompt_value="$(cat "$prompts"/vaultwarden-smtp)" + config=" + SMTP_PASSWORD=\"$prompt_value\" + " + echo -n "$config" > "$out"/vaultwarden-smtp + ''; + }; + }; + + systemd.services."vaultwarden" = { + serviceConfig = { + EnvironmentFile = [ + config.clan.core.vars.generators."vaultwarden-smtp".files."vaultwarden-smtp".path + ]; + }; + }; + + services.vaultwarden = { + enable = true; + dbBackend = "postgresql"; + environmentFile = + config.clan.core.vars.generators."vaultwarden-admin".files."vaultwarden-admin-hash".path; # TODO: Make this upstream an array + config = { + SMTP_SECURITY = "force_tls"; + SMTP_HOST = cfg.smtp.host; + SMTP_FROM = cfg.smtp.from; + 
SMTP_USERNAME = cfg.smtp.username; + DOMAIN = "https://${cfg.domain}"; + SIGNUPS_ALLOWED = cfg.allow_signups; + ROCKET_PORT = builtins.toString cfg.port; + DATABASE_URL = "postgresql://"; # TODO: This should be set upstream if dbBackend is set to postgresql + ENABLE_WEBSOCKET = true; + ROCKET_ADDRESS = "127.0.0.1"; + }; + }; + + }; + +} diff --git a/clanModules/xfce/README.md b/clanModules/xfce/README.md new file mode 100644 index 000000000..38ae6e46a --- /dev/null +++ b/clanModules/xfce/README.md @@ -0,0 +1,3 @@ +--- +description = "A lightweight desktop manager" +--- diff --git a/clanModules/xfce/default.nix b/clanModules/xfce/default.nix new file mode 100644 index 000000000..d5413b25d --- /dev/null +++ b/clanModules/xfce/default.nix @@ -0,0 +1,11 @@ +{ + warnings = [ + "The clan.xfce module is deprecated and will be removed on 2025-07-15. Please migrate to user-maintained configuration." + ]; + + services.xserver = { + enable = true; + desktopManager.xfce.enable = true; + layout = "us"; + }; +} diff --git a/clanModules/zerotier-static-peers/README.md b/clanModules/zerotier-static-peers/README.md new file mode 100644 index 000000000..a54301531 --- /dev/null +++ b/clanModules/zerotier-static-peers/README.md @@ -0,0 +1,6 @@ +--- +description = "Statically configure the `zerotier` peers of a clan network." +--- +Statically configure the `zerotier` peers of a clan network. + +Requires a machine, that is the zerotier controller configured in the network. diff --git a/clanModules/zerotier-static-peers/default.nix b/clanModules/zerotier-static-peers/default.nix new file mode 100644 index 000000000..3000dc501 --- /dev/null +++ b/clanModules/zerotier-static-peers/default.nix @@ -0,0 +1,81 @@ +{ + lib, + config, + pkgs, + ... +}: +let + dir = config.clan.core.settings.directory; + machineDir = "${dir}/vars/per-machine"; + # TODO: This should use the inventory + # However we are probably going to replace this with the network module. 
+ machinesFileSet = builtins.readDir machineDir; + machines = lib.mapAttrsToList (name: _: name) machinesFileSet; + + networkIdsUnchecked = builtins.map ( + machine: + let + fullPath = "${machineDir}/${machine}/zerotier/zerotier-network-id/value"; + in + if builtins.pathExists fullPath then builtins.readFile fullPath else null + ) machines; + networkIds = lib.filter (machine: machine != null) networkIdsUnchecked; + networkId = if builtins.length networkIds == 0 then null else builtins.elemAt networkIds 0; +in +#TODO:trace on multiple found network-ids +#TODO:trace on no single found networkId +{ + options.clan.zerotier-static-peers = { + excludeHosts = lib.mkOption { + type = lib.types.listOf lib.types.str; + default = [ config.clan.core.settings.machine.name ]; + defaultText = lib.literalExpression "[ config.clan.core.settings.machine.name ]"; + description = "Hosts that should be excluded"; + }; + networkIps = lib.mkOption { + type = lib.types.listOf lib.types.str; + default = [ ]; + description = "Extra zerotier network Ips that should be accepted"; + }; + networkIds = lib.mkOption { + type = lib.types.listOf lib.types.str; + default = [ ]; + description = "Extra zerotier network Ids that should be accepted"; + }; + }; + + config.systemd.services.zerotier-static-peers-autoaccept = + let + zerotierIpFor = machine: "${machineDir}/${machine}/zerotier/zerotier-ip/value"; + networkIpsUnchecked = builtins.map ( + machine: if builtins.pathExists (zerotierIpFor machine) then machine else null + ) machines; + networkIps = lib.filter (machine: machine != null) networkIpsUnchecked; + machinesWithIp = lib.filterAttrs (name: _: (lib.elem name networkIps)) machinesFileSet; + filteredMachines = lib.filterAttrs ( + name: _: !(lib.elem name config.clan.zerotier-static-peers.excludeHosts) + ) machinesWithIp; + hosts = lib.mapAttrsToList (host: _: host) ( + lib.mapAttrs' ( + machine: _: lib.nameValuePair (builtins.readFile (zerotierIpFor machine)) 
[ machine ] + ) filteredMachines + ); + allHostIPs = config.clan.zerotier-static-peers.networkIps ++ hosts; + in + lib.mkIf (config.clan.core.networking.zerotier.controller.enable) { + wantedBy = [ "multi-user.target" ]; + after = [ "zerotierone.service" ]; + path = [ config.clan.core.clanPkgs.zerotierone ]; + serviceConfig.ExecStart = pkgs.writeScript "static-zerotier-peers-autoaccept" '' + #!/bin/sh + ${lib.concatMapStringsSep "\n" (host: '' + ${config.clan.core.clanPkgs.zerotier-members}/bin/zerotier-members allow --member-ip ${host} + '') allHostIPs} + ${lib.concatMapStringsSep "\n" (host: '' + ${config.clan.core.clanPkgs.zerotier-members}/bin/zerotier-members allow ${host} + '') config.clan.zerotier-static-peers.networkIds} + ''; + }; + + config.clan.core.networking.zerotier.networkId = lib.mkDefault networkId; +} diff --git a/clanModules/zerotier/README.md b/clanModules/zerotier/README.md new file mode 100644 index 000000000..373de10ba --- /dev/null +++ b/clanModules/zerotier/README.md @@ -0,0 +1,44 @@ +--- +description = "Configures [Zerotier VPN](https://zerotier.com) secure and efficient networking within a Clan." +features = [ "inventory" ] +categories = [ "Network", "System" ] + +[constraints] +roles.controller.min = 1 +roles.controller.max = 1 +roles.moon.max = 7 +--- + +## Overview + +This guide explains how to set up and manage a [ZeroTier VPN](https://zerotier.com) for a clan network. Each VPN requires a single controller and can support multiple peers and optional moons for better connectivity. + +## Roles + +### 1. Controller + +The [Controller](https://docs.zerotier.com/controller/) manages network membership and is responsible for admitting new peers. +When a new node is added to the clan, the controller must be updated to ensure it has the latest member list. + +- **Key Points:** + - Must be online to admit new machines to the VPN. + - Existing nodes can continue to communicate even when the controller is offline. + +### 2. 
Moons + +[Moons](https://docs.zerotier.com/roots) act as relay nodes, +providing direct connectivity to peers via their public IP addresses. +They enable devices that are not publicly reachable to join the VPN by routing through these nodes. + +- **Configuration Notes:** + - Each moon must define its public IP address. + - Ensures connectivity for devices behind NAT or restrictive firewalls. + +### 3. Peers + +Peers are standard nodes in the VPN. +They connect to other peers, moons, and the controller as needed. + +- **Purpose:** + - General role for all machines that are neither controllers nor moons. + - Ideal for most clan members' devices. diff --git a/clanModules/zerotier/roles/controller.nix b/clanModules/zerotier/roles/controller.nix new file mode 100644 index 000000000..f648e45cd --- /dev/null +++ b/clanModules/zerotier/roles/controller.nix @@ -0,0 +1,57 @@ +{ + config, + lib, + pkgs, + ... +}: +let + instanceNames = builtins.attrNames config.clan.inventory.services.zerotier; + instanceName = builtins.head instanceNames; + zeroTierInstance = config.clan.inventory.services.zerotier.${instanceName}; + roles = zeroTierInstance.roles; + # TODO(@mic92): This should be upstreamed to nixpkgs + uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list); +in +{ + imports = [ + ../shared.nix + ]; + config = { + + warnings = [ + "The clan.zerotier module is deprecated and will be removed on 2025-07-15. + Please migrate to user-maintained configuration or the new equivalent clan services + (https://docs.clan.lol/reference/clanServices)." 
+ ]; + + systemd.services.zerotier-inventory-autoaccept = + let + machines = uniqueStrings (roles.moon.machines ++ roles.controller.machines ++ roles.peer.machines); + networkIps = builtins.foldl' ( + ips: name: + if + builtins.pathExists "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value" + then + ips + ++ [ + (builtins.readFile "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value") + ] + else + ips + ) [ ] machines; + allHostIPs = config.clan.zerotier.networkIps ++ networkIps; + in + { + wantedBy = [ "multi-user.target" ]; + after = [ "zerotierone.service" ]; + path = [ config.clan.core.clanPkgs.zerotierone ]; + serviceConfig.ExecStart = pkgs.writeShellScript "zerotier-inventory-autoaccept" '' + ${lib.concatMapStringsSep "\n" (host: '' + ${config.clan.core.clanPkgs.zerotier-members}/bin/zerotier-members allow --member-ip ${host} + '') allHostIPs} + ''; + }; + + clan.core.networking.zerotier.controller.enable = lib.mkDefault true; + }; +} diff --git a/clanModules/zerotier/roles/moon.nix b/clanModules/zerotier/roles/moon.nix new file mode 100644 index 000000000..3d70b01d8 --- /dev/null +++ b/clanModules/zerotier/roles/moon.nix @@ -0,0 +1,20 @@ +{ config, lib, ... }: +{ + imports = [ + ../shared.nix + ]; + options.clan.zerotier.moon.stableEndpoints = lib.mkOption { + type = lib.types.listOf lib.types.str; + description = '' + Make this machine a moon. + Other machines can join this moon by adding this moon in their config. + It will be reachable under the given stable endpoints. 
+ ''; + example = '' + [ "1.2.3.4" "10.0.0.3/9993" "2001:abcd:abcd::3/9993" ] + ''; + }; + # TODO, we want to remove these options from clanCore + config.clan.core.networking.zerotier.moon.stableEndpoints = + config.clan.zerotier.moon.stableEndpoints; +} diff --git a/clanModules/zerotier/roles/peer.nix b/clanModules/zerotier/roles/peer.nix new file mode 100644 index 000000000..a56780406 --- /dev/null +++ b/clanModules/zerotier/roles/peer.nix @@ -0,0 +1,5 @@ +{ + imports = [ + ../shared.nix + ]; +} diff --git a/clanModules/zerotier/shared.nix b/clanModules/zerotier/shared.nix new file mode 100644 index 000000000..f1504b8f5 --- /dev/null +++ b/clanModules/zerotier/shared.nix @@ -0,0 +1,106 @@ +{ + lib, + config, + pkgs, + ... +}: +let + instanceNames = builtins.attrNames config.clan.inventory.services.zerotier; + instanceName = builtins.head instanceNames; + zeroTierInstance = config.clan.inventory.services.zerotier.${instanceName}; + roles = zeroTierInstance.roles; + controllerMachine = builtins.head roles.controller.machines; + networkIdPath = "${config.clan.core.settings.directory}/vars/per-machine/${controllerMachine}/zerotier/zerotier-network-id/value"; + networkId = + if builtins.pathExists networkIdPath then + builtins.readFile networkIdPath + else + builtins.throw '' + No zerotier network id found for ${controllerMachine}. + Please run `clan vars generate ${controllerMachine}` first. 
+ ''; + moons = roles.moon.machines; + moonIps = builtins.foldl' ( + ips: name: + if + builtins.pathExists "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value" + then + ips + ++ [ + (builtins.readFile "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value") + ] + else + ips + ) [ ] moons; +in +{ + options.clan.zerotier = + let + inherit (lib.types) listOf str; + in + { + excludeHosts = lib.mkOption { + type = listOf str; + default = [ config.clan.core.settings.machine.name ]; + defaultText = lib.literalExpression "[ config.clan.core.settings.machine.name ]"; + description = "Hosts that should be excluded"; + }; + networkIps = lib.mkOption { + type = listOf str; + default = [ ]; + description = "Extra zerotier network Ips that should be accepted"; + }; + networkIds = lib.mkOption { + type = listOf str; + default = [ ]; + description = "Extra zerotier network Ids that should be accepted"; + }; + }; + + config = { + assertions = [ + # TODO: This should also be checked via frontmatter constraints + { + assertion = builtins.length instanceNames == 1; + message = "The zerotier module currently only supports one instance per machine, but found ${builtins.toString instanceNames} on machine ${config.clan.core.settings.machine.name}"; + } + ]; + + clan.core.networking.zerotier.networkId = networkId; + clan.core.networking.zerotier.name = instanceName; + + # TODO: in future we want to have the node id of our moons in our vars + systemd.services.zerotierone.serviceConfig.ExecStartPost = lib.mkIf (moonIps != [ ]) ( + lib.mkAfter [ + "+${pkgs.writeScript "orbit-moons-by-ip" '' + #!${pkgs.python3.interpreter} + import json + import ipaddress + import subprocess + + def compute_member_id(ipv6_addr: str) -> str: + addr = ipaddress.IPv6Address(ipv6_addr) + addr_bytes = bytearray(addr.packed) + + # Extract the bytes corresponding to the member_id (node_id) + node_id_bytes = addr_bytes[10:16] + node_id = 
int.from_bytes(node_id_bytes, byteorder="big") + + member_id = format(node_id, "x").zfill(10)[-10:] + + return member_id + def main() -> None: + ips = json.loads(${builtins.toJSON (builtins.toJSON moonIps)}) + for ip in ips: + member_id = compute_member_id(ip) + res = subprocess.run(["zerotier-cli", "orbit", member_id, member_id]) + if res.returncode != 0: + print(f"Failed to add {member_id} to orbit") + if __name__ == "__main__": + main() + ''}" + ] + ); + + }; +} diff --git a/clanModules/zt-tcp-relay/README.md b/clanModules/zt-tcp-relay/README.md new file mode 100644 index 000000000..ca42f5625 --- /dev/null +++ b/clanModules/zt-tcp-relay/README.md @@ -0,0 +1,3 @@ +--- +description = "Enable ZeroTier VPN over TCP for networks where UDP is blocked." +--- diff --git a/clanModules/zt-tcp-relay/default.nix b/clanModules/zt-tcp-relay/default.nix new file mode 100644 index 000000000..cfe2bf610 --- /dev/null +++ b/clanModules/zt-tcp-relay/default.nix @@ -0,0 +1,35 @@ +{ + pkgs, + lib, + config, + ... +}: +{ + options.clan.zt-tcp-relay = { + port = lib.mkOption { + type = lib.types.port; + default = 4443; + description = "Port to listen on"; + }; + }; + config = { + warnings = [ + "The clan.zt-tcp-relay module is deprecated and will be removed on 2025-07-15. Please migrate to user-maintained configuration." 
+ ]; + + networking.firewall.allowedTCPPorts = [ config.clan.zt-tcp-relay.port ]; + + systemd.services.zt-tcp-relay = { + wantedBy = [ "multi-user.target" ]; + after = [ "network.target" ]; + serviceConfig = { + ExecStart = "${ + pkgs.callPackage ../../pkgs/zt-tcp-relay { } + }/bin/zt-tcp-relay --listen [::]:${builtins.toString config.clan.zt-tcp-relay.port}"; + Restart = "always"; + RestartSec = "5"; + DynamicUser = true; + }; + }; + }; +} diff --git a/docs/code-examples/disko-raid.nix b/docs/code-examples/disko-raid.nix index 0af69ee5e..dd59a9b2f 100644 --- a/docs/code-examples/disko-raid.nix +++ b/docs/code-examples/disko-raid.nix @@ -1,6 +1,7 @@ { lib, config, + clan-core, ... }: let @@ -40,7 +41,9 @@ let }; in { - imports = [ ]; + imports = [ + clan-core.clanModules.disk-id + ]; config = { boot.loader.systemd-boot.enable = true; diff --git a/docs/code-examples/disko-single-disk.nix b/docs/code-examples/disko-single-disk.nix index d3884c33d..7b0283903 100644 --- a/docs/code-examples/disko-single-disk.nix +++ b/docs/code-examples/disko-single-disk.nix @@ -1,6 +1,7 @@ { lib, config, + clan-core, ... 
}: let @@ -40,7 +41,9 @@ let }; in { - imports = [ ]; + imports = [ + clan-core.clanModules.disk-id + ]; config = { boot.loader.systemd-boot.enable = true; diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 2229aab1b..37b063082 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -71,6 +71,7 @@ nav: - Authoring: - clanService: guides/authoring/clanServices/index.md - Disk Template: guides/authoring/templates/disk/disko-templates.md + - clanModule: guides/authoring/clanModules/index.md - Contributing: - Contribute: guides/contributing/CONTRIBUTING.md - Debugging: guides/contributing/debugging.md @@ -101,6 +102,49 @@ nav: - reference/clanServices/wifi.md - reference/clanServices/zerotier.md - Interface for making Services: reference/clanServices/clan-service-author-interface.md + - Modules: + - Overview: reference/clanModules/index.md + - reference/clanModules/frontmatter/index.md + # TODO: display the docs of the clan.service modules + - reference/clanModules/admin.md + # This is the module overview and should stay at the top + - reference/clanModules/borgbackup-static.md + - reference/clanModules/data-mesher.md + - reference/clanModules/borgbackup.md + - reference/clanModules/deltachat.md + - reference/clanModules/disk-id.md + - reference/clanModules/dyndns.md + - reference/clanModules/ergochat.md + - reference/clanModules/garage.md + - reference/clanModules/heisenbridge.md + - reference/clanModules/importer.md + - reference/clanModules/iwd.md + - reference/clanModules/localbackup.md + - reference/clanModules/localsend.md + - reference/clanModules/matrix-synapse.md + - reference/clanModules/moonlight.md + - reference/clanModules/mumble.md + - reference/clanModules/mycelium.md + - reference/clanModules/nginx.md + - reference/clanModules/packages.md + - reference/clanModules/postgresql.md + - reference/clanModules/root-password.md + - reference/clanModules/single-disk.md + - reference/clanModules/sshd.md + - reference/clanModules/state-version.md + - 
reference/clanModules/static-hosts.md + - reference/clanModules/sunshine.md + - reference/clanModules/syncthing-static-peers.md + - reference/clanModules/syncthing.md + - reference/clanModules/thelounge.md + - reference/clanModules/trusted-nix-caches.md + - reference/clanModules/user-password.md + - reference/clanModules/auto-upgrade.md + - reference/clanModules/vaultwarden.md + - reference/clanModules/xfce.md + - reference/clanModules/zerotier-static-peers.md + - reference/clanModules/zerotier.md + - reference/clanModules/zt-tcp-relay.md - CLI: - Overview: reference/cli/index.md diff --git a/docs/nix/flake-module.nix b/docs/nix/flake-module.nix index 17467463a..9fd08108e 100644 --- a/docs/nix/flake-module.nix +++ b/docs/nix/flake-module.nix @@ -36,6 +36,9 @@ in docs.optionsJSON; + # Options available via ` imports = [ clanModules.${moduleName} ]; ` (Direct nix import) + clanModulesViaNix = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModulesViaNix); + # Options available when imported via ` inventory.${moduleName}....${rolesName} ` clanModulesViaRoles = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModulesViaRoles); @@ -91,6 +94,7 @@ # A file that contains the links to all clanModule docs export CLAN_MODULES_VIA_ROLES=${clanModulesViaRoles} export CLAN_MODULES_VIA_SERVICE=${clanModulesViaService} + export CLAN_MODULES_VIA_NIX=${clanModulesViaNix} export CLAN_SERVICE_INTERFACE=${self'.legacyPackages.clan-service-module-interface}/share/doc/nixos/options.json # Frontmatter format for clanModules export CLAN_MODULES_FRONTMATTER_DOCS=${clanModulesFrontmatter}/share/doc/nixos/options.json @@ -107,6 +111,7 @@ legacyPackages = { inherit jsonDocs + clanModulesViaNix clanModulesViaRoles clanModulesViaService ; diff --git a/docs/nix/get-module-docs.nix b/docs/nix/get-module-docs.nix index a58ce69ff..d6bc0e917 100644 --- a/docs/nix/get-module-docs.nix +++ b/docs/nix/get-module-docs.nix @@ -1,17 +1,34 @@ { modulesRolesOptions, nixosOptionsDoc, + 
clanModules, evalClanModules, lib, pkgs, clan-core, - ... }: let inherit (clan-core.clanLib.docs) stripStorePathsFromDeclarations; transformOptions = stripStorePathsFromDeclarations; in { + # clanModules docs + clanModulesViaNix = lib.mapAttrs ( + name: module: + if builtins.pathExists (module + "/default.nix") then + (nixosOptionsDoc { + options = + ((evalClanModules { + modules = [ module ]; + inherit pkgs clan-core; + }).options + ).clan.${name} or { }; + warningsAreErrors = true; + inherit transformOptions; + }).optionsJSON + else + { } + ) clanModules; clanModulesViaRoles = lib.mapAttrs ( _moduleName: rolesOptions: diff --git a/docs/nix/render_options/__init__.py b/docs/nix/render_options/__init__.py index 1abe66ead..aa5da870d 100644 --- a/docs/nix/render_options/__init__.py +++ b/docs/nix/render_options/__init__.py @@ -985,7 +985,7 @@ if __name__ == "__main__": # produce_clan_service_author_docs() - # produce_clan_modules_docs() + produce_clan_modules_docs() produce_clan_service_docs() - # produce_clan_modules_frontmatter_docs() + produce_clan_modules_frontmatter_docs() diff --git a/docs/site/guides/authoring/clanModules/index.md b/docs/site/guides/authoring/clanModules/index.md new file mode 100644 index 000000000..6bbb559dc --- /dev/null +++ b/docs/site/guides/authoring/clanModules/index.md @@ -0,0 +1,229 @@ +# Authoring a clanModule + +!!! Danger "Will get deprecated soon" + Please consider twice creating new modules in this format + + [`clan.service` module](../clanServices/index.md) will be the new standard soon. + +This site will guide you through authoring your first module. Explaining which conventions must be followed, such that others will have an enjoyable experience and the module can be used with minimal effort. + + +!!! 
Tip + External ClanModules can be ad-hoc loaded via [`clan.inventory.modules`](../../../reference/nix-api/inventory.md#inventory.modules) + +## Bootstrapping the `clanModule` + +A ClanModule is a specific subset of a [NixOS Module](https://nix.dev/tutorials/module-system/index.html), but it has some constraints and might be used via the [Inventory](../../../guides/inventory.md) interface. +In fact a `ClanModule` can be thought of as a layer of abstraction on-top of NixOS and/or other ClanModules. It may configure sane defaults and provide an ergonomic interface that is easy to use and can also be used via a UI that is under development currently. + +Because ClanModules should be configurable via `json`/`API` all of its interface (`options`) must be serializable. + +!!! Tip + ClanModules interface can be checked by running the json schema converter as follows. + + `nix build .#legacyPackages.x86_64-linux.schemas.inventory` + + If the build succeeds the module is compatible. + +## Directory structure + +Each module SHOULD be a directory of the following format: + +```sh +# Example: borgbackup +clanModules/borgbackup +├── README.md +└── roles + ├── client.nix + └── server.nix +``` + +!!! Tip + `README.md` is always required. See section [Readme](#readme) for further details. + + The `roles` folder is strictly required for `features = [ "inventory" ]`. + +## Registering the module + +=== "User module" + + If the module should be ad-hoc loaded. + It can be made available in any project via the [`clan.inventory.modules`](../../../reference/nix-api/inventory.md#inventory.modules) attribute. + + ```nix title="flake.nix" + # ... + # Sometimes this attribute set is defined in clan.nix + clan-core.lib.clan { + # 1. Add the module to the available clanModules with inventory support + inventory.modules = { + custom-module = ./modules/my_module; + }; + # 2. 
Use the module in the inventory + inventory.services = { + custom-module.instance_1 = { + roles.default.machines = [ "machineA" ]; + }; + }; + }; + ``` + +=== "Upstream module" + + If the module will be contributed to [`clan-core`](https://git.clan.lol/clan-core) + The clanModule must be registered within the `clanModules` attribute in `clan-core` + + ```nix title="clanModules/flake-module.nix" + --8<-- "clanModules/flake-module.nix:0:5" + # Register our new module here + # ... + ``` + +## Readme + +The `README.md` is a required file for all modules. It MUST contain frontmatter in [`toml`](https://toml.io) format. + +```markdown +--- +description = "Module A" +--- + +This is the example module that does xyz. +``` + +See the [Full Frontmatter reference](../../../reference/clanModules/frontmatter/index.md) further details and all supported attributes. + +## Roles + +If the module declares to implement `features = [ "inventory" ]` then it MUST contain a roles directory. + +Each `.nix` file in the `roles` directory is added as a role to the inventory service. + +Other files can also be placed alongside the `.nix` files + +```sh +└── roles + ├── client.nix + └── server.nix +``` + +Adds the roles: `client` and `server` + +??? Tip "Good to know" + Sometimes a `ClanModule` should be usable via both clan's `inventory` concept but also natively as a NixOS module. + + > In the long term, we want most modules to implement support for the inventory, + > but we are also aware that there are certain low-level modules that always serve as a backend for other higher-level `clanModules` with inventory support. + > These modules may not want to implement inventory interfaces as they are always used directly by other modules. 
+ + This can be achieved by placing an additional `default.nix` into the root of the ClanModules directory as shown: + + ```sh + # ModuleA + ├── README.md + ├── default.nix + └── roles + └── default.nix + ``` + + ```nix title="default.nix" + {...}:{ + imports = [ ./roles/default.nix ]; + } + ``` + + By utilizing this pattern the module (`moduleA`) can then be imported into any regular NixOS module via: + + ```nix + {...}:{ + imports = [ clanModules.moduleA ]; + } + ``` + +## Adding configuration options + +While we recommend to keep the interface as minimal as possible and deriving all required information from the `roles` model it might sometimes be required or convenient to expose customization options beyond `roles`. + +The following shows how to add options to your module. + +**It is important to understand that every module has its own namespace where it should declare options** + +**`clan.{moduleName}`** + +???+ Example + The following example shows how to register options in the module interface + + and how it can be set via the inventory + + + ```nix title="/default.nix" + custom-module = ./modules/custom-module; + ``` + + Since the module is called `custom-module` all of its exposed options should be added to `options.clan.custom-module.*...*` + + ```nix title="custom-module/roles/default.nix" + { + options = { + clan.custom-module.foo = mkOption { + type = types.str; + default = "bar"; + }; + }; + } + ``` + + If the module is [registered](#registering-the-module). + Configuration can be set as follows. 
+ + ```nix title="flake.nix" + # Sometimes this attribute set is defined in clan.nix + clan-core.lib.clan { + inventory.services = { + custom-module.instance_1 = { + roles.default.machines = [ "machineA" ]; + roles.default.config = { + # All configuration here is scoped to `clan.custom-module` + foo = "foobar"; + }; + }; + }; + } + ``` + +## Organizing the ClanModule + +Each `{role}.nix` is included into the machine if the machine is declared to have the role. + +For example + +```nix +roles.client.machines = ["MachineA"]; +``` + +Then `roles/client.nix` will be added to the machine `MachineA`. + +This behavior makes it possible to split the interface and common code paths when using multiple roles. +In the concrete example of `borgbackup` this allows a `server` to declare a different interface than the corresponding `client`. + +The client offers configuration option, to exclude certain local directories from being backed up: + +```nix title="roles/client.nix" +# Example client interface + options.clan.borgbackup.exclude = ... +``` + +The server doesn't offer any configuration option. Because everything is set-up automatically. + +```nix title="roles/server.nix" +# Example server interface + options.clan.borgbackup = {}; +``` + +Assuming that there is a common code path or a common interface between `server` and `client` this can be structured as: + +```nix title="roles/server.nix, roles/client.nix" +{...}: { + # ... + imports = [ ../common.nix ]; +} +``` diff --git a/docs/site/guides/authoring/clanServices/index.md b/docs/site/guides/authoring/clanServices/index.md index 78b0249d2..f3e86bb70 100644 --- a/docs/site/guides/authoring/clanServices/index.md +++ b/docs/site/guides/authoring/clanServices/index.md @@ -1,5 +1,10 @@ # Authoring a 'clan.service' module +!!! Tip + This is the successor format to the older [clanModules](../clanModules/index.md) + + While some features might still be missing we recommend to adapt this format early and give feedback. 
+ ## Service Module Specification This section explains how to author a clan service module. diff --git a/docs/site/guides/inventory.md b/docs/site/guides/inventory.md index 6995042bf..878a6cf57 100644 --- a/docs/site/guides/inventory.md +++ b/docs/site/guides/inventory.md @@ -25,7 +25,7 @@ See also: [Inventory API Documentation](../reference/nix-api/inventory.md) The inventory defines `services`. Membership of `machines` is defined via `roles` exclusively. -See each [modules documentation](../reference/clanServices/index.md) for its available roles. +See each [modules documentation](../reference/clanModules/index.md) for its available roles. ### Adding services to machines diff --git a/docs/site/index.md b/docs/site/index.md index 92eaa66ca..fab477a13 100644 --- a/docs/site/index.md +++ b/docs/site/index.md @@ -74,6 +74,14 @@ hide: --- + The clan core nix module. + This is imported when using clan and is the basis of the extra functionality + that can be provided. + +- [(Legacy) Modules](./reference/clanModules/index.md) + + --- + An overview of available clanModules !!! 
Example "These will be deprecated soon" diff --git a/docs/site/reference/index.md b/docs/site/reference/index.md index 590ab7bef..bb08355a8 100644 --- a/docs/site/reference/index.md +++ b/docs/site/reference/index.md @@ -5,7 +5,7 @@ This section of the site provides an overview of available options and commands --- - Learn how to use the [Clan CLI](./cli/index.md) -- Explore available services and application [modules](./clanServices/index.md) +- Explore available services and application [modules](./clanModules/index.md) - Discover [configuration options](./clan.core/index.md) that manage essential features - Find descriptions of the [Nix interfaces](./nix-api/clan.md) for defining a Clan diff --git a/lib/modules/inventory/distributed-service/tests/default.nix b/lib/modules/inventory/distributed-service/tests/default.nix index b30d7f299..e53eda12c 100644 --- a/lib/modules/inventory/distributed-service/tests/default.nix +++ b/lib/modules/inventory/distributed-service/tests/default.nix @@ -32,7 +32,7 @@ let let inventory = evalInventory inventoryModule; flakeInputsFixture = { - self.clan.modules = inventoryModule.modules or { }; + self.clan.modules = inventory.modules; # Example upstream module upstream.clan.modules = { uzzi = { @@ -165,7 +165,7 @@ in instances."instance_zaza" = { module = { name = "B"; - input = null; + input = "self"; }; }; }; @@ -191,7 +191,7 @@ in _class = "clan.service"; manifest = { name = "network"; - input = null; + input = "self"; }; # Define a role without special behavior roles.peer = { }; @@ -220,7 +220,7 @@ in instances."instance_zaza" = { module = { name = "B"; - input = null; + input = "self"; }; roles.peer.tags.all = { }; }; @@ -272,7 +272,7 @@ in instances."instance_zaza" = { module = { name = "B"; - input = null; + input = "self"; }; roles.peer.tags.all = { }; }; diff --git a/lib/modules/inventory/distributed-service/tests/per_instance_args.nix b/lib/modules/inventory/distributed-service/tests/per_instance_args.nix index 
a9bdd87b7..05e1b16a2 100644 --- a/lib/modules/inventory/distributed-service/tests/per_instance_args.nix +++ b/lib/modules/inventory/distributed-service/tests/per_instance_args.nix @@ -88,7 +88,6 @@ let instances."instance_zaza" = { module = { name = "B"; - input = null; }; roles.peer.tags.all = { }; }; diff --git a/lib/modules/inventory/distributed-service/tests/per_machine_args.nix b/lib/modules/inventory/distributed-service/tests/per_machine_args.nix index 5d9a11392..ac68631d9 100644 --- a/lib/modules/inventory/distributed-service/tests/per_machine_args.nix +++ b/lib/modules/inventory/distributed-service/tests/per_machine_args.nix @@ -65,7 +65,7 @@ let instances."instance_zaza" = { module = { name = "B"; - input = null; + input = "self"; }; roles.peer.tags.all = { }; }; diff --git a/lib/modules/inventory/frontmatter/default.nix b/lib/modules/inventory/frontmatter/default.nix index 4c14216a1..383779b99 100644 --- a/lib/modules/inventory/frontmatter/default.nix +++ b/lib/modules/inventory/frontmatter/default.nix @@ -121,7 +121,45 @@ let ); checkConstraints = args: (evalFrontmatter args).config.constraints.assertions; - getFrontmatter = _modulepath: _modulename: "clanModules are removed!"; + + getReadme = + modulepath: modulename: + let + readme = modulepath + "/README.md"; + readmeContents = + if (builtins.pathExists readme) then + (builtins.readFile readme) + else + throw "No README.md found for module ${modulename} (expected at ${readme})"; + in + readmeContents; + + getFrontmatter = + modulepath: modulename: + let + content = getReadme modulepath modulename; + parts = lib.splitString "---" content; + # Partition the parts into the first part (the readme content) and the rest (the metadata) + parsed = builtins.partition ({ index, ... }: if index >= 2 then false else true) ( + lib.filter ({ index, ... 
}: index != 0) (lib.imap0 (index: part: { inherit index part; }) parts) + ); + meta = builtins.fromTOML (builtins.head parsed.right).part; + in + if (builtins.length parts >= 3) then + meta + else + throw '' + TOML Frontmatter not found in README.md for module ${modulename} + + Please add the following to the top of your README.md: + + --- + description = "Your description here" + categories = [ "Your categories here" ] + features = [ "inventory" ] + --- + ...rest of your README.md... + ''; in { inherit diff --git a/lib/modules/inventory/tests/default.nix b/lib/modules/inventory/tests/default.nix index 800cbdea5..a69731b97 100644 --- a/lib/modules/inventory/tests/default.nix +++ b/lib/modules/inventory/tests/default.nix @@ -62,6 +62,135 @@ in expr = eval.config.clanInternals.inventoryClass.machines; expected = { }; }; + test_inventory_role_resolve = + let + eval = clan { + directory = ./.; + inventory = { + services = { + borgbackup.instance_1 = { + roles.server.machines = [ "backup_server" ]; + roles.client.machines = [ + "client_1_machine" + "client_2_machine" + ]; + }; + }; + machines = { + "backup_server" = { }; + "client_1_machine" = { }; + "client_2_machine" = { }; + }; + }; + }; + in + { + expr = { + m1 = + (eval.config.clanInternals.inventoryClass.machines."backup_server") + .compiledServices.borgbackup.matchedRoles; + m2 = + (eval.config.clanInternals.inventoryClass.machines."client_1_machine") + .compiledServices.borgbackup.matchedRoles; + m3 = + (eval.config.clanInternals.inventoryClass.machines."client_2_machine") + .compiledServices.borgbackup.matchedRoles; + inherit + ((eval.config.clanInternals.inventoryClass.machines."client_2_machine").compiledServices.borgbackup) + resolvedRolesPerInstance + ; + }; + + expected = { + m1 = [ + "server" + ]; + m2 = [ + "client" + ]; + m3 = [ + "client" + ]; + resolvedRolesPerInstance = { + instance_1 = { + client = { + machines = [ + "client_1_machine" + "client_2_machine" + ]; + }; + server = { + machines = [ 
"backup_server" ]; + }; + }; + }; + }; + }; + test_inventory_tag_resolve = + let + eval = clan { + directory = ./.; + inventory = { + services = { + borgbackup.instance_1 = { + roles.client.tags = [ "backup" ]; + }; + }; + machines = { + "not_used_machine" = { }; + "client_1_machine" = { + tags = [ "backup" ]; + }; + "client_2_machine" = { + tags = [ "backup" ]; + }; + }; + }; + }; + in + { + expr = + eval.config.clanInternals.inventoryClass.machines.client_1_machine.compiledServices.borgbackup.resolvedRolesPerInstance; + expected = { + instance_1 = { + client = { + machines = [ + "client_1_machine" + "client_2_machine" + ]; + }; + server = { + machines = [ ]; + }; + }; + }; + }; + + test_inventory_multiple_roles = + let + eval = clan { + directory = ./.; + inventory = { + services = { + borgbackup.instance_1 = { + roles.client.machines = [ "machine_1" ]; + roles.server.machines = [ "machine_1" ]; + }; + }; + machines = { + "machine_1" = { }; + }; + }; + }; + in + { + expr = + eval.config.clanInternals.inventoryClass.machines.machine_1.compiledServices.borgbackup.matchedRoles; + expected = [ + "client" + "server" + ]; + }; test_inventory_module_doesnt_exist = let @@ -87,4 +216,84 @@ in msg = "ClanModule not found*"; }; }; + + test_inventory_role_doesnt_exist = + let + eval = clan { + directory = ./.; + inventory = { + services = { + borgbackup.instance_1 = { + roles.roleXYZ.machines = [ "machine_1" ]; + }; + }; + machines = { + "machine_1" = { }; + }; + }; + }; + in + { + inherit eval; + expr = eval.config.clanInternals.inventoryClass.machines.machine_1.machineImports; + expectedError = { + type = "ThrownError"; + msg = ''Roles \["roleXYZ"\] are not defined in the service borgbackup''; + }; + }; + # Needs NIX_ABORT_ON_WARN=1 + # So the lib.warn is turned into abort + test_inventory_tag_doesnt_exist = + let + eval = clan { + directory = ./.; + inventory = { + services = { + borgbackup.instance_1 = { + roles.client.machines = [ "machine_1" ]; + roles.client.tags = [ 
"tagXYZ" ]; + }; + }; + machines = { + "machine_1" = { + tags = [ "tagABC" ]; + }; + }; + }; + }; + in + { + expr = eval.config.clanInternals.inventoryClass.machines.machine_1.machineImports; + expectedError = { + type = "Error"; + # TODO: Add warning matching in nix-unit + msg = ".*"; + }; + }; + test_inventory_disabled_service = + let + eval = clan { + directory = ./.; + inventory = { + services = { + borgbackup.instance_1 = { + enabled = false; + roles.client.machines = [ "machine_1" ]; + }; + }; + machines = { + "machine_1" = { + + }; + }; + }; + }; + in + { + inherit eval; + expr = builtins.filter ( + v: v != { } && !v.clan.inventory.assertions ? "alive.assertion.inventory" + ) eval.config.clanInternals.inventoryClass.machines.machine_1.machineImports; + expected = [ ]; + }; } diff --git a/lib/modules/inventoryClass/interface.nix b/lib/modules/inventoryClass/interface.nix index 233c53784..06c33a4a5 100644 --- a/lib/modules/inventoryClass/interface.nix +++ b/lib/modules/inventoryClass/interface.nix @@ -161,7 +161,27 @@ in ``` ''; - apply = _: { }; + apply = + moduleSet: + let + allowedNames = lib.attrNames config._legacyModules; + in + if builtins.all (moduleName: builtins.elem moduleName allowedNames) (lib.attrNames moduleSet) then + moduleSet + else + lib.warn '' + `inventory.modules` will be deprecated soon. 
+ + Please migrate the following modules into `clan.service` modules + and register them in `clan.modules` + + ${lib.concatStringsSep "\n" ( + map (m: "'${m}'") (lib.attrNames (lib.filterAttrs (n: _v: !builtins.elem n allowedNames) moduleSet)) + )} + + See: https://docs.clan.lol/guides/clanServices/ + And: https://docs.clan.lol/guides/authoring/clanServices/ + '' moduleSet; }; assertions = lib.mkOption { diff --git a/pkgs/clan-cli/clan_cli/tests/test_flake_with_core/flake.nix b/pkgs/clan-cli/clan_cli/tests/test_flake_with_core/flake.nix index 4720d8c46..d40700310 100644 --- a/pkgs/clan-cli/clan_cli/tests/test_flake_with_core/flake.nix +++ b/pkgs/clan-cli/clan_cli/tests/test_flake_with_core/flake.nix @@ -44,7 +44,13 @@ { config, ... }: { nixpkgs.hostPlatform = "x86_64-linux"; - imports = [ ]; + imports = [ + clan-core.clanModules.sshd + clan-core.clanModules.root-password + clan-core.clanModules.user-password + ]; + clan.user-password.user = "alice"; + clan.user-password.prompt = false; clan.core.networking.targetHost = "__CLAN_TARGET_ADDRESS__"; system.stateVersion = config.system.nixos.release; sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__"; diff --git a/pkgs/clan-cli/clan_cli/tests/test_flake_with_core_and_pass/flake.nix b/pkgs/clan-cli/clan_cli/tests/test_flake_with_core_and_pass/flake.nix index b40b66328..87832979d 100644 --- a/pkgs/clan-cli/clan_cli/tests/test_flake_with_core_and_pass/flake.nix +++ b/pkgs/clan-cli/clan_cli/tests/test_flake_with_core_and_pass/flake.nix @@ -15,7 +15,13 @@ vm1 = { lib, config, ... 
}: { - imports = [ ]; + imports = [ + clan-core.clanModules.sshd + clan-core.clanModules.root-password + clan-core.clanModules.user-password + ]; + clan.user-password.user = "alice"; + clan.user-password.prompt = false; clan.core.networking.targetHost = "__CLAN_TARGET_ADDRESS__"; system.stateVersion = config.system.nixos.release; diff --git a/pkgs/clan-cli/clan_cli/tests/test_modules.py b/pkgs/clan-cli/clan_cli/tests/test_modules.py index 2ba3f3bcd..22860a45a 100644 --- a/pkgs/clan-cli/clan_cli/tests/test_modules.py +++ b/pkgs/clan-cli/clan_cli/tests/test_modules.py @@ -1,12 +1,27 @@ +import json +import subprocess from typing import TYPE_CHECKING import pytest +from clan_cli.machines.create import CreateOptions, create_machine from clan_cli.tests.fixtures_flakes import FlakeForTest from clan_lib.flake import Flake +from clan_lib.nix import nix_eval, run +from clan_lib.nix_models.clan import ( + InventoryMachine as Machine, +) +from clan_lib.nix_models.clan import ( + InventoryMachineDeploy as MachineDeploy, +) +from clan_lib.persist.inventory_store import InventoryStore +from clan_lib.persist.util import set_value_by_path from clan_lib.services.modules import list_service_modules if TYPE_CHECKING: - pass + from .age_keys import KeyPair + +from clan_cli.tests.helpers import cli +from clan_lib.machines.machines import Machine as MachineMachine @pytest.mark.with_core @@ -15,3 +30,110 @@ def test_list_modules(test_flake_with_core: FlakeForTest) -> None: modules_info = list_service_modules(Flake(str(base_path))) assert "modules" in modules_info + + +@pytest.mark.impure +def test_add_module_to_inventory( + monkeypatch: pytest.MonkeyPatch, + test_flake_with_core: FlakeForTest, + age_keys: list["KeyPair"], +) -> None: + base_path = test_flake_with_core.path + + with monkeypatch.context(): + monkeypatch.chdir(test_flake_with_core.path) + monkeypatch.setenv("SOPS_AGE_KEY", age_keys[0].privkey) + + cli.run( + [ + "secrets", + "users", + "add", + "--flake", + 
str(test_flake_with_core.path), + "user1", + age_keys[0].pubkey, + ] + ) + opts = CreateOptions( + clan_dir=Flake(str(base_path)), + machine=Machine(name="machine1", tags=[], deploy=MachineDeploy()), + ) + + create_machine(opts) + ( + test_flake_with_core.path / "machines" / "machine1" / "facter.json" + ).write_text( + json.dumps( + { + "version": 1, + "system": "x86_64-linux", + } + ) + ) + subprocess.run(["git", "add", "."], cwd=test_flake_with_core.path, check=True) + + inventory_store = InventoryStore(Flake(str(test_flake_with_core.path))) + inventory = inventory_store.read() + + set_value_by_path( + inventory, + "services", + { + "borgbackup": { + "borg1": { + "meta": {"name": "borg1"}, + "roles": { + "client": {"machines": ["machine1"]}, + "server": {"machines": ["machine1"]}, + }, + } + } + }, + ) + + inventory_store.write( + inventory, + message="Add borgbackup service", + commit=False, + ) + + # cmd = ["facts", "generate", "--flake", str(test_flake_with_core.path), "machine1"] + cmd = [ + "vars", + "generate", + "--flake", + str(test_flake_with_core.path), + "machine1", + ] + + cli.run(cmd) + + machine = MachineMachine( + name="machine1", flake=Flake(str(test_flake_with_core.path)) + ) + + from clan_cli.vars.generate import Generator + + generator = None + + generators = Generator.generators_from_flake(machine.name, machine.flake) + for gen in generators: + if gen.name == "borgbackup": + generator = gen + break + + assert generator + + ssh_key = machine.public_vars_store.get(generator, "borgbackup.ssh.pub") + + cmd = nix_eval( + [ + f"{base_path}#nixosConfigurations.machine1.config.services.borgbackup.repos", + "--json", + ] + ) + proc = run(cmd) + res = json.loads(proc.stdout.strip()) + + assert res["machine1"]["authorizedKeys"] == [ssh_key.decode()] diff --git a/pkgs/installer/flake-module.nix b/pkgs/installer/flake-module.nix index 8c853ed54..2a12f6c8b 100644 --- a/pkgs/installer/flake-module.nix +++ b/pkgs/installer/flake-module.nix @@ -12,6 +12,7 @@ 
let imports = [ ./iwd.nix self.nixosModules.installer + self.clanModules.trusted-nix-caches ]; system.stateVersion = config.system.nixos.release; diff --git a/templates/machine/flash-installer/configuration.nix b/templates/machine/flash-installer/configuration.nix index 423ba2667..dc05c2110 100644 --- a/templates/machine/flash-installer/configuration.nix +++ b/templates/machine/flash-installer/configuration.nix @@ -8,6 +8,9 @@ imports = [ ./disko.nix clan-core.nixosModules.installer + clan-core.clanModules.trusted-nix-caches + clan-core.clanModules.disk-id + clan-core.clanModules.iwd ]; clan.core.deployment.requireExplicitUpdate = true;