Compare commits
1 commit: exports-en...fix-typogr

| Author | SHA1 | Date |
|---|---|---|
|  | e1e10b2c0a |  |
@@ -1,12 +0,0 @@
## Description of the change

<!-- Brief summary of the change if not already clear from the title -->

## Checklist

- [ ] Updated Documentation
- [ ] Added tests
- [ ] Doesn't affect backwards compatibility - or check the next points
- [ ] Add the breaking change and migration details to docs/release-notes.md
- !!! Review from another person is required *BEFORE* merge !!!
- [ ] Add introduction of major feature to docs/release-notes.md
@@ -17,4 +17,4 @@ jobs:
- name: Build clan-app for x86_64-darwin
run: |
nix build .#packages.x86_64-darwin.clan-app --log-format bar-with-logs
nix build .#packages.x86_64-darwin.clan-app --system x86_64-darwin --log-format bar-with-logs
@@ -8,6 +8,6 @@ jobs:
runs-on: nix
steps:
- uses: actions/checkout@v4
- run: nix run --print-build-logs .#deploy-docs
- run: nix run .#deploy-docs
env:
SSH_HOMEPAGE_KEY: ${{ secrets.SSH_HOMEPAGE_KEY }}
2 .github/workflows/repo-sync.yml vendored
@@ -10,7 +10,7 @@ jobs:
if: github.repository_owner == 'clan-lol'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: actions/create-github-app-token@v2
2 .gitignore vendored
@@ -52,5 +52,3 @@ pkgs/clan-app/ui/.fonts
*.gif
*.mp4
*.mkv

.jj
4 CONTRIBUTING.md Normal file
@@ -0,0 +1,4 @@
# Contributing to Clan

<!-- Local file: docs/CONTRIBUTING.md -->
Go to the Contributing guide at https://docs.clan.lol/guides/contributing/CONTRIBUTING
@@ -1,4 +1,4 @@
Copyright 2023-2025 Clan contributors
Copyright 2023-2024 Clan contributors

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
@@ -30,7 +30,7 @@ In the Clan ecosystem, security is paramount. Learn how to handle secrets effect

The Clan project thrives on community contributions. We welcome everyone to contribute and collaborate:

- **Contribution Guidelines**: Make a meaningful impact by following the steps in [contributing](https://docs.clan.lol/guides/contributing/CONTRIBUTING/)<!-- [contributing.md](docs/CONTRIBUTING.md) -->.
- **Contribution Guidelines**: Make a meaningful impact by following the steps in [contributing](https://docs.clan.lol/contributing/contributing/)<!-- [contributing.md](docs/CONTRIBUTING.md) -->.

## Join the revolution
6 checks/clan-core-for-checks.nix Normal file
@@ -0,0 +1,6 @@
{ fetchgit }:
fetchgit {
url = "https://git.clan.lol/clan/clan-core.git";
rev = "5d884cecc2585a29b6a3596681839d081b4de192";
sha256 = "09is1afmncamavb2q88qac37vmsijxzsy1iz1vr6gsyjq2rixaxc";
}
@@ -12,6 +12,7 @@ let
elem
filter
filterAttrs
flip
genAttrs
hasPrefix
pathExists
@@ -19,23 +20,32 @@ let
nixosLib = import (self.inputs.nixpkgs + "/nixos/lib") { };
in
{
imports = filter pathExists [
./devshell/flake-module.nix
./flash/flake-module.nix
./installation/flake-module.nix
./update/flake-module.nix
./morph/flake-module.nix
./nixos-documentation/flake-module.nix
./dont-depend-on-repo-root.nix
# clan core submodule tests
../nixosModules/clanCore/machine-id/tests/flake-module.nix
../nixosModules/clanCore/postgresql/tests/flake-module.nix
../nixosModules/clanCore/state-version/tests/flake-module.nix
];
imports =
let
clanCoreModulesDir = ../nixosModules/clanCore;
getClanCoreTestModules =
let
moduleNames = attrNames (builtins.readDir clanCoreModulesDir);
testPaths = map (
moduleName: clanCoreModulesDir + "/${moduleName}/tests/flake-module.nix"
) moduleNames;
in
filter pathExists testPaths;
in
getClanCoreTestModules
++ filter pathExists [
./devshell/flake-module.nix
./flash/flake-module.nix
./installation/flake-module.nix
./update/flake-module.nix
./morph/flake-module.nix
./nixos-documentation/flake-module.nix
./dont-depend-on-repo-root.nix
];
flake.check = genAttrs [ "x86_64-linux" "aarch64-darwin" ] (
system:
let
checks = filterAttrs (
checks = flip filterAttrs self.checks.${system} (
name: _check:
!(hasPrefix "nixos-test-" name)
&& !(hasPrefix "nixos-" name)
@@ -47,7 +57,7 @@ in
"clan-core-for-checks"
"clan-deps"
])
) self.checks.${system};
);
in
inputs.nixpkgs.legacyPackages.${system}.runCommand "fast-flake-checks-${system}"
{ passthru.checks = checks; }
@@ -86,12 +96,11 @@ in

# Container Tests
nixos-test-container = self.clanLib.test.containerTest ./container nixosTestArgs;
nixos-systemd-abstraction = self.clanLib.test.containerTest ./systemd-abstraction nixosTestArgs;
nixos-test-user-firewall-iptables = self.clanLib.test.containerTest ./user-firewall/iptables.nix nixosTestArgs;
nixos-test-user-firewall-nftables = self.clanLib.test.containerTest ./user-firewall/nftables.nix nixosTestArgs;
nixos-test-extra-python-packages = self.clanLib.test.containerTest ./test-extra-python-packages nixosTestArgs;

service-dummy-test = import ./service-dummy-test nixosTestArgs;
wireguard = import ./wireguard nixosTestArgs;
service-dummy-test-from-flake = import ./service-dummy-test-from-flake nixosTestArgs;
};
@@ -112,7 +121,7 @@ in
) (self.darwinConfigurations or { })
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") (
if system == "aarch64-darwin" then
lib.filterAttrs (n: _: n != "docs" && n != "deploy-docs" && n != "option-search") packagesToBuild
lib.filterAttrs (n: _: n != "docs" && n != "deploy-docs" && n != "docs-options") packagesToBuild
else
packagesToBuild
)
@@ -13,6 +13,8 @@
fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";

# We need to use `mkForce` because we inherit from `test-install-machine`
# which currently hardcodes `nixpkgs.hostPlatform`
nixpkgs.hostPlatform = lib.mkForce system;

imports = [ self.nixosModules.test-flash-machine ];
@@ -26,24 +28,10 @@
{
imports = [ self.nixosModules.test-install-machine-without-system ];

# We don't want our system to define any `vars` generators as these can't
# be generated as the flake is inside `/nix/store`.
clan.core.settings.state-version.enable = false;
clan.core.vars.generators.test = lib.mkForce { };

disko.devices.disk.main.preCreateHook = lib.mkForce "";

# Every option here should match the options set through `clan flash write`
# if you get a mass rebuild on the disko derivation, this means you need to
# adjust something here. Also make sure that the injected json in clan flash write
# is up to date.
i18n.defaultLocale = "de_DE.UTF-8";
console.keyMap = "de";
services.xserver.xkb.layout = "de";
users.users.root.openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIRWUusawhlIorx7VFeQJHmMkhl9X3QpnvOdhnV/bQNG root@target\n"
];
};

};

perSystem =
@@ -56,11 +44,8 @@
dependencies = [
pkgs.disko
pkgs.buildPackages.xorg.lndir
pkgs.glibcLocales
pkgs.kbd.out
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.ConfigIniFiles
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.FileSlurp
pkgs.bubblewrap

self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
@@ -70,8 +55,7 @@
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
# Skip flash test on aarch64-linux for now as it's too slow
checks = lib.optionalAttrs (pkgs.stdenv.isLinux && pkgs.hostPlatform.system != "aarch64-linux") {
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
nixos-test-flash = self.clanLib.test.baseTest {
name = "flash";
nodes.target = {
@@ -88,7 +72,7 @@
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = "";
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
experimental-features = [
"nix-command"
"flakes"
@@ -97,10 +81,10 @@
};
testScript = ''
start_all()
machine.succeed("echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIRWUusawhlIorx7VFeQJHmMkhl9X3QpnvOdhnV/bQNG root@target' > ./test_id_ed25519.pub")

# Some distros like to automount disks with spaces
machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdc && mount /dev/vdc "/mnt/with spaces"')
machine.succeed("clan flash write --ssh-pubkey ./test_id_ed25519.pub --keymap de --language de_DE.UTF-8 --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.hostPlatform.system}")
machine.succeed("clan flash write --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.hostPlatform.system}")
'';
} { inherit pkgs self; };
};
@@ -1,8 +1,8 @@
{
config,
self,
lib,
privateInputs,

...
}:
{
@@ -14,38 +14,31 @@
# you can get a new one by adding
# client.fail("cat test-flake/machines/test-install-machine/facter.json >&2")
# to the installation test.
clan.machines = {
test-install-machine-without-system = {
clan.machines.test-install-machine-without-system = {
fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";

imports = [ self.nixosModules.test-install-machine-without-system ];
};
clan.machines.test-install-machine-with-system =
{ pkgs, ... }:
{
# https://git.clan.lol/clan/test-fixtures
facter.reportPath = builtins.fetchurl {
url = "https://git.clan.lol/clan/test-fixtures/raw/commit/4a2bc56d886578124b05060d3fb7eddc38c019f8/nixos-vm-facter-json/${pkgs.hostPlatform.system}.json";
sha256 =
{
aarch64-linux = "sha256:1rlfymk03rmfkm2qgrc8l5kj5i20srx79n1y1h4nzlpwaz0j7hh2";
x86_64-linux = "sha256:16myh0ll2gdwsiwkjw5ba4dl23ppwbsanxx214863j7nvzx42pws";
}
.${pkgs.hostPlatform.system};
};

fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";

imports = [
self.nixosModules.test-install-machine-without-system
];
imports = [ self.nixosModules.test-install-machine-without-system ];
};
}
// (lib.listToAttrs (
lib.map (
system:
lib.nameValuePair "test-install-machine-${system}" {
imports = [
self.nixosModules.test-install-machine-without-system
(
if privateInputs ? test-fixtures then
{
facter.reportPath = privateInputs.test-fixtures + /nixos-vm-facter-json/${system}.json;
}
else
{ nixpkgs.hostPlatform = system; }
)
];

fileSystems."/".device = lib.mkDefault "/dev/vda";
boot.loader.grub.device = lib.mkDefault "/dev/vda";
}
) (lib.filter (lib.hasSuffix "linux") config.systems)
));

flake.nixosModules = {
test-install-machine-without-system =
{ lib, modulesPath, ... }:
@@ -160,9 +153,9 @@
closureInfo = pkgs.closureInfo {
rootPaths = [
privateInputs.clan-core-for-checks
self.nixosConfigurations."test-install-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
self.nixosConfigurations."test-install-machine-${pkgs.hostPlatform.system}".config.system.build.initialRamdisk
self.nixosConfigurations."test-install-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.initialRamdisk
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.buildPackages.xorg.lndir
@@ -215,7 +208,7 @@
# Prepare test flake and Nix store
flake_dir = prepare_test_flake(
temp_dir,
"${self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks}",
"${self.checks.x86_64-linux.clan-core-for-checks}",
"${closureInfo}"
)
@@ -226,22 +219,6 @@
"${../assets/ssh/privkey}"
)

# Run clan install from host using port forwarding
clan_cmd = [
"${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
"machines",
"init-hardware-config",
"--debug",
"--flake", str(flake_dir),
"--yes", "test-install-machine-without-system",
"--host-key-check", "none",
"--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
"-i", ssh_conn.ssh_key,
"--option", "store", os.environ['CLAN_TEST_STORE']
]
subprocess.run(clan_cmd, check=True)


# Run clan install from host using port forwarding
clan_cmd = [
"${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
@@ -296,7 +273,7 @@
# Prepare test flake and Nix store
flake_dir = prepare_test_flake(
temp_dir,
"${self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks}",
"${self.checks.x86_64-linux.clan-core-for-checks}",
"${closureInfo}"
)

@@ -325,8 +302,7 @@
"test-install-machine-without-system",
"-i", ssh_conn.ssh_key,
"--option", "store", os.environ['CLAN_TEST_STORE'],
"--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
"--yes"
f"nonrootuser@localhost:{ssh_conn.host_port}"
]

result = subprocess.run(clan_cmd, capture_output=True, cwd=flake_dir)
@@ -350,9 +326,7 @@
"test-install-machine-without-system",
"-i", ssh_conn.ssh_key,
"--option", "store", os.environ['CLAN_TEST_STORE'],
"--target-host",
f"nonrootuser@localhost:{ssh_conn.host_port}",
"--yes"
f"nonrootuser@localhost:{ssh_conn.host_port}"
]

result = subprocess.run(clan_cmd, capture_output=True, cwd=flake_dir)
@@ -15,6 +15,7 @@ let
networking.useNetworkd = true;
services.openssh.enable = true;
services.openssh.settings.UseDns = false;
services.openssh.settings.PasswordAuthentication = false;
system.nixos.variant_id = "installer";
environment.systemPackages = [
pkgs.nixos-facter
@@ -146,11 +147,28 @@ let
];
doCheck = false;
};

# Common closure info
closureInfo = pkgs.closureInfo {
rootPaths = [
self.checks.x86_64-linux.clan-core-for-checks
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.initialRamdisk
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.clan.deployment.file
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.buildPackages.xorg.lndir
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
};

in
{
inherit
target
baseTestMachine
nixosTestLib
closureInfo
;
}
@@ -29,34 +29,32 @@ nixosLib.runTest (
{ nodes, ... }:
''
import subprocess
import tempfile
from nixos_test_lib.nix_setup import setup_nix_in_nix
from nixos_test_lib.nix_setup import setup_nix_in_nix # type: ignore[import-untyped]

with tempfile.TemporaryDirectory() as temp_dir:
setup_nix_in_nix(temp_dir, None) # No closure info for this test
setup_nix_in_nix(None) # No closure info for this test

start_all()
admin1.wait_for_unit("multi-user.target")
peer1.wait_for_unit("multi-user.target")
start_all()
admin1.wait_for_unit("multi-user.target")
peer1.wait_for_unit("multi-user.target")

# peer1 should have the 'hello' file
peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")
# peer1 should have the 'hello' file
peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")

ls_out = peer1.succeed("ls -la ${nodes.peer1.clan.core.vars.generators.new-service.files.a-secret.path}")
# Check that the file is owned by 'nobody'
assert "nobody" in ls_out, f"File is not owned by 'nobody': {ls_out}"
# Check that the file is in the 'users' group
assert "users" in ls_out, f"File is not in the 'users' group: {ls_out}"
# Check that the file is in the '0644' mode
assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}"
ls_out = peer1.succeed("ls -la ${nodes.peer1.clan.core.vars.generators.new-service.files.a-secret.path}")
# Check that the file is owned by 'nobody'
assert "nobody" in ls_out, f"File is not owned by 'nobody': {ls_out}"
# Check that the file is in the 'users' group
assert "users" in ls_out, f"File is not in the 'users' group: {ls_out}"
# Check that the file is in the '0644' mode
assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}"

# Run clan command
result = subprocess.run(
["${
clan-core.packages.${hostPkgs.system}.clan-cli
}/bin/clan", "machines", "list", "--flake", "${config.clan.test.flakeForSandbox}"],
check=True
)
# Run clan command
result = subprocess.run(
["${
clan-core.packages.${hostPkgs.system}.clan-cli
}/bin/clan", "machines", "list", "--flake", "${config.clan.test.flakeForSandbox}"],
check=True
)
'';
}
)
@@ -27,10 +27,7 @@
modules.new-service = {
_class = "clan.service";
manifest.name = "new-service";
manifest.readme = "Just a sample readme to not trigger the warning.";
roles.peer = {
description = "A peer that uses the new-service to generate some files.";
};
roles.peer = { };
perMachine = {
nixosModule = {
# This should be generated by:

@@ -34,10 +34,7 @@ nixosLib.runTest (
modules.new-service = {
_class = "clan.service";
manifest.name = "new-service";
manifest.readme = "Just a sample readme to not trigger the warning.";
roles.peer = {
description = "A peer that uses the new-service to generate some files.";
};
roles.peer = { };
perMachine = {
nixosModule = {
# This should be generated by:
@@ -1,67 +0,0 @@
{ self, pkgs, ... }:

let

cli = self.packages.${pkgs.hostPlatform.system}.clan-cli-full;
in
{
name = "systemd-abstraction";

nodes = {
peer1 = {

users.users.text-user = {
isNormalUser = true;
linger = true;
uid = 1000;
extraGroups = [ "systemd-journal" ];
};

# Set environment variables for user systemd
environment.extraInit = ''
if [ "$(id -u)" = "1000" ]; then
export XDG_RUNTIME_DIR="/run/user/1000"
export DBUS_SESSION_BUS_ADDRESS="unix:path=/run/user/1000/bus"
fi
'';

# Enable PAM for user systemd sessions
security.pam.services.systemd-user = {
startSession = true;
# Workaround for containers - use pam_permit to avoid helper binary issues
text = pkgs.lib.mkForce ''
account required pam_permit.so
session required pam_permit.so
session required pam_env.so conffile=/etc/pam/environment readenv=0
session required ${pkgs.systemd}/lib/security/pam_systemd.so
'';
};

environment.systemPackages = [
cli
(cli.pythonRuntime.withPackages (
ps: with ps; [
pytest
pytest-xdist
]
))
];
};
};

testScript =
{ ... }:
''
start_all()

peer1.wait_for_unit("multi-user.target")
peer1.wait_for_unit("user@1000.service")

# Fix user journal permissions so text-user can read their own logs
peer1.succeed("chown text-user:systemd-journal /var/log/journal/*/user-1000.journal*")
peer1.succeed("chmod 640 /var/log/journal/*/user-1000.journal*")

# Run tests as text-user (environment variables are set automatically)
peer1.succeed("su - text-user -c 'pytest -s -n0 ${cli}/${cli.pythonRuntime.sitePackages}/clan_lib/service_runner'")
'';
}
@@ -1,26 +0,0 @@
(
{ ... }:
{
name = "test-extra-python-packages";

extraPythonPackages = ps: [ ps.numpy ];

nodes.machine =
{ ... }:
{
networking.hostName = "machine";
};

testScript = ''
import numpy as np

start_all()
machine.wait_for_unit("multi-user.target")

# Test availability of numpy
arr = np.array([1, 2, 3])
print(f"Numpy array: {arr}")
assert len(arr) == 3
'';
}
)
@@ -67,15 +67,6 @@
];
};

nix.settings = {
flake-registry = "";
# required for setting the `flake-registry`
experimental-features = [
"nix-command"
"flakes"
];
};

# Define the mounts that exist in the container to prevent them from being stopped
fileSystems = {
"/" = {
@@ -115,13 +106,12 @@
let
closureInfo = pkgs.closureInfo {
rootPaths = [
self.packages.${pkgs.hostPlatform.system}.clan-cli
self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks
self.packages.${pkgs.system}.clan-cli
self.checks.${pkgs.system}.clan-core-for-checks
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-update-machine.config.system.build.toplevel
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.buildPackages.xorg.lndir
pkgs.bubblewrap
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
};
@@ -132,7 +122,7 @@
imports = [ self.nixosModules.test-update-machine ];
};
extraPythonPackages = _p: [
self.legacyPackages.${pkgs.hostPlatform.system}.nixosTestLib
self.legacyPackages.${pkgs.system}.nixosTestLib
];

testScript = ''
@@ -154,7 +144,7 @@
# Prepare test flake and Nix store
flake_dir = prepare_test_flake(
temp_dir,
"${self.checks.${pkgs.hostPlatform.system}.clan-core-for-checks}",
"${self.checks.x86_64-linux.clan-core-for-checks}",
"${closureInfo}"
)
(flake_dir / ".clan-flake").write_text("") # Ensure .clan-flake exists
@@ -221,13 +211,12 @@
[
"${pkgs.nix}/bin/nix",
"copy",
"--from",
f"{temp_dir}/store",
"--to",
"ssh://root@192.168.1.1",
"--no-check-sigs",
f"${self.packages.${pkgs.hostPlatform.system}.clan-cli}",
f"${self.packages.${pkgs.system}.clan-cli}",
"--extra-experimental-features", "nix-command flakes",
"--from", f"{os.environ["TMPDIR"]}/store"
],
check=True,
env={
@@ -242,7 +231,7 @@
"-o", "UserKnownHostsFile=/dev/null",
"-o", "StrictHostKeyChecking=no",
f"root@192.168.1.1",
"${self.packages.${pkgs.hostPlatform.system}.clan-cli}/bin/clan",
"${self.packages.${pkgs.system}.clan-cli}/bin/clan",
"machines",
"update",
"--debug",
@@ -270,7 +259,7 @@

# Run clan update command
subprocess.run([
"${self.packages.${pkgs.hostPlatform.system}.clan-cli-full}/bin/clan",
"${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
"machines",
"update",
"--debug",
@@ -297,7 +286,7 @@

# Run clan update command with --build-host
subprocess.run([
"${self.packages.${pkgs.hostPlatform.system}.clan-cli-full}/bin/clan",
"${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
"machines",
"update",
"--debug",
115 checks/wireguard/default.nix Normal file
@@ -0,0 +1,115 @@
{
pkgs,
nixosLib,
clan-core,
lib,
...
}:
nixosLib.runTest (
{ ... }:

let
machines = [
"controller1"
"controller2"
"peer1"
"peer2"
"peer3"
];
in
{
imports = [
clan-core.modules.nixosTest.clanTest
];

hostPkgs = pkgs;

name = "wireguard";

clan = {
directory = ./.;
modules."@clan/wireguard" = import ../../clanServices/wireguard/default.nix;
inventory = {

machines = lib.genAttrs machines (_: { });

instances = {

/*
wg-test-one
┌───────────────────────────────┐
│ ◄───────────── │
│ controller2 controller1
│ ▲ ─────────────► ▲ ▲
│ │ │ │ │ │ │ │ │
│ │ │ │ │ │ │ │ │
│ │ │ │ │ │ │ │ │
│ │ │ │ └───────────────┐ │ │ │ │
│ │ │ └──────────────┐ │ │ │ │ │
│ ▼ │ ▼ ▼ ▼
└─► peer2 │ peer1 peer3
│ ▲
└──────────┘
*/

wg-test-one = {

module.name = "@clan/wireguard";
module.input = "self";

roles.controller.machines."controller1".settings = {
endpoint = "192.168.1.1";
};

roles.controller.machines."controller2".settings = {
endpoint = "192.168.1.2";
};

roles.peer.machines = {
peer1.settings.controller = "controller1";
peer2.settings.controller = "controller2";
peer3.settings.controller = "controller1";
};
};

# TODO: Will this actually work with conflicting ports? Can we re-use interfaces?
#wg-test-two = {
# module.name = "@clan/wireguard";

# roles.controller.machines."controller1".settings = {
# endpoint = "192.168.1.1";
# port = 51922;
# };

# roles.peer.machines = {
# peer1 = { };
# };
#};
};
};
};

testScript = ''
start_all()

# Show all addresses
machines = [peer1, peer2, peer3, controller1, controller2]
for m in machines:
m.systemctl("start network-online.target")

for m in machines:
m.wait_for_unit("network-online.target")
m.wait_for_unit("systemd-networkd.service")

print("\n\n" + "="*60)
print("STARTING PING TESTS")
print("="*60)

for m1 in machines:
for m2 in machines:
if m1 != m2:
print(f"\n--- Pinging from {m1.name} to {m2.name}.wg-test-one ---")
m1.wait_until_succeeds(f"ping -c1 {m2.name}.wg-test-one >&2")
'';
}
)
@@ -1,25 +0,0 @@
The admin service aggregates components that allow an administrator to log in to and manage the machine.

The following configuration:

1. Enables OpenSSH with root login and adds an SSH public key named `myusersKey` to the machine's authorized_keys via the `allowedKeys` setting.

2. Automatically generates a password for the root user.

```nix
instances = {
admin = {
roles.default.tags = {
all = { };
};
roles.default.settings = {
allowedKeys = {
myusersKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEFDNnynMbFWatSFdANzbJ8iiEKL7+9ZpDaMLrWRQjyH lhebendanz@wintux";
};
};
};
};
```
@@ -1,15 +1,15 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "clan-core/admin";
manifest.description = "Adds a root user with ssh access";
manifest.description = "Convenient Administration for the Clan App";
manifest.categories = [ "Utility" ];
manifest.readme = builtins.readFile ./README.md;

roles.default = {
description = "Placeholder role to apply the admin service";
interface =
{ lib, ... }:
{

options = {
allowedKeys = lib.mkOption {
default = { };

@@ -1,6 +1,6 @@
{ ... }:
{ lib, ... }:
let
module = ./default.nix;
module = lib.modules.importApply ./default.nix { };
in
{
clan.modules = {

@@ -2,7 +2,7 @@ let
public-key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII6zj7ubTg6z/aDwRNwvM/WlQdUocMprQ8E92NWxl6t+ test@test";
in
{
name = "admin";
name = "service-admin";

clan = {
directory = ./.;

@@ -1 +0,0 @@
25.11
@@ -5,11 +5,11 @@ inventory.instances = {
borgbackup = {
module = {
name = "borgbackup";
input = "clan-core";
input = "clan";
};
roles.client.machines."jon".settings = {
destinations."storagebox" = {
repo = "username@hostname:/./borgbackup";
repo = "username@$hostname:/./borgbackup";
rsh = ''ssh -oPort=23 -i /run/secrets/vars/borgbackup/borgbackup.ssh'';
};
};

@@ -9,7 +9,7 @@
# TODO: a client can only be in one instance, add constraint

roles.server = {
description = "A borgbackup server that stores the backups of clients.";

interface =
{ lib, ... }:
{
@@ -54,7 +54,7 @@
authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machineName)) ];
# };
# }) machinesWithKey;
}) (roles.client.machines or { });
}) roles.client.machines;
in
hosts;
};
@@ -62,7 +62,6 @@
};

roles.client = {
description = "A borgbackup client that backs up to all borgbackup server roles.";
interface =
{
lib,
@@ -188,7 +187,7 @@
config.clan.core.vars.generators.borgbackup.files."borgbackup.ssh".path
} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
};
}) (builtins.attrNames (roles.server.machines or { }));
}) (builtins.attrNames roles.server.machines);
in
(builtins.listToAttrs destinations);
@@ -1,6 +1,6 @@
{ ... }:
{ lib, ... }:
let
module = ./default.nix;
module = lib.modules.importApply ./default.nix { };
in
{
clan.modules = {

@@ -3,7 +3,7 @@
...
}:
{
name = "borgbackup";
name = "service-borgbackup";

clan = {
directory = ./.;

@@ -1 +0,0 @@
25.11
@@ -1 +0,0 @@
25.11
@@ -1,32 +0,0 @@
This service sets up a certificate authority (CA) that can issue certificates to
other machines in your clan. For this the `ca` role is used.
It additionally provides a `default` role, that can be applied to all machines
in your clan and will make sure they trust your CA.

## Example Usage

The following configuration would add a CA for the top level domain `.foo`. If
the machine `server` now hosts a webservice at `https://something.foo`, it will
get a certificate from `ca` which is valid inside your clan. The machine
`client` will trust this certificate if it makes a request to
`https://something.foo`.

This clan service can be combined with the `coredns` service for easy to deploy,
SSL secured clan-internal service hosting.

```nix
inventory = {
machines.ca = { };
machines.client = { };
machines.server = { };

instances."certificates" = {
module.name = "certificates";
module.input = "self";

roles.ca.machines.ca.settings.tlds = [ "foo" ];
roles.default.machines.client = { };
roles.default.machines.server = { };
};
};
```
@@ -1,246 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "certificates";
manifest.description = "Sets up a PKI certificate chain using step-ca";
manifest.categories = [ "Network" ];
manifest.readme = builtins.readFile ./README.md;

roles.ca = {
description = "A certificate authority that issues and signs certificates for other machines.";
interface =
{ lib, ... }:
{

options.acmeEmail = lib.mkOption {
type = lib.types.str;
default = "none@none.tld";
description = ''
Email address for account creation and correspondence from the CA.
It is recommended to use the same email for all certs to avoid account
creation limits.
'';
};

options.tlds = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "Top level domain for this CA. Certificates will be issued and trusted for *.<tld>";
};

options.expire = lib.mkOption {
type = lib.types.nullOr lib.types.str;
description = "When the certificate should expire.";
default = "8760h";
example = "8760h";
};
};

perInstance =
{ settings, ... }:
{
nixosModule =
{
config,
pkgs,
lib,
...
}:
let
domains = map (tld: "ca.${tld}") settings.tlds;
in
{
security.acme.defaults.email = settings.acmeEmail;
security.acme = {
certs = builtins.listToAttrs (
map (domain: {
name = domain;
value = {
server = "https://${domain}:1443/acme/acme/directory";
};
}) domains
);
};

networking.firewall.allowedTCPPorts = [
80
443
];

services.nginx = {
enable = true;
recommendedProxySettings = true;
virtualHosts = builtins.listToAttrs (
map (domain: {
name = domain;
value = {
addSSL = true;
enableACME = true;
locations."/".proxyPass = "https://localhost:1443";
locations."= /ca.crt".alias =
config.clan.core.vars.generators.step-intermediate-cert.files."intermediate.crt".path;
};
}) domains
);
};

clan.core.vars.generators = {

# Intermediate key generator
"step-intermediate-key" = {
files."intermediate.key" = {
secret = true;
deploy = true;
owner = "step-ca";
group = "step-ca";
};
runtimeInputs = [ pkgs.step-cli ];
script = ''
step crypto keypair --kty EC --curve P-256 --no-password --insecure $out/intermediate.pub $out/intermediate.key
'';
};

# Intermediate certificate generator
"step-intermediate-cert" = {
files."intermediate.crt".secret = false;
dependencies = [
"step-ca"
"step-intermediate-key"
];
runtimeInputs = [ pkgs.step-cli ];
script = ''
# Create intermediate certificate
step certificate create \
--ca $in/step-ca/ca.crt \
--ca-key $in/step-ca/ca.key \
--ca-password-file /dev/null \
--key $in/step-intermediate-key/intermediate.key \
--template ${pkgs.writeText "intermediate.tmpl" ''
{
"subject": {{ toJson .Subject }},
"keyUsage": ["certSign", "crlSign"],
"basicConstraints": {
"isCA": true,
"maxPathLen": 0
},
"nameConstraints": {
"critical": true,
"permittedDNSDomains": [${
(lib.strings.concatStringsSep "," (map (tld: ''"${tld}"'') settings.tlds))
}]
}
}
''} ${lib.optionalString (settings.expire != null) "--not-after ${settings.expire}"} \
--not-before=-12h \
--no-password --insecure \
"Clan Intermediate CA" \
$out/intermediate.crt
'';
};
};

services.step-ca = {
enable = true;
intermediatePasswordFile = "/dev/null";
address = "0.0.0.0";
port = 1443;
settings = {
root = config.clan.core.vars.generators.step-ca.files."ca.crt".path;
crt = config.clan.core.vars.generators.step-intermediate-cert.files."intermediate.crt".path;
key = config.clan.core.vars.generators.step-intermediate-key.files."intermediate.key".path;
dnsNames = domains;
logger.format = "text";
db = {
type = "badger";
dataSource = "/var/lib/step-ca/db";
};
authority = {
provisioners = [
{
type = "ACME";
name = "acme";
forceCN = true;
}
];
claims = {
maxTLSCertDuration = "2160h";
defaultTLSCertDuration = "2160h";
};
backdate = "1m0s";
};
tls = {
cipherSuites = [
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
];
minVersion = 1.2;
maxVersion = 1.3;
renegotiation = false;
};
};
};
};
};
};

# Empty role, so we can add non-ca machins to the instance to trust the CA
roles.default = {
description = "A machine that trusts the CA and can get certificates issued by it.";
interface =
{ lib, ... }:
{
options.acmeEmail = lib.mkOption {
type = lib.types.str;
default = "none@none.tld";
description = ''
Email address for account creation and correspondence from the CA.
It is recommended to use the same email for all certs to avoid account
creation limits.
'';
};
};

perInstance =
{ settings, ... }:
{
nixosModule.security.acme.defaults.email = settings.acmeEmail;
};
};

# All machines (independent of role) will trust the CA
perMachine.nixosModule =
{ pkgs, config, ... }:
{
# Root CA generator
clan.core.vars.generators = {
"step-ca" = {
share = true;
files."ca.key" = {
secret = true;
deploy = false;
};
files."ca.crt".secret = false;
runtimeInputs = [ pkgs.step-cli ];
script = ''
step certificate create --template ${pkgs.writeText "root.tmpl" ''
{
"subject": {{ toJson .Subject }},
"issuer": {{ toJson .Subject }},
"keyUsage": ["certSign", "crlSign"],
"basicConstraints": {
"isCA": true,
"maxPathLen": 1
}
}
''} "Clan Root CA" $out/ca.crt $out/ca.key \
--kty EC --curve P-256 \
--not-after=8760h \
--not-before=-12h \
--no-password --insecure
'';
};
};
security.pki.certificateFiles = [ config.clan.core.vars.generators."step-ca".files."ca.crt".path ];
environment.systemPackages = [ pkgs.openssl ];
security.acme.acceptTerms = true;
};
}
@@ -1,17 +0,0 @@
{
...
}:
let
module = ./default.nix;
in
{
clan.modules.certificates = module;
perSystem =
{ ... }:
{
clan.nixosTests.certificates = {
imports = [ ./tests/vm/default.nix ];
clan.modules.certificates = module;
};
};
}
@@ -1,84 +0,0 @@
{
name = "certificates";

clan = {
directory = ./.;
inventory = {

machines.ca = { }; # 192.168.1.1
machines.client = { }; # 192.168.1.2
machines.server = { }; # 192.168.1.3

instances."certificates" = {
module.name = "certificates";
module.input = "self";

roles.ca.machines.ca.settings.tlds = [ "foo" ];
roles.default.machines.client = { };
roles.default.machines.server = { };
};
};
};

nodes =
let
hostConfig = ''
192.168.1.1 ca.foo
192.168.1.3 test.foo
'';
in
{

client.networking.extraHosts = hostConfig;
ca.networking.extraHosts = hostConfig;

server = {

networking.extraHosts = hostConfig;

# TODO: Could this be set automatically?
# I would like to get this information from the coredns module, but we
# cannot model dependencies yet
security.acme.certs."test.foo".server = "https://ca.foo/acme/acme/directory";

# Host a simple service on 'server', with SSL provided via our CA. 'client'
# should be able to curl it via https and accept the certificates
# presented
networking.firewall.allowedTCPPorts = [
80
443
];

services.nginx = {
enable = true;
virtualHosts."test.foo" = {
enableACME = true;
forceSSL = true;
locations."/" = {
return = "200 'test server response'";
extraConfig = "add_header Content-Type text/plain;";
};
};
};
};
};

testScript = ''
start_all()

import time

time.sleep(3)
ca.succeed("systemctl restart acme-order-renew-ca.foo.service ")

time.sleep(3)
server.succeed("systemctl restart acme-test.foo.service")

# It takes a while for the correct certs to appear (before that self-signed
# are presented by nginx) so we wait for a bit.
client.wait_until_succeeds("curl -v https://test.foo")

# Show certificate information for debugging
client.succeed("openssl s_client -connect test.foo:443 -servername test.foo </dev/null 2>/dev/null | openssl x509 -text -noout 1>&2")
'';
}
@@ -1,6 +0,0 @@
[
{
"publickey": "age1yd2cden7jav8x4nzx2fwze2fsa5j0qm2m3t7zum765z3u4gj433q7dqj43",
"type": "age"
}
]
@@ -1,6 +0,0 @@
[
{
"publickey": "age1js225d8jc507sgcg0fdfv2x3xv3asm4ds5c6s4hp37nq8spxu95sc5x3ce",
"type": "age"
}
]
@@ -1,6 +0,0 @@
[
{
"publickey": "age1nwuh8lc604mnz5r8ku8zswyswnwv02excw237c0cmtlejp7xfp8sdrcwfa",
"type": "age"
}
]
@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:6+XilULKRuWtAZ6B8Lj9UqCfi1T6dmqrDqBNXqS4SvBwM1bIWiL6juaT1Q7ByOexzID7tY740gmQBqTey54uLydh8mW0m4ZtUqw=,iv:9kscsrMPBGkutTnxrc5nrc7tQXpzLxw+929pUDKqTu0=,tag:753uIjm8ZRs0xsjiejEY8g==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA1d3kycldZRXhmR0FqTXJp\nWWU0MDBYNmxxbFE5M2xKYm5KWnQ0MXBHNEM4CjN4RFFVcFlkd3pjTFVDQ3Vackdj\nVTVhMWoxdFpsWHp5S1p4L05kYk5LUkkKLS0tIENtZFZZTjY2amFVQmZLZFplQzBC\nZm1vWFI4MXR1ZHIxTTQ5VXdSYUhvOTQKte0bKjXQ0xA8FrpuChjDUvjVqp97D8kT\n3tVh6scdjxW48VSBZP1GRmqcMqCdj75GvJTbWeNEV4PDBW7GI0UW+Q==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:39Z",
"mac": "ENC[AES256_GCM,data:AftMorrH7qX5ctVu5evYHn5h9pC4Mmm2VYaAV8Hy0PKTc777jNsL6DrxFVV3NVqtecpwrzZFWKgzukcdcRJe4veVeBrusmoZYtifH0AWZTEVpVlr2UXYYxCDmNZt1WHfVUo40bT//X6QM0ye6a/2Y1jYPbMbryQNcGmnpk9PDvU=,iv:5nk+d8hzA05LQp7ZHRbIgiENg2Ha6J6YzyducM6zcNU=,tag:dy1hqWVzMu/+fSK57h9ZCA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}
@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:jdTuGQUYvT1yXei1RHKsOCsABmMlkcLuziHDVhA7NequZeNu0fSbrJTXQDCHsDGhlYRcjU5EsEDT750xdleXuD3Gs9zWvPVobI4=,iv:YVow3K1j6fzRF9bRfIEpuOkO/nRpku/UQxWNGC+UJQQ=,tag:cNLM5R7uu6QpwPB9K6MYzg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvOVF2WXRSL0NpQzFZR01I\nNU85TGcyQmVDazN1dmpuRFVTZEg5NDRKTGhrCk1IVjFSU1V6WHBVRnFWcHkyVERr\nTjFKbW1mQ2FWOWhjN2VPamMxVEQ5VkkKLS0tIENVUGlhanhuWGtDKzBzRmk2dE4v\nMXZBRXNMa3IrOTZTNHRUWVE3UXEwSWMK2cBLoL/H/Vxd/klVrqVLdX9Mww5j7gw/\nEWc5/hN+km6XoW+DiJxVG4qaJ7qqld6u5ZnKgJT+2h9CfjA04I2akg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:51Z",
"mac": "ENC[AES256_GCM,data:zOBQVM2Ydu4v0+Fw3p3cEU+5+7eKaadV0tKro1JVOxclG1Vs6Myq57nw2eWf5JxIl0ulL+FavPKY26qOQ3aqcGOT3PMRlCda9z+0oSn9Im9bE/DzAGmoH/bp76kFkgTTOCZTMUoqJ+UJqv0qy1BH/92sSSKmYshEX6d1vr5ISrw=,iv:i9ZW4sLxOCan4UokHlySVr1CW39nCTusG4DmEPj/gIw=,tag:iZBDPHDkE3Vt5mFcFu1TPQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}
@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:5CJuHcxJMXZJ8GqAeG3BrbWtT1kade4kxgJsn1cRpmr1UgN0ZVYnluPEiBscClNSOzcc6vcrBpfTI3dj1tASKTLP58M+GDBFQDo=,iv:gsK7XqBGkYCoqAvyFlIXuJ27PKSbTmy7f6cgTmT2gow=,tag:qG5KejkBvy9ytfhGXa/Mnw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBxbzVqYkplTzJKN1pwS3VM\naFFIK2VsR3lYUVExYW9ieERBL0tlcFZtVzJRCkpiLzdmWmFlOUZ5QUJ4WkhXZ2tQ\nZm92YXBCV0RpYnIydUdEVTRiamI4bjAKLS0tIG93a2htS1hFcjBOeVFnNCtQTHVr\na2FPYjVGbWtORjJVWXE5bndPU1RWcXMKikMEB7X+kb7OtiyqXn3HRpLYkCdoayDh\n7cjGnplk17q25/lRNHM4JVS5isFfuftCl01enESqkvgq+cwuFwa9DQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:59Z",
"mac": "ENC[AES256_GCM,data:xybV2D0xukZnH2OwRpIugPnS7LN9AbgGKwFioPJc1FQWx9TxMUVDwgMN6V5WrhWkXgF2zP4krtDYpEz4Vq+LbOjcnTUteuCc+7pMHubuRuip7j+M32MH1kuf4bVZuXbCfvm7brGxe83FzjoioLqzA8g/X6Q1q7/ErkNeFjluC3Q=,iv:QEW3EUKSRZY3fbXlP7z+SffWkQeXwMAa5K8RQW7NvPE=,tag:DhFxY7xr7H1Wbd527swD0Q==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}
@@ -1 +0,0 @@
25.11
@@ -1,12 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIBsDCCAVegAwIBAgIQbT1Ivm+uwyf0HNkJfan2BTAKBggqhkjOPQQDAjAXMRUw
EwYDVQQDEwxDbGFuIFJvb3QgQ0EwHhcNMjUwOTAxMjA0MzAzWhcNMjYwOTAyMDg0
MzAzWjAfMR0wGwYDVQQDExRDbGFuIEludGVybWVkaWF0ZSBDQTBZMBMGByqGSM49
AgEGCCqGSM49AwEHA0IABDXCNrUIotju9P1U6JxLV43sOxLlRphQJS4dM+lvjTZc
aQ+HwQg0AHVlQNRwS3JqKrJJtJVyKbZklh6eFaDPoj6jfTB7MA4GA1UdDwEB/wQE
AwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBRKHaccHgP2ccSWVBWN
zGoDdTg7aTAfBgNVHSMEGDAWgBSfsnz4phMJx9su/kgeF/FbZQCBgzAVBgNVHR4B
Af8ECzAJoAcwBYIDZm9vMAoGCCqGSM49BAMCA0cAMEQCICiUDk1zGNzpS/iVKLfW
zUGaCagpn2mCx4xAXQM9UranAiAn68nVYGWjkzhU31wyCAupxOjw7Bt96XXqIAz9
hLLtMA==
-----END CERTIFICATE-----
@@ -1 +0,0 @@
../../../../../../sops/machines/ca
@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:Auonh9fa7jSkld1Zyxw74x5ydj6Xc+0SOgiqumVETNCfner9K96Rmv1PkREuHNGWPsnzyEM3pRT8ijvu3QoKvy9QPCCewyT07Wqe4G74+bk1iMeAHsV3To6kHs6M8OISvE+CmG0+hlLmdfRSabTzyWPLHbOjvFTEEuA5G7xiryacSYOE++eeEHdn+oUDh/IMTcfLjCGMjsXFikx1Hb+ofeRTlCg47+0w4MXVvQkOzQB5V2C694jZXvZ19jd/ioqr8YASz2xatGvqwW6cpZxqOWyZJ0UAj/6yFk6tZWifqVB3wgU=,iv:ITFCrDkeWl4GWCebVq15ei9QmkOLDwUIYojKZ2TU6JU=,tag:8k4iYbCIusUykY79H86WUQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBsT25UbjJTQ2tzbnQyUm9p\neWx1UlZIeVpocnBqUCt0YnFlN2FOU25Lb0hNCmdXUUsyalRTbHRRQ0NLSGc1YllV\nUXRwaENhaXU1WmdnVDE0UWprUUUyeDAKLS0tIHV3dHU3aG5JclM0V3FadzN0SU14\ndFptbEJUNXQ4QVlqbkJ1TjAvdDQwSGsKcKPWUjhK7wzIpdIdksMShF2fpLdDTUBS\nZiU7P1T+3psxad9qhapvU0JrAY+9veFaYVEHha2aN/XKs8HqUcTp3A==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1yd2cden7jav8x4nzx2fwze2fsa5j0qm2m3t7zum765z3u4gj433q7dqj43",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBjZFVteVZwVGVmRE9NT3hG\nNGMyS3FSaXluM1FpeUp6SDVMUEpwYzg5SmdvCkRPU0QyU1JicGNkdlMyQWVkT0k3\nL2YrbDhWeGk4WFhxcUFmTmhZQ0pEQncKLS0tIG85Ui9rKzBJQ2VkMFBUQTMvSTlu\nbm8rZ09Wa24rQkNvTTNtYTZBN3MrZlkK7cjNhlUKZdOrRq/nKUsbUQgNTzX8jO+0\nzADpz6WCMvsJ15xazc10BGh03OtdMWl5tcoWMaZ71HWtI9Gip5DH0w==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:42:42Z",
"mac": "ENC[AES256_GCM,data:9xlO5Yis8DG/y8GjvP63NltD4xEL7zqdHL2cQE8gAoh/ZamAmK5ZL0ld80mB3eIYEPKZYvmUYI4Lkrge2ZdqyDoubrW+eJ3dxn9+StxA9FzXYwUE0t+bbsNJfOOp/kDojf060qLGsu0kAGKd2ca4WiDccR0Cieky335C7Zzhi/Q=,iv:bWQ4wr0CJHSN+6ipUbkYTDWZJyFQjDKszfpVX9EEUsY=,tag:kADIFgJBEGCvr5fPbbdEDA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}
@@ -1 +0,0 @@
25.11
@@ -1 +0,0 @@
25.11
@@ -1,10 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIBcTCCARigAwIBAgIRAIix99+AE7Y+uyiLGaRHEhUwCgYIKoZIzj0EAwIwFzEV
MBMGA1UEAxMMQ2xhbiBSb290IENBMB4XDTI1MDkwMTIwNDI1N1oXDTI2MDkwMjA4
NDI1N1owFzEVMBMGA1UEAxMMQ2xhbiBSb290IENBMFkwEwYHKoZIzj0CAQYIKoZI
zj0DAQcDQgAEk7nn9kzxI+xkRmNMlxD+7T78UqV3aqus0foJh6uu1CHC+XaebMcw
JN95nAe3oYA3yZG6Mnq9nCxsYha4EhzGYqNFMEMwDgYDVR0PAQH/BAQDAgEGMBIG
A1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFJ+yfPimEwnH2y7+SB4X8VtlAIGD
MAoGCCqGSM49BAMCA0cAMEQCIBId/CcbT5MPFL90xa+XQz+gVTdRwsu6Bg7ehMso
Bj0oAiBjSlttd5yeuZGXBm+O0Gl+WdKV60QlrWutNewXFS4UpQ==
-----END CERTIFICATE-----
@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:PnEXteU3I7U0OKgE+oR3xjHdLWYTpJjM/jlzxtGU0uP2pUBuQv3LxtEz+cP0ZsafHLNq2iNJ7xpUEE0g4d3M296S56oSocK3fREWBiJFiaC7SAEUiil1l3UCwHn7LzmdEmn8Kq7T+FK89wwqtVWIASLo2gZC/yHE5eEanEATTchGLSNiHJRzZ8n0Ekm8EFUA6czOqA5nPQHaSmeLzu1g80lSSi1ICly6dJksa6DVucwOyVFYFEeq8Dfyc1eyP8L1ee0D7QFYBMduYOXTKPtNnyDmdaQMj7cMMvE7fn04idIiAqw=,iv:nvLmAfFk2GXnnUy+Afr648R60Ou13eu9UKykkiA8Y+4=,tag:lTTAxfG0EDCU6u7xlW6xSQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBEMjNWUm5NbktQeTRWRjJE\nWWFZc2Rsa3I5aitPSno1WnhORENNcng5OHprCjNUQVhBVHFBcWFjaW5UdmxKTnZw\nQlI4MDk5Wkp0RElCeWgzZ2dFQkF2dkkKLS0tIDVreTkydnJ0RDdHSHlQeVV6bGlP\nTmpJOVBSb2dkVS9TZG5SRmFjdnQ1b3cKQ5XvwH1jD4XPVs5RzOotBDq8kiE6S5k2\nDBv6ugjsM5qV7/oGP9H69aSB4jKPZjEn3yiNw++Oorc8uXd5kSGh7w==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-09-02T08:43:00Z",
"mac": "ENC[AES256_GCM,data:3jFf66UyZUWEtPdPu809LCS3K/Hc6zbnluystl3eXS+KGI+dCoYmN9hQruRNBRxf6jli2RIlArmmEPBDQVt67gG/qugTdT12krWnYAZ78iocmOnkf44fWxn/pqVnn4JYpjEYRgy8ueGDnUkwvpGWVZpcXw5659YeDQuYOJ2mq0U=,iv:3k7fBPrABdLItQ2Z+Mx8Nx0eIEKo93zG/23K+Q5Hl3I=,tag:aehAObdx//DEjbKlOeM7iQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}
@@ -1 +0,0 @@
../../../../../sops/users/admin
@@ -1,70 +0,0 @@
|
||||
This module enables hosting clan-internal services easily, which can be resolved
|
||||
inside your VPN. This allows defining a custom top-level domain (e.g. `.clan`)
|
||||
and exposing endpoints from a machine to others, which will be
|
||||
accessible under `http://<service>.clan` in your browser.
|
||||
|
||||
The service consists of two roles:
|
||||
|
||||
- A `server` role: This is the DNS-server that will be queried when trying to
|
||||
resolve clan-internal services. It defines the top-level domain.
|
||||
- A `default` role: This does two things. First, it sets up the nameservers so
|
||||
that clan-internal queries are resolved via the `server` machine, while
|
||||
external queries are resolved as normal via DHCP. Second, it allows exposing
|
||||
services (see example below).
|
||||
|
||||
## Example Usage
|
||||
|
||||
Here the machine `dnsserver` is designated as internal DNS-server for the TLD
|
||||
`.foo`. `server01` will host an application that shall be reachable at
|
||||
`http://one.foo` and `server02` is going to be reachable at `http://two.foo`.
|
||||
`client` is any other machine that is part of the clan but does not host any
|
||||
services.
|
||||
|
||||
When `client` tries to resolve `http://one.foo`, the DNS query will be
|
||||
routed to `dnsserver`, which will answer with `192.168.1.3`. If it tries to
|
||||
resolve some external domain (e.g. `https://clan.lol`), the query will not be
|
||||
routed to `dnsserver` but resolved as before, via the nameservers advertised by
|
||||
DHCP.
|
||||
|
||||
```nix
|
||||
inventory = {
|
||||
|
||||
machines = {
|
||||
dnsserver = { }; # 192.168.1.2
|
||||
server01 = { }; # 192.168.1.3
|
||||
server02 = { }; # 192.168.1.4
|
||||
client = { }; # 192.168.1.5
|
||||
};
|
||||
|
||||
instances = {
|
||||
coredns = {
|
||||
|
||||
module.name = "@clan/coredns";
|
||||
module.input = "self";
|
||||
|
||||
# Add the default role to all machines, including `client`
|
||||
roles.default.tags.all = { };
|
||||
|
||||
# DNS server queries to http://<name>.foo are resolved here
|
||||
roles.server.machines."dnsserver".settings = {
|
||||
ip = "192.168.1.2";
|
||||
tld = "foo";
|
||||
};
|
||||
|
||||
# First service
|
||||
# Registers http://one.foo will resolve to 192.168.1.3
|
||||
# underlying service runs on server01
|
||||
roles.default.machines."server01".settings = {
|
||||
ip = "192.168.1.3";
|
||||
services = [ "one" ];
|
||||
};
|
||||
|
||||
# Second service
|
||||
roles.default.machines."server02".settings = {
|
||||
ip = "192.168.1.4";
|
||||
services = [ "two" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
```
|
||||
@@ -1,177 +0,0 @@
{ ... }:

{
  _class = "clan.service";
  manifest.name = "coredns";
  manifest.description = "Clan-internal DNS and service exposure";
  manifest.categories = [ "Network" ];
  manifest.readme = builtins.readFile ./README.md;

  roles.server = {
    description = "A DNS server that resolves services in the clan network.";
    interface =
      { lib, ... }:
      {
        options.tld = lib.mkOption {
          type = lib.types.str;
          default = "clan";
          description = ''
            Top-level domain for this instance. All services below this will be
            resolved internally.
          '';
        };

        options.ip = lib.mkOption {
          type = lib.types.str;
          # TODO: Set a default
          description = "IP for the DNS server to listen on";
        };

        options.dnsPort = lib.mkOption {
          type = lib.types.int;
          default = 1053;
          description = "Port of the clan-internal DNS server";
        };
      };

    perInstance =
      {
        roles,
        settings,
        ...
      }:
      {
        nixosModule =
          {
            lib,
            pkgs,
            ...
          }:
          {

            networking.firewall.allowedTCPPorts = [ settings.dnsPort ];
            networking.firewall.allowedUDPPorts = [ settings.dnsPort ];

            services.coredns =
              let

                # Get all service entries for one host
                hostServiceEntries =
                  host:
                  lib.strings.concatStringsSep "\n" (
                    map (
                      service: "${service} IN A ${roles.default.machines.${host}.settings.ip} ; ${host}"
                    ) roles.default.machines.${host}.settings.services
                  );

                zonefile = pkgs.writeTextFile {
                  name = "db.${settings.tld}";
                  text =
                    ''
                      $TTL 3600
                      @ IN SOA ns.${settings.tld}. admin.${settings.tld}. 1 7200 3600 1209600 3600
                      IN NS ns.${settings.tld}.
                      ns IN A ${settings.ip} ; DNS server

                    ''
                    + (lib.strings.concatStringsSep "\n" (
                      map (host: hostServiceEntries host) (lib.attrNames roles.default.machines)
                    ));
                };

              in
              {
                enable = true;
                config =
                  let
                    dnsPort = builtins.toString settings.dnsPort;
                  in
                  ''
                    .:${dnsPort} {
                      forward . 1.1.1.1
                      cache 30
                    }

                    ${settings.tld}:${dnsPort} {
                      file ${zonefile}
                    }
                  '';
              };
          };
      };
  };

  roles.default = {
    description = "A machine that uses the 'server' role as its resolver and registers its services under the configured TLD.";
    interface =
      { lib, ... }:
      {
        options.services = lib.mkOption {
          type = lib.types.listOf lib.types.str;
          default = [ ];
          description = ''
            Service endpoints this host exposes (without TLD). Each entry will
            be resolved to <entry>.<tld> using the configured top-level domain.
          '';
        };

        options.ip = lib.mkOption {
          type = lib.types.str;
          # TODO: Set a default
          description = "IP on which the services will listen";
        };

        options.dnsPort = lib.mkOption {
          type = lib.types.int;
          default = 1053;
          description = "Port of the clan-internal DNS server";
        };
      };

    perInstance =
      { roles, settings, ... }:
      {
        nixosModule =
          { lib, ... }:
          {

            networking.nameservers = map (m: "127.0.0.1:5353#${roles.server.machines.${m}.settings.tld}") (
              lib.attrNames roles.server.machines
            );

            services.resolved.domains = map (m: "~${roles.server.machines.${m}.settings.tld}") (
              lib.attrNames roles.server.machines
            );

            services.unbound = {
              enable = true;
              settings = {
                server = {
                  port = 5353;
                  verbosity = 2;
                  interface = [ "127.0.0.1" ];
                  access-control = [ "127.0.0.0/8 allow" ];
                  do-not-query-localhost = "no";
                  domain-insecure = map (m: "${roles.server.machines.${m}.settings.tld}.") (
                    lib.attrNames roles.server.machines
                  );
                };

                # Default: forward everything else to DHCP-provided resolvers
                forward-zone = [
                  {
                    name = ".";
                    forward-addr = "127.0.0.53@53"; # Forward to systemd-resolved
                  }
                ];
                stub-zone = map (m: {
                  name = "${roles.server.machines.${m}.settings.tld}.";
                  stub-addr = "${roles.server.machines.${m}.settings.ip}@${builtins.toString settings.dnsPort}";
                }) (lib.attrNames roles.server.machines);
              };
            };
          };
      };
  };
}
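For illustration, instantiating the zonefile template above with the values from the README example (`tld = "foo"`, the server at `192.168.1.2`, `server01`/`server02` exposing `one`/`two`) yields roughly the following zone. This is a sketch derived from the template, not captured output:

```
$TTL 3600
@ IN SOA ns.foo. admin.foo. 1 7200 3600 1209600 3600
IN NS ns.foo.
ns IN A 192.168.1.2 ; DNS server

one IN A 192.168.1.3 ; server01
two IN A 192.168.1.4 ; server02
```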
@@ -1,18 +0,0 @@
{ ... }:
let
  module = ./default.nix;
in
{
  clan.modules = {
    coredns = module;
  };
  perSystem =
    { ... }:
    {
      clan.nixosTests.coredns = {
        imports = [ ./tests/vm/default.nix ];

        clan.modules."@clan/coredns" = module;
      };
    };
}
@@ -1,110 +0,0 @@
{
  ...
}:
{
  name = "coredns";

  clan = {
    directory = ./.;
    test.useContainers = true;
    inventory = {

      machines = {
        dns = { }; # 192.168.1.2
        server01 = { }; # 192.168.1.3
        server02 = { }; # 192.168.1.4
        client = { }; # 192.168.1.1
      };

      instances = {
        coredns = {

          module.name = "@clan/coredns";
          module.input = "self";

          roles.default.tags.all = { };

          # First service
          roles.default.machines."server01".settings = {
            ip = "192.168.1.3";
            services = [ "one" ];
          };

          # Second service
          roles.default.machines."server02".settings = {
            ip = "192.168.1.4";
            services = [ "two" ];
          };

          # DNS server
          roles.server.machines."dns".settings = {
            ip = "192.168.1.2";
            tld = "foo";
          };
        };
      };
    };
  };

  nodes = {
    dns =
      { pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.net-tools ];
      };

    client =
      { pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.net-tools ];
      };

    server01 = {
      services.nginx = {
        enable = true;
        virtualHosts."one.foo" = {
          locations."/" = {
            return = "200 'test server response one'";
            extraConfig = "add_header Content-Type text/plain;";
          };
        };
      };
    };
    server02 = {
      services.nginx = {
        enable = true;
        virtualHosts."two.foo" = {
          locations."/" = {
            return = "200 'test server response two'";
            extraConfig = "add_header Content-Type text/plain;";
          };
        };
      };
    };
  };

  testScript = ''
    import json
    start_all()

    machines = [server01, server02, dns, client]

    for m in machines:
        m.systemctl("start network-online.target")

    for m in machines:
        m.wait_for_unit("network-online.target")

    # This should work, but appears to be broken in the test environment;
    # instead we query the DNS server directly with dig.
    # client.succeed("curl -k -v http://one.foo")
    # client.succeed("curl -k -v http://two.foo")

    answer = client.succeed("dig @192.168.1.2 -p 1053 one.foo")
    assert "192.168.1.3" in answer, "IP not found"

    answer = client.succeed("dig @192.168.1.2 -p 1053 two.foo")
    assert "192.168.1.4" in answer, "IP not found"
  '';
}
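To run this VM test locally, it should be exposed as a flake check via the `clan.nixosTests.coredns` wiring above. The exact attribute path below is an assumption, so check `nix flake show` for the real name:

```sh
# Attribute name assumed from clan.nixosTests.coredns; adjust for your system
nix build .#checks.x86_64-linux.coredns --log-format bar-with-logs
```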
Some files were not shown because too many files have changed in this diff.