Compare commits

3 Commits
mdformat...feat/optio

| Author | SHA1 | Date |
|---|---|---|
|  | 9225e707b8 |  |
|  | d45c5ac637 |  |
|  | 20ad968d04 |  |
@@ -1,20 +0,0 @@
name: Build Clan App (Darwin)

on:
  schedule:
    # Run every 4 hours
    - cron: "0 */4 * * *"
  workflow_dispatch:
  push:
    branches:
      - main

jobs:
  build-clan-app-darwin:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4

      - name: Build clan-app for x86_64-darwin
        run: |
          nix build .#packages.x86_64-darwin.clan-app --system x86_64-darwin --log-format bar-with-logs
9 .gitea/workflows/checks.yaml Normal file
@@ -0,0 +1,9 @@
name: checks
on:
  pull_request:
jobs:
  checks-impure:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4
      - run: nix run .#impure-checks
@@ -1,7 +1,6 @@
-#!/bin/sh
-
+#!/usr/bin/env bash
 # Shared script for creating pull requests in Gitea workflows
-set -eu
+set -euo pipefail

 # Required environment variables:
 # - CI_BOT_TOKEN: Gitea bot token for authentication
@@ -9,22 +8,22 @@ set -eu
 # - PR_TITLE: Title of the pull request
 # - PR_BODY: Body/description of the pull request

-if [ -z "${CI_BOT_TOKEN:-}" ]; then
+if [[ -z "${CI_BOT_TOKEN:-}" ]]; then
   echo "Error: CI_BOT_TOKEN is not set" >&2
   exit 1
 fi

-if [ -z "${PR_BRANCH:-}" ]; then
+if [[ -z "${PR_BRANCH:-}" ]]; then
   echo "Error: PR_BRANCH is not set" >&2
   exit 1
 fi

-if [ -z "${PR_TITLE:-}" ]; then
+if [[ -z "${PR_TITLE:-}" ]]; then
   echo "Error: PR_TITLE is not set" >&2
   exit 1
 fi

-if [ -z "${PR_BODY:-}" ]; then
+if [[ -z "${PR_BODY:-}" ]]; then
   echo "Error: PR_BODY is not set" >&2
   exit 1
 fi
@@ -44,12 +43,9 @@ resp=$(nix run --inputs-from . nixpkgs#curl -- -X POST \
   }" \
   "https://git.clan.lol/api/v1/repos/clan/clan-core/pulls")

-if ! pr_number=$(echo "$resp" | jq -r '.number'); then
-  echo "Error parsing response from pull request creation" >&2
-  exit 1
-fi
+pr_number=$(echo "$resp" | jq -r '.number')

-if [ "$pr_number" = "null" ]; then
+if [[ "$pr_number" == "null" ]]; then
   echo "Error creating pull request:" >&2
   echo "$resp" | jq . >&2
   exit 1
@@ -68,15 +64,12 @@ while true; do
     "delete_branch_after_merge": true
   }' \
   "https://git.clan.lol/api/v1/repos/clan/clan-core/pulls/$pr_number/merge")
-  if ! msg=$(echo "$resp" | jq -r '.message'); then
-    echo "Error parsing merge response" >&2
-    exit 1
-  fi
-  if [ "$msg" != "Please try again later" ]; then
+  msg=$(echo "$resp" | jq -r '.message')
+  if [[ "$msg" != "Please try again later" ]]; then
     break
   fi
   echo "Retrying in 2 seconds..."
   sleep 2
 done

 echo "Pull request #$pr_number merge initiated"
28 .gitea/workflows/update-clan-core-for-checks.yml Normal file
@@ -0,0 +1,28 @@
name: "Update pinned clan-core for checks"
on:
  repository_dispatch:
  workflow_dispatch:
  schedule:
    - cron: "51 2 * * *"
jobs:
  update-pinned-clan-core:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: true
      - name: Update clan-core for checks
        run: nix run .#update-clan-core-for-checks
      - name: Create pull request
        env:
          CI_BOT_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
        run: |
          export GIT_AUTHOR_NAME=clan-bot GIT_AUTHOR_EMAIL=clan-bot@clan.lol GIT_COMMITTER_NAME=clan-bot GIT_COMMITTER_EMAIL=clan-bot@clan.lol
          git commit -am "Update pinned clan-core for checks"

          # Use shared PR creation script
          export PR_BRANCH="update-clan-core-for-checks"
          export PR_TITLE="Update Clan Core for Checks"
          export PR_BODY="This PR updates the pinned clan-core flake input that is used for checks."

          ./.gitea/workflows/create-pr.sh
@@ -19,11 +19,8 @@ jobs:
       uses: Mic92/update-flake-inputs-gitea@main
       with:
-        # Exclude private flakes and update-clan-core checks flake
-        exclude-patterns: "checks/impure/flake.nix"
+        exclude-patterns: "devFlake/private/flake.nix,checks/impure/flake.nix"
         auto-merge: true
         git-author-name: "clan-bot"
         git-committer-name: "clan-bot"
         git-author-email: "clan-bot@clan.lol"
         git-committer-email: "clan-bot@clan.lol"
         gitea-token: ${{ secrets.CI_BOT_TOKEN }}
         github-token: ${{ secrets.CI_BOT_GITHUB_TOKEN }}
40 .gitea/workflows/update-private-flake-inputs.yml Normal file
@@ -0,0 +1,40 @@
name: "Update private flake inputs"
on:
  repository_dispatch:
  workflow_dispatch:
  schedule:
    - cron: "0 3 * * *" # Run daily at 3 AM
jobs:
  update-private-flake:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: true
      - name: Update private flake inputs
        run: |
          # Update the private flake lock file
          cd devFlake/private
          nix flake update
          cd ../..

          # Update the narHash
          bash ./devFlake/update-private-narhash
      - name: Create pull request
        env:
          CI_BOT_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
        run: |
          export GIT_AUTHOR_NAME=clan-bot GIT_AUTHOR_EMAIL=clan-bot@clan.lol GIT_COMMITTER_NAME=clan-bot GIT_COMMITTER_EMAIL=clan-bot@clan.lol

          # Check if there are any changes
          if ! git diff --quiet; then
            git add devFlake/private/flake.lock devFlake/private.narHash
            git commit -m "Update dev flake"

            # Use shared PR creation script
            export PR_BRANCH="update-dev-flake"
            export PR_TITLE="Update dev flake"
            export PR_BODY="This PR updates the dev flake inputs and corresponding narHash."
          else
            echo "No changes detected in dev flake inputs"
          fi
1 .gitignore vendored
@@ -39,6 +39,7 @@ select
 # Generated files
 pkgs/clan-app/ui/api/API.json
 pkgs/clan-app/ui/api/API.ts
 pkgs/clan-app/ui/api/Inventory.ts
 pkgs/clan-app/ui/api/modules_schemas.json
 pkgs/clan-app/ui/api/schema.json
+pkgs/clan-app/ui/.fonts
22 CODEOWNERS
@@ -1,20 +1,2 @@
-clanServices/.* @pinpox @kenji
-
-lib/test/container-test-driver/.* @DavHau @mic92
-lib/modules/inventory/.* @hsjobeki
-lib/modules/inventoryClass/.* @hsjobeki
-
-pkgs/clan-app/ui/.* @hsjobeki @brianmcgee
-pkgs/clan-app/clan_app/.* @qubasa @hsjobeki
-
-pkgs/clan-cli/clan_cli/.* @lassulus @mic92 @kenji
-pkgs/clan-cli/clan_cli/(secrets|vars)/.* @DavHau @lassulus
-
-pkgs/clan-cli/clan_lib/log_machines/.* @Qubasa
-pkgs/clan-cli/clan_lib/ssh/.* @Qubasa @Mic92 @lassulus
-pkgs/clan-cli/clan_lib/tags/.* @hsjobeki
-pkgs/clan-cli/clan_lib/persist/.* @hsjobeki
-pkgs/clan-cli/clan_lib/flake/.* @lassulus
-
-pkgs/clan-cli/api.py @hsjobeki
-pkgs/clan-cli/openapi.py @hsjobeki
+nixosModules/clanCore/vars/.* @lopter
+pkgs/clan-cli/clan_cli/(secrets|vars)/.* @lopter
@@ -1,6 +1,4 @@
 # Contributing to Clan

 <!-- Local file: docs/CONTRIBUTING.md -->
-
-Go to the Contributing guide at
-https://docs.clan.lol/guides/contributing/CONTRIBUTING
+Go to the Contributing guide at https://docs.clan.lol/guides/contributing/CONTRIBUTING
@@ -16,3 +16,4 @@ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
52 README.md
@@ -1,69 +1,45 @@
 # Clan core repository

-Welcome to the Clan core repository, the heart of the
-[clan.lol](https://clan.lol/) project! This monorepo is the foundation of Clan,
-a revolutionary open-source project aimed at restoring fun, freedom, and
-functionality to computing. Here, you'll find all the essential packages, NixOS
-modules, CLI tools, and tests needed to contribute to and work with the Clan
-project. Clan leverages the Nix system to ensure reliability, security, and
-seamless management of digital environments, putting the power back into the
-hands of users.
+Welcome to the Clan core repository, the heart of the [clan.lol](https://clan.lol/) project! This monorepo is the foundation of Clan, a revolutionary open-source project aimed at restoring fun, freedom, and functionality to computing. Here, you'll find all the essential packages, NixOS modules, CLI tools, and tests needed to contribute to and work with the Clan project. Clan leverages the Nix system to ensure reliability, security, and seamless management of digital environments, putting the power back into the hands of users.

 ## Why Clan?

-Our mission is simple: to democratize computing by providing tools that empower
-users, foster innovation, and challenge outdated paradigms. Clan represents our
-contribution to a future where technology serves humanity, not the other way
-around. By participating in Clan, you're joining a movement dedicated to
-creating a secure, user-empowered digital future.
+Our mission is simple: to democratize computing by providing tools that empower users, foster innovation, and challenge outdated paradigms. Clan represents our contribution to a future where technology serves humanity, not the other way around. By participating in Clan, you're joining a movement dedicated to creating a secure, user-empowered digital future.

 ## Features of Clan

-- **Full-Stack System Deployment:** Utilize Clan's toolkit alongside Nix's
-  reliability to build and manage systems effortlessly.
+- **Full-Stack System Deployment:** Utilize Clan’s toolkit alongside Nix's reliability to build and manage systems effortlessly.
 - **Overlay Networks:** Secure, private communication channels between devices.
-- **Virtual Machine Integration:** Seamless operation of VM applications within
-  the main operating system.
+- **Virtual Machine Integration:** Seamless operation of VM applications within the main operating system.
 - **Robust Backup Management:** Long-term, self-hosted data preservation.
-- **Intuitive Secret Management:** Simplified encryption and password management
-  processes.
+- **Intuitive Secret Management:** Simplified encryption and password management processes.

 ## Getting started with Clan

-If you're new to Clan and eager to dive in, start with our quickstart guide and
-explore the core functionalities that Clan offers:
+If you're new to Clan and eager to dive in, start with our quickstart guide and explore the core functionalities that Clan offers:

-- **Quickstart Guide**: Check out
-  [getting started](https://docs.clan.lol/#starting-with-a-new-clan-project)<!-- [docs/site/index.md](docs/site/index.md) -->
-  to get up and running with Clan in no time.
+- **Quickstart Guide**: Check out [getting started](https://docs.clan.lol/#starting-with-a-new-clan-project)<!-- [docs/site/index.md](docs/site/index.md) --> to get up and running with Clan in no time.

 ### Managing secrets

-In the Clan ecosystem, security is paramount. Learn how to handle secrets
-effectively:
+In the Clan ecosystem, security is paramount. Learn how to handle secrets effectively:

-- **Secrets Management**: Securely manage secrets by consulting
-  [Vars](https://docs.clan.lol/concepts/generators/)<!-- [secrets.md](docs/site/concepts/generators.md) -->.
+- **Secrets Management**: Securely manage secrets by consulting [secrets](https://docs.clan.lol/guides/getting-started/secrets/)<!-- [secrets.md](docs/site/guides/getting-started/secrets.md) -->.

 ### Contributing to Clan

-The Clan project thrives on community contributions. We welcome everyone to
-contribute and collaborate:
+The Clan project thrives on community contributions. We welcome everyone to contribute and collaborate:

-- **Contribution Guidelines**: Make a meaningful impact by following the steps
-  in
-  [contributing](https://docs.clan.lol/contributing/contributing/)<!-- [contributing.md](docs/CONTRIBUTING.md) -->.
+- **Contribution Guidelines**: Make a meaningful impact by following the steps in [contributing](https://docs.clan.lol/contributing/contributing/)<!-- [contributing.md](docs/CONTRIBUTING.md) -->.

 ## Join the revolution

-Clan is more than a tool; it's a movement towards a better digital future. By
-contributing to the Clan project, you're part of changing technology for the
-better, together.
+Clan is more than a tool; it's a movement towards a better digital future. By contributing to the Clan project, you're part of changing technology for the better, together.

 ### Community and support

 Connect with us and the Clan community for support and discussion:

 - [Matrix channel](https://matrix.to/#/#clan:clan.lol) for live discussions.
-- IRC bridge on [hackint#clan](https://chat.hackint.org/#/connect?join=clan) for
-  real-time chat support.
+- IRC bridge on [hackint#clan](https://chat.hackint.org/#/connect?join=clan) for real-time chat support.
210 checks/backups/flake-module.nix Normal file
@@ -0,0 +1,210 @@
{ self, ... }:
{
  clan.machines.test-backup = {
    imports = [ self.nixosModules.test-backup ];
    fileSystems."/".device = "/dev/null";
    boot.loader.grub.device = "/dev/null";
  };
  clan.inventory.services = {
    borgbackup.test-backup = {
      roles.client.machines = [ "test-backup" ];
      roles.server.machines = [ "test-backup" ];
    };
  };
  flake.nixosModules = {
    test-backup =
      {
        pkgs,
        lib,
        ...
      }:
      let
        dependencies =
          [
            pkgs.stdenv.drvPath
          ]
          ++ builtins.map (i: i.outPath) (builtins.attrValues (builtins.removeAttrs self.inputs [ "self" ]));
        closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
      in
      {
        imports = [
          # Do not import inventory modules. They should be configured via 'clan.inventory'
          #
          # TODO: Configure localbackup via inventory
          self.clanModules.localbackup
        ];
        # Borgbackup overrides
        services.borgbackup.repos.test-backups = {
          path = "/var/lib/borgbackup/test-backups";
          authorizedKeys = [ (builtins.readFile ../assets/ssh/pubkey) ];
        };
        clan.borgbackup.destinations.test-backup.repo = lib.mkForce "borg@machine:.";

        clan.core.networking.targetHost = "machine";
        networking.hostName = "machine";

        programs.ssh.knownHosts = {
          machine.hostNames = [ "machine" ];
          machine.publicKey = builtins.readFile ../assets/ssh/pubkey;
        };

        services.openssh = {
          enable = true;
          settings.UsePAM = false;
          settings.UseDns = false;
          hostKeys = [
            {
              path = "/root/.ssh/id_ed25519";
              type = "ed25519";
            }
          ];
        };

        users.users.root.openssh.authorizedKeys.keyFiles = [ ../assets/ssh/pubkey ];

        # This is needed to unlock the user for sshd
        # Because we use sshd without setuid binaries
        users.users.borg.initialPassword = "hello";

        systemd.tmpfiles.settings."vmsecrets" = {
          "/root/.ssh/id_ed25519" = {
            C.argument = "${../assets/ssh/privkey}";
            z = {
              mode = "0400";
              user = "root";
            };
          };
          "/etc/secrets/ssh.id_ed25519" = {
            C.argument = "${../assets/ssh/privkey}";
            z = {
              mode = "0400";
              user = "root";
            };
          };
          "/etc/secrets/borgbackup/borgbackup.ssh" = {
            C.argument = "${../assets/ssh/privkey}";
            z = {
              mode = "0400";
              user = "root";
            };
          };
          "/etc/secrets/borgbackup/borgbackup.repokey" = {
            C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
            z = {
              mode = "0400";
              user = "root";
            };
          };
        };
        clan.core.facts.secretStore = "vm";
        clan.core.vars.settings.secretStore = "vm";

        environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
        environment.etc.install-closure.source = "${closureInfo}/store-paths";
        nix.settings = {
          substituters = lib.mkForce [ ];
          hashed-mirrors = null;
          connect-timeout = lib.mkForce 3;
          flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
        };
        system.extraDependencies = dependencies;
        clan.core.state.test-backups.folders = [ "/var/test-backups" ];

        clan.core.state.test-service = {
          preBackupScript = ''
            touch /var/test-service/pre-backup-command
          '';
          preRestoreScript = ''
            touch /var/test-service/pre-restore-command
          '';
          postRestoreScript = ''
            touch /var/test-service/post-restore-command
          '';
          folders = [ "/var/test-service" ];
        };

        fileSystems."/mnt/external-disk" = {
          device = "/dev/vdb"; # created in tests with virtualisation.emptyDisks
          autoFormat = true;
          fsType = "ext4";
          options = [
            "defaults"
            "noauto"
          ];
        };

        clan.localbackup.targets.hdd = {
          directory = "/mnt/external-disk";
          preMountHook = ''
            touch /run/mount-external-disk
          '';
          postUnmountHook = ''
            touch /run/unmount-external-disk
          '';
        };
      };
  };
  perSystem =
    { pkgs, ... }:
    let
      clanCore = self.checks.x86_64-linux.clan-core-for-checks;
    in
    {
      checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
        nixos-test-backups = self.clanLib.test.containerTest {
          name = "nixos-test-backups";
          nodes.machine = {
            imports =
              [
                self.nixosModules.clanCore
                # Some custom overrides for the backup tests
                self.nixosModules.test-backup
              ]
              ++
                # import the inventory generated nixosModules
                self.clan.clanInternals.inventoryClass.machines.test-backup.machineImports;
            clan.core.settings.directory = ./.;
          };

          testScript = ''
            import json
            start_all()

            # dummy data
            machine.succeed("mkdir -p /var/test-backups /var/test-service")
            machine.succeed("echo testing > /var/test-backups/somefile")

            # create
            machine.succeed("clan backups create --debug --flake ${clanCore} test-backup")
            machine.wait_until_succeeds("! systemctl is-active borgbackup-job-test-backup >&2")
            machine.succeed("test -f /run/mount-external-disk")
            machine.succeed("test -f /run/unmount-external-disk")

            # list
            backup_id = json.loads(machine.succeed("borg-job-test-backup list --json"))["archives"][0]["archive"]
            out = machine.succeed("clan backups list --debug --flake ${clanCore} test-backup").strip()
            print(out)
            assert backup_id in out, f"backup {backup_id} not found in {out}"
            localbackup_id = "hdd::/mnt/external-disk/snapshot.0"
            assert localbackup_id in out, "localbackup not found in {out}"

            ## borgbackup restore
            machine.succeed("rm -f /var/test-backups/somefile")
            machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup borgbackup 'test-backup::borg@machine:.::{backup_id}' >&2")
            assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
            machine.succeed("test -f /var/test-service/pre-restore-command")
            machine.succeed("test -f /var/test-service/post-restore-command")
            machine.succeed("test -f /var/test-service/pre-backup-command")

            ## localbackup restore
            machine.succeed("rm -rf /var/test-backups/somefile /var/test-service/ && mkdir -p /var/test-service")
            machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup localbackup '{localbackup_id}' >&2")
            assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
            machine.succeed("test -f /var/test-service/pre-restore-command")
            machine.succeed("test -f /var/test-service/post-restore-command")
            machine.succeed("test -f /var/test-service/pre-backup-command")
          '';
        } { inherit pkgs self; };
      };
    };
}
51 checks/borgbackup-legacy/default.nix Normal file
@@ -0,0 +1,51 @@
(
  { ... }:
  {
    name = "borgbackup";

    nodes.machine =
      { self, pkgs, ... }:
      {
        imports = [
          self.clanModules.borgbackup
          self.nixosModules.clanCore
          {
            services.openssh.enable = true;
            services.borgbackup.repos.testrepo = {
              authorizedKeys = [ (builtins.readFile ../assets/ssh/pubkey) ];
            };
          }
          {
            clan.core.settings.directory = ./.;
            clan.core.state.testState.folders = [ "/etc/state" ];
            environment.etc.state.text = "hello world";
            systemd.tmpfiles.settings."vmsecrets" = {
              "/etc/secrets/borgbackup/borgbackup.ssh" = {
                C.argument = "${../assets/ssh/privkey}";
                z = {
                  mode = "0400";
                  user = "root";
                };
              };
              "/etc/secrets/borgbackup/borgbackup.repokey" = {
                C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
                z = {
                  mode = "0400";
                  user = "root";
                };
              };
            };
            # clan.core.facts.secretStore = "vm";
            clan.core.vars.settings.secretStore = "vm";

            clan.borgbackup.destinations.test.repo = "borg@localhost:.";
          }
        ];
      };
    testScript = ''
      start_all()
      machine.systemctl("start --wait borgbackup-job-test.service")
      assert "machine-test" in machine.succeed("BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes /run/current-system/sw/bin/borg-job-test list")
    '';
  }
)
@@ -1,6 +1,6 @@
 { fetchgit }:
 fetchgit {
   url = "https://git.clan.lol/clan/clan-core.git";
-  rev = "5d884cecc2585a29b6a3596681839d081b4de192";
-  sha256 = "09is1afmncamavb2q88qac37vmsijxzsy1iz1vr6gsyjq2rixaxc";
+  rev = "eea93ea22c9818da67e148ba586277bab9e73cea";
+  sha256 = "sha256-PV0Z+97QuxQbkYSVuNIJwUNXMbHZG/vhsA9M4cDTCOE=";
 }
@@ -2,7 +2,6 @@
   self,
   lib,
   inputs,
-  privateInputs ? { },
   ...
 }:
 let
@@ -20,28 +19,18 @@ let
   nixosLib = import (self.inputs.nixpkgs + "/nixos/lib") { };
 in
 {
-  imports =
-    let
-      clanCoreModulesDir = ../nixosModules/clanCore;
-      getClanCoreTestModules =
-        let
-          moduleNames = attrNames (builtins.readDir clanCoreModulesDir);
-          testPaths = map (
-            moduleName: clanCoreModulesDir + "/${moduleName}/tests/flake-module.nix"
-          ) moduleNames;
-        in
-        filter pathExists testPaths;
-    in
-    getClanCoreTestModules
-    ++ filter pathExists [
-      ./devshell/flake-module.nix
-      ./flash/flake-module.nix
-      ./installation/flake-module.nix
-      ./update/flake-module.nix
-      ./morph/flake-module.nix
-      ./nixos-documentation/flake-module.nix
-      ./dont-depend-on-repo-root.nix
-    ];
+  imports = filter pathExists [
+    ./backups/flake-module.nix
+    ../nixosModules/clanCore/machine-id/tests/flake-module.nix
+    ../nixosModules/clanCore/state-version/tests/flake-module.nix
+    ./devshell/flake-module.nix
+    ./flash/flake-module.nix
+    ./impure/flake-module.nix
+    ./installation/flake-module.nix
+    ./morph/flake-module.nix
+    ./nixos-documentation/flake-module.nix
+    ./dont-depend-on-repo-root.nix
+  ];
   flake.check = genAttrs [ "x86_64-linux" "aarch64-darwin" ] (
     system:
     let
@@ -92,15 +81,18 @@ in

       # Base Tests
       nixos-test-secrets = self.clanLib.test.baseTest ./secrets nixosTestArgs;
+      nixos-test-borgbackup-legacy = self.clanLib.test.baseTest ./borgbackup-legacy nixosTestArgs;
       nixos-test-wayland-proxy-virtwl = self.clanLib.test.baseTest ./wayland-proxy-virtwl nixosTestArgs;

       # Container Tests
       nixos-test-container = self.clanLib.test.containerTest ./container nixosTestArgs;
       nixos-test-zt-tcp-relay = self.clanLib.test.containerTest ./zt-tcp-relay nixosTestArgs;
+      nixos-test-matrix-synapse = self.clanLib.test.containerTest ./matrix-synapse nixosTestArgs;
+      nixos-test-postgresql = self.clanLib.test.containerTest ./postgresql nixosTestArgs;
       nixos-test-user-firewall-iptables = self.clanLib.test.containerTest ./user-firewall/iptables.nix nixosTestArgs;
       nixos-test-user-firewall-nftables = self.clanLib.test.containerTest ./user-firewall/nftables.nix nixosTestArgs;

       service-dummy-test = import ./service-dummy-test nixosTestArgs;
       wireguard = import ./wireguard nixosTestArgs;
       service-dummy-test-from-flake = import ./service-dummy-test-from-flake nixosTestArgs;
     };

@@ -110,8 +102,6 @@ in
         "dont-depend-on-repo-root"
       ];

-      # Temporary workaround: Filter out docs package and devshell for aarch64-darwin due to CI builder hangs
-      # TODO: Remove this filter once macOS CI builder is updated
       flakeOutputs =
         lib.mapAttrs' (
           name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
@@ -119,18 +109,8 @@ in
         // lib.mapAttrs' (
           name: config: lib.nameValuePair "darwin-${name}" config.config.system.build.toplevel
         ) (self.darwinConfigurations or { })
-        // lib.mapAttrs' (n: lib.nameValuePair "package-${n}") (
-          if system == "aarch64-darwin" then
-            lib.filterAttrs (n: _: n != "docs" && n != "deploy-docs" && n != "docs-options") packagesToBuild
-          else
-            packagesToBuild
-        )
-        // lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") (
-          if system == "aarch64-darwin" then
-            lib.filterAttrs (n: _: n != "docs") self'.devShells
-          else
-            self'.devShells
-        )
+        // lib.mapAttrs' (n: lib.nameValuePair "package-${n}") packagesToBuild
+        // lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
         // lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (
           self'.legacyPackages.homeConfigurations or { }
         );
@@ -138,13 +118,37 @@ in
       nixosTests
       // flakeOutputs
       // {
-        clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
-          cp -r ${privateInputs.clan-core-for-checks} $out
-          chmod -R +w $out
-          cp ${../flake.lock} $out/flake.lock
-
-          # Create marker file to disable private flake loading in tests
-          touch $out/.skip-private-inputs
+        # TODO: Automatically provide this check to downstream users to check their modules
+        clan-modules-json-compatible =
+          let
+            allSchemas = lib.mapAttrs (
+              _n: m:
+              let
+                schema =
+                  (self.clanLib.evalService {
+                    modules = [ m ];
+                    prefix = [
+                      "checks"
+                      system
+                    ];
+                  }).config.result.api.schema;
+              in
+              schema
+            ) self.clan.modules;
+          in
+          pkgs.runCommand "combined-result"
+            {
+              schemaFile = builtins.toFile "schemas.json" (builtins.toJSON allSchemas);
+            }
+            ''
+              mkdir -p $out
+              cat $schemaFile > $out/allSchemas.json
+            '';
+
+        clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
+          cp -r ${pkgs.callPackage ./clan-core-for-checks.nix { }} $out
+          chmod +w $out/flake.lock
+          cp ${../flake.lock} $out/flake.lock
         '';
       };
       packages = lib.optionalAttrs (pkgs.stdenv.isLinux) {
@@ -50,22 +50,16 @@
       self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
       self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
       self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript.drvPath
-    ]
-    ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+    ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
     closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
   in
   {
-    # Skip flash test on aarch64-linux for now as it's too slow
-    checks = lib.optionalAttrs (pkgs.stdenv.isLinux && pkgs.hostPlatform.system != "aarch64-linux") {
+    checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
       nixos-test-flash = self.clanLib.test.baseTest {
         name = "flash";
         nodes.target = {
           virtualisation.emptyDiskImages = [ 4096 ];
           virtualisation.memorySize = 4096;
-
-          virtualisation.useNixStoreImage = true;
-          virtualisation.writableStore = true;
-
           environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
           environment.etc."install-closure".source = "${closureInfo}/store-paths";

@@ -84,8 +78,8 @@
           start_all()

           # Some distros like to automount disks with spaces
-          machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdc && mount /dev/vdc "/mnt/with spaces"')
-          machine.succeed("clan flash write --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.hostPlatform.system}")
+          machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdb && mount /dev/vdb "/mnt/with spaces"')
+          machine.succeed("clan flash write --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdb test-flash-machine-${pkgs.hostPlatform.system}")
         '';
       } { inherit pkgs self; };
     };
51 checks/impure/flake-module.nix Normal file
@@ -0,0 +1,51 @@
{
  perSystem =
    {
      pkgs,
      lib,
      self',
      ...
    }:
    {
      # a script that executes all other checks
      packages.impure-checks = pkgs.writeShellScriptBin "impure-checks" ''
        #!${pkgs.bash}/bin/bash
        set -euo pipefail

        unset CLAN_DIR

        export PATH="${
          lib.makeBinPath (
            [
              pkgs.gitMinimal
              pkgs.nix
              pkgs.coreutils
              pkgs.rsync # needed to have rsync installed on the dummy ssh server
            ]
            ++ self'.packages.clan-cli-full.runtimeDependencies
          )
        }"
        ROOT=$(git rev-parse --show-toplevel)
        cd "$ROOT/pkgs/clan-cli"

        # Set up custom git configuration for tests
        export GIT_CONFIG_GLOBAL=$(mktemp)
        git config --file "$GIT_CONFIG_GLOBAL" user.name "Test User"
        git config --file "$GIT_CONFIG_GLOBAL" user.email "test@example.com"
        export GIT_CONFIG_SYSTEM=/dev/null

        # this disables dynamic dependency loading in clan-cli
        export CLAN_NO_DYNAMIC_DEPS=1

        jobs=$(nproc)
        # Spawning worker in pytest is relatively slow, so we limit the number of jobs to 13
        # (current number of impure tests)
        jobs="$((jobs > 13 ? 13 : jobs))"

        nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -n $jobs -m impure ./clan_cli $@"

        # Clean up temporary git config
        rm -f "$GIT_CONFIG_GLOBAL"
      '';
    };
}
@@ -1,7 +1,6 @@
 {
   self,
   lib,
-  privateInputs,

   ...
 }:
@@ -150,17 +149,17 @@
       # vm-test-run-test-installation-> target: To debug, enter the VM and run 'systemctl status backdoor.service'.
       checks =
         let
+          # Custom Python package for port management utilities
           closureInfo = pkgs.closureInfo {
             rootPaths = [
-              privateInputs.clan-core-for-checks
+              self.checks.x86_64-linux.clan-core-for-checks
               self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
               self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.initialRamdisk
               self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
               pkgs.stdenv.drvPath
               pkgs.bash.drvPath
               pkgs.buildPackages.xorg.lndir
-            ]
-            ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+            ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
           };
         in
         pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
@@ -226,13 +225,12 @@
             "install",
             "--phases", "disko,install",
             "--debug",
-            "--flake", str(flake_dir),
+            "--flake", flake_dir,
             "--yes", "test-install-machine-without-system",
             "--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
             "-i", ssh_conn.ssh_key,
             "--option", "store", os.environ['CLAN_TEST_STORE'],
-            "--update-hardware-config", "nixos-facter",
             "--no-persist-state",
         ]

         subprocess.run(clan_cmd, check=True)
@@ -242,7 +240,7 @@
             target.shutdown()
         except BrokenPipeError:
             # qemu has already exited
-            target.connected = False
+            pass

         # Create a new machine instance that boots from the installed system
         installed_machine = create_test_machine(target, "${pkgs.qemu_test}", name="after_install")
@@ -276,7 +274,7 @@
             "${self.checks.x86_64-linux.clan-core-for-checks}",
             "${closureInfo}"
         )

         # Set up SSH connection
         ssh_conn = setup_ssh_connection(
             target,
@@ -291,6 +289,9 @@
         assert not os.path.exists(hw_config_file), "hardware-configuration.nix should not exist initially"
         assert not os.path.exists(facter_file), "facter.json should not exist initially"

+        # Set CLAN_FLAKE for the commands
+        os.environ["CLAN_FLAKE"] = flake_dir
+
         # Test facter backend
         clan_cmd = [
             "${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
@@ -159,8 +159,7 @@ let
       pkgs.stdenv.drvPath
       pkgs.bash.drvPath
       pkgs.buildPackages.xorg.lndir
-    ]
-    ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+    ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
   };

 in
83 checks/matrix-synapse/default.nix Normal file
@@ -0,0 +1,83 @@
(
  { pkgs, ... }:
  {
    name = "matrix-synapse";

    nodes.machine =
      {
        config,
        self,
        lib,
        ...
      }:
      {
        imports = [
          self.clanModules.matrix-synapse
          self.nixosModules.clanCore
          {
            clan.core.settings.directory = ./.;

            services.nginx.virtualHosts."matrix.clan.test" = {
              enableACME = lib.mkForce false;
              forceSSL = lib.mkForce false;
            };
            clan.nginx.acme.email = "admins@clan.lol";
            clan.matrix-synapse = {
              server_tld = "clan.test";
              app_domain = "matrix.clan.test";
            };
            clan.matrix-synapse.users.admin.admin = true;
            clan.matrix-synapse.users.someuser = { };

            clan.core.facts.secretStore = "vm";
            clan.core.vars.settings.secretStore = "vm";
            clan.core.vars.settings.publicStore = "in_repo";

            # because we use systemd-tmpfiles to copy the secrets, we need to a separate systemd-tmpfiles call to provision them.
            boot.postBootCommands = "${config.systemd.package}/bin/systemd-tmpfiles --create /etc/tmpfiles.d/00-vmsecrets.conf";

            systemd.tmpfiles.settings."00-vmsecrets" = {
              # run before 00-nixos.conf
              "/etc/secrets" = {
                d.mode = "0700";
                z.mode = "0700";
              };
              "/etc/secrets/matrix-synapse/synapse-registration_shared_secret" = {
                f.argument = "supersecret";
                z = {
                  mode = "0400";
                  user = "root";
                };
              };
              "/etc/secrets/matrix-password-admin/matrix-password-admin" = {
                f.argument = "matrix-password1";
                z = {
                  mode = "0400";
                  user = "root";
                };
              };
              "/etc/secrets/matrix-password-someuser/matrix-password-someuser" = {
                f.argument = "matrix-password2";
                z = {
                  mode = "0400";
                  user = "root";
                };
              };
            };
          }
        ];
      };
    testScript = ''
      start_all()
      machine.wait_for_unit("matrix-synapse")
      machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 8008")
      machine.wait_until_succeeds("${pkgs.curl}/bin/curl -Ssf -L http://localhost/_matrix/static/ -H 'Host: matrix.clan.test'")

      machine.systemctl("restart matrix-synapse >&2") # check if user creation is idempotent
      machine.execute("journalctl -u matrix-synapse --no-pager >&2")
      machine.wait_for_unit("matrix-synapse")
      machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 8008")
      machine.succeed("${pkgs.curl}/bin/curl -Ssf -L http://localhost/_matrix/static/ -H 'Host: matrix.clan.test'")
    '';
  }
)
1 checks/matrix-synapse/synapse-registration_shared_secret Normal file
@@ -0,0 +1 @@
registration_shared_secret: supersecret
@@ -35,8 +35,7 @@
       pkgs.stdenv.drvPath
       pkgs.stdenvNoCC
       self.nixosConfigurations.test-morph-machine.config.system.build.toplevel
-    ]
-    ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+    ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
     closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
 in

73 checks/postgresql/default.nix Normal file
@@ -0,0 +1,73 @@
({
  name = "postgresql";

  nodes.machine =
    { self, config, ... }:
    {
      imports = [
        self.nixosModules.clanCore
        self.clanModules.postgresql
        self.clanModules.localbackup
      ];
      clan.postgresql.users.test = { };
      clan.postgresql.databases.test.create.options.OWNER = "test";
      clan.postgresql.databases.test.restore.stopOnRestore = [ "sample-service" ];
      clan.localbackup.targets.hdd.directory = "/mnt/external-disk";
      clan.core.settings.directory = ./.;

      systemd.services.sample-service = {
        wantedBy = [ "multi-user.target" ];
        script = ''
          while true; do
            echo "Hello, world!"
            sleep 5
          done
        '';
      };

      environment.systemPackages = [ config.services.postgresql.package ];
    };
  testScript =
    { nodes, ... }:
    ''
      start_all()
      machine.wait_for_unit("postgresql")
      machine.wait_for_unit("sample-service")
      # Create a test table
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -c 'CREATE TABLE test (id serial PRIMARY KEY);' test")

      machine.succeed("/run/current-system/sw/bin/localbackup-create >&2")
      timestamp_before = int(machine.succeed("systemctl show --property=ExecMainStartTimestampMonotonic sample-service | cut -d= -f2").strip())

      machine.succeed("test -e /mnt/external-disk/snapshot.0/machine/var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'INSERT INTO test DEFAULT VALUES;'")
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'DROP TABLE test;'")
      machine.succeed("test -e /var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")

      machine.succeed("rm -rf /var/backup/postgres")

      machine.succeed("NAME=/mnt/external-disk/snapshot.0 FOLDERS=/var/backup/postgres/test /run/current-system/sw/bin/localbackup-restore >&2")
      machine.succeed("test -e /var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")

      machine.succeed("""
      set -x
      ${nodes.machine.clan.core.state.test.postRestoreCommand}
      """)
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -l >&2")
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c '\dt' >&2")

      timestamp_after = int(machine.succeed("systemctl show --property=ExecMainStartTimestampMonotonic sample-service | cut -d= -f2").strip())
      assert timestamp_before < timestamp_after, f"{timestamp_before} >= {timestamp_after}: expected sample-service to be restarted after restore"

      # Check that the table is still there
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'SELECT * FROM test;'")
      output = machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql --csv -c \"SELECT datdba::regrole FROM pg_database WHERE datname = 'test'\"")
      owner = output.split("\n")[1]
      assert owner == "test", f"Expected database owner to be 'test', got '{owner}'"

      # check if restore works if the database does not exist
      machine.succeed("runuser -u postgres -- dropdb test")
      machine.succeed("${nodes.machine.clan.core.state.test.postRestoreCommand}")
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c '\dt' >&2")
    '';
})
@@ -16,6 +16,7 @@ nixosLib.runTest (

   # This tests the compatibility of the inventory
   # With the test framework
+  # - legacy-modules
   # - clan.service modules
   name = "service-dummy-test-from-flake";

@@ -28,14 +29,25 @@ nixosLib.runTest (
   testScript =
     { nodes, ... }:
     ''
-      import subprocess
       from nixos_test_lib.nix_setup import setup_nix_in_nix # type: ignore[import-untyped]

       setup_nix_in_nix(None) # No closure info for this test

+      def run_clan(cmd: list[str], **kwargs) -> str:
+          import subprocess
+          clan = "${clan-core.packages.${hostPkgs.system}.clan-cli}/bin/clan"
+          clan_args = ["--flake", "${config.clan.test.flakeForSandbox}"]
+          return subprocess.run(
+              ["${hostPkgs.util-linux}/bin/unshare", "--user", "--map-user", "1000", "--map-group", "1000", clan, *cmd, *clan_args],
+              **kwargs,
+              check=True,
+          ).stdout
+
       start_all()
       admin1.wait_for_unit("multi-user.target")
       peer1.wait_for_unit("multi-user.target")
+      # Provided by the legacy module
+      print(admin1.succeed("systemctl status dummy-service"))
+      print(peer1.succeed("systemctl status dummy-service"))

       # peer1 should have the 'hello' file
       peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")
@@ -48,13 +60,7 @@ nixosLib.runTest (
       # Check that the file is in the '0644' mode
       assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}"

-      # Run clan command
-      result = subprocess.run(
-          ["${
-            clan-core.packages.${hostPkgs.system}.clan-cli
-          }/bin/clan", "machines", "list", "--flake", "${config.clan.test.flakeForSandbox}"],
-          check=True
-      )
+      run_clan(["machines", "list"])
     '';
   }
 )
@@ -15,6 +15,12 @@
     meta.name = "foo";
     machines.peer1 = { };
     machines.admin1 = { };
+    services = {
+      legacy-module.default = {
+        roles.peer.machines = [ "peer1" ];
+        roles.admin.machines = [ "admin1" ];
+      };
+    };

     instances."test" = {
       module.name = "new-service";
@@ -22,6 +28,9 @@
       roles.peer.machines.peer1 = { };
     };

+    modules = {
+      legacy-module = ./legacy-module;
+    };
   };
modules.new-service = {
|
||||
|
||||
10
checks/service-dummy-test-from-flake/legacy-module/README.md
Normal file
10
checks/service-dummy-test-from-flake/legacy-module/README.md
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
description = "Set up dummy-module"
|
||||
categories = ["System"]
|
||||
features = [ "inventory" ]
|
||||
|
||||
[constraints]
|
||||
roles.admin.min = 1
|
||||
roles.admin.max = 1
|
||||
---
|
||||
|
||||
@@ -0,0 +1,5 @@
{
  imports = [
    ../shared.nix
  ];
}

@@ -0,0 +1,5 @@
{
  imports = [
    ../shared.nix
  ];
}
@@ -0,0 +1,34 @@
{ config, ... }:
{
  systemd.services.dummy-service = {
    enable = true;
    description = "Dummy service";
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
    };
    script = ''
      generated_password_path="${config.clan.core.vars.generators.dummy-generator.files.generated-password.path}"
      if [ ! -f "$generated_password_path" ]; then
        echo "Generated password file not found: $generated_password_path"
        exit 1
      fi
      host_id_path="${config.clan.core.vars.generators.dummy-generator.files.host-id.path}"
      if [ ! -e "$host_id_path" ]; then
        echo "Host ID file not found: $host_id_path"
        exit 1
      fi
    '';
  };

  # TODO: add and prompt and make it work in the test framework
  clan.core.vars.generators.dummy-generator = {
    files.host-id.secret = false;
    files.generated-password.secret = true;
    script = ''
      echo $RANDOM > "$out"/host-id
      echo $RANDOM > "$out"/generated-password
    '';
  };
}
@@ -15,6 +15,7 @@ nixosLib.runTest (

   # This tests the compatibility of the inventory
   # With the test framework
+  # - legacy-modules
   # - clan.service modules
   name = "service-dummy-test";

@@ -23,6 +24,12 @@ nixosLib.runTest (
   inventory = {
     machines.peer1 = { };
     machines.admin1 = { };
+    services = {
+      legacy-module.default = {
+        roles.peer.machines = [ "peer1" ];
+        roles.admin.machines = [ "admin1" ];
+      };
+    };

     instances."test" = {
       module.name = "new-service";
@@ -30,6 +37,9 @@ nixosLib.runTest (
       roles.peer.machines.peer1 = { };
     };

+    modules = {
+      legacy-module = ./legacy-module;
+    };
   };
   modules.new-service = {
     _class = "clan.service";
@@ -68,6 +78,9 @@ nixosLib.runTest (
       start_all()
       admin1.wait_for_unit("multi-user.target")
       peer1.wait_for_unit("multi-user.target")
+      # Provided by the legacy module
+      print(admin1.succeed("systemctl status dummy-service"))
+      print(peer1.succeed("systemctl status dummy-service"))

       # peer1 should have the 'hello' file
       peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")
@@ -1,6 +1,10 @@
-______________________________________________________________________
+---
+description = "Set up dummy-module"
+categories = ["System"]
+features = [ "inventory" ]

-description = "Set up dummy-module" categories = ["System"] features = \[
-"inventory" \]
+[constraints]
+roles.admin.min = 1
+roles.admin.max = 1
+---

-## [constraints] roles.admin.min = 1 roles.admin.max = 1
@@ -1,307 +0,0 @@
{ self, ... }:
{
  # Machine for update test
  clan.machines.test-update-machine = {
    imports = [
      self.nixosModules.test-update-machine
      # Import the configuration file that will be created/updated during the test
      ./test-update-machine/configuration.nix
    ];
  };
  flake.nixosModules.test-update-machine =
    { lib, modulesPath, ... }:
    {
      imports = [
        (modulesPath + "/testing/test-instrumentation.nix")
        (modulesPath + "/profiles/qemu-guest.nix")
        self.clanLib.test.minifyModule
        ../../lib/test/container-test-driver/nixos-module.nix
      ];

      # Apply patch to fix x-initrd.mount filesystem handling in switch-to-configuration-ng
      nixpkgs.overlays = [
        (_final: prev: {
          switch-to-configuration-ng = prev.switch-to-configuration-ng.overrideAttrs (old: {
            patches = (old.patches or [ ]) ++ [ ./switch-to-configuration-initrd-mount-fix.patch ];
          });
        })
      ];

      networking.hostName = "update-machine";

      environment.etc."install-successful".text = "ok";

      # Enable SSH and add authorized key for testing
      services.openssh.enable = true;
      services.openssh.settings.PasswordAuthentication = false;
      users.users.root.openssh.authorizedKeys.keys = [ (builtins.readFile ../assets/ssh/pubkey) ];
      services.openssh.knownHosts.localhost.publicKeyFile = ../assets/ssh/pubkey;
      services.openssh.hostKeys = [
        {
          path = ../assets/ssh/privkey;
          type = "ed25519";
        }
      ];
      security.sudo.wheelNeedsPassword = false;

      boot.consoleLogLevel = lib.mkForce 100;
      boot.kernelParams = [ "boot.shell_on_fail" ];

      boot.isContainer = true;
      nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
      # Preserve the IP addresses assigned by the test framework
      # (based on virtualisation.vlans = [1] and node number 1)
      networking.interfaces.eth1 = {
        useDHCP = false;
        ipv4.addresses = [
          {
            address = "192.168.1.1";
            prefixLength = 24;
          }
        ];
        ipv6.addresses = [
          {
            address = "2001:db8:1::1";
            prefixLength = 64;
          }
        ];
      };

      # Define the mounts that exist in the container to prevent them from being stopped
      fileSystems = {
        "/" = {
          device = "/dev/disk/by-label/nixos";
          fsType = "ext4";
          options = [ "x-initrd.mount" ];
        };
        "/nix/.rw-store" = {
          device = "tmpfs";
          fsType = "tmpfs";
          options = [
            "mode=0755"
          ];
        };
        "/nix/store" = {
          device = "overlay";
          fsType = "overlay";
          options = [
            "lowerdir=/nix/.ro-store"
            "upperdir=/nix/.rw-store/upper"
            "workdir=/nix/.rw-store/work"
          ];
        };
      };
    };

  perSystem =
    {
      pkgs,
      ...
    }:
    {
      checks =
        pkgs.lib.optionalAttrs (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system == "x86_64-linux")
          {
            nixos-test-update =
              let
                closureInfo = pkgs.closureInfo {
                  rootPaths = [
                    self.packages.${pkgs.system}.clan-cli
                    self.checks.${pkgs.system}.clan-core-for-checks
                    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-update-machine.config.system.build.toplevel
                    pkgs.stdenv.drvPath
                    pkgs.bash.drvPath
                    pkgs.buildPackages.xorg.lndir
                  ]
                  ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
                };
              in
              self.clanLib.test.containerTest {
                name = "update";
                nodes.machine = {
                  imports = [ self.nixosModules.test-update-machine ];
                };
                extraPythonPackages = _p: [
                  self.legacyPackages.${pkgs.system}.nixosTestLib
                ];

                testScript = ''
                  import tempfile
                  import os
                  import subprocess
                  from nixos_test_lib.ssh import setup_ssh_connection # type: ignore[import-untyped]
                  from nixos_test_lib.nix_setup import prepare_test_flake # type: ignore[import-untyped]

                  start_all()
                  machine.wait_for_unit("multi-user.target")

                  # Verify initial state
                  machine.succeed("test -f /etc/install-successful")
                  machine.fail("test -f /etc/update-successful")

                  # Set up test environment
                  with tempfile.TemporaryDirectory() as temp_dir:
                      # Prepare test flake and Nix store
                      flake_dir = prepare_test_flake(
                          temp_dir,
                          "${self.checks.x86_64-linux.clan-core-for-checks}",
                          "${closureInfo}"
                      )
                      (flake_dir / ".clan-flake").write_text("") # Ensure .clan-flake exists

                      # Set up SSH connection
                      ssh_conn = setup_ssh_connection(
                          machine,
                          temp_dir,
                          "${../assets/ssh/privkey}"
                      )

                      # Update the machine configuration to add a new file
                      machine_config_path = os.path.join(flake_dir, "machines", "test-update-machine", "configuration.nix")
                      os.makedirs(os.path.dirname(machine_config_path), exist_ok=True)

                      # Note: update command doesn't accept -i flag, SSH key must be in ssh-agent
                      # Start ssh-agent and add the key
                      agent_output = subprocess.check_output(["${pkgs.openssh}/bin/ssh-agent", "-s"], text=True)
                      for line in agent_output.splitlines():
                          if line.startswith("SSH_AUTH_SOCK="):
                              os.environ["SSH_AUTH_SOCK"] = line.split("=", 1)[1].split(";")[0]
                          elif line.startswith("SSH_AGENT_PID="):
                              os.environ["SSH_AGENT_PID"] = line.split("=", 1)[1].split(";")[0]

                      # Add the SSH key to the agent
                      subprocess.run(["${pkgs.openssh}/bin/ssh-add", ssh_conn.ssh_key], check=True)


                      ##############
                      print("TEST: update with --build-host local")
                      with open(machine_config_path, "w") as f:
                          f.write("""
                      {
                        environment.etc."update-build-local-successful".text = "ok";
                      }
                      """)

                      # rsync the flake into the container
                      os.environ["PATH"] = f"{os.environ['PATH']}:${pkgs.openssh}/bin"
                      subprocess.run(
                          [
                              "${pkgs.rsync}/bin/rsync",
                              "-a",
                              "--delete",
                              "-e",
                              "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no",
                              f"{str(flake_dir)}/",
                              f"root@192.168.1.1:/flake",
                          ],
                          check=True
                      )

                      # allow machine to ssh into itself
                      subprocess.run([
                          "ssh",
                          "-o", "UserKnownHostsFile=/dev/null",
                          "-o", "StrictHostKeyChecking=no",
                          f"root@192.168.1.1",
                          "mkdir -p /root/.ssh && chmod 700 /root/.ssh && echo \"$(cat \"${../assets/ssh/privkey}\")\" > /root/.ssh/id_ed25519 && chmod 600 /root/.ssh/id_ed25519",
                      ], check=True)

                      # install the clan-cli package into the container's Nix store
                      subprocess.run(
                          [
                              "${pkgs.nix}/bin/nix",
                              "copy",
                              "--to",
                              "ssh://root@192.168.1.1",
                              "--no-check-sigs",
                              f"${self.packages.${pkgs.system}.clan-cli}",
                              "--extra-experimental-features", "nix-command flakes",
                              "--from", f"{os.environ["TMPDIR"]}/store"
                          ],
                          check=True,
                          env={
                              **os.environ,
                              "NIX_SSHOPTS": "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no",
                          },
                      )

                      # Run ssh on the host to run the clan update command via --build-host local
                      subprocess.run([
                          "ssh",
                          "-o", "UserKnownHostsFile=/dev/null",
                          "-o", "StrictHostKeyChecking=no",
                          f"root@192.168.1.1",
                          "${self.packages.${pkgs.system}.clan-cli}/bin/clan",
                          "machines",
                          "update",
                          "--debug",
                          "--flake", "/flake",
                          "--host-key-check", "none",
                          "--upload-inputs", # Use local store instead of fetching from network
                          "--build-host", "localhost",
                          "test-update-machine",
                          "--target-host", f"root@localhost",
                      ], check=True)

                      # Verify the update was successful
                      machine.succeed("test -f /etc/update-build-local-successful")


                      ##############
                      print("TEST: update with --target-host")

                      with open(machine_config_path, "w") as f:
                          f.write("""
                      {
                        environment.etc."target-host-update-successful".text = "ok";
                      }
                      """)

                      # Run clan update command
                      subprocess.run([
                          "${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
                          "machines",
                          "update",
                          "--debug",
                          "--flake", flake_dir,
                          "--host-key-check", "none",
                          "--upload-inputs", # Use local store instead of fetching from network
                          "test-update-machine",
                          "--target-host", f"root@192.168.1.1:{ssh_conn.host_port}",
                      ], check=True)

                      # Verify the update was successful
                      machine.succeed("test -f /etc/target-host-update-successful")


                      ##############
                      print("TEST: update with --build-host")
                      # Update configuration again
                      with open(machine_config_path, "w") as f:
                          f.write("""
                      {
                        environment.etc."build-host-update-successful".text = "ok";
                      }
                      """)

                      # Run clan update command with --build-host
                      subprocess.run([
                          "${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
                          "machines",
                          "update",
                          "--debug",
                          "--flake", flake_dir,
                          "--host-key-check", "none",
                          "--upload-inputs", # Use local store instead of fetching from network
                          "--build-host", f"root@192.168.1.1:{ssh_conn.host_port}",
                          "test-update-machine",
                          "--target-host", f"root@192.168.1.1:{ssh_conn.host_port}",
                      ], check=True)

                      # Verify the second update was successful
                      machine.succeed("test -f /etc/build-host-update-successful")
                '';
              } { inherit pkgs self; };
          };
    };
}
@@ -1,17 +0,0 @@
diff --git a/src/main.rs b/src/main.rs
index 8baf5924a7db..1234567890ab 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1295,6 +1295,12 @@ won't take effect until you reboot the system.

     for (mountpoint, current_filesystem) in current_filesystems {
         // Use current version of systemctl binary before daemon is reexeced.
+
+        // Skip filesystem comparison if x-initrd.mount is present in options
+        if current_filesystem.options.contains("x-initrd.mount") {
+            continue;
+        }
+
         let unit = path_to_unit_name(&current_system_bin, &mountpoint);
         if let Some(new_filesystem) = new_filesystems.get(&mountpoint) {
             if current_filesystem.fs_type != new_filesystem.fs_type
@@ -1,3 +0,0 @@
{
  # Initial empty configuration
}
@@ -1,115 +0,0 @@
{
  pkgs,
  nixosLib,
  clan-core,
  lib,
  ...
}:
nixosLib.runTest (
  { ... }:

  let
    machines = [
      "controller1"
      "controller2"
      "peer1"
      "peer2"
      "peer3"
    ];
  in
  {
    imports = [
      clan-core.modules.nixosTest.clanTest
    ];

    hostPkgs = pkgs;

    name = "wireguard";

    clan = {
      directory = ./.;
      modules."@clan/wireguard" = import ../../clanServices/wireguard/default.nix;
      inventory = {

        machines = lib.genAttrs machines (_: { });

        instances = {

          /*
            wg-test-one topology:
              controller1 <-> controller2  (controllers reach each other and every peer)
              peer1 -> controller1
              peer2 -> controller2
              peer3 -> controller1
          */

          wg-test-one = {

            module.name = "@clan/wireguard";
            module.input = "self";

            roles.controller.machines."controller1".settings = {
              endpoint = "192.168.1.1";
            };

            roles.controller.machines."controller2".settings = {
              endpoint = "192.168.1.2";
            };

            roles.peer.machines = {
              peer1.settings.controller = "controller1";
              peer2.settings.controller = "controller2";
              peer3.settings.controller = "controller1";
            };
          };

          # TODO: Will this actually work with conflicting ports? Can we re-use interfaces?
          #wg-test-two = {
          #  module.name = "@clan/wireguard";

          #  roles.controller.machines."controller1".settings = {
          #    endpoint = "192.168.1.1";
          #    port = 51922;
          #  };

          #  roles.peer.machines = {
          #    peer1 = { };
          #  };
          #};
        };
      };
    };

    testScript = ''
      start_all()

      # Show all addresses
      machines = [peer1, peer2, peer3, controller1, controller2]
      for m in machines:
          m.systemctl("start network-online.target")

      for m in machines:
          m.wait_for_unit("network-online.target")
          m.wait_for_unit("systemd-networkd.service")

      print("\n\n" + "="*60)
      print("STARTING PING TESTS")
      print("="*60)

      for m1 in machines:
          for m2 in machines:
              if m1 != m2:
                  print(f"\n--- Pinging from {m1.name} to {m2.name}.wg-test-one ---")
                  m1.wait_until_succeeds(f"ping -c1 {m2.name}.wg-test-one >&2")
    '';
  }
)
@@ -1,6 +0,0 @@
[
  {
    "publickey": "age1rnkc2vmrupy9234clyu7fpur5kephuqs3v7qauaw5zeg00jqjdasefn3cc",
    "type": "age"
  }
]
@@ -1,6 +0,0 @@
[
  {
    "publickey": "age1t2hhg99d4p2yymuhngcy5ccutp8mvu7qwvg5cdhck303h9e7ha9qnlt635",
    "type": "age"
  }
]
@@ -1,6 +0,0 @@
[
  {
    "publickey": "age1jts52rzlqcwjc36jkp56a7fmjn3czr7kl9ta2spkfzhvfama33sqacrzzd",
    "type": "age"
  }
]
@@ -1,6 +0,0 @@
[
  {
    "publickey": "age12nqnp0zd435ckp5p0v2fv4p2x4cvur2mnxe8use2sx3fgy883vaq4ae75e",
    "type": "age"
  }
]
@@ -1,6 +0,0 @@
[
  {
    "publickey": "age1sglr4zp34drjfydzeweq43fz3uwpul3hkh53lsfa9drhuzwmkqyqn5jegp",
    "type": "age"
  }
]
@@ -1,15 +0,0 @@
{
  "data": "ENC[AES256_GCM,data:zDF0RiBqaawpg+GaFkuLPomJ01Xu+lgY5JfUzaIk2j03XkCzIf8EMrmn6pRtBP3iUjPBm+gQSTQk6GHTONrixA5hRNyETV+UgQw=,iv:zUUCAGZ0cz4Tc2t/HOjVYNsdnrAOtid/Ns5ak7rnyCk=,tag:z43WtNSue4Ddf7AVu21IKA==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlY1NEdjAzQm5RMFZWY3BJ\nclp6c01FdlZFK3dOSDB4cHc1NTdwMXErMFJFCnIrRVFNZEFYOG1rVUhFd2xsbTJ2\nVkJHNmdOWXlOcHJoQ0QzM1VyZmxmcGcKLS0tIFk1cEx4dFdvNGRwK1FWdDZsb1lR\nV2d1RFZtNzZqVFdtQ1FzNStEcEgyUUkKx8tkxqJz/Ko3xgvhvd6IYiV/lRGmrY13\nUZpYWR9tsQwZAR9dLjCyVU3JRuXeGB1unXC1CO0Ff3R0A/PuuRHh+g==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:19:37Z",
    "mac": "ENC[AES256_GCM,data:8RGOUhZ2LGmC9ugULwHDgdMrtdo9vzBm3BJmL4XTuNJKm0NlKfgNLi1E4n9DMQ+kD4hKvcwbiUcwSGE8jZD6sm7Sh3bJi/HZCoiWm/O/OIzstli2NNDBGvQBgyWZA5H+kDjZ6aEi6icNWIlm5gsty7KduABnf5B3p0Bn5Uf5Bio=,iv:sGZp0XF+mgocVzAfHF8ATdlSE/5zyz5WUSRMJqNeDQs=,tag:ymYVBRwF5BOSAu5ONU2qKw==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}
@@ -1 +0,0 @@
../../../users/admin
@@ -1,15 +0,0 @@
{
  "data": "ENC[AES256_GCM,data:dHM7zWzqnC1QLRKYpbI2t63kOFnSaQy6ur9zlkLQf17Q03CNrqUsZtdEbwMnLR3llu7eVMhtvVRkXjEkvn3leb9HsNFmtk/DP70=,iv:roEZsBFqRypM106O5sehTzo7SySOJUJgAR738rTtOo8=,tag:VDd9/6uU0SAM7pWRLIUhUQ==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBKTEVYUmVGbUtOcHZ4cnc3\nKzNETnlxaVRKYTI3eWVHdEoyc3l2SnhsZ1J3CnB2RnZrOXM5Uml6TThDUlZjY25J\nbkJ6eUZ2ckN1NWpNUU9IaE93UDJQdlEKLS0tIC95ZDhkU0R1VHhCdldxdW4zSmps\nN3NqL1cvd05hRTRPdDA3R2pzNUFFajgKS+DJH14fH9AvEAa3PoUC1jEqKAzTmExN\nl32FeHTHbGMo1PKeaFm+Eg0WSpAmFE7beBunc5B73SW30ok6x4FcQw==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:19:47Z",
    "mac": "ENC[AES256_GCM,data:77EnuBQyguvkCtobUg8/6zoLHjmeGDrSBZuIXOZBMxdbJjzhRg++qxQjuu6t0FoWATtz7u4Y3/jzUMGffr/N5HegqSq0D2bhv7AqJwBiVaOwd80fRTtM+YiP/zXsCk52Pj/Gadapg208bDPQ1BBDOyz/DrqZ7w//j+ARJjAnugI=,iv:IuTDmJKZEuHXJXjxrBw0gP2t6vpxAYEqbtpnVbavVCY=,tag:4EnpX6rOamtg1O+AaEQahQ==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}
@@ -1 +0,0 @@
../../../users/admin
@@ -1,15 +0,0 @@
{
  "data": "ENC[AES256_GCM,data:wcSsqxTKiMAnzPwxs5DNjcSdLyjVQ9UOrZxfSbOkVfniwx6F7xz6dLNhaDq7MHQ0vRWpg28yNs7NHrp52bYFnb/+eZsis46WiCw=,iv:B4t1lvS2gC601MtsmZfEiEulLWvSGei3/LSajwFS9Vs=,tag:hnRXlZyYEFfLJUrw1SqbSQ==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAybUgya2VEdzMvRG1hdkpu\nM2pGNmcyVmcvYVZ1ZjJlY3A1bXFUUUtkMTI0CmJoRFZmejZjN2UxUXNuc1k5WnE2\nNmxIcnpNQ1lJZ3ZKSmhtSlVURXJTSUUKLS0tIGU4Wi9yZ3VYekJkVW9pNWFHblFk\na0gzbTVKUWdSam1sVjRUaUlTdVd5YWMKntRc9yb9VPOTMibp8QM5m57DilP01N/X\nPTQaw8oI40znnHdctTZz7S+W/3Te6sRnkOhFyalWmsKY0CWg/FELlA==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:19:58Z",
    "mac": "ENC[AES256_GCM,data:8nq+ugkUJxE24lUIySySs/cAF8vnfqr936L/5F0O1QFwNrbpPmKRXkuwa6u0V+187L2952Id20Fym4ke59f3fJJsF840NCKDwDDZhBZ20q9GfOqIKImEom/Nzw6D0WXQLUT3w8EMyJ/F+UaJxnBNPR6f6+Kx4YgStYzCcA6Ahzg=,iv:VBPktEz7qwWBBnXE+xOP/EUVy7/AmNCHPoK56Yt/ZNc=,tag:qXONwOLFAlopymBEf5p4Sw==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}
@@ -1 +0,0 @@
../../../users/admin
@@ -1,15 +0,0 @@
{
  "data": "ENC[AES256_GCM,data:4d3ri0EsDmWRtA8vzvpPRLMsSp4MIMKwvtn0n0pRY05uBPXs3KcjnweMPIeTE1nIhqnMR2o2MfLah5TCPpaFax9+wxIt74uacbg=,iv:0LBAldTC/hN4QLCxgXTl6d9UB8WmUTnj4sD2zHQuG2w=,tag:zr/RhG/AU4g9xj9l2BprKw==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvV0JnZDhlU1piU1g2cng0\ncytKOEZ6WlZlNGRGUjV3MmVMd2Nzc0ZwelgwCjBGdThCUGlXbVFYdnNoZWpJZ3Vm\nc2xkRXhxS09vdzltSVoxLzhFSVduak0KLS0tIE5DRjJ6cGxiVlB1eElHWXhxN1pJ\nYWtIMDMvb0Z6akJjUzlqeEFsNHkxL2cKpghv/QegnXimeqd9OPFouGM//jYvoVmw\n2d4mLT2JSMkEhpfGcqb6vswhdJfCiKuqr2B4bqwAnPMaykhsm8DFRQ==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:20:08Z",
    "mac": "ENC[AES256_GCM,data:BzlQVAJ7HzcxNPKB3JhabqRX/uU0EElj172YecjmOflHnzz/s9xgfdAfJK/c53hXlX4LtGPnubH7a8jOolRq98zmZeBYE27+WLs2aN7Ufld6mYk90/i7u4CqR+Fh2Kfht04SlUJCjnS5A9bTPwU9XGRHJ0BiOhzTuSMUJTRaPRM=,iv:L50K5zc1o99Ix9nP0pb9PRH+VIN2yvq7JqKeVHxVXmc=,tag:XFLkSCsdbTPxbasDYYxcFQ==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}
@@ -1 +0,0 @@
../../../users/admin
@@ -1,15 +0,0 @@
{
  "data": "ENC[AES256_GCM,data:qfLm6+g1vYnESCik9uyBeKsY6Ju2Gq3arnn2I8HHNO67Ri5BWbOQTvtz7WT8/q94RwVjv8SGeJ/fsJSpwLSrJSbqTZCPAnYwzzQ=,iv:PnA9Ao8RRELNhNQYbaorstc0KaIXRU7h3+lgDCXZFHk=,tag:VeLgYQYwqthYihIoQTwYiA==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBNWVVQaDJFd0N3WHptRC9Z\nZTgxTWh5bnU1SkpqRWRXZnhPaFhpSVJmVEhrCjFvdHFYenNWaFNrdXlha09iS2xj\nOTZDcUNkcHkvTDUwNjM4Z3gxUkxreUEKLS0tIE5oY3Q2bWhsb2FSQTVGTWVSclJw\nWllrelRwT3duYjJJbTV0d3FwU1VuNlkK2eN3fHFX/sVUWom8TeZC9fddqnSCsC1+\nJRCZsG46uHDxqLcKIfdFWh++2t16XupQYk3kn+NUR/aMc3fR32Uwjw==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:20:18Z",
    "mac": "ENC[AES256_GCM,data:nUwsPcP1bsDjAHFjQ1NlVkTwyZY4B+BpzNkMx9gl0rE14j425HVLtlhlLndhRp+XMpnDldQppLAAtSdzMsrw8r5efNgTRl7cu4Fy/b9cHt84k7m0aou5lrGus9SV1bM7/fzC9Xm7CSXBcRzyDGVsKC6UBl1rx+ybh7HyAN05XSo=,iv:It57H+zUUNPkoN1D8sYwyZx5zIFIga7mydhGUHYBCGE=,tag:mBQdYqUpjPknbYa13qESyw==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}
@@ -1 +0,0 @@
../../../users/admin
@@ -1,4 +0,0 @@
{
  "publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
  "type": "age"
}
@@ -1 +0,0 @@
../../../../../../sops/machines/controller1
@@ -1,19 +0,0 @@
{
  "data": "ENC[AES256_GCM,data:noe913+28JWkoDkGGMu++cc1+j5NPDoyIhWixdsowoiVO3cTWGkZ88SUGO5D,iv:ynYMljwqMcBdk8RpVcw/2Jflg2RCF28r4fKUgIAF8B4=,tag:+TsXDJgfUhKgg4iQVXKKlQ==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBhYVRReTZBQ05GYmVBVjhS\nNXM5aFlhVzZRaVl6UHl6S3JnMC9Sb1dwZ1ZjCmVuS2dEVExYZWROVklUZWFCSnM2\nZnlxbVNseTM2c0Q0TjhsT3NzYmtqREUKLS0tIHBRTFpvVGt6d1cxZ2lFclRsUVhZ\nZDlWaG9PcXVrNUZKaEgxWndjUDVpYjgKt0eOhAgcYdkg9JSEakx4FjChLTn3pis+\njOkuGd4JfXMKcwC7vJV5ygQBxzVJSBw+RucP7sYCBPK0m8Voj94ntw==\n-----END AGE ENCRYPTED FILE-----\n"
      },
      {
        "recipient": "age1rnkc2vmrupy9234clyu7fpur5kephuqs3v7qauaw5zeg00jqjdasefn3cc",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6MFJqNHNraG9DSnJZMFdz\ndU8zVXNTamxROFd1dWtuK2RiekhPdHhleVhFCi8zNWJDNXJMRUlDdjc4Q0UycTIz\nSGFGSmdnNU0wZWlDaTEwTzBqWjh6SFkKLS0tIEJOdjhOMDY2TUFLb3RPczNvMERx\nYkpSeW5VOXZvMlEvdm53MDE3aUFTNjgKyelSTjrTIR9I3rJd3krvzpsrKF1uGs4J\n4MtmQj0/3G+zPYZVBx7b3HF6B3f1Z7LYh05+z7nCnN/duXyPnDjNcg==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:19:37Z",
    "mac": "ENC[AES256_GCM,data:+DmIkPG/H6tCtf8CvB98E1QFXv08QfTcCB3CRsi+XWnIRBkryRd/Au9JahViHMdK7MED8WNf84NWTjY2yH4y824/DjI8XXNMF1iVMo0CqY42xbVHtUuhXrYeT+c8CyEw+M6zfy1jC0+Bm3WQWgagz1G6A9SZk3D2ycu0N08+axA=,iv:kwBjTYebIy5i2hagAajSwwuKnSkrM9GyrnbeQXB2e/w=,tag:EgKJ5gVGYj1NGFUduxLGfg==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}
@@ -1 +0,0 @@
../../../../../../sops/users/admin
@@ -1 +0,0 @@
lQfR7GhivN87XoXruTGOPjVPhNu1Brt//wyc3pdwE20=
@@ -1 +0,0 @@
7470bb5c79df224a9b7f5a2259acd2e46db763c27e24cb3416c8b591cb328077
@@ -1 +0,0 @@
fd51:19c1:3b:f700
@@ -1 +0,0 @@
../../../../../../sops/machines/controller2
@@ -1,19 +0,0 @@
{
  "data": "ENC[AES256_GCM,data:2kehACgvNgoYGPwnW7p86BR0yUu689Chth6qZf9zoJtuTY9ATS68dxDyBc5S,iv:qb2iDUtExegTeN3jt6SA8RnU61W5GDDhn56QXiQT4gw=,tag:pSGPICX5p6qlZ1WMVoIEYQ==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBSTTR5TDY4RE9VYmlCK1dL\nWkVRcVZqVDlsbmQvUlJmdzF2b1Z1S0k3NngwCkFWNzRVaERtSmFsd0o2aFJOb0ZX\nSU9yUnVaNi9IUjJWeGRFcEpDUXo5WkEKLS0tIEczNkxiYnJsTWRoLzFhQVF1M21n\nWnZEdGV1N2N5d1FZQkJUQ1IrdGFLblkKPTpha2bxS8CCAMXWTDKX/WOcdvggaP3Y\nqewyahDNzb4ggP+LNKp55BtwFjdvoPoq4BpYOOgMRbQMMk+H1o9WFw==\n-----END AGE ENCRYPTED FILE-----\n"
      },
      {
        "recipient": "age1t2hhg99d4p2yymuhngcy5ccutp8mvu7qwvg5cdhck303h9e7ha9qnlt635",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBYcEZ6Tzk3M0pkV0tOdTBj\nenF2a0tHNnhBa0NrazMwV1VBbXBZR3pzSHpvCnBZOEU0VlFHS1FHcVpTTDdPczVV\nV0RFSlZ0VmIzWGoydEdKVXlIUE9OOEkKLS0tIFZ0cWVBR1loeVlWa2c4U3oweXE2\ncm1ja0JCS3U5Nk41dlAzV2NabDc2bDQKdgCDNnpRZlFPnEGlX6fo0SQX4yOB+E6r\ntnSwofR3xxZvkyme/6JJU5qBZXyCXEAhKMRkFyvJANXzMJAUo/Osow==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:19:48Z",
    "mac": "ENC[AES256_GCM,data:e3EkL8vwRhLsec83Zi9DE3PKT+4RwgiffpN4QHcJKTgmDW6hzizWc5kAxbNWGJ9Qqe6sso2KY7tc+hg1lHEsmzjCbg153p8h+7lVI2XT6adi/CS8WZ2VpeL+0X9zDQCjqHmrESZAYFBdkLqO4jucdf0Pc3CKKD+N3BDDTwSUvHM=,iv:xvR7dJL8sdYen00ovrYT8PNxhB9XxSWDSRz1IK23I/o=,tag:OyhAvllBgfAp3eGeNpR/Nw==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}
@@ -1 +0,0 @@
../../../../../../sops/users/admin
@@ -1 +0,0 @@
5Z7gbLFbXpEFfomW2pKyZBpZN5xvUtiqrIL0GVfNtQ8=
@@ -1 +0,0 @@
c3672fdb9fb31ddaf6572fc813cf7a8fe50488ef4e9d534c62d4f29da60a1a99
@@ -1 +0,0 @@
fd51:19c1:c1:aa00
@@ -1 +0,0 @@
../../../../../../sops/machines/peer1
@@ -1,19 +0,0 @@
{
  "data": "ENC[AES256_GCM,data:b+akw85T3D9xc75CPLHucR//k7inpxKDvgpR8tCNKwNDRVjVHjcABhfZNLXW,iv:g11fZE8UI0MVh9GKdjR6leBlxa4wN7ZubozXG/VlBbw=,tag:0YkzWCW3zJ3Mt3br/jmTYw==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1jts52rzlqcwjc36jkp56a7fmjn3czr7kl9ta2spkfzhvfama33sqacrzzd",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBXWkJUR0pIa2xOSEw2dThm\nYlNuOHZCVW93Wkc5LzE4YmpUTHRkZlk3ckc4CnN4M3ZRMWNFVitCT3FyWkxaR0di\nb0NmSXFhRHJmTWg0d05OcWx1LytscEEKLS0tIEtleTFqU3JrRjVsdHpJeTNuVUhF\nWEtnOVlXVXRFamFSak5ia2F2b0JiTzAKlhOBZvZ4AN+QqAYQXvd6YNmgVS4gtkWT\nbV3bLNTgwtrDtet9NDHM8vdF+cn5RZxwFfgmTbDEow6Zm8EXfpxj/g==\n-----END AGE ENCRYPTED FILE-----\n"
      },
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6YVYyQkZqMTJYQTlyRG5Y\nbnJ2UkE1TS9FZkpSa2tQbk1hQjViMi9OcGk0CjFaZUdjU3JtNzh0bDFXdTdUVW4x\nanFqZHZjZjdzKzA2MC8vTWh3Uy82UGcKLS0tIDhyOFl3UGs3czdoMlpza3UvMlB1\nSE90MnpGc05sSCtmVWg0UVNVdmRvN2MKHlCr4U+7bsoYb+2fgT4mEseZCEjxrtLu\n55sR/4YH0vqMnIBnLTSA0e+WMrs3tQfseeJM5jY/ZNnpec1LbxkGTg==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:19:58Z",
    "mac": "ENC[AES256_GCM,data:gEoEC9D2Z7k5F8egaY1qPXT5/96FFVsyofSBivQ28Ir/9xHX2j40PAQrYRJUWsk/GAUMOyi52Wm7kPuacw+bBcdtQ0+MCDEmjkEnh1V83eZ/baey7iMmg05uO92MYY5o4e7ZkwzXoAeMCMcfO0GqjNvsYJHF1pSNa+UNDj+eflw=,iv:dnIYpvhAdvUDe9md53ll42krb0sxcHy/toqGc7JFxNA=,tag:0WkZU7GeKMD1DQTYaI+1dg==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}
@@ -1 +0,0 @@
../../../../../../sops/users/admin
@@ -1 +0,0 @@
juK7P/92N2t2t680aLIRobHc3ts49CsZBvfZOyIKpUc=
@@ -1 +0,0 @@
b36142569a74a0de0f9b229f2a040ae33a22d53bef5e62aa6939912d0cda05ba
@@ -1 +0,0 @@
6987:50a0:9b93:4337
@@ -1 +0,0 @@
../../../../../../sops/machines/peer2
@@ -1,19 +0,0 @@
{
  "data": "ENC[AES256_GCM,data:apX2sLwtq6iQgLJslFwiRMNBUe0XLzLQbhKfmb2pKiJG7jGNHUgHJz3Ls4Ca,iv:HTDatm3iD5wACTkkd3LdRNvJfnfg75RMtn9G6Q7Fqd4=,tag:Mfehlljnes5CFD1NJdk27A==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age12nqnp0zd435ckp5p0v2fv4p2x4cvur2mnxe8use2sx3fgy883vaq4ae75e",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBVZzFyMUZsd2V2VWxOUmhP\nZE8yZTc4Q0RkZisxR25NemR1TzVDWmJZVjBVClA1MWhsU0xzSG16aUx3cWFWKzlG\nSkxrT09OTkVqLzlWejVESE1QWHVJaFkKLS0tIGxlaGVuWU43RXErNTB3c3FaUnM3\nT0N5M253anZkbnFkZWw2VHA0eWhxQW8Kd1PMtEX1h0Hd3fDLMi++gKJkzPi9FXUm\n+uYhx+pb+pJM+iLkPwP/q6AWC7T0T4bHfekkdzxrbsKMi73x/GrOiw==\n-----END AGE ENCRYPTED FILE-----\n"
      },
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBqVzRIMWdlNjVwTURyMFkv\nSUhiajZkZVNuWklRYit6cno4UzNDa2szOFN3CkQ2TWhHb25pbmR1MlBsRXNLL2lx\ncVZ3c3BsWXN2aS9UUVYvN3I4S0xUSmMKLS0tIE5FV0U5aXVUZk9XL0U0Z2ZSNGd5\nbU9zY3IvMlpSNVFLYkRNQUpUYVZOWFUK7j4Otzb8CJTcT7aAj9/irxHEDXh1HkTg\nzz7Ho8/ZncNtaCVHlHxjTgVW9d5aIx8fSsV9LRCFwHMtNzvwj1Nshg==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:20:08Z",
    "mac": "ENC[AES256_GCM,data:e7WNVEz78noHBiz6S3A6qNfop+yBXB3rYN0k4GvaQKz3b99naEHuqIF8Smzzt4XrbbiPKu2iLa5ddLBlqqsi32UQUB8JS9TY7hvW8ol+jpn0VxusGCXW9ThdDEsM/hXiPyr331C73zTvbOYI1hmcGMlJL9cunVRO9rkMtEqhEfo=,iv:6zt7wjIs1y5xDHNK+yLOwoOuUpY7/dOGJGT6UWAFeOg=,tag:gzFTgoxhoLzUV0lvzOhhfg==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}
@@ -1 +0,0 @@
../../../../../../sops/users/admin
@@ -1 +0,0 @@
XI9uSaQRDBCb82cMnGzGJcbqRfDG/IXZobyeL+kV03k=
@@ -1 +0,0 @@
360f9fce4a984eb87ce2a673eb5341ecb89c0f62126548d45ef25ff5243dd646
@@ -1 +0,0 @@
3b21:3ced:003e:89b3
@@ -1 +0,0 @@
../../../../../../sops/machines/peer3
@@ -1,19 +0,0 @@
{
  "data": "ENC[AES256_GCM,data:Gluvjes/3oH5YsDq00JDJyJgoEFcj56smioMArPSt309MDGExYX2QsCzeO1q,iv:oBBJRDdTj/1dWEvzhdFKQ2WfeCKyavKMLmnMbqnU5PM=,tag:2WNFxKz2dWyVcybpm5N4iw==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBtQWpjRmhZTFdPa2VSZkFN\nbUczMlY5bDBmMTdoMy8xcWxMaXpWVitMZGdjCnRWb2Y3eGpHU1hmNHRJVFBqbU5w\nVEZGdUIrQXk0U0dUUEZ6bE5EMFpTRHMKLS0tIGpYSmZmQThJUTlvTHpjc05ZVlM4\nQWhTOWxnUHZnYlJ3czE3ZUJ0L3ozWTQK3a7N0Zpzo4sUezYveqvKR49RUdJL23eD\n+cK5lk2xbtj+YHkeG+dg7UlHfDaicj0wnFH1KLuWmNd1ONa6eQp3BQ==\n-----END AGE ENCRYPTED FILE-----\n"
      },
      {
        "recipient": "age1sglr4zp34drjfydzeweq43fz3uwpul3hkh53lsfa9drhuzwmkqyqn5jegp",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA3a2FOWlVsSkdnendrYmUz\ndEpuL1hZSWNFTUtDYm14S3V1aW9KS3hsazJRCkp2SkFFbi9hbGJpNks1MlNTL0s5\nTk5pcUMxaEJobkcvWmRGeU9jMkdNdzAKLS0tIDR6M0Y5eE1ETHJJejAzVW1EYy9v\nZCtPWHJPUkhuWnRzSGhMUUtTa280UmMKXvtnxyop7PmRvTOFkV80LziDjhGh93Pf\nYwhD/ByD/vMmr21Fd6PVHOX70FFT30BdnMc1/wt7c/0iAw4w4GoQsA==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:20:18Z",
    "mac": "ENC[AES256_GCM,data:3nXMTma0UYXCco+EM8UW45cth7DVMboFBKyesL86GmaG6OlTkA2/25AeDrtSVO13a5c2jC6yNFK5dE6pSe5R9f0BoDF7d41mgc85zyn+LGECNWKC6hy6gADNSDD6RRuV1S3FisFQl1F1LD8LiSWmg/XNMZzChNlHYsCS8M+I84g=,iv:pu5VVXAVPmVoXy0BJ+hq5Ar8R0pZttKSYa4YS+dhDNc=,tag:xp1S/4qExnxMTGwhfLJrkA==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}
@@ -1 +0,0 @@
../../../../../../sops/users/admin
@@ -1 +0,0 @@
t6qN4VGLR+VMhrBDNKQEXZVyRsEXs1/nGFRs5DI82F8=
@@ -1 +0,0 @@
e3facc99b73fe029d4c295f71829a83f421f38d82361cf412326398175da162a
@@ -1 +0,0 @@
e42b:bf85:33f4:f0b1
24
checks/zt-tcp-relay/default.nix
Normal file
@@ -0,0 +1,24 @@
(
  { pkgs, ... }:
  {
    name = "zt-tcp-relay";

    nodes.machine =
      { self, ... }:
      {
        imports = [
          self.nixosModules.clanCore
          self.clanModules.zt-tcp-relay
          {
            clan.core.settings.directory = ./.;
          }
        ];
      };
    testScript = ''
      start_all()
      machine.wait_for_unit("zt-tcp-relay.service")
      out = machine.succeed("${pkgs.netcat}/bin/nc -z -v localhost 4443")
      print(out)
    '';
  }
)
5
clanModules/admin/README.md
Normal file
@@ -0,0 +1,5 @@
---
description = "Convenient Administration for the Clan App"
categories = ["Utility"]
features = [ "inventory", "deprecated" ]
---
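A minimal usage sketch of the module's `allowedKeys` option (hypothetical: `jane` and the key string below are placeholders, not values shipped with the module):

```nix
{
  # Grant SSH access to the root user via this module.
  # "jane" and the key material are placeholder examples.
  clan.admin.allowedKeys = {
    jane = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA... jane@example";
  };
}
```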
3
clanModules/admin/default.nix
Normal file
@@ -0,0 +1,3 @@
{
  imports = [ ./roles/default.nix ];
}
30
clanModules/admin/roles/default.nix
Normal file
@@ -0,0 +1,30 @@
{ lib, config, ... }:
{

  options.clan.admin = {
    allowedKeys = lib.mkOption {
      default = { };
      type = lib.types.attrsOf lib.types.str;
      description = "The allowed public keys for ssh access to the admin user";
      example = {
        "key_1" = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD...";
      };
    };
  };
  # Bad practice.
  # Should we add 'clanModules' to specialArgs?
  imports = [
    ../../sshd
    ../../root-password
  ];
  config = {

    warnings = [
      "The clan.admin module is deprecated and will be removed on 2025-07-15.
      Please migrate to user-maintained configuration or the new equivalent clan services
      (https://docs.clan.lol/reference/clanServices)."
    ];

    users.users.root.openssh.authorizedKeys.keys = builtins.attrValues config.clan.admin.allowedKeys;
  };
}
8
clanModules/auto-upgrade/README.md
Normal file
@@ -0,0 +1,8 @@
---
description = "Set up automatic upgrades"
categories = ["System"]
features = [ "inventory", "deprecated" ]
---

Whether to periodically upgrade NixOS to the latest version. If enabled, a
systemd timer will run `nixos-rebuild switch --upgrade` once a day.
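A minimal sketch of enabling this module; the only required option is the flake reference, as declared in `roles/default.nix` below (the flake URL here is a placeholder):

```nix
{
  # Hypothetical example: pull upgrades from your own clan flake.
  # The URL is a placeholder, not a real repository.
  clan.auto-upgrade.flake = "github:example-org/my-clan";
}
```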
32
clanModules/auto-upgrade/roles/default.nix
Normal file
@@ -0,0 +1,32 @@
{
  config,
  lib,
  ...
}:
let
  cfg = config.clan.auto-upgrade;
in
{
  options.clan.auto-upgrade = {
    flake = lib.mkOption {
      type = lib.types.str;
      description = "Flake reference";
    };
  };

  config = {

    warnings = [
      "The clan.auto-upgrade module is deprecated and will be removed on 2025-07-15.
      Please migrate to user-maintained configuration or the new equivalent clan services
      (https://docs.clan.lol/reference/clanServices)."
    ];

    system.autoUpgrade = {
      inherit (cfg) flake;
      enable = true;
      dates = "02:00";
      randomizedDelaySec = "45min";
    };
  };
}
16
clanModules/borgbackup-static/README.md
Normal file
@@ -0,0 +1,16 @@
---
description = "Statically configure borgbackup with sane defaults."
---
!!! Danger "Deprecated"
    Use [borgbackup](borgbackup.md) instead.

    Don't use borgbackup-static through [inventory](../../guides/inventory.md).

This module implements the `borgbackup` backend and provides sane defaults
for backup management through `borgbackup` for members of the clan.

Configure target machines where the backups should be sent to through `targets`.

Configure machines that should be backed up either through `includeMachines`,
which will exclusively add the included machines to the backup, or through
`excludeMachines`, which will add every machine except the excluded ones to the backup. A short configuration sketch follows below.
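A minimal sketch under assumed machine names (`vault`, `laptop`, and `desktop` are placeholders); `includeMachines` and `excludeMachines` are mutually exclusive, per the option descriptions in `default.nix`:

```nix
{
  clan.borgbackup-static = {
    # Machines that receive and store the backups.
    targets = [ "vault" ];
    # Only these machines are backed up.
    # Alternatively, set excludeMachines to back up everything else.
    includeMachines = [ "laptop" "desktop" ];
  };
}
```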
104
clanModules/borgbackup-static/default.nix
Normal file
@@ -0,0 +1,104 @@
{ lib, config, ... }:
let
  dir = config.clan.core.settings.directory;
  machineDir = dir + "/machines/";
in
{
  imports = [ ../borgbackup ];

  options.clan.borgbackup-static = {
    excludeMachines = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      example = lib.literalExpression "[ config.clan.core.settings.machine.name ]";
      default = [ ];
      description = ''
        Machines that should not be backed up.
        Mutually exclusive with includeMachines.
        If this is not empty, every other machine except the targets in the clan will be backed up by this module.
        If includeMachines is set, only the included machines will be backed up.
      '';
    };
    includeMachines = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      example = lib.literalExpression "[ config.clan.core.settings.machine.name ]";
      default = [ ];
      description = ''
        Machines that should be backed up.
        Mutually exclusive with excludeMachines.
      '';
    };
    targets = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      default = [ ];
      description = ''
        Machines that should act as target machines for backups.
      '';
    };
  };

  config.services.borgbackup.repos =
    let
      machines = builtins.readDir machineDir;
      borgbackupIpMachinePath = machines: machineDir + machines + "/facts/borgbackup.ssh.pub";
      filteredMachines =
        if ((builtins.length config.clan.borgbackup-static.includeMachines) != 0) then
          lib.filterAttrs (name: _: (lib.elem name config.clan.borgbackup-static.includeMachines)) machines
        else
          lib.filterAttrs (name: _: !(lib.elem name config.clan.borgbackup-static.excludeMachines)) machines;
      machinesMaybeKey = lib.mapAttrsToList (
        machine: _:
        let
          fullPath = borgbackupIpMachinePath machine;
        in
        if builtins.pathExists fullPath then machine else null
      ) filteredMachines;
      machinesWithKey = lib.filter (x: x != null) machinesMaybeKey;
      hosts = builtins.map (machine: {
        name = machine;
        value = {
          path = "/var/lib/borgbackup/${machine}";
          authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machine)) ];
        };
      }) machinesWithKey;
    in
    lib.mkIf
      (builtins.any (
        target: target == config.clan.core.settings.machine.name
      ) config.clan.borgbackup-static.targets)
      (if (builtins.listToAttrs hosts) != null then builtins.listToAttrs hosts else { });

  config.clan.borgbackup.destinations =
    let
      destinations = builtins.map (d: {
        name = d;
        value = {
          repo = "borg@${d}:/var/lib/borgbackup/${config.clan.core.settings.machine.name}";
        };
      }) config.clan.borgbackup-static.targets;
    in
    lib.mkIf (builtins.any (
      target: target == config.clan.core.settings.machine.name
    ) config.clan.borgbackup-static.includeMachines) (builtins.listToAttrs destinations);

  config.assertions = [
    {
      assertion =
        !(
          ((builtins.length config.clan.borgbackup-static.excludeMachines) != 0)
          && ((builtins.length config.clan.borgbackup-static.includeMachines) != 0)
        );
      message = ''
        The options:
        config.clan.borgbackup-static.excludeMachines = [${builtins.toString config.clan.borgbackup-static.excludeMachines}]
        and
        config.clan.borgbackup-static.includeMachines = [${builtins.toString config.clan.borgbackup-static.includeMachines}]
        are mutually exclusive.
        Use excludeMachines to exclude certain machines and back up the other clan machines.
        Use includeMachines to only back up certain machines.
      '';
    }
  ];
  config.warnings = lib.optional (
    builtins.length config.clan.borgbackup-static.targets > 0
  ) "The borgbackup-static module is deprecated, use the service via the inventory interface instead.";
}
14
clanModules/borgbackup/README.md
Normal file
@@ -0,0 +1,14 @@
---
description = "Efficient, deduplicating backup program with optional compression and secure encryption."
categories = ["System"]
features = [ "inventory", "deprecated" ]
---
BorgBackup (short: Borg) gives you:

- Space efficient storage of backups.
- Secure, authenticated encryption.
- Compression: lz4, zstd, zlib, lzma or none.
- Mountable backups with FUSE.
- Easy installation on multiple platforms: Linux, macOS, BSD, …
- Free software (BSD license).
- Backed by a large and active open-source community.
6
clanModules/borgbackup/default.nix
Normal file
@@ -0,0 +1,6 @@
# Don't import this file.
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
  imports = [ ./roles/client.nix ];
}
210
clanModules/borgbackup/roles/client.nix
Normal file
@@ -0,0 +1,210 @@
{
  config,
  lib,
  pkgs,
  ...
}:
let
  # Instances might be empty, if the module is not used via the inventory
  instances = config.clan.inventory.services.borgbackup or { };
  # roles = { ${role_name} :: { machines :: [string] } }
  allServers = lib.foldlAttrs (
    acc: _instanceName: instanceConfig:
    acc
    ++ (
      if builtins.elem machineName instanceConfig.roles.client.machines then
        instanceConfig.roles.server.machines
      else
        [ ]
    )
  ) [ ] instances;

  machineName = config.clan.core.settings.machine.name;

  cfg = config.clan.borgbackup;
  preBackupScript = ''
    declare -A preCommandErrors

    ${lib.concatMapStringsSep "\n" (
      state:
      lib.optionalString (state.preBackupCommand != null) ''
        echo "Running pre-backup command for ${state.name}"
        if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
          preCommandErrors["${state.name}"]=1
        fi
      ''
    ) (lib.attrValues config.clan.core.state)}

    if [[ ''${#preCommandErrors[@]} -gt 0 ]]; then
      echo "pre-backup commands failed for the following services:"
      for state in "''${!preCommandErrors[@]}"; do
        echo "  $state"
      done
      exit 1
    fi
  '';
in
{
  options.clan.borgbackup.destinations = lib.mkOption {
    type = lib.types.attrsOf (
      lib.types.submodule (
        { name, ... }:
        {
          options = {
            name = lib.mkOption {
              type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
              default = name;
              description = "the name of the backup job";
            };
            repo = lib.mkOption {
              type = lib.types.str;
              description = "the borgbackup repository to backup to";
            };
            rsh = lib.mkOption {
              type = lib.types.str;
              default = "ssh -i ${
                config.clan.core.vars.generators.borgbackup.files."borgbackup.ssh".path
              } -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
              defaultText = "ssh -i \${config.clan.core.vars.generators.borgbackup.files.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
              description = "the rsh to use for the backup";
            };
          };
        }
      )
    );
    default = { };
    description = ''
      destinations where the machine should be backed up to
    '';
  };

  options.clan.borgbackup.exclude = lib.mkOption {
    type = lib.types.listOf lib.types.str;
    example = [ "*.pyc" ];
    default = [ ];
    description = ''
      Directories/Files to exclude from the backup.
      Use * as a wildcard.
    '';
  };

  config = {

    warnings = [
      "The clan.borgbackup module is deprecated and will be removed on 2025-07-15.
      Please migrate to user-maintained configuration or the new equivalent clan services
      (https://docs.clan.lol/reference/clanServices)."
    ];

    # Destinations
    clan.borgbackup.destinations =
      let
        destinations = builtins.map (serverName: {
          name = serverName;
          value = {
            repo = "borg@${serverName}:/var/lib/borgbackup/${machineName}";
          };
        }) allServers;
      in
      (builtins.listToAttrs destinations);

    # Derived from the destinations
    systemd.services = lib.mapAttrs' (
      _: dest:
      lib.nameValuePair "borgbackup-job-${dest.name}" {
        # since borgbackup mounts the system read-only, we need to run in a
        # ExecStartPre script, so we can generate additional files.
        serviceConfig.ExecStartPre = [
          ''+${pkgs.writeShellScript "borgbackup-job-${dest.name}-pre-backup-commands" preBackupScript}''
        ];
      }
    ) cfg.destinations;

    services.borgbackup.jobs = lib.mapAttrs (_: dest: {
      paths = lib.unique (
        lib.flatten (map (state: state.folders) (lib.attrValues config.clan.core.state))
      );
      exclude = cfg.exclude;
      repo = dest.repo;
      environment.BORG_RSH = dest.rsh;
      compression = "auto,zstd";
      startAt = "*-*-* 01:00:00";
      persistentTimer = true;

      encryption = {
        mode = "repokey";
        passCommand = "cat ${config.clan.core.vars.generators.borgbackup.files."borgbackup.repokey".path}";
      };

      prune.keep = {
        within = "1d"; # Keep all archives from the last day
        daily = 7;
        weekly = 4;
        monthly = 0;
      };
    }) cfg.destinations;

    environment.systemPackages = [
      (pkgs.writeShellApplication {
        name = "borgbackup-create";
        runtimeInputs = [ config.systemd.package ];
        text = ''
          ${lib.concatMapStringsSep "\n" (dest: ''
            systemctl start borgbackup-job-${dest.name}
          '') (lib.attrValues cfg.destinations)}
        '';
      })
      (pkgs.writeShellApplication {
        name = "borgbackup-list";
        runtimeInputs = [ pkgs.jq ];
        text = ''
          (${
            lib.concatMapStringsSep "\n" (
              dest:
              # we need yes here to skip the changed url verification
              ''echo y | /run/current-system/sw/bin/borg-job-${dest.name} list --json | jq '[.archives[] | {"name": ("${dest.name}::${dest.repo}::" + .name)}]' ''
            ) (lib.attrValues cfg.destinations)
          }) | jq -s 'add // []'
        '';
      })
      (pkgs.writeShellApplication {
        name = "borgbackup-restore";
        runtimeInputs = [ pkgs.gawk ];
        text = ''
          cd /
          IFS=':' read -ra FOLDER <<< "''${FOLDERS-}"
          job_name=$(echo "$NAME" | awk -F'::' '{print $1}')
          backup_name=''${NAME#"$job_name"::}
          if [[ ! -x /run/current-system/sw/bin/borg-job-"$job_name" ]]; then
            echo "borg-job-$job_name not found: Backup name is invalid" >&2
            exit 1
          fi
          echo y | /run/current-system/sw/bin/borg-job-"$job_name" extract "$backup_name" "''${FOLDER[@]}"
        '';
      })
    ];

    clan.core.vars.generators.borgbackup = {
      files."borgbackup.ssh.pub".secret = false;
      files."borgbackup.ssh" = { };
      files."borgbackup.repokey" = { };

      migrateFact = "borgbackup";
      runtimeInputs = [
        pkgs.coreutils
        pkgs.openssh
        pkgs.xkcdpass
      ];
      script = ''
        ssh-keygen -t ed25519 -N "" -C "" -f "$out"/borgbackup.ssh
        xkcdpass -n 4 -d - > "$out"/borgbackup.repokey
      '';
    };

    clan.core.backups.providers.borgbackup = {
      list = "borgbackup-list";
      create = "borgbackup-create";
      restore = "borgbackup-restore";
    };
  };
}
63
clanModules/borgbackup/roles/server.nix
Normal file
@@ -0,0 +1,63 @@
{ config, lib, ... }:
let
  dir = config.clan.core.settings.directory;
  machineDir = dir + "/vars/per-machine/";
  machineName = config.clan.core.settings.machine.name;

  # Instances might be empty, if the module is not used via the inventory
  #
  # Type: { ${instanceName} :: { roles :: Roles } }
  # Roles :: { ${role_name} :: { machines :: [string] } }
  instances = config.clan.inventory.services.borgbackup or { };

  allClients = lib.foldlAttrs (
    acc: _instanceName: instanceConfig:
    acc
    ++ (
      if (builtins.elem machineName instanceConfig.roles.server.machines) then
        instanceConfig.roles.client.machines
      else
        [ ]
    )
  ) [ ] instances;
in
{
  options = {
    clan.borgbackup.directory = lib.mkOption {
      type = lib.types.str;
      default = "/var/lib/borgbackup";
      description = ''
        The directory where the borgbackup repositories are stored.
      '';
    };
  };
  config.services.borgbackup.repos =
    let
      borgbackupIpMachinePath = machine: machineDir + machine + "/borgbackup/borgbackup.ssh.pub/value";

      machinesMaybeKey = builtins.map (
        machine:
        let
          fullPath = borgbackupIpMachinePath machine;
        in
        if builtins.pathExists fullPath then
          machine
        else
          lib.warn ''
            Machine ${machine} does not have a borgbackup key at ${fullPath},
            run `clan vars generate ${machine}` to generate it.
          '' null
      ) allClients;

      machinesWithKey = lib.filter (x: x != null) machinesMaybeKey;

      hosts = builtins.map (machine: {
        name = machine;
        value = {
          path = "${config.clan.borgbackup.directory}/${machine}";
          authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machine)) ];
        };
      }) machinesWithKey;
    in
    if (builtins.listToAttrs hosts) != [ ] then builtins.listToAttrs hosts else { };
}
10
clanModules/data-mesher/README.md
Normal file
@@ -0,0 +1,10 @@
---
description = "Set up data-mesher"
categories = ["System"]
features = [ "inventory" ]

[constraints]
roles.admin.min = 1
roles.admin.max = 1
---

19
clanModules/data-mesher/lib.nix
Normal file
@@ -0,0 +1,19 @@
lib: {

  machines =
    config:
    let
      instanceNames = builtins.attrNames config.clan.inventory.services.data-mesher;
      instanceName = builtins.head instanceNames;
      dataMesherInstances = config.clan.inventory.services.data-mesher.${instanceName};

      uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list);
    in
    rec {
      admins = dataMesherInstances.roles.admin.machines or [ ];
      signers = dataMesherInstances.roles.signer.machines or [ ];
      peers = dataMesherInstances.roles.peer.machines or [ ];
      bootstrap = uniqueStrings (admins ++ signers);
    };

}
58
clanModules/data-mesher/roles/admin.nix
Normal file
@@ -0,0 +1,58 @@
{ lib, config, ... }:
let
  cfg = config.clan.data-mesher;

  dmLib = import ../lib.nix lib;
in
{
  imports = [
    ../shared.nix
  ];

  options.clan.data-mesher = {
    network = {
      tld = lib.mkOption {
        type = lib.types.str;
        default = (config.networking.domain or "clan");
        description = "Top level domain to use for the network";
      };

      hostTTL = lib.mkOption {
        type = lib.types.str;
        default = "672h"; # 28 days
        example = "24h";
        description = "The TTL for hosts in the network, in the form of a Go time.Duration";
      };
    };
  };

  config = {

    warnings = [
      "The clan.admin module is deprecated and will be removed on 2025-07-15.
      Please migrate to user-maintained configuration or the new equivalent clan services
      (https://docs.clan.lol/reference/clanServices)."
    ];

    services.data-mesher.initNetwork =
      let
        # for a given machine, read its public key and remove any new lines
        readHostKey =
          machine:
          let
            path = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/data-mesher-host-key/public_key/value";
          in
          builtins.elemAt (lib.splitString "\n" (builtins.readFile path)) 1;
      in
      {
        enable = true;
        keyPath = config.clan.core.vars.generators.data-mesher-network-key.files.private_key.path;

        tld = cfg.network.tld;
        hostTTL = cfg.network.hostTTL;

        # admin and signer host public keys
        signingKeys = builtins.map readHostKey (dmLib.machines config).bootstrap;
      };
  };
}
5
clanModules/data-mesher/roles/peer.nix
Normal file
@@ -0,0 +1,5 @@
{
  imports = [
    ../shared.nix
  ];
}
5
clanModules/data-mesher/roles/signer.nix
Normal file
@@ -0,0 +1,5 @@
{
  imports = [
    ../shared.nix
  ];
}
152
clanModules/data-mesher/shared.nix
Normal file
@@ -0,0 +1,152 @@
{
  config,
  lib,
  ...
}:
let
  cfg = config.clan.data-mesher;
  dmLib = import ./lib.nix lib;

  # the default bootstrap nodes are any machines with the admin or signer role
  # we iterate through those machines, determining an IP address for them based on their VPN
  # currently only supports zerotier
  defaultBootstrapNodes = builtins.foldl' (
    urls: name:
    let
      ipPath = "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value";
    in
    if builtins.pathExists ipPath then
      let
        ip = builtins.readFile ipPath;
      in
      urls ++ [ "[${ip}]:${builtins.toString cfg.network.port}" ]
    else
      urls
  ) [ ] (dmLib.machines config).bootstrap;
in
{
  options.clan.data-mesher = {

    bootstrapNodes = lib.mkOption {
      type = lib.types.nullOr (lib.types.listOf lib.types.str);
      default = null;
      description = ''
        A list of bootstrap nodes that act as an initial gateway when joining
        the cluster.
      '';
      example = [
        "192.168.1.1:7946"
        "192.168.1.2:7946"
      ];
    };

    network = {

      interface = lib.mkOption {
        type = lib.types.str;
        description = ''
          The interface over which cluster communication should be performed.
          All the IP addresses associated with this interface will be part of
          our host claim, including both IPv4 and IPv6.

          This should be set to an internal/VPN interface.
        '';
        example = "tailscale0";
      };

      port = lib.mkOption {
        type = lib.types.port;
        default = 7946;
        description = ''
          Port to listen on for cluster communication.
        '';
      };
    };
  };

  config = {

    services.data-mesher = {
      enable = true;
      openFirewall = true;

      settings = {
        log_level = "warn";
        state_dir = "/var/lib/data-mesher";

        # read network id from vars
        network.id = config.clan.core.vars.generators.data-mesher-network-key.files.public_key.value;

        host = {
          names = [ config.networking.hostName ];
          key_path = config.clan.core.vars.generators.data-mesher-host-key.files.private_key.path;
        };

        cluster = {
          port = cfg.network.port;
          join_interval = "30s";
          push_pull_interval = "30s";

          interface = cfg.network.interface;

          bootstrap_nodes = if cfg.bootstrapNodes == null then defaultBootstrapNodes else cfg.bootstrapNodes;
        };

        http.port = 7331;
        http.interface = "lo";
      };
    };

    # Generate host key.
    clan.core.vars.generators.data-mesher-host-key = {
      files =
        let
          owner = config.users.users.data-mesher.name;
        in
        {
          private_key = {
            inherit owner;
          };
          public_key.secret = false;
        };

      runtimeInputs = [
        config.services.data-mesher.package
      ];

      script = ''
        data-mesher generate keypair \
          --public-key-path "$out"/public_key \
          --private-key-path "$out"/private_key
      '';
    };

    clan.core.vars.generators.data-mesher-network-key = {
      # generated once per clan
      share = true;

      files =
        let
          owner = config.users.users.data-mesher.name;
        in
        {
          private_key = {
            inherit owner;
          };
          public_key.secret = false;
        };

      runtimeInputs = [
        config.services.data-mesher.package
      ];

      script = ''
        data-mesher generate keypair \
          --public-key-path "$out"/public_key \
          --private-key-path "$out"/private_key
      '';
    };
  };
}
17
clanModules/deltachat/README.md
Normal file
@@ -0,0 +1,17 @@
---
description = "Email-based instant messaging for Desktop."
categories = ["Social"]
features = [ "inventory", "deprecated" ]
---

!!! info
    This module will automatically configure an email server on the machine to handle e-mail messaging seamlessly.

## Features

- [x] **Email-based**: Uses any email account as its backend.
- [x] **End-to-End Encryption**: Supports Autocrypt to automatically encrypt messages.
- [x] **No Phone Number Required**: Uses your email address instead of a phone number.
- [x] **Cross-Platform**: Available on desktop and mobile platforms.
- [x] **Automatic Server Setup**: Includes your own DeltaChat server for enhanced control and privacy.
- [ ] **Bake a cake**: This module cannot cake a bake.
Some files were not shown because too many files have changed in this diff.