Compare commits
1 commit: revert-del ... feat/clan-

| Author | SHA1 | Date |
|---|---|---|
|  | f6cec3521a |  |

.envrc (4 changes)
@@ -1,13 +1,11 @@
# shellcheck shell=bash
if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
  source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4="
fi

watch_file .direnv/selected-shell
watch_file formatter.nix

if [ -e .direnv/selected-shell ]; then
  use flake ".#$(cat .direnv/selected-shell)"
  use flake .#$(cat .direnv/selected-shell)
else
  use flake
fi
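The two adjacent `use flake` lines above are the changed pair of this hunk; the +/- markers were dropped during extraction, but the quoted form passes the flake reference to direnv as a single word even if the selected shell name contains unusual characters. A minimal sketch of how the `.direnv/selected-shell` switch is meant to be driven from a checkout (the shell name `clan-cli` is only an assumed example, not taken from this diff):

```shell-session
# Write the name of the devshell output that direnv should load;
# "clan-cli" is a hypothetical example name.
mkdir -p .direnv
echo clan-cli > .direnv/selected-shell
direnv reload    # .envrc re-runs and evaluates: use flake ".#clan-cli"

# Remove the marker file to fall back to the default devshell.
rm .direnv/selected-shell
direnv reload
```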
@@ -1,9 +1,17 @@
name: checks
on:
  pull_request:
  push:
    branches:
      - main
jobs:
  checks:
    runs-on: nix
    steps:
      - uses: actions/checkout@v3
      - run: nix run --refresh github:Mic92/nix-fast-build -- --no-nom --eval-workers 10
  checks-impure:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@v3
      - run: nix run .#impure-checks
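Both `actions/checkout` pins appear in the `checks-impure` job because this hunk changes the pinned action version; the +/- markers were lost in extraction. The check commands themselves can be reproduced locally from a checkout of the repository (a sketch; assumes a flake-enabled Nix installation):

```shell-session
# Evaluate and build all flake checks, mirroring the "checks" job above.
nix run --refresh github:Mic92/nix-fast-build -- --no-nom --eval-workers 10

# Run the impure checks, mirroring the "checks-impure" job above.
nix run .#impure-checks
```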
@@ -1,13 +0,0 @@
name: deploy
on:
  push:
    branches:
      - main
jobs:
  deploy-docs:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4
      - run: nix run .#deploy-docs
        env:
          SSH_HOMEPAGE_KEY: ${{ secrets.SSH_HOMEPAGE_KEY }}
@@ -1,53 +0,0 @@
name: "Update pinned clan-core for checks"
on:
  repository_dispatch:
  workflow_dispatch:
  schedule:
    - cron: "51 2 * * *"
jobs:
  update-pinned-clan-core:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: true
      - name: Update clan-core for checks
        run: nix run .#update-clan-core-for-checks
      - name: Create pull request
        env:
          CI_BOT_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
        run: |
          export GIT_AUTHOR_NAME=clan-bot GIT_AUTHOR_EMAIL=clan-bot@clan.lol GIT_COMMITTER_NAME=clan-bot GIT_COMMITTER_EMAIL=clan-bot@clan.lol
          git commit -am "Update pinned clan-core for checks"
          git push origin +HEAD:update-clan-core-for-checks
          set -x
          resp=$(nix run --inputs-from . nixpkgs#curl -- -X POST \
            -H "Authorization: token $CI_BOT_TOKEN" \
            -H "Content-Type: application/json" \
            -d '{
              "head": "update-clan-core-for-checks",
              "base": "main",
              "title": "Update Clan Core for Checks",
              "body": "This PR updates the pinned clan-core flake input that is used for checks."
            }' \
            "https://git.clan.lol/api/v1/repos/clan/clan-core/pulls")
          pr_number=$(echo "$resp" | jq -r '.number')

          # Merge when succeed
          while true; do
            resp=$(nix run --inputs-from . nixpkgs#curl -- -X POST \
              -H "Authorization: token $CI_BOT_TOKEN" \
              -H "Content-Type: application/json" \
              -d '{
                "Do": "merge",
                "merge_when_checks_succeed": true,
                "delete_branch_after_merge": true
              }' \
              "https://git.clan.lol/api/v1/repos/clan/clan-core/pulls/$pr_number/merge")
            msg=$(echo $resp | jq -r '.message')
            if [[ "$msg" != "Please try again later" ]]; then
              break
            fi
            echo "Retrying in 2 seconds..."
            sleep 2
          done
.github/dependabot.yml (vendored, 6 changes)
@@ -1,6 +0,0 @@
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
.github/workflows/repo-sync.yml (vendored, 28 changes)
@@ -1,28 +0,0 @@
name: Github<->Gitea sync
on:
  schedule:
    - cron: "39 * * * *"
  workflow_dispatch:
permissions:
  contents: write
jobs:
  repo-sync:
    if: github.repository_owner == 'clan-lol'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false
      - uses: actions/create-github-app-token@v2
        id: app-token
        with:
          app-id: ${{ vars.CI_APP_ID }}
          private-key: ${{ secrets.CI_PRIVATE_KEY }}
      - name: repo-sync
        uses: repo-sync/github-sync@v2
        env:
          GITHUB_TOKEN: ${{ steps.app-token.outputs.token }}
        with:
          source_repo: "https://git.clan.lol/clan/clan-core.git"
          source_branch: "main"
          destination_branch: "main"
.gitignore (vendored, 37 changes)
@@ -1,23 +1,17 @@
.direnv
.nixos-test-history
.hypothesis
***/.hypothesis
out.log
.coverage.*
pkgs/repro-hook
testdir
**/qubeclan
**/testdir
democlan
example_clan
result*
/pkgs/clan-cli/clan_lib/nixpkgs
/pkgs/clan-cli/clan_cli/nixpkgs
/pkgs/clan-cli/clan_cli/webui/assets
nixos.qcow2
*.glade~
**/*.glade~
/docs/out
/pkgs/clan-cli/clan_lib/select
.local.env

# macOS stuff
.DS_Store

# python
__pycache__

@@ -28,18 +22,9 @@ __pycache__
.ruff_cache
htmlcov

# node
node_modules
dist
.webui

# TODO: remove after bug in select is fixed
select

# Generated files
pkgs/clan-app/ui/api/API.json
pkgs/clan-app/ui/api/API.ts
pkgs/clan-app/ui/api/Inventory.ts
pkgs/clan-app/ui/api/modules_schemas.json
pkgs/clan-app/ui/api/schema.json
pkgs/clan-app/ui/.fonts
# flatpak
.flatpak-builder
build
build-dir
repo
.env
@@ -1,2 +0,0 @@
nixosModules/clanCore/vars/.* @lopter
pkgs/clan-cli/clan_cli/(secrets|vars)/.* @lopter
@@ -1,4 +1,21 @@
# Contributing to Clan
# Contributing to cLAN

<!-- Local file: docs/CONTRIBUTING.md -->
Go to the Contributing guide at https://docs.clan.lol/guides/contributing/CONTRIBUTING
## Live-reloading documentation

Enter the `docs` directory:

```shell-session
cd docs
```

Enter the development shell or enable `direnv`:

```shell-session
direnv allow
```

Run a local server:

```shell-session
mkdocs serve
```
LICENSE.md (19 changes)
@@ -1,19 +0,0 @@
Copyright 2023-2024 Clan contributors

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
README.md (26 changes)
@@ -1,6 +1,6 @@
# Clan core repository
# cLAN Core Repository

Welcome to the Clan core repository, the heart of the [clan.lol](https://clan.lol/) project! This monorepo is the foundation of Clan, a revolutionary open-source project aimed at restoring fun, freedom, and functionality to computing. Here, you'll find all the essential packages, NixOS modules, CLI tools, and tests needed to contribute to and work with the Clan project. Clan leverages the Nix system to ensure reliability, security, and seamless management of digital environments, putting the power back into the hands of users.
Welcome to the cLAN Core Repository, the heart of the [clan.lol](https://clan.lol/) project! This monorepo is the foundation of Clan, a revolutionary open-source project aimed at restoring fun, freedom, and functionality to computing. Here, you'll find all the essential packages, NixOS modules, CLI tools, and tests needed to contribute to and work with the cLAN project. Clan leverages the Nix system to ensure reliability, security, and seamless management of digital environments, putting the power back into the hands of users.

## Why Clan?

@@ -14,32 +14,32 @@ Our mission is simple: to democratize computing by providing tools that empower
- **Robust Backup Management:** Long-term, self-hosted data preservation.
- **Intuitive Secret Management:** Simplified encryption and password management processes.

## Getting started with Clan
## Getting Started with cLAN

If you're new to Clan and eager to dive in, start with our quickstart guide and explore the core functionalities that Clan offers:
If you're new to cLAN and eager to dive in, start with our quickstart guide and explore the core functionalities that Clan offers:

- **Quickstart Guide**: Check out [getting started](https://docs.clan.lol/#starting-with-a-new-clan-project)<!-- [docs/site/index.md](docs/site/index.md) --> to get up and running with Clan in no time.
- **Quickstart Guide**: Check out [getting started](https://docs.clan.lol/#starting-with-a-new-clan-project)<!-- [docs/site/index.md](docs/site/index.md) --> to get up and running with cLAN in no time.

### Managing secrets
### Managing Secrets

In the Clan ecosystem, security is paramount. Learn how to handle secrets effectively:

- **Secrets Management**: Securely manage secrets by consulting [secrets](https://docs.clan.lol/guides/getting-started/secrets/)<!-- [secrets.md](docs/site/guides/getting-started/secrets.md) -->.
- **Secrets Management**: Securely manage secrets by consulting [secrets](https://docs.clan.lol/getting-started/secrets/)<!-- [secrets.md](docs/site/getting-started/secrets.md) -->.

### Contributing to Clan
### Contributing to cLAN

The Clan project thrives on community contributions. We welcome everyone to contribute and collaborate:

- **Contribution Guidelines**: Make a meaningful impact by following the steps in [contributing](https://docs.clan.lol/contributing/contributing/)<!-- [contributing.md](docs/CONTRIBUTING.md) -->.

## Join the revolution
## Join the Revolution

Clan is more than a tool; it's a movement towards a better digital future. By contributing to the Clan project, you're part of changing technology for the better, together.
Clan is more than a tool; it's a movement towards a better digital future. By contributing to the cLAN project, you're part of changing technology for the better, together.

### Community and support
### Community and Support

Connect with us and the Clan community for support and discussion:

- [Matrix channel](https://matrix.to/#/#clan:clan.lol) for live discussions.
- IRC bridge on [hackint#clan](https://chat.hackint.org/#/connect?join=clan) for real-time chat support.
- [Matrix channel](https://matrix.to/#/!djzOHBBBHnwQkgNgdV:matrix.org?via=blog.clan.lol) for live discussions.
- IRC bridges (coming soon) for real-time chat support.
@@ -1,41 +0,0 @@
|
||||
{ self, pkgs, ... }:
|
||||
{
|
||||
name = "app-ocr-smoke-test";
|
||||
|
||||
enableOCR = true;
|
||||
|
||||
nodes = {
|
||||
wayland =
|
||||
{ modulesPath, ... }:
|
||||
{
|
||||
imports = [ (modulesPath + "/../tests/common/wayland-cage.nix") ];
|
||||
services.cage.program = "${self.packages.${pkgs.system}.clan-app}/bin/clan-app";
|
||||
virtualisation.memorySize = 2047;
|
||||
# TODO: get rid of this and fix debus-proxy error instead
|
||||
services.cage.environment.WEBKIT_DISABLE_SANDBOX_THIS_IS_DANGEROUS = "1";
|
||||
};
|
||||
xorg =
|
||||
{ pkgs, modulesPath, ... }:
|
||||
{
|
||||
imports = [
|
||||
(modulesPath + "/../tests/common/user-account.nix")
|
||||
(modulesPath + "/../tests/common/x11.nix")
|
||||
];
|
||||
virtualisation.memorySize = 2047;
|
||||
services.xserver.enable = true;
|
||||
services.xserver.displayManager.sessionCommands = "${
|
||||
self.packages.${pkgs.system}.clan-app
|
||||
}/bin/clan-app";
|
||||
test-support.displayManager.auto.user = "alice";
|
||||
};
|
||||
};
|
||||
testScript = ''
|
||||
start_all()
|
||||
|
||||
wayland.wait_for_unit('graphical.target')
|
||||
xorg.wait_for_unit('graphical.target')
|
||||
|
||||
wayland.wait_for_text('Welcome to Clan')
|
||||
xorg.wait_for_text('Welcome to Clan')
|
||||
'';
|
||||
}
|
||||
@@ -5,90 +5,62 @@
|
||||
fileSystems."/".device = "/dev/null";
|
||||
boot.loader.grub.device = "/dev/null";
|
||||
};
|
||||
clan.inventory.services = {
|
||||
borgbackup.test-backup = {
|
||||
roles.client.machines = [ "test-backup" ];
|
||||
roles.server.machines = [ "test-backup" ];
|
||||
};
|
||||
};
|
||||
flake.nixosModules = {
|
||||
test-backup =
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
let
|
||||
dependencies = [
|
||||
self
|
||||
pkgs.stdenv.drvPath
|
||||
self.clan.clanInternals.machines.${pkgs.hostPlatform.system}.test-backup.config.system.clan.deployment.file
|
||||
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-backup.config.system.clan.deployment.file
|
||||
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
|
||||
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
# Do not import inventory modules. They should be configured via 'clan.inventory'
|
||||
#
|
||||
# TODO: Configure localbackup via inventory
|
||||
self.clanModules.borgbackup
|
||||
self.clanModules.localbackup
|
||||
self.clanModules.sshd
|
||||
];
|
||||
# Borgbackup overrides
|
||||
services.borgbackup.repos.test-backups = {
|
||||
path = "/var/lib/borgbackup/test-backups";
|
||||
authorizedKeys = [ (builtins.readFile ../assets/ssh/pubkey) ];
|
||||
};
|
||||
clan.borgbackup.destinations.test-backup.repo = lib.mkForce "borg@machine:.";
|
||||
|
||||
clan.core.networking.targetHost = "machine";
|
||||
clan.networking.targetHost = "machine";
|
||||
networking.hostName = "machine";
|
||||
services.openssh.settings.UseDns = false;
|
||||
|
||||
programs.ssh.knownHosts = {
|
||||
machine.hostNames = [ "machine" ];
|
||||
machine.publicKey = builtins.readFile ../assets/ssh/pubkey;
|
||||
machine.publicKey = builtins.readFile ../lib/ssh/pubkey;
|
||||
};
|
||||
|
||||
services.openssh = {
|
||||
enable = true;
|
||||
settings.UsePAM = false;
|
||||
settings.UseDns = false;
|
||||
hostKeys = [
|
||||
{
|
||||
path = "/root/.ssh/id_ed25519";
|
||||
type = "ed25519";
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
users.users.root.openssh.authorizedKeys.keyFiles = [ ../assets/ssh/pubkey ];
|
||||
|
||||
# This is needed to unlock the user for sshd
|
||||
# Because we use sshd without setuid binaries
|
||||
users.users.borg.initialPassword = "hello";
|
||||
users.users.root.openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
|
||||
|
||||
systemd.tmpfiles.settings."vmsecrets" = {
|
||||
"/root/.ssh/id_ed25519" = {
|
||||
C.argument = "${../assets/ssh/privkey}";
|
||||
C.argument = "${../lib/ssh/privkey}";
|
||||
z = {
|
||||
mode = "0400";
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
"/etc/secrets/ssh.id_ed25519" = {
|
||||
C.argument = "${../assets/ssh/privkey}";
|
||||
C.argument = "${../lib/ssh/privkey}";
|
||||
z = {
|
||||
mode = "0400";
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
"/etc/secrets/borgbackup/borgbackup.ssh" = {
|
||||
C.argument = "${../assets/ssh/privkey}";
|
||||
"/etc/secrets/borgbackup.ssh" = {
|
||||
C.argument = "${../lib/ssh/privkey}";
|
||||
z = {
|
||||
mode = "0400";
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
"/etc/secrets/borgbackup/borgbackup.repokey" = {
|
||||
"/etc/secrets/borgbackup.repokey" = {
|
||||
C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
|
||||
z = {
|
||||
mode = "0400";
|
||||
@@ -96,10 +68,17 @@
|
||||
};
|
||||
};
|
||||
};
|
||||
clan.core.facts.secretStore = "vm";
|
||||
clan.core.vars.settings.secretStore = "vm";
|
||||
clanCore.facts.secretStore = "vm";
|
||||
|
||||
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
|
||||
environment.systemPackages = [
|
||||
self.packages.${pkgs.system}.clan-cli
|
||||
(pkgs.writeShellScriptBin "pre-restore-command" ''
|
||||
touch /var/test-service/pre-restore-command
|
||||
'')
|
||||
(pkgs.writeShellScriptBin "post-restore-command" ''
|
||||
touch /var/test-service/post-restore-command
|
||||
'')
|
||||
];
|
||||
environment.etc.install-closure.source = "${closureInfo}/store-paths";
|
||||
nix.settings = {
|
||||
substituters = lib.mkForce [ ];
|
||||
@@ -108,20 +87,14 @@
|
||||
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
|
||||
};
|
||||
system.extraDependencies = dependencies;
|
||||
clan.core.state.test-backups.folders = [ "/var/test-backups" ];
|
||||
clanCore.state.test-backups.folders = [ "/var/test-backups" ];
|
||||
|
||||
clan.core.state.test-service = {
|
||||
preBackupScript = ''
|
||||
touch /var/test-service/pre-backup-command
|
||||
'';
|
||||
preRestoreScript = ''
|
||||
touch /var/test-service/pre-restore-command
|
||||
'';
|
||||
postRestoreScript = ''
|
||||
touch /var/test-service/post-restore-command
|
||||
'';
|
||||
clanCore.state.test-service = {
|
||||
preRestoreCommand = "pre-restore-command";
|
||||
postRestoreCommand = "post-restore-command";
|
||||
folders = [ "/var/test-service" ];
|
||||
};
|
||||
clan.borgbackup.destinations.test-backup.repo = "borg@machine:.";
|
||||
|
||||
fileSystems."/mnt/external-disk" = {
|
||||
device = "/dev/vdb"; # created in tests with virtualisation.emptyDisks
|
||||
@@ -142,28 +115,25 @@
|
||||
touch /run/unmount-external-disk
|
||||
'';
|
||||
};
|
||||
|
||||
services.borgbackup.repos.test-backups = {
|
||||
path = "/var/lib/borgbackup/test-backups";
|
||||
authorizedKeys = [ (builtins.readFile ../lib/ssh/pubkey) ];
|
||||
};
|
||||
};
|
||||
};
|
||||
perSystem =
|
||||
{ pkgs, ... }:
|
||||
let
|
||||
clanCore = self.checks.x86_64-linux.clan-core-for-checks;
|
||||
in
|
||||
{ nodes, pkgs, ... }:
|
||||
{
|
||||
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
|
||||
nixos-test-backups = self.clanLib.test.containerTest {
|
||||
name = "nixos-test-backups";
|
||||
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
|
||||
test-backups = (import ../lib/test-base.nix) {
|
||||
name = "test-backups";
|
||||
nodes.machine = {
|
||||
imports =
|
||||
[
|
||||
self.nixosModules.clanCore
|
||||
# Some custom overrides for the backup tests
|
||||
self.nixosModules.test-backup
|
||||
]
|
||||
++
|
||||
# import the inventory generated nixosModules
|
||||
self.clan.clanInternals.inventoryClass.machines.test-backup.machineImports;
|
||||
clan.core.settings.directory = ./.;
|
||||
imports = [
|
||||
self.nixosModules.clanCore
|
||||
self.nixosModules.test-backup
|
||||
];
|
||||
virtualisation.emptyDiskImages = [ 256 ];
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
@@ -175,14 +145,14 @@
|
||||
machine.succeed("echo testing > /var/test-backups/somefile")
|
||||
|
||||
# create
|
||||
machine.succeed("clan backups create --debug --flake ${clanCore} test-backup")
|
||||
machine.succeed("clan --debug --flake ${self} backups create test-backup")
|
||||
machine.wait_until_succeeds("! systemctl is-active borgbackup-job-test-backup >&2")
|
||||
machine.succeed("test -f /run/mount-external-disk")
|
||||
machine.succeed("test -f /run/unmount-external-disk")
|
||||
|
||||
# list
|
||||
backup_id = json.loads(machine.succeed("borg-job-test-backup list --json"))["archives"][0]["archive"]
|
||||
out = machine.succeed("clan backups list --debug --flake ${clanCore} test-backup").strip()
|
||||
out = machine.succeed("clan --debug --flake ${self} backups list test-backup").strip()
|
||||
print(out)
|
||||
assert backup_id in out, f"backup {backup_id} not found in {out}"
|
||||
localbackup_id = "hdd::/mnt/external-disk/snapshot.0"
|
||||
@@ -190,19 +160,17 @@
|
||||
|
||||
## borgbackup restore
|
||||
machine.succeed("rm -f /var/test-backups/somefile")
|
||||
machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup borgbackup 'test-backup::borg@machine:.::{backup_id}' >&2")
|
||||
machine.succeed(f"clan --debug --flake ${self} backups restore test-backup borgbackup 'test-backup::borg@machine:.::{backup_id}' >&2")
|
||||
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
|
||||
machine.succeed("test -f /var/test-service/pre-restore-command")
|
||||
machine.succeed("test -f /var/test-service/post-restore-command")
|
||||
machine.succeed("test -f /var/test-service/pre-backup-command")
|
||||
|
||||
## localbackup restore
|
||||
machine.succeed("rm -rf /var/test-backups/somefile /var/test-service/ && mkdir -p /var/test-service")
|
||||
machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup localbackup '{localbackup_id}' >&2")
|
||||
machine.succeed("rm -f /var/test-backups/somefile /var/test-service/{pre,post}-restore-command")
|
||||
machine.succeed(f"clan --debug --flake ${self} backups restore test-backup localbackup '{localbackup_id}' >&2")
|
||||
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
|
||||
machine.succeed("test -f /var/test-service/pre-restore-command")
|
||||
machine.succeed("test -f /var/test-service/post-restore-command")
|
||||
machine.succeed("test -f /var/test-service/pre-backup-command")
|
||||
'';
|
||||
} { inherit pkgs self; };
|
||||
};
|
||||
|
||||
@@ -1,51 +0,0 @@
|
||||
(
|
||||
{ ... }:
|
||||
{
|
||||
name = "borgbackup";
|
||||
|
||||
nodes.machine =
|
||||
{ self, pkgs, ... }:
|
||||
{
|
||||
imports = [
|
||||
self.clanModules.borgbackup
|
||||
self.nixosModules.clanCore
|
||||
{
|
||||
services.openssh.enable = true;
|
||||
services.borgbackup.repos.testrepo = {
|
||||
authorizedKeys = [ (builtins.readFile ../assets/ssh/pubkey) ];
|
||||
};
|
||||
}
|
||||
{
|
||||
clan.core.settings.directory = ./.;
|
||||
clan.core.state.testState.folders = [ "/etc/state" ];
|
||||
environment.etc.state.text = "hello world";
|
||||
systemd.tmpfiles.settings."vmsecrets" = {
|
||||
"/etc/secrets/borgbackup/borgbackup.ssh" = {
|
||||
C.argument = "${../assets/ssh/privkey}";
|
||||
z = {
|
||||
mode = "0400";
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
"/etc/secrets/borgbackup/borgbackup.repokey" = {
|
||||
C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
|
||||
z = {
|
||||
mode = "0400";
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
};
|
||||
# clan.core.facts.secretStore = "vm";
|
||||
clan.core.vars.settings.secretStore = "vm";
|
||||
|
||||
clan.borgbackup.destinations.test.repo = "borg@localhost:.";
|
||||
}
|
||||
];
|
||||
};
|
||||
testScript = ''
|
||||
start_all()
|
||||
machine.systemctl("start --wait borgbackup-job-test.service")
|
||||
assert "machine-test" in machine.succeed("BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes /run/current-system/sw/bin/borg-job-test list")
|
||||
'';
|
||||
}
|
||||
)
|
||||
@@ -1,119 +1,51 @@
|
||||
{
|
||||
pkgs,
|
||||
nixosLib,
|
||||
clan-core,
|
||||
...
|
||||
}:
|
||||
nixosLib.runTest (
|
||||
(import ../lib/test-base.nix) (
|
||||
{ ... }:
|
||||
{
|
||||
imports = [
|
||||
clan-core.modules.nixosTest.clanTest
|
||||
];
|
||||
name = "borgbackup";
|
||||
|
||||
hostPkgs = pkgs;
|
||||
nodes.machine =
|
||||
{ self, pkgs, ... }:
|
||||
{
|
||||
imports = [
|
||||
self.clanModules.borgbackup
|
||||
self.nixosModules.clanCore
|
||||
{
|
||||
services.openssh.enable = true;
|
||||
services.borgbackup.repos.testrepo = {
|
||||
authorizedKeys = [ (builtins.readFile ../lib/ssh/pubkey) ];
|
||||
};
|
||||
}
|
||||
{
|
||||
clanCore.machineName = "machine";
|
||||
clanCore.clanDir = ./.;
|
||||
clanCore.state.testState.folders = [ "/etc/state" ];
|
||||
environment.etc.state.text = "hello world";
|
||||
systemd.tmpfiles.settings."vmsecrets" = {
|
||||
"/etc/secrets/borgbackup.ssh" = {
|
||||
C.argument = "${../lib/ssh/privkey}";
|
||||
z = {
|
||||
mode = "0400";
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
"/etc/secrets/borgbackup.repokey" = {
|
||||
C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
|
||||
z = {
|
||||
mode = "0400";
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
};
|
||||
clanCore.facts.secretStore = "vm";
|
||||
|
||||
name = "service-borgbackup";
|
||||
|
||||
clan = {
|
||||
directory = ./.;
|
||||
test.useContainers = true;
|
||||
modules."@clan/borgbackup" = ../../clanServices/borgbackup/default.nix;
|
||||
inventory = {
|
||||
|
||||
machines.clientone = { };
|
||||
machines.serverone = { };
|
||||
|
||||
instances = {
|
||||
borgone = {
|
||||
|
||||
module.name = "@clan/borgbackup";
|
||||
module.input = "self";
|
||||
|
||||
roles.client.machines."clientone" = { };
|
||||
roles.server.machines."serverone".settings.directory = "/tmp/borg-test";
|
||||
};
|
||||
};
|
||||
clan.borgbackup.destinations.test.repo = "borg@localhost:.";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
nodes = {
|
||||
|
||||
serverone = {
|
||||
services.openssh.enable = true;
|
||||
# Needed so PAM doesn't see the user as locked
|
||||
users.users.borg.password = "borg";
|
||||
};
|
||||
|
||||
clientone =
|
||||
{ config, pkgs, ... }:
|
||||
let
|
||||
dependencies = [
|
||||
clan-core
|
||||
pkgs.stdenv.drvPath
|
||||
] ++ builtins.map (i: i.outPath) (builtins.attrValues clan-core.inputs);
|
||||
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
|
||||
|
||||
in
|
||||
{
|
||||
|
||||
services.openssh.enable = true;
|
||||
|
||||
users.users.root.openssh.authorizedKeys.keyFiles = [ ../assets/ssh/pubkey ];
|
||||
|
||||
clan.core.networking.targetHost = config.networking.hostName;
|
||||
|
||||
environment.systemPackages = [ clan-core.packages.${pkgs.system}.clan-cli ];
|
||||
|
||||
environment.etc.install-closure.source = "${closureInfo}/store-paths";
|
||||
nix.settings = {
|
||||
substituters = pkgs.lib.mkForce [ ];
|
||||
hashed-mirrors = null;
|
||||
connect-timeout = pkgs.lib.mkForce 3;
|
||||
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
|
||||
};
|
||||
system.extraDependencies = dependencies;
|
||||
|
||||
clan.core.state.test-backups.folders = [ "/var/test-backups" ];
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
import json
|
||||
start_all()
|
||||
|
||||
machines = [clientone, serverone]
|
||||
|
||||
for m in machines:
|
||||
m.systemctl("start network-online.target")
|
||||
|
||||
for m in machines:
|
||||
m.wait_for_unit("network-online.target")
|
||||
|
||||
# dummy data
|
||||
clientone.succeed("mkdir -p /var/test-backups /var/test-service")
|
||||
clientone.succeed("echo testing > /var/test-backups/somefile")
|
||||
|
||||
clientone.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../assets/ssh/privkey} /root/.ssh/id_ed25519")
|
||||
clientone.succeed("${pkgs.coreutils}/bin/touch /root/.ssh/known_hosts")
|
||||
clientone.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new localhost hostname")
|
||||
clientone.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new $(hostname) hostname")
|
||||
|
||||
# create
|
||||
clientone.succeed("borgbackup-create >&2")
|
||||
clientone.wait_until_succeeds("! systemctl is-active borgbackup-job-serverone >&2")
|
||||
|
||||
# list
|
||||
backup_id = json.loads(clientone.succeed("borg-job-serverone list --json"))["archives"][0]["archive"]
|
||||
out = clientone.succeed("borgbackup-list").strip()
|
||||
print(out)
|
||||
assert backup_id in out, f"backup {backup_id} not found in {out}"
|
||||
|
||||
# borgbackup restore
|
||||
clientone.succeed("rm -f /var/test-backups/somefile")
|
||||
clientone.succeed(f"NAME='serverone::borg@serverone:.::{backup_id}' borgbackup-restore >&2")
|
||||
assert clientone.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
|
||||
machine.systemctl("start --wait borgbackup-job-test.service")
|
||||
assert "machine-test" in machine.succeed("BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes /run/current-system/sw/bin/borg-job-test list")
|
||||
'';
|
||||
}
|
||||
)
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
[
|
||||
{
|
||||
"publickey": "age1tyyx2ratu8s9ugyre36xyksnquth9gxeh7wjdhvsk89rtf8yu5wq0pk04c",
|
||||
"type": "age"
|
||||
}
|
||||
]
|
||||
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:wCKoKuJo4uXycfqEUYAXDlRRMGJaWgOFiaQa4Wigs0jx1eCI80lP3cEZ1QKyrU/9m9POoZz0JlaKHcuhziTKUqaevHvGfVq2y00=,iv:pH5a90bJbK9Ro6zndNJ18qd4/rU+Tdm+y+jJZtY7UGg=,tag:9lHZJ9C/zIfy8nFrYt9JBQ==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBwUDhpd1ZqbWFqR0I3dVFI\nOHlyZnFUYXJnWElrRWhoUHVNMzdKd0VrcGdRCkphQVhuYzlJV0p1MG9MSW5ncWJ3\nREp1OEJxMzQzS2MxTk9aMkJ1a3B0Q0kKLS0tIENweVJ2Tk1yeXlFc2F5cTNIV3F3\nTkRFOVZ1amRIYmg1K3hGWUFSTTl4Wk0KHJRJ7756Msod7Bsmn9SgtwRo53B8Ilp3\nhsAPv+TtdmOD8He9MvGV+BElKEXCsLUwhp/Py6n6CJCczu0VIr8owg==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-05-20T13:33:56Z",
|
||||
"mac": "ENC[AES256_GCM,data:FyfxXhnI6o4SVGJY2e1eMDnfkbMWiCkP4JL/G4PQvzz+c7OIuz8xaa03P3VW7b7o85NP2Tln4FMNTZ0FYtQwd0kKypLUnIxAHsixAHFCv4X8ul1gtZynzgbFbmc0GkfVWW8Lf+U+vvDwT+UrEVfcmksCjdvAOwP26PvlEhYEkSw=,iv:H+VrWYL+kLOLezCZrI8ZgeCsaUdpb7LxDMiLotezVPs=,tag:B/cbPdiEFumGKQHby5inCA==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../users/admin
|
||||
@@ -1,4 +0,0 @@
|
||||
{
|
||||
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"type": "age"
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../../sops/machines/clientone
|
||||
@@ -1,19 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:52vY68gqbwiZRMUBKc9SeXR06fuKAhuAPciLpxXgEOxI,iv:Y34AVoHaZzRiFFTDbekXP1X3W8zSXJmzVCYODYkdxnY=,tag:8WQaGEHQKT/n+auHUZCE0w==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBOdUFUZUZ2M00zTGlhNjF4\nL0VlMVY4Z2xMbWRWR29zZlFwdm1XRk12NGtBCnkrb3A4M3BkalMyeWdDaUdQdStt\nUWY3SXJROXdpRzN0NlBJNEpjTEZ0aFkKLS0tIGZkMGhsTXB2RnRqVHVrUFQwL2lw\nZnBreWhWa3Jrcm4yOXBiaUlPWFM1aDAKRE+Zzrja7KeANEJUbmFYuVoO3qGyi4iH\n0cfH0W8irRe9vsKMXz7YJxtByYLwRulrT8tXtElHvIEVJG0mwwaf0Q==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1tyyx2ratu8s9ugyre36xyksnquth9gxeh7wjdhvsk89rtf8yu5wq0pk04c",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBsNEljUFdnQ0tTQ1IxZ2Zo\nYkc4V2dCaUk0YXh5SzlSazhsRTVKVzFvVXhFCkRyMlMxR3EyWEZIRzFQV3d2dVpz\na3NPbk9XdWR1NmtMQlZsNlBuU0NkQWMKLS0tIDlDYzMzOExVL1g5SVRHYlpUQlBV\na2lpdTUwaEd4OXhWUWxuV04xRVVKNHcK9coohAD1IoarLOXSGg3MIRXQ3BsTIA4y\nKrcS/PxITKJs7ihg93RZin70R79Qsij1RHZLKGfgGJ67i8ZCxc4N0g==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-05-20T13:33:59Z",
|
||||
"mac": "ENC[AES256_GCM,data:eABMaIe07dwAMMlgrIUUpfpj73q1H5Keafql91MBQ5NN9Znr5lI/ennQsQsuLO8ZTCC34US/MJndliW34SqVM9y53p0jjPzqBxSKYq74iNcBz7+TxbjlY1aapgTRPr6Ta8I/5loohnxlHqjvLL70ZzfbChDN0/4jZsDVXYNfbIk=,iv:41Mz2u40JN0iE5zPUK6siaxo0rTtlk7fGWq7TF5NyUI=,tag:1A+h6XPH7DeQ6kxGDV3PgQ==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../../sops/users/admin
|
||||
@@ -1 +0,0 @@
|
||||
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE3clYF6BDZ0PxfDdprx7YYM4U4PKEZkWUuhpre0wb7w nixbld@kiwi
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../../sops/machines/clientone
|
||||
@@ -1,19 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:tAjfBW75XDS8lfJCf/+9rPYH3aMjRX1nmdN5dPMxnrlhuEPM3Smv9AM93Tz36k7BKk31bUWcV/99ax+KaIK1Rzgym/CwKGGxIUziuVOEOwrCOBeOw7amZ9YGsgiLUTLIhoeO6SjfdZ4q2JxGPw7KqNfUM9kiZT01vx5JTLa24JdvBKpizbtHRlL1lappTRVt0dG2WhT9/YhQUGu9ZFqPs8+bPOBclc78qjCm2DAPgsprK+JCBuq+r+qHgAx4Ee1QHI7FC39e5NeGBTBeZfZ5d95+0klKuTx9FCPs6QRBkQ0tN29OpwzkdSuRAXGGHpzPkZ+FupbETtSQWCmnjma6jPzEl8oDUTWooKK0mUEz8icvTQvRfyM3Qt3mQpkX3e0rTEbZzoLdWCwTufP/tRQNDCWvI/NV7OjIHpNPjymqE5uPmiBpA6y6hhCH7zL1eDo11ICSIX3hkyFJH2svvFQn6oLrPAoByvNutfetKhd8z7NFpVeIOWwtuPzO7wU5M7zESHww0JF78vjFwimQYYhQ,iv:fVjeVez4dTGSrANi5ZeP9PJhsSySqeqqJzBDbd0gFW4=,tag:Aa89+bWLljxV1tlSHtpddw==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBVaW94M3VwcFJ2elcrRGlv\nUGdzVk9vU2ZweFpIVVlIRUEyRVlSMlEyeHpVCnJuV0xIS3hMLy9IbG92S0pvL2RP\nL0J0WkVuWVhQdldHekdYNTVXdFkrUlEKLS0tIFQzdGErZVBwQUFNMXErbDBQVURZ\naHlsY2hDa1Zud1E2dFh0ZHl4VEJ2S0kKVABqwRcCUTcsBInfo9CpFtoM3kl4KMyU\nGXDjHOSjlX5df7OKZAvYukgX7Q2penvq+Fq4fa4A1Cmkqga7cHdJ+A==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1tyyx2ratu8s9ugyre36xyksnquth9gxeh7wjdhvsk89rtf8yu5wq0pk04c",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBnbHRSVEg3Vi9qTnAwWGF6\nbEdIR2gvZ2laZnJMbVF3NjcvN25OdXF3WXowCnVUODdEa1NWU3JISXlrNldOMjVi\ndUlMTVdBaWxvZHlwSTdJY3NCcll4SjAKLS0tIEp6ZVlDTklqVXdNYzJ2dElCR21o\nUWphMDdyVVppVnFHOVlHZTNtajZzOXMKRB61lUrAkUXSYl3ffOOK8k4QgLA4bFln\naQ7GOol8f8W5H68zXBMZrhjP6k4kZDfknc9jgyoWM7jaZNSWC5J19Q==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-05-20T13:33:59Z",
|
||||
"mac": "ENC[AES256_GCM,data:NjVpDweqxTSQGt9VKR/CMfvbvHQJHCi8P7XbOuKLZKQ4GVoeZ5r4PsC6nxKHHikN6YL1oJCmaSxr0mJRk/sFZg/+wdW8L7F5aQeFRiWo9jCjH0MDMnfiu5a0xjRt21uPl/7LUJ9jNon5nyxPTlZMeYSvTP2Q9spnNuN8vqipP68=,iv:DPvbN9IvWiUfxiJk6mey/us8N1GGVJcSJrT8Bty4kB4=,tag:+emK8uSkfIGUXoYpaWeu3A==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../../sops/users/admin
|
||||
@@ -1,6 +0,0 @@
|
||||
{ fetchgit }:
|
||||
fetchgit {
|
||||
url = "https://git.clan.lol/clan/clan-core.git";
|
||||
rev = "28131afbbcd379a8ff04c79c66c670ef655ed889";
|
||||
sha256 = "1294cwjlnc341fl6zbggn4rgq8z33gqkcyggjfvk9cf7zdgygrf6";
|
||||
}
|
||||
@@ -1,44 +1,19 @@
|
||||
(
|
||||
(import ../lib/container-test.nix) (
|
||||
{ ... }:
|
||||
{
|
||||
name = "container";
|
||||
name = "secrets";
|
||||
|
||||
nodes.machine1 =
|
||||
nodes.machine =
|
||||
{ ... }:
|
||||
{
|
||||
networking.hostName = "machine1";
|
||||
networking.hostName = "machine";
|
||||
services.openssh.enable = true;
|
||||
services.openssh.startWhenNeeded = false;
|
||||
};
|
||||
|
||||
nodes.machine2 =
|
||||
{ ... }:
|
||||
{
|
||||
networking.hostName = "machine2";
|
||||
services.openssh.enable = true;
|
||||
services.openssh.startWhenNeeded = false;
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
import subprocess
|
||||
start_all()
|
||||
machine1.succeed("systemctl status sshd")
|
||||
machine2.succeed("systemctl status sshd")
|
||||
machine1.wait_for_unit("sshd")
|
||||
machine2.wait_for_unit("sshd")
|
||||
|
||||
p1 = subprocess.run(["ip", "a"], check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
assert p1.returncode == 0
|
||||
bridge_output = p1.stdout.decode("utf-8")
|
||||
assert "br0" in bridge_output, f"bridge not found in ip a output: {bridge_output}"
|
||||
|
||||
for m in [machine1, machine2]:
|
||||
out = machine1.succeed("ip addr show eth1")
|
||||
assert "UP" in out, f"UP not found in ip addr show output: {out}"
|
||||
assert "inet" in out, f"inet not found in ip addr show output: {out}"
|
||||
assert "inet6" in out, f"inet6 not found in ip addr show output: {out}"
|
||||
|
||||
machine1.succeed("ping -c 1 machine2")
|
||||
machine.succeed("systemctl status sshd")
|
||||
machine.wait_for_unit("sshd")
|
||||
'';
|
||||
}
|
||||
)
|
||||
|
||||
@@ -1,89 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
nixosLib,
|
||||
clan-core,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
machines = [
|
||||
"admin"
|
||||
"peer"
|
||||
"signer"
|
||||
];
|
||||
in
|
||||
nixosLib.runTest (
|
||||
{ ... }:
|
||||
{
|
||||
imports = [
|
||||
clan-core.modules.nixosTest.clanTest
|
||||
];
|
||||
|
||||
hostPkgs = pkgs;
|
||||
name = "service-data-mesher";
|
||||
|
||||
clan = {
|
||||
directory = ./.;
|
||||
inventory = {
|
||||
machines = lib.genAttrs machines (_: { });
|
||||
services = {
|
||||
data-mesher.default = {
|
||||
roles.peer.machines = [ "peer" ];
|
||||
roles.admin.machines = [ "admin" ];
|
||||
roles.signer.machines = [ "signer" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
defaults =
|
||||
{ config, ... }:
|
||||
{
|
||||
environment.systemPackages = [
|
||||
config.services.data-mesher.package
|
||||
];
|
||||
|
||||
clan.data-mesher.network.interface = "eth1";
|
||||
clan.data-mesher.bootstrapNodes = [
|
||||
"[2001:db8:1::1]:7946" # peer1
|
||||
"[2001:db8:1::2]:7946" # peer2
|
||||
];
|
||||
|
||||
# speed up for testing
|
||||
services.data-mesher.settings = {
|
||||
cluster.join_interval = lib.mkForce "2s";
|
||||
cluster.push_pull_interval = lib.mkForce "5s";
|
||||
};
|
||||
};
|
||||
|
||||
nodes = {
|
||||
admin.clan.data-mesher.network.tld = "foo";
|
||||
};
|
||||
|
||||
# TODO Add better test script.
|
||||
testScript = ''
|
||||
|
||||
def resolve(node, success = {}, fail = [], timeout = 60):
|
||||
for hostname, ips in success.items():
|
||||
for ip in ips:
|
||||
node.wait_until_succeeds(f"getent ahosts {hostname} | grep {ip}", timeout)
|
||||
|
||||
for hostname in fail:
|
||||
node.wait_until_fails(f"getent ahosts {hostname}")
|
||||
|
||||
start_all()
|
||||
|
||||
admin.wait_for_unit("data-mesher")
|
||||
signer.wait_for_unit("data-mesher")
|
||||
peer.wait_for_unit("data-mesher")
|
||||
|
||||
# check dns resolution
|
||||
for node in [admin, signer, peer]:
|
||||
resolve(node, {
|
||||
"admin.foo": ["2001:db8:1::1", "192.168.1.1"],
|
||||
"peer.foo": ["2001:db8:1::2", "192.168.1.2"],
|
||||
"signer.foo": ["2001:db8:1::3", "192.168.1.3"]
|
||||
})
|
||||
'';
|
||||
}
|
||||
)
|
||||
@@ -1,4 +0,0 @@
|
||||
{
|
||||
"publickey": "age10zxkj45fah3qa8uyg3a36jsd06d839xfq64nrez9etrsf4km0gtsp45gsz",
|
||||
"type": "age"
|
||||
}
|
||||
@@ -1,4 +0,0 @@
|
||||
{
|
||||
"publickey": "age1faqrml2ukc6unfm75d3v2vnaf62v92rdxaagg3ty3cfna7vt99gqlzs43l",
|
||||
"type": "age"
|
||||
}
|
||||
@@ -1,4 +0,0 @@
|
||||
{
|
||||
"publickey": "age153mke8v2qksyqjc7vta7wglzdqr5epazt83nch0ur5v7kl87cfdsr07qld",
|
||||
"type": "age"
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:7xyb6WoaN7uRWEO8QRkBw7iytP5hFrA94VRi+sy/UhzqT9AyDPmxB/F8ASFsBbzJUwi0Oqd2E1CeIYRoDhG7JHnDyL2bYonz2RQ=,iv:slh3x774m6oTHAXFwcen1qF+jEchOKCyNsJMbNhqXHE=,tag:wtK8H8PZCESPA1vZCd7Ptw==,type:str]",
|
||||
"sops": {
|
||||
"kms": null,
|
||||
"gcp_kms": null,
|
||||
"azure_kv": null,
|
||||
"hc_vault": null,
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBPTzZ4RTVNb2I1MTBRMEcy\neU1Eek9GakkydEJBVm9kR3AyY1pEYkorNUYwCkh2WHhNQmc1eWI2cCtEUFFWdzJq\nS0FvQWtoOFkzRVBxVzhuczc0aVprbkkKLS0tIFRLdmpnbzY1Uk9LdklEWnQzZHM2\nVEx3dzhMSnMwaWE0V0J6VTZ5ZVFYMjgKdaICa/hprHxhH89XD7ri0vyTT4rM+Si0\niHcQU4x64dgoJa4gKxgr4k9XncjoNEjJhxL7i/ZNZ5deaaLRn5rKMg==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-04-08T13:24:55Z",
|
||||
"mac": "ENC[AES256_GCM,data:TJWDHGSRBfOCW8Q+t3YxG3vlpf9a5u7B27AamnOk95huqIv0htqWV3RuV7NoOZ5v2ijqSe/pLfpwrmtdhO2sUBEvhdhJm8UzLShP7AbH9lxV+icJOsY7VSrp+R5W526V46ONP6p47b7fOQBbp03BMz01G191N68WYOf6k2arGxU=,iv:nEyTBwJ2EA+OAl8Ulo5cvFX6Ow2FwzTWooF/rdkPiXg=,tag:oYcG16zR+Fb5XzVsHhq2Qw==,type:str]",
|
||||
"pgp": null,
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.9.4"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../users/admin
|
||||
@@ -1,20 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:JOOhvl0clDD/b5YO45CXR3wVopBSNe9dYBG+p5iD+nniN2OgOwBgYPNSCVtc+NemqutD12hFUSfCzXidkv0ijhD1JZeLar9Ygxc=,iv:XctQwSYSvKhDRk/XMacC9uMydZ8e9hnhpoWTgyXiFI0=,tag:foAhBlg4DwpQU2G9DzTo5g==,type:str]",
|
||||
"sops": {
|
||||
"kms": null,
|
||||
"gcp_kms": null,
|
||||
"azure_kv": null,
|
||||
"hc_vault": null,
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBBVWMvWkp5TnZQcGs5Ykhp\nWC91YkoyZERqdXpxQm5JVmRhaUhueEJETDJVCkM4V0hSYldkV1U2Q0d1TGh3eGNR\nVjJ1VFd6ZEN0SXZjSVEvcnV2WW0vbVUKLS0tIFRCNW9nWHdYaUxLSVVUSXM0OGtN\nVFMzRXExNkYxcFE3QWlxVUM3ay9INm8KV6r8ftpwarly3qXoU9y8KxKrUKLvP9KX\nGsP0pORsaM+qPMsdfEo35CqhAeQu0+6DWd7/67+fUMp6Jr0DthtTmg==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-04-08T13:25:28Z",
|
||||
"mac": "ENC[AES256_GCM,data:scY9+/fcXhfHEdrsZJLOM6nfjpRaURgTVbCRepUjhUo24B4ByEsAo2B8psVAaGEHEsFRZuoiByqrGzKhyUASmUs+wn+ziOKBTLzu55fOakp8PWYtQ4miiz2TQffp80gCQRJpykcbUgqIKXNSNutt4tosTBL7osXwCEnEQWd+SaA=,iv:1VXNvLP6DUxZYEr1juOLJmZCGbLp33DlwhxHQV9AMD4=,tag:uFM1R8OmkFS74/zkUG0k8A==,type:str]",
|
||||
"pgp": null,
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.9.4"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../users/admin
|
||||
@@ -1,20 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:i1YBJdK8XmWnVnZKBpmWggSN8JSOr8pm2Zx+CeE8qqeLZ7xwMO8SYCutM8l94M5vzmmX0CmwzeMZ/JVPbEwFd3ZAImUfh685HOY=,iv:N4rHNaX+WmoPb0EZPqMt+CT1BzaWO9LyoemBxKn+u/s=,tag:PnzSvdGwVnTMK8Do8VzFaQ==,type:str]",
|
||||
"sops": {
|
||||
"kms": null,
|
||||
"gcp_kms": null,
|
||||
"azure_kv": null,
|
||||
"hc_vault": null,
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB4RXlmcVNGTnlkY2ZqZFlH\nVnh0eHhRNE5hRDNDVkt0TEE0bmRNN2JIVkN3CkxnaGM4Y3M3a0xoK2xMRzBLMHRV\nT1FzKzNRMFZOeWc2K3E5K2FzdUsvWmsKLS0tIENtVlFSWElHN3RtOUY2alhxajhs\naXI1MmR4WC9EVGVFK3dHM1gvVnlZMVUKCyLz0DkdbWfSfccShO1xjWfxhunEIbD0\n6imeIBhZHvVJmZLXnVl7B0pNXo6be7WSBMAUM9gUtCNh4zaChBNwGw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-04-08T13:25:52Z",
|
||||
"mac": "ENC[AES256_GCM,data:WFGysoXN95e/RxL094CoL4iueqEcSqCSQZLahwz9HMLi+8HWZIXr55a+jyK7piqR8nBS4BquU5fKhlC6BvEbZFt69t4onTA+LxS3D7A8/TO0CWS0RymUjW9omJUseRQWwAHtE7l0qI5hdOUKhQ+o5pU+2bc3PUlaONM0aOCCoFo=,iv:l1f4aVqLl5VAMfjNxDbxQEQp/qY/nxzgv2GTuPVBoBA=,tag:4PPDCmDrviqdn42RLHQYbA==,type:str]",
|
||||
"pgp": null,
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.9.4"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../users/admin
|
||||
@@ -1,4 +0,0 @@
|
||||
{
|
||||
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"type": "age"
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../../sops/machines/admin
|
||||
@@ -1,24 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:w3bU23Pfe8W89lF+tOmEYPU/A4FkY6n7rgQ6yo+eqCJFxTyHydV6Mg4/g4jaL+4wwIqNYRiMR8J8jLhSvw3Bc59u7Ul+RGwdpiKoBBJfsHjO8r6uOz2u9Raa+iUJH1EJWmGvsQXAILpliZ+klS96VWnGN3pYMEI=,iv:7QbUxta6NPQLZrh6AOcNe+0wkrADuTI9VKVp8q+XoZ8=,tag:ZH0t3RylfQk5U23ZHWaw0g==,type:str]",
|
||||
"sops": {
|
||||
"kms": null,
|
||||
"gcp_kms": null,
|
||||
"azure_kv": null,
|
||||
"hc_vault": null,
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age10zxkj45fah3qa8uyg3a36jsd06d839xfq64nrez9etrsf4km0gtsp45gsz",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBKaTBoSFJVSTdZeW4wZG9p\nWFR1LzVmYS8xWmRqTlNtWFVkSW9jZXpVejJBCkpqZm12L1dDSmNhekVsK1JBOU9r\nZThScGdDakFlRzNsVXp1eE5yOStFSW8KLS0tIFRrTkZBQlRsR2VNcUJvNEkzS2pw\nNksvM296UkFWTkZDVVp1ZVZMNUs4cWsKWTteB1G9Oo38a81PeqKO09NUQetuqosC\nhrToQ6NMo5O7/StmVG228MHbJS3KLXsvh2AFOEPyZrbpB2Opd2wwoA==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6U2FWRThRNkVQdk9yZ0VE\nM09iSVhmeldMcDZVaFRDNGtjWTdBa0VIT2pJCkdtd04xSXdicDY3OHI1WXl5TndB\nemtQeW1SS2tVVllPUHhLUTRla3haZGMKLS0tIGN0NVNEN3RKeWM0azBBMnBpQU4r\nTFFzQ0lOcGt0ek9UZmZZRjhibTNTc0EKReUwYBVM1NKX0FD/ZeokFAAknwju5Azq\nGzl4UVJBi5Es0GWORdCGElPXMd7jMud1SwgY04AdZj/dzinCSW4CZw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-04-08T13:25:10Z",
|
||||
"mac": "ENC[AES256_GCM,data:0vl9Gt4QeH+GJcnl8FuWSaqQXC8S6Pe50NmeDg5Nl2NWagz8aLCvOFyTqX/Icp/bTi1XQ5icHHhF3YhM+QAvdUL3aO0WGbh92dPRnFuvlZsdtwCFhT+LyHyYHFf6yP+0h/uFpJv9fE6xY22CezA6ZVQ8ywi1epaC548Gr27uVe4=,iv:G4hZVCLkIpbg9uwB7Y8xtHLdnlmBvFrPjxSoqdyHNvM=,tag:uvKwakhUY2aa7v0tmR/o8A==,type:str]",
|
||||
"pgp": null,
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.9.4"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../../sops/users/admin
|
||||
@@ -1,3 +0,0 @@
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
MCowBQYDK2VwAyEAm204bpSFi4jOjZuXDpIZ/rcJBrbG4zAc7OSA4rAVSYE=
|
||||
-----END PUBLIC KEY-----
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../../sops/machines/peer
|
||||
@@ -1,24 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:kERPY40pyvke0mRBnafa4zOaF46rbueRbhpUCXjYP5ORpC7zoOhbdlVBhOsPqE2vfEP4RWkH+ZPdDYXOKXwotBCmlq2i7TfZeoNXFkzWXc3GyM5mndnjCc8hvYEQF1w6xkkVSUt4n06BAw/gT0ppz+vo5dExIA8=,iv:JmYD2o4DGqds6DV7ucUmUD0BRB61exbRsNAtINOR8cQ=,tag:Z58gVnHD+4s21Z84IRw+Vw==,type:str]",
|
||||
"sops": {
|
||||
"kms": null,
|
||||
"gcp_kms": null,
|
||||
"azure_kv": null,
|
||||
"hc_vault": null,
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1faqrml2ukc6unfm75d3v2vnaf62v92rdxaagg3ty3cfna7vt99gqlzs43l",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB4OFluVThBdUJSTmRVTk94\neFZnLytvcnNSdmQvR3ZkT2UvWFVieFV1SUFNCm9jWHlyZXRwaVdFaG9ocnd4S3FU\ndTZ2dklBbkFVL0hVT0Y2L1o5dnUyNG8KLS0tIGFvYlBJR3l2b3F6OU9uMTFkYjli\nNVFLOWQzOStpU2kzb0xyZUFCMnBmMVUK5Jzssf1XBX25bq0RKlJY8NwtKIytxL/c\nBPPFDZywJiUgw1izsdfGVkRhhSFCQIz+yWIJWzr01NU2jLyFjSfCNw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAzYW92c3Q4SktwSnJ1TkRJ\nZEJyZk96cG8ybkpPQzYzVk0xZGs0eCtISVR3CmhDaWxTem1FMjJKNmZNaTkxN01n\nenUvdFI1UkFmL1lzNlM5N0Ixd0dpc1EKLS0tIHpyS2VHaHRRdUovQVgvRmRHaXh3\naFpSNURjTWkxaW9TOXpKL2IvcUFEbmMKq4Ch7DIL34NetFV+xygTdcpQjjmV8v1n\nlvYcjUO/9c3nVkxNMJYGjuxFLuFc4Gw+AyawCjpsIYXRskYRW4UR1w==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-04-08T13:25:43Z",
|
||||
"mac": "ENC[AES256_GCM,data:YhL2d6i0VpUd15B4ow2BgRpyEm0KEA8NSb7jZcjI58d7d4lAqBMcDQB+8a9e2NZbPk8p1EYl3q4VXbEnuwsJiPZI2kabRusy/IGoHzUTUMFfVaOuUcC0eyINNVSmzJxnCbLCAA1Aj1yXzgRQ0MWr7r0RHMKw0D1e0HxdEsuAPrA=,iv:yPlMmE6+NEEQ9uOZzD3lUTBcfUwGX/Ar+bCu0XKnjIg=,tag:eR22BCFVAlRHdggg9oCeaA==,type:str]",
|
||||
"pgp": null,
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.9.4"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../../sops/users/admin
|
||||
@@ -1,3 +0,0 @@
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
MCowBQYDK2VwAyEAv5dICFue2fYO0Zi1IyfYjoNfR6713WpISo7+2bSjL18=
|
||||
-----END PUBLIC KEY-----
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../../sops/machines/signer
|
||||
@@ -1,24 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:U8F7clQ2Tuj8zy5EoEga/Mc9N3LLZrlFf5m7UJKrP5yybFRCJSBs05hOcNe+LQZdEAvvr0Qbkry1pQyE84gCVbxHvwkD+l3GbguBuLMsW96bHcmstb6AvZyhMDBpm73Azf4lXhNaiB8p2pDWdxV77E+PPw1MNYI=,iv:hQhN6Ak8tB6cXSCnTmmQqHEpXWpWck3uIVCk5pUqFqU=,tag:uC4ljcs92WPlUOfwSkrK9Q==,type:str]",
|
||||
"sops": {
|
||||
"kms": null,
|
||||
"gcp_kms": null,
|
||||
"azure_kv": null,
|
||||
"hc_vault": null,
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age153mke8v2qksyqjc7vta7wglzdqr5epazt83nch0ur5v7kl87cfdsr07qld",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvV05lejQrdUQvQjZPOG9v\nZ01naXlYZ1JxWHhDT1M1aUs1RWJDSU1acVFFCmdHY094aGRPYWxpdVVxSFVHRU9v\nNnVaeTlpSEdtSWRDMmVMSjdSOEQ4ZlEKLS0tIFo5NVk2bzBxYjZ5ZWpDWTMrQ2VF\nVThWUk0rVXpTY2svSCtiVDhTQ2kvbFkKEM2DBuFtdEj1G/vS1TsyIfQxSFFvPTDq\nCmO7L/J5lHdyfIXzp/FlhdKpjvmchb8gbfJn7IWpKopc7Zimy/JnGQ==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSArNzVUaHkzUzVEMlh1Q3Qr\nOEo0aDJIMG91amJiZG50MEhqblRCTWxRRVVRCk4xZlp4SkJuUHc2UnFyU1prczkz\nNGtlQlRlNnBDRFFvUGhReTh6MTBZaXMKLS0tIGxtaXhUMDM0RU4yQytualdzdTFt\nWGRiVG54MnYrR2lqZVZoT0VkbmV5WUUKbzAnOkn8RYOo7z4RISQ0yN875vSEQMDa\nnnttzVrQuK0/iZvzJ0Zq8U9+JJJKvFB1tHqye6CN0zMbv55CLLnA0g==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-04-08T13:26:07Z",
|
||||
"mac": "ENC[AES256_GCM,data:uMss4+BiVupFqX7nHnMo+0yZ8RPuFD8VHYK2EtJSqzgurQrZVT4tJwY50mz2gVmwbrm49QYKk5S+H29DU0cM0HiEOgB5P5ObpXTRJPagWQ48CEFrDpBzLplobxulwnN6jJ1dpL3JF3jfrzrnSDFXMvx+n5x/86/AYXYRsi/UeyY=,iv:mPT1svKrNGmYpbL9hh2Bxxakml69q+U6gQ0ZnEcbEyg=,tag:zcZx1lTw/bEsX/1g+6T04g==,type:str]",
|
||||
"pgp": null,
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.9.4"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../../sops/users/admin
|
||||
@@ -1,3 +0,0 @@
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
MCowBQYDK2VwAyEAeUkW5UIwA1svbNY71ePyJKX68UhxrqIUGQ2jd06w5WM=
|
||||
-----END PUBLIC KEY-----
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../sops/machines/admin
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../sops/machines/peer
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../sops/machines/signer
|
||||
@@ -1,32 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:nRlCMF58cnkdUAE2aVHEG1+vAckKtVt48Jr21Bklfbsqe1yTiHPFAMLL1ywgWWWd7FjI/Z8WID9sWzh9J8Vmotw4aJWU/rIQSeF8cJHALvfOxarJIIyb7purAiPoPPs6ggGmSmVFGB1aw8kH1JMcppQN8OItdQM=,iv:qTwaL2mgw6g7heN/H5qcjei3oY+h46PdSe3v2hDlkTs=,tag:jYNULrOPl9mcQTTrx1SDeA==,type:str]",
|
||||
"sops": {
|
||||
"kms": null,
|
||||
"gcp_kms": null,
|
||||
"azure_kv": null,
|
||||
"hc_vault": null,
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age153mke8v2qksyqjc7vta7wglzdqr5epazt83nch0ur5v7kl87cfdsr07qld",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBRcG44cGFBWXk2Z0pmNklv\nTnJ5b0svLytzZmNNRkxCVU1zaDVhNUs2cld3CklsenpWd0g2OEdKKzBMQlNEejRn\nTlEvY01HYjdvVExadnN3aXZIRTZ4YlEKLS0tIGRPUXdNSHZCRDBMbno2MjJqRHBl\nSzdiSURDYitQWFpaSElkdmdicDVjMWsKweQiRqyzXmzabmU2fmgwHtOa9uDmhx9O\ns9NfUhC3ifooQUSeYp58b1ZGJQx5O5bn9q/DaEoit5LTOUprt1pUPA==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBiTEdlL29sVWFpSDNNaXRJ\ndTJDRkU4VzFPQ0M4MkFha2IxV2FXN2o3ZEFRCjF3UnZ5U1hTc3VvSTIzcWxOZjl0\ncHlLVEFqRk1UbGdxaUxEeDFqbFVYaU0KLS0tIFFyMnJkZnRHdWg4Z1IyRHFkY0I5\nQjdIMGtGLzRGMFM0ektDZ3hzZDdHSmMKvxOQuKgePom0QfPSvn+4vsGHhJ4BoOvW\nc27Vn4/i4hbjfJr4JpULAwyIwt3F0RaTA2M6EkFkY8otEi3vkcpWvA==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age10zxkj45fah3qa8uyg3a36jsd06d839xfq64nrez9etrsf4km0gtsp45gsz",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA5ZzdsaVRnSmsrMGR1Ylg3\nZkpscTdwNUl5NUVXN3kvMU1icE0yZU1WSEJBClB6SlJYZUhDSElRREx5b0VueFUw\nNVFRU3BSU24yWEtpRnJoUC83SDVaUWsKLS0tIGVxNEo3TjlwakpDZlNsSkVCOXlz\nNDgwaE1xNjZkSnJBVlU5YXVHeGxVNFEKsXKyTzq9VsERpXzbFJGv/pbAghFAcXkf\nMmCgQHsfIMBJQUstcO8sAkxv3ced0dAEz8O6NUd0FS2zlhBzt29Rnw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1faqrml2ukc6unfm75d3v2vnaf62v92rdxaagg3ty3cfna7vt99gqlzs43l",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBkK1hDMGxCc1IvYXlJMnBF\nWncxaXBQa1RpTWdwUHc3Yk16My8rVHNJc2dFCkNlK2h0dy9oU3Z5ZGhwRWVLYVUz\ncVBKT2x5VnlhbXNmdHkwbmZzVG5sd0EKLS0tIHJaMzhDanF4Rkl3akN4MEIxOHFC\nYWRUZ08xb1UwOFNRaktkMjIzNXZmNkUK1rlbJ96oUNQZLmCmPNDOKxfDMMa+Bl2E\nJPxcNc7XY3WBHa3xFUbcqiPxWxDyaZjhq/LYQGpepiGonGMEzR5JOQ==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-04-08T13:25:20Z",
|
||||
"mac": "ENC[AES256_GCM,data:za9ku+9lu1TTRjbPcd5LYDM4tJsAYF/yuWFCGkAhqcYguEducsIfoKBwL42ahAzqLjCZp91YJuINtw16mM+Hmlhi/BVwhnXNHqcfnKoAS/zg9KJvWcvXwKMmjEjaBovqaCWXWoKS7dn/wZ7nfGrlsiUilCDkW4BzTIzkqNkyREU=,iv:2X9apXMatwCPRBIRbPxz6PJQwGrlr7O+z+MrsnFq+sQ=,tag:IYvitoV4MhyJyRO1ySxbLQ==,type:str]",
|
||||
"pgp": null,
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.9.4"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
../../../../../sops/users/admin
|
||||
@@ -1,3 +0,0 @@
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
MCowBQYDK2VwAyEA/5j+Js7oxwWvZdfjfEO/3UuRqMxLKXsaNc3/5N2WSaw=
|
||||
-----END PUBLIC KEY-----
|
||||
29
checks/deltachat/default.nix
Normal file
29
checks/deltachat/default.nix
Normal file
@@ -0,0 +1,29 @@
|
||||
(import ../lib/container-test.nix) (
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
name = "secrets";
|
||||
|
||||
nodes.machine =
|
||||
{ self, ... }:
|
||||
{
|
||||
imports = [
|
||||
self.clanModules.deltachat
|
||||
self.nixosModules.clanCore
|
||||
{
|
||||
clanCore.machineName = "machine";
|
||||
clanCore.clanDir = ./.;
|
||||
}
|
||||
];
|
||||
};
|
||||
testScript = ''
|
||||
start_all()
|
||||
machine.wait_for_unit("maddy")
|
||||
# imap
|
||||
machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 143")
|
||||
# smtp submission
|
||||
machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 587")
|
||||
# smtp
|
||||
machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 25")
|
||||
'';
|
||||
}
|
||||
)
|
||||
@@ -1,22 +0,0 @@
|
||||
{ ... }:
|
||||
{
|
||||
perSystem =
|
||||
{ self', pkgs, ... }:
|
||||
{
|
||||
checks.devshell =
|
||||
pkgs.runCommand "check-devshell-not-depends-on-clan-cli"
|
||||
{
|
||||
exportReferencesGraph = [
|
||||
"graph"
|
||||
self'.devShells.default
|
||||
];
|
||||
}
|
||||
''
|
||||
if grep -q "${self'.packages.clan-cli}" ./graph; then
|
||||
echo "devshell depends on clan-cli, which is not allowed";
|
||||
exit 1;
|
||||
fi
|
||||
mkdir $out
|
||||
'';
|
||||
};
|
||||
}
|
||||
@@ -1,122 +0,0 @@
|
||||
{
|
||||
...
|
||||
}:
|
||||
{
|
||||
perSystem =
|
||||
{
|
||||
system,
|
||||
pkgs,
|
||||
self',
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
clanCore = self'.packages.clan-core-flake;
|
||||
clanCoreHash = lib.substring 0 12 (builtins.hashString "sha256" "${clanCore}");
|
||||
/*
|
||||
construct a flake for the test which contains a single check which depends
|
||||
on all checks of clan-core.
|
||||
*/
|
||||
testFlakeFile = pkgs.writeText "flake.nix" ''
|
||||
{
|
||||
inputs.clan-core.url = path:///to/nowhere;
|
||||
outputs = {clan-core, ...}:
|
||||
let
|
||||
checks =
|
||||
builtins.removeAttrs
|
||||
clan-core.checks.${system}
|
||||
[
|
||||
"dont-depend-on-repo-root"
|
||||
"package-dont-depend-on-repo-root"
|
||||
"package-clan-core-flake"
|
||||
];
|
||||
checksOutPaths = map (x: "''${x}") (builtins.attrValues checks);
|
||||
in
|
||||
{
|
||||
checks.${system}.check = builtins.derivation {
|
||||
name = "all-clan-core-checks";
|
||||
system = "${system}";
|
||||
builder = "/bin/sh";
|
||||
args = ["-c" '''
|
||||
of outPath in ''${toString checksOutPaths}; do
|
||||
echo "$outPath" >> $out
|
||||
done
|
||||
'''];
|
||||
};
|
||||
};
|
||||
}
|
||||
'';
|
||||
in
|
||||
lib.optionalAttrs (system == "x86_64-linux") {
|
||||
packages.dont-depend-on-repo-root =
|
||||
pkgs.runCommand
|
||||
# append repo hash to this tests name to ensure it gets invalidated on each chain
|
||||
# This is needed because this test is an FOD (due to networking) and would get cached indefinitely.
|
||||
"check-dont-depend-on-repo-root-${clanCoreHash}"
|
||||
{
|
||||
buildInputs = [
|
||||
pkgs.nix
|
||||
pkgs.cacert
|
||||
pkgs.nix-diff
|
||||
];
|
||||
outputHashAlgo = "sha256";
|
||||
outputHash = "sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=";
|
||||
}
|
||||
''
|
||||
mkdir clanCore testFlake store
|
||||
clanCore=$(realpath clanCore)
|
||||
testFlake=$(realpath testFlake)
|
||||
|
||||
# copy clan core flake and make writable
|
||||
cp -r ${clanCore}/* clanCore/
|
||||
chmod +w -R clanCore
|
||||
|
||||
# copy test flake and make writable
|
||||
cp ${testFlakeFile} testFlake/flake.nix
|
||||
chmod +w -R testFlake
|
||||
|
||||
# enable flakes
|
||||
export NIX_CONFIG="experimental-features = nix-command flakes"
|
||||
|
||||
# give nix a $HOME
|
||||
export HOME=$(realpath ./store)
|
||||
|
||||
# override clan-core flake input to point to $clanCore
|
||||
echo "locking clan-core to $clanCore"
|
||||
nix flake lock --override-input clan-core "path://$clanCore" "$testFlake" --store "$HOME"
|
||||
|
||||
# evaluate all tests
|
||||
echo "evaluating all tests for clan core"
|
||||
nix eval "$testFlake"#checks.${system}.check.drvPath --store "$HOME" --raw > drvPath1 &
|
||||
|
||||
# slightly modify clan core
|
||||
cp -r $clanCore clanCore2
|
||||
cp -r $testFlake testFlake2
|
||||
export clanCore2=$(realpath clanCore2)
|
||||
export testFlake2=$(realpath testFlake2)
|
||||
touch clanCore2/fly-fpv
|
||||
|
||||
# re-evaluate all tests
|
||||
echo "locking clan-core to $clanCore2"
|
||||
nix flake lock --override-input clan-core "path://$clanCore2" "$testFlake2" --store "$HOME"
|
||||
echo "evaluating all tests for clan core with added file"
|
||||
nix eval "$testFlake2"#checks.${system}.check.drvPath --store "$HOME" --raw > drvPath2
|
||||
|
||||
# wait for first nix eval to return as well
|
||||
while ! grep -q drv drvPath1; do sleep 1; done
|
||||
|
||||
# raise error if outputs are different
|
||||
if [ "$(cat drvPath1)" != "$(cat drvPath2)" ]; then
|
||||
echo -e "\n\nERROR: Something in clan-core depends on the whole repo" > /dev/stderr
|
||||
echo -e "See details in the nix-diff below which shows the difference between two evaluations:"
|
||||
echo -e " 1. Evaluation of clan-core checks without any changes"
|
||||
echo -e " 1. Evaluation of clan-core checks after adding a file to the top-level of the repo"
|
||||
echo "nix-diff:"
|
||||
export NIX_REMOTE="$HOME"
|
||||
nix-diff $(cat drvPath1) $(cat drvPath2)
|
||||
exit 1
|
||||
fi
|
||||
touch $out
|
||||
'';
|
||||
};
|
||||
}
|
||||
@@ -1,160 +1,64 @@
|
||||
{ self, ... }:
|
||||
{
|
||||
self,
|
||||
lib,
|
||||
inputs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
inherit (lib)
|
||||
attrNames
|
||||
attrValues
|
||||
elem
|
||||
filter
|
||||
filterAttrs
|
||||
flip
|
||||
genAttrs
|
||||
hasPrefix
|
||||
pathExists
|
||||
;
|
||||
nixosLib = import (self.inputs.nixpkgs + "/nixos/lib") { };
|
||||
in
|
||||
{
|
||||
imports = filter pathExists [
|
||||
./backups/flake-module.nix
|
||||
../nixosModules/clanCore/machine-id/tests/flake-module.nix
|
||||
../nixosModules/clanCore/state-version/tests/flake-module.nix
|
||||
./devshell/flake-module.nix
|
||||
./flash/flake-module.nix
|
||||
imports = [
|
||||
./impure/flake-module.nix
|
||||
./backups/flake-module.nix
|
||||
./installation/flake-module.nix
|
||||
./morph/flake-module.nix
|
||||
./nixos-documentation/flake-module.nix
|
||||
./dont-depend-on-repo-root.nix
|
||||
./flash/flake-module.nix
|
||||
];
|
||||
flake.check = genAttrs [ "x86_64-linux" "aarch64-darwin" ] (
|
||||
system:
|
||||
let
|
||||
checks = flip filterAttrs self.checks.${system} (
|
||||
name: _check:
|
||||
!(hasPrefix "nixos-test-" name)
|
||||
&& !(hasPrefix "nixos-" name)
|
||||
&& !(hasPrefix "darwin-test-" name)
|
||||
&& !(hasPrefix "service-" name)
|
||||
&& !(hasPrefix "vars-check-" name)
|
||||
&& !(hasPrefix "devShell-" name)
|
||||
&& !(elem name [
|
||||
"clan-core-for-checks"
|
||||
"clan-deps"
|
||||
])
|
||||
);
|
||||
in
|
||||
inputs.nixpkgs.legacyPackages.${system}.runCommand "fast-flake-checks-${system}"
|
||||
{ passthru.checks = checks; }
|
||||
''
|
||||
echo "Executed the following checks for ${system}..."
|
||||
echo " - ${lib.concatStringsSep "\n" (map (n: " - " + n) (attrNames checks))}"
|
||||
echo ${toString (attrValues checks)} >/dev/null
|
||||
echo "All checks succeeded"
|
||||
touch $out
|
||||
''
|
||||
);
|
||||
perSystem =
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
self',
|
||||
system,
|
||||
...
|
||||
}:
|
||||
{
|
||||
checks =
|
||||
let
|
||||
# ensure all options can be rendered after importing clan into nixos
|
||||
renderClanOptions =
|
||||
let
|
||||
docs = pkgs.nixosOptionsDoc {
|
||||
options =
|
||||
(pkgs.nixos {
|
||||
imports = [ self.nixosModules.clanCore ];
|
||||
clanCore.clanDir = ./.;
|
||||
}).options;
|
||||
warningsAreErrors = false;
|
||||
};
|
||||
in
|
||||
docs.optionsJSON;
|
||||
nixosTestArgs = {
|
||||
# reference to nixpkgs for the current system
|
||||
inherit pkgs lib nixosLib;
|
||||
inherit pkgs;
|
||||
# this gives us a reference to our flake but also all flake inputs
|
||||
inherit self;
|
||||
inherit (self) clanLib;
|
||||
clan-core = self;
|
||||
};
|
||||
nixosTests = lib.optionalAttrs (pkgs.stdenv.isLinux) {
|
||||
|
||||
# Base Tests
|
||||
nixos-test-secrets = self.clanLib.test.baseTest ./secrets nixosTestArgs;
|
||||
nixos-test-borgbackup-legacy = self.clanLib.test.baseTest ./borgbackup-legacy nixosTestArgs;
|
||||
nixos-test-wayland-proxy-virtwl = self.clanLib.test.baseTest ./wayland-proxy-virtwl nixosTestArgs;
|
||||
|
||||
# Container Tests
|
||||
nixos-test-container = self.clanLib.test.containerTest ./container nixosTestArgs;
|
||||
nixos-test-zt-tcp-relay = self.clanLib.test.containerTest ./zt-tcp-relay nixosTestArgs;
|
||||
nixos-test-matrix-synapse = self.clanLib.test.containerTest ./matrix-synapse nixosTestArgs;
|
||||
nixos-test-postgresql = self.clanLib.test.containerTest ./postgresql nixosTestArgs;
|
||||
nixos-test-user-firewall-iptables = self.clanLib.test.containerTest ./user-firewall/iptables.nix nixosTestArgs;
|
||||
nixos-test-user-firewall-nftables = self.clanLib.test.containerTest ./user-firewall/nftables.nix nixosTestArgs;
|
||||
|
||||
service-dummy-test = import ./service-dummy-test nixosTestArgs;
|
||||
service-dummy-test-from-flake = import ./service-dummy-test-from-flake nixosTestArgs;
|
||||
service-data-mesher = import ./data-mesher nixosTestArgs;
|
||||
# import our test
|
||||
secrets = import ./secrets nixosTestArgs;
|
||||
container = import ./container nixosTestArgs;
|
||||
deltachat = import ./deltachat nixosTestArgs;
|
||||
matrix-synapse = import ./matrix-synapse nixosTestArgs;
|
||||
zt-tcp-relay = import ./zt-tcp-relay nixosTestArgs;
|
||||
borgbackup = import ./borgbackup nixosTestArgs;
|
||||
syncthing = import ./syncthing nixosTestArgs;
|
||||
wayland-proxy-virtwl = import ./wayland-proxy-virtwl nixosTestArgs;
|
||||
};
|
||||
|
||||
packagesToBuild = lib.removeAttrs self'.packages [
|
||||
# exclude the check that checks that nothing depends on the repo root
|
||||
# We might want to include this later once everything is fixed
|
||||
"dont-depend-on-repo-root"
|
||||
];
|
||||
schemaTests = pkgs.callPackages ./schemas.nix { inherit self; };
|
||||
|
||||
flakeOutputs =
|
||||
lib.mapAttrs' (
|
||||
name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
|
||||
) (lib.filterAttrs (n: _: !lib.hasPrefix "test-" n) self.nixosConfigurations)
|
||||
// lib.mapAttrs' (
|
||||
name: config: lib.nameValuePair "darwin-${name}" config.config.system.build.toplevel
|
||||
) (self.darwinConfigurations or { })
|
||||
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") packagesToBuild
|
||||
) self.nixosConfigurations
|
||||
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages
|
||||
// lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
|
||||
// lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (
|
||||
self'.legacyPackages.homeConfigurations or { }
|
||||
);
|
||||
in
|
||||
nixosTests
|
||||
// flakeOutputs
|
||||
// {
|
||||
# TODO: Automatically provide this check to downstream users to check their modules
|
||||
clan-modules-json-compatible =
|
||||
let
|
||||
allSchemas = lib.mapAttrs (
|
||||
_n: m:
|
||||
let
|
||||
schema =
|
||||
(self.clanLib.evalService {
|
||||
modules = [ m ];
|
||||
prefix = [
|
||||
"checks"
|
||||
system
|
||||
];
|
||||
}).config.result.api.schema;
|
||||
in
|
||||
schema
|
||||
) self.clan.modules;
|
||||
in
|
||||
pkgs.runCommand "combined-result"
|
||||
{
|
||||
schemaFile = builtins.toFile "schemas.json" (builtins.toJSON allSchemas);
|
||||
}
|
||||
''
|
||||
mkdir -p $out
|
||||
cat $schemaFile > $out/allSchemas.json
|
||||
'';
|
||||
|
||||
clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
|
||||
cp -r ${pkgs.callPackage ./clan-core-for-checks.nix { }} $out
|
||||
chmod +w $out/flake.lock
|
||||
cp ${../flake.lock} $out/flake.lock
|
||||
'';
|
||||
};
|
||||
packages = lib.optionalAttrs (pkgs.stdenv.isLinux) {
|
||||
run-vm-test-offline = pkgs.callPackage ../pkgs/run-vm-test-offline { };
|
||||
};
|
||||
{ inherit renderClanOptions; } // nixosTests // schemaTests // flakeOutputs;
|
||||
legacyPackages = {
|
||||
nixosTests =
|
||||
let
|
||||
@@ -167,10 +71,8 @@ in
|
||||
in
|
||||
lib.optionalAttrs (pkgs.stdenv.isLinux) {
|
||||
# import our test
|
||||
nixos-test-secrets = import ./secrets nixosTestArgs;
|
||||
nixos-test-container = import ./container nixosTestArgs;
|
||||
# Clan app tests
|
||||
nixos-test-app-ocr = self.clanLib.test.baseTest ./app-ocr nixosTestArgs;
|
||||
secrets = import ./secrets nixosTestArgs;
|
||||
container = import ./container nixosTestArgs;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@@ -1,63 +1,26 @@
|
||||
{ self, ... }:
|
||||
{
|
||||
config,
|
||||
self,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
clan.machines = lib.listToAttrs (
|
||||
lib.map (
|
||||
system:
|
||||
lib.nameValuePair "test-flash-machine-${system}" {
|
||||
clan.core.networking.targetHost = "test-flash-machine";
|
||||
fileSystems."/".device = lib.mkDefault "/dev/vda";
|
||||
boot.loader.grub.device = lib.mkDefault "/dev/vda";
|
||||
|
||||
# We need to use `mkForce` because we inherit from `test-install-machine`
|
||||
# which currently hardcodes `nixpkgs.hostPlatform`
|
||||
nixpkgs.hostPlatform = lib.mkForce system;
|
||||
|
||||
imports = [ self.nixosModules.test-flash-machine ];
|
||||
}
|
||||
) (lib.filter (lib.hasSuffix "linux") config.systems)
|
||||
);
|
||||
|
||||
flake.nixosModules = {
|
||||
test-flash-machine =
|
||||
{ lib, ... }:
|
||||
{
|
||||
imports = [ self.nixosModules.test-install-machine-without-system ];
|
||||
|
||||
clan.core.vars.generators.test = lib.mkForce { };
|
||||
|
||||
disko.devices.disk.main.preCreateHook = lib.mkForce "";
|
||||
};
|
||||
};
|
||||
|
||||
perSystem =
|
||||
{
|
||||
nodes,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
dependencies = [
|
||||
pkgs.disko
|
||||
pkgs.buildPackages.xorg.lndir
|
||||
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.ConfigIniFiles
|
||||
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.FileSlurp
|
||||
|
||||
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
|
||||
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
|
||||
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript.drvPath
|
||||
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.clan.deployment.file
|
||||
|
||||
self
|
||||
pkgs.stdenv.drvPath
|
||||
self.clanInternals.machines.${pkgs.hostPlatform.system}.test_install_machine.config.system.build.toplevel
|
||||
self.clanInternals.machines.${pkgs.hostPlatform.system}.test_install_machine.config.system.build.diskoScript
|
||||
self.clanInternals.machines.${pkgs.hostPlatform.system}.test_install_machine.config.system.clan.deployment.file
|
||||
self.inputs.nixpkgs.legacyPackages.${pkgs.hostPlatform.system}.disko
|
||||
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
|
||||
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
|
||||
in
|
||||
{
|
||||
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
|
||||
nixos-test-flash = self.clanLib.test.baseTest {
|
||||
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
|
||||
flash = (import ../lib/test-base.nix) {
|
||||
name = "flash";
|
||||
nodes.target = {
|
||||
virtualisation.emptyDiskImages = [ 4096 ];
|
||||
@@ -78,10 +41,7 @@
|
||||
};
|
||||
testScript = ''
|
||||
start_all()
|
||||
|
||||
# Some distros like to automount disks with spaces
|
||||
machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdb && mount /dev/vdb "/mnt/with spaces"')
|
||||
machine.succeed("clan flash write --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdb test-flash-machine-${pkgs.hostPlatform.system}")
|
||||
machine.succeed("clan --flake ${../..} flash --debug --yes --disk main /dev/vdb test_install_machine")
|
||||
'';
|
||||
} { inherit pkgs self; };
|
||||
};
|
||||
|
||||
@@ -1,51 +1,22 @@
|
||||
{
|
||||
perSystem =
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
self',
|
||||
...
|
||||
}:
|
||||
{ pkgs, lib, ... }:
|
||||
{
|
||||
# a script that executes all other checks
|
||||
packages.impure-checks = pkgs.writeShellScriptBin "impure-checks" ''
|
||||
#!${pkgs.bash}/bin/bash
|
||||
set -euo pipefail
|
||||
|
||||
unset CLAN_DIR
|
||||
|
||||
export PATH="${
|
||||
lib.makeBinPath (
|
||||
[
|
||||
pkgs.gitMinimal
|
||||
pkgs.nix
|
||||
pkgs.coreutils
|
||||
pkgs.rsync # needed to have rsync installed on the dummy ssh server
|
||||
]
|
||||
++ self'.packages.clan-cli-full.runtimeDependencies
|
||||
)
|
||||
lib.makeBinPath [
|
||||
pkgs.gitMinimal
|
||||
pkgs.nix
|
||||
pkgs.rsync # needed to have rsync installed on the dummy ssh server
|
||||
]
|
||||
}"
|
||||
ROOT=$(git rev-parse --show-toplevel)
|
||||
cd "$ROOT/pkgs/clan-cli"
|
||||
|
||||
# Set up custom git configuration for tests
|
||||
export GIT_CONFIG_GLOBAL=$(mktemp)
|
||||
git config --file "$GIT_CONFIG_GLOBAL" user.name "Test User"
|
||||
git config --file "$GIT_CONFIG_GLOBAL" user.email "test@example.com"
|
||||
export GIT_CONFIG_SYSTEM=/dev/null
|
||||
|
||||
# this disables dynamic dependency loading in clan-cli
|
||||
export CLAN_NO_DYNAMIC_DEPS=1
|
||||
|
||||
jobs=$(nproc)
|
||||
# Spawning workers in pytest is relatively slow, so we limit the number of jobs to 13
|
||||
# (current number of impure tests)
|
||||
jobs="$((jobs > 13 ? 13 : jobs))"
|
||||
|
||||
nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -n $jobs -m impure ./clan_cli $@"
|
||||
|
||||
# Clean up temporary git config
|
||||
rm -f "$GIT_CONFIG_GLOBAL"
|
||||
nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -s -m impure ./tests $@"
|
||||
'';
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,234 +1,110 @@
|
||||
{ self, lib, ... }:
|
||||
{
|
||||
self,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
installer =
|
||||
{ modulesPath, pkgs, ... }:
|
||||
let
|
||||
dependencies = [
|
||||
self.clan.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
|
||||
self.clan.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
|
||||
self.clan.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.clan.deployment.file
|
||||
pkgs.stdenv.drvPath
|
||||
pkgs.bash.drvPath
|
||||
pkgs.nixos-anywhere
|
||||
pkgs.bubblewrap
|
||||
pkgs.buildPackages.xorg.lndir
|
||||
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
|
||||
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
(modulesPath + "/../tests/common/auto-format-root-device.nix")
|
||||
];
|
||||
networking.useNetworkd = true;
|
||||
services.openssh.enable = true;
|
||||
services.openssh.settings.UseDns = false;
|
||||
services.openssh.settings.PasswordAuthentication = false;
|
||||
system.nixos.variant_id = "installer";
|
||||
environment.systemPackages = [
|
||||
self.packages.${pkgs.system}.clan-cli-full
|
||||
pkgs.nixos-facter
|
||||
];
|
||||
environment.etc."install-closure".source = "${closureInfo}/store-paths";
|
||||
virtualisation.emptyDiskImages = [ 512 ];
|
||||
virtualisation.diskSize = 8 * 1024;
|
||||
virtualisation.rootDevice = "/dev/vdb";
|
||||
# both installer and target need to use the same diskImage
|
||||
virtualisation.diskImage = "./target.qcow2";
|
||||
virtualisation.memorySize = 3048;
|
||||
nix.settings = {
|
||||
substituters = lib.mkForce [ ];
|
||||
hashed-mirrors = null;
|
||||
connect-timeout = lib.mkForce 3;
|
||||
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
|
||||
experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
};
|
||||
users.users.nonrootuser = {
|
||||
isNormalUser = true;
|
||||
openssh.authorizedKeys.keyFiles = [ ../assets/ssh/pubkey ];
|
||||
extraGroups = [ "wheel" ];
|
||||
};
|
||||
security.sudo.wheelNeedsPassword = false;
|
||||
system.extraDependencies = dependencies;
|
||||
};
|
||||
in
|
||||
{
|
||||
clan.machines.test_install_machine = {
|
||||
clan.networking.targetHost = "test_install_machine";
|
||||
fileSystems."/".device = lib.mkDefault "/dev/null";
|
||||
boot.loader.grub.device = lib.mkDefault "/dev/null";
|
||||
|
||||
# The purpose of this test is to ensure `clan machines install` works
|
||||
# for machines that don't have a hardware config yet.
|
||||
|
||||
# If this test starts failing it could be due to the `facter.json` being out of date
|
||||
# you can get a new one by adding
|
||||
# client.fail("cat test-flake/machines/test-install-machine/facter.json >&2")
|
||||
# to the installation test.
|
||||
clan.machines.test-install-machine-without-system = {
|
||||
fileSystems."/".device = lib.mkDefault "/dev/vda";
|
||||
boot.loader.grub.device = lib.mkDefault "/dev/vda";
|
||||
|
||||
imports = [ self.nixosModules.test-install-machine-without-system ];
|
||||
imports = [ self.nixosModules.test_install_machine ];
|
||||
};
|
||||
clan.machines.test-install-machine-with-system =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
# https://git.clan.lol/clan/test-fixtures
|
||||
facter.reportPath = builtins.fetchurl {
|
||||
url = "https://git.clan.lol/clan/test-fixtures/raw/commit/4a2bc56d886578124b05060d3fb7eddc38c019f8/nixos-vm-facter-json/${pkgs.hostPlatform.system}.json";
|
||||
sha256 =
|
||||
{
|
||||
aarch64-linux = "sha256:1rlfymk03rmfkm2qgrc8l5kj5i20srx79n1y1h4nzlpwaz0j7hh2";
|
||||
x86_64-linux = "sha256:16myh0ll2gdwsiwkjw5ba4dl23ppwbsanxx214863j7nvzx42pws";
|
||||
}
|
||||
.${pkgs.hostPlatform.system};
|
||||
};
|
||||
|
||||
fileSystems."/".device = lib.mkDefault "/dev/vda";
|
||||
boot.loader.grub.device = lib.mkDefault "/dev/vda";
|
||||
|
||||
imports = [ self.nixosModules.test-install-machine-without-system ];
|
||||
};
|
||||
flake.nixosModules = {
|
||||
test-install-machine-without-system =
|
||||
test_install_machine =
|
||||
{ lib, modulesPath, ... }:
|
||||
{
|
||||
imports = [
|
||||
self.clanModules.diskLayouts
|
||||
(modulesPath + "/testing/test-instrumentation.nix") # we need these 2 modules always to be able to run the tests
|
||||
(modulesPath + "/profiles/qemu-guest.nix")
|
||||
self.clanLib.test.minifyModule
|
||||
];
|
||||
|
||||
networking.hostName = "test-install-machine";
|
||||
clan.diskLayouts.singleDiskExt4.device = "/dev/vdb";
|
||||
|
||||
environment.etc."install-successful".text = "ok";
|
||||
|
||||
boot.consoleLogLevel = lib.mkForce 100;
|
||||
boot.kernelParams = [ "boot.shell_on_fail" ];
|
||||
|
||||
# disko config
|
||||
boot.loader.grub.efiSupport = lib.mkDefault true;
|
||||
boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
|
||||
clan.core.vars.settings.secretStore = "vm";
|
||||
clan.core.vars.generators.test = {
|
||||
files.test.neededFor = "partitioning";
|
||||
script = ''
|
||||
echo "notok" > "$out"/test
|
||||
'';
|
||||
};
|
||||
disko.devices = {
|
||||
disk = {
|
||||
main = {
|
||||
type = "disk";
|
||||
device = "/dev/vda";
|
||||
|
||||
preCreateHook = ''
|
||||
test -e /run/partitioning-secrets/test/test
|
||||
'';
|
||||
|
||||
content = {
|
||||
type = "gpt";
|
||||
partitions = {
|
||||
boot = {
|
||||
size = "1M";
|
||||
type = "EF02"; # for grub MBR
|
||||
priority = 1;
|
||||
};
|
||||
ESP = {
|
||||
size = "512M";
|
||||
type = "EF00";
|
||||
content = {
|
||||
type = "filesystem";
|
||||
format = "vfat";
|
||||
mountpoint = "/boot";
|
||||
mountOptions = [ "umask=0077" ];
|
||||
};
|
||||
};
|
||||
root = {
|
||||
size = "100%";
|
||||
content = {
|
||||
type = "filesystem";
|
||||
format = "ext4";
|
||||
mountpoint = "/";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
perSystem =
|
||||
{
|
||||
nodes,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
dependencies = [
|
||||
self
|
||||
self.nixosConfigurations.test_install_machine.config.system.build.toplevel
|
||||
self.nixosConfigurations.test_install_machine.config.system.build.diskoScript
|
||||
self.nixosConfigurations.test_install_machine.config.system.clan.deployment.file
|
||||
pkgs.stdenv.drvPath
|
||||
pkgs.nixos-anywhere
|
||||
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
|
||||
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
|
||||
in
|
||||
{
|
||||
# On aarch64-linux, it hangs on reboot after installation:
|
||||
# vm-test-run-test-installation-> installer # [ 288.002871] reboot: Restarting system
|
||||
# vm-test-run-test-installation-> server # [test-install-machine] ### Done! ###
|
||||
# vm-test-run-test-installation-> server # [test-install-machine] + step 'Done!'
|
||||
# vm-test-run-test-installation-> server # [test-install-machine] + echo '### Done! ###'
|
||||
# vm-test-run-test-installation-> server # [test-install-machine] + rm -rf /tmp/tmp.qb16EAq7hJ
|
||||
# vm-test-run-test-installation-> (finished: must succeed: clan machines install --debug --flake test-flake --yes test-install-machine --target-host root@installer --update-hardware-config nixos-facter >&2, in 154.62 seconds)
|
||||
# vm-test-run-test-installation-> target: starting vm
|
||||
# vm-test-run-test-installation-> target: QEMU running (pid 144)
|
||||
# vm-test-run-test-installation-> target: waiting for unit multi-user.target
|
||||
# vm-test-run-test-installation-> target: waiting for the VM to finish booting
|
||||
# vm-test-run-test-installation-> target: Guest root shell did not produce any data yet...
|
||||
# vm-test-run-test-installation-> target: To debug, enter the VM and run 'systemctl status backdoor.service'.
|
||||
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
|
||||
nixos-test-installation = self.clanLib.test.baseTest {
|
||||
name = "installation";
|
||||
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
|
||||
test-installation = (import ../lib/test-base.nix) {
|
||||
name = "test-installation";
|
||||
nodes.target = {
|
||||
services.openssh.enable = true;
|
||||
virtualisation.diskImage = "./target.qcow2";
|
||||
virtualisation.useBootLoader = true;
|
||||
users.users.root.openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
|
||||
system.nixos.variant_id = "installer";
|
||||
virtualisation.emptyDiskImages = [ 4096 ];
|
||||
nix.settings = {
|
||||
substituters = lib.mkForce [ ];
|
||||
hashed-mirrors = null;
|
||||
connect-timeout = lib.mkForce 3;
|
||||
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
|
||||
experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
};
|
||||
};
|
||||
nodes.client = {
|
||||
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
|
||||
environment.etc."install-closure".source = "${closureInfo}/store-paths";
|
||||
virtualisation.memorySize = 2048;
|
||||
nix.settings = {
|
||||
substituters = lib.mkForce [ ];
|
||||
hashed-mirrors = null;
|
||||
connect-timeout = lib.mkForce 3;
|
||||
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
|
||||
experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
};
|
||||
system.extraDependencies = dependencies;
|
||||
};
|
||||
nodes.installer = installer;
|
||||
|
||||
testScript = ''
|
||||
installer.start()
|
||||
def create_test_machine(oldmachine=None, args={}): # taken from <nixpkgs/nixos/tests/installer.nix>
|
||||
startCommand = "${pkgs.qemu_test}/bin/qemu-kvm"
|
||||
startCommand += " -cpu max -m 1024 -virtfs local,path=/nix/store,security_model=none,mount_tag=nix-store"
|
||||
startCommand += f' -drive file={oldmachine.state_dir}/empty0.qcow2,id=drive1,if=none,index=1,werror=report'
|
||||
startCommand += ' -device virtio-blk-pci,drive=drive1'
|
||||
machine = create_machine({
|
||||
"startCommand": startCommand,
|
||||
} | args)
|
||||
driver.machines.append(machine)
|
||||
return machine
|
||||
|
||||
installer.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../assets/ssh/privkey} /root/.ssh/id_ed25519")
|
||||
start_all()
|
||||
|
||||
installer.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v nonrootuser@localhost hostname")
|
||||
installer.succeed("cp -r ${self.checks.x86_64-linux.clan-core-for-checks} test-flake && chmod -R +w test-flake")
|
||||
client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
|
||||
client.wait_until_succeeds("ssh -o StrictHostKeyChecking=accept-new -v root@target hostname")
|
||||
|
||||
installer.succeed("clan machines install --no-reboot --debug --flake test-flake --yes test-install-machine-without-system --target-host nonrootuser@localhost --update-hardware-config nixos-facter >&2")
|
||||
installer.shutdown()
|
||||
client.succeed("clan --debug --flake ${../..} machines install --yes test_install_machine root@target >&2")
|
||||
try:
|
||||
target.shutdown()
|
||||
except BrokenPipeError:
|
||||
# qemu has already exited
|
||||
pass
|
||||
|
||||
# We are missing the test instrumentation somehow. Test this later.
|
||||
target.state_dir = installer.state_dir
|
||||
target.start()
|
||||
target.wait_for_unit("multi-user.target")
|
||||
'';
|
||||
} { inherit pkgs self; };
|
||||
|
||||
nixos-test-update-hardware-configuration = self.clanLib.test.baseTest {
|
||||
name = "update-hardware-configuration";
|
||||
nodes.installer = installer;
|
||||
|
||||
testScript = ''
|
||||
installer.start()
|
||||
installer.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../assets/ssh/privkey} /root/.ssh/id_ed25519")
|
||||
installer.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v nonrootuser@localhost hostname")
|
||||
installer.succeed("cp -r ${self.checks.x86_64-linux.clan-core-for-checks} test-flake && chmod -R +w test-flake")
|
||||
installer.fail("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
|
||||
installer.fail("test -f test-flake/machines/test-install-machine/facter.json")
|
||||
|
||||
installer.succeed("clan machines update-hardware-config --debug --flake test-flake test-install-machine-without-system nonrootuser@localhost >&2")
|
||||
installer.succeed("test -f test-flake/machines/test-install-machine-without-system/facter.json")
|
||||
installer.succeed("rm test-flake/machines/test-install-machine-without-system/facter.json")
|
||||
|
||||
installer.succeed("clan machines update-hardware-config --debug --backend nixos-generate-config --flake test-flake test-install-machine-without-system nonrootuser@localhost >&2")
|
||||
installer.succeed("test -f test-flake/machines/test-install-machine-without-system/hardware-configuration.nix")
|
||||
installer.succeed("rm test-flake/machines/test-install-machine-without-system/hardware-configuration.nix")
|
||||
new_machine = create_test_machine(oldmachine=target, args={ "name": "new_machine" })
|
||||
assert(new_machine.succeed("cat /etc/install-successful").strip() == "ok")
|
||||
'';
|
||||
} { inherit pkgs self; };
|
||||
};
|
||||
|
||||
@@ -7,19 +7,9 @@
|
||||
let
|
||||
testDriver = hostPkgs.python3.pkgs.callPackage ./package.nix {
|
||||
inherit (config) extraPythonPackages;
|
||||
inherit (hostPkgs.pkgs) util-linux systemd nix;
|
||||
inherit (hostPkgs.pkgs) util-linux systemd;
|
||||
};
|
||||
containers =
|
||||
testScript:
|
||||
map (m: [
|
||||
m.system.build.toplevel
|
||||
(hostPkgs.closureInfo {
|
||||
rootPaths = [
|
||||
m.system.build.toplevel
|
||||
(hostPkgs.writeText "testScript" testScript)
|
||||
];
|
||||
})
|
||||
]) (lib.attrValues config.nodes);
|
||||
containers = map (m: m.system.build.toplevel) (lib.attrValues config.nodes);
|
||||
pythonizeName =
|
||||
name:
|
||||
let
|
||||
@@ -36,10 +26,7 @@ let
|
||||
machineNames = map (name: "${name}: Machine;") pythonizedNames;
|
||||
pythonizedNames = map pythonizeName nodeHostNames;
|
||||
in
|
||||
lib.mkIf (config.clan.test.useContainers or true) {
|
||||
defaults.imports = [
|
||||
./nixos-module.nix
|
||||
];
|
||||
{
|
||||
driver = lib.mkForce (
|
||||
hostPkgs.runCommand "nixos-test-driver-${config.name}"
|
||||
{
|
||||
@@ -57,6 +44,8 @@ lib.mkIf (config.clan.test.useContainers or true) {
|
||||
''
|
||||
mkdir -p $out/bin
|
||||
|
||||
containers=(${toString containers})
|
||||
|
||||
${lib.optionalString (!config.skipTypeCheck) ''
|
||||
# prepend type hints so the test script can be type checked with mypy
|
||||
cat "${./test-script-prepend.py}" >> testScriptWithTypes
|
||||
@@ -77,13 +66,7 @@ lib.mkIf (config.clan.test.useContainers or true) {
|
||||
ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-test-driver
|
||||
|
||||
wrapProgram $out/bin/nixos-test-driver \
|
||||
${
|
||||
lib.concatStringsSep " " (
|
||||
map (container: "--add-flags '--container ${builtins.toString container}'") (
|
||||
containers config.testScriptString
|
||||
)
|
||||
)
|
||||
} \
|
||||
${lib.concatStringsSep " " (map (name: "--add-flags '--container ${name}'") containers)} \
|
||||
--add-flags "--test-script '$out/test-script'"
|
||||
''
|
||||
);
|
||||
@@ -92,7 +75,7 @@ lib.mkIf (config.clan.test.useContainers or true) {
|
||||
lib.lazyDerivation {
|
||||
# lazyDerivation improves performance when only passthru items and/or meta are used.
|
||||
derivation = hostPkgs.stdenv.mkDerivation {
|
||||
name = "container-test-run-${config.name}";
|
||||
name = "vm-test-run-${config.name}";
|
||||
|
||||
requiredSystemFeatures = [ "uid-range" ];
|
||||
|
||||
@@ -105,12 +88,6 @@ lib.mkIf (config.clan.test.useContainers or true) {
|
||||
${config.driver}/bin/nixos-test-driver -o $out
|
||||
'';
|
||||
|
||||
nativeBuildInputs = [
|
||||
hostPkgs.util-linux
|
||||
hostPkgs.coreutils
|
||||
hostPkgs.iproute2
|
||||
];
|
||||
|
||||
passthru = config.passthru;
|
||||
|
||||
meta = config.meta;
|
||||
19
checks/lib/container-driver/package.nix
Normal file
@@ -0,0 +1,19 @@
|
||||
{
|
||||
extraPythonPackages,
|
||||
python3Packages,
|
||||
buildPythonApplication,
|
||||
setuptools,
|
||||
util-linux,
|
||||
systemd,
|
||||
}:
|
||||
buildPythonApplication {
|
||||
pname = "test-driver";
|
||||
version = "0.0.1";
|
||||
propagatedBuildInputs = [
|
||||
util-linux
|
||||
systemd
|
||||
] ++ extraPythonPackages python3Packages;
|
||||
nativeBuildInputs = [ setuptools ];
|
||||
format = "pyproject";
|
||||
src = ./.;
|
||||
}
|
||||
@@ -14,8 +14,16 @@ find = {}
|
||||
|
||||
[tool.setuptools.package-data]
|
||||
test_driver = ["py.typed"]
|
||||
|
||||
[tool.ruff]
|
||||
target-version = "py311"
|
||||
line-length = 88
|
||||
|
||||
lint.select = [ "E", "F", "I", "U", "N", "RUF", "ANN", "A" ]
|
||||
lint.ignore = ["E501", "ANN101", "ANN401", "A003"]
|
||||
|
||||
[tool.mypy]
|
||||
python_version = "3.12"
|
||||
python_version = "3.11"
|
||||
warn_redundant_casts = true
|
||||
disallow_untyped_calls = true
|
||||
disallow_untyped_defs = true
|
||||
354
checks/lib/container-driver/test_driver/__init__.py
Normal file
@@ -0,0 +1,354 @@
|
||||
import argparse
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import time
|
||||
from collections.abc import Callable
|
||||
from pathlib import Path
|
||||
from tempfile import TemporaryDirectory
|
||||
from typing import Any
|
||||
|
||||
|
||||
def prepare_machine_root(machinename: str, root: Path) -> None:
|
||||
root.mkdir(parents=True, exist_ok=True)
|
||||
root.joinpath("etc").mkdir(parents=True, exist_ok=True)
|
||||
root.joinpath(".env").write_text(
|
||||
"\n".join(f"{k}={v}" for k, v in os.environ.items())
|
||||
)
|
||||
|
||||
|
||||
def pythonize_name(name: str) -> str:
|
||||
return re.sub(r"^[^A-z_]|[^A-z0-9_]", "_", name)
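As an aside (not part of the diff): a minimal sketch of what this substitution does to a few hypothetical node names, which makes the `machine_symbols` mapping further down easier to follow. The hostnames are invented for illustration.

```python
# Standalone sketch of the pythonize_name() behaviour shown above.
# The hostnames are made up for illustration only.
import re


def pythonize_name(name: str) -> str:
    # anything outside the A-z range or "_" at the start, or outside
    # A-z/0-9/"_" later on, is replaced with "_" so the result can be
    # used as a Python identifier
    return re.sub(r"^[^A-z_]|[^A-z0-9_]", "_", name)


assert pythonize_name("web-server") == "web_server"
assert pythonize_name("1node") == "_node"
```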
|
||||
|
||||
|
||||
def retry(fn: Callable, timeout: int = 900) -> None:
|
||||
"""Call the given function repeatedly, with 1 second intervals,
|
||||
until it returns True or a timeout is reached.
|
||||
"""
|
||||
|
||||
for _ in range(timeout):
|
||||
if fn(False):
|
||||
return
|
||||
time.sleep(1)
|
||||
|
||||
if not fn(True):
|
||||
raise Exception(f"action timed out after {timeout} seconds")
|
||||
|
||||
|
||||
class Machine:
|
||||
def __init__(self, name: str, toplevel: Path, rootdir: Path, out_dir: str) -> None:
|
||||
self.name = name
|
||||
self.toplevel = toplevel
|
||||
self.out_dir = out_dir
|
||||
self.process: subprocess.Popen | None = None
|
||||
self.rootdir: Path = rootdir
|
||||
|
||||
def start(self) -> None:
|
||||
prepare_machine_root(self.name, self.rootdir)
|
||||
cmd = [
|
||||
"systemd-nspawn",
|
||||
"--keep-unit",
|
||||
"-M",
|
||||
self.name,
|
||||
"-D",
|
||||
self.rootdir,
|
||||
"--register=no",
|
||||
"--resolv-conf=off",
|
||||
"--bind-ro=/nix/store",
|
||||
"--bind",
|
||||
self.out_dir,
|
||||
"--bind=/proc:/run/host/proc",
|
||||
"--bind=/sys:/run/host/sys",
|
||||
"--private-network",
|
||||
self.toplevel.joinpath("init"),
|
||||
]
|
||||
env = os.environ.copy()
|
||||
env["SYSTEMD_NSPAWN_UNIFIED_HIERARCHY"] = "1"
|
||||
self.process = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True, env=env)
|
||||
self.container_pid = self.get_systemd_process()
|
||||
|
||||
def get_systemd_process(self) -> int:
|
||||
assert self.process is not None, "Machine not started"
|
||||
assert self.process.stdout is not None, "Machine has no stdout"
|
||||
for line in self.process.stdout:
|
||||
print(line, end="")
|
||||
if line.startswith("systemd[1]: Startup finished in"):
|
||||
break
|
||||
else:
|
||||
raise RuntimeError(f"Failed to start container {self.name}")
|
||||
childs = (
|
||||
Path(f"/proc/{self.process.pid}/task/{self.process.pid}/children")
|
||||
.read_text()
|
||||
.split()
|
||||
)
|
||||
assert (
|
||||
len(childs) == 1
|
||||
), f"Expected exactly one child process for systemd-nspawn, got {childs}"
|
||||
try:
|
||||
return int(childs[0])
|
||||
except ValueError:
|
||||
raise RuntimeError(f"Failed to parse child process id {childs[0]}")
|
||||
|
||||
def get_unit_info(self, unit: str) -> dict[str, str]:
|
||||
proc = self.systemctl(f'--no-pager show "{unit}"')
|
||||
if proc.returncode != 0:
|
||||
raise Exception(
|
||||
f'retrieving systemctl info for unit "{unit}"'
|
||||
+ f" failed with exit code {proc.returncode}"
|
||||
)
|
||||
|
||||
line_pattern = re.compile(r"^([^=]+)=(.*)$")
|
||||
|
||||
def tuple_from_line(line: str) -> tuple[str, str]:
|
||||
match = line_pattern.match(line)
|
||||
assert match is not None
|
||||
return match[1], match[2]
|
||||
|
||||
return dict(
|
||||
tuple_from_line(line)
|
||||
for line in proc.stdout.split("\n")
|
||||
if line_pattern.match(line)
|
||||
)
|
||||
|
||||
def execute(
|
||||
self,
|
||||
command: str,
|
||||
check_return: bool = True,
|
||||
check_output: bool = True,
|
||||
timeout: int | None = 900,
|
||||
) -> subprocess.CompletedProcess:
|
||||
"""
|
||||
Execute a shell command, returning a list `(status, stdout)`.
|
||||
|
||||
Commands are run with `set -euo pipefail` set:
|
||||
|
||||
- If several commands are separated by `;` and one fails, the
|
||||
command as a whole will fail.
|
||||
|
||||
- For pipelines, the last non-zero exit status will be returned
|
||||
(if there is one; otherwise zero will be returned).
|
||||
|
||||
- Dereferencing unset variables fails the command.
|
||||
|
||||
- It will wait for stdout to be closed.
|
||||
|
||||
If the command detaches, it must close stdout, as `execute` will wait
|
||||
for this to consume all output reliably. This can be achieved by
|
||||
redirecting stdout to stderr `>&2`, to `/dev/console`, `/dev/null` or
|
||||
a file. Examples of detaching commands are `sleep 365d &`, where the
|
||||
shell forks a new process that can write to stdout and `xclip -i`, where
|
||||
the `xclip` command itself forks without closing stdout.
|
||||
|
||||
Takes an optional parameter `check_return` that defaults to `True`.
|
||||
Setting this parameter to `False` will not check for the return code
|
||||
and return -1 instead. This can be used for commands that shut down
|
||||
the VM and would therefore break the pipe that would be used for
|
||||
retrieving the return code.
|
||||
|
||||
A timeout for the command can be specified (in seconds) using the optional
|
||||
`timeout` parameter, e.g., `execute(cmd, timeout=10)` or
|
||||
`execute(cmd, timeout=None)`. The default is 900 seconds.
|
||||
"""
|
||||
|
||||
# Always run command with shell opts
|
||||
command = f"set -euo pipefail; {command}"
|
||||
|
||||
proc = subprocess.run(
|
||||
[
|
||||
"nsenter",
|
||||
"--target",
|
||||
str(self.container_pid),
|
||||
"--mount",
|
||||
"--uts",
|
||||
"--ipc",
|
||||
"--net",
|
||||
"--pid",
|
||||
"--cgroup",
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
command,
|
||||
],
|
||||
timeout=timeout,
|
||||
check=False,
|
||||
stdout=subprocess.PIPE,
|
||||
text=True,
|
||||
)
|
||||
return proc
|
||||
|
||||
def systemctl(self, q: str) -> subprocess.CompletedProcess:
|
||||
"""
|
||||
Runs `systemctl` commands with optional support for
|
||||
`systemctl --user`
|
||||
|
||||
```py
|
||||
# run `systemctl list-jobs --no-pager`
|
||||
machine.systemctl("list-jobs --no-pager")
|
||||
|
||||
# spawn a shell for `any-user` and run
|
||||
# `systemctl --user list-jobs --no-pager`
|
||||
machine.systemctl("list-jobs --no-pager", "any-user")
|
||||
```
|
||||
"""
|
||||
return self.execute(f"systemctl {q}")
|
||||
|
||||
def wait_for_unit(self, unit: str, timeout: int = 900) -> None:
|
||||
"""
|
||||
Wait for a systemd unit to get into "active" state.
|
||||
Throws exceptions on "failed" and "inactive" states as well as after
|
||||
timing out.
|
||||
"""
|
||||
|
||||
def check_active(_: bool) -> bool:
|
||||
info = self.get_unit_info(unit)
|
||||
state = info["ActiveState"]
|
||||
if state == "failed":
|
||||
raise Exception(f'unit "{unit}" reached state "{state}"')
|
||||
|
||||
if state == "inactive":
|
||||
proc = self.systemctl("list-jobs --full 2>&1")
|
||||
if "No jobs" in proc.stdout:
|
||||
info = self.get_unit_info(unit)
|
||||
if info["ActiveState"] == state:
|
||||
raise Exception(
|
||||
f'unit "{unit}" is inactive and there are no pending jobs'
|
||||
)
|
||||
|
||||
return state == "active"
|
||||
|
||||
retry(check_active, timeout)
|
||||
|
||||
def succeed(self, command: str, timeout: int | None = None) -> str:
|
||||
res = self.execute(command, timeout=timeout)
|
||||
if res.returncode != 0:
|
||||
raise RuntimeError(f"Failed to run command {command}")
|
||||
return res.stdout
|
||||
|
||||
def shutdown(self) -> None:
|
||||
"""
|
||||
Shut down the machine, waiting for the VM to exit.
|
||||
"""
|
||||
if self.process:
|
||||
self.process.terminate()
|
||||
self.process.wait()
|
||||
self.process = None
|
||||
|
||||
def release(self) -> None:
|
||||
self.shutdown()
|
||||
|
||||
|
||||
def setup_filesystems() -> None:
|
||||
# We don't care about cleaning up the mount points, since we're running in a nix sandbox.
|
||||
Path("/run").mkdir(parents=True, exist_ok=True)
|
||||
subprocess.run(["mount", "-t", "tmpfs", "none", "/run"], check=True)
|
||||
subprocess.run(["mount", "-t", "cgroup2", "none", "/sys/fs/cgroup"], check=True)
|
||||
Path("/etc").chmod(0o755)
|
||||
Path("/etc/os-release").touch()
|
||||
Path("/etc/machine-id").write_text("a5ea3f98dedc0278b6f3cc8c37eeaeac")
|
||||
|
||||
|
||||
class Driver:
|
||||
def __init__(self, containers: list[Path], testscript: str, out_dir: str) -> None:
|
||||
self.containers = containers
|
||||
self.testscript = testscript
|
||||
self.out_dir = out_dir
|
||||
setup_filesystems()
|
||||
|
||||
self.tempdir = TemporaryDirectory()
|
||||
tempdir_path = Path(self.tempdir.name)
|
||||
|
||||
self.machines = []
|
||||
for container in containers:
|
||||
name_match = re.match(r".*-nixos-system-(.+)-(.+)", container.name)
|
||||
if not name_match:
|
||||
raise ValueError(f"Unable to extract hostname from {container.name}")
|
||||
name = name_match.group(1)
|
||||
self.machines.append(
|
||||
Machine(
|
||||
name=name,
|
||||
toplevel=container,
|
||||
rootdir=tempdir_path / name,
|
||||
out_dir=self.out_dir,
|
||||
)
|
||||
)
|
||||
|
||||
def start_all(self) -> None:
|
||||
for machine in self.machines:
|
||||
machine.start()
|
||||
|
||||
def test_symbols(self) -> dict[str, Any]:
|
||||
general_symbols = dict(
|
||||
start_all=self.start_all,
|
||||
machines=self.machines,
|
||||
driver=self,
|
||||
Machine=Machine, # for typing
|
||||
)
|
||||
machine_symbols = {pythonize_name(m.name): m for m in self.machines}
|
||||
# If there's exactly one machine, make it available under the name
|
||||
# "machine", even if it's not called that.
|
||||
if len(self.machines) == 1:
|
||||
(machine_symbols["machine"],) = self.machines
|
||||
print(
|
||||
"additionally exposed symbols:\n "
|
||||
+ ", ".join(map(lambda m: m.name, self.machines))
|
||||
+ ",\n "
|
||||
+ ", ".join(list(general_symbols.keys()))
|
||||
)
|
||||
return {**general_symbols, **machine_symbols}
|
||||
|
||||
def test_script(self) -> None:
|
||||
"""Run the test script"""
|
||||
exec(self.testscript, self.test_symbols(), None)
|
||||
|
||||
def run_tests(self) -> None:
|
||||
"""Run the test script (for non-interactive test runs)"""
|
||||
self.test_script()
|
||||
|
||||
def __enter__(self) -> "Driver":
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
|
||||
for machine in self.machines:
|
||||
machine.release()
|
||||
|
||||
|
||||
def writeable_dir(arg: str) -> Path:
|
||||
"""Raises an ArgumentTypeError if the given argument isn't a writeable directory
|
||||
Note: We want to fail as early as possible if a directory isn't writeable,
|
||||
since an executed nixos-test could fail (very late) because of the test-driver
|
||||
writing in a directory without proper permissions.
|
||||
"""
|
||||
path = Path(arg)
|
||||
if not path.is_dir():
|
||||
raise argparse.ArgumentTypeError(f"{path} is not a directory")
|
||||
if not os.access(path, os.W_OK):
|
||||
raise argparse.ArgumentTypeError(f"{path} is not a writeable directory")
|
||||
return path
|
||||
|
||||
|
||||
def main() -> None:
|
||||
arg_parser = argparse.ArgumentParser(prog="nixos-test-driver")
|
||||
arg_parser.add_argument(
|
||||
"--containers",
|
||||
nargs="+",
|
||||
type=Path,
|
||||
help="container system toplevel paths",
|
||||
)
|
||||
arg_parser.add_argument(
|
||||
"--test-script",
|
||||
help="the test script to run",
|
||||
type=Path,
|
||||
)
|
||||
arg_parser.add_argument(
|
||||
"-o",
|
||||
"--output-directory",
|
||||
default=Path.cwd(),
|
||||
help="the directory to bind to /run/test-results",
|
||||
type=writeable_dir,
|
||||
)
|
||||
args = arg_parser.parse_args()
|
||||
with Driver(
|
||||
args.containers,
|
||||
args.test_script.read_text(),
|
||||
args.output_directory.resolve(),
|
||||
) as driver:
|
||||
driver.run_tests()
|
||||
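For orientation, a rough sketch of how the `Driver` class above ends up being used. The store path and test script are placeholders, and in practice this only works inside the Nix build sandbox where `setup_filesystems()` is allowed to mount tmpfs and cgroup2; the real entry point is the `nixos-test-driver` wrapper generated in the container-driver module.

```python
# Illustrative only: placeholder toplevel path and test script.
from pathlib import Path

containers = [
    Path("/nix/store/aaaa-nixos-system-machine-24.05"),  # placeholder toplevel
]
test_script = 'start_all()\nmachine.wait_for_unit("multi-user.target")'

# Driver boots each container with systemd-nspawn, then exec()s the script
# with the machines exposed as Python symbols.
with Driver(containers, test_script, out_dir="/tmp/test-results") as driver:
    driver.run_tests()
```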
33
checks/lib/container-test.nix
Normal file
@@ -0,0 +1,33 @@
|
||||
test:
|
||||
{ pkgs, self, ... }:
|
||||
let
|
||||
inherit (pkgs) lib;
|
||||
nixos-lib = import (pkgs.path + "/nixos/lib") { };
|
||||
in
|
||||
(nixos-lib.runTest (
|
||||
{ hostPkgs, ... }:
|
||||
{
|
||||
hostPkgs = pkgs;
|
||||
# speed-up evaluation
|
||||
defaults = {
|
||||
documentation.enable = lib.mkDefault false;
|
||||
boot.isContainer = true;
|
||||
|
||||
# undo qemu stuff
|
||||
system.build.initialRamdisk = "";
|
||||
virtualisation.sharedDirectories = lib.mkForce { };
|
||||
networking.useDHCP = false;
|
||||
|
||||
# we have no private networking so far
|
||||
networking.interfaces = lib.mkForce { };
|
||||
#networking.primaryIPAddress = lib.mkForce null;
|
||||
systemd.services.backdoor.enable = false;
|
||||
};
|
||||
# to accept external dependencies such as disko
|
||||
node.specialArgs.self = self;
|
||||
imports = [
|
||||
test
|
||||
./container-driver/module.nix
|
||||
];
|
||||
}
|
||||
)).config.result
|
||||
18
checks/lib/test-base.nix
Normal file
@@ -0,0 +1,18 @@
|
||||
test:
|
||||
{ pkgs, self, ... }:
|
||||
let
|
||||
inherit (pkgs) lib;
|
||||
nixos-lib = import (pkgs.path + "/nixos/lib") { };
|
||||
in
|
||||
(nixos-lib.runTest {
|
||||
hostPkgs = pkgs;
|
||||
# speed-up evaluation
|
||||
defaults = {
|
||||
documentation.enable = lib.mkDefault false;
|
||||
nix.settings.min-free = 0;
|
||||
};
|
||||
|
||||
# to accept external dependencies such as disko
|
||||
node.specialArgs.self = self;
|
||||
imports = [ test ];
|
||||
}).config.result
|
||||
@@ -1,69 +1,29 @@
|
||||
(
|
||||
(import ../lib/container-test.nix) (
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
name = "matrix-synapse";
|
||||
|
||||
nodes.machine =
|
||||
{
|
||||
config,
|
||||
self,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{ self, lib, ... }:
|
||||
{
|
||||
imports = [
|
||||
self.clanModules.matrix-synapse
|
||||
self.nixosModules.clanCore
|
||||
{
|
||||
clan.core.settings.directory = ./.;
|
||||
|
||||
clanCore.machineName = "machine";
|
||||
clanCore.clanDir = ./.;
|
||||
clan.matrix-synapse = {
|
||||
enable = true;
|
||||
domain = "clan.test";
|
||||
};
|
||||
}
|
||||
{
|
||||
# secret override
|
||||
clanCore.facts.services.matrix-synapse.secret.synapse-registration_shared_secret.path = "${./synapse-registration_shared_secret}";
|
||||
services.nginx.virtualHosts."matrix.clan.test" = {
|
||||
enableACME = lib.mkForce false;
|
||||
forceSSL = lib.mkForce false;
|
||||
};
|
||||
clan.nginx.acme.email = "admins@clan.lol";
|
||||
clan.matrix-synapse = {
|
||||
server_tld = "clan.test";
|
||||
app_domain = "matrix.clan.test";
|
||||
};
|
||||
clan.matrix-synapse.users.admin.admin = true;
|
||||
clan.matrix-synapse.users.someuser = { };
|
||||
|
||||
clan.core.facts.secretStore = "vm";
|
||||
clan.core.vars.settings.secretStore = "vm";
|
||||
clan.core.vars.settings.publicStore = "in_repo";
|
||||
|
||||
# because we use systemd-tmpfiles to copy the secrets, we need a separate systemd-tmpfiles call to provision them.
|
||||
boot.postBootCommands = "${config.systemd.package}/bin/systemd-tmpfiles --create /etc/tmpfiles.d/00-vmsecrets.conf";
|
||||
|
||||
systemd.tmpfiles.settings."00-vmsecrets" = {
|
||||
# run before 00-nixos.conf
|
||||
"/etc/secrets" = {
|
||||
d.mode = "0700";
|
||||
z.mode = "0700";
|
||||
};
|
||||
"/etc/secrets/matrix-synapse/synapse-registration_shared_secret" = {
|
||||
f.argument = "supersecret";
|
||||
z = {
|
||||
mode = "0400";
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
"/etc/secrets/matrix-password-admin/matrix-password-admin" = {
|
||||
f.argument = "matrix-password1";
|
||||
z = {
|
||||
mode = "0400";
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
"/etc/secrets/matrix-password-someuser/matrix-password-someuser" = {
|
||||
f.argument = "matrix-password2";
|
||||
z = {
|
||||
mode = "0400";
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
];
|
||||
};
|
||||
@@ -71,12 +31,6 @@
|
||||
start_all()
|
||||
machine.wait_for_unit("matrix-synapse")
|
||||
machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 8008")
|
||||
machine.wait_until_succeeds("${pkgs.curl}/bin/curl -Ssf -L http://localhost/_matrix/static/ -H 'Host: matrix.clan.test'")
|
||||
|
||||
machine.systemctl("restart matrix-synapse >&2") # check if user creation is idempotent
|
||||
machine.execute("journalctl -u matrix-synapse --no-pager >&2")
|
||||
machine.wait_for_unit("matrix-synapse")
|
||||
machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 8008")
|
||||
machine.succeed("${pkgs.curl}/bin/curl -Ssf -L http://localhost/_matrix/static/ -H 'Host: matrix.clan.test'")
|
||||
'';
|
||||
}
|
||||
|
||||
@@ -1,64 +0,0 @@
|
||||
{
|
||||
self,
|
||||
...
|
||||
}:
|
||||
{
|
||||
clan.machines.test-morph-machine = {
|
||||
imports = [
|
||||
./template/configuration.nix
|
||||
self.nixosModules.clanCore
|
||||
];
|
||||
nixpkgs.hostPlatform = "x86_64-linux";
|
||||
environment.etc."testfile".text = "morphed";
|
||||
};
|
||||
|
||||
clan.templates.machine.test-morph-template = {
|
||||
description = "Morph a machine";
|
||||
path = ./template;
|
||||
};
|
||||
|
||||
perSystem =
|
||||
{
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{
|
||||
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
|
||||
nixos-test-morph = self.clanLib.test.baseTest {
|
||||
name = "morph";
|
||||
|
||||
nodes = {
|
||||
actual =
|
||||
{ pkgs, ... }:
|
||||
let
|
||||
dependencies = [
|
||||
pkgs.stdenv.drvPath
|
||||
pkgs.stdenvNoCC
|
||||
self.nixosConfigurations.test-morph-machine.config.system.build.toplevel
|
||||
self.nixosConfigurations.test-morph-machine.config.system.clan.deployment.file
|
||||
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
|
||||
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
|
||||
in
|
||||
|
||||
{
|
||||
environment.etc."install-closure".source = "${closureInfo}/store-paths";
|
||||
system.extraDependencies = dependencies;
|
||||
|
||||
virtualisation.memorySize = 2048;
|
||||
virtualisation.useNixStoreImage = true;
|
||||
virtualisation.writableStore = true;
|
||||
|
||||
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli-full ];
|
||||
};
|
||||
};
|
||||
testScript = ''
|
||||
start_all()
|
||||
actual.fail("cat /etc/testfile")
|
||||
actual.succeed("env CLAN_DIR=${self.checks.x86_64-linux.clan-core-for-checks} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
|
||||
assert actual.succeed("cat /etc/testfile") == "morphed"
|
||||
'';
|
||||
} { inherit pkgs self; };
|
||||
};
|
||||
|
||||
};
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
{ modulesPath, ... }:
|
||||
{
|
||||
imports = [
|
||||
# we need these 2 modules always to be able to run the tests
|
||||
(modulesPath + "/testing/test-instrumentation.nix")
|
||||
(modulesPath + "/virtualisation/qemu-vm.nix")
|
||||
|
||||
(modulesPath + "/profiles/minimal.nix")
|
||||
];
|
||||
|
||||
virtualisation.useNixStoreImage = true;
|
||||
virtualisation.writableStore = true;
|
||||
|
||||
clan.core.enableRecommendedDefaults = false;
|
||||
}
|
||||
@@ -1,53 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
nixosLib,
|
||||
clan-core,
|
||||
...
|
||||
}:
|
||||
nixosLib.runTest (
|
||||
{ ... }:
|
||||
{
|
||||
imports = [
|
||||
clan-core.modules.nixosTest.clanTest
|
||||
];
|
||||
|
||||
hostPkgs = pkgs;
|
||||
|
||||
name = "service-mycelium";
|
||||
|
||||
clan = {
|
||||
test.useContainers = false;
|
||||
directory = ./.;
|
||||
modules."@clan/mycelium" = ../../clanServices/mycelium/default.nix;
|
||||
inventory = {
|
||||
machines.server = { };
|
||||
|
||||
instances = {
|
||||
mycelium-test = {
|
||||
module.name = "@clan/mycelium";
|
||||
module.input = "self";
|
||||
roles.peer.machines."server".settings = {
|
||||
openFirewall = true;
|
||||
addHostedPublicNodes = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
nodes = {
|
||||
server = { };
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
start_all()
|
||||
|
||||
# Check that mycelium service is running
|
||||
server.wait_for_unit("mycelium")
|
||||
server.succeed("systemctl status mycelium")
|
||||
|
||||
# Check that mycelium is listening on its default port
|
||||
server.wait_until_succeeds("${pkgs.iproute2}/bin/ss -tulpn | grep -q 'mycelium'", 10)
|
||||
'';
|
||||
}
|
||||
)
|
||||
@@ -1,6 +0,0 @@
[
{
"publickey": "age122lc4fvw2p22gcvwqeme5k49qxtjanqkl2xvr6qvf3r0zyh7scuqz28cam",
"type": "age"
}
]
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:hLJS+CJllYM50KxKuiYamxBLGd9lwoeIFapP9mZAlVGH5DSenylcKUfsphxafASoB516qns2DznBoS9mWqg9uTsRZjk4WlR3x6A=,iv:uRiIpUKIiV3riNcBAWUqhZbE+Vb7lLMfU0C/TClVZ6M=,tag:4+nsMssiSyq9Iv7sDuWmoQ==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBPUWFOMzluRmdOQXBmRjRN\ncXNSUlB5Z0t6dWYxNVkvMmhrN1FDdmxHcTNJCkhPL3BYMFFXMmU5ZGRqOC9KWEgv\nSHB5OUJqTk5Dd0tDTks1R1ZhYktrLzgKLS0tIHJIMlFRVWphZXlISmR3VUJKUjNk\ndWF4eCt6UHBrSndBay95RVJ3dldiaFkKCgYqrt0aCGRTaHycBoeqv/zeByu2ZZ3Z\nVfgxnD9liIQkS2wERbpk0/Yq9wkKgVxj+DZoWwHYhP0eKCw2UOorCA==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-06-04T11:54:59Z",
|
||||
"mac": "ENC[AES256_GCM,data:xoeOz7FRCPJ18UTsfbY1x/N65pxbTsehT9Kv3MgEd6NQJn6FTvquaj3HEZ0KvIzStBz1FNOhSql9CZUFc4StYps05EbX61MMMnz6Nlj3xcTwuVQFabGoinxcXbCDSA+tAW7VqzVxumj6FMDg+r77gdcIApZjGJg4Z9ws2RZd3u4=,iv:U8IUDwmfg8Umob9mtKgGaKoIY4SKNL895BABJxzx5n8=,tag:tnMCx6D/17ZYgI6SgNS29A==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
../../../users/admin
@@ -1,4 +0,0 @@
{
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"type": "age"
}
@@ -1 +0,0 @@
52d:87c1:4222:b550:ee01:a7ae:254:5a66
@@ -1 +0,0 @@
../../../../../../sops/machines/server
@@ -1,19 +0,0 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:DGzl2G4H+NkwXq0fCUQS0+8FG1x9xoIsYvAgUxP4Qp8=,iv:CXOJVgYNthVOZ4vbdI3n4KLXSFVemzaoEnRGMC+m0i8=,tag:/u+pV3xWpUq0ZtAm6LKuGQ==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age122lc4fvw2p22gcvwqeme5k49qxtjanqkl2xvr6qvf3r0zyh7scuqz28cam",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBZZHZjdXMwclBTeGthcGpM\nV1ArVy83akNHNEpXVFhoR1FWNlJUeHNKTW00CjJFTFZneFNrL0hDMXJpaTQ2M1ln\nTmdPMGhzeUp0NU55QnhCZEU2QVk1OG8KLS0tIDFhQmJhOHJsTjhYNEhITEw0WFgy\nWC9pTi9od0wyMWtZRVZJYWo0Nmo5SHMKDohnAAfrnGOiw55huMme2EEWE53N/feS\nutvbiTZh1ECHCi/uoK757fjnJ/WrQMSxUpctT9I8bpJRtbTqkx3XRw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBZeTZENGFpWndDbmdsWDRw\nMTBCVXg5Zkg2a2s5Yk1HSERIVlVQRXdUSUNnCnREbno0dEN0ZEgvOFNMRG1ReGx4\na0h1YUFuMkxBZXJUTE9xOUVUMitEalkKLS0tIFZMZ21qclRqUFR0dlAyMFkzdUNX\nNjRLTWVRVWtHSDlDakEzMmpRVWkyc0EKabm8mTKJVxQNTaIgU+8rb/xk9Dpg+Zjz\nb+wgD0+TlARlenMtIub8Y6N06ENOc20oovylfu+g7xV+EkvRPCd6tA==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-06-04T11:55:02Z",
|
||||
"mac": "ENC[AES256_GCM,data:UIBaD/3mACgFzkajkFXz3oKai8IxpYQriR2t0mc5fL92P5ECloxCobY386TDZYOEVrDJ45Bw+IzqZbsCx/G9f1xCCTR2JvqygxYIsK3TpQPsboJzb9Cz3dBNBCXGboVykcg/NobEMaJBw1xtdAQBhuo8S7ymIuOPtGz0vPFJkf8=,iv:g0YAOBsRpgAOikKDMJDyOtcVx+0QwetfA8R6wQFH7lY=,tag:sfdFLjtiqFHdP/Qe1suBBQ==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
../../../../../../sops/users/admin
@@ -1 +0,0 @@
2125c6b039374467eaa3eaf552bd3e97f434d16006433cfbba3e6823c958b728
@@ -1,27 +0,0 @@
|
||||
{ self, ... }:
|
||||
let
|
||||
documentationModule = {
|
||||
# This is how some downstream users currently generate documentation
|
||||
# If this breaks, notify them via Matrix since we spent ~5 hrs bug hunting last time.
|
||||
documentation.nixos.enable = true;
|
||||
documentation.nixos.extraModules = [
|
||||
self.nixosModules.clanCore
|
||||
# This is the only option that is not part of the
|
||||
# module because it is usually set by flake-parts
|
||||
{ clan.core.settings.directory = ./.; }
|
||||
];
|
||||
};
|
||||
in
|
||||
{
|
||||
clan = {
|
||||
machines.test-documentation = {
|
||||
# Dummy file system
|
||||
fileSystems."/".device = "/dev/null";
|
||||
boot.loader.grub.device = "/dev/null";
|
||||
nixpkgs.hostPlatform = "x86_64-linux";
|
||||
imports = [
|
||||
documentationModule
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
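The module above feeds clanCore into documentation.nixos.extraModules, so the clan options end up in the generated NixOS options documentation. A minimal sketch of building that output locally, assuming the machine is exposed as a nixosConfiguration and the standard NixOS documentation attributes apply (both assumptions; neither is shown in this diff):

# Attribute path assumed for illustration only
nix build .#nixosConfigurations.test-documentation.config.system.build.manual.optionsJSON -L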
@@ -1,73 +0,0 @@
({
  name = "postgresql";

  nodes.machine =
    { self, config, ... }:
    {
      imports = [
        self.nixosModules.clanCore
        self.clanModules.postgresql
        self.clanModules.localbackup
      ];
      clan.postgresql.users.test = { };
      clan.postgresql.databases.test.create.options.OWNER = "test";
      clan.postgresql.databases.test.restore.stopOnRestore = [ "sample-service" ];
      clan.localbackup.targets.hdd.directory = "/mnt/external-disk";
      clan.core.settings.directory = ./.;

      systemd.services.sample-service = {
        wantedBy = [ "multi-user.target" ];
        script = ''
          while true; do
            echo "Hello, world!"
            sleep 5
          done
        '';
      };

      environment.systemPackages = [ config.services.postgresql.package ];
    };
  testScript =
    { nodes, ... }:
    ''
      start_all()
      machine.wait_for_unit("postgresql")
      machine.wait_for_unit("sample-service")
      # Create a test table
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -c 'CREATE TABLE test (id serial PRIMARY KEY);' test")

      machine.succeed("/run/current-system/sw/bin/localbackup-create >&2")
      timestamp_before = int(machine.succeed("systemctl show --property=ExecMainStartTimestampMonotonic sample-service | cut -d= -f2").strip())

      machine.succeed("test -e /mnt/external-disk/snapshot.0/machine/var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'INSERT INTO test DEFAULT VALUES;'")
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'DROP TABLE test;'")
      machine.succeed("test -e /var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")

      machine.succeed("rm -rf /var/backup/postgres")

      machine.succeed("NAME=/mnt/external-disk/snapshot.0 FOLDERS=/var/backup/postgres/test /run/current-system/sw/bin/localbackup-restore >&2")
      machine.succeed("test -e /var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")

      machine.succeed("""
        set -x
        ${nodes.machine.clan.core.state.test.postRestoreCommand}
      """)
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -l >&2")
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c '\dt' >&2")

      timestamp_after = int(machine.succeed("systemctl show --property=ExecMainStartTimestampMonotonic sample-service | cut -d= -f2").strip())
      assert timestamp_before < timestamp_after, f"{timestamp_before} >= {timestamp_after}: expected sample-service to be restarted after restore"

      # Check that the table is still there
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'SELECT * FROM test;'")
      output = machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql --csv -c \"SELECT datdba::regrole FROM pg_database WHERE datname = 'test'\"")
      owner = output.split("\n")[1]
      assert owner == "test", f"Expected database owner to be 'test', got '{owner}'"

      # check if restore works if the database does not exist
      machine.succeed("runuser -u postgres -- dropdb test")
      machine.succeed("${nodes.machine.clan.core.state.test.postRestoreCommand}")
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c '\dt' >&2")
    '';
})
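Condensed from the removed test above: the backup/restore round trip it exercises comes down to two wrapper scripts provided by the localbackup module, with the restore side selecting the snapshot and folders via environment variables (the paths are the ones used in the test):

# Create a snapshot on the configured target, then restore one folder from it
localbackup-create
NAME=/mnt/external-disk/snapshot.0 FOLDERS=/var/backup/postgres/test localbackup-restore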
48
checks/schemas.nix
Normal file
@@ -0,0 +1,48 @@
{
  self,
  runCommand,
  check-jsonschema,
  pkgs,
  lib,
  ...
}:
let
  clanModules.clanCore = self.nixosModules.clanCore;

  baseModule = {
    imports = (import (pkgs.path + "/nixos/modules/module-list.nix")) ++ [
      {
        nixpkgs.hostPlatform = "x86_64-linux";
        clanCore.clanName = "dummy";
      }
    ];
  };

  optionsFromModule =
    module:
    let
      evaled = lib.evalModules {
        modules = [
          module
          baseModule
        ];
      };
    in
    evaled.options.clan;

  clanModuleSchemas = lib.mapAttrs (
    _: module: self.lib.jsonschema.parseOptions (optionsFromModule module)
  ) clanModules;

  mkTest =
    name: schema:
    runCommand "schema-${name}" { } ''
      ${check-jsonschema}/bin/check-jsonschema \
        --check-metaschema ${builtins.toFile "schema-${name}" (builtins.toJSON schema)}
      touch $out
    '';
in
lib.mapAttrs' (name: schema: {
  name = "schema-${name}";
  value = mkTest name schema;
}) clanModuleSchemas
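Each attribute produced by checks/schemas.nix is a derivation named schema-<module> that validates the generated JSON Schema against the metaschema with check-jsonschema. Assuming these derivations are surfaced as flake checks (an assumption; the wiring is not part of this hunk), one of them could be built roughly like this:

# Attribute path is an assumption based on the derivation names above
nix build .#checks.x86_64-linux.schema-clanCore -L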
@@ -1,16 +1,16 @@
{
(import ../lib/test-base.nix) {
name = "secrets";

nodes.machine =
{ self, config, ... }:
{
environment.etc."privkey.age".source = ./key.age;
imports = [ (self.nixosModules.clanCore) ];
environment.etc."secret".source = config.sops.secrets.secret.path;
environment.etc."group-secret".source = config.sops.secrets.group-secret.path;
sops.age.keyFile = "/etc/privkey.age";
sops.age.keyFile = ./key.age;

clan.core.settings.directory = "${./.}";
clanCore.clanDir = "${./.}";
clanCore.machineName = "machine";

networking.hostName = "machine";
};
@@ -1,4 +1,4 @@
{
"publickey": "age15x8u838dwqflr3t6csf4tlghxm4tx77y379ncqxav7y2n8qp7yzqgrwt00",
"type": "age"
}
}
@@ -1,4 +1,4 @@
{
"publickey": "age15x8u838dwqflr3t6csf4tlghxm4tx77y379ncqxav7y2n8qp7yzqgrwt00",
"type": "age"
}
}
@@ -1,66 +0,0 @@
{
  pkgs,
  nixosLib,
  clan-core,
  ...
}:

nixosLib.runTest (
  { hostPkgs, config, ... }:
  {
    imports = [
      clan-core.modules.nixosTest.clanTest
    ];

    hostPkgs = pkgs;

    # This tests the compatibility of the inventory with the test framework:
    # - legacy modules
    # - clan.service modules
    name = "service-dummy-test-from-flake";

    clan.test.fromFlake = ./.;

    extraPythonPackages = _p: [
      clan-core.legacyPackages.${hostPkgs.system}.setupNixInNixPythonPackage
    ];

    testScript =
      { nodes, ... }:
      ''
        from setup_nix_in_nix import setup_nix_in_nix  # type: ignore[import-untyped]
        setup_nix_in_nix()

        def run_clan(cmd: list[str], **kwargs) -> str:
            import subprocess
            clan = "${clan-core.packages.${hostPkgs.system}.clan-cli}/bin/clan"
            clan_args = ["--flake", "${config.clan.test.flakeForSandbox}"]
            return subprocess.run(
                ["${hostPkgs.util-linux}/bin/unshare", "--user", "--map-user", "1000", "--map-group", "1000", clan, *cmd, *clan_args],
                **kwargs,
                check=True,
            ).stdout

        start_all()
        admin1.wait_for_unit("multi-user.target")
        peer1.wait_for_unit("multi-user.target")
        # Provided by the legacy module
        print(admin1.succeed("systemctl status dummy-service"))
        print(peer1.succeed("systemctl status dummy-service"))

        # peer1 should have the generated non-secret file
        peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")

        ls_out = peer1.succeed("ls -la ${nodes.peer1.clan.core.vars.generators.new-service.files.a-secret.path}")
        # Check that the file is owned by 'nobody'
        assert "nobody" in ls_out, f"File is not owned by 'nobody': {ls_out}"
        # Check that the file is in the 'users' group
        assert "users" in ls_out, f"File is not in the 'users' group: {ls_out}"
        # Check that the file has mode 0644
        assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}"

        run_clan(["machines", "list"])
      '';
  }
)
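The run_clan() helper in the removed test above wraps an ordinary clan-cli call inside unshare so it runs unprivileged in the sandbox. Outside the test, the same invocation reduces to roughly the following; the flake path here is hypothetical, chosen only for illustration:

# Rough equivalent of run_clan(["machines", "list"]); flake path is hypothetical
clan machines list --flake ./checks/service-dummy-test-from-flake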
@@ -1,71 +0,0 @@
{
  inputs.clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
  inputs.nixpkgs.follows = "clan-core/nixpkgs";

  outputs =
    { self, clan-core, ... }:
    let
      # For usage, see: https://docs.clan.lol
      clan = clan-core.lib.clan {
        inherit self;

        inventory =
          { ... }:
          {
            meta.name = "foo";
            machines.peer1 = { };
            machines.admin1 = { };
            services = {
              legacy-module.default = {
                roles.peer.machines = [ "peer1" ];
                roles.admin.machines = [ "admin1" ];
              };
            };

            instances."test" = {
              module.name = "new-service";
              module.input = "self";
              roles.peer.machines.peer1 = { };
            };

            modules = {
              legacy-module = ./legacy-module;
            };
          };

        modules.new-service = {
          _class = "clan.service";
          manifest.name = "new-service";
          roles.peer = { };
          perMachine = {
            nixosModule = {
              # This should be generated by:
              # nix run .#generate-test-vars -- checks/service-dummy-test service-dummy-test
              clan.core.vars.generators.new-service = {
                files.not-a-secret = {
                  secret = false;
                  deploy = true;
                };
                files.a-secret = {
                  secret = true;
                  deploy = true;
                  owner = "nobody";
                  group = "users";
                  mode = "0644";
                };
                script = ''
                  # Dummy generator script: it just writes fixed placeholder values
                  echo -n "not-a-secret" > $out/not-a-secret
                  echo -n "a-secret" > $out/a-secret
                '';
              };
            };
          };
        };
      };
    in
    {
      # All machines managed by Clan
      inherit (clan.config) nixosConfigurations nixosModules clanInternals;
    };
}
@@ -1,10 +0,0 @@
---
description = "Set up dummy-module"
categories = ["System"]
features = [ "inventory" ]

[constraints]
roles.admin.min = 1
roles.admin.max = 1
---
@@ -1,5 +0,0 @@
{
  imports = [
    ../shared.nix
  ];
}
@@ -1,5 +0,0 @@
{
  imports = [
    ../shared.nix
  ];
}
@@ -1,34 +0,0 @@
{ config, ... }:
{
  systemd.services.dummy-service = {
    enable = true;
    description = "Dummy service";
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
    };
    script = ''
      generated_password_path="${config.clan.core.vars.generators.dummy-generator.files.generated-password.path}"
      if [ ! -f "$generated_password_path" ]; then
        echo "Generated password file not found: $generated_password_path"
        exit 1
      fi
      host_id_path="${config.clan.core.vars.generators.dummy-generator.files.host-id.path}"
      if [ ! -e "$host_id_path" ]; then
        echo "Host ID file not found: $host_id_path"
        exit 1
      fi
    '';
  };

  # TODO: add a prompt and make it work in the test framework
  clan.core.vars.generators.dummy-generator = {
    files.host-id.secret = false;
    files.generated-password.secret = true;
    script = ''
      echo $RANDOM > "$out"/host-id
      echo $RANDOM > "$out"/generated-password
    '';
  };
}
Some files were not shown because too many files have changed in this diff.