Compare commits
118 Commits
| SHA1 |
|---|
| 271b6fe7fc |
| b899f95cf6 |
| f9fe1b8913 |
| fc8a65c388 |
| 75f722bc79 |
| 38f3ea6dad |
| 9c5b0ed077 |
| 0dad11ffcf |
| 9144f5a3cd |
| f66b96c102 |
| 7d3972b993 |
| d61a042b76 |
| 2f05eccace |
| 8779dc07f0 |
| ae6eb1a822 |
| 57c91c3da3 |
| c5a8765a65 |
| 5ec14e51d4 |
| a4cc333533 |
| 5299fe7259 |
| e6a9bcbb69 |
| b46f841257 |
| 14847ba846 |
| 6eb4c4c1e9 |
| 520c926d6d |
| 1205f74f87 |
| 9b392b66ee |
| 4e37f53b7a |
| 8eec4c89c5 |
| 9812d4114f |
| 6d622f7f68 |
| c62995f91f |
| 7f0e6d74e6 |
| bf46ea1ebb |
| 4ba722dd36 |
| 61baf0f6c3 |
| c252dd7b47 |
| 4aa01a63dc |
| 8030b64cdb |
| cbe7e27f91 |
| d1e59fedb1 |
| b3dd1c4a46 |
| 6614138fb8 |
| 92f87e169c |
| a451946ab4 |
| c7a1d7ce29 |
| 0e06ce3cca |
| 1bb1b966d6 |
| db98d106a1 |
| a40c6884d9 |
| 5cac9e7704 |
| 808491c71c |
| 68afbb564e |
| 11d851e934 |
| d825a6b8c0 |
| 3187ad3f5b |
| 84ab04fc06 |
| 7112f608a7 |
| 70523f75fa |
| 25db58ce11 |
| d92623f07e |
| 6b5dca5842 |
| 016fe3d114 |
| 9b60b4a989 |
| 3088ce025b |
| 4f1fda3de6 |
| 57f14827c2 |
| 58e9a28f14 |
| b4ad5ca1bd |
| 84ecb1aae6 |
| 2b9971f538 |
| 81e15cab34 |
| 215c808071 |
| 4de052e58b |
| a06a7a7a2c |
| 94df3855b5 |
| a83f3c23f4 |
| da6cd324f0 |
| c5b96df7b0 |
| c4feeace31 |
| 6117b664ae |
| b8fdb48fd8 |
| 9165f7ccaf |
| 8058a7c158 |
| fed61f49f9 |
| f1f05c7e6b |
| 7597d1560f |
| f739e1b66d |
| 5d3609aacd |
| 7aa51d6bd7 |
| af91ae8c7f |
| 077bf55fd7 |
| 1f6dcb910f |
| 6363d9c99c |
| fd30dbd1be |
| ba4dc36ddf |
| 5abac04b15 |
| 8c84d32b13 |
| c083548795 |
| 11af5c3471 |
| dac8a40b9f |
| 204f9d09e3 |
| 668067080d |
| 10ed2cc7f7 |
| 060b22cf21 |
| 965dddfee1 |
| 6e5d74ba22 |
| 4257f47a1a |
| 72b64a8b70 |
| e46e0543cd |
| 0de79962ea |
| 6209816115 |
| ec21cda0cf |
| 8a29d102cd |
| 22787e7c93 |
| 19fd72e075 |
| 50be33088c |
| 6e7a67c830 |
devFlake/flake.lock (generated, 12 lines changed)
@@ -3,10 +3,10 @@
   "clan-core-for-checks": {
     "flake": false,
     "locked": {
-      "lastModified": 1759727242,
-      "narHash": "sha256-15Q9eXbfsLmzIbYWasZ3Nuqafnc5o9al9RmGuBGVK74=",
+      "lastModified": 1759915474,
+      "narHash": "sha256-ef7awwmx2onWuA83FNE29B3tTZ+tQxEWLD926ckMiF8=",
       "ref": "main",
-      "rev": "c737271585ff3df308feab22c09967fce8f278d3",
+      "rev": "81e15cab34f9ae00b6f2df5f2e53ee07cd3a0af3",
       "shallow": true,
       "type": "git",
       "url": "https://git.clan.lol/clan/clan-core"
@@ -105,11 +105,11 @@
   },
   "nixpkgs-dev": {
     "locked": {
-      "lastModified": 1759670943,
-      "narHash": "sha256-JBjTDfwzAwtd8+5X/Weg27WE/3hVYOP3uggP2JPaQVQ=",
+      "lastModified": 1759860509,
+      "narHash": "sha256-c7eJvqAlWLhwNc9raHkQ7mvoFbHLUO/cLMrww1ds4Zg=",
       "owner": "NixOS",
       "repo": "nixpkgs",
-      "rev": "21980a9c20f34648121f60bda15f419fa568db21",
+      "rev": "b574dcadf3fb578dee8d104b565bd745a5a9edc0",
       "type": "github"
     },
     "original": {
@@ -1,5 +1,5 @@
 # Serve documentation locally

 ```
-$ nix develop .#docs -c mkdocs serve
+nix develop .#docs -c mkdocs serve
 ```
docs/main.py (deleted, 41 lines)
@@ -1,41 +0,0 @@
-from typing import Any
-
-
-def define_env(env: Any) -> None:
-    static_dir = "/static/"
-    video_dir = "https://clan.lol/" + "videos/"
-    asciinema_dir = static_dir + "asciinema-player/"
-
-    @env.macro
-    def video(name: str) -> str:
-        return f"""<video loop muted autoplay id="{name}">
-          <source src={video_dir + name} type="video/webm">
-          Your browser does not support the video tag.
-        </video>"""
-
-    @env.macro
-    def asciinema(name: str) -> str:
-        return f"""<div id="{name}">
-        <script>
-        // Function to load the script and then create the Asciinema player
-        function loadAsciinemaPlayer() {{
-            var script = document.createElement('script');
-            script.src = "{asciinema_dir}/asciinema-player.min.js";
-            script.onload = function() {{
-                AsciinemaPlayer.create('{video_dir + name}', document.getElementById("{name}"), {{
-                    loop: true,
-                    autoPlay: true,
-                    controls: false,
-                    speed: 1.5,
-                    theme: "solarized-light"
-                }});
-            }};
-            document.head.appendChild(script);
-        }}
-
-        // Load the Asciinema player script
-        loadAsciinemaPlayer();
-        </script>
-
-        <link rel="stylesheet" type="text/css" href="{asciinema_dir}/asciinema-player.css" />
-        </div>"""
@@ -58,7 +58,7 @@ nav:
       - getting-started/configure-disk.md
       - getting-started/update-machines.md
       - getting-started/continuous-integration.md
-      - getting-started/convert-existing-NixOS-configuration.md
+      - Convert existing NixOS configurations: getting-started/convert-existing-NixOS-configuration.md
   - Guides:
       - Inventory:
           - Introduction to Inventory: guides/inventory/inventory.md
@@ -66,6 +66,7 @@ nav:
       - Services:
          - Introduction to Services: guides/services/introduction-to-services.md
          - Author Your Own Service: guides/services/community.md
+         - Internal Services with SSL: guides/internal-ssl-services.md
      - Vars:
         - Introduction to Vars: guides/vars/vars-overview.md
         - Minimal Example: guides/vars/vars-backend.md
@@ -3,8 +3,6 @@
   module-docs,
   clan-cli-docs,
   clan-lib-openapi,
-  asciinema-player-js,
-  asciinema-player-css,
   roboto,
   fira-code,
   docs-options,
@@ -57,10 +55,6 @@ pkgs.stdenv.mkDerivation {
     cp -r ${docs-options} ./site/options
     chmod -R +w ./site/options

-    mkdir -p ./site/static/asciinema-player
-    ln -snf ${asciinema-player-js} ./site/static/asciinema-player/asciinema-player.min.js
-    ln -snf ${asciinema-player-css} ./site/static/asciinema-player/asciinema-player.css
-
     # Link to fonts
     ln -snf ${roboto}/share/fonts/truetype/Roboto-Regular.ttf ./site/static/
     ln -snf ${fira-code}/share/fonts/truetype/FiraCode-VF.ttf ./site/static/
@@ -43,15 +43,6 @@
     mypy --strict $out/bin/render-options
   '';

-  asciinema-player-js = pkgs.fetchurl {
-    url = "https://github.com/asciinema/asciinema-player/releases/download/v3.7.0/asciinema-player.min.js";
-    sha256 = "sha256-Ymco/+FinDr5YOrV72ehclpp4amrczjo5EU3jfr/zxs=";
-  };
-  asciinema-player-css = pkgs.fetchurl {
-    url = "https://github.com/asciinema/asciinema-player/releases/download/v3.7.0/asciinema-player.css";
-    sha256 = "sha256-GZMeZFFGvP5GMqqh516mjJKfQaiJ6bL38bSYOXkaohc=";
-  };
-
   module-docs =
     pkgs.runCommand "rendered"
       {
@@ -111,8 +102,6 @@
       ;
       inherit (inputs) nixpkgs;
       inherit module-docs;
-      inherit asciinema-player-js;
-      inherit asciinema-player-css;
     };
   deploy-docs = pkgs.callPackage ./deploy-docs.nix { inherit (config.packages) docs; };
   inherit module-docs;
@@ -121,6 +110,7 @@
   pkgs.runCommand "docs-integrity"
     {
       nativeBuildInputs = [ pkgs.html-proofer ];
+      LANG = "C.UTF-8";
     }
     ''
       # External links should be avoided in the docs, because they often break
@@ -105,7 +105,7 @@ def render_option(
     read_only = option.get("readOnly")

     res = f"""
-{"#" * level} {sanitize(name) if short_head is None else sanitize(short_head)} {"{: #" + sanitize_anchor(name) + "}" if level > 1 else ""}
+{"#" * level} {sanitize(name) if short_head is None else sanitize(short_head)}

"""
@@ -431,7 +431,7 @@ def produce_inventory_docs() -> None:
     output = """# Inventory Submodule
 This provides an overview of the available options of the `inventory` model.

-It can be set via the `inventory` attribute of the [`clan`](../../reference/options/clan_inventory.md) function, or via the [`clan.inventory`](../../reference/options/clan_inventory.md) attribute of flake-parts.
+It can be set via the `inventory` attribute of the [`clan`](../../reference/options/clan.md) function, or via the [`clan.inventory`](../../reference/options/clan_inventory.md) attribute of flake-parts.

 """
     # Inventory options are already included under the clan attribute
@@ -38,8 +38,8 @@ See the complete [list](../guides/inventory/autoincludes.md) of auto-loaded files.
 ### Configuring a machine

 !!! Note
-    The option: `inventory.machines.<name>` is used to define metadata about the machine
-    That includes for example `deploy.targethost` `machineClass` or `tags`
+    The option: `inventory.machines.<name>` is used to define metadata about the machine
+    That includes for example `deploy.targethost` `machineClass` or `tags`

 The option: `machines.<name>` is used to add extra *nixosConfiguration* to a machine
@@ -71,7 +71,7 @@ This example demonstrates what is needed based on a machine called `jon`:
 ```

 1. Tags can be used to automatically add this machine to services later on. - You dont need to set this now.
-2. Add your *ssh key* here - That will ensure you can always login to your machine via *ssh* in case something goes wrong.
+2. Add your _ssh key_ here - That will ensure you can always login to your machine via _ssh_ in case something goes wrong.

 ### (Optional) Create a `configuration.nix`
@@ -99,8 +99,8 @@ git mv ./machines/jon ./machines/<your-machine-name>

 Since your Clan configuration lives inside a Git repository, remember:

-* Only files tracked by Git (`git add`) are recognized.
-* Whenever you add, rename, or remove files, run:
+- Only files tracked by Git (`git add`) are recognized.
+- Whenever you add, rename, or remove files, run:

 ```bash
 git add ./machines/<your-machine-name>
@@ -4,14 +4,14 @@ This guide will help you convert your existing NixOS configurations into a Clan.
 Migrating instead of starting new can be trickier and might lead to bugs or
 unexpected issues. We recommend reading the [Getting Started](../getting-started/creating-your-first-clan.md) guide first.

-Once you have a working setup and understand the concepts transfering your NixOS configurations over is easy.
+Once you have a working setup and understand the concepts transferring your NixOS configurations over is easy.

 ## Back up your existing configuration

 Before you start, it is strongly recommended to back up your existing
 configuration in any form you see fit. If you use version control to manage
 your configuration changes, it is also a good idea to follow the migration
-guide in a separte branch until everything works as expected.
+guide in a separate branch until everything works as expected.

 ## Starting Point
@@ -67,6 +67,59 @@ nix build .#checks.x86_64-linux.{test-attr-name}
 ```
 (replace `{test-attr-name}` with the name of the test)

+### Testing services with vars
+
+Services that define their own vars (using `clan.core.vars.generators`) require generating test vars before running the tests.
+
+#### Understanding the `clan.directory` setting
+
+The `clan.directory` option is critical for vars generation and loading in tests. This setting determines:
+
+1. **Where vars are generated**: When you run `update-vars`, it creates `vars/` and `sops/` directories inside the path specified by `clan.directory`
+2. **Where vars are loaded from**: During test execution, machines look for their vars and secrets relative to `clan.directory`
+
+#### Generating test vars
+
+For services that define vars, you must first run:
+
+```shellSession
+nix run .#checks.x86_64-linux.{test-attr-name}.update-vars
+```
+
+This generates the necessary var files in the directory specified by `clan.directory`. After running this command, you can run the test normally:
+
+```shellSession
+nix run .#checks.x86_64-linux.{test-attr-name}
+```
+
+#### Example: service-dummy-test
+
+The `service-dummy-test` is a good example of a test that uses vars. To run it:
+
+```shellSession
+# First, generate the test vars
+nix run .#checks.x86_64-linux.service-dummy-test.update-vars
+
+# Then run the test
+nix run .#checks.x86_64-linux.service-dummy-test
+```
+
+#### Common issues
+
+If `update-vars` fails, you may need to ensure that:
+
+- **`clan.directory` is set correctly**: It should point to the directory where you want vars to be generated (typically `clan.directory = ./.;` in your test definition)
+- **Your test defines machines**: Machines must be defined in `clan.inventory.machines` or through the inventory system
+- **Machine definitions are complete**: Each machine should have the necessary service configuration that defines the vars generators
+
+**If vars are not found during test execution:**
+
+- Verify that `clan.directory` points to the same location where you ran `update-vars`
+- Check that the `vars/` and `sops/` directories exist in that location
+- Ensure the generated files match the machines and generators defined in your test
+
+You can reference `/checks/service-dummy-test/` to see a complete working example of a test with vars, including the correct directory structure.
+
 ### Debugging VM tests

 The following techniques can be used to debug a VM test:
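As an aside, the two-step flow added above is easy to script. A minimal sketch (hypothetical helper, not part of clan-core; assumes `nix` with flakes enabled is on PATH):

```python
import subprocess
import sys


def run_check_with_vars(test_attr: str, system: str = "x86_64-linux") -> None:
    """Generate test vars for a check, then run the check itself."""
    base = f".#checks.{system}.{test_attr}"
    # Step 1: generate vars/ and sops/ under the test's clan.directory
    subprocess.run(["nix", "run", f"{base}.update-vars"], check=True)
    # Step 2: run the actual test against the freshly generated vars
    subprocess.run(["nix", "run", base], check=True)


if __name__ == "__main__":
    run_check_with_vars(sys.argv[1] if len(sys.argv) > 1 else "service-dummy-test")
```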
docs/site/guides/internal-ssl-services.md (new file, 213 lines)
@@ -0,0 +1,213 @@
+A common use case you might have is to host services and applications which are
+only reachable within your clan.
+
+This guide explains how to set up such secure, clan-internal web services using
+a custom top-level domain (TLD) with SSL certificates.
+
+Your services will be accessible only within your clan network and secured with
+proper SSL certificates that all clan machines trust.
+
+## Overview
+
+By combining the `coredns` and `certificates` clan services, you can:
+
+- Create a custom TLD for your clan (e.g. `.c`)
+- Host internal web services accessible via HTTPS (e.g. `https://api.c`, `https://dashboard.c`)
+- Automatically provision and trust SSL certificates across all clan machines
+- Keep internal services secure and isolated from the public internet
+
+The setup uses two clan services working together:
+
+- **coredns service**: Provides DNS resolution for your custom TLD within the clan
+- **certificates service**: Creates a certificate authority (CA) and issues SSL certificates for your TLD
+
+### DNS Resolution Flow
+
+1. A clan machine tries to access `https://service.c`
+2. The machine queries its local DNS resolver (unbound)
+3. For `.c` domains, the query is forwarded to your clan's CoreDNS server. All
+   other domains will be resolved as usual.
+4. CoreDNS returns the IP address of the machine hosting the service
+5. The machine connects directly to the service over HTTPS
+6. The SSL certificate is trusted because all machines trust your clan's CA
+
+## Step-by-Step Setup
+
+The following setup assumes you have a VPN (e.g. Zerotier) already running. The
+IPs configured in the options below will probably be the Zerotier IPs of the
+respective machines.
+
+### Configure the CoreDNS Service
+
+The CoreDNS service has two roles:
+
+- `server`: Runs the DNS server for your custom TLD
+- `default`: Makes machines use the DNS server for TLD resolution and allows exposing services
+
+Add this to your inventory:
+
+```nix
+inventory = {
+  machines = {
+    dns-server = { }; # Machine that will run the DNS server
+    web-server = { }; # Machine that will host web services
+    client = { }; # Any other machines in your clan
+  };
+
+  instances = {
+    coredns = {
+
+      # Add the default role to all machines
+      roles.default.tags = [ "all" ];
+
+      # DNS server for the .c TLD
+      roles.server.machines.dns-server.settings = {
+        ip = "192.168.1.10"; # IP of your DNS server machine
+        tld = "c";
+      };
+
+      # Machine hosting services (example: ca.c and admin.c)
+      roles.default.machines.web-server.settings = {
+        ip = "192.168.1.20"; # IP of your web server
+        services = [ "ca" "admin" ];
+      };
+    };
+  };
+};
+```
+
+### Configure the Certificates Service
+
+The certificates service also has two roles:
+
+- `ca`: Sets up the certificate authority on a server
+- `default`: Makes machines trust the CA and allows them to request certificates
+
+Add this to your inventory:
+
+```nix
+inventory = {
+  instances = {
+    # ... coredns configuration from above ...
+
+    certificates = {
+
+      # Set up CA for .c domain
+      roles.ca.machines.dns-server.settings = {
+        tlds = [ "c" ];
+        acmeEmail = "admin@example.com"; # Optional: your email
+      };
+
+      # Add default role to all machines to trust the CA
+      roles.default.tags = [ "all" ];
+    };
+  };
+};
+```
+
+### Complete Example Configuration
+
+Here's a complete working example:
+
+```nix
+inventory = {
+  machines = {
+    caserver = { }; # DNS server + CA + web services
+    webserver = { }; # Additional web services
+    client = { }; # Client machine
+  };
+
+  instances = {
+    coredns = {
+
+      # Add the default role to all machines
+      roles.default.tags = [ "all" ];
+
+      # DNS server for the .c TLD
+      roles.server.machines.caserver.settings = {
+        ip = "192.168.8.5";
+        tld = "c";
+      };
+
+      # machine hosting https://ca.c (our CA for SSL)
+      roles.default.machines.caserver.settings = {
+        ip = "192.168.8.5";
+        services = [ "ca" ];
+      };
+
+      # machine hosting https://blub.c (some internal web-service)
+      roles.default.machines.webserver.settings = {
+        ip = "192.168.8.6";
+        services = [ "blub" ];
+      };
+    };
+
+    # Provide https for the .c top-level domain
+    certificates = {
+
+      roles.ca.machines.caserver.settings = {
+        tlds = [ "c" ];
+        acmeEmail = "admin@example.com";
+      };
+
+      roles.default.tags = [ "all" ];
+    };
+  };
+};
+```
+
+## Testing Your Configuration
+
+DNS resolution can be tested with:
+
+```bash
+# On any clan machine, test DNS resolution
+nslookup ca.c
+nslookup blub.c
+```
+
+You should also now be able to visit `https://ca.c` to access the certificate authority or visit `https://blub.c` to access your web service.
+
+## Troubleshooting
+
+### DNS Resolution Issues
+
+1. **Check if DNS server is running**:
+   ```bash
+   # On the DNS server machine
+   systemctl status coredns
+   ```
+
+2. **Verify DNS configuration**:
+   ```bash
+   # Check if the right nameservers are configured
+   cat /etc/resolv.conf
+   systemctl status systemd-resolved
+   ```
+
+3. **Test DNS directly**:
+   ```bash
+   # Query the DNS server directly
+   dig @192.168.8.5 ca.c
+   ```
+
+### Certificate Issues
+
+1. **Check CA status**:
+   ```bash
+   # On the CA machine
+   systemctl status step-ca
+   systemctl status nginx
+   ```
+
+2. **Verify certificate trust**:
+   ```bash
+   # Test certificate trust
+   curl -v https://ca.c
+   openssl s_client -connect ca.c:443 -verify_return_error
+   ```
+
+3. **Check ACME configuration**:
+   ```bash
+   # View ACME certificates
+   ls /var/lib/acme/
+   journalctl -u acme-ca.c.service
+   ```
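Beyond `nslookup` and `curl`, both halves of the setup in this new guide can be verified in one step. A sketch (assumes Python 3 on a clan machine; `ca.c` is the hostname from the example above):

```python
import socket
import ssl


def check_internal_service(host: str, port: int = 443) -> None:
    # DNS: .c names should resolve through the clan's CoreDNS server
    addr = socket.getaddrinfo(host, port)[0][4][0]
    print(f"{host} resolves to {addr}")

    # TLS: the handshake succeeds only if the clan CA is trusted locally
    ctx = ssl.create_default_context()
    with socket.create_connection((host, port), timeout=5) as sock:
        with ctx.wrap_socket(sock, server_hostname=host) as tls:
            print(f"issuer: {tls.getpeercert()['issuer']}")


check_internal_service("ca.c")
```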
@@ -5,11 +5,11 @@
 ## Option 1: Follow `clan-core`

 - **Pros**:
-  - Recommended for most users.
-  - Verified by our CI and widely used by others.
+  - Recommended for most users.
+  - Verified by our CI and widely used by others.
 - **Cons**:
-  - Coupled to version bumps in `clan-core`.
-  - Upstream features and packages may take longer to land.
+  - Coupled to version bumps in `clan-core`.
+  - Upstream features and packages may take longer to land.

 Example:

@@ -24,10 +24,10 @@ inputs = {
 ## Option 2: Use Your Own `nixpkgs` Version

 - **Pros**:
-  - Faster access to new upstream features and packages.
+  - Faster access to new upstream features and packages.
 - **Cons**:
-  - Recommended for advanced users.
-  - Not covered by our CI — you’re on the frontier.
+  - Recommended for advanced users.
+  - Not covered by our CI — you’re on the frontier.

 Example:
@@ -303,3 +303,4 @@ instnaces.machine-type = {
 - [Reference Documentation for Service Authors](../../reference/options/clan_service.md)
 - [Migration Guide from ClanModules to ClanServices](../../guides/migrations/migrate-inventory-services.md)
 - [Decision that lead to ClanServices](../../decisions/01-Clan-Modules.md)
+- [Testing Guide for Services with Vars](../contributing/testing.md#testing-services-with-vars)
@@ -77,6 +77,8 @@
         };
       };
     };
+    # Allows downstream users to inject "unsupported" nixpkgs versions
+    checks.minNixpkgsVersion.ignore = true;
   };
   systems = import systems;
   imports = [
@@ -91,6 +93,7 @@
     ./clanServices/flake-module.nix
     ./devShell.nix
     ./docs/nix/flake-module.nix
+    ./site/flake-module.nix
     ./flakeModules/demo_iso.nix
     ./flakeModules/flake-module.nix
     ./lib/filter-clan-core/flake-module.nix
@@ -52,8 +52,6 @@
     "checks/secrets/sops/groups/group/machines/machine"
     "checks/syncthing/introducer/introducer_device_id"
     "checks/syncthing/introducer/introducer_test_api"
-    "docs/site/static/asciinema-player/asciinema-player.css"
-    "docs/site/static/asciinema-player/asciinema-player.min.js"
     "nixosModules/clanCore/vars/secret/sops/eval-tests/populated/vars/my_machine/my_generator/my_secret"
     "pkgs/clan-cli/clan_cli/tests/data/gnupg.conf"
     "pkgs/clan-cli/clan_cli/tests/data/password-store/.gpg-id"
@@ -94,9 +92,6 @@
       "*.yaml"
       "*.yml"
     ];
-    excludes = [
-      "*/asciinema-player/*"
-    ];
   };
   treefmt.programs.mypy.directories = {
     "clan-cli" = {
@@ -149,6 +149,13 @@ let
   # TODO: Add index support in nixpkgs first
   # else if type.name == "listOf" then
   #   handleListOf meta.list
+  else if type.name == "either" then
+    # For either(oneOf) types, we skip introspection as we cannot
+    # determine which branch of the union was taken without more context
+    # This *should* be safe, as it can currently mostly be triggered through
+    # The `extraModules` setting of inventory modules and seems to be better
+    # than just aborting entirely.
+    { }
   else
     throw "Yet Unsupported type: ${type.name}";
 in
@@ -699,4 +699,44 @@ in
       };
     };
   };
+
+  test_listOf_either =
+    let
+      evaluated = eval [
+        {
+          options.extraModules = lib.mkOption {
+            description = "List of modules that can be strings, paths, or attrsets";
+            default = [ ];
+            type = lib.types.listOf (
+              lib.types.oneOf [
+                lib.types.str
+                lib.types.path
+                (lib.types.attrsOf lib.types.anything)
+              ]
+            );
+          };
+        }
+        ({
+          _file = "config.nix";
+          extraModules = [
+            "modules/common.nix"
+            ./some/path.nix
+            { config = { }; }
+          ];
+        })
+      ];
+      result = slib.getPrios { options = evaluated.options; };
+    in
+    {
+      inherit evaluated;
+      # Test that either types in list items return empty objects
+      # This is a behavioral test and not necessarily the correct
+      # behavior. But this is better than crashing on people directly.
+      expr = result.extraModules.__list;
+      expected = [
+        { }
+        { }
+        { }
+      ];
+    };
 }
@@ -75,13 +75,14 @@ class TestFlake(Flake):
     def path(self) -> Path:
         return self.test_dir

-    def select_machine(self, machine_name: str, selector: str) -> Any:
-        """Select a nix attribute for a specific machine.
+    def machine_selector(self, machine_name: str, selector: str) -> str:
+        """Create a selector for a specific machine.

         Args:
             machine_name: The name of the machine
             selector: The attribute selector string relative to the machine config
-            apply: Optional function to apply to the result
+
+        Returns:
+            The full selector string for the machine

         """
         config = nix_config()
@@ -89,9 +90,7 @@ class TestFlake(Flake):
         test_system = system
         if system.endswith("-darwin"):
             test_system = system.rstrip("darwin") + "linux"

-        full_selector = f'checks."{test_system}".{self.check_attr}.machinesCross.{system}."{machine_name}".{selector}'
-        return self.select(full_selector)
+        return f'checks."{test_system}".{self.check_attr}.machinesCross."{system}"."{machine_name}".{selector}'

     # we don't want to evaluate all machines of the flake. Only the ones defined in the test
     def set_machine_names(self, machine_names: list[str]) -> None:
@@ -158,8 +158,10 @@ def encrypt_secret(
     admin_keys = sops.ensure_admin_public_keys(flake_dir)

     if not admin_keys:
-        # TODO double check the correct command to run
-        msg = "No keys found. Please run 'clan secrets add-key' to add a key."
+        msg = (
+            "No admin keys found.\n\n"
+            "Please run 'clan vars keygen' to generate and set up keys."
+        )
         raise ClanError(msg)

     username = next(iter(admin_keys)).username
@@ -355,7 +355,10 @@ def get_public_age_key_from_private_key(privkey: str) -> str:
     cmd = nix_shell(["age"], ["age-keygen", "-y"])

     error_msg = "Failed to get public key for age private key. Is the key malformed?"
-    res = run(cmd, RunOpts(input=privkey.encode(), error_msg=error_msg))
+    res = run(
+        cmd,
+        RunOpts(input=privkey.encode(), error_msg=error_msg, sensitive_input=True),
+    )
    return res.stdout.rstrip(os.linesep).rstrip()
@@ -1264,68 +1264,117 @@ def test_cache_misses_for_vars_operations(
     flake: ClanFlake,
 ) -> None:
     """Test that vars operations result in minimal cache misses."""
+    # Set up first machine with two generators
     config = flake.machines["my_machine"] = create_test_machine_config()

-    # Set up a simple generator with a public value
-    my_generator = config["clan"]["core"]["vars"]["generators"]["my_generator"]
-    my_generator["files"]["my_value"]["secret"] = False
-    my_generator["script"] = 'echo -n "test_value" > "$out"/my_value'
+    # Set up two generators with public values
+    gen1 = config["clan"]["core"]["vars"]["generators"]["gen1"]
+    gen1["files"]["value1"]["secret"] = False
+    gen1["script"] = 'echo -n "test_value1" > "$out"/value1'
+
+    gen2 = config["clan"]["core"]["vars"]["generators"]["gen2"]
+    gen2["files"]["value2"]["secret"] = False
+    gen2["script"] = 'echo -n "test_value2" > "$out"/value2'
+
+    # Add a second machine with the same generator configuration
+    flake.machines["other_machine"] = config.copy()

     flake.refresh()
     monkeypatch.chdir(flake.path)

-    # Create a fresh machine object to ensure clean cache state
-    machine = Machine(name="my_machine", flake=Flake(str(flake.path)))
+    # Create fresh machine objects to ensure clean cache state
+    flake_obj = Flake(str(flake.path))
+    machine1 = Machine(name="my_machine", flake=flake_obj)
+    machine2 = Machine(name="other_machine", flake=flake_obj)

-    # Test 1: Running vars generate with a fresh cache should result in exactly 3 cache misses
-    # Expected cache misses:
-    # 1. One for getting the list of generators
-    # 2. One for getting the final script of our test generator (my_generator)
-    # 3. One for getting the final script of the state version generator (added by default)
-    # TODO: The third cache miss is undesired in tests. disable state version module for tests
+    # Test 1: Running vars generate for BOTH machines simultaneously should still result in exactly 2 cache misses
+    # Even though we have:
+    # - 2 machines (my_machine and other_machine)
+    # - 2 generators per machine (gen1 and gen2)
+    # We still only get 2 cache misses when generating for both machines:
+    # 1. One for getting the list of generators for both machines
+    # 2. One batched evaluation for getting all generator scripts for both machines
+    # The key insight: the system should batch ALL evaluations across ALL machines into a single nix eval

     run_generators(
-        machines=[machine],
+        machines=[machine1, machine2],
         generators=None,  # Generate all
     )

-    # Print stack traces if we have more than 3 cache misses
-    if machine.flake._cache_misses != 3:
-        machine.flake.print_cache_miss_analysis(
+    # Print stack traces if we have more than 2 cache misses
+    if flake_obj._cache_misses != 2:
+        flake_obj.print_cache_miss_analysis(
             title="Cache miss analysis for vars generate"
         )

-    assert machine.flake._cache_misses == 2, (
-        f"Expected exactly 2 cache misses for vars generate, got {machine.flake._cache_misses}"
+    assert flake_obj._cache_misses == 2, (
+        f"Expected exactly 2 cache misses for vars generate, got {flake_obj._cache_misses}"
     )

-    # Verify the value was generated correctly
-    var_value = get_machine_var(machine, "my_generator/my_value")
-    assert var_value.printable_value == "test_value"
-
     # Test 2: List all vars should result in exactly 1 cache miss
     # Force cache invalidation (this also resets cache miss tracking)
     invalidate_flake_cache(flake.path)
-    machine.flake.invalidate_cache()
+    flake_obj.invalidate_cache()

-    stringify_all_vars(machine)
-    assert machine.flake._cache_misses == 1, (
-        f"Expected exactly 1 cache miss for vars list, got {machine.flake._cache_misses}"
+    stringify_all_vars(machine1)
+    assert flake_obj._cache_misses == 1, (
+        f"Expected exactly 1 cache miss for vars list, got {flake_obj._cache_misses}"
     )

     # Test 3: Getting a specific var with a fresh cache should result in exactly 1 cache miss
     # Force cache invalidation (this also resets cache miss tracking)
     invalidate_flake_cache(flake.path)
-    machine.flake.invalidate_cache()
+    flake_obj.invalidate_cache()

-    var_value = get_machine_var(machine, "my_generator/my_value")
-    assert var_value.printable_value == "test_value"
+    # Only test gen1 for the get operation
+    var_value = get_machine_var(machine1, "gen1/value1")
+    assert var_value.printable_value == "test_value1"

-    assert machine.flake._cache_misses == 1, (
-        f"Expected exactly 1 cache miss for vars get with fresh cache, got {machine.flake._cache_misses}"
+    assert flake_obj._cache_misses == 1, (
+        f"Expected exactly 1 cache miss for vars get with fresh cache, got {flake_obj._cache_misses}"
     )


+@pytest.mark.with_core
+def test_shared_generator_conflicting_definition_raises_error(
+    monkeypatch: pytest.MonkeyPatch,
+    flake_with_sops: ClanFlake,
+) -> None:
+    """Test that vars generation raises an error when two machines have different
+    definitions for the same shared generator.
+    """
+    flake = flake_with_sops
+
+    # Create machine1 with a shared generator
+    machine1_config = flake.machines["machine1"] = create_test_machine_config()
+    shared_gen1 = machine1_config["clan"]["core"]["vars"]["generators"][
+        "shared_generator"
+    ]
+    shared_gen1["share"] = True
+    shared_gen1["files"]["file1"]["secret"] = False
+    shared_gen1["script"] = 'echo "test" > "$out"/file1'
+
+    # Create machine2 with the same shared generator but different files
+    machine2_config = flake.machines["machine2"] = create_test_machine_config()
+    shared_gen2 = machine2_config["clan"]["core"]["vars"]["generators"][
+        "shared_generator"
+    ]
+    shared_gen2["share"] = True
+    shared_gen2["files"]["file2"]["secret"] = False  # Different file name
+    shared_gen2["script"] = 'echo "test" > "$out"/file2'
+
+    flake.refresh()
+    monkeypatch.chdir(flake.path)
+
+    # Attempting to generate vars for both machines should raise an error
+    # because they have conflicting definitions for the same shared generator
+    with pytest.raises(
+        ClanError,
+        match=".*differ.*",
+    ):
+        cli.run(["vars", "generate", "--flake", str(flake.path)])
+
+
 @pytest.mark.with_core
 def test_dynamic_invalidation(
     monkeypatch: pytest.MonkeyPatch,
@@ -144,6 +144,9 @@ class Generator:
         flake.precache(cls.get_machine_selectors(machine_names))

         generators = []
+        shared_generators_raw: dict[
+            str, tuple[str, dict, dict]
+        ] = {}  # name -> (machine_name, gen_data, files_data)

         for machine_name in machine_names:
             # Get all generator metadata in one select (safe fields only)
@@ -165,6 +168,38 @@ class Generator:
             sec_store = machine.secret_vars_store

             for gen_name, gen_data in generators_data.items():
+                # Check for conflicts in shared generator definitions using raw data
+                if gen_data["share"]:
+                    if gen_name in shared_generators_raw:
+                        prev_machine, prev_gen_data, prev_files_data = (
+                            shared_generators_raw[gen_name]
+                        )
+                        # Compare raw data
+                        prev_gen_files = prev_files_data.get(gen_name, {})
+                        curr_gen_files = files_data.get(gen_name, {})
+                        # Build list of differences with details
+                        differences = []
+                        if prev_gen_files != curr_gen_files:
+                            differences.append("files")
+                        if prev_gen_data.get("prompts") != gen_data.get("prompts"):
+                            differences.append("prompts")
+                        if prev_gen_data.get("dependencies") != gen_data.get(
+                            "dependencies"
+                        ):
+                            differences.append("dependencies")
+                        if prev_gen_data.get("validationHash") != gen_data.get(
+                            "validationHash"
+                        ):
+                            differences.append("validation_hash")
+                        if differences:
+                            msg = f"Machines {prev_machine} and {machine_name} have different definitions for shared generator '{gen_name}' (differ in: {', '.join(differences)})"
+                            raise ClanError(msg)
+                    else:
+                        shared_generators_raw[gen_name] = (
+                            machine_name,
+                            gen_data,
+                            files_data,
+                        )
+
                 # Build files from the files_data
                 files = []
                 gen_files = files_data.get(gen_name, {})
@@ -216,6 +251,7 @@ class Generator:
                     _public_store=pub_store,
                     _secret_store=sec_store,
                 )
+
                 generators.append(generator)

         # TODO: This should be done in a non-mutable way.
@@ -245,15 +281,19 @@ class Generator:
             return sec_store.get(self, prompt.name).decode()
         return None

+    def final_script_selector(self, machine_name: str) -> str:
+        if self._flake is None:
+            msg = "Flake cannot be None"
+            raise ClanError(msg)
+        return self._flake.machine_selector(
+            machine_name, f'config.clan.core.vars.generators."{self.name}".finalScript'
+        )
+
     def final_script(self, machine: "Machine") -> Path:
         if self._flake is None:
             msg = "Flake cannot be None"
             raise ClanError(msg)
-        output = Path(
-            machine.select(
-                f'config.clan.core.vars.generators."{self.name}".finalScript',
-            ),
-        )
+        output = Path(self._flake.select(self.final_script_selector(machine.name)))
         if tmp_store := nix_test_store():
             output = tmp_store.joinpath(*output.parts[1:])
         return output
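For orientation, the selector that `final_script_selector` produces is the machine-scoped selector from `Flake.machine_selector` (changed later in this diff) with the generator's `finalScript` attribute appended. Illustrative values, assuming an x86_64-linux system, a machine `jon`, and a generator `my_generator`:

```python
# Hypothetical names; the system string comes from nix_config()["system"].
selector = (
    'clanInternals.machines."x86_64-linux"."jon"'
    '.config.clan.core.vars.generators."my_generator".finalScript'
)
```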
@@ -294,6 +294,8 @@ class RunOpts:
     # This is needed for GUI applications
     graphical_perm: bool = False
     trace: bool = True
+    # Mark input as sensitive to prevent it from being logged (e.g., private keys, passwords)
+    sensitive_input: bool = False


 def cmd_with_root(cmd: list[str], graphical: bool = False) -> list[str]:
@@ -349,7 +351,10 @@ def run(

     if cmdlog.isEnabledFor(logging.DEBUG) and options.trace:
         if options.input and isinstance(options.input, bytes):
-            if any(
+            # Always redact sensitive input (e.g., private keys, passwords)
+            if options.sensitive_input:
+                filtered_input = "<<REDACTED>>"
+            elif any(
                 not ch.isprintable() for ch in options.input.decode("ascii", "replace")
             ):
                 filtered_input = "<<binary_blob>>"
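Callers that pipe key material through `run` opt in to redaction via the new flag. A minimal sketch of the calling convention (placeholder key; mirrors the `age-keygen` call earlier in this diff):

```python
opts = RunOpts(
    input=b"AGE-SECRET-KEY-1...",  # placeholder, not a real key
    error_msg="Failed to derive public key",
    sensitive_input=True,  # debug logs now show <<REDACTED>> instead of stdin
)
res = run(["age-keygen", "-y"], opts)
print(res.stdout.strip())
```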
@@ -1132,6 +1132,20 @@ class Flake:

         return self._cache.select(selector)

+    def machine_selector(self, machine_name: str, selector: str) -> str:
+        """Create a selector for a specific machine.
+
+        Args:
+            machine_name: The name of the machine
+            selector: The attribute selector string relative to the machine config
+
+        Returns:
+            The full selector string for the machine
+
+        """
+        config = nix_config()
+        system = config["system"]
+        return f'clanInternals.machines."{system}"."{machine_name}".{selector}'
+
     def select_machine(self, machine_name: str, selector: str) -> Any:
         """Select a nix attribute for a specific machine.

@@ -1141,11 +1155,7 @@ class Flake:
             apply: Optional function to apply to the result

         """
-        config = nix_config()
-        system = config["system"]
-
-        full_selector = f'clanInternals.machines."{system}"."{machine_name}".{selector}'
-        return self.select(full_selector)
+        return self.select(self.machine_selector(machine_name, selector))

     def list_machines(
         self,
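Splitting selector construction out of `select_machine` is what makes batching possible: callers can collect selector strings first and evaluate them in one pass. A sketch of both styles (assumes a `Flake` instance named `flake` and machine names that exist in it):

```python
# One-off lookup: builds the selector and evaluates immediately.
host = flake.select_machine("jon", 'config.clan.core.networking."targetHost"')

# Batched lookups: build all selectors first, warm the cache in one
# evaluation, then the individual select() calls are cache hits.
selectors = [
    flake.machine_selector(name, "config.system.stateVersion")
    for name in ("jon", "sara")
]
flake.precache(selectors)
values = [flake.select(s) for s in selectors]
```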
@@ -136,92 +136,123 @@ def networks_from_flake(flake: Flake) -> dict[str, Network]:
     return networks


-@contextmanager
-def get_best_remote(machine: "Machine") -> Iterator["Remote"]:
-    """Context manager that yields the best remote connection for a machine following this priority:
-    1. If machine has targetHost in inventory, return a direct connection
-    2. Return the highest priority network where machine is reachable
-    3. If no network works, try to get targetHost from machine nixos config
-
-    Args:
-        machine: Machine instance to connect to
-
-    Yields:
-        Remote object for connecting to the machine
-
-    Raises:
-        ClanError: If no connection method works
-
-    """
-    # Step 1: Check if targetHost is set in inventory
-    inv_machine = machine.get_inv_machine()
-    target_host = inv_machine.get("deploy", {}).get("targetHost")
-
-    if target_host:
-        log.debug(f"Using targetHost from inventory for {machine.name}: {target_host}")
-        # Create a direct network with just this machine
-        remote = Remote.from_ssh_uri(machine_name=machine.name, address=target_host)
-        yield remote
-        return
-
-    # Step 2: Try existing networks by priority
-    try:
-        networks = networks_from_flake(machine.flake)
-
-        sorted_networks = sorted(networks.items(), key=lambda x: -x[1].priority)
-
-        for network_name, network in sorted_networks:
-            if machine.name not in network.peers:
-                continue
-
-            # Check if network is running and machine is reachable
-            log.debug(f"trying to connect via {network_name}")
-            if network.is_running():
-                try:
-                    ping_time = network.ping(machine.name)
-                    if ping_time is not None:
-                        log.info(
-                            f"Machine {machine.name} reachable via {network_name} network",
-                        )
-                        yield network.remote(machine.name)
-                        return
-                except ClanError as e:
-                    log.debug(f"Failed to reach {machine.name} via {network_name}: {e}")
-            else:
-                try:
-                    log.debug(f"Establishing connection for network {network_name}")
-                    with network.module.connection(network) as connected_network:
-                        ping_time = connected_network.ping(machine.name)
-                        if ping_time is not None:
-                            log.info(
-                                f"Machine {machine.name} reachable via {network_name} network after connection",
-                            )
-                            yield connected_network.remote(machine.name)
-                            return
-                except ClanError as e:
-                    log.debug(
-                        f"Failed to establish connection to {machine.name} via {network_name}: {e}",
-                    )
-    except (ImportError, AttributeError, KeyError) as e:
-        log.debug(f"Failed to use networking modules to determine machines remote: {e}")
-
-    # Step 3: Try targetHost from machine nixos config
-    target_host = machine.select('config.clan.core.networking."targetHost"')
-    if target_host:
-        log.debug(
-            f"Using targetHost from machine config for {machine.name}: {target_host}",
-        )
-        # Check if reachable
-        remote = Remote.from_ssh_uri(
-            machine_name=machine.name,
-            address=target_host,
-        )
-        yield remote
-        return
-
-    # No connection method found
-    msg = f"Could not find any way to connect to machine '{machine.name}'. No targetHost configured and machine not reachable via any network."
-    raise ClanError(msg)
+class BestRemoteContext:
+    """Class-based context manager for establishing and maintaining network connections."""
+
+    def __init__(self, machine: "Machine") -> None:
+        self.machine = machine
+        self._network_ctx: Any = None
+        self._remote: Remote | None = None
+
+    def __enter__(self) -> "Remote":
+        """Establish the best remote connection for a machine following this priority:
+        1. If machine has targetHost in inventory, return a direct connection
+        2. Return the highest priority network where machine is reachable
+        3. If no network works, try to get targetHost from machine nixos config
+
+        Returns:
+            Remote object for connecting to the machine
+
+        Raises:
+            ClanError: If no connection method works
+
+        """
+        # Step 1: Check if targetHost is set in inventory
+        inv_machine = self.machine.get_inv_machine()
+        target_host = inv_machine.get("deploy", {}).get("targetHost")
+
+        if target_host:
+            log.debug(
+                f"Using targetHost from inventory for {self.machine.name}: {target_host}"
+            )
+            self._remote = Remote.from_ssh_uri(
+                machine_name=self.machine.name, address=target_host
+            )
+            return self._remote
+
+        # Step 2: Try existing networks by priority
+        try:
+            networks = networks_from_flake(self.machine.flake)
+            sorted_networks = sorted(networks.items(), key=lambda x: -x[1].priority)
+
+            for network_name, network in sorted_networks:
+                if self.machine.name not in network.peers:
+                    continue
+
+                # Check if network is running and machine is reachable
+                log.debug(f"trying to connect via {network_name}")
+                if network.is_running():
+                    try:
+                        ping_time = network.ping(self.machine.name)
+                        if ping_time is not None:
+                            log.info(
+                                f"Machine {self.machine.name} reachable via {network_name} network",
+                            )
+                            self._remote = remote = network.remote(self.machine.name)
+                            return remote
+                    except ClanError as e:
+                        log.debug(
+                            f"Failed to reach {self.machine.name} via {network_name}: {e}"
+                        )
+                else:
+                    try:
+                        log.debug(f"Establishing connection for network {network_name}")
+                        # Enter the network context and keep it alive
+                        self._network_ctx = network.module.connection(network)
+                        connected_network = self._network_ctx.__enter__()
+                        ping_time = connected_network.ping(self.machine.name)
+                        if ping_time is not None:
+                            log.info(
+                                f"Machine {self.machine.name} reachable via {network_name} network after connection",
+                            )
+                            self._remote = remote = connected_network.remote(
+                                self.machine.name
+                            )
+                            return remote
+                        # Ping failed, clean up this connection attempt
+                        self._network_ctx.__exit__(None, None, None)
+                        self._network_ctx = None
+                    except ClanError as e:
+                        # Clean up failed connection attempt
+                        if self._network_ctx is not None:
+                            self._network_ctx.__exit__(None, None, None)
+                            self._network_ctx = None
+                        log.debug(
+                            f"Failed to establish connection to {self.machine.name} via {network_name}: {e}",
+                        )
+        except (ImportError, AttributeError, KeyError) as e:
+            log.debug(
+                f"Failed to use networking modules to determine machines remote: {e}"
+            )
+
+        # Step 3: Try targetHost from machine nixos config
+        target_host = self.machine.select('config.clan.core.networking."targetHost"')
+        if target_host:
+            log.debug(
+                f"Using targetHost from machine config for {self.machine.name}: {target_host}",
+            )
+            self._remote = Remote.from_ssh_uri(
+                machine_name=self.machine.name,
+                address=target_host,
+            )
+            return self._remote
+
+        # No connection method found
+        msg = f"Could not find any way to connect to machine '{self.machine.name}'. No targetHost configured and machine not reachable via any network."
+        raise ClanError(msg)
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: object,
+    ) -> None:
+        """Clean up network connection if one was established."""
+        if self._network_ctx is not None:
+            self._network_ctx.__exit__(exc_type, exc_val, exc_tb)
+
+
+def get_best_remote(machine: "Machine") -> BestRemoteContext:
+    return BestRemoteContext(machine)


 def get_network_overview(networks: dict[str, Network]) -> dict:
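Call sites are unaffected by this rewrite: `get_best_remote` still returns a context manager. A usage sketch (assumes a `Machine` instance named `machine`):

```python
with get_best_remote(machine) as remote:
    # Any network connection opened in __enter__ is tracked on the context
    # object and stays up for this whole block.
    print(f"connected to {machine.name} via {remote}")
# __exit__ tears down the network context, if one was established.
```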
@@ -177,13 +177,25 @@ def run_generators(
     for machine in machines:
         _ensure_healthy(machine=machine)

+    # get the flake via any machine (they are all the same)
+    flake = machines[0].flake
+
+    def get_generator_machine(generator: Generator) -> Machine:
+        if generator.machine is None:
+            # return first machine if generator is not tied to a specific one
+            return machines[0]
+        return Machine(name=generator.machine, flake=flake)
+
+    # preheat the select cache, to reduce repeated calls during execution
+    selectors = []
+    for generator in generator_objects:
+        machine = get_generator_machine(generator)
+        selectors.append(generator.final_script_selector(machine.name))
+    flake.precache(selectors)
+
     # execute generators
     for generator in generator_objects:
-        machine = (
-            machines[0]
-            if generator.machine is None
-            else Machine(name=generator.machine, flake=machines[0].flake)
-        )
+        machine = get_generator_machine(generator)
         if check_can_migrate(machine, generator):
             migrate_files(machine, generator)
         else:
pkgs/docs-site/.envrc (new file, 8 lines)
@@ -0,0 +1,8 @@
+# shellcheck shell=bash
+source_up
+
+mapfile -d '' -t nix_files < <(find ./nix -name "*.nix" -print0)
+watch_file "${nix_files[@]}"
+
+# Because we depend on nixpkgs sources, uploading to builders takes a long time
+use flake .#docs-site --builders ''
pkgs/docs-site/.gitignore (vendored, new file, 29 lines)
@@ -0,0 +1,29 @@
+node_modules
+
+# Output
+.output
+.vercel
+.netlify
+.wrangler
+/.svelte-kit
+/build
+/static/pagefind
+
+# Env
+.env
+.env.*
+!.env.example
+!.env.test
+
+# Vite
+vite.config.js.timestamp-*
+vite.config.ts.timestamp-*
+
+
+# Generated docs
+src/routes/docs/reference/options
+src/routes/docs/reference/clan.core
+src/routes/docs/services/official
+
+# Icons and other assets
+static/icons
pkgs/docs-site/.npmrc (new file, 1 line)
@@ -0,0 +1 @@
+engine-strict=true
pkgs/docs-site/.postcssrc.json (new file, 6 lines)
@@ -0,0 +1,6 @@
+{
+  "plugins": {
+    "postcss-preset-env": {},
+    "cssnano": { "preset": "default" }
+  }
+}
pkgs/docs-site/.prettierignore (new file, 6 lines)
@@ -0,0 +1,6 @@
+# Package Managers
+package-lock.json
+pnpm-lock.yaml
+yarn.lock
+bun.lock
+bun.lockb
pkgs/docs-site/.prettierrc (new file, 11 lines)
@@ -0,0 +1,11 @@
+{
+  "plugins": ["prettier-plugin-svelte"],
+  "overrides": [
+    {
+      "files": "*.svelte",
+      "options": {
+        "parser": "svelte"
+      }
+    }
+  ]
+}
pkgs/docs-site/README.md (new file, 38 lines)
@@ -0,0 +1,38 @@
+# sv
+
+Everything you need to build a Svelte project, powered by [`sv`](https://github.com/sveltejs/cli).
+
+## Creating a project
+
+If you're seeing this, you've probably already done this step. Congrats!
+
+```sh
+# create a new project in the current directory
+npx sv create
+
+# create a new project in my-app
+npx sv create my-app
+```
+
+## Developing
+
+Once you've created a project and installed dependencies with `npm install` (or `pnpm install` or `yarn`), start a development server:
+
+```sh
+npm run dev
+
+# or start the server and open the app in a new browser tab
+npm run dev -- --open
+```
+
+## Building
+
+To create a production version of your app:
+
+```sh
+npm run build
+```
+
+You can preview the production build with `npm run preview`.
+
+> To deploy your app, you may need to install an [adapter](https://svelte.dev/docs/kit/adapters) for your target environment.
pkgs/docs-site/default.nix (new file, 33 lines)
@@ -0,0 +1,33 @@
+{
+  buildNpmPackage,
+  importNpmLock,
+  nodejs_latest,
+  module-docs,
+}:
+buildNpmPackage {
+  pname = "clan-site";
+  version = "0.0.1";
+  nodejs = nodejs_latest;
+  src = ./.;
+
+  npmDeps = importNpmLock {
+    npmRoot = ./.;
+  };
+
+  npmConfigHook = importNpmLock.npmConfigHook;
+
+  preBuild = ''
+    # Copy generated reference docs
+    mkdir -p src/routes/docs/reference
+    cp -r ${module-docs}/reference/* src/routes/docs/reference
+
+    mkdir -p src/routes/docs/services
+    cp -r ${module-docs}/services/* src/routes/docs/services
+
+    chmod +w -R src/routes/docs/reference
+
+    mkdir -p static/icons
+    cp -af ${../clan-app/ui/icons}/* ./static/icons
+    chmod +w -R static/icons
+  '';
+}
pkgs/docs-site/flake-module.nix (new file, 12 lines)
@@ -0,0 +1,12 @@
+{
+  perSystem =
+    { pkgs, self', ... }:
+    {
+      packages.docs-site = pkgs.callPackage ./default.nix { inherit (self'.packages) module-docs; };
+
+      devShells.docs-site = pkgs.mkShell {
+        shellHook = self'.packages.docs-site.preBuild;
+        inputsFrom = [ self'.packages.docs-site ];
+      };
+    };
+}
pkgs/docs-site/package-lock.json (generated, new file, 10128 lines)
File diff suppressed because it is too large.
pkgs/docs-site/package.json (new file, 56 lines)
@@ -0,0 +1,56 @@
+{
+  "name": "clan-site",
+  "private": true,
+  "version": "0.0.1",
+  "type": "module",
+  "scripts": {
+    "dev": "vite dev",
+    "build": "vite build && pagefind --site build",
+    "preview": "vite preview",
+    "prepare": "svelte-kit sync || echo ''",
+    "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
+    "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch",
+    "format": "prettier --write .",
+    "lint": "prettier --check ."
+  },
+  "devDependencies": {
+    "@fontsource-variable/geist": "^5.2.8",
+    "@shikijs/rehype": "^3.13.0",
+    "@shikijs/transformers": "^3.13.0",
+    "@sveltejs/adapter-static": "^3.0.10",
+    "@sveltejs/kit": "^2.43.2",
+    "@sveltejs/vite-plugin-svelte": "^6.2.0",
+    "@types/node": "^24.7.0",
+    "cssnano": "^7.1.1",
+    "github-slugger": "^2.0.0",
+    "hast": "^0.0.2",
+    "hast-util-heading-rank": "^3.0.0",
+    "hast-util-to-string": "^3.0.1",
+    "hastscript": "^9.0.1",
+    "mdast": "^2.3.2",
+    "mdast-util-from-markdown": "^2.0.2",
+    "mdast-util-to-hast": "^13.2.0",
+    "mdast-util-toc": "^7.1.0",
+    "pagefind": "^1.4.0",
+    "postcss-preset-env": "^10.4.0",
+    "prettier": "^3.6.2",
+    "prettier-plugin-svelte": "^3.4.0",
+    "rehype-autolink-headings": "^7.1.0",
+    "rehype-stringify": "^10.0.1",
+    "remark": "^15.0.1",
+    "remark-directive": "^4.0.0",
+    "remark-gfm": "^4.0.1",
+    "remark-rehype": "^11.1.2",
+    "remark-stringify": "^11.0.0",
+    "remark-toc": "^9.0.0",
+    "svelte": "^5.39.5",
+    "svelte-check": "^4.3.2",
+    "typescript": "^5.9.2",
+    "unified": "^11.0.5",
+    "unist-util-visit": "^5.0.0",
+    "vfile": "^6.0.3",
+    "vfile-matter": "^5.0.1",
+    "vite": "^7.1.7",
+    "vite-plugin-pagefind": "^1.0.7"
+  }
+}
pkgs/docs-site/src/app.d.ts (vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
+// See https://svelte.dev/docs/kit/types#app.d.ts
+// for information about these interfaces
+declare global {
+  namespace App {
+    // interface Error {}
+    // interface Locals {}
+    // interface PageData {}
+    // interface PageState {}
+    // interface Platform {}
+  }
+}
+
+export {};
pkgs/docs-site/src/app.html (new file, 14 lines)
@@ -0,0 +1,14 @@
+<!doctype html>
+<html lang="en">
+  <head>
+    <meta charset="utf-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1" />
+    <script>
+      document.documentElement.classList.add("js");
+    </script>
+    %sveltekit.head%
+  </head>
+  <body data-sveltekit-preload-data="hover">
+    <div style="display: contents">%sveltekit.body%</div>
+  </body>
+</html>
27
pkgs/docs-site/src/config.ts
Normal file
@@ -0,0 +1,27 @@
import type { RawNavItem } from "$lib";

export default {
  navItems: [
    {
      label: "Getting Started",
      items: ["/getting-started/add-machines"],
    },
    {
      label: "Reference",
      items: [
        {
          label: "Overview",
          slug: "/reference/overview",
        },
        {
          label: "Options",
          autogenerate: { directory: "/reference/options" },
        },
      ],
    },
    {
      label: "Test",
      link: "/test/overview",
    },
  ] as RawNavItem[],
};
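The entries above exercise three of the `RawNavItem` shapes accepted by `src/lib/docs.ts` (shown later in this diff): a bare slug string, a labeled group with nested `items` (optionally `autogenerate`d), and an explicit `link`. A minimal sketch of the remaining shape, a `slug` object with a label override (the path is a hypothetical example):

```ts
import type { RawNavItem } from "$lib";

// A slug entry resolves its label from the article's frontmatter title
// unless `label` is given; "/guides/secrets" is a hypothetical path.
const item: RawNavItem = {
  label: "Managing Secrets",
  slug: "/guides/secrets",
};
```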
1
pkgs/docs-site/src/lib/assets/favicon.svg
Normal file
@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="107" height="128" viewBox="0 0 107 128"><title>svelte-logo</title><path d="M94.157 22.819c-10.4-14.885-30.94-19.297-45.792-9.835L22.282 29.608A29.92 29.92 0 0 0 8.764 49.65a31.5 31.5 0 0 0 3.108 20.231 30 30 0 0 0-4.477 11.183 31.9 31.9 0 0 0 5.448 24.116c10.402 14.887 30.942 19.297 45.791 9.835l26.083-16.624A29.92 29.92 0 0 0 98.235 78.35a31.53 31.53 0 0 0-3.105-20.232 30 30 0 0 0 4.474-11.182 31.88 31.88 0 0 0-5.447-24.116" style="fill:#ff3e00"/><path d="M45.817 106.582a20.72 20.72 0 0 1-22.237-8.243 19.17 19.17 0 0 1-3.277-14.503 18 18 0 0 1 .624-2.435l.49-1.498 1.337.981a33.6 33.6 0 0 0 10.203 5.098l.97.294-.09.968a5.85 5.85 0 0 0 1.052 3.878 6.24 6.24 0 0 0 6.695 2.485 5.8 5.8 0 0 0 1.603-.704L69.27 76.28a5.43 5.43 0 0 0 2.45-3.631 5.8 5.8 0 0 0-.987-4.371 6.24 6.24 0 0 0-6.698-2.487 5.7 5.7 0 0 0-1.6.704l-9.953 6.345a19 19 0 0 1-5.296 2.326 20.72 20.72 0 0 1-22.237-8.243 19.17 19.17 0 0 1-3.277-14.502 17.99 17.99 0 0 1 8.13-12.052l26.081-16.623a19 19 0 0 1 5.3-2.329 20.72 20.72 0 0 1 22.237 8.243 19.17 19.17 0 0 1 3.277 14.503 18 18 0 0 1-.624 2.435l-.49 1.498-1.337-.98a33.6 33.6 0 0 0-10.203-5.1l-.97-.294.09-.968a5.86 5.86 0 0 0-1.052-3.878 6.24 6.24 0 0 0-6.696-2.485 5.8 5.8 0 0 0-1.602.704L37.73 51.72a5.42 5.42 0 0 0-2.449 3.63 5.79 5.79 0 0 0 .986 4.372 6.24 6.24 0 0 0 6.698 2.486 5.8 5.8 0 0 0 1.602-.704l9.952-6.342a19 19 0 0 1 5.295-2.328 20.72 20.72 0 0 1 22.237 8.242 19.17 19.17 0 0 1 3.277 14.503 18 18 0 0 1-8.13 12.053l-26.081 16.622a19 19 0 0 1-5.3 2.328" style="fill:#fff"/></svg>
296
pkgs/docs-site/src/lib/docs.ts
Normal file
@@ -0,0 +1,296 @@
import config from "~/config";
import type {
  Markdown,
  Frontmatter as MarkdownFrontmatter,
  Heading as MarkdownHeading,
} from "./markdown";

export type Article = Markdown & {
  path: string;
  frontmatter: Frontmatter;
  toc: Heading[];
};

export type Frontmatter = MarkdownFrontmatter & {
  previous?: ArticleSibling;
  next?: ArticleSibling;
};

export type ArticleSibling = {
  label: string;
  link: string;
};

export type Heading = MarkdownHeading;

export class Docs {
  #allArticles: Record<string, () => Promise<Markdown>> = {};
  #loadedArticles: Record<string, Article> = {};
  navItems: NavItem[] = [];

  async init() {
    this.#allArticles = Object.fromEntries(
      Object.entries(import.meta.glob<Markdown>("../routes/docs/**/*.md")).map(
        ([key, fn]) => [key.slice("../routes/docs".length, -".md".length), fn],
      ),
    );
    this.navItems = await Promise.all(
      config.navItems.map((navItem) => this.#normalizeNavItem(navItem)),
    );
    return this;
  }

  async getArticle(path: string): Promise<Article | null> {
    const article = this.#loadedArticles[path];
    if (article) {
      return article;
    }
    const loadArticle = this.#allArticles[path];
    if (!loadArticle) {
      return null;
    }
    return this.#normalizeArticle(await loadArticle(), path);
  }

  async getArticles(paths: string[]): Promise<(Article | null)[]> {
    return await Promise.all(paths.map((path) => this.getArticle(path)));
  }

  async #normalizeNavItem(navItem: RawNavItem): Promise<NavItem> {
    if (typeof navItem === "string") {
      const article = await this.getArticle(navItem);
      if (!article) {
        throw new Error(`Doc not found: ${navItem}`);
      }
      return {
        label: article.frontmatter.title,
        link: navItem,
        external: false,
      };
    }

    if ("items" in navItem) {
      return {
        ...navItem,
        collapsed: !!navItem.collapsed,
        badge: normalizeBadge(navItem.badge),
        items: await Promise.all(
          navItem.items.map((navItem) => this.#normalizeNavItem(navItem)),
        ),
      };
    }

    if ("slug" in navItem) {
      const article = await this.getArticle(navItem.slug);
      if (!article) {
        throw new Error(`Doc not found: ${navItem.slug}`);
      }
      return {
        label: navItem.label ?? article.frontmatter.title,
        link: navItem.slug,
        badge: normalizeBadge(navItem.badge),
        external: false,
      };
    }

    if ("autogenerate" in navItem) {
      const paths = Object.keys(this.#allArticles).filter((path) =>
        path.startsWith(navItem.autogenerate.directory + "/"),
      );
      const articles = (await this.getArticles(paths)) as Article[];

      let titleMissing = false;
      // Check frontmatter for title
      for (const article of articles) {
        if (!article.frontmatter.title) {
          console.error(`Missing # title in doc: ${article.path}`);
          titleMissing = true;
        }
      }
      if (titleMissing) throw new Error("Aborting due to errors.");

      articles.sort((a, b) => {
        const orderA = a.frontmatter.order;
        const orderB = b.frontmatter.order;
        if (orderA != null && orderB != null) {
          return orderA - orderB;
        }
        if (orderA != null) {
          return -1;
        }
        if (orderB != null) {
          return 1;
        }
        const titleA = a.frontmatter.title ?? a.path;
        const titleB = b.frontmatter.title ?? b.path;
        return titleA.localeCompare(titleB);
      });
      const items = await Promise.all(
        articles.map((article) =>
          this.#normalizeNavItem({
            label: article.frontmatter.title,
            link: article.path,
          }),
        ),
      );
      return {
        label:
          navItem.label ?? navItem.autogenerate.directory.split("/").at(-1),
        items,
        collapsed: !!navItem.collapsed,
        badge: normalizeBadge(navItem.badge),
      };
    }

    return {
      ...navItem,
      badge: normalizeBadge(navItem.badge),
      external: /^(https?:)?\/\//.test(navItem.link),
    };
  }

  #normalizeArticle(md: Markdown, path: string): Article {
    let index = -1;
    const navLinks: NavLink[] = [];
    let previous: ArticleSibling | undefined;
    let next: ArticleSibling | undefined;
    visitNavItems(this.navItems, (navItem) => {
      if (!("link" in navItem)) {
        return;
      }
      if (index != -1) {
        next = {
          label: navItem.label,
          link: navItem.link,
        };
        return false;
      }
      if (navItem.link != path) {
        navLinks.push(navItem);
        return;
      }
      index = navLinks.length;
      navLinks.push(navItem);
      if (index != 0) {
        const navLink = navLinks[index - 1];
        previous = {
          label: navLink.label,
          link: navLink.link,
        };
      }
    });
    return {
      ...md,
      path,
      frontmatter: {
        ...md.frontmatter,
        previous,
        next,
      },
      toc: md.toc,
    };
  }
}

export function visit<T extends { children: T[] }>(
  items: T[],
  fn: (item: T, parents: T[]) => false | void,
): void {
  _visit(items, [], fn);
}

function _visit<T extends { children: T[] }>(
  items: T[],
  parents: T[],
  fn: (item: T, parents: T[]) => false | void,
): false | void {
  for (const item of items) {
    if (fn(item, parents) === false) {
      return false;
    }
    if (_visit(item.children, [...parents, item], fn) === false) {
      return false;
    }
  }
}

export type RawNavItem =
  | string
  | {
      label: string;
      items: RawNavItem[];
      collapsed?: boolean;
      badge?: RawBadge;
    }
  | {
      label: string;
      autogenerate: { directory: string };
      collapsed?: boolean;
      badge?: RawBadge;
    }
  | {
      label?: string;
      slug: string;
      badge?: RawBadge;
    }
  | {
      label: string;
      link: string;
      badge?: RawBadge;
    };

export type NavItem = NavGroup | NavLink;

export type NavGroup = {
  label: string;
  items: NavItem[];
  collapsed: boolean;
  badge?: Badge;
};

export type NavLink = {
  label: string;
  link: string;
  badge?: Badge;
  external: boolean;
};

export type RawBadge = string | Badge;

export type Badge = {
  text: string;
  variant: "caution" | "normal";
};

function normalizeBadge(badge: RawBadge | undefined): Badge | undefined {
  if (!badge) {
    return undefined;
  }
  if (typeof badge === "string") {
    return {
      text: badge,
      variant: "normal",
    };
  }
  return badge;
}

function visitNavItems(
  navItems: NavItem[],
  visit: (navItem: NavItem, parents: NavItem[]) => false | void,
): void {
  _visitNavItems(navItems, [], visit);
}

function _visitNavItems(
  navItems: NavItem[],
  parents: NavItem[],
  visit: (heading: NavItem, parents: NavItem[]) => false | void,
): false | void {
  for (const navItem of navItems) {
    if (visit(navItem, parents) === false) {
      return false;
    }
    if ("items" in navItem) {
      if (
        _visitNavItems(navItem.items, [...parents, navItem], visit) === false
      ) {
        return false;
      }
    }
  }
}
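As a quick orientation, a minimal sketch of how this class is consumed; in this diff the actual call sites are `src/routes/+layout.ts` (which builds the instance) and `src/routes/[...path]/+page.ts` (which resolves articles). The path below is a hypothetical example:

```ts
import { Docs } from "$lib";

// Build the nav tree once, then resolve articles on demand.
const docs = await new Docs().init();
const article = await docs.getArticle("/reference/overview");
if (article) {
  console.log(article.frontmatter.title, article.toc.length);
}
```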
1
pkgs/docs-site/src/lib/index.ts
Normal file
@@ -0,0 +1 @@
export * from "./docs";
81
pkgs/docs-site/src/lib/markdown/admonition.css
Normal file
@@ -0,0 +1,81 @@
.md-admonition {
  border-left: 4px solid;
  padding: 1rem;
  margin: 1rem 0;

  .md-admonition-title {
    text-transform: capitalize;
    font-weight: 600;
    display: flex;
    align-items: center;
    justify-content: start;
    gap: 0.5rem;
  }
  .md-admonition-icon {
    display: inline-flex;
    align-items: center;
    justify-content: center;
    width: 1.25rem;
    height: 1.25rem;
    flex-shrink: 0;

    &::before {
      content: "";
      display: block;
      width: 1rem;
      height: 1rem;
      background-color: currentColor;
    }
  }
}

/* TODO: Adjust styling */
.md-admonition.is-note {
  border-left-color: #3b82f6;
  background-color: #eff6ff;
  .md-admonition-title {
    color: #1e40af;
  }
  .md-admonition-icon::before {
    mask: url("/icons/info.svg") no-repeat center;
    mask-size: contain;
  }
}

.md-admonition.is-important {
  border-left-color: #facc15;
  background-color: #fffbeb;
  .md-admonition-title {
    color: #b45309;
  }
  .md-admonition-icon::before {
    mask: url("/icons/attention.svg") no-repeat center;
    mask-size: contain;
  }
}

.md-admonition.is-danger {
  border-left-color: #ef4444;
  background-color: #fef2f2;

  .md-admonition-title {
    color: #b91c1c;
  }
  .md-admonition-icon::before {
    mask: url("/icons/warning-filled.svg") no-repeat center;
    mask-size: contain;
  }
}

.md-admonition.is-tip {
  border-left-color: #10b981;
  background-color: #ecfdf5;

  .md-admonition-title {
    color: #065f46;
  }
  .md-admonition-icon::before {
    mask: url("/icons/heart.svg") no-repeat center;
    mask-size: contain;
  }
}
15
pkgs/docs-site/src/lib/markdown/index.ts
Normal file
@@ -0,0 +1,15 @@
export type Markdown = {
  content: string;
  frontmatter: Frontmatter;
  toc: Heading[];
};

export type Frontmatter = Record<string, any> & {
  title: string;
};

export type Heading = {
  id: string;
  content: string;
  children: Heading[];
};
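For context, these types describe the named exports that the Vite markdown loader (see `src/lib/markdown/vite/index.ts` below) generates for every `.md` file. A sketch of what such an import yields; the file path is hypothetical and a matching module declaration would be needed for this to typecheck:

```ts
// Hypothetical import of a compiled markdown module.
import { content, frontmatter, toc } from "../routes/docs/reference/overview.md";

content; // string: rendered HTML
frontmatter; // Frontmatter: includes the extracted `title`
toc; // Heading[]: nested table of contents
```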
14
pkgs/docs-site/src/lib/markdown/main.css
Normal file
@@ -0,0 +1,14 @@
@import url("./shiki.css");
@import url("./admonition.css");
@import url("./tabs.css");

code {
  font-family:
    ui-monospace, "Cascadia Code", "Source Code Pro", Menlo, Consolas,
    "DejaVu Sans Mono", monospace;
  font-weight: normal;
}

pre {
  overflow: auto;
}
81
pkgs/docs-site/src/lib/markdown/shiki.css
Normal file
@@ -0,0 +1,81 @@
.shiki {
  margin: 0 -15px;
  padding: 15px 0;
  background-color: var(--shiki-light-bg);

  &,
  & span {
    color: var(--shiki-light);
    font-style: var(--shiki-light-font-style);
    font-weight: var(--shiki-light-font-weight);
    text-decoration: var(--shiki-light-text-decoration);
  }

  @media (prefers-color-scheme: dark) {
    &,
    & span,
    html.dark &,
    html.dark & span {
      color: var(--shiki-dark);
      background-color: var(--shiki-dark-bg);
      font-style: var(--shiki-dark-font-style);
      font-weight: var(--shiki-dark-font-weight);
      text-decoration: var(--shiki-dark-text-decoration);
    }
  }

  code {
    display: block;
  }
  .line {
    width: 100%;
    padding: 0 15px;
    display: inline-block;
  }

  /* line numbers */
  &.line-numbers code {
    counter-reset: step;
    counter-increment: step 0;
  }
  &.line-numbers .line::before {
    content: counter(step);
    counter-increment: step;
    width: 1rem;
    margin-right: 1em;
    display: inline-block;
    text-align: right;
    color: rgba(115, 138, 148, 0.4);
  }

  /* indent guides */
  .indent {
    display: inline-block;
    position: relative;
    left: var(--indent-offset);
  }
  .indent:empty {
    height: 1lh;
    vertical-align: bottom;
  }

  .indent::before {
    content: "";
    position: absolute;
    opacity: 0.15;
    width: 1px;
    height: 100%;
    background-color: currentColor;
  }

  /* diff */
  .line.diff.remove {
    background-color: #db0a0a41;
  }
  .line.diff.add {
    background-color: #0adb4954;
  }
  .line.highlighted {
    background-color: #00000024;
  }
}
50
pkgs/docs-site/src/lib/markdown/tabs.css
Normal file
@@ -0,0 +1,50 @@
.md-tabs-bar {
  display: none;
  gap: 7px;
  align-items: flex-end;
}
.md-tabs-tab {
  padding: 8px 0;
}
.md-tabs-container {
  margin: 20px 0;
}
.js {
  .md-tabs-bar {
    display: flex;
  }
  .md-tabs-container {
    margin: 0;
    > .md-tabs-tab {
      display: none;
    }
  }
  .md-tabs {
    margin: 20px 0;
  }
  .md-tabs-tab {
    background: #d7dadf;
    padding: 8px 18px;
    border-top-left-radius: 8px;
    border-top-right-radius: 8px;
    cursor: pointer;
    &.is-active {
      background: #eff1f5;

      .md-tabs.is-singleton & {
        padding: 8px 16px;
        border-top-left-radius: 5px;
        border-top-right-radius: 5px;
        flex: 1;
        border-bottom: 1px solid #d8dbe1;
      }
    }
  }
  .md-tabs-content {
    display: none;
    margin: 0 var(--pageMargin);
    &.is-active {
      display: block;
    }
  }
}
87
pkgs/docs-site/src/lib/markdown/vite/index.ts
Normal file
@@ -0,0 +1,87 @@
import { unified } from "unified";
import { VFile } from "vfile";
import remarkRehype from "remark-rehype";
import rehypeStringify from "rehype-stringify";
import rehypeShiki from "@shikijs/rehype";
import remarkGfm from "remark-gfm";
import remarkDirective from "remark-directive";
import rehypeAutolinkHeadings from "rehype-autolink-headings";
import {
  transformerNotationDiff,
  transformerNotationHighlight,
  transformerRenderIndentGuides,
  transformerMetaHighlight,
} from "@shikijs/transformers";
import type { PluginOption } from "vite";
import rehypeTocSlug from "./rehype-toc-slug";
import transformerLineNumbers from "./shiki-transformer-line-numbers";
import remarkParse from "./remark-parse";
import remarkAdmonition from "./remark-admonition";
import remarkTabs from "./remark-tabs";
import rehypeWrapHeadings from "./rehype-wrap-headings";
import remarkLinkMigration from "./link-migration";

export type Options = {
  codeLightTheme?: string;
  codeDarkTheme?: string;
  minLineNumberLines?: number;
  tocMaxDepth?: number;
};

export default function ({
  codeLightTheme = "catppuccin-latte",
  codeDarkTheme = "catppuccin-macchiato",
  minLineNumberLines = 4,
  tocMaxDepth = 3,
}: Options = {}): PluginOption {
  return {
    name: "markdown-loader",
    async transform(code, id) {
      if (id.slice(-3) !== ".md") return;

      const file = await unified()
        .use(remarkParse)
        .use(remarkLinkMigration)
        .use(remarkGfm)
        .use(remarkDirective)
        .use(remarkAdmonition)
        .use(remarkTabs)
        .use(remarkRehype)
        .use(rehypeTocSlug, {
          tocMaxDepth,
        })
        .use(rehypeWrapHeadings)
        .use(rehypeAutolinkHeadings)
        .use(rehypeShiki, {
          defaultColor: false,
          themes: {
            light: codeLightTheme,
            dark: codeDarkTheme,
          },
          transformers: [
            transformerNotationDiff({
              matchAlgorithm: "v3",
            }),
            transformerNotationHighlight(),
            transformerRenderIndentGuides(),
            transformerMetaHighlight(),
            transformerLineNumbers({
              minLines: minLineNumberLines,
            }),
          ],
        })
        .use(rehypeStringify)
        .process(
          new VFile({
            path: id,
            value: code,
          }),
        );

      return `
export const content = ${JSON.stringify(String(file))};
export const frontmatter = ${JSON.stringify(file.data.matter)};
export const toc = ${JSON.stringify(file.data.toc)};`;
    },
  };
}
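A minimal sketch of how this plugin would be registered; the project's actual `vite.config.ts` is not part of this excerpt, so the import path and plugin ordering here are assumptions:

```ts
import { sveltekit } from "@sveltejs/kit/vite";
import { defineConfig } from "vite";
import markdown from "./src/lib/markdown/vite";

export default defineConfig({
  // Defaults shown explicitly; all options are optional.
  // Placing the loader before sveltekit() is an assumption.
  plugins: [markdown({ tocMaxDepth: 3, minLineNumberLines: 4 }), sveltekit()],
});
```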
26
pkgs/docs-site/src/lib/markdown/vite/link-migration.ts
Normal file
@@ -0,0 +1,26 @@
import { visit } from "unist-util-visit";
import type { Nodes } from "mdast";

/**
 * Rewrites relative links in MkDocs files to point to /docs/...
 *
 * For this to work the relative link must start at the docs root
 */
export default function remarkLinkMigration() {
  return (tree: Nodes) => {
    visit(tree, ["link", "definition"], (node) => {
      if (node.type !== "link" && node.type !== "definition") {
        return;
      }
      // Skip external links, links pointing to /docs already and anchors
      if (!node.url || /^(https?:)?\/\/|mailto:|^#/.test(node.url)) return;

      // Remove repeated leading ../ or ./ and the trailing .md extension
      const cleanUrl = node.url.replace(/^\.\.?|((\.\.?)\/)+|\.md$/g, "");
      if (!cleanUrl.startsWith("/")) {
        throw new Error(`invalid doc link: ${cleanUrl}`);
      }
      node.url = `/docs${cleanUrl}`;
    });
  };
}
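For illustration, how the rewrite above behaves on typical relative links; this standalone helper just replays the same regex, and the paths are hypothetical examples:

```ts
// Mirrors the replace() call in remarkLinkMigration.
const clean = (url: string) =>
  "/docs" + url.replace(/^\.\.?|((\.\.?)\/)+|\.md$/g, "");

console.log(clean("../getting-started/add-machines.md"));
// -> "/docs/getting-started/add-machines"
console.log(clean("./reference/overview.md"));
// -> "/docs/reference/overview"
```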
74
pkgs/docs-site/src/lib/markdown/vite/rehype-toc-slug.ts
Normal file
@@ -0,0 +1,74 @@
import { VFile } from "vfile";
import type { Nodes } from "hast";
import { toString } from "hast-util-to-string";
import GithubSlugger from "github-slugger";
import { visit } from "unist-util-visit";
import { headingRank } from "hast-util-heading-rank";
import type { Heading } from "..";

const startingRank = 1;
/**
 * Adds `id`s to headings and extracts a toc
 */
export default function rehypeTocSlug({
  tocMaxDepth,
}: {
  tocMaxDepth: number;
}) {
  return (tree: Nodes, file: VFile) => {
    const slugger = new GithubSlugger();
    const toc: Heading[] = [];
    let h1Exist = false;
    const parentHeadings: Heading[] = [];
    const frontmatter: Record<string, any> = file.data.matter
      ? file.data.matter
      : {};
    frontmatter.title = "";
    visit(tree, "element", (node) => {
      const rank = headingRank(node);
      if (!rank) return;

      let { id } = node.properties;
      if (id) {
        console.error(
          `WARNING: h${rank} has an existing id, it will be overwritten with an auto-generated one: ${file.path}`,
        );
      }
      const content = toString(node);
      id = node.properties.id = slugger.slug(content);

      if (parentHeadings.length > tocMaxDepth) {
        return;
      }
      if (rank == 1) {
        if (h1Exist) {
          console.error(
            `WARNING: only one "# title" is allowed, ignoring the rest: ${file.path}`,
          );
          return;
        }
        h1Exist = true;
        frontmatter.title = content;
      }
      const heading = { id, content, children: [] };
      const currentRank = parentHeadings.length - 1 + startingRank;
      if (rank > currentRank) {
        (parentHeadings.at(-1)?.children ?? toc).push(heading);
        parentHeadings.push(heading);
      } else if (rank == currentRank) {
        (parentHeadings.at(-2)?.children ?? toc).push(heading);
        parentHeadings.pop();
        parentHeadings.push(heading);
      } else {
        const i = rank - startingRank - 1;
        (parentHeadings[i]?.children ?? toc).push(heading);
        while (parentHeadings.length > i + 1) {
          parentHeadings.pop();
        }
        parentHeadings.push(heading);
      }
    });

    file.data.toc = toc;
  };
}
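For illustration, the toc this plugin would produce for a small document with one `# Install` title and two `##` subsections (the headings are hypothetical):

```ts
import type { Heading } from "..";

// Slugs come from github-slugger; nesting follows heading ranks.
const toc: Heading[] = [
  {
    id: "install",
    content: "Install",
    children: [
      { id: "requirements", content: "Requirements", children: [] },
      { id: "steps", content: "Steps", children: [] },
    ],
  },
];
```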
21
pkgs/docs-site/src/lib/markdown/vite/rehype-wrap-headings.ts
Normal file
@@ -0,0 +1,21 @@
import { visit } from "unist-util-visit";
import { headingRank } from "hast-util-heading-rank";
import type { Nodes } from "hast";

export default function rehypeWrapHeadings() {
  return (tree: Nodes) => {
    visit(tree, "element", (node) => {
      if (!headingRank(node)) {
        return;
      }
      node.children = [
        {
          type: "element",
          tagName: "span",
          properties: {},
          children: node.children,
        },
      ];
    });
  };
}
53
pkgs/docs-site/src/lib/markdown/vite/remark-admonition.ts
Normal file
@@ -0,0 +1,53 @@
import { visit } from "unist-util-visit";
import type { Paragraph, Text, Root } from "mdast";

const names = ["note", "important", "danger", "tip"];

// Adapted from https://github.com/remarkjs/remark-directive
export default function remarkAdmonition() {
  return (tree: Root) => {
    visit(tree, (node) => {
      if (node.type != "containerDirective" || !names.includes(node.name)) {
        return;
      }

      const data = (node.data ||= {});
      data.hName = "div";
      data.hProperties = {
        className: `md-admonition is-${node.name}`,
      };
      let title: string;
      if (node.children?.[0].data?.directiveLabel) {
        const p = node.children.shift() as Paragraph;
        title = (p.children[0] as Text).value;
      } else {
        title = node.name;
      }

      node.children = [
        {
          type: "paragraph",
          data: {
            hName: "div",
            hProperties: { className: ["md-admonition-title"] },
          },
          children: [
            {
              type: "text",
              data: {
                hName: "span",
                hProperties: { className: ["md-admonition-icon"] },
              },
              value: "",
            },
            {
              type: "text",
              value: title,
            },
          ],
        },
        ...node.children,
      ];
    });
  };
}
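For illustration, the author-facing syntax this plugin consumes is remark-directive's container form; the optional label in brackets becomes the admonition title, otherwise the directive name is used (example text is hypothetical):

```ts
// The markdown an author would write for a "note" admonition.
const example = `
:::note[Heads up]
Admonition body in regular markdown.
:::
`;
```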
24
pkgs/docs-site/src/lib/markdown/vite/remark-parse.ts
Normal file
@@ -0,0 +1,24 @@
import type { Data, Processor } from "unified";
import { matter } from "vfile-matter";
import {
  fromMarkdown,
  type Extension as MarkdownExtension,
} from "mdast-util-from-markdown";
import type { Extension } from "micromark-util-types";

export default function remarkParse(this: Processor) {
  const self = this;
  this.parser = (document, file) => {
    matter(file, { strip: true });
    // FIXME: fromMarkdown has a broken type definition, fix it and upstream
    const extensions = (self.data("micromarkExtensions" as unknown as Data) ||
      []) as unknown as Extension[];
    const mdastExtensions = (self.data(
      "fromMarkdownExtensions" as unknown as Data,
    ) || []) as unknown as MarkdownExtension[];
    return fromMarkdown(String(file), {
      extensions,
      mdastExtensions,
    });
  };
}
93
pkgs/docs-site/src/lib/markdown/vite/remark-tabs.ts
Normal file
@@ -0,0 +1,93 @@
import { visit } from "unist-util-visit";
import type { Paragraph, Root, Text } from "mdast";

export default function remarkTabs() {
  return (tree: Root) => {
    visit(tree, (node) => {
      if (node.type != "containerDirective" || node.name != "tabs") {
        return;
      }

      const data = (node.data ||= {});
      data.hName = "div";
      data.hProperties = {
        className: "md-tabs",
      };
      let tabIndex = 0;
      const tabTitles: string[] = [];
      for (const [i, child] of node.children.entries()) {
        if (child.type != "containerDirective" || child.name != "tab") {
          continue;
        }
        let tabTitle: string;
        if (child.children?.[0].data?.directiveLabel) {
          const p = child.children.shift() as Paragraph;
          tabTitle = (p.children[0] as Text).value;
        } else {
          tabTitle = "(empty)";
        }
        tabTitles.push(tabTitle);
        node.children[i] = {
          type: "containerDirective",
          name: "",
          data: {
            hName: "div",
            hProperties: {
              className: "md-tabs-container",
            },
          },
          children: [
            {
              type: "paragraph",
              data: {
                hName: "div",
                hProperties: {
                  className: `md-tabs-tab ${tabIndex == 0 ? "is-active" : ""}`,
                },
              },
              children: [{ type: "text", value: tabTitle }],
            },
            {
              type: "containerDirective",
              name: "",
              data: {
                hName: "div",
                hProperties: {
                  className: `md-tabs-content ${tabIndex == 0 ? "is-active" : ""}`,
                },
              },
              children: child.children,
            },
          ],
        };
        tabIndex++;
      }
      if (tabTitles.length === 1) {
        data.hProperties.className += " is-singleton";
      }
      // Add tab bar for when js is enabled
      node.children = [
        {
          type: "paragraph",
          data: {
            hName: "div",
            hProperties: {
              className: "md-tabs-bar",
            },
          },
          children: tabTitles.map((tabTitle, tabIndex) => ({
            type: "text",
            data: {
              hName: "div",
              hProperties: {
                className: `md-tabs-tab ${tabIndex == 0 ? "is-active" : ""}`,
              },
            },
            value: tabTitle,
          })),
        },
        ...node.children,
      ];
    });
  };
}
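For illustration, the nested directive syntax this plugin consumes; with remark-directive, nested containers need a longer colon fence on the outside, and the bracketed labels become the tab titles (content is hypothetical):

```ts
// The markdown an author would write for a two-tab block.
const example = `
::::tabs
:::tab[Flakes]
Instructions for flake users.
:::
:::tab[Classic]
Instructions without flakes.
:::
::::
`;
```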
26
pkgs/docs-site/src/lib/markdown/vite/shiki-transformer-line-numbers.ts
Normal file
@@ -0,0 +1,26 @@
import type { Element } from "hast";

export default function transformerLineNumbers({
  minLines,
}: {
  minLines: number;
}) {
  return {
    pre(pre: Element) {
      const code = pre.children?.[0] as Element | undefined;
      if (!code) {
        return;
      }
      const lines = code.children.reduce((lines, node) => {
        if (node.type !== "element" || node.properties.class != "line") {
          return lines;
        }
        return lines + 1;
      }, 0);
      if (lines < minLines) {
        return;
      }
      pre.properties.class += " line-numbers";
    },
  };
}
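The transformer only counts hast elements whose class is exactly `line`, so the threshold tracks rendered lines rather than source bytes; the `line-numbers` class it adds is what `shiki.css` above turns into CSS counters. A standalone restatement of the rule (not the real hook):

```ts
// Blocks shorter than `minLines` rendered lines keep their plain style.
const minLines = 4;
const renderedLines = 3; // hypothetical block length
const addLineNumbers = renderedLines >= minLines;
console.log(addLineNumbers); // false: short snippets stay unnumbered
```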
168
pkgs/docs-site/src/routes/+layout.svelte
Normal file
@@ -0,0 +1,168 @@
<script lang="ts">
  import favicon from "$lib/assets/favicon.svg";
  import type { NavItem } from "$lib";
  import { onNavigate } from "$app/navigation";
  import { onMount } from "svelte";
  import type {
    Pagefind,
    PagefindSearchFragment,
  } from "vite-plugin-pagefind/types";
  import "./global.css";

  const { data, children } = $props();
  const docs = $derived(data.docs);
  let menuOpen = $state(false);
  onNavigate(() => {
    menuOpen = false;
    query = "";
    document.documentElement.classList.remove("no-scroll");
  });
  let pagefind: Pagefind | undefined;
  let query = $state("");
  let searchResults: PagefindSearchFragment[] = $state([]);
  onMount(async () => {
    // @ts-expect-error
    pagefind = await import("/pagefind/pagefind.js");
    pagefind!.init();
  });
  $effect(() => {
    (async () => {
      // Read `query` synchronously so the effect re-runs when it changes
      query;
      const search = await pagefind?.debouncedSearch(query);
      if (search) {
        searchResults = await Promise.all(
          search.results.slice(0, 5).map((r) => r.data()),
        );
      }
    })();
  });

  function toggleMenu() {
    menuOpen = !menuOpen;
    window.scrollTo({ top: 0 });
    document.documentElement.classList.toggle("no-scroll", menuOpen);
  }
</script>

<svelte:head>
  <link rel="icon" href={favicon} />
</svelte:head>

<div class="global-bar">
  <span class="logo">Clan Docs</span>
  <nav>
    <div class="search">
      <input type="search" bind:value={query} />
      {#if searchResults.length > 0}
        <ul>
          {#each searchResults as searchResult}
            <li class="search-result">
              <div class="search-result-title">
                <a href={searchResult.url.slice(0, -".html".length)}
                  >{searchResult.meta.title}</a
                >
              </div>
              <div class="search-result-excerpt">
                {@html searchResult.excerpt}
              </div>
            </li>
          {/each}
        </ul>
      {/if}
    </div>
    <div class={["menu", menuOpen && "open"]}>
      <button onclick={toggleMenu}>Menu</button>
      <ul>
        {@render navItems(docs.navItems)}
      </ul>
    </div>
  </nav>
</div>
<main>
  {@render children?.()}
</main>

{#snippet navItems(items: NavItem[])}
  {#each items as item}
    {@render navItem(item)}
  {/each}
{/snippet}

{#snippet navItem(item: NavItem)}
  {#if "items" in item}
    <li>
      <details open={!item.collapsed}>
        <summary><span class="label group">{item.label}</span></summary>
        <ul>
          {@render navItems(item.items)}
        </ul>
      </details>
    </li>
  {:else}
    <li>
      <a href={item.link}>{item.label}</a>
    </li>
  {/if}
{/snippet}

<style>
  .global-bar {
    height: var(--globalBarHeight);
    display: flex;
    justify-content: space-between;
    align-items: center;
    padding: 0 var(--pageMargin);
    color: var(--fgInvertedColor);
    background: var(--bgInvertedColor);
  }
  .search {
    & > ul {
      position: fixed;
      z-index: 10;
      left: 0;
      top: var(--globalBarHeight);
      width: 100vw;
      height: 100vh;
      background: #fff;
    }
  }
  .search-result {
    padding: 15px;
    border-bottom: 1px solid #a3a3a3;
  }
  .search-result-title {
    padding: 0 0 15px;
  }
  .search-result-excerpt {
    color: #666;
  }
  .menu {
    color: var(--fgColor);
    & > ul {
      visibility: hidden;
      position: fixed;
      left: 0;
      z-index: 10;
      top: var(--globalBarHeight);
      width: 100vw;
      height: 100vh;
      background: #fff;
    }
    &.open > ul {
      visibility: visible;
    }
    li {
      padding-left: 1em;
    }
  }

  nav {
    display: flex;
    align-items: center;
  }
  ul {
    list-style: none;
    padding: 0;
    margin: 0;
  }
</style>
9
pkgs/docs-site/src/routes/+layout.ts
Normal file
@@ -0,0 +1,9 @@
import { Docs } from "$lib";

export const prerender = true;

export async function load() {
  return {
    docs: await new Docs().init(),
  };
}
1
pkgs/docs-site/src/routes/+page.svelte
Normal file
@@ -0,0 +1 @@
<h1>Welcome to Clan</h1>
338
pkgs/docs-site/src/routes/[...path]/+page.svelte
Normal file
@@ -0,0 +1,338 @@
<script lang="ts">
  import "$lib/markdown/main.css";
  import { visit, type Heading as ArticleHeading } from "$lib/docs";
  import { onMount } from "svelte";
  const { data } = $props();

  type Heading = ArticleHeading & {
    index: number;
    scrolledPast: number;
    element: Element;
    children: Heading[];
  };

  let nextHeadingIndex = 0;
  const headings = $derived(normalizeHeadings(data.toc));
  let tocOpen = $state(false);
  let tocEl: HTMLElement;
  let contentEl: HTMLElement;
  let currentHeading: Heading | null = $state(null);
  let observer: IntersectionObserver | undefined;
  const defaultTocContent = "Table of contents";
  const currentTocContent = $derived.by(() => {
    if (tocOpen) {
      return defaultTocContent;
    }
    return currentHeading?.content || defaultTocContent;
  });

  $effect(() => {
    // Make sure the effect is triggered on content change
    data.content;
    observer?.disconnect();
    observer = new IntersectionObserver(onIntersectionChange, {
      threshold: 1,
      rootMargin: `${-tocEl.offsetHeight}px 0px 0px`,
    });
    const els = contentEl.querySelectorAll("h1,h2,h3,h4,h5,h6");
    for (const el of els) {
      observer.observe(el);
    }
  });

  onMount(() => {
    const onClick = (ev: MouseEvent) => {
      const targetTabEl = (ev.target as HTMLElement).closest(".md-tabs-tab");
      if (!targetTabEl || targetTabEl.classList.contains("is-active")) {
        return;
      }
      const tabsEl = targetTabEl.closest(".md-tabs")!;
      const tabEls = tabsEl.querySelectorAll(".md-tabs-tab")!;
      const tabIndex = Array.from(tabEls).indexOf(targetTabEl);
      if (tabIndex == -1) {
        return;
      }
      const tabContentEls = tabsEl.querySelectorAll(".md-tabs-content");
      const tabContentEl = tabContentEls[tabIndex];
      if (!tabContentEl) {
        return;
      }
      tabEls.forEach((tabEl) => tabEl.classList.remove("is-active"));
      targetTabEl.classList.add("is-active");
      tabContentEls.forEach((tabContentEl) =>
        tabContentEl.classList.remove("is-active"),
      );
      tabContentEl.classList.add("is-active");
    };
    document.addEventListener("click", onClick);
    return () => {
      document.removeEventListener("click", onClick);
    };
  });

  function normalizeHeadings(headings: ArticleHeading[]): Heading[] {
    return headings.map((heading) => ({
      ...heading,
      index: nextHeadingIndex++,
      scrolledPast: 0,
      children: normalizeHeadings(heading.children),
    })) as Heading[];
  }

  async function onIntersectionChange(entries: IntersectionObserverEntry[]) {
    // Record each heading's scrolledPast
    for (const entry of entries) {
      visit(headings, (heading) => {
        if (heading.id != entry.target.id) {
          return;
        }
        heading.element = entry.target;
        heading.scrolledPast =
          entry.intersectionRatio < 1 &&
          entry.boundingClientRect.top < entry.rootBounds!.top
            ? entry.rootBounds!.top - entry.boundingClientRect.top
            : 0;
        return false;
      })!;
    }
    let last: Heading | null = null;
    let current: Heading | null = null;
    // Find the last heading with scrolledPast > 0
    visit(headings, (heading) => {
      if (last && last.scrolledPast > 0 && heading.scrolledPast === 0) {
        current = last;
        return false;
      }
      last = heading;
    });
    currentHeading = current;
  }

  function scrollToHeading(ev: Event, heading: Heading) {
    ev.preventDefault();
    heading.element.scrollIntoView({
      behavior: "smooth",
    });
    tocOpen = false;
  }
  function scrollToTop(ev: Event) {
    ev.preventDefault();
    window.scrollTo({
      top: 0,
      behavior: "smooth",
    });
    tocOpen = false;
  }
</script>

<div class="container">
  <div class="toc">
    <h2 class="toc-title" bind:this={tocEl}>
      <button class="toc-label" onclick={() => (tocOpen = !tocOpen)}>
        <span>
          {currentTocContent}
        </span>
        <svg
          fill="none"
          height="24"
          stroke="currentColor"
          stroke-linecap="round"
          stroke-linejoin="round"
          stroke-width="2"
          viewBox="0 0 24 24"
          width="18"
          xmlns="http://www.w3.org/2000/svg"
          ><polyline points="9 18 15 12 9 6" /></svg
        >
      </button>
    </h2>
    {#if tocOpen}
      <ul class="toc-menu">
        <li>
          <a href={`#${headings[0].id}`} onclick={scrollToTop}
            >{headings[0].content}</a
          >
        </li>
        {@render tocLinks(headings[0].children)}
      </ul>
    {/if}
  </div>
  <div class="content" bind:this={contentEl}>
    {@html data.content}
  </div>
  <footer>
    {#if data.frontmatter.previous}
      <a class="pointer previous" href={data.frontmatter.previous.link}>
        <div class="pointer-arrow">&lt;</div>
        <div>
          <div class="pointer-label">Previous</div>
          <div class="pointer-title">{data.frontmatter.previous.label}</div>
        </div>
      </a>
    {:else}
      <div class="pointer previous"></div>
    {/if}
    {#if data.frontmatter.next}
      <a class="pointer next" href={data.frontmatter.next.link}>
        <div>
          <div class="pointer-label">Next</div>
          <div class="pointer-title">{data.frontmatter.next.label}</div>
        </div>
        <div class="pointer-arrow">&gt;</div>
      </a>
    {:else}
      <div class="pointer next"></div>
    {/if}
  </footer>
</div>

{#snippet tocLinks(headings: Heading[])}
  {#each headings as heading}
    {@render tocLink(heading)}
  {/each}
{/snippet}

{#snippet tocLink(heading: Heading)}
  <li>
    <a
      href={`#${heading.id}`}
      onclick={(ev) => {
        scrollToHeading(ev, heading);
      }}>{heading.content}</a
    >
    {#if heading.children.length != 0}
      <ul>
        {@render tocLinks(heading.children)}
      </ul>
    {/if}
  </li>
{/snippet}

<style>
  .toc {
    position: sticky;
    top: 0;
    z-index: 1;

    h2 {
      margin: 0;
      font-size: 16px;
      font-weight: normal;
      padding: 15px 20px;
      background: #e5e5e5;
    }
  }
  button {
    padding: 0;
    background: transparent;
    border: 0;
    font: inherit;
    color: inherit;
  }
  .toc-title {
    display: flex;
    justify-content: space-between;
  }
  .toc-label {
    display: flex;
    gap: 3px;
    align-items: center;
  }
  .toc-menu {
    position: absolute;
    top: 100%;
    left: 0;
    right: 0;
    margin: 0;
    padding: 15px 20px;
    background: #fff;
    list-style: none;
    box-shadow: 0 3px 5px #00000020;

    ul {
      list-style: none;
      padding: 0 15px;
    }
    li {
      padding: 3px 0;
    }
  }
  .content {
    padding: 0 15px;
    width: 100vw;

    :global {
      & :is(h1, h2, h3, h4, h5, h6) {
        margin-left: calc(-1 * var(--pageMargin));
        display: flex;
        align-items: center;
        &.is-scrolledPast {
          opacity: 0;
        }
        &.is-ghost {
          position: fixed;
          z-index: 1;
          margin: 0;
          left: 0;

          > span {
            transform-origin: left top;
          }
        }
        a {
          text-decoration: none;
        }
        .icon {
          display: flex;
          align-items: center;
        }
        .icon::before {
          content: "🔗";
          font-size: 14px;
          visibility: hidden;
        }
        &:hover {
          .icon::before {
            visibility: visible;
          }
          &.is-ghost {
            .icon::before {
              visibility: hidden;
            }
          }
        }
      }
    }
  }

  footer {
    display: flex;
    justify-content: space-between;
    gap: 15px;
    margin: 20px 15px;
  }
  .pointer {
    display: flex;
    align-items: center;
    flex: 1;
    box-shadow: 0 2px 5px #00000030;
    border-radius: 8px;
    padding: 20px;
    gap: 10px;
    text-decoration: none;
    color: inherit;
  }
  .pointer:empty {
    box-shadow: none;
  }
  .pointer.next {
    text-align: right;
    justify-content: end;
  }
  .pointer-title {
    font-size: 26px;
  }
  .pointer-label {
    font-size: 16px;
  }
</style>
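The active-heading rule in `onIntersectionChange` above can be restated in isolation: a heading is "current" while it has scrolled past the sticky toc bar and the next heading has not. A minimal sketch over a flat list (the real code walks the nested toc with `visit` and records `scrolledPast` from IntersectionObserver entries):

```ts
type H = { content: string; scrolledPast: number };

// Pick the last heading already scrolled past the toc bar, i.e. the one
// whose section the reader is currently in (flat-list simplification).
function currentHeading(headings: H[]): H | null {
  let last: H | null = null;
  for (const h of headings) {
    if (last && last.scrolledPast > 0 && h.scrolledPast === 0) return last;
    last = h;
  }
  return null;
}

console.log(
  currentHeading([
    { content: "Install", scrolledPast: 120 },
    { content: "Requirements", scrolledPast: 40 },
    { content: "Steps", scrolledPast: 0 },
  ])?.content,
); // "Requirements"
```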
10
pkgs/docs-site/src/routes/[...path]/+page.ts
Normal file
@@ -0,0 +1,10 @@
import { error } from "@sveltejs/kit";
export async function load({ params, parent }) {
  const { docs } = await parent();
  const article = await docs.getArticle(`/${params.path}`);
  if (!article) {
    error(404, "");
  }

  return article;
}
548
pkgs/docs-site/src/routes/docs/decisions/01-Clan-Modules.md
Normal file
@@ -0,0 +1,548 @@
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
## Context
|
||||
|
||||
Current state as of writing:
|
||||
|
||||
To define a service in Clan, you need to define two things:
|
||||
|
||||
- `clanModule` - defined by module authors
|
||||
- `inventory` - defined by users
|
||||
|
||||
The `clanModule` is currently a plain NixOS module. It is conditionally imported into each machine depending on the `service` and `role`.
|
||||
|
||||
A `role` is a function of a machine within a service. For example in the `backup` service there are `client` and `server` roles.
|
||||
|
||||
The `inventory` contains the settings for the user/consumer of the module. It describes what `services` run on each machine and with which `roles`.
|
||||
|
||||
Additionally any `service` can be instantiated multiple times.
|
||||
|
||||
This ADR proposes that we change how to write a `clanModule`. The `inventory` should get a new attribute called `instances` that allow for configuration of these modules.
|
||||
|
||||
### Status Quo
|
||||
|
||||
In this example the user configures 2 instances of the `networking` service:
|
||||
|
||||
The *user* defines
|
||||
|
||||
```nix
|
||||
{
|
||||
inventory.services = {
|
||||
# anything inside an instance is instance specific
|
||||
networking."instance1" = {
|
||||
roles.client.tags = [ "all" ];
|
||||
machines.foo.config = { ... /* machine specific settings */ };
|
||||
|
||||
# this will not apply to `clients` outside of `instance1`
|
||||
roles.client.config = { ... /* client specific settings */ };
|
||||
};
|
||||
networking."instance2" = {
|
||||
roles.server.tags = [ "all" ];
|
||||
config = { ... /* applies to every machine that runs this instance */ };
|
||||
};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
The *module author* defines:
|
||||
|
||||
```nix
|
||||
# networking/roles/client.nix
|
||||
{ config, ... }:
|
||||
let
|
||||
instances = config.clan.inventory.services.networking or { };
|
||||
|
||||
serviceConfig = config.clan.networking;
|
||||
in {
|
||||
## Set some nixos options
|
||||
}
|
||||
```
|
||||
|
||||
### Problems
|
||||
|
||||
Problems with the current way of writing clanModules:
|
||||
|
||||
1. No way to retrieve the config of a single service instance, together with its name.
|
||||
2. Directly exporting a single, anonymous nixosModule without any intermediary attribute layers doesn't leave room for exporting other inventory resources such as potentially `vars` or `homeManagerConfig`.
|
||||
3. Can't access multiple config instances individually.
|
||||
Example:
|
||||
```nix
|
||||
inventory = {
|
||||
services = {
|
||||
network.c-base = {
|
||||
instanceConfig.ips = {
|
||||
mors = "172.139.0.2";
|
||||
};
|
||||
};
|
||||
network.gg23 = {
|
||||
instanceConfig.ips = {
|
||||
mors = "10.23.0.2";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
```
|
||||
This doesn't work because all instance configs are applied to the same namespace. So this results in a conflict currently.
|
||||
Resolving this problem means that new inventory modules cannot be plain nixos modules anymore. If they are configured via `instances` / `instanceConfig` they cannot be configured without using the inventory. (There might be ways to inject instanceConfig but that requires knowledge of inventory internals)
|
||||
|
||||
4. Writing modules for multiple instances is cumbersome. Currently the clanModule author has to write one or multiple `fold` operations for potentially every nixos option to define how multiple service instances merge into every single one option. The new idea behind this adr is to pull the common fold function into the outer context provide it as a common helper. (See the example below. `perInstance` analog to the well known `perSystem` of flake-parts)
|
||||
|
||||
5. Each role has a different interface. We need to render that interface into json-schema which includes creating an unnecessary test machine currently. Defining the interface at a higher level (outside of any machine context) allows faster evaluation and an isolation by design from any machine.
|
||||
This allows rendering the UI (options tree) of a service by just knowing the service and the corresponding roles without creating a dummy machine.
|
||||
|
||||
6. The interface of defining config is wrong. It is possible to define config that applies to multiple machine at once. It is possible to define config that applies to
|
||||
a machine as a hole. But this is wrong behavior because the options exist at the role level. So config must also always exist at the role level.
|
||||
Currently we merge options and config together but that may produce conflicts. Those module system conflicts are very hard to foresee since they depend on what roles exist at runtime.
|
||||
|
||||
## Proposed Change
|
||||
|
||||
We will create a new module class which is defined by `_class = "clan.service"` ([documented here](https://nixos.org/manual/nixpkgs/stable/#module-system-lib-evalModules-param-class)).
|
||||
|
||||
Existing clan modules will still work by continuing to be plain NixOS modules. All new modules can set `_class = "clan.service";` to use the proposed features.
|
||||
|
||||
In short the change introduces a new module class that makes the currently necessary folding of `clan.service`s `instances` and `roles` a common operation. The module author can define the inner function of the fold operations which is called a `clan.service` module.
|
||||
|
||||
There are the following attributes of such a module:
|
||||
|
||||
### `roles.<roleName>.interface`
|
||||
|
||||
Each role can have a different interface for how to be configured.
|
||||
I.e.: A `client` role might have different options than a `server` role.
|
||||
|
||||
This attribute should be used to define `options`. (Not `config` !)
|
||||
|
||||
The end-user defines the corresponding `config`.
|
||||
|
||||
This submodule will be evaluated for each `instance role` combination and passed as argument into `perInstance`.
|
||||
|
||||
This submodules `options` will be evaluated to build the UI for that module dynamically.
|
||||
|
||||
### **Result attributes**
|
||||
|
||||
Some common result attributes are produced by modules of this proposal, those will be referenced later in this document but are commonly defined as:
|
||||
|
||||
- `nixosModule` A single nixos module. (`{config, ...}:{ environment.systemPackages = []; }`)
|
||||
- `services.<serviceName>` An attribute set of `_class = clan.service`. Which contain the same thing as this whole ADR proposes.
|
||||
- `vars` To be defined. Reserved for now.
|
||||
|
||||
### `roles.<roleName>.perInstance`
|
||||
|
||||
This acts like a function that maps over all `service instances` of a given `role`.
|
||||
It produces the previously defined **result attributes**.
|
||||
|
||||
I.e. This allows to produce multiple `nixosModules` one for every instance of the service.
|
||||
Hence making multiple `service instances` convenient by leveraging the module-system merge behavior.
|
||||
|
||||
### `perMachine`
|
||||
|
||||
This acts like a function that maps over all `machines` of a given `service`.
|
||||
It produces the previously defined **result attributes**.
|
||||
|
||||
I.e. this allows to produce exactly one `nixosModule` per `service`.
|
||||
Making it easy to set nixos-options only once if they have a one-to-one relation to a service being enabled.
|
||||
|
||||
Note: `lib.mkIf` can be used on i.e. `roleName` to make the scope more specific.
|
||||
|
||||
### `services.<serviceName>`
|
||||
|
||||
This allows to define nested services.
|
||||
i.e the *service* `backup` might define a nested *service* `ssh` which sets up an ssh connection.
|
||||
|
||||
This can be defined in `perMachine` and `perInstance`
|
||||
|
||||
- For Every `instance` a given `service` may add multiple nested `services`.
|
||||
- A given `service` may add a static set of nested `services`; Even if there are multiple instances of the same given service.
|
||||
|
||||
Q: Why is this not a top-level attribute?
|
||||
A: Because nested service definitions may also depend on a `role` which must be resolved depending on `machine` and `instance`. The top-level module doesn't know anything about machines. Keeping the service layer machine agnostic allows us to build the UI for a module without adding any machines. (One of the problems with the current system)
|
||||
|
||||
```
|
||||
zerotier/default.nix
|
||||
```
|
||||
```nix
|
||||
# Some example module
|
||||
{
|
||||
_class = "clan.service";
|
||||
|
||||
# Analog to flake-parts 'perSystem' only that it takes instance
|
||||
# The exact arguments will be specified and documented along with the actual implementation.
|
||||
roles.client.perInstance = {
|
||||
# attrs : settings of that instance
|
||||
settings,
|
||||
# string : name of the instance
|
||||
instanceName,
|
||||
# { name :: string , roles :: listOf string; }
|
||||
machine,
|
||||
# { {roleName} :: { machines :: listOf string; } }
|
||||
roles,
|
||||
...
|
||||
}:
|
||||
{
|
||||
# Return a nixos module for every instance.
|
||||
# The module author must be aware that this may return multiple modules (one for every instance) which are merged natively
|
||||
nixosModule = {
|
||||
config.debug."${instanceName}-client" = instanceConfig;
|
||||
};
|
||||
};
|
||||
# Function that is called once for every machine with the role "client"
|
||||
# Receives at least the following parameters:
|
||||
#
|
||||
# machine :: { name :: String, roles :: listOf string; }
|
||||
# Name of the machine
|
||||
#
|
||||
# instances :: { instanceName :: { roleName :: { machines :: [ string ]; }}}
|
||||
# Resolved roles
|
||||
# Same type as currently in `clan.inventory.services.<ServiceName>.<InstanceName>.roles`
|
||||
#
|
||||
# The exact arguments will be specified and documented along with the actual implementation.
|
||||
perMachine = {machine, instances, ... }: {
|
||||
nixosModule =
|
||||
{ lib, ... }:
|
||||
{
|
||||
# Some shared code should be put into a shared file
|
||||
# Which is then imported into all/some roles
|
||||
imports = [
|
||||
../shared.nix
|
||||
] ++
|
||||
(lib.optional (builtins.elem "client" machine.roles)
|
||||
{
|
||||
options.debug = lib.mkOption {
|
||||
type = lib.types.attrsOf lib.types.raw;
|
||||
};
|
||||
});
|
||||
};
|
||||
};
|
||||
}
|
||||
```

## Inventory.instances

This document also proposes to add a new attribute to the inventory that allows for exclusive configuration of the new modules.
This creates a better separation between the new and the old way of writing and configuring modules, keeping the new implementation focused and keeping existing technical debt out from the beginning.

The following thoughts went into this:

- Getting rid of `<serviceName>`: Using only the attribute name (a plain string) is not sufficient for defining the source of the service module. Encoding meta information into it would also require some extensible format specification and parser.
- Removing instanceConfig and machineConfig: There is no such config. Service configuration must always be role-specific, because the options are defined on the role.
- Renaming `config` to `settings` or similar, since `config` is a module-system internal name.
- Tags and machines should be an attribute set, to allow setting `settings` on that level instead.

```nix
{
  inventory.instances = {
    "instance1" = {
      # Allows to define where the module should be imported from.
      module = {
        input = "clan-core";
        name = "borgbackup";
      };
      # Settings that apply to all client machines
      roles.client.settings = { };
      # Settings that apply to the client service of the machine with name <machineName>
      # There might be a server service that takes different settings on the same machine!
      roles.client.machines.<machineName>.settings = { };
      # Settings that apply to all client instances with tag <tagName>
      roles.client.tags.<tagName>.settings = { };
    };
    "instance2" = {
      # ...
    };
  };
}
```

## Iteration note

We want to implement the system as described. Once we have sufficient data on real-world use cases and modules, we might revisit this document along with the updated implementation.

## Real world example

The following module demonstrates the idea using the example of *borgbackup*.

```nix
{
  _class = "clan.service";

  # Define the 'options' of 'settings'; see the argument of perInstance
  roles.server.interface =
    { lib, ... }:
    {
      options.directory = lib.mkOption {
        type = lib.types.str;
        default = "/var/lib/borgbackup";
        description = ''
          The directory where the borgbackup repositories are stored.
        '';
      };
    };

  roles.server.perInstance =
    {
      instanceName,
      settings,
      roles,
      ...
    }:
    {
      nixosModule =
        { config, lib, ... }:
        let
          dir = config.clan.core.settings.directory;
          machineDir = dir + "/vars/per-machine/";
          allClients = roles.client.machines;
        in
        {
          # services.borgbackup is a native nixos option
          config.services.borgbackup.repos =
            let
              borgbackupIpMachinePath = machine: machineDir + machine + "/borgbackup/borgbackup.ssh.pub/value";

              machinesMaybeKey = builtins.map (
                machine:
                let
                  fullPath = borgbackupIpMachinePath machine;
                in
                if builtins.pathExists fullPath then
                  machine
                else
                  lib.warn ''
                    Machine ${machine} does not have a borgbackup key at ${fullPath},
                    run `clan vars generate ${machine}` to generate it.
                  '' null
              ) allClients;

              machinesWithKey = lib.filter (x: x != null) machinesMaybeKey;

              hosts = builtins.map (machine: {
                name = instanceName + machine;
                value = {
                  path = "${settings.directory}/${machine}";
                  authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machine)) ];
                };
              }) machinesWithKey;
            in
            # listToAttrs of an empty list already yields { }
            builtins.listToAttrs hosts;
        };
    };

  roles.client.interface =
    { lib, ... }:
    {
      # There might be a better interface now. This is just how clan borgbackup was configured in the 'old' way
      options.destinations = lib.mkOption {
        type = lib.types.attrsOf (
          lib.types.submodule (
            { name, ... }:
            {
              options = {
                name = lib.mkOption {
                  type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
                  default = name;
                  description = "the name of the backup job";
                };
                repo = lib.mkOption {
                  type = lib.types.str;
                  description = "the borgbackup repository to backup to";
                };
                rsh = lib.mkOption {
                  type = lib.types.nullOr lib.types.str;
                  default = null;
                  defaultText = "ssh -i \${config.clan.core.vars.generators.borgbackup.files.\"borgbackup.ssh\".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
                  description = "the rsh to use for the backup";
                };
              };
            }
          )
        );
        default = { };
        description = ''
          destinations where the machine should be backed up to
        '';
      };

      options.exclude = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        example = [ "*.pyc" ];
        default = [ ];
        description = ''
          Directories/Files to exclude from the backup.
          Use * as a wildcard.
        '';
      };
    };

  roles.client.perInstance =
    {
      instanceName,
      roles,
      machine,
      settings,
      ...
    }:
    {
      nixosModule =
        {
          config,
          lib,
          pkgs,
          ...
        }:
        let
          allServers = roles.server.machines;

          preBackupScript = ''
            declare -A preCommandErrors

            ${lib.concatMapStringsSep "\n" (
              state:
              lib.optionalString (state.preBackupCommand != null) ''
                echo "Running pre-backup command for ${state.name}"
                if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
                  preCommandErrors["${state.name}"]=1
                fi
              ''
            ) (lib.attrValues config.clan.core.state)}

            if [[ ''${#preCommandErrors[@]} -gt 0 ]]; then
              echo "pre-backup commands failed for the following services:"
              for state in "''${!preCommandErrors[@]}"; do
                echo "  $state"
              done
              exit 1
            fi
          '';

          destinations =
            let
              destList = builtins.map (serverName: {
                name = "${instanceName}-${serverName}";
                value = {
                  repo = "borg@${serverName}:/var/lib/borgbackup/${machine.name}";
                  rsh = "ssh -i ${
                    config.clan.core.vars.generators."borgbackup-${instanceName}".files."borgbackup.ssh".path
                  } -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=Yes";
                } // settings.destinations.${serverName};
              }) allServers;
            in
            (builtins.listToAttrs destList);
        in
        {
          config = {
            # Derived from the destinations
            systemd.services = lib.mapAttrs' (
              _: dest:
              lib.nameValuePair "borgbackup-job-${instanceName}-${dest.name}" {
                # Since borgbackup mounts the system read-only, we need to run an ExecStartPre script, so we can generate additional files.
                serviceConfig.ExecStartPre = [
                  ''+${pkgs.writeShellScript "borgbackup-job-${dest.name}-pre-backup-commands" preBackupScript}''
                ];
              }
            ) destinations;

            services.borgbackup.jobs = lib.mapAttrs (_destinationName: dest: {
              paths = lib.unique (
                lib.flatten (map (state: state.folders) (lib.attrValues config.clan.core.state))
              );
              exclude = settings.exclude;
              repo = dest.repo;
              environment.BORG_RSH = dest.rsh;
              compression = "auto,zstd";
              startAt = "*-*-* 01:00:00";
              persistentTimer = true;

              encryption = {
                mode = "repokey";
                passCommand = "cat ${config.clan.core.vars.generators."borgbackup-${instanceName}".files."borgbackup.repokey".path}";
              };

              prune.keep = {
                within = "1d"; # Keep all archives from the last day
                daily = 7;
                weekly = 4;
                monthly = 0;
              };
            }) destinations;

            environment.systemPackages = [
              (pkgs.writeShellApplication {
                name = "borgbackup-create";
                runtimeInputs = [ config.systemd.package ];
                text = ''
                  ${lib.concatMapStringsSep "\n" (dest: ''
                    systemctl start borgbackup-job-${dest.name}
                  '') (lib.attrValues destinations)}
                '';
              })
              (pkgs.writeShellApplication {
                name = "borgbackup-list";
                runtimeInputs = [ pkgs.jq ];
                text = ''
                  (${
                    lib.concatMapStringsSep "\n" (
                      dest:
                      # we need yes here to skip the changed url verification
                      ''echo y | /run/current-system/sw/bin/borg-job-${dest.name} list --json | jq '[.archives[] | {"name": ("${dest.name}::${dest.repo}::" + .name)}]' ''
                    ) (lib.attrValues destinations)
                  }) | jq -s 'add // []'
                '';
              })
              (pkgs.writeShellApplication {
                name = "borgbackup-restore";
                runtimeInputs = [ pkgs.gawk ];
                text = ''
                  cd /
                  IFS=':' read -ra FOLDER <<< "''${FOLDERS-}"
                  job_name=$(echo "$NAME" | awk -F'::' '{print $1}')
                  backup_name=''${NAME#"$job_name"::}
                  if [[ ! -x /run/current-system/sw/bin/borg-job-"$job_name" ]]; then
                    echo "borg-job-$job_name not found: Backup name is invalid" >&2
                    exit 1
                  fi
                  echo y | /run/current-system/sw/bin/borg-job-"$job_name" extract "$backup_name" "''${FOLDER[@]}"
                '';
              })
            ];

            # every borgbackup instance adds its own vars
            clan.core.vars.generators."borgbackup-${instanceName}" = {
              files."borgbackup.ssh.pub".secret = false;
              files."borgbackup.ssh" = { };
              files."borgbackup.repokey" = { };

              migrateFact = "borgbackup";
              runtimeInputs = [
                pkgs.coreutils
                pkgs.openssh
                pkgs.xkcdpass
              ];
              script = ''
                ssh-keygen -t ed25519 -N "" -f $out/borgbackup.ssh
                xkcdpass -n 4 -d - > $out/borgbackup.repokey
              '';
            };
          };
        };
    };

  perMachine = {
    nixosModule =
      { ... }:
      {
        clan.core.backups.providers.borgbackup = {
          list = "borgbackup-list";
          create = "borgbackup-create";
          restore = "borgbackup-restore";
        };
      };
  };
}
```

## Prior-art

- https://github.com/NixOS/nixops
- https://github.com/infinisil/nixus

112 pkgs/docs-site/src/routes/docs/decisions/02-clan-as-library.md Normal file
@@ -0,0 +1,112 @@
## Status

Accepted

## Context

In the long term, we envision the clan application will consist of the following user-facing tools:

- `CLI`
- `TUI`
- `Desktop Application`
- `REST-API`
- `Mobile Application`

Not all of these may end up existing, but the architecture should be generic enough that they are possible without major changes to the underlying system.

## Decision

This leads to the conclusion that we should do `library`-centric development,
with the current `clan` python code being a library that can be imported to create various tools on top of it.
All **CLI** or **UI** related parts should be moved out of the main library.

Imagine roughly the following architecture:

``` mermaid
graph TD
    %% Define styles
    classDef frontend fill:#f9f,stroke:#333,stroke-width:2px;
    classDef backend fill:#bbf,stroke:#333,stroke-width:2px;
    classDef storage fill:#ff9,stroke:#333,stroke-width:2px;
    classDef testing fill:#cfc,stroke:#333,stroke-width:2px;

    %% Define nodes
    user(["User"]) -->|Interacts with| Frontends

    subgraph "Frontends"
        CLI["CLI"]:::frontend
        APP["Desktop App"]:::frontend
        TUI["TUI"]:::frontend
        REST["REST API"]:::frontend
    end

    subgraph "Python"
        API["Library <br>for interacting with clan"]:::backend
        BusinessLogic["Business Logic<br>Implements actions like 'machine create'"]:::backend
        STORAGE[("Persistence")]:::storage
        NIX["Nix Eval & Build"]:::backend
    end

    subgraph "CI/CD & Tests"
        TEST["Feature Testing"]:::testing
    end

    %% Define connections
    CLI --> API
    APP --> API
    TUI --> API
    REST --> API

    TEST --> API

    API --> BusinessLogic
    BusinessLogic --> STORAGE
    BusinessLogic --> NIX
```

This simple design ensures that all the basic features remain stable across all frontends.
It is then straightforward to call the python library functions from a testing framework to ensure that kind of stability.

Integration tests and smaller unit tests should both be utilized to ensure the stability of the library.

Note: Library functions don't have to be json-serializable in general.

Persistence includes but is not limited to: creating git commits, writing to inventory.json, reading and writing vars, and interacting with persisted data in general.

## Benefits / Drawbacks

- (+) Less tight coupling of frontend- / backend-teams
- (+) Consistent and predictable behavior
- (+) Performance & Scalability
- (+) Different frontends for different user groups
- (+) Documentation per library function makes it convenient to interact with the clan resources.
- (+) Testing the library ensures stability of the underlying layers for everything above.
- (-) Complexity overhead
- (-) The library needs to be designed / documented
    - (+) The library can be well documented, since it is a finite set of functions.
- (-) Error handling might be harder.
    - (+) Common error reporting
- (-) Different frontends need different features; the library must include them all.
    - (+) All those core features must be implemented anyway.
- (+) VPN benchmarking already uses the existing library and works relatively well.

## Implementation considerations

Not every detail that will need to change over time can be pointed out ahead of time.
The goal of this document is to create a common understanding of how we like our project to be structured.
Any future commits should contribute to this goal.

Some ideas of what might need to change:

- Having separate locations or packages for the library and the CLI.
    - Rename the `clan_cli` package to `clan` and move the `cli` frontend into a subfolder or a separate package.
- Python argparse or other CLI-related code should not exist in the `clan` python library.
- `__init__.py` should be very minimal. Only init the business logic models and resources. Note that all `__init__.py` files all the way up the module tree are always executed as part of the python module import logic and thus should be as small as possible.
  i.e. `from clan_cli.vars.generators import ...` executes both `clan_cli/__init__.py` and `clan_cli/vars/__init__.py` if any of those exist.
- An `api` folder doesn't make sense, since the python library `clan` is the API.
- Logic needed for the webui that performs JSON serialization and deserialization will live in some `json-adapter` folder or package.
- Code for serializing dataclasses and typed dictionaries is needed for the persistence layer. (i.e. for read-write of inventory.json)
- The inventory JSON is an internal backend resource. Its logic includes merging, unmerging and partial updates, taking nix values and their priorities into account. Nobody should try to read or write to it directly.
  Instead there will be library methods, i.e. to add a `service` or to update/read/delete some information from it.
- Library functions should be carefully designed with suitable conventions for writing good APIs in mind. (i.e.: https://swagger.io/resources/articles/best-practices-in-api-design/)
@@ -0,0 +1,45 @@
## Status

Proposed after some conversation between @lassulus, @Mic92, & @lopter.

## Context

It can be useful to refer to ADRs by their numbers, rather than their full title. To that end, short and sequential numbers are useful.

The issue is that an ADR number is effectively assigned when the ADR is merged; before being merged, its number is provisional. Because multiple ADRs can be written at the same time, you end up with multiple provisional ADRs with the same number. For example, this is the third ADR-3:

1. ADR-3-clan-compat: see [#3212];
2. ADR-3-fetching-nix-from-python: see [#3452];
3. ADR-3-numbering-process: this ADR.

This situation makes it impossible to refer to an ADR by its number, and is why I (@lopter) went with the arbitrary number 7 in [#3196].

We could solve this problem by using the PR number as the ADR number (@lassulus). The issue is that PR numbers are getting big in clan-core, which does not make them easy to remember, or to use in conversation and code (@lopter).

Another approach would be to move the ADRs into a different repository; this would reset the counter back to 1 and make it straightforward to keep ADR and PR numbers in sync (@lopter). The issue then is that ADRs are not in context with their changes, which makes them more difficult to review (@Mic92).

## Decision

A third approach would be to:

1. Commit ADRs before they are approved, so that the next ADR number gets assigned;
1. Open a PR for the proposed ADR;
1. Update the ADR file committed in step 1, so that its markdown contents point to the PR that tracks it.

## Consequences

### ADRs have unique and memorable numbers through their entire life cycle

This makes it easier to refer to them in conversation or in code.

### You need to have commit access to get an ADR number assigned

This makes it more difficult for someone external to the project to contribute an ADR.

### Creating a new ADR requires multiple commits

Maybe a script or CI flow could help with that if it becomes painful.

[#3212]: https://git.clan.lol/clan/clan-core/pulls/3212/
[#3452]: https://git.clan.lol/clan/clan-core/pulls/3452/
[#3196]: https://git.clan.lol/clan/clan-core/pulls/3196/
@@ -0,0 +1,97 @@
## Status

accepted

## Context

In our clan-cli we need to get a lot of values from nix into the python runtime. This is used to determine the hostname, the target IP address, scripts to generate vars, file locations, and many more.

Currently we use two different access methods:

### Method 1: deployment.json

A build-time artifact that serializes some predefined values into a JSON file.

Downsides:

* no access to flake-level values
* all or nothing:
    * values are either cached via deployment.json or not, so we can only put cheap values in there
    * in the past, var generation scripts were added here, which added a huge build-time overhead every time we wanted to do any action
* duplicated nix code
    * values need duplicated nix code: once to define them at the correct place in the module system (clan.core.vars.generators), and code to accumulate them again for the deployment.json (system.clan.deployment.data)
    * This duality adds unnecessary dependencies to the nixos module system.

Benefits:

* Utilizes `nix build` for caching the file.
* The caching mechanism is very simple.

### Method 2: Direct access

Directly calling the evaluator / build sandbox via `nix build` and `nix eval` within the Python code.

Downsides:

* Access is not cached: the static overhead (see below: \~1.5s) is paid every time we invoke nix commands
* The static overhead obviously depends on which value we need to retrieve, since the `evalModules` overhead differs depending on whether we evaluate an attribute inside a machine or a flake attribute
* Accessing more and more attributes with this method increases the static overhead, which leads to a linear decrease in performance.
* Boilerplate for interacting with the CLI and error-handling code is repeated every time.

Benefits:

* Simple and native interaction with the nix commands is rather intuitive
* Custom error handling for each attribute is easy

This system could be enhanced with custom nix expressions, which could be used in places where we don't want to put values into deployment.json or want to fetch flake-level values. This also has some downsides:

* technical debt
    * we have to maintain custom nix expressions inside python code; embedding code is error-prone and the language linters won't help you here, so errors are common and harder to debug.
    * we need custom error-reporting code in case something goes wrong: either the value doesn't exist or there is a reported build error
* no caching / custom caching logic
    * currently there is no infrastructure to cache those extra values, so we would need to store them somewhere; we could either enhance one of the many classes we have or not cache them at all
    * even if we implement caching for extra nix expressions, there can be no sharing between extra nix expressions. For example, if we have 2 nix expressions, one fetching paths and values for all generators and a second one fetching only the values, we still need to execute both of them in both contexts, although the second one could be skipped if the first one is already cached

### Method 3: nix select

Move all code that extracts nix values into a common class:

Downsides:

* added complexity for maintaining our own DSL

Benefits:

* we can implement an API (select DSL) to get those values from nix without writing complex nix expressions.
* we can implement caching of those values beyond the runtime of the CLI
* we can use precaching at different endpoints to eliminate most repeated nix evaluations (except in cases where we have to break the cache, or where we don't know whether we will need the value later and getting it is expensive).

## Decision

Use Method 3 (nix select) for extracting values out of nix.

This adds the `Flake` class in flake.py, with a `select` method that takes a selector string and returns a python dict.

Example:
```python
from clan_lib.flake import Flake
flake = Flake("github:lassulus/superconfig")
flake.select("nixosConfigurations.*.config.networking.hostName")
```
returns:
```
{
  "ignavia": "ignavia",
  "mors": "mors",
  ...
}
```

## Consequences

* Faster execution due to caching most things beyond a single execution; if no cache break happens, execution is basically instant, because we don't need to run nix again.
* Better error reporting: since all nix values go through one chokepoint, we can parse error messages in that chokepoint and report them in a more user-friendly way, for example if a value is missing at the expected location inside the module system.
* less embedded nix code inside python code
* a more portable CLI, since we need to import fewer modules into the module system and most things can be extracted by the python code directly
@@ -0,0 +1,34 @@
## Status

accepted

## Context

Currently, different operations (install, update) have different modes. Install always evaluates locally and pushes the derivation to a remote system; update has a configurable buildHost and targetHost.
Confusingly, install always evaluates locally while update always evaluates on the targetHost, so hosts have different semantics in different operation contexts.

## Decision

Add evalHost to make this clear and configurable for the user. This would leave us with:

- evalHost
- buildHost
- targetHost

for the update and install operations.

`evalHost` would be the machine that evaluates the nixos configuration. If evalHost is not localhost, we upload the non-secret vars and the nix-archived flake (this is usually the same operation) to the evalHost.

`buildHost` would be what the machine uses to build; it corresponds to `--build-host` on the nixos-rebuild command or `--builders` for nix build.

`targetHost` would be the machine where the closure gets copied to and activated (either through install or switch-to-configuration). It corresponds to `--target-host` for nixos-rebuild, or to where we usually point `nixos-anywhere`.

These hosts could be set either through CLI args (or forms for the GUI) or via the inventory, as sketched below. If both are given, the CLI args take precedence.
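
As a sketch, configuring these hosts via the inventory might then look as follows. Note that only `deploy.targetHost` exists in the inventory today; `deploy.buildHost` and `deploy.evalHost` are hypothetical option names for this proposal:

```nix
{
  inventory.machines.jon = {
    # Existing option: where the closure gets copied to and activated.
    deploy.targetHost = "root@jon.example.com";
    # Hypothetical names for the proposed options: where to build and where to evaluate.
    deploy.buildHost = "root@big-builder.example.com"; # assumption: option name not finalized
    deploy.evalHost = "localhost"; # assumption: option name not finalized
  };
}
```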

## Consequences

We now support every deployment model of every tool out there with a handful of simple flags. The semantics are clearer, and we can write some nice documentation.

The install code has to be reworked, since nixos-anywhere has problems with evalHost and targetHost being the same machine, so we would need to kexec first and use the kexec image (or installer) as the evalHost afterwards.

In cases where the evalHost doesn't have access to the targetHost or buildHost, we need to set up temporary entries for the lifetime of the command.
@@ -0,0 +1,14 @@
This section contains the architecture decisions that have been reviewed and generally agreed upon.

## What is an ADR?

> An architecture decision record (ADR) is a document that captures an important architecture decision made along with its context and consequences.

!!! Note
    For further reading about ADRs we recommend [architecture-decision-record](https://github.com/joelparkerhenderson/architecture-decision-record)

## Crafting a new ADR

1. Use the [template](../decisions/template.md)
2. Create the Pull request and gather feedback
3. Retrieve your ADR number (see: [numbering](../decisions/03-adr-numbering-process.md))

24 pkgs/docs-site/src/routes/docs/decisions/template.md Normal file
@@ -0,0 +1,24 @@
## Decision record template by Michael Nygard

This is the template in [Documenting architecture decisions - Michael Nygard](https://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions).
You can use [adr-tools](https://github.com/npryce/adr-tools) for managing the ADR files.

In each ADR file, write these sections:

# Title

## Status

What is the status, such as proposed, accepted, rejected, deprecated, superseded, etc.?

## Context

What is the issue that we're seeing that is motivating this decision or change?

## Decision

What is the change that we're proposing and/or doing?

## Consequences

What becomes easier or more difficult to do because of this change?

145 pkgs/docs-site/src/routes/docs/getting-started/add-machines.md Normal file
@@ -0,0 +1,145 @@
# Add Machines

Machines can be added using the following methods:

- Create a file `machines/{machine_name}/configuration.nix` (see [File Autoincludes](../guides/inventory/autoincludes.md))
- Imperatively via the CLI command: `clan machines create`
- Editing nix expressions in your flake.nix (see [`clan-core.lib.clan`](../reference/options/clan.md))

See the complete [list](../guides/inventory/autoincludes.md) of auto-loaded files.

## Create a machine

::::tabs

:::tab[clan.nix (declarative)]

```nix {3-4}
{
  inventory.machines = {
    # Define a machine
    jon = { };
  };

  # Additional NixOS configuration can be added here.
  machines = {
    # jon = { config, ... }: {
    #   environment.systemPackages = [ pkgs.asciinema ];
    # };
  };
}
```

:::

:::tab[CLI (imperative)]

```sh
clan machines create jon
```

The imperative command creates a machine folder in `machines/jon`
and may persist information in `inventory.json`.
:::
::::
### Configuring a machine

:::note
The option `inventory.machines.<name>` is used to define metadata about the machine,
for example `deploy.targetHost`, `machineClass` or `tags`.

The option `machines.<name>` is used to add extra _nixosConfiguration_ to a machine.
:::

Add the following to your `clan.nix` file for each machine.
This example demonstrates what is needed for a machine called `jon`:

```nix {3-6,15-19}
{
  inventory.machines = {
    jon = {
      # Define tags here (optional)
      tags = [ ]; # (1)
    };
    sara = {
      deploy.targetHost = "root@sara";
      tags = [ ];
    };
  };
  # Define additional nixosConfiguration here
  # Or in /machines/jon/configuration.nix (autoloaded)
  machines = {
    jon = { config, pkgs, ... }: {
      users.users.root.openssh.authorizedKeys.keys = [
        "ssh-ed25519 AAAAC3NzaC..." # elided (2)
      ];
    };
  };
}
```

1. Tags can be used to automatically add this machine to services later on. You don't need to set this now.
2. Add your _ssh key_ here - that will ensure you can always log in to your machine via _ssh_ in case something goes wrong.

### (Optional) Create a `configuration.nix`

```nix title="./machines/jon/configuration.nix"
{
  imports = [
    # enables GNOME desktop (optional)
    ../../modules/gnome.nix
  ];

  # Set nixosOptions here
  # Or import your own modules via 'imports'
  # ...
}
```

### (Optional) Renaming a Machine

Older templates included static machine folders like `jon` and `sara`.
If your setup still uses such static machines, you can rename a machine folder to match your own machine name:

```bash
git mv ./machines/jon ./machines/<your-machine-name>
```

Since your Clan configuration lives inside a Git repository, remember:

- Only files tracked by Git (`git add`) are recognized.
- Whenever you add, rename, or remove files, run:

```bash
git add ./machines/<your-machine-name>
```

to stage the changes.

---

### (Optional) Removing a Machine

If you want to work with a single machine for now, you can remove other machine entries both from your `flake.nix` and from the `machines` directory. For example, to remove the machine `sara`:

```bash
git rm -rf ./machines/sara
```

Make sure to also remove or update any references to that machine in your nix files or in `inventory.json`, if any exist.
@@ -0,0 +1,75 @@
A service in clan is a self-contained, reusable unit of system configuration that provides a specific piece of functionality across one or more machines.

Think of it as a recipe for running a tool — like automatic backups, VPN networking, monitoring, etc.

In Clan, services are multi-host and role-based:

- Roles map machines to logical service responsibilities, enabling structured, clean deployments.

- You can use tags instead of explicit machine names.

To learn more: [Guide about clanService](../guides/services/introduction-to-services.md)

!!! Important
    It is recommended to add at least one networking service such as `zerotier`, which allows you to reach all your clan machines from your setup computer across the globe.

## Configure a Zerotier Network (recommended)

```{.nix title="clan.nix" hl_lines="8-16"}
{
  inventory.machines = {
    jon = { };
    sara = { };
  };

  inventory.instances = {
    zerotier = { # (1)
      # Replace with the name (string) of your machine that you will use as zerotier-controller
      # See: https://docs.zerotier.com/controller/
      # Deploy this machine first to create the network secrets
      roles.controller.machines."jon" = { }; # (2)
      # Peers of the network
      # this line means 'all' clan machines will be 'peers'
      roles.peer.tags.all = { }; # (3)
    };
  };
  # ...
  # elided
}
```

1. See [services/official](../services/definition.md) for all available services and how to configure them.
   Or read [guides/services](../guides/services/community.md) if you want to bring your own.

2. Replace `jon` with the *name* of the machine you want to use as controller.

3. This line will add all machines of your clan as `peer` to zerotier.

## Adding more recommended defaults

Adding the following services is recommended for most users:

```{.nix title="clan.nix" hl_lines="7-14"}
{
  inventory.machines = {
    jon = { };
    sara = { };
  };
  inventory.instances = {
    admin = { # (1)
      roles.default.tags.all = { };
      roles.default.settings = {
        allowedKeys = {
          "my-user" = "ssh-ed25519 AAAAC3N..."; # (2)
        };
      };
    };
    # ...
    # elided
  };
}
```

1. The `admin` service will generate a **root-password** and **add your ssh-key**, which allows for convenient administration.
2. Equivalent to directly setting `authorizedKeys` like in [configuring a machine](../getting-started/add-machines.md#configuring-a-machine)

125 pkgs/docs-site/src/routes/docs/getting-started/add-users.md Normal file
@@ -0,0 +1,125 @@
!!! Note "Under construction"

    The users concept of clan is not done yet. This guide outlines some solutions from our community.

Defining users can be done in many different ways. We want to highlight two approaches:

- Using clan's [users](../services/official/users.md) service.
- Using a custom approach.

## Adding Users using the [users](../services/official/users.md) service

To add a first *user* this guide will be leveraging two things:

- [services](../services/definition.md): Allows binding arbitrary logic to something we call an `instance`.
- [services/users](../services/official/users.md): Implements the logic for adding a single user per instance.

The example shows how to add a user called `jon`:

```{.nix title="clan.nix" hl_lines="7-21"}
{
  inventory.machines = {
    jon = { };
    sara = { };
  };
  inventory.instances = {
    jon-user = { # (1)
      module.name = "users";

      roles.default.tags.all = { }; # (2)

      roles.default.settings = {
        user = "jon"; # (3)
        groups = [
          "wheel" # Allow using 'sudo'
          "networkmanager" # Allows to manage network connections.
          "video" # Allows to access video devices.
          "input" # Allows to access input devices.
        ];
      };
    };
    # ...
    # elided
  };
}
```

1. Adds `user = jon` as a user on all machines. This will create a `home` directory and prompt for a password before deployment.
2. Adds this user to `all` machines.
3. Defines the `name` of the user to be `jon`.

The `users` service creates a `/home/jon` directory, allows `jon` to sign in, and takes care of the user's password.

For more information see [services/users](../services/official/users.md)

## Using a custom approach

Some people like to define a `users` folder in their repository root.
That allows binding all user-specific logic to a single place (`default.nix`),
which can be imported into individual machines to make the user available on that machine.
A minimal sketch of this pattern follows the tree below.

```bash
.
├── machines
│   ├── jon
# ......
├── users
│   ├── jon
│   │   └── default.nix # <- a NixOS module; sets some options
# ... ... ...
```
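
As a rough illustration of this pattern (the concrete option values are only an example), `users/jon/default.nix` could be an ordinary NixOS module:

```nix
# users/jon/default.nix - an ordinary NixOS module (illustrative sketch)
{ ... }:
{
  users.users.jon = {
    isNormalUser = true;
    extraGroups = [
      "wheel" # Allow using 'sudo'
      "networkmanager"
    ];
    openssh.authorizedKeys.keys = [
      "ssh-ed25519 AAAAC3NzaC..." # elided
    ];
  };
}
```

A machine then opts in by adding `imports = [ ../../users/jon ];` to its `machines/<name>/configuration.nix`.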

## Using [home-manager](https://github.com/nix-community/home-manager)

When using clan's `users` service it is possible to define `extraModules`.
In fact this is always possible when using clan's services.

We can use this property of clan services to bind a nixosModule to the user, which configures home-manager.

```{.nix title="clan.nix" hl_lines="22"}
{
  inventory.machines = {
    jon = { };
    sara = { };
  };
  inventory.instances = {
    jon-user = {
      module.name = "users";

      roles.default.tags.all = { };

      roles.default.settings = {
        user = "jon";
        groups = [
          "wheel"
          "networkmanager"
          "video"
          "input"
        ];
      };

      roles.default.extraModules = [ ./users/jon/home.nix ]; # (1)
    };
    # ...
    # elided
  };
}
```

1. Type `path` or `string`: Must point to a separate file. Inlining a module is not possible.

!!! Note "This is inspiration"
    Our community might come up with better solutions soon.
    We are seeking contributions to improve this pattern if you have a nicer solution in mind.

```nix title="users/jon/home.nix"
# NixOS module to import home-manager and the home-manager configuration of 'jon'
{ self, ... }:
{
  imports = [ self.inputs.home-manager.nixosModules.default ];
  home-manager.users.jon = {
    imports = [
      ./home-configuration.nix
    ];
  };
}
```
@@ -0,0 +1,74 @@
By default clan uses [disko](https://github.com/nix-community/disko), which allows for declarative disk partitioning.

To see which disk templates are available, run:

```{.shellSession hl_lines="10" .no-copy}
$ clan templates list

Available 'clan' template
├── <builtin>
│   ├── default: Initialize a new clan flake
│   ├── flake-parts: Flake-parts
│   └── minimal: for clans managed via (G)UI
Available 'disko' templates
├── <builtin>
│   └── single-disk: A simple ext4 disk with a single partition
Available 'machine' templates
├── <builtin>
│   ├── demo-template: Demo machine for the CLAN project
│   ├── flash-installer: Initialize a new flash-installer machine
│   ├── new-machine: Initialize a new machine
│   └── test-morph-template: Morph a machine
```

For this guide we will select the `single-disk` template, which uses a simple ext4 disk with a single partition.

!!! tip
    For advanced partitioning, see [Disko templates](https://github.com/nix-community/disko-templates) or [Disko examples](https://github.com/nix-community/disko/tree/master/example).
    You can also [contribute a disk template to clan core](https://docs.clan.lol/guides/disko-templates/community/)

To set up a disk schema for a machine, run

```bash
clan templates apply disk single-disk jon --set mainDisk ""
```

This should fail and print the valid options for the specific hardware:

```shellSession
Invalid value for placeholder mainDisk - Valid options:
/dev/disk/by-id/nvme-WD_PC_SN740_SDDQNQD-512G-1201_232557804368
```

Re-run the command with the correct disk:

```bash
clan templates apply disk single-disk jon --set mainDisk "/dev/disk/by-id/nvme-WD_PC_SN740_SDDQNQD-512G-1201_232557804368"
```

It should now succeed:

```shellSession
Applied disk template 'single-disk' to machine 'jon'
```

A `disko.nix` file should be created in `machines/jon`.
You can have a look and customize it if needed; it will look roughly like the sketch below.
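
The exact contents depend on the template and the values you passed; for the `single-disk` template it will follow the usual disko schema, roughly along these lines (partition sizes and labels here are illustrative, not the literal template output):

```nix
# machines/jon/disko.nix - rough sketch; the generated file may differ
{
  disko.devices.disk.main = {
    type = "disk";
    device = "/dev/disk/by-id/nvme-WD_PC_SN740_SDDQNQD-512G-1201_232557804368";
    content = {
      type = "gpt";
      partitions = {
        boot = {
          size = "500M";
          type = "EF00"; # EFI system partition
          content = {
            type = "filesystem";
            format = "vfat";
            mountpoint = "/boot";
          };
        };
        root = {
          size = "100%"; # use the remaining space
          content = {
            type = "filesystem";
            format = "ext4";
            mountpoint = "/";
          };
        };
      };
    };
  };
}
```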

!!! Danger
    Don't change the `disko.nix` after the machine has been installed for the first time, unless you really know what you are doing.
    Changing the disko configuration requires wiping and reinstalling the machine.

## Deploy the machine

**Finally, deployment time!**

This command is destructive and will format your disk and install NixOS on it! It is equivalent to appending `--phases kexec,disko,install,reboot`.

```bash
clan machines install [MACHINE] --target-host root@<IP>
```
@@ -0,0 +1,28 @@
### Generate Facts and Vars

Typically, this step is handled automatically when a machine is deployed. However, to enable the use of `nix flake check` with your configuration, it must be completed manually beforehand.

Currently, generating all the necessary facts requires two separate commands. This is due to the coexistence of two parallel secret management solutions:
the newer, recommended version (`clan vars`) and the older version (`clan facts`) that we are slowly phasing out.

To generate both facts and vars, execute the following commands:

```sh
clan facts generate && clan vars generate
```

### Check Configuration

Validate your configuration by running:

```bash
nix flake check
```

This command helps ensure that your system configuration is correct and free from errors.

!!! Tip

    You can integrate this step into your [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) workflow to ensure that only valid Nix configurations are merged into your codebase.
@@ -0,0 +1,178 @@
This guide will help you convert your existing NixOS configurations into a Clan.

!!! Warning
    Migrating instead of starting fresh can be trickier and might lead to bugs or
    unexpected issues. We recommend reading the [Getting Started](../getting-started/creating-your-first-clan.md) guide first.

Once you have a working setup and understand the concepts, transferring your NixOS configurations over is easy.

## Back up your existing configuration

Before you start, it is strongly recommended to back up your existing
configuration in any form you see fit. If you use version control to manage
your configuration changes, it is also a good idea to follow the migration
guide in a separate branch until everything works as expected.

## Starting Point

We assume you are already using NixOS flakes to manage your configuration. If
not, migrate to a flake-based setup following the official [NixOS
documentation](https://nix.dev/manual/nix/2.25/command-ref/new-cli/nix3-flake.html).
The snippet below shows a common Nix flake. For this example we will assume you
have two hosts: **berlin** and **cologne**.

```nix
{
  inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";

  outputs = { self, nixpkgs, ... }: {

    nixosConfigurations = {

      berlin = nixpkgs.lib.nixosSystem {
        system = "x86_64-linux";
        modules = [ ./machines/berlin/configuration.nix ];
      };

      cologne = nixpkgs.lib.nixosSystem {
        system = "x86_64-linux";
        modules = [ ./machines/cologne/configuration.nix ];
      };
    };
  };
}
```

## 1. Add `clan-core` to `inputs`

Add `clan-core` to your flake as input.

```nix
inputs.clan-core = {
  url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
  # Don't do this if your machines are on nixpkgs stable.
  inputs.nixpkgs.follows = "nixpkgs";
};
```

## 2. Update Outputs

To be able to access our newly added dependency, it has to be added to the
output parameters.

```diff
- outputs = { self, nixpkgs, ... }:
+ outputs = { self, nixpkgs, clan-core, ... }:
```

The existing `nixosConfigurations` output of your flake will be created by
clan. In addition, a new `clanInternals` output will be added. Since both of
these are provided by the output of `clan-core.lib.clan`, a common syntax is to use a
`let...in` statement to create your clan and access its parameters in the flake
outputs.

For the provided flake example, your flake should now look like this:

```nix
{
  inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";

  inputs.clan-core = {
    url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
    inputs.nixpkgs.follows = "nixpkgs";
  };

  outputs = { self, nixpkgs, clan-core, ... }:
    let
      clan = clan-core.lib.clan {
        self = self; # this needs to point at the repository root
        specialArgs = { };
        meta.name = throw "Change me to something unique";

        machines = {
          berlin = {
            nixpkgs.hostPlatform = "x86_64-linux";
            imports = [ ./machines/berlin/configuration.nix ];
          };
          cologne = {
            nixpkgs.hostPlatform = "x86_64-linux";
            imports = [ ./machines/cologne/configuration.nix ];
          };
        };
      };
    in
    {
      inherit (clan.config) nixosConfigurations nixosModules clanInternals;
      clan = clan.config;
    };
}
```

✅ Et voilà! Your existing hosts are now part of a clan.

Existing Nix tooling
should still work as normal. To check that you didn't make any errors, run `nix
flake show` and verify both hosts are still recognized as if nothing had
changed. You should also see the new `clan` output.

```
❯ nix flake show
git+file:///my-nixos-config
├───clan: unknown
└───nixosConfigurations
    ├───berlin: NixOS configuration
    └───cologne: NixOS configuration
```

Of course you can also rebuild your configuration using `nixos-rebuild` and
verify everything still works.

## 3. Add `clan-cli` to your `devShells`

At this point Clan is set up, but you can't use the CLI yet. To do so, it is
recommended to expose it via a `devShell` in your flake. It is also possible to
install it any other way you would install a package in Nix, but using a
development shell ensures the CLI's version will always be in sync with your
configuration.

A minimal example is provided below; add it to your flake outputs.

```nix
devShells."x86_64-linux".default = nixpkgs.legacyPackages."x86_64-linux".mkShell {
  packages = [ clan-core.packages."x86_64-linux".clan-cli ];
};
```

To use the CLI, execute `nix develop` in the directory of your flake. The
resulting shell provides you with the `clan` CLI tool. Since you will be using
it every time you interact with Clan, it is recommended to set up
[direnv](https://direnv.net/).

Verify everything works as expected by running `clan machines list`.

```
❯ nix develop
[user@host:~/my-nixos-config]$ clan machines list
berlin
cologne
```

## Specify Targets

Clan needs to know where it can reach your hosts. For testing purposes, set
`clan.core.networking.targetHost` to the machine's address or hostname.

```nix
# machines/berlin/configuration.nix
{
  clan.core.networking.targetHost = "123.4.56.78";
}
```

See our guide on properly [configuring machine networking](../guides/networking/networking.md).

## Next Steps

You are now fully set up. Use the CLI to manage your hosts or proceed to
configure further services. At this point you should be able to run commands
like `clan machines update berlin` to deploy a host.
@@ -0,0 +1,135 @@
Ready to manage your fleet of machines?

We will create a declarative infrastructure using **clan**, **git**, and **nix flakes**.

You'll finish with a centrally managed fleet, ready to import your existing NixOS configuration.

## Prerequisites

Make sure you have the following:

* 💻 **Administration Machine**: Run the setup commands from this machine.
* 🛠️ **Nix**: The Nix package manager, installed on your administration machine.

??? info "**How to install Nix (Linux / macOS / NixOS)**"

    **On Linux or macOS:**

    1. Run the recommended installer:
        ```shellSession
        curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
        ```

    2. After installation, ensure flakes are enabled by adding this line to `~/.config/nix/nix.conf`:
        ```
        experimental-features = nix-command flakes
        ```

    **On NixOS:**

    Nix is already installed. You only need to enable flakes for your user in your `configuration.nix`:

    ```nix
    {
      nix.settings.experimental-features = [ "nix-command" "flakes" ];
    }
    ```
    Then, run `nixos-rebuild switch` to apply the changes.

* 🎯 **Target Machine(s)**: A remote machine with SSH, or your local machine (if NixOS).

## Create a New Clan

1. Navigate to your desired directory:

    ```shellSession
    cd <your-directory>
    ```

2. Create a new clan flake:

    **Note:** This creates a new directory in your current location.

    ```shellSession
    nix run https://git.clan.lol/clan/clan-core/archive/main.tar.gz#clan-cli --refresh -- flakes create
    ```

3. Enter a **name** in the prompt:

    ```terminalSession
    Enter a name for the new clan: my-clan
    ```

## Project Structure

Your new directory, `my-clan`, should contain the following structure:

```
my-clan/
├── clan.nix
├── flake.lock
├── flake.nix
├── modules/
└── sops/
```

!!! note "Templates"
    This is the structure for the `default` template.

    Use `clan templates list` and `clan templates --help` for available templates & more. Keep in mind that the exact files may change as templates evolve.

## Activate the Environment

To get started, `cd` into your new project directory.

```shellSession
cd my-clan
```

Now, activate the environment using one of the following methods.

=== "Automatic (direnv, recommended)"
    **Prerequisite**: You must have [nix-direnv](https://github.com/nix-community/nix-direnv) installed.

    Run `direnv allow` to automatically load the environment whenever you enter this directory.
    ```shellSession
    direnv allow
    ```

=== "Manual (nix develop)"
    Run `nix develop` to load the environment for your current shell session.

    ```shellSession
    nix develop
    ```

## Verify the Setup

Once your environment is active, verify that the `clan` command is available by running:

```shellSession
clan show
```

You should see the default metadata for your new clan:

```shellSession
Name: __CHANGE_ME__
Description: None
```

This confirms your setup is working correctly.

You can now change the default name by editing the `meta.name` field in your `clan.nix` file.

```{.nix title="clan.nix" hl_lines="3"}
{
  # Ensure this is unique among all clans you want to use.
  meta.name = "__CHANGE_ME__";

  # ...
  # elided
}
```
@@ -0,0 +1,201 @@
|
||||
To install Clan on physical machines, you need to use our custom installer image. This is necessary for proper installation and operation.
|
||||
|
||||
!!! note "Deploying to a Virtual Machine?"
|
||||
If you're deploying to a virtual machine (VM), you can skip this section and go directly to the [Deploy Virtual Machine](../../getting-started/deploy-to-virtual-machine.md) step. In this scenario, we automatically use [nixos-anywhere](https://github.com/nix-community/nixos-anywhere) to replace the kernel during runtime.
|
||||
|
||||
??? info "Why nixos-anywhere Doesn't Work on Physical Hardware?"
|
||||
nixos-anywhere relies on [kexec](https://wiki.archlinux.org/title/Kexec) to replace the running kernel with our custom one. This method often has compatibility issues with real hardware, especially systems with dedicated graphics cards like laptops and servers, leading to crashes and black screens.
|
||||
|
||||
??? info "Reasons for a Custom Install Image"
|
||||
Our custom install images are built to include essential tools like [nixos-facter](https://github.com/nix-community/nixos-facter) and support for [ZFS](https://wiki.archlinux.org/title/ZFS). They're also optimized to run on systems with as little as 1 GB of RAM, ensuring efficient performance even on lower-end hardware.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [x] A free USB Drive with at least 1.5GB (All data on it will be lost)
|
||||
- [x] Linux/NixOS Machine with Internet
|
||||
|
||||
## Identify the USB Flash Drive
|
||||
|
||||
1. Insert your USB flash drive into your computer.
|
||||
|
||||
2. Identify your flash drive with `lsblk`:
|
||||
|
||||
```shellSession
|
||||
lsblk
|
||||
```
|
||||
|
||||
```{.shellSession hl_lines="2" .no-copy}
|
||||
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
|
||||
sdb 8:0 1 117,2G 0 disk
|
||||
└─sdb1 8:1 1 117,2G 0 part /run/media/qubasa/INTENSO
|
||||
nvme0n1 259:0 0 1,8T 0 disk
|
||||
├─nvme0n1p1 259:1 0 512M 0 part /boot
|
||||
└─nvme0n1p2 259:2 0 1,8T 0 part
|
||||
└─luks-f7600028-9d83-4967-84bc-dd2f498bc486 254:0 0 1,8T 0 crypt /nix/store
|
||||
```
|
||||
|
||||
!!! Info "In this case the USB device is `sdb`"
|
||||
|
||||
3. Ensure all partitions on the drive are unmounted. Replace `sdb1` in the command below with your device identifier (like `sdc1`, etc.):
|
||||
|
||||
```shellSession
|
||||
sudo umount /dev/sdb1
|
||||
```
|
||||
|
||||
## Installer
|
||||
|
||||
=== "**Linux OS**"
|
||||
**Create a Custom Installer**
|
||||
|
||||
We recommend to build your own installer because of the following reasons:
|
||||
|
||||
- Include your ssh public keys into the image that allows passwordless ssh connection later on.
|
||||
- Set your preferred language and keymap
|
||||
|
||||
```bash
|
||||
clan flash write --flake https://git.clan.lol/clan/clan-core/archive/main.tar.gz \
|
||||
--ssh-pubkey $HOME/.ssh/id_ed25519.pub \
|
||||
--keymap us \
|
||||
--language en_US.UTF-8 \
|
||||
--disk main /dev/sd<X> \
|
||||
flash-installer
|
||||
```
|
||||
!!! Note
|
||||
Replace `$HOME/.ssh/id_ed25519.pub` with a path to your SSH public key.
|
||||
Replace `/dev/sd<X>` with the drive path you want to flash
|
||||
|
||||
!!! Danger "Specifying the wrong device can lead to unrecoverable data loss."
|
||||
|
||||
The `clan flash` utility will erase the disk. Make sure to specify the correct device
|
||||
|
||||

    - **SSH-Pubkey Option**

        To add an SSH public key to the installer image, append the option:

        ```
        --ssh-pubkey <pubkey_path>
        ```

        If you do not have an SSH key yet, you can generate one with the `ssh-keygen -t ed25519` command.
        This SSH key will be installed for the root user.

    - **Connect to the installer**

        On boot, the installer will display on-screen the IP address it received from the network.
        If you need to configure Wi-Fi first, refer to the next section.
        If Multicast-DNS (Avahi) is enabled on your own machine, you can also access the installer using the `flash-installer.local` address.

    - **List Keymaps**

        You can get a list of all keymaps with the following command:

        ```
        clan flash list keymaps
        ```

    - **List Languages**

        You can get a list of all languages with the following command:

        ```
        clan flash list languages
        ```
=== "**Other OS**"
|
||||
**Download Generic Installer**
|
||||
|
||||
For x86_64:
|
||||
|
||||
```shellSession
|
||||
wget https://github.com/nix-community/nixos-images/releases/download/nixos-unstable/nixos-installer-x86_64-linux.iso
|
||||
```
|
||||
|
||||
For generic arm64 / aarch64 (probably does not work on raspberry pi...)
|
||||
|
||||
```shellSession
|
||||
wget https://github.com/nix-community/nixos-images/releases/download/nixos-unstable/nixos-installer-aarch64-linux.iso
|
||||
```
|
||||
|
||||
!!! Note
|
||||
If you don't have `wget` installed, you can use `curl --progress-bar -OL <url>` instead.
|
||||
|
||||

## Flash the Installer to the USB Drive

!!! Danger "Specifying the wrong device can lead to unrecoverable data loss."

    The `dd` utility will erase the disk. Make sure to specify the correct device (`of=...`).

    For example, if the USB device is `sdb`, use `of=/dev/sdb` (on macOS it will look more like `/dev/disk1`).

On Linux, you can use the `lsblk` utility to identify the correct disk:

```
lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
```

On macOS, use `diskutil`:

```
diskutil list
```

Use the `dd` utility to write the NixOS installer image to your USB drive.
Replace `/dev/sd<X>` with your external drive from above.

```shellSession
sudo dd bs=4M conv=fsync status=progress if=./nixos-installer-x86_64-linux.iso of=/dev/sd<X>
```

- **Connect to the installer**

    On boot, the installer will display on-screen the IP address it received from the network.
    If you need to configure Wi-Fi first, refer to the next section.
    If Multicast-DNS (Avahi) is enabled on your own machine, you can also access the installer using the `nixos-installer.local` address.
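
    For example, once mDNS resolution works, you can reach the installer over SSH (it will prompt for the root password shown on the installer screen):

    ```bash
    ssh root@nixos-installer.local
    ```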

## Boot From USB Stick

- To use it, boot from the Clan USB drive with **secure boot turned off**. For step-by-step instructions, go to [Disabling Secure Boot](../../guides/secure-boot.md).

## (Optional) Connect to Wifi Manually

If you don't have LAN access, the installer supports connecting via Wi-Fi.

```shellSession
iwctl
```

This will enter the `iwd` shell:

```{.console, .no-copy}
[iwd]#
```

Now run the following commands to connect to your Wi-Fi:

```{.shellSession .no-copy}
# Identify your network device.
device list

# Replace 'wlan0' with your wireless device name.
# Find your Wi-Fi SSID.
station wlan0 scan
station wlan0 get-networks

# Replace 'your_ssid' with the Wi-Fi SSID.
# Connect to your network.
station wlan0 connect your_ssid

# Verify you are connected.
station wlan0 show
```

If the connection was successful, you should see something like this:

```{.console, .no-copy}
State                 connected
Connected network     FRITZ!Box (Your router device)
IPv4 address          192.168.188.50 (Your new local ip)
```

Press ++ctrl+d++ to exit `iwd`.

!!! Important
    Press ++ctrl+d++ **again** to update the displayed QR code and connection information.

You're all set up!

---

Now that you have created a machine, added some services, and set up secrets, this guide will walk you through how to deploy it.

### Prerequisites

- [x] RAM > 2 GB
- [x] **Two Computers**: You need one computer that you're getting ready (we'll call this the Target Computer) and another one to set it up from (we'll call this the Setup Computer). Make sure both can talk to each other over the network using SSH.
- [x] **Machine configuration**: See our basic [adding and configuring machine guide](../../getting-started/add-machines.md)
- [x] **Initialized secrets**: See [secrets](../../guides/secrets.md) for how to initialize your secrets.
- [x] **USB Flash Drive**: See [Clan Installer](../../getting-started/deploy-to-physical-machine/flash-installer.md)

### Image Installer

This method makes use of the [image installers](../../getting-started/deploy-to-physical-machine/flash-installer.md).

On boot, the installer randomly generates a password and local addresses, then runs an SSH server with these preconfigured.
The installer shows its deployment-relevant information in two formats: as plain text and as a QR code.

This is an example of the booted installer.

```{ .bash .annotate .no-copy .nohighlight}
┌─────────────────────────────────────────────────────────────────────────────────────┐
│  ┌───────────────────────────┐                                                      │
│  │███████████████████████████│  # This is the QR Code (1)                           │
│  │██ ▄▄▄▄▄ █▀▄█▀█▀▄█ ▄▄▄▄▄ ██│                                                      │
│  │██ █   █ █▀▄▄▄█ ▀█ █   █ ██│                                                      │
│  │██ █▄▄▄█ █▀▄ ▀▄▄▄█ █▄▄▄█ ██│                                                      │
│  │██▄▄▄▄▄▄▄█▄▀ ▀▄▀▄█▄▄▄▄▄▄▄██│                                                      │
│  │███▀▀▀ █▄▄█ ▀▄ ▄▀▄█   ███  │                                                      │
│  │██▄██▄▄█▄▄▀▀██▄▀ ▄▄▄ ▄▀█▀██│                                                      │
│  │██ ▄▄▄▄▄ █▄▄▄▄ █ █▄█ █▀ ███│                                                      │
│  │██ █   █ █ █  █ ▄▄▄  ▄▀▀ ██│                                                      │
│  │██ █▄▄▄█ █ ▄ ▄    ▄ ▀█ ▄███│                                                      │
│  │██▄▄▄▄▄▄▄█▄▄▄▄▄▄█▄▄▄▄▄█▄███│                                                      │
│  │███████████████████████████│                                                      │
│  └───────────────────────────┘                                                      │
│ ┌─────────────────────────────────────────────────────────────────────────────────┐ │
│ │Root password: cheesy-capital-unwell  # password (2)                             │ │
│ │Local network addresses:                                                         │ │
│ │enp1s0           UP    192.168.178.169/24 metric 1024 fe80::21e:6ff:fe45:3c92/64 │ │
│ │enp2s0           DOWN                                                            │ │
│ │wlan0            DOWN  # connect to wlan (3)                                     │ │
│ │Onion address: 6evxy5yhzytwpnhc2vpscrbti3iktxdhpnf6yim6bbs25p4v6beemzyd.onion    │ │
│ │Multicast DNS: nixos-installer.local                                             │ │
│ └─────────────────────────────────────────────────────────────────────────────────┘ │
│ Press 'Ctrl-C' for console access                                                   │
│                                                                                     │
└─────────────────────────────────────────────────────────────────────────────────────┘
```

1. This is not an actual QR code, because QR codes render rather poorly on text pages.
    This would be the actual content of this specific QR code, prettified:
    ```json
    {
      "pass": "cheesy-capital-unwell",
      "tor": "6evxy5yhzytwpnhc2vpscrbti3iktxdhpnf6yim6bbs25p4v6beemzyd.onion",
      "addrs": [
        "2001:9e8:347:ca00:21e:6ff:fe45:3c92"
      ]
    }
    ```

    To generate the actual QR code that would be displayed, use:
    ```shellSession
    echo '{"pass":"cheesy-capital-unwell","tor":"6evxy5yhzytwpnhc2vpscrbti3iktxdhpnf6yim6bbs25p4v6beemzyd.onion","addrs":["2001:9e8:347:ca00:21e:6ff:fe45:3c92"]}' | nix run nixpkgs#qrencode -- -s 2 -m 2 -t utf8
    ```
2. The root password for the installer medium.
    This password is autogenerated and meant to be easily typeable.
3. See how to connect the installer medium to wlan [here](../../getting-started/deploy-to-physical-machine/flash-installer.md).

!!! tip
    For easy sharing of deployment information via QR code, we highly recommend using [KDE Connect](https://apps.kde.org/de/kdeconnect/).

There are two ways to deploy your machine, depending on whether you type the password manually or use the QR code:

### Generating a Hardware Report

The following command generates a hardware report with [nixos-facter](https://github.com/nix-community/nixos-facter) and writes it back into your machine folder. The `--phases kexec` flag ensures that we are not formatting anything yet; instead, if the target system is not a NixOS machine, it will use [kexec](https://wiki.archlinux.org/title/Kexec) to switch to a NixOS kernel.

=== "Password"
    **Password**

    ```terminal
    clan machines install [MACHINE] \
      --update-hardware-config nixos-facter \
      --phases kexec \
      --target-host root@192.168.178.169
    ```

=== "QR Code"
    **QR Code**

    **Using a JSON String or File Path**:

    Copy the JSON string contained in the QR code and provide its path or paste it directly:
    ```terminal
    clan machines install [MACHINE] --json [JSON] \
      --update-hardware-config nixos-facter \
      --phases kexec
    ```

    **Using an Image Containing the QR Code**:

    Provide the path to an image file containing the QR code displayed by the installer:
    ```terminal
    clan machines install [MACHINE] --png [PATH] \
      --update-hardware-config nixos-facter \
      --phases kexec
    ```

If you are using our template, `[MACHINE]` would be `jon`.
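
For example, using our template machine `jon` together with the address shown by the installer above, the password-based variant becomes:

```bash
clan machines install jon \
  --update-hardware-config nixos-facter \
  --phases kexec \
  --target-host root@192.168.178.169
```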

[Next Step (Choose Disk Format)](../../getting-started/configure-disk.md){ .md-button .md-button--primary }

---

## Prerequisites

- [x] RAM > 2 GB
- [x] **Two Computers**: You need one computer that you're getting ready (we'll call this the Target Computer) and another one to set it up from (we'll call this the Setup Computer). Make sure both can talk to each other over the network using SSH.
- [x] **Machine configuration**: See our basic [adding and configuring machine guide](../getting-started/add-machines.md)

Clan supports any cloud machine as long as it is reachable via SSH and supports `kexec`.

??? tip "NixOS can cause strange issues when booting in certain cloud environments."
    If on Linode: Make sure that the system uses "Direct Disk boot kernel" (found in the configuration panel).

The following command generates a hardware report with [nixos-facter](https://github.com/nix-community/nixos-facter) and writes it back into your machine folder. The `--phases kexec` flag ensures that we are not formatting anything yet; instead, if the target system is not a NixOS machine, it will use [kexec](https://wiki.archlinux.org/title/Kexec) to switch to a NixOS kernel.

```terminal
clan machines install [MACHINE] \
  --update-hardware-config nixos-facter \
  --phases kexec \
  --target-host myuser@<IP>
```

!!! Warning
    After running the above command, be aware that the SSH login user changes from `myuser` to `root`. For subsequent SSH connections to the target machine, use `root` as the login user. This change occurs because the system switches to the NixOS kernel using `kexec`.

---

# Update Machines

The Clan command line interface enables you to update machines remotely over SSH.
In this guide we will teach you how to set a `targetHost` in Nix,
and how to define a remote builder for your machine closures.

## Setting `targetHost`

Set the machine's `targetHost` to the reachable IP address of the new machine.
This eliminates the need to specify `--target-host` in CLI commands.

```{.nix title="clan.nix" hl_lines="9"}
{
  # Ensure this is unique among all clans you want to use.
  meta.name = "my-clan";

  inventory.machines = {
    # Define machines here.
    # The machine name will be used as the hostname.
    jon = {
      deploy.targetHost = "root@192.168.192.4"; # (1)
    };
  };
  # [...]
}
```

The use of `root@` in the target address implies SSH access as the `root` user.
Ensure that the root login is secured and only used when necessary.
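
With `targetHost` set, a plain `clan machines update jon` is enough. Without it, you would have to pass the address explicitly on every invocation, along these lines:

```bash
# Equivalent one-off invocation without targetHost configured in Nix
clan machines update jon --target-host root@192.168.192.4
```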

## Multiple Target Hosts

You can now experiment with a new interface that allows you to define multiple `targetHost` addresses for different VPNs. Learn more and try it out in our [networking guide](../guides/networking/networking.md).

## Updating Machine Configurations

Execute the following command to update the specified machine:

```bash
clan machines update jon
```

All machines can be updated simultaneously by omitting the machine name:

```bash
clan machines update
```

---

## Advanced Usage

The following options are only needed for special cases, such as limited resources, mixed environments, or private flakes.

### Setting `buildHost`

If the machine does not have enough resources to run the NixOS **evaluation** or **build** itself,
it is also possible to specify a `buildHost` instead.
During an update, clan will SSH into the `buildHost` and run `nixos-rebuild` from there.

!!! Note
    The `buildHost` option should be set directly within your machine's Nix configuration, **not** under `inventory.machines`.

```{.nix hl_lines="5" .no-copy}
clan {
  # ...
  machines = {
    "jon" = {
      clan.core.networking.buildHost = "root@<host_or_ip>";
    };
  };
};
```

### Overriding configuration with CLI flags

`buildHost`, `targetHost`, and other network settings can be temporarily overridden for a single command.

For the full list of flags, refer to the [Clan CLI](../reference/cli/index.md).

```bash
# Build on a remote host
clan machines update jon --build-host root@192.168.1.10

# Build locally (useful for testing or when the target has limited resources)
clan machines update jon --build-host local
```

!!! Note
    Make sure the CPU architecture of the `buildHost` matches that of the `targetHost`.

    For example, if deploying to a macOS machine with an ARM64-Darwin architecture, you need a second macOS machine with the same architecture to build it.
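
A quick sanity check is to compare the architecture reported by both hosts (the hostnames below are placeholders):

```bash
# Both commands should print the same value, e.g. "x86_64" or "aarch64"
ssh root@build-host uname -m
ssh root@target-host uname -m
```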

### Excluding a machine from `clan machine update`

To exclude machines from being updated when running `clan machines update` without any machines specified,
one can set the `clan.deployment.requireExplicitUpdate` option to true:

```{.nix hl_lines="5" .no-copy}
clan {
  # ...
  machines = {
    "jon" = {
      clan.deployment.requireExplicitUpdate = true;
    };
  };
};
```

This is useful for machines that are not always online or are not part of the regular update cycle.

### Uploading Flake Inputs

When updating remote machines, flake inputs are usually fetched by the build host.
However, if flake inputs require authentication (e.g., private repositories), the build host may not be able to fetch them itself.

Use the `--upload-inputs` flag to upload all inputs from your local machine:

```bash
clan machines update jon --upload-inputs
```

This is particularly useful when:

- The flake references private Git repositories
- Authentication credentials are only available on your local machine
- The build host doesn't have access to certain network resources

---

This guide explains how to set up a [Hetzner Storage Box](https://docs.hetzner.com/storage/storage-box/general) as a backup destination instead of using an internal Clan backup server. Follow the steps below to configure and verify the setup.

### Step 1: Create a Hetzner Storage Box

Begin by [creating a Hetzner Storage Box account](https://docs.hetzner.com/storage/storage-box/getting-started/creating-a-storage-box).

### Step 2: Create a Sub-Account

Set up a sub-account for your `jon` machine. Save the SSH password for this account in your password manager for future reference.

### Step 3: Configure BorgBackup in `clan.nix`

Add the BorgBackup service to your `clan.nix` configuration. In this example, the `jon` machine will back up to `user-sub1@user-sub1.your-storagebox.de` in the `borgbackup` folder:

```nix hl_lines="9"
inventory.instances = {
  borgbackup = {
    module = {
      name = "borgbackup";
      input = "clan-core";
    };
    roles.client.machines."jon".settings = {
      destinations."storagebox" = {
        repo = "user-sub1@user-sub1.your-storagebox.de:/./borgbackup";
        rsh = ''ssh -p 23 -oStrictHostKeyChecking=accept-new -i /run/secrets/vars/borgbackup/borgbackup.ssh'';
      };
    };
  };
};
```

### Step 4: Generate SSH Keys

Run the following command to generate the SSH private keys:

```bash
clan vars generate
```

### Step 5: Add the Public Key to the Sub-Account

Add the generated SSH public key to the `user-sub1` account by running:

```bash
clan vars get jon borgbackup/borgbackup.ssh.pub | ssh -p23 user-sub1@user-sub1.your-storagebox.de install-ssh-key
```

### Step 6: Deploy the Configuration

Apply the changes to your Clan setup by executing:

```bash
clan machines update
```

### Step 7: Verify the Setup

Check if the configuration works by starting the BorgBackup service on the `jon` machine:

```bash
systemctl start borgbackup-job-storagebox.service &
```

Then, inspect the service logs to ensure everything is functioning correctly:

```bash
journalctl -u borgbackup-job-storagebox.service
```

---

# Introduction to Clan Backups

This guide explains how to use the Clan backup and state management interface to configure, manage, and restore backups for your services and machines. By the end of this guide, you will understand how to define backup states, manage backups, and restore data.

## State Management

Clan backups are based on the concept of [states](../../reference/clan.core/state.md). A state is a Nix attribute set, defined as `clan.core.state.<name> = {};`, which specifies the files or directories to back up.

For example, if you have a clan service called `linkding`, you can define the folders to back up as follows:

```nix hl_lines="2"
clan.core.state.linkding = {
  folders = [ "/var/backup/linkding" ];
};
```

In this example:

- `/var/backup/linkding` is the staging directory where data is prepared for backup.

This simple configuration ensures that all critical data for the `linkding` service is included in the backup process.

## Custom Pre and Post Backup Hooks

The state interface allows you to run custom scripts before creating a backup and after restoring one. These scripts are defined using the `preBackupScript` and `postRestoreScript` options. This can be useful for tasks like stopping services, syncing data, or performing cleanup operations.

### Example: Pre and Post Backup Scripts for the `linkding` Service

In the following example, we configure the `linkding` service to:

1. Stop the service before backing up its data.
2. Sync the data to a staging directory.
3. Restore the data and restart the service after restoration.

```nix hl_lines="5 26"
clan.core.state.linkding = {
  folders = [ "/var/backup/linkding" ];

  # Script to run before creating a backup
  preBackupScript = ''
    export PATH=${
      lib.makeBinPath [
        config.systemd.package
        pkgs.coreutils
        pkgs.rsync
      ]
    }

    # Check if the service is running
    service_status=$(systemctl is-active podman-linkding)

    if [ "$service_status" = "active" ]; then
      # Stop the service and sync data to the backup directory
      systemctl stop podman-linkding
      rsync -avH --delete --numeric-ids "/data/podman/linkding/" /var/backup/linkding/
      systemctl start podman-linkding
    fi
  '';

  # Script to run after restoring a backup
  postRestoreScript = ''
    export PATH=${
      lib.makeBinPath [
        config.systemd.package
        pkgs.coreutils
        pkgs.rsync
      ]
    }

    # Check if the service is running
    service_status="$(systemctl is-active podman-linkding)"

    if [ "$service_status" = "active" ]; then
      # Stop the service
      systemctl stop podman-linkding

      # Backup current data locally
      cp -rp "/data/podman/linkding" "/data/podman/linkding.bak"

      # Restore data from the backup directory
      rsync -avH --delete --numeric-ids /var/backup/linkding/ "/data/podman/linkding/"

      # Restart the service
      systemctl start podman-linkding
    fi
  '';
};
```

---

In this section we go over how to manage your collection of backups with the clan command.

### Listing states

To see which files (`states`) will be backed up on a specific machine, use:

```bash
clan state list jon
```

This will show all configured states for the machine `jon`, for example:

```text
· service: linkding
  folders:
  - /var/backup/linkding
  preBackupCommand: pre-backup-linkding
  postRestoreCommand: post-restore-linkding

· service: zerotier
  folders:
  - /var/lib/zerotier-one
```

### Creating backups

To create a backup of a machine (e.g., `jon`), run:

```bash
clan backups create jon
```

This will back up all configured states (`zerotier` and `linkding` in this
example) from the machine `jon`.

### Listing available backups

To see all available backups, use:

```bash
clan backups list
```

This will display all backups with their timestamps:

```text
storagebox::username@username.your-storagebox.de:/./borgbackup::jon-jon-2025-07-22T19:40:10
storagebox::username@username.your-storagebox.de:/./borgbackup::jon-jon-2025-07-23T01:00:00
storagebox::username@username.your-storagebox.de:/./borgbackup::jon-storagebox-2025-07-24T01:00:00
storagebox::username@username.your-storagebox.de:/./borgbackup::jon-storagebox-2025-07-24T06:02:35
```

### Restoring backups

For restoring a backup you have two options.

#### Full restoration

To restore all services from a backup:

```bash
clan backups restore jon borgbackup storagebox::u444061@u444061.your-storagebox.de:/./borgbackup::jon-storagebox-2025-07-24T06:02:35
```

#### Partial restoration

To restore only a specific service (e.g., `linkding`):

```bash
clan backups restore --service linkding jon borgbackup storagebox::u444061@u444061.your-storagebox.de:/./borgbackup::jon-storagebox-2025-07-24T06:02:35
```

---

In this guide we will explain how to set up a simple peer-to-peer backup system through the inventory, such that machines back up their state to other machines in the clan, ensuring redundancy and data safety.

### What is BorgBackup?

BorgBackup is a powerful and efficient backup solution designed for secure and space-efficient backups. It offers features such as:

- **Deduplication**: Saves storage space by avoiding duplicate data.
- **Encryption**: Ensures backups are secure and authenticated.
- **Compression**: Supports multiple compression algorithms like lz4, zstd, zlib, and more.
- **FUSE Mounting**: Allows backups to be mounted as a file system.
- **Cross-Platform**: Works on Linux, macOS, BSD, and more.
- **Open Source**: Licensed under BSD and supported by an active community.

While this guide uses BorgBackup, you can also use other backup services supported by Clan, depending on your requirements.

### Example Setup

In this example, we configure a backup system with three machines: `bob`, `jon`, and `alice`. The `bob` and `jon` machines will periodically back up their state folders to `alice`. The backups are encrypted for security.

```nix
inventory.instances = {
  borgbackup = {
    module = {
      name = "borgbackup";
      input = "clan-core";
    };
    roles.client.machines = {
      "bob" = { };
      "jon" = { };
    };
    roles.server.machines = {
      "alice" = { };
    };
  };
};
```

## Roles

In a Clan service, roles define how machines participate in the backup system. Each role applies specific Nix configurations to the machine, enabling flexibility and scalability in your backup setup.

- **Client**: These machines create backups and send them to designated destinations. Clients can be configured to back up to multiple destinations, ensuring redundancy and reliability.

- **Server**: These machines act as repositories, receiving and securely storing backups from client machines. Servers can be dedicated backup nodes within your clan network, providing centralized storage for all backups.

## Backup destinations

This service allows you to perform backups to multiple `destinations`.
Destinations can be:

- **Local**: Local disk storage
- **Server**: Your own borgbackup server (using the `server` role)
- **Third-party services**: Such as Hetzner's Storage Box

However, if BorgBackup does not meet your needs, you can implement your own backup clan service.

---

Here are some methods for debugging and testing the clan-cli.

## Using a Development Branch

To streamline your development process, we suggest not installing `clan-cli`. Instead, clone the `clan-core` repository and add `clan-core/pkgs/clan-cli/bin` to your PATH to use the checked-out version directly.

!!! Note
    After cloning, navigate to `clan-core/pkgs/clan-cli` and execute `direnv allow` to activate the devshell. This will set up a symlink to nixpkgs at a specific location; without it, `clan-cli` won't function correctly.

With this setup, you can easily use [breakpoint()](https://docs.python.org/3/library/pdb.html) to inspect the application's internal state as needed.

This approach is feasible because `clan-cli` only requires a Python interpreter and has no other dependencies.

```nix
pkgs.mkShell {
  packages = [
    pkgs.python3
  ];
  shellHook = ''
    export GIT_ROOT="$(git rev-parse --show-toplevel)"
    export PATH=$PATH:~/Projects/clan-core/pkgs/clan-cli/bin
  '';
}
```

## Debugging nixos-anywhere

If you encounter a bug in a complex shell script such as `nixos-anywhere`, start by replacing the `nixos-anywhere` command with a local checkout of the project; look in the [contribution](../../guides/contributing/CONTRIBUTING.md) section for an example.

## The Debug Flag

You can enhance your debugging process with the `--debug` flag in the `clan` command. When you add this flag to any command, it displays all subprocess commands initiated by `clan` in a readable format, along with the source code position that triggered them. This feature makes it easier to understand and trace what's happening under the hood.

```bash
$ clan machines list --debug
Debug log activated
nix \
  --extra-experimental-features 'nix-command flakes' \
  eval \
  --show-trace --json \
  --print-build-logs '/home/qubasa/Projects/qubasas-clan#clanInternals.machines.x86_64-linux' \
  --apply builtins.attrNames \
  --json
Caller: ~/Projects/clan-core/pkgs/clan-cli/clan_cli/machines/list.py:96::list_nixos_machines

warning: Git tree '/home/qubasa/Projects/qubasas-clan' is dirty
demo
gchq-local
wintux
```

## VSCode

If you're using VSCode, it has a handy feature that makes paths to source code files clickable in the integrated terminal. Combined with the previously mentioned techniques, this allows you to open a Clan in VSCode, execute a command like `clan machines list --debug`, and receive a printed path to the code that initiates the subprocess. With the `Ctrl` key (or `Cmd` on macOS) and a mouse click, you can jump directly to the corresponding line in the code file and add a `breakpoint()` call to it, to inspect the internal state.

## Finding Print Messages

To trace the origin of print messages in `clan-cli`, you can enable special debugging features using environment variables:

- Set `TRACE_PRINT=1` to include the source location with each print message:
    ```bash
    export TRACE_PRINT=1
    ```
    When running commands with `--debug`, every print will show where it was triggered in the code.

- To see a deeper stack trace for each print, set `TRACE_DEPTH` to the desired number of stack frames (e.g., 3):
    ```bash
    export TRACE_DEPTH=3
    ```
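
Putting these together, a typical debugging invocation could look like this:

```bash
# Show the source location and a 3-frame stack trace for every print
TRACE_PRINT=1 TRACE_DEPTH=3 clan machines list --debug
```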

### Additional Debug Logging

You can enable more detailed logging for specific components by setting these environment variables:

- `CLAN_DEBUG_NIX_SELECTORS=1` — verbose logs for flake.select operations
- `CLAN_DEBUG_NIX_PREFETCH=1` — verbose logs for flake.prefetch operations
- `CLAN_DEBUG_COMMANDS=1` — print the diffed environment of executed commands

Example:
```bash
export CLAN_DEBUG_NIX_SELECTORS=1
export CLAN_DEBUG_NIX_PREFETCH=1
export CLAN_DEBUG_COMMANDS=1
```

These options help you pinpoint the source and context of print messages and debug logs during development.

## Analyzing Performance

To understand what's causing slow performance, set the environment variable `export CLAN_CLI_PERF=1`. When you complete a clan command, you'll see a summary of various performance metrics, helping you identify what's taking up time.
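
For example, to profile a single command:

```bash
# Prints a summary of performance metrics after the command finishes
CLAN_CLI_PERF=1 clan machines list
```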

## See all possible packages and tests

To quickly show all possible packages and tests, execute:

```bash
nix flake show
```

Under `checks` you will find all tests that are executed in our CI. Under `packages` you will find all our projects.

```
git+file:///home/lhebendanz/Projects/clan-core
├───apps
│   └───x86_64-linux
│       ├───install-vm: app
│       └───install-vm-nogui: app
├───checks
│   └───x86_64-linux
│       ├───borgbackup omitted (use '--all-systems' to show)
│       ├───check-for-breakpoints omitted (use '--all-systems' to show)
│       ├───clan-dep-age omitted (use '--all-systems' to show)
│       ├───clan-dep-bash omitted (use '--all-systems' to show)
│       ├───clan-dep-e2fsprogs omitted (use '--all-systems' to show)
│       ├───clan-dep-fakeroot omitted (use '--all-systems' to show)
│       ├───clan-dep-git omitted (use '--all-systems' to show)
│       ├───clan-dep-nix omitted (use '--all-systems' to show)
│       ├───clan-dep-openssh omitted (use '--all-systems' to show)
│       ├───"clan-dep-python3.11-mypy" omitted (use '--all-systems' to show)
├───packages
│   └───x86_64-linux
│       ├───clan-cli omitted (use '--all-systems' to show)
│       ├───clan-cli-docs omitted (use '--all-systems' to show)
│       ├───clan-ts-api omitted (use '--all-systems' to show)
│       ├───clan-app omitted (use '--all-systems' to show)
│       ├───default omitted (use '--all-systems' to show)
│       ├───deploy-docs omitted (use '--all-systems' to show)
│       ├───docs omitted (use '--all-systems' to show)
│       ├───editor omitted (use '--all-systems' to show)
└───templates
    └───default: template: Initialize a new clan flake
```

You can execute every test separately by following the tree path, for example `nix run .#checks.x86_64-linux.clan-pytest -L`.

## Test Locally in Devshell with Breakpoints

To test the CLI locally in a development environment and set breakpoints for debugging, follow these steps:

1. Run the following command to execute your tests and allow for debugging with breakpoints:
    ```bash
    cd ./pkgs/clan-cli
    pytest -n0 -s --maxfail=1 ./tests/test_nameofthetest.py
    ```
    You can place `breakpoint()` in your Python code where you want to trigger a breakpoint for debugging.

## Test Locally in a Nix Sandbox

To run tests in a Nix sandbox:

```bash
nix build .#checks.x86_64-linux.clan-pytest-with-core
```

```bash
nix build .#checks.x86_64-linux.clan-pytest-without-core
```

### Inspecting the Nix Sandbox

If you need to inspect the Nix sandbox while running tests, follow these steps:

1. Insert an endless sleep into your test code where you want to pause the execution. For example:

    ```python
    import time
    time.sleep(3600)  # Sleep for one hour
    ```

2. Use `cntr` and `psgrep` to attach to the Nix sandbox. This allows you to interactively debug your code while it's paused. For example:

    ```bash
    psgrep <your_python_process_name>
    cntr attach <container id, container name or process id>
    ```

Alternatively, you can also use the [nix breakpoint hook](https://nixos.org/manual/nixpkgs/stable/#breakpointhook).

---

# Testing your contributions

Each feature added to clan should be tested extensively via automated tests.

This document covers different methods of automated testing, including creating, running and debugging such tests.

In order to test the behavior of clan, different testing frameworks are used depending on the concern:

- NixOS VM tests: for high-level integration
- NixOS container tests: for high-level integration
- Python tests via pytest: for unit tests and integration tests
- Nix eval tests: for nix functions, libraries, modules, etc.

## NixOS VM Tests

The [NixOS VM Testing Framework](https://nixos.org/manual/nixos/stable/index.html#sec-nixos-tests) is used to create high-level integration tests, by running one or more VMs generated from a specified config. Commands can be executed on the booted machine(s) to verify a deployment of a service works as expected. All machines within a test are connected by a virtual network. Internet access is not available.

### When to use VM tests

- testing that a service defined through a clan module works as expected after deployment
- testing clan-cli subcommands which require accessing a remote machine

### When not to use VM tests

NixOS VM tests are slow and expensive. They should only be used for testing high-level integration of components.
VM tests should be avoided wherever it is possible to implement a cheaper unit test instead.

- testing detailed behavior of a certain clan-cli command -> use unit testing via pytest instead
- regression testing -> add a unit test

### Finding examples for VM tests

Existing NixOS VM tests in clan-core can be found by using ripgrep:

```shellSession
rg self.clanLib.test.baseTest
```

### Locating definitions of failing VM tests

All NixOS VM tests in clan are exported as individual flake outputs under `checks.x86_64-linux.{test-attr-name}`.
If a test fails in CI:

- look for the job name of the test near the top of the CI job page, for example `gitea:clan/clan-core#checks.x86_64-linux.borgbackup/1242`
    - in this case `checks.x86_64-linux.borgbackup` is the attribute path
- note the last element of that attribute path, in this case `borgbackup`
- search for the attribute name inside the `/checks` directory via ripgrep

Example: locating the VM test named `borgbackup`:

```shellSession
$ rg "borgbackup =" ./checks
./checks/flake-module.nix
41:        borgbackup = self.clanLib.test.baseTest ./borgbackup nixosTestArgs;
```

-> the location of that test is `/checks/flake-module.nix` line `41`.

### Adding VM tests

Create a NixOS test module under `/checks/{name}/default.nix` and import it in `/checks/flake-module.nix`.

### Running VM tests

```shellSession
nix build .#checks.x86_64-linux.{test-attr-name}
```
(replace `{test-attr-name}` with the name of the test)
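
For example, to run the `borgbackup` test located above:

```bash
nix build .#checks.x86_64-linux.borgbackup
```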

### Debugging VM tests

The following techniques can be used to debug a VM test:

#### Print Statements

Locate the definition (see above) and add print statements, for example `print(client.succeed("systemctl --failed"))`, then re-run the test via `nix build` (see above).

#### Interactive Shell

- Execute the VM test outside the nix sandbox via the following command:
  `nix run .#checks.x86_64-linux.{test-attr-name}.driver -- --interactive`
- Then run the commands in the machines manually, for example:
    ```python3
    start_all()
    machine1.succeed("echo hello")
    ```

#### Breakpoints

To get an interactive shell at a specific line in the VM test script, add a `breakpoint()` call before the line to debug, then run the test outside of the sandbox via:
`nix run .#checks.x86_64-linux.{test-attr-name}.driver`
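
Again using the `borgbackup` test as an example:

```bash
nix run .#checks.x86_64-linux.borgbackup.driver
```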

## NixOS Container Tests

These are very similar to NixOS VM tests, in that they run virtualized NixOS machines, but instead of using VMs they use containers, which are much cheaper to launch.
As of now, the container test driver is a downstream development in clan-core.
Basically everything stated under the NixOS VM tests section applies here, with some limitations.

Limitations:

- Cannot run in interactive mode; however, while the container test runs, it logs an `nsenter` command that can be used to log into each of the containers (see the sketch after this list).
- setuid binaries don't work
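
The logged command has roughly this shape (a sketch only; the exact flags and PID are printed by the test driver, and `12345` below is a hypothetical PID):

```bash
# Enter the container's namespaces with a shell; replace 12345 with the printed PID
sudo nsenter --target 12345 --mount --uts --ipc --net --pid -- /bin/sh
```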

### Where to find examples for NixOS container tests

Existing NixOS container tests in clan-core can be found by using `ripgrep`:

```shellSession
rg self.clanLib.test.containerTest
```

## Python tests via pytest

Since the Clan CLI is written in Python, the `pytest` framework is used to define unit tests and integration tests via Python.

Due to their superior efficiency, prefer these over NixOS tests whenever possible.

### When to use python tests

- writing unit tests for Python functions and modules, or bugfixes of such
- all integration tests that do not require building or running a NixOS machine
- impure integration tests that require internet access (very rare, try to avoid)

### When not to use python tests

- integration tests that require building or running a NixOS machine (use NixOS VM or container tests instead)
- testing behavior of a nix function or library (use nix eval tests instead)

### Finding examples of python tests

Existing Python tests in clan-core can be found by using `ripgrep`:

```shellSession
rg "import pytest"
```

### Locating definitions of failing python tests

If any Python test fails in the CI pipeline, an error message like this can be found at the end of the log:

```
...
FAILED tests/test_machines_cli.py::test_machine_delete - clan_lib.errors.ClanError: Template 'new-machine' not in 'inputs.clan-core
...
```

In this case the test is defined in the file `/tests/test_machines_cli.py` via the test function `test_machine_delete`.

### Adding python tests

If a specific Python module is tested, the test should be located near the tested module in a subdirectory called `./tests`.
If the test is not clearly related to a specific module, put it in the top-level `./tests` directory of the tested Python package. For `clan-cli` this would be `/pkgs/clan-cli/clan_cli/tests`.
All filenames must be prefixed with `test_` and test functions prefixed with `test_` for pytest to discover them.

### Running python tests

#### Running all python tests

To run all Python tests which are executed in the CI pipeline locally, use this `nix build` command:

```shellSession
nix build .#checks.x86_64-linux.clan-pytest-{with,without}-core
```

#### Running a specific python test

To run a specific Python test outside the nix sandbox:

1. Enter the development environment of the Python package, by either:
    - having direnv enabled and entering the directory of the package (e.g. `/pkgs/clan-cli`)
    - or using the command `select-shell {package}` in the top-level dev shell of clan-core (e.g. `select-shell clan-cli`)
2. Execute the test via pytest:
   `pytest ./path/to/test_file.py::test_function_name -s -n0`

The flags `-sn0` are useful to forward all stdout/stderr output to the terminal and be able to debug interactively via `breakpoint()`.
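
For example, to run the failing test located in the section above interactively:

```bash
pytest ./tests/test_machines_cli.py::test_machine_delete -sn0
```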

### Debugging python tests

To debug a specific Python test, find its definition (see above) and make sure to enter the correct dev environment for that Python package.

Modify the test and add `breakpoint()` statements to it.

Execute the test using the flags `-sn0` in order to get an interactive shell at the breakpoint:

```shellSession
pytest ./path/to/test_file.py::test_function_name -sn0
```

## Nix Eval Tests

### When to use nix eval tests

Nix eval tests are good for testing any nix logic, including

- nix functions
- nix libraries
- modules for the NixOS module system

### When not to use nix eval tests

- tests that require building nix derivations (except some very cheap ones)
- tests that require running programs written in other languages
- tests that require building or running NixOS machines

### Finding examples of nix eval tests

Existing nix eval tests can be found via this `ripgrep` command:

```shellSession
rg "nix-unit --eval-store"
```

### Locating definitions of failing nix eval tests

Failing nix eval tests look like this:

```shellSession
> ✅ test_attrsOf_attrsOf_submodule
> ✅ test_attrsOf_submodule
> ❌ test_default
>   /build/nix-8-2/expected.nix --- Nix
>   1 { foo = { bar = { __prio = 1500; }; }    1 { foo = { bar = { __prio = 1501; }; }
>   . ; }                                      . ; }
>
> ✅ test_no_default
> ✅ test_submodule
> ✅ test_submoduleWith
> ✅ test_submodule_with_merging
>
> 😢 6/7 successful
> error: Tests failed
```

To locate the definition, find the flake attribute name of the failing test near the top of the CI job page, for example `gitea:clan/clan-core#checks.x86_64-linux.eval-lib-values/1242`.

In this case `eval-lib-values` is the attribute we are looking for.

Find the attribute via ripgrep:

```shellSession
$ rg "eval-lib-values ="
lib/values/flake-module.nix
21:        eval-lib-values = pkgs.runCommand "tests" { nativeBuildInputs = [ pkgs.nix-unit ]; } ''
```

In this case the test is defined in the file `lib/values/flake-module.nix` at line 21.

### Adding nix eval tests

In clan-core, the following pattern is usually followed:

- tests are put in a `test.nix` file
- a CI job is exposed via a `flake-module.nix`
- that `flake-module.nix` is imported via the `flake.nix` at the root of the project

For example, see `/lib/values/{test.nix,flake-module.nix}`.

### Running nix eval tests

Since all nix eval tests are exposed via the flake outputs, they can be run via `nix build`:

```shellSession
nix build .#checks.x86_64-linux.{test-attr-name}
```

For quicker iteration times, instead of `nix build` use the `nix-unit` command available in the dev environment.
Example:

```shellSession
nix-unit --flake .#legacyPackages.x86_64-linux.{test-attr-name}
```

### Debugging nix eval tests

Follow the instructions above to find the definition of the test, then use one of the following techniques:

#### Print debugging

Add `lib.trace` or `lib.traceVal` statements in order to print some variables during evaluation.

#### Nix repl

Use `nix repl` to evaluate and inspect the test.

Each test consists of an `expr` (expression) and an `expected` field. `nix-unit` simply checks if `expr == expected` and prints the diff if that's not the case.

`nix repl` can be used to inspect an `expr` manually, or any other variables that you choose to expose.

Example:

```shellSession
$ nix repl
Nix 2.25.5
Type :? for help.
nix-repl> tests = import ./lib/values/test.nix {}

nix-repl> tests
{
  test_attrsOf_attrsOf_submodule = { ... };
  test_attrsOf_submodule = { ... };
  test_default = { ... };
  test_no_default = { ... };
  test_submodule = { ... };
  test_submoduleWith = { ... };
  test_submodule_with_merging = { ... };
}

nix-repl> tests.test_default.expr
{
  foo = { ... };
}
```

---

This guide provides an example setup for a single-disk ZFS system with native encryption, remotely accessible for decryption.

!!! Warning
    This configuration only applies to `systemd-boot` enabled systems and **requires** UEFI booting.

!!! Info "Secure Boot"
    This guide is compatible with systems that have [secure boot disabled](../guides/secure-boot.md). If you encounter boot issues, check if secure boot needs to be disabled in your UEFI settings.

Replace the highlighted lines with your own disk-id.
You can find out your disk-id by executing:

```bash
lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
```

=== "**Single Disk**"
    Below is the configuration for `disko.nix`
    ```nix hl_lines="13 53"
    --8<-- "docs/code-examples/disko-single-disk.nix"
    ```

=== "**Raid 1**"
    Below is the configuration for `disko.nix`
    ```nix hl_lines="13 53 54"
    --8<-- "docs/code-examples/disko-raid.nix"
    ```

Below is the configuration for `initrd.nix`.
Replace `<yourkey>` with your SSH public key.
Replace `kernelModules` with the Ethernet module loaded on your target machine.

```nix hl_lines="18 29"
{ config, pkgs, ... }:

{

  boot.initrd.systemd = {
    enable = true;
  };

  # Uncomment this if you want to be asked for the decryption password on login
  # users.root.shell = "/bin/systemd-tty-ask-password-agent";

  boot.initrd.network = {
    enable = true;

    ssh = {
      enable = true;
      port = 7172;
      authorizedKeys = [ "<yourkey>" ];
      hostKeys = [
        "/var/lib/initrd_host_ed25519_key"
        "/var/lib/initrd_host_rsa_key"
      ];
    };
  };
  boot.initrd.availableKernelModules = [
    "xhci_pci"
  ];
  # Find out the required network card driver by running `lspci -k` on the target machine
  boot.initrd.kernelModules = [ "r8169" ];
}
```

## Copying SSH Public Key

Before starting the installation process, ensure that the SSH public key is copied to the NixOS installer.

1. Copy your public SSH key to the installer, if it has not been copied already:

    ```bash
    ssh-copy-id -o PreferredAuthentications=password -o PubkeyAuthentication=no root@nixos-installer.local
    ```

## Prepare Secret Key and Partition Disks

1. Access the installer using SSH:

    ```bash
    ssh root@nixos-installer.local
    ```

2. Create a `secret.key` file in `/tmp` using `nano` or another text editor:

    ```bash
    nano /tmp/secret.key
    ```

3. Discard the old disk partition data:

    ```bash
    blkdiscard /dev/disk/by-id/<installdisk>
    ```

4. Run `clan machines install`, running only the kexec and disko phases, with the following command:

    ```bash
    clan machines install gchq-local --target-host root@nixos-installer --phases kexec,disko
    ```

## ZFS Pool Import and System Installation

1. SSH into the installer once again:

    ```bash
    ssh root@nixos-installer.local
    ```

2. Run the following command on the remote installation environment:

    ```bash
    zfs set keylocation=prompt zroot/root
    ```

3. Disconnect from the SSH session:

    ```bash
    CTRL+D
    ```

4. Locally generate SSH host keys. You only need to generate ones for the algorithms you're using in `authorizedKeys`.

    ```bash
    ssh-keygen -q -N "" -C "" -t ed25519 -f ./initrd_host_ed25519_key
    ssh-keygen -q -N "" -C "" -t rsa -b 4096 -f ./initrd_host_rsa_key
    ```

5. Securely copy your local initrd SSH host keys to the installer's `/mnt` directory:

    ```bash
    scp ./initrd_host* root@nixos-installer.local:/mnt/var/lib/
    ```

6. Install NixOS to the mounted partitions:

    ```bash
    clan machines install gchq-local --target-host root@nixos-installer --phases install
    ```

7. After the installation process, unmount `/mnt/boot`, change the ZFS mountpoints and unmount all the ZFS volumes by exporting the zpool:

    ```bash
    umount /mnt/boot
    cd /
    zfs set -u mountpoint=/ zroot/root/nixos
    zfs set -u mountpoint=/tmp zroot/root/tmp
    zfs set -u mountpoint=/home zroot/root/home
    zpool export zroot
    ```

8. Perform a reboot of the machine and remove the USB installer.

## Accessing the Initial Ramdisk (initrd) Environment

1. SSH into the initrd environment using the `initrd_rsa_key` and the configured port:

    ```bash
    ssh -p 7172 root@192.168.178.141
    ```

2. Run the `systemd-tty-ask-password-agent` utility to query a password:

    ```bash
    systemd-tty-ask-password-agent
    ```

After completing these steps, your NixOS should be successfully installed and ready for use.

**Note:** Replace `root@nixos-installer.local` and `192.168.178.141` with the appropriate user and IP addresses for your setup. Also, adjust `<SYS_PATH>` to reflect the correct system path for your environment.

---

!!! Danger ":fontawesome-solid-road-barrier: Under Construction :fontawesome-solid-road-barrier:"
    Currently under construction, use with caution

    :fontawesome-solid-road-barrier: :fontawesome-solid-road-barrier: :fontawesome-solid-road-barrier:

## Structure

A disk template consists of exactly two files:

- `default.nix`
- `README.md`

```sh
└── single-disk
    ├── default.nix
    └── README.md
```

## `default.nix`

Placeholders are filled with their machine-specific options when a template is used for a machine.

The user can choose any valid options from the hardware report.

The file itself is then copied to `machines/{machineName}/disko.nix` and will be automatically loaded by the machine.

`single-disk/default.nix`
```
{
  disko.devices = {
    disk = {
      main = {
        device = "{{mainDisk}}";
        ...
      };
    };
  };
}
```

## Placeholders

Each template must declare the options of its placeholders, depending on the hardware report.

`api/disk.py`
```py
templates: dict[str, dict[str, Callable[[dict[str, Any]], Placeholder]]] = {
    "single-disk": {
        # Placeholders
        "mainDisk": lambda hw_report: Placeholder(
            label="Main disk", options=hw_main_disk_options(hw_report), required=True
        ),
    }
}
```

Introducing new local or global placeholders requires contributing to clan-core `api/disks.py`.

### Predefined placeholders

Some placeholders provide predefined functionality:

- `uuid`: In most cases we recommend adding a unique id to all disks. This prevents the system from falsely booting from, e.g., hot-plugged devices.
    ```
    disko.devices = {
      disk = {
        main = {
          name = "main-{{uuid}}";
          ...
        }
      }
    }
    ```

## Readme

The readme frontmatter must be of the same format as the module frontmatter.

```markdown
---
description = "Simple disk schema for single disk setups"
---

# Single disk

Use this schema for simple setups where ....
```

The format and fields of this file are not final yet. We might change them once fully implemented.

---

Clan supports integration with [flake-parts](https://flake.parts/), a framework for constructing your `flake.nix` using modules. Follow these steps to integrate Clan with flake-parts:

## Step 1: Update Your Flake Inputs

Add `flake-parts` as a dependency in your `flake.nix` file alongside existing dependencies like `clan-core` and `nixpkgs`. Here's an example:

```nix
# flake.nix
inputs = {
  nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";

  # Add flake-parts
  flake-parts.url = "github:hercules-ci/flake-parts";
  flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs";

  clan-core = {
    url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
    inputs.nixpkgs.follows = "nixpkgs"; # Avoid this if using nixpkgs stable.
    inputs.flake-parts.follows = "flake-parts"; # New
  };
};
```

## Step 2: Import the Clan flake-parts Module

Next, import the Clan flake-parts module to make the [Clan options](../reference/options/clan.md) available within `mkFlake`:

```nix
{
  outputs =
    inputs@{ flake-parts, ... }:
    flake-parts.lib.mkFlake { inherit inputs; } (
      {
        imports = [
          inputs.clan-core.flakeModules.default
        ];
      }
    );
}
```
## Step 3: Configure Clan Settings and Define Machines
|
||||
|
||||
Configure Clan-wide settings and define machines. Here's an example `flake.nix`:
|
||||
|
||||
```nix
|
||||
{
|
||||
outputs = inputs@{ flake-parts, clan-core, ... }:
|
||||
flake-parts.lib.mkFlake { inherit inputs; } ({self, pkgs, ...}: {
|
||||
# See: https://flake.parts/getting-started
|
||||
systems = [
|
||||
"x86_64-linux"
|
||||
];
|
||||
|
||||
# Import the Clan flake-parts module
|
||||
imports = [
|
||||
clan-core.flakeModules.default
|
||||
];
|
||||
|
||||
# Define your Clan
|
||||
clan = {
|
||||
meta.name = ""; # Required and must be unique
|
||||
|
||||
machines = {
|
||||
jon = {
|
||||
imports = [
|
||||
./modules/firefox.nix
|
||||
# Add more modules as needed
|
||||
];
|
||||
|
||||
nixpkgs.hostPlatform = "x86_64-linux";
|
||||
|
||||
# Enable remote Clan commands over SSH
|
||||
clan.core.networking.targetHost = "root@jon";
|
||||
|
||||
# Disk configuration
|
||||
disko.devices.disk.main = {
|
||||
device = "/dev/disk/by-id/nvme-eui.e8238fa6bf530001001b448b4aec2929";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
For more details on configuring `flake-parts` and available Clan options, refer to the [Clan module documentation](https://git.clan.lol/clan/clan-core/src/branch/main/flakeModules/clan.nix).
|
||||
@@ -0,0 +1,33 @@

# Auto-included Files

Clan automatically imports specific files from each machine directory and registers them, reducing the need for manual configuration.

## Machine Registration

Every folder under `machines/{machineName}` is automatically registered as a Clan machine.

!!! info "Files loaded automatically for each machine"

    The following files are detected and imported for every Clan machine:

    - [x] `machines/{machineName}/configuration.nix`
          Main configuration file for the machine.

    - [x] `machines/{machineName}/hardware-configuration.nix`
          Hardware-specific configuration generated by NixOS.

    - [x] `machines/{machineName}/facter.json`
          Contains system facts. Automatically generated; see [nixos-facter](https://clan.lol/blog/nixos-facter/) for details.

    - [x] `machines/{machineName}/disko.nix`
          Disk layout configuration. See the [disko quickstart](https://github.com/nix-community/disko/blob/master/docs/quickstart.md) for more info.
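For example, a machine named `jon` (name chosen for illustration) would be picked up from a layout like the following, with only the files that actually exist being imported:

```sh
machines/
└── jon/
    ├── configuration.nix          # main machine configuration
    ├── hardware-configuration.nix # optional, generated by NixOS
    ├── facter.json                # optional, generated by nixos-facter
    └── disko.nix                  # optional disk layout
```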
## Other Auto-included Files

* **`inventory.json`**
  Managed by Clan's API.
  Merges with `clan.inventory` to extend the inventory.

* **`.clan-flake`**
  Sentinel file used to locate the root of a Clan repository.
  Falls back to `.git`, `.hg`, `.svn`, or `flake.nix` if not found.
136
pkgs/docs-site/src/routes/docs/guides/inventory/inventory.md
Normal file
@@ -0,0 +1,136 @@
`Inventory` is an abstract service layer for consistently configuring distributed services across machine boundaries.

## Concept

Its concept is slightly different from what NixOS veterans might be used to. The inventory is a service definition on a higher level, not a machine configuration. This allows you to define a consistent and coherent service.

The inventory logic will automatically derive the modules and configurations to enable on each machine in your `clan` based on its `role`. This makes it easy to set up distributed `services` such as backups, networking, traditional cloud services, or peer-to-peer based applications.

The following tutorial will walk through setting up a backup service, where the terms `Service` and `Role` will become clearer.

!!! example "Experimental status"
    The inventory implementation is not considered stable yet.
    We are actively soliciting feedback from users.

    Stabilizing the API is a priority.

## Prerequisites

- [x] [Add some machines](../../getting-started/add-machines.md) to your Clan.

## Services

The inventory defines `instances` of clan services. Membership of `machines` is defined via `roles` exclusively.

See each [module's documentation](../../services/definition.md) for its available roles.

### Adding services to machines

A service can be added to one or multiple machines via `Roles`. Clan's `Role` interface provides sane defaults for a module, which allows the module author to reduce the configuration overhead to a minimum.

Each service can still be customized and configured according to the module's options, at two scopes (see the sketch below):

- Per-role configuration via `inventory.instances.<instanceName>.roles.<roleName>.settings`
- Per-machine configuration via `inventory.instances.<instanceName>.roles.<roleName>.machines.<machineName>.settings`
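As a sketch, the two scopes look like this; the option names inside `settings` depend on the concrete module and are only placeholders here:

```nix
{
  inventory.instances.instance_1 = {
    # Role-wide: applies to every machine with the client role
    roles.client.settings = {
      # <module options>
    };

    # Machine-specific: applies to the machine "jon" only
    roles.client.machines."jon".settings = {
      # <module options>
    };
  };
}
```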
### Setting up the Backup Service

!!! Example "Borgbackup Example"

    To configure a service, it needs to be added to a machine.
    It is required to assign the service (`borgbackup`) an arbitrary instance name (`instance_1`).

    See also: [Multiple Service Instances](#multiple-service-instances)

    ```{.nix hl_lines="9-10"}
    {
      inventory.instances.instance_1 = {
        module = {
          name = "borgbackup";
          input = "clan-core";
        };

        # Machines can be added here.
        roles.client.machines."jon" = {};
        roles.server.machines."backup_server" = {};
      };
    }
    ```

### Scaling the Backup

The inventory allows machines to set tags.

It is possible to add services to multiple machines via tags, as shown below.

!!! Example "Tags Example"

    ```{.nix hl_lines="5 8 18"}
    {
      inventory = {
        machines = {
          "jon" = {
            tags = [ "backup" ];
          };
          "sara" = {
            tags = [ "backup" ];
          };
        };

        instances.instance_1 = {
          module = {
            name = "borgbackup";
            input = "clan-core";
          };

          roles.client.tags = [ "backup" ];
          roles.server.machines."backup_server" = {};
        };
      };
    }
    ```

### Multiple Service Instances

!!! danger "Important"
    Not all modules implement support for multiple instances yet.
    Multiple-instance usage can create complexity; refer to each module's documentation for intended usage.

!!! Example

    In this example `backup_server` has the roles `client` and `server` in different instances.

    ```{.nix hl_lines="17 26"}
    {
      inventory = {
        machines = {
          "jon" = {};
          "backup_server" = {};
          "backup_backup_server" = {};
        };

        instances = {
          instance_1 = {
            module = {
              name = "borgbackup";
              input = "clan-core";
            };

            roles.client.machines."jon" = {};
            roles.server.machines."backup_server" = {};
          };

          instance_2 = {
            module = {
              name = "borgbackup";
              input = "clan-core";
            };

            roles.client.machines."backup_server" = {};
            roles.server.machines."backup_backup_server" = {};
          };
        };
      };
    }
    ```
84
pkgs/docs-site/src/routes/docs/guides/macos.md
Normal file
@@ -0,0 +1,84 @@
# Managing macOS Machines with Clan

This guide explains how to manage macOS machines using Clan.

## Supported Features

Currently, Clan supports the following features for macOS:

- `clan machines update` for existing [nix-darwin](https://github.com/nix-darwin/nix-darwin) installations
- Support for [vars](../guides/vars/vars-overview.md)

## Add Your Machine to Your Clan Flake

In this example, we'll name the machine `yourmachine`. Replace this with your preferred machine name.

=== "**If using clan-core.lib.clan**"

    ```nix
    clan-core.lib.clan {
      inventory = {
        machines.yourmachine.machineClass = "darwin";
      };
    }
    ```

=== "**If using flake-parts**"

    ```nix
    {
      clan = {
        inventory = {
          machines.yourmachine.machineClass = "darwin";
        };
      };
    }
    ```

## Add a `configuration.nix` for Your Machine

Create the file `./machines/yourmachine/configuration.nix` with the following content (replace `yourmachine` with your chosen machine name):

```nix
{
  clan.core.networking.targetHost = "root@ip_or_hostname";
  nixpkgs.hostPlatform = "aarch64-darwin"; # Use "x86_64-darwin" for Intel-based Macs
}
```

After creating the file, run `git add` to ensure Nix recognizes it.

## Generate Vars (If Needed)

If your machine uses vars, generate them with:

```bash
clan vars generate yourmachine
```

Replace `yourmachine` with your chosen machine name.

## Install Nix

Install Nix on your macOS machine using one of the methods described in the [nix-darwin prerequisites](https://github.com/nix-darwin/nix-darwin?tab=readme-ov-file#prerequisites).

## Install nix-darwin

Upload your Clan flake to the macOS machine. Then, from within your flake directory, run:

```sh
sudo nix run nix-darwin/master#darwin-rebuild -- switch --flake .#yourmachine
```

Replace `yourmachine` with your chosen machine name.

## Manage Your Machine with Clan

Once all the steps above are complete, you can start managing your machine with:

```bash
clan machines update yourmachine
```

This command can be run from any computer that can reach this machine via SSH.
98
pkgs/docs-site/src/routes/docs/guides/migrations/disk-id.md
Normal file
@@ -0,0 +1,98 @@
# Migrate disko config from `clanModules.disk-id`

If you previously bootstrapped a machine's disk using `clanModules.disk-id`, you should now migrate to a standalone, self-contained disko configuration. This ensures long-term stability and avoids reliance on dynamic values from Clan.

If your `disko.nix` currently looks something like this:

```nix title="disko.nix"
{
  lib,
  clan-core,
  config,
  ...
}:

let
  suffix = config.clan.core.vars.generators.disk-id.files.diskId.value;
in
{
  imports = [
    clan-core.clanModules.disk-id
  ];

  # DO NOT EDIT THIS FILE AFTER INSTALLATION of a machine
  # Otherwise your system might not boot because of missing partitions / filesystems
  boot.loader.grub.efiSupport = lib.mkDefault true;
  boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
  disko.devices = {
    disk = {
      "main" = {
        # suffix is to prevent disk name collisions
        name = "main-" + suffix;
        type = "disk";
        # Set the following in flake.nix for each machine:
        # device = <uuid>;
        content = {
          # elided
        };
      };
    };
  };
}
```

## Step 1: Retrieve your `disk-id`

Run the following command to retrieve the generated disk ID for your machine:

```bash
clan vars list <machineName>
```

This prints the generated `disk-id/diskId` value in clear text. You should see output like:

```shellSession
disk-id/diskId: fcef30a749f8451d8f60c46e1ead726f
# ...
# elided
```

Copy this value; you'll need it in the next step.

## ✍️ Step 2: Replace Dynamic Configuration with Static Values

✅ Goal: Make your `disko.nix` file standalone.

We are going to make three changes:

- Remove the `let ... in` block, the `imports`, and the `clan-core`/`config` function arguments to isolate the file (keep `lib`, which is still used for `lib.mkDefault`).
- Replace `suffix` with the actual disk-id.
- Move `disko.devices.disk.main.device` from `flake.nix` or `configuration.nix` into this file.

```{.nix title="disko.nix" hl_lines="8-10 12-15"}
{ lib, ... }:
{
  boot.loader.grub.efiSupport = lib.mkDefault true;
  boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
  disko.devices = {
    disk = {
      "main" = {
        # ↓ Copy the disk-id into place
        name = "main-fcef30a749f8451d8f60c46e1ead726f";
        type = "disk";

        # Some earlier guides had this line in a flake.nix
        # disko.devices.disk.main.device = "/dev/disk/by-id/__CHANGE_ME__";
        # ↓ Copy the '/dev/disk/by-id' into here instead
        device = "/dev/disk/by-id/nvme-eui.e8238fa6bf530001001b448b4aec2929";

        # elided
      };
    };
  };
}
```

These steps are only needed for existing configurations that depend on the `disk-id` module.

For newer machines, Clan offers simple *disk templates* via its [templates CLI](../../reference/cli/templates.md).
@@ -0,0 +1,383 @@

# Migrating from using `clanModules` to `clanServices`

**Audience**: This is a guide for **people using `clanModules`**.
If you are a **module author** and need to migrate your modules, please consult our **new** [clanServices authoring guide](../../guides/services/community.md).

## What's Changing?

Clan is transitioning from the legacy `clanModules` system to the `clanServices` system. This guide will help you migrate your service definitions from the old format (`inventory.services`) to the new format (`inventory.instances`).

| Feature          | `clanModules` (Old)        | `clanServices` (New)    |
| ---------------- | -------------------------- | ----------------------- |
| Module Class     | `"nixos"`                  | `"clan.service"`        |
| Inventory Key    | `services`                 | `instances`             |
| Module Source    | Static                     | Composable via flakes   |
| Custom Settings  | Loosely structured         | Strongly typed per-role |
| Migration Status | Deprecated (to be removed) | ✅ Preferred            |

---

## Before: Old `services` Definition

```nix
services = {
  admin = {
    simple = {
      roles.default.tags = [ "all" ];

      roles.default.config = {
        allowedKeys = {
          "key-1" = "ssh-ed25519 AAAA...0J jon@jon-os";
        };
      };
    };
  };
};
```

### Complex Example: Multi-service Setup

```nix
# Old format
services = {
  borgbackup.production = {
    roles.server.machines = [ "backup-server" ];
    roles.server.config = {
      directory = "/var/backup/borg";
    };
    roles.client.tags = [ "backup" ];
    roles.client.extraModules = [ "nixosModules/borgbackup.nix" ];
  };

  zerotier.company-network = {
    roles.controller.machines = [ "network-controller" ];
    roles.moon.machines = [ "moon-1" "moon-2" ];
    roles.peer.tags = [ "nixos" ];
  };

  sshd.internal = {
    roles.server.tags = [ "nixos" ];
    roles.client.tags = [ "nixos" ];
    config.certificate.searchDomains = [
      "internal.example.com"
      "vpn.example.com"
    ];
  };
};
```

---

## ✅ After: New `instances` Definition with `clanServices`

```nix
instances = {
  # The instance name is arbitrary but must be unique.
  # We recommend incorporating the module name in some way to keep it clear.
  admin-simple = {
    module = {
      name = "admin";
      input = "clan-core";
    };

    roles.default.tags."all" = {};

    # Move settings either into the desired role;
    # in that case they affect all 'client' machines.
    roles.default.settings = {
      allowedKeys = {
        "key-1" = "ssh-ed25519 AAAA...0J jon@jon-os";
      };
    };
    # ----------------------------
    # OR move settings into the machine;
    # then they affect only that single 'machine'.
    roles.default.machines."jon".settings = {
      allowedKeys = {
        "key-1" = "ssh-ed25519 AAAA...0J jon@jon-os";
      };
    };
  };
};
```

### Complex Example Migrated

```nix
# New format
instances = {
  borgbackup-production = {
    module = {
      name = "borgbackup";
      input = "clan-core";
    };
    roles.server.machines."backup-server" = { };
    roles.server.settings = {
      directory = "/var/backup/borg";
    };
    roles.client.tags.backup = { };
    roles.client.extraModules = [ ../nixosModules/borgbackup.nix ];
  };

  zerotier-company-network = {
    module = {
      name = "zerotier";
      input = "clan-core";
    };
    roles.controller.machines."network-controller" = { };
    roles.moon.machines."moon-1".settings = {
      stableEndpoints = [ "10.0.0.1" "2001:db8::1" ];
    };
    roles.moon.machines."moon-2".settings = {
      stableEndpoints = [ "10.0.0.2" "2001:db8::2" ];
    };
    roles.peer.tags.nixos = { };
  };

  sshd-internal = {
    module = {
      name = "sshd";
      input = "clan-core";
    };
    roles.server.tags.nixos = { };
    roles.client.tags.nixos = { };
    roles.client.settings = {
      certificate.searchDomains = [
        "internal.example.com"
        "vpn.example.com"
      ];
    };
  };
};
```

---

## Steps to Migrate

### Move `services` entries to `instances`

Check whether a service you use has already been migrated in [our reference](../../services/definition.md).

In your inventory, move it from:

```nix
services = { ... };
```

to:

```nix
instances = { ... };
```

Each nested service-instance pair becomes a flat key, like `borgbackup.simple → borgbackup-simple`.

---

### Add `module.name` and `module.input`

Each instance must declare the module name and flake input it comes from:

```nix
module = {
  name = "borgbackup";
  input = "clan-core"; # The name of your flake input
};
```

If you used `clan-core` as an input:

```nix
inputs.clan-core.url = "github:clan/clan-core";
```

Then refer to it as `input = "clan-core"`.

---

### Move role and machine config under `roles`

In the new system:

* Use `roles.<role>.machines.<hostname>.settings` for machine-specific config.
* Use `roles.<role>.settings` for role-wide config.
* The top-level `config` attribute is removed.

Example:

```nix
roles.default.machines."test-inventory-machine".settings = {
  packages = [ "hello" ];
};
```

### Important Type Changes

The new `instances` format uses **attribute sets** instead of **lists** for tags and machines:

```nix
# ❌ Old format (lists)
roles.client.tags = [ "backup" ];
roles.server.machines = [ "blob64" ];

# ✅ New format (attribute sets)
roles.client.tags.backup = { };
roles.server.machines.blob64 = { };
```

### Handling Multiple Machines/Tags

When you need to assign multiple machines or tags to a role:

```nix
# ❌ Old format
roles.moon.machines = [ "eva" "eve" ];

# ✅ New format - each machine gets its own attribute
roles.moon.machines.eva = { };
roles.moon.machines.eve = { };
```

---

## Migration Status of clanModules

The following table shows the migration status of each deprecated clanModule:

| clanModule                | Migration Status                                                  | Notes                                                              |
|---------------------------|-------------------------------------------------------------------|--------------------------------------------------------------------|
| `admin`                   | ✅ [Migrated](../../services/official/admin.md)                   |                                                                    |
| `auto-upgrade`            | ❌ Removed                                                        |                                                                    |
| `borgbackup-static`       | ❌ Removed                                                        |                                                                    |
| `borgbackup`              | ✅ [Migrated](../../services/official/borgbackup.md)              |                                                                    |
| `data-mesher`             | ✅ [Migrated](../../services/official/data-mesher.md)             |                                                                    |
| `deltachat`               | ❌ Removed                                                        |                                                                    |
| `disk-id`                 | ❌ Removed                                                        |                                                                    |
| `dyndns`                  | ✅ [Migrated](../../services/official/dyndns.md)                  |                                                                    |
| `ergochat`                | ❌ Removed                                                        |                                                                    |
| `garage`                  | ✅ [Migrated](../../services/official/garage.md)                  |                                                                    |
| `golem-provider`          | ❌ Removed                                                        |                                                                    |
| `heisenbridge`            | ❌ Removed                                                        |                                                                    |
| `importer`                | ✅ [Migrated](../../services/official/importer.md)                |                                                                    |
| `iwd`                     | ❌ Removed                                                        | Use [wifi service](../../services/official/wifi.md) instead        |
| `localbackup`             | ✅ [Migrated](../../services/official/localbackup.md)             |                                                                    |
| `localsend`               | ❌ Removed                                                        |                                                                    |
| `machine-id`              | ✅ [Migrated](../../reference/clan.core/settings.md)              | Now an [option](../../reference/clan.core/settings.md)             |
| `matrix-synapse`          | ✅ [Migrated](../../services/official/matrix-synapse.md)          |                                                                    |
| `moonlight`               | ❌ Removed                                                        |                                                                    |
| `mumble`                  | ❌ Removed                                                        |                                                                    |
| `mycelium`                | ✅ [Migrated](../../services/official/mycelium.md)                |                                                                    |
| `nginx`                   | ❌ Removed                                                        |                                                                    |
| `packages`                | ✅ [Migrated](../../services/official/packages.md)                |                                                                    |
| `postgresql`              | ✅ [Migrated](../../reference/clan.core/settings.md)              | Now an [option](../../reference/clan.core/settings.md)             |
| `root-password`           | ✅ [Migrated](../../services/official/users.md)                   | See [migration guide](../../services/official/users.md#migration-from-root-password-module) |
| `single-disk`             | ❌ Removed                                                        |                                                                    |
| `sshd`                    | ✅ [Migrated](../../services/official/sshd.md)                    |                                                                    |
| `state-version`           | ✅ [Migrated](../../reference/clan.core/settings.md)              | Now an [option](../../reference/clan.core/settings.md)             |
| `static-hosts`            | ❌ Removed                                                        |                                                                    |
| `sunshine`                | ❌ Removed                                                        |                                                                    |
| `syncthing-static-peers`  | ❌ Removed                                                        |                                                                    |
| `syncthing`               | ✅ [Migrated](../../services/official/syncthing.md)               |                                                                    |
| `thelounge`               | ❌ Removed                                                        |                                                                    |
| `trusted-nix-caches`      | ✅ [Migrated](../../services/official/trusted-nix-caches.md)      |                                                                    |
| `user-password`           | ✅ [Migrated](../../services/official/users.md)                   |                                                                    |
| `vaultwarden`             | ❌ Removed                                                        |                                                                    |
| `xfce`                    | ❌ Removed                                                        |                                                                    |
| `zerotier-static-peers`   | ❌ Removed                                                        |                                                                    |
| `zerotier`                | ✅ [Migrated](../../services/official/zerotier.md)                |                                                                    |
| `zt-tcp-relay`            | ❌ Removed                                                        |                                                                    |

---

!!! Warning
    * Old `clanModules` (`class = "nixos"`) are deprecated and will be removed in the near future.
    * `inventory.services` is no longer recommended; use `inventory.instances` instead.
    * Module authors should begin exporting service modules under the `clan.modules` attribute of their flake.

## Troubleshooting Common Migration Errors

### Error: "not of type `attribute set of (submodule)`"

This error occurs when using lists instead of attribute sets for tags or machines:

```
error: A definition for option `flake.clan.inventory.instances.borgbackup-blob64.roles.client.tags' is not of type `attribute set of (submodule)'.
```

**Solution**: Convert lists to attribute sets as shown in the "Important Type Changes" section above.

### Error: "unsupported attribute `module`"

This error indicates the module structure is incorrect:

```
error: Module ':anon-4:anon-1' has an unsupported attribute `module'.
```

**Solution**: Ensure the `module` attribute has exactly two fields: `name` and `input`.

### Error: "attribute 'pkgs' missing"

This suggests the instance configuration is trying to use imports incorrectly:

```
error: attribute 'pkgs' missing
```

**Solution**: Use the `module = { name = "..."; input = "..."; }` format instead of `imports`.

### Removed Features

The following features from the old `services` format are no longer supported in `instances`:

- Top-level `config` attribute (use `roles.<role>.settings` instead)
- Direct module imports (use the `module` declaration instead)

### extraModules Support

The `extraModules` attribute is still supported in the new instances format! The key change is how modules are specified:

**Old format (string paths relative to clan root):**
```nix
roles.client.extraModules = [ "nixosModules/borgbackup.nix" ];
```

**New format (NixOS modules):**
```nix
# Direct module reference
roles.client.extraModules = [ ../nixosModules/borgbackup.nix ];

# Or using self
roles.client.extraModules = [ self.nixosModules.borgbackup ];

# Or inline module definition
roles.client.extraModules = [
  {
    # Your module configuration here
  }
];
```

`extraModules` now expects actual **NixOS modules** rather than string paths. This provides better type checking and more flexibility in how modules are specified.

**Alternative: Using @clan/importer**

For scenarios where you need to import modules with specific tag-based targeting, you can also use the dedicated `@clan/importer` service:

```nix
instances = {
  my-importer = {
    module.name = "@clan/importer";
    module.input = "clan-core";
    roles.default.tags.my-tag = { };
    roles.default.extraModules = [ self.nixosModules.myModule ];
  };
};
```

## Further reference

* [Inventory Concept](../../guides/inventory/inventory.md)
* [Authoring a 'clan.service' module](../../guides/services/community.md)
* [ClanServices](../../guides/services/introduction-to-services.md)
@@ -0,0 +1,129 @@

# Migrate modules from `facts` to `vars`

For a high-level overview of `vars`, see our [blog post](https://clan.lol/blog/vars/).

This guide will help you migrate your modules that still use our [`facts`](../../guides/migrations/migration-facts-vars.md) backend
to the [`vars`](../../guides/vars/vars-overview.md) backend.

The `vars` [module](../../reference/clan.core/vars.md) and the clan [command](../../reference/cli/vars.md) work in tandem; they should ideally be kept in sync.

## Keep Existing Values

In order to keep existing values and move them from `facts` to `vars`,
we will need to set the corresponding option in the vars module:

```nix
migrateFact = "fact-name";
```

On `vars` generation, this will check whether a `fact` with that name is already present and, if so, migrate it to `vars`.

Let us look at the mapping a little closer.
Suppose we have the following fact: `facts.services.vaultwarden.secret.admin`.
This would read as follows: the `vaultwarden` `fact` service has the `admin` secret.
In order to migrate this fact we would need the following `vars` configuration:

```nix
vars.generators.vaultwarden = {
  migrateFact = "vaultwarden";
  files.admin = {};
};
```

And this would read as follows: the vaultwarden `vars` module generates the admin file.

## Prompts

Because prompts can be a necessity for certain systems, `vars` has a shorthand for defining them.
A prompt is a request for user input. Let us look at how user input used to be handled in facts:

```nix
facts.services.forgejo-api = {
  secret.token = {};
  generator.prompt = "Please insert your forgejo api token";
  generator.script = "cp $prompt_value $secret/token";
};
```

To have analogous functionality in `vars`:

```nix
vars.generators.forgejo-api = {
  prompts.token = {
    description = "Please insert your forgejo api token";
    persist = true;
  };
};
```

Not only does this simplify prompting, it also allows us to define multiple prompts in one generator.
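For example, a sketch of a generator requesting two inputs (the second prompt name is hypothetical):

```nix
vars.generators.forgejo-api = {
  # Each prompt is a separate request for user input
  prompts.token = {
    description = "Please insert your forgejo api token";
    persist = true;
  };
  prompts.admin-password = {
    description = "Please choose an admin password";
    persist = true;
  };
};
```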
A more analogous way to the `fact` method is available, in case the module author needs more flexibility with the prompt input:

```nix
vars.generators.forgejo-api = {
  files.token = {};
  prompts.token.description = "Please insert your forgejo api token";
  script = "cp $prompts/<name> $out/<name>";
};
```

## Migration of a complete module

Let us look closer at how we would migrate an existing generator for syncthing.
This is the `fact` module of syncthing:

```nix
facts.services.syncthing = {
  secret.key = {};
  secret.cert = {};
  public.id = {};

  generator.path = [
    pkgs.coreutils
    pkgs.gnugrep
    pkgs.syncthing
  ];

  generator.script = ''
    syncthing generate --config "$out"
    mv "$out"/key.pem "$secret"/key
    mv "$out"/cert.pem "$secret"/cert
    cat "$out"/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > "$public"/id
  '';
};
```

This would be the corresponding `vars` module, which will also migrate existing facts:

```nix
vars.generators.syncthing = {
  migrateFact = "syncthing";

  files.key = {};
  files.cert = {};
  files.id.secret = false;

  runtimeInputs = [
    pkgs.coreutils
    pkgs.gnugrep
    pkgs.syncthing
  ];

  script = ''
    syncthing generate --config "$out"
    mv "$out"/key.pem "$out"/key
    mv "$out"/cert.pem "$out"/cert
    cat "$out"/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > "$out"/id
  '';
};
```

Most of the usage patterns stay the same, but `vars` has a more ergonomic interface.
There are no longer two different ways to define files (public/secret);
files are now defined under the `files` attribute and are secret by default.

## Happy Migration

We hope this gives you a clear path to start and finish your migration from `facts` to `vars`.
Please do not hesitate to reach out if something is still unclear, either through [matrix](https://matrix.to/#/#clan:clan.lol) or through our git [forge](https://git.clan.lol/clan/clan-core).
149
pkgs/docs-site/src/routes/docs/guides/networking/mesh-vpn.md
Normal file
@@ -0,0 +1,149 @@
This guide provides detailed instructions for configuring
[ZeroTier VPN](https://zerotier.com) within Clan. Follow the
outlined steps to set up a machine as a VPN controller (`<CONTROLLER>`) and to
include a new machine in the VPN.

## Concept

By default all machines within one clan are connected via a chosen network technology.

```{.no-copy}
Clan
    Node A
    <-> (zerotier / mycelium / ...)
    Node B
```

This guide shows you how to configure `zerotier` through Clan's `Inventory` system.

## The Controller

The controller is the initial entrypoint for new machines into the VPN.
It will sign the IDs of new machines.
Once IDs are signed, the controller's continuous operation is not essential.
A good controller choice is nevertheless a machine that can always be reached for updates, so that new peers can be added to the network.

For the purpose of this guide we have two machines:

- The `controller` machine, which will be the zerotier controller.
- The `new_machine` machine, which is the machine we want to add to the VPN network.

## Configure the Service

```{.nix title="flake.nix" hl_lines="19-25"}
{
  inputs.clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
  inputs.nixpkgs.follows = "clan-core/nixpkgs";

  outputs =
    { self, clan-core, ... }:
    let
      # Sometimes this attribute set is defined in clan.nix
      clan = clan-core.lib.clan {
        inherit self;

        meta.name = "myclan";

        inventory.machines = {
          controller = {};
          new_machine = {};
        };

        inventory.instances = {
          zerotier = {
            # Assign the controller machine to the role "controller"
            roles.controller.machines."controller" = {};

            # All clan machines are zerotier peers
            roles.peer.tags."all" = {};
          };
        };
      };
    in
    {
      inherit (clan) nixosConfigurations nixosModules clanInternals;

      # elided for brevity
    };
}
```

## Apply the Configuration

Update the `controller` machine first:

```bash
clan machines update controller
```

Then update all other peers:

```bash
clan machines update
```

### Verify Connection

On the `new_machine` run:

```bash
$ sudo zerotier-cli info
```

The status should be "ONLINE":

```{.console, .no-copy}
200 info d2c71971db 1.12.1 ONLINE
```

## Further

Currently **ZeroTier** is the only mesh VPN that is fully integrated into Clan.
In the future we plan to add additional network technologies like tinc and head/tailscale.
We currently support yggdrasil and mycelium through the inventory,
though they are not yet integrated into the networking module.

We chose ZeroTier because in our tests it was a straightforward solution to bootstrap.
It allows you to self-host a controller, and the controller doesn't need to be globally reachable,
which made it a good fit for starting the project.

## Debugging

### Retrieve the ZeroTier ID

In the repo:

```console
$ clan vars list <machineName>
```

```{.console, .no-copy}
$ clan vars list controller
# ... elided
zerotier/zerotier-identity-secret: ********
zerotier/zerotier-ip: fd0a:b849:2928:1234:c99:930a:a959:2928
zerotier/zerotier-network-id: 0aa959282834000c
```

On the machine:

```bash
$ sudo zerotier-cli info
```

#### Manually Authorize a Machine on the Controller

=== "with ZeroTierIP"

    ```bash
    $ sudo zerotier-members allow --member-ip <IP>
    ```

    Substitute `<IP>` with the ZeroTier IP obtained previously.

=== "with ZeroTierID"

    ```bash
    $ sudo zerotier-members allow <ID>
    ```

    Substitute `<ID>` with the ZeroTier ID obtained previously.
184
pkgs/docs-site/src/routes/docs/guides/networking/networking.md
Normal file
@@ -0,0 +1,184 @@
# Connecting to Your Machines

Clan provides automatic networking with fallback mechanisms to reliably connect to your machines.

## Option 1: Automatic Networking with Fallback (Recommended)

Clan's networking module automatically manages connections through various network technologies with intelligent fallback. When you run `clan ssh` or `clan machines update`, Clan tries each configured network by priority until one succeeds.

### Basic Setup with Internet Service

For machines with public IPs or DNS names, use the `internet` service to configure direct SSH while keeping fallback options:

```{.nix title="flake.nix" hl_lines="7-10 14-16"}
{
  outputs = { self, clan-core, ... }:
    let
      clan = clan-core.lib.clan {
        inventory.instances = {
          # Direct SSH with fallback support
          internet = {
            roles.default.machines.server1 = {
              settings.host = "server1.example.com";
            };
            roles.default.machines.server2 = {
              settings.host = "192.168.1.100";
            };
          };

          # Fallback: Secure connections via Tor
          tor = {
            roles.server.tags.nixos = { };
          };
        };
      };
    in
    {
      inherit (clan.config) nixosConfigurations;
    };
}
```

### Advanced Setup with Multiple Networks

```{.nix title="flake.nix" hl_lines="7-10 13-16 19-21"}
{
  outputs = { self, clan-core, ... }:
    let
      clan = clan-core.lib.clan {
        inventory.instances = {
          # Priority 1: Try direct connection first
          internet = {
            roles.default.machines.publicserver = {
              settings.host = "public.example.com";
            };
          };

          # Priority 2: VPN for internal machines
          zerotier = {
            roles.controller.machines."controller" = { };
            roles.peer.tags.nixos = { };
          };

          # Priority 3: Tor as universal fallback
          tor = {
            roles.server.tags.nixos = { };
          };
        };
      };
    in
    {
      inherit (clan.config) nixosConfigurations;
    };
}
```

### How It Works

Clan automatically tries networks in order of priority:

1. Direct internet connections (if configured)
2. VPN networks (ZeroTier, Tailscale, etc.)
3. Tor hidden services
4. Any other configured networks

If one network fails, Clan automatically tries the next.

### Useful Commands

```bash
# View all configured networks and their status
clan network list

# Test connectivity through all networks
clan network ping machine1

# Show complete network topology
clan network overview
```

## Option 2: Manual targetHost (Bypasses Fallback!)

!!! warning
    Setting `targetHost` directly **disables all automatic networking and fallback**. Only use this if you need complete control and don't want Clan's intelligent connection management.

### Using Inventory (For Static Addresses)

Use inventory-level `targetHost` when the address is **static** and doesn't depend on NixOS configuration:

```{.nix title="flake.nix" hl_lines="8"}
{
  outputs = { self, clan-core, ... }:
    let
      clan = clan-core.lib.clan {
        inventory.machines.server = {
          # WARNING: This bypasses all networking modules!
          # Use for: Static IPs, DNS names, known hostnames
          deploy.targetHost = "root@192.168.1.100";
        };
      };
    in
    {
      inherit (clan.config) nixosConfigurations;
    };
}
```

**When to use inventory-level:**

- Static IP addresses: `"root@192.168.1.100"`
- DNS names: `"user@server.example.com"`
- Any address that doesn't change based on machine configuration

### Using NixOS Configuration (For Dynamic Addresses)

Use machine-level `targetHost` when you need to **interpolate values from the NixOS configuration**:

```{.nix title="flake.nix" hl_lines="7"}
{
  outputs = { self, clan-core, ... }:
    let
      clan = clan-core.lib.clan {
        machines.server = { config, ... }: {
          # WARNING: This also bypasses all networking modules!
          # REQUIRED for: Addresses that depend on NixOS config
          clan.core.networking.targetHost = "root@${config.networking.hostName}.local";
        };
      };
    in
    {
      inherit (clan.config) nixosConfigurations;
    };
}
```

**When to use machine-level (NixOS config):**

- Using hostName from config: `"root@${config.networking.hostName}.local"`
- Building from multiple config values: `"${config.users.users.deploy.name}@${config.networking.hostName}"`
- Any address that depends on evaluated NixOS configuration

!!! info "Key Difference"
    **Inventory-level** (`deploy.targetHost`) is evaluated immediately and works with static strings.
    **Machine-level** (`clan.core.networking.targetHost`) is evaluated after NixOS configuration and can access `config.*` values.

## Quick Decision Guide

| Scenario | Recommended Approach | Why |
|----------|---------------------|-----|
| Public servers | `internet` service | Keeps fallback options |
| Mixed infrastructure | Multiple networks | Automatic failover |
| Machines behind NAT | ZeroTier/Tor | NAT traversal with fallback |
| Testing/debugging | Manual targetHost | Full control, no magic |
| Single static machine | Manual targetHost | Simple, no overhead |

## Command-Line Override

The `--target-host` flag bypasses ALL networking configuration:

```bash
# Emergency access - ignores all networking config
clan machines update server --target-host root@backup-ip.com

# Direct SSH - no fallback attempted
clan ssh laptop --target-host user@10.0.0.5
```

Use this for debugging or emergency access when automatic networking isn't working.
68
pkgs/docs-site/src/routes/docs/guides/nixos-rebuild.md
Normal file
@@ -0,0 +1,68 @@
# Can I still use `nixos-rebuild`?

**Yes, you can still use `nixos-rebuild` with clan!**

Clan is built on top of standard `NixOS` and uses `nixos-rebuild` internally.
However, there are important considerations when using `nixos-rebuild` directly instead of `clan machines update`.

## Important Considerations

!!! warning "Vars Must Be Uploaded First"
    If your configuration uses clan vars, failing to run `clan vars upload` before `nixos-rebuild` will result in missing secrets and potentially broken services.

!!! info "Build Host Configuration"
    Clan automatically handles build host configuration based on your machine settings.
    When using `nixos-rebuild` manually, you need to specify the `--build-host` and `--target-host` options yourself.

## How Clan Uses nixos-rebuild

Clan doesn't replace `nixos-rebuild`; it enhances it. When you run `clan machines update`, clan:

1. Generates and uploads secrets/variables (if any)
2. Uploads the flake source to the target/build host (if needed)
3. Runs `nixos-rebuild switch` with the appropriate options
4. Handles remote building and deployment automatically

Under the hood, clan executes commands like:

```bash
nixos-rebuild switch --fast --build-host buildHost --flake /path/to/flake#machine-name
```

## When You Need `clan vars upload`

If your clan configuration uses **variables (vars)**, i.e. generated secrets, keys, or configuration values, you **must** run `clan vars upload` before using `nixos-rebuild` directly.

### Systems that use vars include:

- Any `clanModules` with generated secrets (zerotier, borgbackup, etc.)
- Custom generators that create passwords or keys
- Services that need shared configuration values

### Systems that don't need vars:

- Basic NixOS configurations without clan-specific services
- Static configurations with hardcoded values
- Systems using only traditional NixOS secrets management

## Manual nixos-rebuild Workflow

When you want to use `nixos-rebuild` directly:

### Step 1: Upload vars (if needed)

```bash
# Upload secret vars to the target machine
clan vars upload my-machine
```

### Step 2: Run nixos-rebuild

```bash
nixos-rebuild switch --flake .#my-machine --target-host root@target-ip --build-host local
```

## Related Documentation

- [Update Your Machines](../getting-started/update-machines.md) - Using clan's update command
- [Variables (Vars)](../guides/vars/vars-overview.md) - Understanding the vars system
@@ -0,0 +1,99 @@

**Q**: How should I choose the `nixpkgs` input for my flake when using `clan-core`?

**A**: Pin your flake to a recent `nixpkgs` version. Here are two common approaches, each with its trade-offs:

## Option 1: Follow `clan-core`

- **Pros**:
    - Recommended for most users.
    - Verified by our CI and widely used by others.
- **Cons**:
    - Coupled to version bumps in `clan-core`.
    - Upstream features and packages may take longer to land.

Example:

```nix
inputs = {
  clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
  # Use the `nixpkgs` version locked in `clan-core`
  nixpkgs.follows = "clan-core/nixpkgs";
};
```

## Option 2: Use Your Own `nixpkgs` Version

- **Pros**:
    - Faster access to new upstream features and packages.
- **Cons**:
    - Recommended for advanced users.
    - Not covered by our CI; you're on the frontier.

Example:

```nix
inputs = {
  # Specify your own `nixpkgs` version
  nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";

  clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
  # Ensure `clan-core` uses your `nixpkgs` version
  clan-core.inputs.nixpkgs.follows = "nixpkgs";
};
```

## Recommended: Avoid Duplicate `nixpkgs` Entries

To prevent ambiguity or compatibility issues, check your `flake.lock` for duplicate `nixpkgs` entries. Duplicate entries indicate a missing `follows` directive in one of your flake inputs.

Example of duplicate entries in `flake.lock`:

```json
"nixpkgs": {
  "locked": {
    "lastModified": 315532800,
    "narHash": "sha256-1tUpklZsKzMGI3gjo/dWD+hS8cf+5Jji8TF5Cfz7i3I=",
    "rev": "08b8f92ac6354983f5382124fef6006cade4a1c1",
    "type": "tarball",
    "url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre862603.08b8f92ac635/nixexprs.tar.xz"
  },
  "original": {
    "type": "tarball",
    "url": "https://nixos.org/channels/nixpkgs-unstable/nixexprs.tar.xz"
  }
},
"nixpkgs_2": {
  "locked": {
    "lastModified": 1758346548,
    "narHash": "sha256-afXE7AJ7MY6wY1pg/Y6UPHNYPy5GtUKeBkrZZ/gC71E=",
    "owner": "nixos",
    "repo": "nixpkgs",
    "rev": "b2a3852bd078e68dd2b3dfa8c00c67af1f0a7d20",
    "type": "github"
  },
  "original": {
    "owner": "nixos",
    "ref": "nixos-25.05",
    "repo": "nixpkgs",
    "type": "github"
  }
}
```

To locate the source of duplicate entries, grep your `flake.lock` file. For example, if `home-manager` is referencing `nixpkgs_2` instead of the main `nixpkgs`:

```json
"home-manager": {
  "inputs": {
    "nixpkgs": "nixpkgs_2"
  }
}
```
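A quick way to find which inputs still pin the duplicate entry (a simple sketch using standard tools):

```bash
# List every line in flake.lock that references the duplicate entry
grep -n '"nixpkgs_2"' flake.lock
```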
Fix this by adding the following line to your `flake.nix` inputs:

```nix
home-manager.inputs.nixpkgs.follows = "nixpkgs";
```

Repeat this process until all duplicate `nixpkgs` entries are resolved. This ensures all inputs use the same `nixpkgs` source, preventing cross-version conflicts.
371
pkgs/docs-site/src/routes/docs/guides/secrets.md
Normal file
@@ -0,0 +1,371 @@
This article provides an overview of the underlying secrets system which is used by [Vars](../guides/vars/vars-overview.md).
Under most circumstances you should use [Vars](../guides/vars/vars-overview.md) directly instead.

Consider using `clan secrets` only for managing admin users and groups, and as a debugging tool.

Manually interacting with secrets via `clan secrets [set|remove]`, etc. may break the integrity of your `Vars` state.

---

Clan enables encryption of secrets (such as passwords & keys), ensuring security and ease of use among users.

By default, Clan uses the [sops](https://github.com/getsops/sops) format
and integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
Clan can also be configured to use other secret store [backends](../reference/clan.core/vars.md#clan.core.vars.settings.secretStore).

## Create Your Admin Keypair

To get started, you'll need to create **your admin keypair**.

!!! info
    Don't worry: if you've already made one before, this step won't change or overwrite it.

```bash
clan secrets key generate
```

**Output**:

```{.console, .no-copy}
Public key: age1wkth7uhpkl555g40t8hjsysr20drq286netu8zptw50lmqz7j95sw2t3l7

Generated age private key at '/home/joerg/.config/sops/age/keys.txt' for your user. Please back it up on a secure location or you will lose access to your secrets.
Also add your age public key to the repository with 'clan secrets users add YOUR_USER age1wkth7uhpkl555g40t8hjsysr20drq286netu8zptw50lmqz7j95sw2t3l7' (replace YOUR_USER with your actual username)
```

!!! warning
    Make sure to keep a safe backup of the private key you've just created.
    If it's lost, you won't be able to get to your secrets anymore because they all need the admin key to be unlocked.

If you already have an [age] secret key and want to use that instead, you can simply edit `~/.config/sops/age/keys.txt`:

```title="~/.config/sops/age/keys.txt"
AGE-SECRET-KEY-13GWMK0KNNKXPTJ8KQ9LPSQZU7G3KU8LZDW474NX3D956GGVFAZRQTAE3F4
```

Alternatively, you can provide your [age] secret key as the environment variable `SOPS_AGE_KEY`, or in a different file
using `SOPS_AGE_KEY_FILE`.
For more information see the [SOPS] guide on [encrypting with age].
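For example, a sketch (the key and path below are illustrative, not real values):

```bash
# Provide the age key inline ...
export SOPS_AGE_KEY="AGE-SECRET-KEY-1EXAMPLEEXAMPLEEXAMPLE"

# ... or point to a key file at a non-default location
export SOPS_AGE_KEY_FILE="$HOME/secure/age-keys.txt"
```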
|
||||
!!! note
|
||||
It's safe to add any secrets created by the clan CLI and placed in your repository to version control systems like `git`.
|
||||
|
||||
## Add Your Public Key(s)
|
||||
|
||||
```console
|
||||
clan secrets users add $USER --age-key <your_public_key>
|
||||
```
|
||||
|
||||
It's best to choose the same username as on your Setup/Admin Machine that you use to control the deployment with.
|
||||
|
||||
Once run this will create the following files:
|
||||
|
||||
```{.console, .no-copy}
|
||||
sops/
|
||||
└── users/
|
||||
└── <your_username>/
|
||||
└── key.json
|
||||
```
|
||||
If you followed the quickstart tutorial all necessary secrets are initialized at this point.
|
||||
|
||||
!!! note
|
||||
You can add multiple age keys for a user by providing multiple `--age-key <your_public_key>` flags:
|
||||
|
||||
```console
|
||||
clan secrets users add $USER \
|
||||
--age-key <your_public_key_1> \
|
||||
--age-key <your_public_key_2> \
|
||||
...
|
||||
```
|
||||
|
||||
## Manage Your Public Key(s)
|
||||
|
||||
You can list keys for your user with `clan secrets users get $USER`:
|
||||
|
||||
```console
|
||||
clan secrets users get alice
|
||||
|
||||
[
|
||||
{
|
||||
"publickey": "age1hrrcspp645qtlj29krjpq66pqg990ejaq0djcms6y6evnmgglv5sq0gewu",
|
||||
"type": "age",
|
||||
"username": "alice"
|
||||
},
|
||||
{
|
||||
"publickey": "age13kh4083t3g4x3ktr52nav6h7sy8ynrnky2x58pyp96c5s5nvqytqgmrt79",
|
||||
"type": "age",
|
||||
"username": "alice"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
To add a new key to your user:
|
||||
|
||||
```console
|
||||
clan secrets users add-key $USER --age-key <your_public_key>
|
||||
```
|
||||
|
||||
To remove a key from your user:
|
||||
|
||||
```console
|
||||
clan secrets users remove-key $USER --age-key <your_public_key>
|
||||
```
|
||||
|
||||
[age]: https://github.com/FiloSottile/age
|
||||
[age plugin]: https://github.com/FiloSottile/awesome-age?tab=readme-ov-file#plugins
|
||||
[sops]: https://github.com/getsops/sops
|
||||
[encrypting with age]: https://github.com/getsops/sops?tab=readme-ov-file#encrypting-using-age
|
||||
|
||||
## Adding a Secret
|
||||
|
||||
```shellSession
|
||||
clan secrets set mysecret
|
||||
Paste your secret:
|
||||
```
|
||||
|
||||
## Retrieving a Stored Secret
|
||||
|
||||
```bash
|
||||
clan secrets get mysecret
|
||||
```
|
||||
|
||||
## List all Secrets
|
||||
|
||||
```bash
|
||||
clan secrets list
|
||||
```
|
||||
|
||||
## NixOS integration
|
||||
|
||||
A NixOS machine will automatically import all secrets that are encrypted for the
|
||||
current machine. At runtime it will use the host key to decrypt all secrets into
|
||||
an in-memory, non-persistent filesystem using [sops-nix](https://github.com/Mic92/sops-nix).
|
||||
In your nixos configuration you can get a path to secrets like this `config.sops.secrets.<name>.path`. For example:
|
||||
|
||||
```nix
|
||||
{ config, ...}: {
|
||||
sops.secrets.my-password.neededFor = "users";
|
||||
|
||||
users.users.mic92 = {
|
||||
isNormalUser = true;
|
||||
passwordFile = config.sops.secrets.my-password.path;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
## Assigning Access
|
||||
|
||||
When using `clan secrets set <secret>` without arguments, secrets are encrypted for the key of the user named like your current $USER.
|
||||
|
||||
To add machines/users to an existing secret use:
|
||||
|
||||
```bash
|
||||
clan secrets machines add-secret <machine_name> <secret_name>
|
||||
```
|
||||
|
||||
Alternatively specify users and machines while creating a secret:
|
||||
|
||||
```bash
|
||||
clan secrets set --machine <machine1> --machine <machine2> --user <user1> --user <user2> <secret_name>
|
||||
```

## Advanced

This section covers more advanced secret management topics.

### Groups

The Clan CLI makes it easy to manage access by allowing you to create groups.

All users within a group inherit access to all secrets of the group.

This eases the process of handling permissions for multiple users.

Here's how to get started:

1. **Creating Groups**:

    Assign users to a new group, e.g., `admins`:

    ```bash
    clan secrets groups add-user admins <username>
    ```

2. **Listing Groups**:

    ```bash
    clan secrets groups list
    ```

3. **Assigning Secrets to Groups**:

    ```bash
    clan secrets groups add-secret <group_name> <secret_name>
    ```

!!! tip
    To encrypt all secrets of a machine for a specific group, use the following NixOS configuration:

    ```nix
    {
      clan.core.sops.defaultGroups = [ "groupname" ];
    }
    ```
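
Access granted this way can also be revoked, assuming the removal commands mirror the add commands above (check `clan secrets groups --help` for the exact set):

```bash
clan secrets groups remove-user admins <username>
clan secrets groups remove-secret <group_name> <secret_name>
```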

### Adding Machine Keys

New machines in Clan come with age keys stored in `./sops/machines/<machine_name>`. To list these machines:

```bash
clan secrets machines list
```

For existing machines, add their keys:

```bash
clan secrets machines add <machine_name> <age_key>
```

To derive an age key from an SSH host key:

```bash
ssh-keyscan <domain_name> | nix shell nixpkgs#ssh-to-age -c ssh-to-age
```
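
The two steps can be combined into one; for instance (a sketch with placeholder names, assuming the machine's ed25519 host key is reachable over SSH):

```bash
# "server01" and its hostname are placeholders; the derived age key is
# registered for the machine in a single step.
clan secrets machines add server01 \
  "$(ssh-keyscan -t ed25519 server01.example.com | nix shell nixpkgs#ssh-to-age -c ssh-to-age)"
```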

### Migration: Importing existing sops-based keys / sops-nix

`clan secrets` stores each secret in a single file, whereas [sops](https://github.com/getsops/sops) commonly puts all secrets into one YAML or JSON document.

If you have been using sops-nix, you can migrate by importing those files with the `clan secrets import-sops` command:

```bash
% clan secrets import-sops --prefix matchbox- --group admins --machine matchbox nixos/matchbox/secrets/secrets.yaml
```

This will create a secret in the `./sops` folder of your repository for each secret found in `nixos/matchbox/secrets/secrets.yaml`.
Each member of the group `admins` will be able to decrypt the secrets with their respective key.

Since the clan secret module auto-imports secrets that are encrypted for a particular NixOS machine,
you can now remove `sops.secrets.<name> = { };` unless you need to specify more options for the secret, such as the owner or group of the secret file.
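
For example, keeping extra options on an auto-imported secret might look like this (a sketch; `matchbox-wifi-password` is a hypothetical name produced by the prefixed import above):

```nix
{
  # The secret itself is still auto-imported; only extra options are set here.
  sops.secrets."matchbox-wifi-password" = {
    owner = "matchbox";
    group = "matchbox";
  };
}
```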

## In-depth Explanation

The secrets system conceptually knows two different entities:

- **Machine**: consumes secrets
- **User**: manages access to secrets

**A user** can add or revoke machines' access to secrets.

**A machine** can decrypt secrets that were encrypted specifically for that machine.

!!! Danger
    **Always make sure at least one _user_ has access to a secret**. Otherwise you could lock yourself out of the secret.

### Inherited implications

By default, Clan uses [sops](https://github.com/getsops/sops) through [sops-nix](https://github.com/Mic92/sops-nix) to manage its secrets, which carries some implications that are important to understand:

- **Public/private keys**: Entities are identified via their public keys. Each entity can use its respective private key to decrypt a secret.
- **Public keys are stored**: All public keys are stored inside the repository.
- **Secrets are stored encrypted**: Secrets are stored inside the repository, encrypted with the respective public keys.
- **Secrets are deployed encrypted**: Fully encrypted secrets are deployed to machines at deployment time.
- **Secrets are decrypted by sops on demand**: Each machine decrypts its secrets at runtime and stores them at an ephemeral location.
- **Machine key pairs are auto-generated**: When a machine is created, **no user interaction is required** to set up its public/private key pair.
- **Secrets are re-encrypted**: Whenever machines, users, or groups are modified, secrets are re-encrypted on demand.

!!! Important
    After revoking access to a secret, you should also rotate the underlying secret itself, e.g. change the API key or the password.
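
In practice, rotating usually means generating a new credential and setting the secret again, which re-encrypts it for the currently assigned recipients (behaviour assumed from the commands above; verify with your CLI version):

```bash
clan secrets set mysecret  # prompts for the new value
```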

---

### Machine and user keys

The following diagram illustrates how a user can provide a secret (e.g. a password).

- Using the **Clan CLI**, a user encrypts the password with both the **user's public key** and the **machine's public key**.

- The *machine* can decrypt the password with its private key on demand.

- The *user* is able to decrypt the password to make changes to it.

```plantuml
@startuml

actor "User" as user
database "Secret" as secret
rectangle "Machine" as machine

user -right-> secret : Encrypt\n(Pubkeys: User, Machine)
secret -left-> user : Decrypt\n(user privkey)
secret -right-> machine : Decrypt\n(machine privkey)

@enduml
```

#### User groups

Here we illustrate how user groups work.

Common use cases:

- **Shared management**: Access shared among multiple users, e.g. a subset of secrets/machines that have two admins

```plantuml
@startuml

rectangle "Group" {
  actor "User A" as user1
  actor "User B" as user2
}

database "Secret" as secret
rectangle "Machine" as machine

user1 -right-> secret : Encrypt
user2 -right-> secret : (Pubkeys: User A, User B, Machine)
secret -right-> machine : Decrypt\n(machine privkey)

@enduml
```

<!-- TODO: See also [Groups Reference](#groups-reference) -->

---

#### Machine groups

Here we illustrate how machine groups work.

Common use cases:

- **Shared secrets**: Secrets shared among multiple machines, such as Wi-Fi passwords

```plantuml
@startuml

actor "User" as user
database "Secret" as secret
rectangle "Group" {
  rectangle "Machine A" as machine1
  rectangle "Machine B" as machine2
}

user -right-> secret : Encrypt\n(Pubkeys: machine A, machine B, User)
secret -down-> machine1 : Decrypt
secret -down-> machine2 : (Both machines can decrypt\nusing their private key)

@enduml
```

<!-- TODO: See also [Groups Reference](#groups-reference) -->

See the sops-nix [readme](https://github.com/Mic92/sops-nix) for more examples.

55
pkgs/docs-site/src/routes/docs/guides/secure-boot.md
Normal file
@@ -0,0 +1,55 @@
At the moment, NixOS/Clan does not support [Secure Boot](https://wiki.gentoo.org/wiki/Secure_Boot). Therefore, you need to disable it in the BIOS. You can watch this [video guide](https://www.youtube.com/watch?v=BKVShiMUePc) or follow the instructions below:

## Insert the USB Stick

- Begin by inserting the USB stick into a USB port on your computer.

## Access the UEFI/BIOS Menu

- Restart your computer.
- As your computer restarts, press the appropriate key to enter the UEFI/BIOS settings.

??? tip "The key depends on your laptop or motherboard manufacturer. Click to see a reference list:"
    | Manufacturer        | UEFI/BIOS Key(s)                              |
    |---------------------|-----------------------------------------------|
    | ASUS                | `Del`, `F2`                                   |
    | MSI                 | `Del`, `F2`                                   |
    | Gigabyte            | `Del`, `F2`                                   |
    | ASRock              | `Del`, `F2`                                   |
    | Lenovo              | `F1`, `F2`, `Enter` (alternatively `Fn + F2`) |
    | HP                  | `Esc`, `F10`                                  |
    | Dell                | `F2`, `Fn + F2`, `Esc`                        |
    | Acer                | `F2`, `Del`                                   |
    | Samsung             | `F2`, `F10`                                   |
    | Toshiba             | `F2`, `Esc`                                   |
    | Sony                | `F2`, `Assist` button                         |
    | Fujitsu             | `F2`                                          |
    | Microsoft Surface   | `Volume Up` + `Power`                         |
    | IBM/Lenovo ThinkPad | `Enter`, `F1`, `F12`                          |
    | Biostar             | `Del`                                         |
    | Zotac               | `Del`, `F2`                                   |
    | EVGA                | `Del`                                         |
    | Origin PC           | `F2`, `Delete`                                |

!!! Note
    Pressing the key quickly and repeatedly is sometimes necessary to access the UEFI/BIOS menu, as the window to enter this mode is brief.

## Access Advanced Mode (Optional)

- If your UEFI/BIOS has a `Simple` or `Easy` mode interface, look for an option labeled `Advanced Mode` (often found in the lower right corner).
- Click on `Advanced Mode` to access more settings. This step is optional, as your boot settings might be available in the basic view.

## Disable Secure Boot

- Locate the `Secure Boot` option in your UEFI/BIOS settings. This is typically found under a `Security` tab, `Boot` tab, or a similarly named section.
- Set the `Secure Boot` option to `Disabled`.

## Change Boot Order

- Find the option to adjust the boot order, often labeled `Boot Order`, `Boot Sequence`, or `Boot Priority`.
- Ensure that your USB device is set as the first boot option. This allows your computer to boot from the USB stick.

## Save and Exit

- Save your changes before exiting the UEFI/BIOS menu. Look for a `Save & Exit` option or press the corresponding function key (often `F10`).
- Your computer should now restart and boot from the USB stick.