diff --git a/docs/nix/render_options/__init__.py b/docs/nix/render_options/__init__.py
index fd8bf49dd..adbc96d1c 100644
--- a/docs/nix/render_options/__init__.py
+++ b/docs/nix/render_options/__init__.py
@@ -452,7 +452,6 @@ Each `clanService`:
 * Is a module of class **`clan.service`**
 * Can define **roles** (e.g., `client`, `server`)
 * Uses **`inventory.instances`** to configure where and how it is deployed
-* Replaces the legacy `clanModules` and `inventory.services` system altogether
 
 !!! Note
     `clanServices` are part of Clan's next-generation service model and are intended to replace `clanModules`.
diff --git a/docs/site/guides/authoring/clanModules/index.md b/docs/site/guides/authoring/clanModules/index.md
index 2ee9d0951..6bbb559dc 100644
--- a/docs/site/guides/authoring/clanModules/index.md
+++ b/docs/site/guides/authoring/clanModules/index.md
@@ -52,6 +52,7 @@ clanModules/borgbackup
 ```nix title="flake.nix"
 # ...
+# Sometimes this attribute set is defined in clan.nix
 clan-core.lib.clan {
     # 1. Add the module to the available clanModules with inventory support
     inventory.modules = {
@@ -175,6 +176,7 @@ The following shows how to add options to your module.
 Configuration can be set as follows.
 
 ```nix title="flake.nix"
+# Sometimes this attribute set is defined in clan.nix
 clan-core.lib.clan {
     inventory.services = {
         custom-module.instance_1 = {
diff --git a/docs/site/guides/authoring/clanServices/index.md b/docs/site/guides/authoring/clanServices/index.md
index 319e29e44..f3e86bb70 100644
--- a/docs/site/guides/authoring/clanServices/index.md
+++ b/docs/site/guides/authoring/clanServices/index.md
@@ -27,6 +27,7 @@ i.e. `@hsjobeki/customNetworking`
 outputs = inputs: inputs.flake-parts.lib.mkFlake { inherit inputs; } ({
   imports = [ inputs.clan-core.flakeModules.default ];
   # ...
+  # Sometimes this attribute set is defined in clan.nix
   clan = {
     # If needed: Exporting the module for other people
     modules."@hsjobeki/customNetworking" = import ./service-modules/networking.nix;
@@ -218,6 +219,7 @@ To import the module use `importApply`
 outputs = inputs: flake-parts.lib.mkFlake { inherit inputs; } ({self, lib, ...}: {
   imports = [ inputs.clan-core.flakeModules.default ];
   # ...
+  # Sometimes this attribute set is defined in clan.nix
   clan = {
     # Register the module
     modules."@hsjobeki/messaging" = lib.importApply ./service-modules/messaging.nix { inherit self; };
@@ -244,6 +246,7 @@ Then wrap the module and forward the variable `self` from the outer context into
 outputs = inputs: flake-parts.lib.mkFlake { inherit inputs; } ({self, lib, ...}: {
   imports = [ inputs.clan-core.flakeModules.default ];
   # ...
+  # Sometimes this attribute set is defined in clan.nix
   clan = {
     # Register the module
     modules."@hsjobeki/messaging" = {
diff --git a/docs/site/guides/getting-started/add-machines.md b/docs/site/guides/getting-started/add-machines.md
index 1f0726fb8..8c10dc499 100644
--- a/docs/site/guides/getting-started/add-machines.md
+++ b/docs/site/guides/getting-started/add-machines.md
@@ -90,6 +90,7 @@ See the complete [list](../../guides/more-machines.md#automatic-registration) of
 The option: `machines.` is used to add extra *nixosConfiguration* to a machine
 
 ```{.nix .annotate title="flake.nix" hl_lines="3-13 18-22"}
+# Sometimes this attribute set is defined in clan.nix
 clan = {
     inventory.machines = {
         jon = {
diff --git a/docs/site/guides/getting-started/add-services.md b/docs/site/guides/getting-started/add-services.md
index 6a0b08cc6..a4355b112 100644
--- a/docs/site/guides/getting-started/add-services.md
+++ b/docs/site/guides/getting-started/add-services.md
@@ -28,6 +28,7 @@ To learn more: [Guide about clanService](../clanServices.md)
 inputs@{ flake-parts, ... }:
 flake-parts.lib.mkFlake { inherit inputs; } {
   imports = [ inputs.clan-core.flakeModules.default ];
+  # Sometimes this attribute set is defined in clan.nix
   clan = {
     inventory.machines = {
       jon = {
@@ -76,6 +77,7 @@ Adding the following services is recommended for most users:
 inputs@{ flake-parts, ... }:
 flake-parts.lib.mkFlake { inherit inputs; } {
   imports = [ inputs.clan-core.flakeModules.default ];
+  # Sometimes this attribute set is defined in clan.nix
   clan = {
     inventory.machines = {
       jon = {
diff --git a/docs/site/guides/getting-started/index.md b/docs/site/guides/getting-started/index.md
index 8e7713021..30bfaaf9d 100644
--- a/docs/site/guides/getting-started/index.md
+++ b/docs/site/guides/getting-started/index.md
@@ -103,10 +103,15 @@ Don’t worry if your output looks different—the template evolves over time.
     run `nix develop` every time, we recommend setting up [direnv](https://direnv.net/).
 
 ```
-clan machines list
+clan show
 ```
 
-If you see no output yet, that’s expected — [add machines](./add-machines.md) to populate it.
+You should see something like this:
+
+```terminal-session
+Name: my-clan
+Description: None
+```
 
 ---
diff --git a/docs/site/guides/getting-started/secrets.md b/docs/site/guides/getting-started/secrets.md
index 2fb9df600..5475875d6 100644
--- a/docs/site/guides/getting-started/secrets.md
+++ b/docs/site/guides/getting-started/secrets.md
@@ -152,6 +152,7 @@ are loaded when using Clan:
 outputs =
   { self, clan-core, ... }:
   let
+    # Sometimes this attribute set is defined in clan.nix
     clan = clan-core.lib.clan {
       inherit self;
diff --git a/docs/site/guides/mesh-vpn.md b/docs/site/guides/mesh-vpn.md
index 038de1ce7..ef9878576 100644
--- a/docs/site/guides/mesh-vpn.md
+++ b/docs/site/guides/mesh-vpn.md
@@ -39,6 +39,7 @@ For the purpose of this guide we have two machines:
 outputs =
   { self, clan-core, ... }:
   let
+    # Sometimes this attribute set is defined in clan.nix
     clan = clan-core.lib.clan {
       inherit self;
diff --git a/docs/site/guides/target-host.md b/docs/site/guides/target-host.md
index aec79beba..d413ac6cb 100644
--- a/docs/site/guides/target-host.md
+++ b/docs/site/guides/target-host.md
@@ -14,6 +14,7 @@ If the hostname is **static**, like `server.example.com`, set it in the **invent
 outputs =
   { self, clan-core, ... }:
   let
+    # Sometimes this attribute set is defined in clan.nix
     clan = clan-core.lib.clan {
       inventory.machines.jon = {
         deploy.targetHost = "root@server.example.com";
@@ -41,6 +42,7 @@ If your target host depends on a **dynamic expression** (like using the machine
 outputs =
   { self, clan-core, ... }:
   let
+    # Sometimes this attribute set is defined in clan.nix
     clan = clan-core.lib.clan {
       machines.jon = {config, ...}: {
         clan.core.networking.targetHost = "jon@${config.networking.fqdn}";
diff --git a/pkgs/clan-cli/clan_cli/clan/show.py b/pkgs/clan-cli/clan_cli/clan/show.py
index 1b7e0beac..97b8ffb66 100644
--- a/pkgs/clan-cli/clan_cli/clan/show.py
+++ b/pkgs/clan-cli/clan_cli/clan/show.py
@@ -2,17 +2,17 @@ import argparse
 import logging
 
 from clan_lib.clan.get import get_clan_details
+from clan_lib.flake import Flake
 
 log = logging.getLogger(__name__)
 
 
 def show_command(args: argparse.Namespace) -> None:
-    flake_path = args.flake.path
-    meta = get_clan_details(flake_path)
+    flake: Flake = args.flake
+    meta = get_clan_details(flake)
 
     print(f"Name: {meta.get('name')}")
     print(f"Description: {meta.get('description', '-')}")
-    print(f"Icon: {meta.get('icon', '-')}")
 
 
 def register_parser(parser: argparse.ArgumentParser) -> None:
diff --git a/pkgs/clan-cli/clan_cli/tests/test_clan_nix_attrset.py b/pkgs/clan-cli/clan_cli/tests/test_clan_nix_attrset.py
index 6994f7681..b510d2184 100644
--- a/pkgs/clan-cli/clan_cli/tests/test_clan_nix_attrset.py
+++ b/pkgs/clan-cli/clan_cli/tests/test_clan_nix_attrset.py
@@ -64,8 +64,6 @@ def test_clan_core_templates(
     assert (flake_nix).exists()
     assert (flake_nix).is_file()
 
-    assert (my_clan / "machines").is_dir()
-
     # Test if we can write to the flake.nix file
     with flake_nix.open("r+") as f:
         data = f.read()
diff --git a/pkgs/clan-cli/clan_cli/tests/test_create_flake.py b/pkgs/clan-cli/clan_cli/tests/test_create_flake.py
index eb8aaa45c..5c5b2a944 100644
--- a/pkgs/clan-cli/clan_cli/tests/test_create_flake.py
+++ b/pkgs/clan-cli/clan_cli/tests/test_create_flake.py
@@ -23,7 +23,6 @@ def test_create_flake(
     cli.run(["flakes", "create", str(flake_dir), "--template=default", "--no-update"])
 
-    assert (flake_dir / ".clan-flake").exists()
     # Replace the inputs.clan.url in the template flake.nix
     substitute(
         flake_dir / "flake.nix",
@@ -37,10 +36,10 @@
 
     # create a hardware-configuration.nix that doesn't throw an eval error
-    for patch_machine in ["jon", "sara"]:
-        (
-            flake_dir / "machines" / f"{patch_machine}/hardware-configuration.nix"
-        ).write_text("{}")
+    # for patch_machine in ["jon", "sara"]:
+    #     (
+    #         flake_dir / "machines" / f"{patch_machine}/hardware-configuration.nix"
+    #     ).write_text("{}")
 
     with capture_output as output:
         cli.run(["machines", "list"])
@@ -68,7 +67,6 @@ def test_create_flake_existing_git(
     cli.run(["flakes", "create", str(flake_dir), "--template=default", "--no-update"])
 
-    assert (flake_dir / ".clan-flake").exists()
     # Replace the inputs.clan.url in the template flake.nix
     substitute(
         flake_dir / "flake.nix",
@@ -81,10 +79,10 @@
 
     # create a hardware-configuration.nix that doesn't throw an eval error
-    for patch_machine in ["jon", "sara"]:
-        (
-            flake_dir / "machines" / f"{patch_machine}/hardware-configuration.nix"
-        ).write_text("{}")
+    # for patch_machine in ["jon", "sara"]:
+    #     (
+    #         flake_dir / "machines" / f"{patch_machine}/hardware-configuration.nix"
+    #     ).write_text("{}")
 
     with capture_output as output:
         cli.run(["machines", "list"])
diff --git a/pkgs/clan-cli/clan_cli/tests/test_dirs.py b/pkgs/clan-cli/clan_cli/tests/test_dirs.py
index 6a5f938cc..3fe44f510 100644
--- a/pkgs/clan-cli/clan_cli/tests/test_dirs.py
+++ b/pkgs/clan-cli/clan_cli/tests/test_dirs.py
@@ -13,7 +13,6 @@
 #     subdir = temporary_home / "subdir"
 #     subdir.mkdir()
 #     monkeypatch.chdir(subdir)
-#     (subdir / ".clan-flake").touch()
 #     assert _get_clan_flake_toplevel() == subdir
 
 from clan_lib.dirs import clan_key_safe, vm_state_dir
diff --git a/templates/clan/default/.clan-flake b/templates/clan/default/.clan-flake
deleted file mode 100644
index 406fcfebe..000000000
--- a/templates/clan/default/.clan-flake
+++ /dev/null
@@ -1,2 +0,0 @@
-# DO NOT DELETE
-# This file is used by the clan cli to discover a clan flake
diff --git a/templates/clan/default/clan.nix b/templates/clan/default/clan.nix
new file mode 100644
index 000000000..3bc5f1815
--- /dev/null
+++ b/templates/clan/default/clan.nix
@@ -0,0 +1,45 @@
+{
+  # Ensure this is unique among all clans you want to use.
+  meta.name = "__CHANGE_ME__";
+
+  # Docs: See https://docs.clan.lol/reference/clanServices
+  inventory.instances = {
+
+    # Docs: https://docs.clan.lol/reference/clanServices/admin/
+    # Admin service for managing machines
+    # This service adds a root password and SSH access.
+    admin = {
+      roles.default.tags.all = { };
+      roles.default.settings.allowedKeys = {
+        # Insert the public key that you want to use for SSH access.
+        # All keys will have SSH access to all machines ("tags.all" means 'all machines').
+        # Alternatively set 'users.users.root.openssh.authorizedKeys.keys' in each machine
+        "admin-machine-1" = "__YOUR_PUBLIC_KEY__";
+      };
+    };
+
+    # Docs: https://docs.clan.lol/reference/clanServices/zerotier/
+    # The lines below will define a zerotier network and add all machines as 'peer' to it.
+    # !!! Manual steps required:
+    #   - Define a controller machine for the zerotier network.
+    #   - Deploy the controller machine first to initialize the network.
+    zerotier = {
+      # Replace with the name (string) of your machine that you will use as zerotier-controller
+      # See: https://docs.zerotier.com/controller/
+      # Deploy this machine first to create the network secrets
+      roles.controller.machines."__YOUR_CONTROLLER__" = { };
+      # Peers of the network
+      # tags.all means 'all machines' will join
+      roles.peer.tags.all = { };
+    };
+  };
+
+  # Additional NixOS configuration can be added here.
+  # machines/machine1/configuration.nix will be automatically imported.
+  # See: https://docs.clan.lol/guides/more-machines/#automatic-registration
+  machines = {
+    # machine1 = { config, ... }: {
+    #   environment.systemPackages = [ pkgs.asciinema ];
+    # };
+  };
+}
diff --git a/templates/clan/default/flake.nix b/templates/clan/default/flake.nix
index 141177063..0b339d04b 100644
--- a/templates/clan/default/flake.nix
+++ b/templates/clan/default/flake.nix
@@ -8,21 +8,7 @@
     # Usage see: https://docs.clan.lol
     clan = clan-core.lib.clan {
       inherit self;
-      # Ensure this is unique among all clans you want to use.
-      meta.name = "__CHANGE_ME__";
-
-      # All machines in ./machines will be imported.
-
-      # Prerequisite: boot into the installer.
-      # See: https://docs.clan.lol/guides/getting-started/installer
-      # local> mkdir -p ./machines/machine1
-      # local> Edit ./machines//configuration.nix to your liking.
-      machines = {
-        # You can also specify additional machines here.
-        # somemachine = {
-        #   imports = [ ./some-machine/configuration.nix ];
-        # }
-      };
+      imports = [ ./clan.nix ];
     };
   in
   {
diff --git a/templates/clan/default/machines/jon/configuration.nix b/templates/clan/default/machines/jon/configuration.nix
deleted file mode 100644
index a675ce882..000000000
--- a/templates/clan/default/machines/jon/configuration.nix
+++ /dev/null
@@ -1,35 +0,0 @@
-{
-  imports = [
-    # contains your disk format and partitioning configuration.
-    ../../modules/disko.nix
-    # this file is shared among all machines
-    ../../modules/shared.nix
-    # enables GNOME desktop (optional)
-    ../../modules/gnome.nix
-  ];
-
-  # This is your user login name.
-  users.users.user.name = "";
-
-  # Set this for clan commands use ssh i.e. `clan machines update`
-  # If you change the hostname, you need to update this line to root@
-  # This only works however if you have avahi running on your admin machine else use IP
-  clan.core.networking.targetHost = "root@";
-
-  # You can get your disk id by running the following command on the installer:
-  # Replace with the IP of the installer printed on the screen or by running the `ip addr` command.
-  # ssh root@ lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
-  disko.devices.disk.main.device = "/dev/disk/by-id/__CHANGE_ME__";
-
-  # IMPORTANT! Add your SSH key here
-  # e.g. > cat ~/.ssh/id_ed25519.pub
-  users.users.root.openssh.authorizedKeys.keys = [
-    ''
-      __YOUR_SSH_KEY__
-    ''
-  ];
-
-  # Zerotier needs one controller to accept new nodes. Once accepted
-  # the controller can be offline and routing still works.
-  clan.core.networking.zerotier.controller.enable = true;
-}
diff --git a/templates/clan/default/machines/sara/configuration.nix b/templates/clan/default/machines/sara/configuration.nix
deleted file mode 100644
index 66da728a1..000000000
--- a/templates/clan/default/machines/sara/configuration.nix
+++ /dev/null
@@ -1,34 +0,0 @@
-{
-  imports = [
-    ../../modules/disko.nix
-    ../../modules/shared.nix
-    # enables GNOME desktop (optional)
-    ../../modules/gnome.nix
-  ];
-  # Put your username here for login
-  users.users.user.name = "";
-
-  # Set this for clan commands use ssh i.e. `clan machines update`
-  # If you change the hostname, you need to update this line to root@
-  # This only works however if you have avahi running on your admin machine else use IP
-  clan.core.networking.targetHost = "root@";
-
-  # You can get your disk id by running the following command on the installer:
-  # Replace with the IP of the installer printed on the screen or by running the `ip addr` command.
-  # ssh root@ lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
-  disko.devices.disk.main.device = "/dev/disk/by-id/__CHANGE_ME__";
-
-  # IMPORTANT! Add your SSH key here
-  # e.g. > cat ~/.ssh/id_ed25519.pub
-  users.users.root.openssh.authorizedKeys.keys = [
-    ''
-      __YOUR_SSH_KEY__
-    ''
-  ];
-  /*
-    After jon is deployed, uncomment the following line
-    This will allow sara to share the VPN overlay network with jon
-    The networkId is generated by the first deployment of jon
-  */
-  # clan.core.networking.zerotier.networkId = builtins.readFile ../../vars/per-machine/jon/zerotier/zerotier-network-id/value;
-}
diff --git a/templates/clan/default/modules/gnome.nix b/templates/clan/default/modules/gnome.nix
index bcbc5a148..b4f77768d 100644
--- a/templates/clan/default/modules/gnome.nix
+++ b/templates/clan/default/modules/gnome.nix
@@ -1,3 +1,17 @@
+/*
+  This is an example of a simple NixOS module:
+
+  Enables the GNOME desktop environment and the GDM display manager.
+
+  To use this module, import it in your machine's NixOS configuration like this:
+
+  ```nix
+  imports = [
+    modules/gnome.nix
+  ];
+  ```
+*/
+{ ... }:
 {
   services.xserver.enable = true;
   services.xserver.desktopManager.gnome.enable = true;
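
For orientation, here is a minimal sketch (not part of the patch) of how the two new template files fit together after this change: `flake.nix` keeps only the wiring and pulls the clan definition in via `imports = [ ./clan.nix ];`, while `clan.nix` holds `meta.name` and the `inventory.instances`. Attribute names mirror the template files added above; the concrete values (machine name, key name, SSH key) are placeholders, and the rest of the flake boilerplate is elided.

```nix
# clan.nix -- the clan definition (sketch; values are placeholders)
{
  # Must be unique among all clans you use.
  meta.name = "my-clan";

  inventory.instances = {
    admin = {
      # Apply the admin role to all machines.
      roles.default.tags.all = { };
      roles.default.settings.allowedKeys = {
        # Placeholder SSH public key.
        "my-admin-key" = "ssh-ed25519 AAAA...";
      };
    };
  };
}
```

```nix
# flake.nix -- only the wiring remains (sketch; other inputs/outputs elided)
clan = clan-core.lib.clan {
  inherit self;
  # The definition shown above.
  imports = [ ./clan.nix ];
};
```

Splitting the definition out of `flake.nix` appears to be why the docs hunks above add the note "Sometimes this attribute set is defined in clan.nix": the same attribute set can live either inline in the flake or in a separate file imported into `clan-core.lib.clan`.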