Compare commits: 6a6a371256...ke-docs-ad
3 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 3da15fac3b | |
| | 9579da1d4c | |
| | 830da48943 | |
@@ -69,6 +69,7 @@ nav:
 - Zerotier VPN: guides/mesh-vpn.md
 - How to disable Secure Boot: guides/secure-boot.md
 - Flake-parts: guides/flake-parts.md
+- Nixos-rebuild: guides/nixos-rebuild.md
 - macOS: guides/macos.md
 - Contributing:
   - Contributing: guides/contributing/CONTRIBUTING.md

docs/site/guides/nixos-rebuild.md (new file, 68 lines)
@@ -0,0 +1,68 @@
# Can I still use `nixos-rebuild`?

**Yes, you can still use `nixos-rebuild` with clan!**

Clan is built on top of standard `NixOS` and uses `nixos-rebuild` internally.
However, there are important considerations when using `nixos-rebuild` directly instead of `clan machines update`.
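
For reference, the standard clan flow is a single command; the machine name `my-machine` below is a placeholder:

```bash
# Standard clan workflow: uploads vars and the flake source, then rebuilds the machine
clan machines update my-machine
```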

## Important Considerations

!!! warning "Vars Must Be Uploaded First"

    If your configuration uses clan vars, failing to run `clan vars upload` before `nixos-rebuild` will result in missing secrets and potentially broken services.

!!! info "Build Host Configuration"

    Clan automatically handles build host configuration based on your machine settings.
    When using `nixos-rebuild` manually, you need to specify `--build-host` and `--target-host` options yourself.
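
For example, a fully manual remote-build invocation might look like this sketch (hostnames and the machine name are placeholders; a local-build variant is shown in the workflow below):

```bash
# Build on a remote builder, then activate the result on the target machine
nixos-rebuild switch --flake .#my-machine \
  --build-host root@build01.example.com \
  --target-host root@target.example.com
```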

## How Clan Uses nixos-rebuild

Clan doesn't replace `nixos-rebuild`; it enhances it. When you run `clan machines update`, clan:

1. Generates and uploads secrets/variables (if any)
2. Uploads the flake source to the target/build host (if needed)
3. Runs `nixos-rebuild switch` with the appropriate options
4. Handles remote building and deployment automatically

Under the hood, clan executes commands like:
```bash
nixos-rebuild switch --fast --build-host buildHost --flake /path/to/flake#machine-name
```

## When You Need `clan vars upload`

If your clan configuration uses **variables (vars)**, that is, generated secrets, keys, or configuration values, you **must** run `clan vars upload` before using `nixos-rebuild` directly.

### Systems that use vars include:

- Any `clanModules` with generated secrets (zerotier, borgbackup, etc.)
- Custom generators that create passwords or keys
- Services that need shared configuration values

### Systems that don't need vars:

- Basic NixOS configurations without clan-specific services
- Static configurations with hardcoded values
- Systems using only traditional NixOS secrets management
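
If you are unsure whether a machine has vars at all, you can list them first; this assumes the `clan vars list` subcommand is available in your clan-cli version:

```bash
# Show the vars clan manages for this machine (machine name is a placeholder)
clan vars list my-machine
```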

## Manual nixos-rebuild Workflow

When you want to use `nixos-rebuild` directly:

### Step 1: Upload vars (if needed)

```bash
# Upload secret vars to the target machine
clan vars upload my-machine
```

### Step 2: Run nixos-rebuild

```bash
nixos-rebuild switch --flake .#my-machine --target-host root@target-ip --build-host localhost
```

## Related Documentation

- [Update Your Machines](getting-started/update.md) - Using clan's update command
- [Variables (Vars)](../reference/clan-core/vars.md) - Understanding the vars system

@@ -77,9 +77,7 @@ class SecretStore(SecretStoreBase):
                check=False,
            ).stdout.strip(),
        )
-        for symlink in Path(password_store).glob(f"machines/{self.machine.name}/**/*"):
-            if symlink.is_symlink():
-                hashes.append(
+        hashes.extend(
            subprocess.run(
                nix_shell(
                    ["git"],
@@ -95,7 +93,11 @@ class SecretStore(SecretStoreBase):
                ),
                stdout=subprocess.PIPE,
                check=False,
-            ).stdout.strip(),
+            ).stdout.strip()
+            for symlink in Path(password_store).glob(
+                f"machines/{self.machine.name}/**/*",
+            )
+            if symlink.is_symlink()
        )

        # we sort the hashes to make sure that the order is always the same
@@ -23,13 +23,9 @@ sops_groups_folder = gen_sops_subfolder("groups")


def list_objects(path: Path, is_valid: Callable[[str], bool]) -> list[str]:
-    objs: list[str] = []
    if not path.exists():
-        return objs
-    for f in path.iterdir():
-        if is_valid(f.name):
-            objs.append(f.name)
-    return objs
+        return []
+    return [f.name for f in path.iterdir() if is_valid(f.name)]


def remove_object(path: Path, name: str) -> list[Path]:
@@ -64,17 +64,17 @@ def list_groups(flake_dir: Path) -> list[Group]:
        if not group_folder.is_dir():
            continue
        machines_path = machines_folder(flake_dir, group.name)
-        machines = []
-        if machines_path.is_dir():
-            for f in machines_path.iterdir():
-                if validate_hostname(f.name):
-                    machines.append(f.name)
+        machines = (
+            [f.name for f in machines_path.iterdir() if validate_hostname(f.name)]
+            if machines_path.is_dir()
+            else []
+        )
        users_path = users_folder(flake_dir, group.name)
-        users = []
-        if users_path.is_dir():
-            for f in users_path.iterdir():
-                if VALID_USER_NAME.match(f.name):
-                    users.append(f.name)
+        users = (
+            [f.name for f in users_path.iterdir() if VALID_USER_NAME.match(f.name)]
+            if users_path.is_dir()
+            else []
+        )
        groups.append(Group(flake_dir, group.name, machines, users))
    return groups
@@ -270,11 +270,11 @@ def get_groups(flake_dir: Path, what: str, name: str) -> list[str]:
    if not groups_dir.exists():
        return []

-    groups = []
-    for group in groups_dir.iterdir():
-        if group.is_dir() and (group / what / name).is_symlink():
-            groups.append(group.name)
-    return groups
+    return [
+        group.name
+        for group in groups_dir.iterdir()
+        if group.is_dir() and (group / what / name).is_symlink()
+    ]


def add_secret_command(args: argparse.Namespace) -> None:
@@ -41,7 +41,7 @@ log = logging.getLogger(__name__)


def list_generators_secrets(generators_path: Path) -> list[Path]:
-    paths = []
+    paths: list[Path] = []
    for generator_path in generators_path.iterdir():
        if not generator_path.is_dir():
            continue
@@ -49,11 +49,13 @@ def list_generators_secrets(generators_path: Path) -> list[Path]:
        def validate(generator_path: Path, name: str) -> bool:
            return has_secret(generator_path / name)

+        paths.extend(
+            generator_path / obj
            for obj in list_objects(
                generator_path,
                functools.partial(validate, generator_path),
-        ):
-            paths.append(generator_path / obj)
+            )
+        )
    return paths
@@ -58,10 +58,7 @@ def ssh_command(args: argparse.Namespace) -> None:
        raise ClanError(msg)

    # Convert ssh_option list to dictionary
-    ssh_options = {}
-    if args.ssh_option:
-        for name, value in args.ssh_option:
-            ssh_options[name] = value
+    ssh_options = dict(args.ssh_option) if args.ssh_option else {}

    remote = remote.override(
        host_key_check=args.host_key_check,
@@ -63,7 +63,7 @@ def find_dataclasses_in_directory(
                    and isinstance(deco.func, ast.Name)
                    and deco.func.id == "dataclass"
                ):
-                    dataclass_files.append((file_path, node.name))
+                    dataclass_files.append((file_path, node.name))  # noqa: PERF401
        except (SyntaxError, UnicodeDecodeError) as e:
            print(f"Error parsing {file_path}: {e}")
@@ -164,11 +164,12 @@ class SecretStore(StoreBase):

        from clan_cli.vars.generator import Generator

-        manifest = []
        generators = Generator.get_machine_generators(machine, self.flake)
-        for generator in generators:
-            for file in generator.files:
-                manifest.append(f"{generator.name}/{file.name}".encode())
+        manifest = [
+            f"{generator.name}/{file.name}".encode()
+            for generator in generators
+            for file in generator.files
+        ]

        manifest.append(git_hash)
        return b"\n".join(manifest)
@@ -14,7 +14,6 @@ class Backup:


def list_provider(machine: Machine, host: Remote, provider: str) -> list[Backup]:
-    results = []
    backup_metadata = machine.select("config.clan.core.backups")
    list_command = backup_metadata["providers"][provider]["list"]
    proc = host.run(
@@ -35,8 +34,11 @@ def list_provider(machine: Machine, host: Remote, provider: str) -> list[Backup]
        msg = f"Failed to parse json output from provider {provider}:\n{proc.stdout}"
        raise ClanError(msg) from e

-    for archive in parsed_json:
-        results.append(Backup(name=archive["name"], job_name=archive.get("job_name")))
+    results: list[Backup] = []
+    results.extend(
+        Backup(name=archive["name"], job_name=archive.get("job_name"))
+        for archive in parsed_json
+    )
    return results
@@ -444,8 +444,9 @@ class FlakeCacheEntry:
                if not isinstance(selector.value, list):
                    msg = f"Expected list for SET selector value, got {type(selector.value)}"
                    raise ClanError(msg)
-                for subselector in selector.value:
-                    fetched_indices.append(subselector.value)
+                fetched_indices.extend(
+                    subselector.value for subselector in selector.value
+                )
            # if it's just a str, that is the index
            elif selector.type == SelectorType.STR:
                if not isinstance(selector.value, str):
@@ -635,9 +636,9 @@ class FlakeCacheEntry:
        keys_to_select: list[str] = []
        # if we want to select all keys, we take all existing sub elements
        if selector.type == SelectorType.ALL:
-            for key in self.value:
-                if self.value[key].exists:
-                    keys_to_select.append(key)
+            keys_to_select.extend(
+                key for key in self.value if self.value[key].exists
+            )

        # if we want to select a set of keys, we take the keys from the selector
        if selector.type == SelectorType.SET:
@@ -657,9 +658,9 @@ class FlakeCacheEntry:

        # if we are a list, return a list
        if self.is_list:
-            result_list: list[Any] = []
-            for index in keys_to_select:
-                result_list.append(self.value[index].select(selectors[1:]))
+            result_list: list[Any] = [
+                self.value[index].select(selectors[1:]) for index in keys_to_select
+            ]
            return result_list

        # otherwise return a dict
@@ -681,12 +682,10 @@ class FlakeCacheEntry:
            if selector.type == SelectorType.ALL:
                str_selector = "*"
            elif selector.type == SelectorType.SET:
-                subselectors: list[str] = []
                if not isinstance(selector.value, list):
                    msg = f"Expected list for SET selector value in error handling, got {type(selector.value)}"
                    raise ClanError(msg)
-                for subselector in selector.value:
-                    subselectors.append(subselector.value)
+                subselectors = [subselector.value for subselector in selector.value]
                str_selector = "{" + ",".join(subselectors) + "}"
            else:
                if not isinstance(selector.value, str):
@@ -967,9 +966,9 @@ class Flake:

        nix_options = self.nix_options[:] if self.nix_options is not None else []

-        str_selectors: list[str] = []
-        for selector in selectors:
-            str_selectors.append(selectors_as_json(parse_selector(selector)))
+        str_selectors = [
+            selectors_as_json(parse_selector(selector)) for selector in selectors
+        ]

        config = nix_config()
@@ -1079,10 +1078,9 @@ class Flake:
        if self.flake_cache_path is None:
            msg = "Flake cache path cannot be None"
            raise ClanError(msg)
-        not_fetched_selectors = []
-        for selector in selectors:
-            if not self._cache.is_cached(selector):
-                not_fetched_selectors.append(selector)
+        not_fetched_selectors = [
+            selector for selector in selectors if not self._cache.is_cached(selector)
+        ]

        if not_fetched_selectors:
            self.get_from_nix(not_fetched_selectors)
@@ -133,12 +133,7 @@ def list_difference(all_items: list, filter_items: list) -> list:

    """
    # Unmerge the lists
-    res = []
-    for value in all_items:
-        if value not in filter_items:
-            res.append(value)
-
-    return res
+    return [value for value in all_items if value not in filter_items]


def find_duplicates(string_list: list[str]) -> list[str]:
@@ -105,11 +105,10 @@ def fix_nullables(schema: dict) -> dict:
    if isinstance(schema, dict):
        if "type" in schema and schema["type"] == "null":
            # Convert 'type: null' to 'nullable: true'
-            new_schema = {"nullable": True}
            # Merge any other keys from original schema except type
-            for k, v in schema.items():
-                if k != "type":
-                    new_schema[k] = v
+            new_schema = {"nullable": True} | {
+                k: v for k, v in schema.items() if k != "type"
+            }
            return fix_nullables(new_schema)

        # If 'oneOf' present