Merge pull request 'Fix aarch64-linux vm support' (#4810) from various-fixes into main

Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/4810
This commit is contained in:
Mic92
2025-08-19 13:21:28 +00:00
14 changed files with 380 additions and 90 deletions

View File

@@ -96,6 +96,7 @@
./nixosModules/flake-module.nix
./pkgs/flake-module.nix
./templates/flake-module.nix
./pkgs/clan-cli/clan_cli/tests/flake-module.nix
]
++ [
(if pathExists ./flakeModules/clan.nix then import ./flakeModules/clan.nix inputs.self else { })

View File

@@ -1,6 +1,5 @@
import dataclasses
import json
import os
from collections.abc import Iterable
from pathlib import Path
@@ -26,7 +25,7 @@ class SopsSetup:
def __init__(self, keys: list[KeyPair]) -> None:
self.keys = keys
self.user = os.environ.get("USER", "admin")
self.user = "admin"
def init(self, flake_path: Path) -> None:
cli.run(

View File

@@ -422,3 +422,103 @@ def test_flake_with_core(
monkeypatch=monkeypatch,
inventory_expr=inventory_expr,
)
@pytest.fixture
def writable_clan_core(
    clan_core: Path,
    tmp_path: Path,
) -> Path:
    """Create a writable copy of ``clan_core`` in a temporary directory.

    If ``clan_core`` is a git repository, tracked and untracked (but not
    ignored) files are copied, plus the ``.git`` directory/file so git
    commands keep working in the copy. Otherwise the whole tree is copied.
    The ``vars/`` and ``sops/`` directories are removed from the copy so
    state cannot leak between tests.

    Returns:
        Path to the writable copy of clan_core.
    """
    import shlex

    temp_flake = tmp_path / "clan-core"
    if (clan_core / ".git").exists():
        temp_flake.mkdir(parents=True)
        # Copy all tracked and untracked files (excluding ignored ones).
        # git's -z and xargs' -0 keep null-terminated filenames safe for
        # names containing spaces; the destination is shell-quoted so the
        # interpolated path cannot break (or inject into) the pipeline.
        quoted_dest = shlex.quote(f"{temp_flake}/")
        sp.run(
            "(git ls-files -z; git ls-files -z --others --exclude-standard) | "
            f"xargs -0 cp --parents -t {quoted_dest}",
            shell=True,
            cwd=clan_core,
            check=True,
        )
        # Copy git metadata to maintain git functionality in the copy.
        if (clan_core / ".git").is_dir():
            shutil.copytree(
                clan_core / ".git", temp_flake / ".git", ignore_dangling_symlinks=True
            )
        else:
            # .git is a plain file for submodules/worktrees
            shutil.copy2(clan_core / ".git", temp_flake / ".git")
    else:
        # Regular copy if not a git repo
        shutil.copytree(clan_core, temp_flake, ignore_dangling_symlinks=True)
    # Make everything writable so tests can modify the copy.
    sp.run(["chmod", "-R", "+w", str(temp_flake)], check=True)
    # Remove per-machine state that must not leak between tests.
    shutil.rmtree(temp_flake / "vars", ignore_errors=True)
    shutil.rmtree(temp_flake / "sops", ignore_errors=True)
    return temp_flake
@pytest.fixture
def vm_test_flake(
    clan_core: Path,
    tmp_path: Path,
) -> Path:
    """Build a throwaway flake that imports the VM test nixOS modules.

    Renders the ``vm_test_flake.nix`` template with the resolved clan-core
    path and the current nix system, writes it as ``flake.nix`` in a temp
    directory and locks the flake.
    """
    flake_dir = tmp_path / "test-flake"
    flake_dir.mkdir(parents=True)

    # Resolve clan-core to its source path via flake metadata.
    metadata_proc = sp.run(
        nix_command(["flake", "metadata", "--json"]),
        cwd=CLAN_CORE,
        capture_output=True,
        text=True,
        check=True,
    )
    core_path = json.loads(metadata_proc.stdout.strip())["path"]
    clan_core_url = f"path:{core_path}"

    # Ask nix which system we are building for.
    current_system = sp.run(
        nix_command(["config", "show", "system"]),
        capture_output=True,
        text=True,
        check=True,
    ).stdout.strip()

    # Render the template with the clan-core URL and system, then write it.
    template = (Path(__file__).parent / "vm_test_flake.nix").read_text()
    rendered = template.replace("__CLAN_CORE__", clan_core_url)
    rendered = rendered.replace("__SYSTEM__", current_system)
    (flake_dir / "flake.nix").write_text(rendered)

    # --allow-dirty-locks handles uncommitted changes in clan-core.
    sp.run(
        nix_command(["flake", "lock", "--allow-dirty-locks"]),
        cwd=flake_dir,
        check=True,
    )
    return flake_dir

View File

@@ -0,0 +1,127 @@
# Flake module providing prebuilt VM machines for the clan-cli test suite
# (test_vm_persistence / test_vm_deployment), defined for both
# x86_64-linux and aarch64-linux so impure tests can run on either host.
{ self, ... }:
{
  # Define machines that use the nixOS modules
  clan.machines = {
    test-vm-persistence-x86_64-linux = {
      imports = [ self.nixosModules.test-vm-persistence ];
      nixpkgs.hostPlatform = "x86_64-linux";
    };
    test-vm-persistence-aarch64-linux = {
      imports = [ self.nixosModules.test-vm-persistence ];
      nixpkgs.hostPlatform = "aarch64-linux";
    };
    test-vm-deployment-x86_64-linux = {
      imports = [ self.nixosModules.test-vm-deployment ];
      nixpkgs.hostPlatform = "x86_64-linux";
    };
    test-vm-deployment-aarch64-linux = {
      imports = [ self.nixosModules.test-vm-deployment ];
      nixpkgs.hostPlatform = "aarch64-linux";
    };
  };
  # The modules are exported so the test flake template
  # (vm_test_flake.nix) can import them by name.
  flake.nixosModules = {
    # NixOS module for test_vm_persistence
    test-vm-persistence =
      { config, ... }:
      {
        system.stateVersion = config.system.nixos.release;
        # Disable services that might cause issues in tests
        systemd.services.logrotate-checkconf.enable = false;
        services.getty.autologinUser = "root";
        # Basic networking setup
        networking.useDHCP = false;
        networking.firewall.enable = false;
        # VM-specific settings
        clan.virtualisation.graphics = false;
        clan.core.networking.targetHost = "client";
        # State configuration for persistence test
        clan.core.state.my_state.folders = [
          "/var/my-state"
          "/var/user-state"
        ];
        # Initialize users for tests
        users.users = {
          root = {
            initialPassword = "root";
          };
          test = {
            initialPassword = "test";
            isSystemUser = true;
            group = "users";
          };
        };
      };
    # NixOS module for test_vm_deployment
    test-vm-deployment =
      { config, lib, ... }:
      {
        system.stateVersion = config.system.nixos.release;
        # Disable services that might cause issues in tests
        systemd.services.logrotate-checkconf.enable = false;
        services.getty.autologinUser = "root";
        # Basic networking setup
        networking.useDHCP = false;
        networking.firewall.enable = false;
        # VM-specific settings
        clan.virtualisation.graphics = false;
        # SSH for deployment tests
        services.openssh.enable = true;
        # Initialize users for tests
        users.users = {
          root = {
            initialPassword = "root";
          };
        };
        # hack to make sure sops evaluation does not fail before the test
        # generates real secrets; mkDefault lets the generated config
        # override the placeholder.
        sops.validateSopsFiles = false;
        sops.secrets."vars/m1_generator/my_secret" = lib.mkDefault {
          sopsFile = builtins.toFile "fake" "";
        };
        # Vars generators configuration
        clan.core.vars.generators = {
          m1_generator = {
            files.my_secret = {
              secret = true;
              path = "/run/secrets/vars/m1_generator/my_secret";
            };
            script = ''
              echo hello > "$out"/my_secret
            '';
          };
          # Shared generator: secrets used by multiple machines;
          # no_deploy_secret checks that deploy = false keeps a secret
          # off the target machine.
          my_shared_generator = {
            share = true;
            files = {
              shared_secret = {
                secret = true;
                path = "/run/secrets/vars/my_shared_generator/shared_secret";
              };
              no_deploy_secret = {
                secret = true;
                deploy = false;
                path = "/run/secrets/vars/my_shared_generator/no_deploy_secret";
              };
            };
            script = ''
              echo hello > "$out"/shared_secret
              echo hello > "$out"/no_deploy_secret
            '';
          };
        };
      };
  };
}

View File

@@ -2,63 +2,33 @@ import json
import subprocess
import sys
from contextlib import ExitStack
from pathlib import Path
import pytest
from clan_cli.tests.age_keys import SopsSetup
from clan_cli.tests.fixtures_flakes import ClanFlake
from clan_cli.tests.helpers import cli
from clan_cli.vms.run import inspect_vm, spawn_vm
from clan_lib import cmd
from clan_lib.flake import Flake
from clan_lib.machines.machines import Machine
from clan_lib.nix import nix_config, nix_eval, run
from clan_lib.nix import nix_eval, run
@pytest.mark.impure
@pytest.mark.skipif(sys.platform == "darwin", reason="preload doesn't work on darwin")
def test_vm_deployment(
flake: ClanFlake,
vm_test_flake: Path,
sops_setup: SopsSetup,
) -> None:
# machine 1
config = nix_config()
machine1_config = flake.machines["m1_machine"]
machine1_config["nixpkgs"]["hostPlatform"] = config["system"]
machine1_config["clan"]["virtualisation"]["graphics"] = False
machine1_config["services"]["getty"]["autologinUser"] = "root"
machine1_config["services"]["openssh"]["enable"] = True
machine1_config["networking"]["firewall"]["enable"] = False
machine1_config["users"]["users"]["root"]["openssh"]["authorizedKeys"]["keys"] = [
# put your key here when debugging and pass ssh_port in run_vm_in_thread call below
]
m1_generator = machine1_config["clan"]["core"]["vars"]["generators"]["m1_generator"]
m1_generator["files"]["my_secret"]["secret"] = True
m1_generator["script"] = """
echo hello > "$out"/my_secret
"""
m1_shared_generator = machine1_config["clan"]["core"]["vars"]["generators"][
"my_shared_generator"
]
m1_shared_generator["share"] = True
m1_shared_generator["files"]["shared_secret"]["secret"] = True
m1_shared_generator["files"]["no_deploy_secret"]["secret"] = True
m1_shared_generator["files"]["no_deploy_secret"]["deploy"] = False
m1_shared_generator["script"] = """
echo hello > "$out"/shared_secret
echo hello > "$out"/no_deploy_secret
"""
flake.refresh()
sops_setup.init(flake.path)
cli.run(["vars", "generate", "--flake", str(flake.path)])
# Set up sops for the test flake machines
sops_setup.init(vm_test_flake)
cli.run(["vars", "generate", "--flake", str(vm_test_flake), "test-vm-deployment"])
# check sops secrets not empty
sops_secrets = json.loads(
run(
nix_eval(
[
f"{flake.path}#nixosConfigurations.m1_machine.config.sops.secrets",
f"{vm_test_flake}#nixosConfigurations.test-vm-deployment.config.sops.secrets",
]
)
).stdout.strip()
@@ -67,7 +37,7 @@ def test_vm_deployment(
my_secret_path = run(
nix_eval(
[
f"{flake.path}#nixosConfigurations.m1_machine.config.clan.core.vars.generators.m1_generator.files.my_secret.path",
f"{vm_test_flake}#nixosConfigurations.test-vm-deployment.config.clan.core.vars.generators.m1_generator.files.my_secret.path",
]
)
).stdout.strip()
@@ -75,15 +45,15 @@ def test_vm_deployment(
shared_secret_path = run(
nix_eval(
[
f"{flake.path}#nixosConfigurations.m1_machine.config.clan.core.vars.generators.my_shared_generator.files.shared_secret.path",
f"{vm_test_flake}#nixosConfigurations.test-vm-deployment.config.clan.core.vars.generators.my_shared_generator.files.shared_secret.path",
]
)
).stdout.strip()
assert "no-such-path" not in shared_secret_path
# run nix flake lock
cmd.run(["nix", "flake", "lock"], cmd.RunOpts(cwd=flake.path))
vm1_config = inspect_vm(machine=Machine("m1_machine", Flake(str(flake.path))))
vm1_config = inspect_vm(
machine=Machine("test-vm-deployment", Flake(str(vm_test_flake)))
)
with ExitStack() as stack:
vm1 = stack.enter_context(spawn_vm(vm1_config, stdin=subprocess.DEVNULL))
qga_m1 = stack.enter_context(vm1.qga_connect())
@@ -92,7 +62,7 @@ def test_vm_deployment(
# check my_secret is deployed
result = qga_m1.run(["cat", "/run/secrets/vars/m1_generator/my_secret"])
assert result.stdout == "hello\n"
# check shared_secret is deployed on m1
# check shared_secret is deployed
result = qga_m1.run(
["cat", "/run/secrets/vars/my_shared_generator/shared_secret"]
)

View File

@@ -2,7 +2,7 @@ from pathlib import Path
from typing import TYPE_CHECKING
import pytest
from clan_cli.tests.fixtures_flakes import ClanFlake, FlakeForTest
from clan_cli.tests.fixtures_flakes import FlakeForTest
from clan_cli.tests.helpers import cli
from clan_cli.tests.stdout import CaptureOutput
from clan_cli.vms.run import inspect_vm, spawn_vm
@@ -24,8 +24,7 @@ def test_inspect(
assert "Cores" in output.out
# @pytest.mark.skipif(no_kvm, reason="Requires KVM")
@pytest.mark.skipif(True, reason="We need to fix vars support for vms for this test")
@pytest.mark.skipif(no_kvm, reason="Requires KVM")
@pytest.mark.impure
def test_run(
monkeypatch: pytest.MonkeyPatch,
@@ -60,30 +59,12 @@ def test_run(
@pytest.mark.skipif(no_kvm, reason="Requires KVM")
@pytest.mark.impure
def test_vm_persistence(
flake: ClanFlake,
vm_test_flake: Path,
) -> None:
# set up a clan flake with some systemd services to test persistence
config = flake.machines["my_machine"]
config["nixpkgs"]["hostPlatform"] = "x86_64-linux"
# logrotate-checkconf doesn't work in VM because /nix/store is owned by nobody
config["systemd"]["services"]["logrotate-checkconf"]["enable"] = False
config["services"]["getty"]["autologinUser"] = "root"
config["clan"]["virtualisation"] = {"graphics": False}
config["clan"]["core"]["networking"] = {"targetHost": "client"}
config["clan"]["core"]["state"]["my_state"]["folders"] = [
# to be owned by root
"/var/my-state",
# to be owned by user 'test'
"/var/user-state",
]
config["users"]["users"] = {
"test": {"initialPassword": "test", "isSystemUser": True, "group": "users"},
"root": {"initialPassword": "root"},
}
flake.refresh()
vm_config = inspect_vm(machine=Machine("my_machine", Flake(str(flake.path))))
# Use the pre-built test VM from the test flake
vm_config = inspect_vm(
machine=Machine("test-vm-persistence", Flake(str(vm_test_flake)))
)
with spawn_vm(vm_config) as vm, vm.qga_connect() as qga:
# create state via qmp command instead of systemd service

View File

@@ -0,0 +1,28 @@
# Template flake for the clan-cli VM tests. __CLAN_CORE__ and __SYSTEM__
# are placeholders substituted by the vm_test_flake pytest fixture before
# `nix flake lock` is run; this file is not a valid flake until then.
{
  inputs.clan-core.url = "__CLAN_CORE__";
  outputs =
    { self, clan-core }:
    let
      # Instantiate a minimal clan with the two prebuilt test machines,
      # pinned to the host system the test runner detected.
      clan = clan-core.lib.clan {
        inherit self;
        meta.name = "test-flake";
        machines = {
          test-vm-persistence = {
            imports = [ clan-core.nixosModules.test-vm-persistence ];
            nixpkgs.hostPlatform = "__SYSTEM__";
          };
          test-vm-deployment = {
            imports = [ clan-core.nixosModules.test-vm-deployment ];
            nixpkgs.hostPlatform = "__SYSTEM__";
          };
        };
      };
    in
    {
      # Re-export what the CLI expects to find on a clan flake.
      inherit (clan.config) nixosConfigurations;
      inherit (clan.config) nixosModules;
      inherit (clan.config) clanInternals;
      clan = clan.config;
    };
}

View File

@@ -1,3 +1,4 @@
import platform
import random
from collections.abc import Generator
from contextlib import contextmanager
@@ -5,6 +6,7 @@ from dataclasses import dataclass
from pathlib import Path
from clan_lib.errors import ClanError
from clan_lib.nix import nix_test_store
from clan_cli.qemu.qmp import QEMUMonitorProtocol
@@ -84,6 +86,44 @@ class QemuCommand:
vsock_cid: int | None = None
def get_machine_options() -> str:
    """Get appropriate QEMU ``-machine`` options for the host architecture.

    Picks an accelerator (HVF on macOS, KVM elsewhere) and a machine type
    suited to the host CPU architecture.

    Returns:
        The value to pass to qemu's ``-machine`` flag.

    Raises:
        ClanError: if the host architecture is not supported.
    """
    arch = platform.machine().lower()
    system = platform.system().lower()

    # macOS uses Hypervisor.framework; Linux and others use KVM.
    accel = "hvf" if system == "darwin" else "kvm"

    if arch in ("x86_64", "amd64", "i386", "i686"):
        # q35 gives modern PCIe support on x86
        return f"q35,memory-backend=mem,accel={accel}"
    if arch in ("aarch64", "arm64"):
        # Use virt machine type for ARM64
        if system == "darwin":
            # macOS ARM uses GIC version 2
            return f"virt,gic-version=2,memory-backend=mem,accel={accel}"
        # Linux ARM uses max GIC version
        return f"virt,gic-version=max,memory-backend=mem,accel={accel}"
    if arch == "armv7l":
        # 32-bit ARM
        return f"virt,memory-backend=mem,accel={accel}"
    if arch in ("riscv64", "riscv32"):
        # RISC-V architectures
        return f"virt,memory-backend=mem,accel={accel}"
    if arch in ("powerpc64le", "powerpc64", "ppc64le", "ppc64"):
        # PowerPC architectures
        return f"powernv,memory-backend=mem,accel={accel}"

    # No fallback - raise an error for unsupported architectures.
    # The list mirrors the names actually accepted above (the previous
    # message omitted the i386/i686, arm64 and ppc64 aliases).
    msg = (
        f"Unsupported architecture: {arch} on {system}. Supported"
        " architectures are: x86_64/amd64/i386/i686, aarch64/arm64,"
        " armv7l, riscv64, riscv32, powerpc64le/powerpc64/ppc64le/ppc64"
    )
    raise ClanError(msg)
def qemu_command(
vm: VmConfig,
nixos_config: dict[str, str],
@@ -98,22 +138,31 @@ def qemu_command(
) -> QemuCommand:
if portmap is None:
portmap = {}
toplevel = Path(nixos_config["toplevel"])
chroot_toplevel = toplevel
initrd = Path(nixos_config["initrd"])
if tmp_store := nix_test_store():
chroot_toplevel = tmp_store / toplevel.relative_to("/")
initrd = tmp_store / initrd.relative_to("/")
kernel_cmdline = [
(Path(nixos_config["toplevel"]) / "kernel-params").read_text(),
f"init={nixos_config['toplevel']}/init",
(chroot_toplevel / "kernel-params").read_text(),
f"init={toplevel}/init",
f"regInfo={nixos_config['regInfo']}/registration",
"console=hvc0",
]
if not vm.waypipe.enable:
kernel_cmdline.append("console=tty0")
hostfwd = ",".join(f"hostfwd=tcp::{h}-:{g}" for h, g in portmap.items())
machine_options = get_machine_options()
# fmt: off
command = [
"qemu-kvm",
"-name", vm.machine_name,
"-m", f'{nixos_config["memorySize"]}M',
"-object", f"memory-backend-memfd,id=mem,size={nixos_config['memorySize']}M",
"-machine", "pc,memory-backend=mem,accel=kvm",
"-machine", machine_options,
"-smp", str(nixos_config["cores"]),
"-cpu", "max",
"-enable-kvm",
@@ -130,9 +179,8 @@ def qemu_command(
"-drive", f"cache=writeback,file={state_img},format=qcow2,id=state,if=none,index=2,werror=report",
"-device", "virtio-blk-pci,drive=state",
"-device", "virtio-keyboard",
"-usb", "-device", "usb-tablet,bus=usb-bus.0",
"-kernel", f'{nixos_config["toplevel"]}/kernel',
"-initrd", nixos_config["initrd"],
"-kernel", f"{chroot_toplevel}/kernel",
"-initrd", str(initrd),
"-append", " ".join(kernel_cmdline),
# qmp & qga setup
"-qmp", f"unix:{qmp_socket_file},server,wait=off",
@@ -140,6 +188,11 @@ def qemu_command(
"-device", "virtio-serial",
"-device", "virtserialport,chardev=qga0,name=org.qemu.guest_agent.0",
]
# USB tablet only works reliably on x86_64 Linux for now, not aarch64-linux.
# TODO: Fix USB tablet support for ARM architectures and test macOS
if platform.system().lower() == "linux" and platform.machine().lower() in ("x86_64", "amd64"):
command.extend(["-usb", "-device", "usb-tablet,bus=usb-bus.0"])
if interactive:
command.extend(
[

View File

@@ -16,7 +16,7 @@ from clan_lib.cmd import CmdOut, Log, RunOpts, handle_io, run
from clan_lib.dirs import module_root, user_cache_dir, vm_state_dir
from clan_lib.errors import ClanCmdError, ClanError
from clan_lib.machines.machines import Machine
from clan_lib.nix import nix_shell
from clan_lib.nix import nix_shell, nix_test_store
from clan_lib.vars.generate import run_generators
from clan_cli.completions import add_dynamic_completer, complete_machines
@@ -57,8 +57,6 @@ def build_vm(
nix_options = []
secrets_dir = get_secrets(machine, tmpdir)
from clan_lib.nix import nix_test_store
output = Path(
machine.select(
"config.system.clan.vm.create",
@@ -84,11 +82,9 @@ def get_secrets(
secrets_dir = tmpdir / "secrets"
secrets_dir.mkdir(parents=True, exist_ok=True)
generate_facts([machine])
run_generators([machine])
machine.secret_facts_store.upload(secrets_dir)
populate_secret_vars(machine, secrets_dir)
return secrets_dir
@@ -386,6 +382,9 @@ def run_command(
) -> None:
machine_obj: Machine = Machine(args.machine, args.flake)
generate_facts([machine_obj])
run_generators([machine_obj])
vm: VmConfig = inspect_vm(machine=machine_obj)
if not os.environ.get("WAYLAND_DISPLAY"):

View File

@@ -1,4 +1,5 @@
import contextlib
import logging
import shutil
import subprocess
import time
@@ -6,7 +7,9 @@ from collections.abc import Iterator
from pathlib import Path
from clan_lib.errors import ClanError
from clan_lib.nix import nix_shell
from clan_lib.nix import nix_shell, nix_test_store
log = logging.getLogger(__name__)
@contextlib.contextmanager
@@ -14,6 +17,9 @@ def start_virtiofsd(socket_path: Path) -> Iterator[None]:
sandbox = "namespace"
if shutil.which("newuidmap") is None:
sandbox = "none"
store_root = nix_test_store() or Path("/")
store = store_root / "nix" / "store"
virtiofsd = nix_shell(
["virtiofsd"],
[
@@ -25,9 +31,10 @@ def start_virtiofsd(socket_path: Path) -> Iterator[None]:
"--sandbox",
sandbox,
"--shared-dir",
"/nix/store",
str(store),
],
)
log.debug("$ {}".format(" ".join(virtiofsd)))
with subprocess.Popen(virtiofsd) as proc:
try:
while not socket_path.exists():

View File

@@ -19,6 +19,7 @@
templateDerivation,
zerotierone,
minifakeroot,
nixosConfigurations,
}:
let
pyDeps = ps: [
@@ -225,6 +226,26 @@ pythonRuntime.pkgs.buildPythonApplication {
# needed by flash list tests
nixpkgs.legacyPackages.x86_64-linux.kbd
nixpkgs.legacyPackages.x86_64-linux.glibcLocales
# Pre-built VMs for impure tests
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.buildPackages.xorg.lndir
(pkgs.perl.withPackages (
p: with p; [
ConfigIniFiles
FileSlurp
]
))
(pkgs.closureInfo { rootPaths = [ ]; }).drvPath
pkgs.desktop-file-utils
pkgs.dbus
pkgs.unzip
pkgs.libxslt
pkgs.getconf
nixosConfigurations."test-vm-persistence-${stdenv.hostPlatform.system}".config.system.clan.vm.create
nixosConfigurations."test-vm-deployment-${stdenv.hostPlatform.system}".config.system.clan.vm.create
];
};
}

View File

@@ -5,6 +5,8 @@
...
}:
{
imports = [ ./clan_cli/tests/flake-module.nix ];
perSystem =
{
self',
@@ -25,6 +27,7 @@
"clanServices"
"pkgs/zerotierone"
"pkgs/minifakeroot"
"pkgs/clan-cli/clan_cli/tests/flake-module.nix"
];
};
};
@@ -54,6 +57,7 @@
"age"
"git"
];
inherit (self) nixosConfigurations;
};
clan-cli-full = pkgs.callPackage ./default.nix {
inherit (inputs) nixpkgs nix-select;
@@ -63,6 +67,7 @@
templateDerivation = templateDerivation;
pythonRuntime = pkgs.python3;
includedRuntimeDeps = lib.importJSON ./clan_lib/nix/allowed-packages.json;
inherit (self) nixosConfigurations;
};
clan-cli-docs = pkgs.stdenv.mkDerivation {
name = "clan-cli-docs";

View File

@@ -15,8 +15,7 @@
mkdir -p "$CLAN_TEST_STORE/nix/store"
mkdir -p "$CLAN_TEST_STORE/nix/var/nix/gcroots"
if [[ -n "''${closureInfo-}" ]]; then
# ${pkgs.findutils}/bin/xargs ${pkgs.xcp}/bin/xcp --recursive --target-directory "$CLAN_TEST_STORE/nix/store" < "$closureInfo/store-paths"
${pkgs.findutils}/bin/xargs ${pkgs.coreutils}/bin/cp --recursive --target "$CLAN_TEST_STORE/nix/store" < "$closureInfo/store-paths"
${pkgs.findutils}/bin/xargs ${pkgs.xcp}/bin/xcp --recursive --target-directory "$CLAN_TEST_STORE/nix/store" < "$closureInfo/store-paths"
${pkgs.nix}/bin/nix-store --load-db --store "$CLAN_TEST_STORE" < "$closureInfo/registration"
fi
'';
@@ -39,7 +38,7 @@
];
postPatch = ''
substituteInPlace nixos_test_lib/nix_setup.py \
--replace '@cp@' '${pkgs.coreutils}/bin/cp' \
--replace '@xcp@' '${pkgs.xcp}/bin/xcp' \
--replace '@nix-store@' '${pkgs.nix}/bin/nix-store' \
--replace '@xargs@' '${pkgs.findutils}/bin/xargs'
'';

View File

@@ -5,7 +5,7 @@ import subprocess
from pathlib import Path
# These paths will be substituted during package build
CP_BIN = "@cp@"
XCP_BIN = "@xcp@"
NIX_STORE_BIN = "@nix-store@"
XARGS_BIN = "@xargs@"
@@ -52,7 +52,7 @@ def setup_nix_in_nix(closure_info: str | None) -> None:
subprocess.run( # noqa: S603
[
XARGS_BIN,
CP_BIN,
XCP_BIN,
"--recursive",
"--target-directory",
f"{tmpdir}/store/nix/store",