- Rename `disks` to `additionalDisks` with structured format (path, readOnly, enableDiscard, blockSize, devIdentifier, useDirect)
- Add custom boot options: rootDisk, kernel, initramfs, rootDiskReadonly
- Add kernelParams for extra kernel command line options
- Add gpu option (default: "context-types=cross-domain:virgl2")
- Add sharedDirectories for crosvm --shared-dir
- Add global crosvmLogLevel option (default: "info")
- Add --name argument to crosvm, set to the VM name
- Migrate deprecated --disk/--rwdisk to the --block format
- Switch flake to nixos-unstable channel

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
420 lines
13 KiB
Nix
420 lines
13 KiB
Nix
# Configuration implementation for qubes-lite NixOS module
|
|
{
|
|
config,
|
|
pkgs,
|
|
lib,
|
|
...
|
|
}:
|
|
|
|
let
|
|
cfg = config.programs.qubes-lite;
|
|
|
|
# Extract the first three octets from CIDR notation
# (e.g. "172.16.200.0/24" -> "172.16.200").
cidrToNetworkBase =
  cidr:
  let
    # Drop the prefix length, keep the bare address.
    address = lib.head (lib.splitString "/" cidr);
  in
  lib.concatStringsSep "." (lib.take 3 (lib.splitString "." address));
|
# First three octets of the configured VM network, e.g. "172.16.200".
networkBase = cidrToNetworkBase cfg.vmNetwork;

# Subset of the configured VMs that have networking enabled.
networkedVms = lib.filter (vm: vm.network) cfg.nixosVms;
# Build a shell `case` statement dispatching on $VM_NAME.
# `mkBranch` renders one branch per configured VM; an unknown name
# prints the list of valid VMs and exits non-zero.
mkVmCase = mkBranch: ''
  case "$VM_NAME" in
    ${lib.concatMapStringsSep "\n    " mkBranch cfg.nixosVms}
    *)
      echo "Unknown VM: $VM_NAME" >&2
      echo "Available VMs: ${lib.concatMapStringsSep ", " (vm: vm.name) cfg.nixosVms}" >&2
      exit 1
      ;;
  esac
'';
|
|
|
|
# Build the guest root filesystem for a VM.
#
# disposable/idleTimeout are deliberately NOT baked into the image --
# they are passed as kernel parameters at runtime, so VMs that differ
# only in those settings can share one rootfs.
buildRootfs =
  vm:
  pkgs.callPackage ../rootfs-nixos {
    inherit (cfg._internal) wayland-proxy-virtwl;
    # Per-VM guest config/programs extend the global ones.
    guestConfig = lib.recursiveUpdate cfg.guestConfig vm.guestConfig;
    guestPrograms = cfg.guestPrograms ++ vm.guestPrograms;
  };
|
|
|
|
# Render one disk configuration as a crosvm `--block` argument.
formatBlockArg =
  disk:
  let
    base = [
      "path=${disk.path}"
      "ro=${lib.boolToString disk.readOnly}"
      "sparse=${lib.boolToString disk.enableDiscard}"
      "block-size=${toString disk.blockSize}"
      "direct=${lib.boolToString disk.useDirect}"
    ];
    # The device identifier is optional; omit the key entirely when unset.
    withId = base ++ lib.optional (disk.devIdentifier != null) "id=${disk.devIdentifier}";
  in
  "--block ${lib.concatStringsSep "," withId}";
|
|
|
|
# Generate the launcher script for one VM.
#
# The script execs `crosvm run` with the VM's disks, kernel, network,
# GPU and Wayland settings. The "spectrum*" kernel parameters are
# presumably consumed by the guest's init (defined elsewhere in this
# repo) -- TODO confirm against the rootfs-nixos sources.
mkVmScript =
  vm:
  let
    # Only build rootfs if we need it: a VM supplying its own rootDisk
    # AND kernel AND initramfs boots entirely from user-provided files.
    needsBuiltRootfs = vm.rootDisk == null || vm.kernel == null || vm.initramfs == null;
    rootfs = if needsBuiltRootfs then buildRootfs vm else null;

    # Determine root disk, kernel, and initramfs sources: a user-supplied
    # vm.rootDisk wins, otherwise fall back to the built nixos.qcow2
    # with conservative defaults (sparse, 512-byte blocks, no O_DIRECT).
    rootDiskPath = if vm.rootDisk != null then vm.rootDisk.path else "${rootfs}/nixos.qcow2";
    rootDiskConfig = {
      path = rootDiskPath;
      readOnly = vm.rootDiskReadonly;
      enableDiscard = if vm.rootDisk != null then vm.rootDisk.enableDiscard else true;
      blockSize = if vm.rootDisk != null then vm.rootDisk.blockSize else 512;
      devIdentifier = if vm.rootDisk != null then vm.rootDisk.devIdentifier else null;
      useDirect = if vm.rootDisk != null then vm.rootDisk.useDirect else false;
    };
    kernelPath = if vm.kernel != null then vm.kernel else "${rootfs}/bzImage";
    initramfsPath = if vm.initramfs != null then vm.initramfs else "${rootfs}/initrd";

    # Addressing: the VM gets .id, the host-side gateway gets .id - 1
    # (vm.id is asserted elsewhere to be odd, so the pair fits one /31).
    vmIp = "${networkBase}.${toString vm.id}";
    gwIp = "${networkBase}.${toString (vm.id - 1)}";
    ipv6 = "fd4d:06ff:48e4:${toString (vm.id - 1)}::2/48";
    gwv6 = "fd4d:06ff:48e4:${toString (vm.id - 1)}::1";

    additionalDisksArgs = lib.concatMapStringsSep " " formatBlockArg vm.additionalDisks;
    sharedDirArgs = lib.concatMapStringsSep " " (d: "--shared-dir ${d}") vm.sharedDirectories;
    # NOTE(review): a param containing a double quote would break this
    # quoting -- assumed not to occur in practice.
    extraKernelParams = lib.concatMapStringsSep " " (p: "-p \"${p}\"") vm.kernelParams;
  in
  pkgs.writeShellScript "qubes-lite-start-${vm.name}" ''
    #!/bin/sh
    set -e

    # Clean up stale socket
    rm -f "$XDG_RUNTIME_DIR/crosvm-${vm.name}.sock"

    exec ${cfg._internal.crosvm}/bin/crosvm run \
      --name ${vm.name} \
      --log-level=${cfg.crosvmLogLevel} \
      -m ${toString vm.memory} \
      --initrd=${initramfsPath} \
      --serial=hardware=virtio-console \
      ${formatBlockArg rootDiskConfig} \
      ${additionalDisksArgs} \
      ${lib.optionalString (rootfs != null) ''-p "init=${rootfs.config.system.build.toplevel}/init"''} \
      -p "net.ifnames=0" \
      -p "spectrumname=${vm.name}" \
      ${lib.optionalString vm.network ''
        -p "spectrumip=${vmIp}" \
        -p "spectrumgw=${gwIp}" \
        -p "spectrumip6=${ipv6}" \
        -p "spectrumgw6=${gwv6}" \
        --tap-name tap${vm.name} \
      ''} \
      ${lib.optionalString vm.disposable ''
        -p "isDisposable=1" \
        -p "idleTimeout=${toString vm.idleTimeout}" \
      ''} \
      ${extraKernelParams} \
      ${sharedDirArgs} \
      --cid ${toString vm.id} \
      --cpus ${toString vm.cpus} \
      --gpu=${vm.gpu} \
      -s "$XDG_RUNTIME_DIR/crosvm-${vm.name}.sock" \
      --wayland-sock "$XDG_RUNTIME_DIR/$WAYLAND_DISPLAY" \
      ${kernelPath}
  '';
|
|
|
|
# vm-run: run a command inside a VM (socket-activated).
vmRunScript = pkgs.writeShellScript "vm-run" ''
  if [ "$#" -lt 2 ]; then
    echo "Usage: vm-run <vm-name> <command> [args...]" >&2
    exit 1
  fi

  VM_NAME="$1"
  shift

  SOCKET="$XDG_RUNTIME_DIR/qubes-lite/$VM_NAME.sock"

  if ! [ -S "$SOCKET" ]; then
    echo "Unknown VM or socket not active: $VM_NAME" >&2
    echo "Available VMs: ${lib.concatMapStringsSep ", " (vm: vm.name) cfg.nixosVms}" >&2
    exit 1
  fi

  # Writing to the socket triggers systemd activation if needed.
  echo "$@" | ${pkgs.socat}/bin/socat - UNIX-CONNECT:"$SOCKET"
'';
|
|
|
|
# vm-start-debug: launch a VM in the foreground, bypassing systemd
# socket activation (useful for watching crosvm output directly).
vmStartDebugScript = pkgs.writeShellScript "vm-start-debug" ''
  if [ "$#" -ne 1 ]; then
    echo "Usage: vm-start-debug <vm-name>" >&2
    exit 1
  fi

  VM_NAME="$1"

  ${mkVmCase (vm: "${vm.name}) exec ${mkVmScript vm} ;;")}
'';
|
|
|
|
# vm-stop: ask the guest to power off gracefully.
vmStopScript = pkgs.writeShellScript "vm-stop" ''
  if [ "$#" -ne 1 ]; then
    echo "Usage: vm-stop <vm-name>" >&2
    exit 1
  fi

  VM_NAME="$1"

  # Send shutdown command via vsock
  ${mkVmCase (
    vm: ''${vm.name}) echo "systemctl poweroff" | ${pkgs.socat}/bin/socat - VSOCK-CONNECT:${toString vm.id}:5000 ;;''
  )}
'';
|
|
|
|
# Build an SSH-into-VM command script. vm-shell and vm-root-shell were
# previously verbatim copies differing only in the SSH user and the
# usage line; this helper removes the duplication.
#
# scriptName: the command name (used in the store name and usage text)
# sshUser:    the account to log in as inside the guest
mkSshShellScript =
  scriptName: sshUser:
  pkgs.writeShellScript scriptName ''
    if [ $# -ne 1 ]; then
      echo "Usage: ${scriptName} <vm-name>" >&2
      exit 1
    fi

    VM_NAME="$1"

    ${mkVmCase (vm: "${vm.name}) exec ${pkgs.openssh}/bin/ssh ${sshUser}@vsock/${toString vm.id} ;;")}
  '';

# vm-shell: SSH into VM as user
vmShellScript = mkSshShellScript "vm-shell" "user";

# vm-root-shell: SSH into VM as root
vmRootShellScript = mkSshShellScript "vm-root-shell" "root";
|
|
|
|
# Generate the per-connection proxy script for a VM.
#
# Waits for the guest's vsock control port to accept connections (the
# VM may still be booting after socket activation), then forwards
# stdin/stdout to it.
#
# Bug fix: the original loop slept 0.5s per iteration but counted
# iterations against TIMEOUT=30, so the real timeout was only ~15s.
# We now count half-second ticks against TIMEOUT * 2, making TIMEOUT
# mean seconds as intended.
mkProxyScript =
  vm:
  pkgs.writeShellScript "qubes-lite-proxy-${vm.name}" ''
    CID=${toString vm.id}
    VSOCK_PORT=5000
    TIMEOUT=30   # seconds

    # Poll twice per second until the guest accepts a vsock connection.
    ATTEMPTS=0
    MAX_ATTEMPTS=$((TIMEOUT * 2))
    while [ $ATTEMPTS -lt $MAX_ATTEMPTS ]; do
      if ${pkgs.socat}/bin/socat -u OPEN:/dev/null VSOCK-CONNECT:$CID:$VSOCK_PORT 2>/dev/null; then
        break
      fi
      sleep 0.5
      ATTEMPTS=$((ATTEMPTS + 1))
    done

    if [ $ATTEMPTS -ge $MAX_ATTEMPTS ]; then
      echo "Timeout waiting for VM ${vm.name} to start" >&2
      exit 1
    fi

    # Forward stdin/stdout to vsock
    exec ${pkgs.socat}/bin/socat - VSOCK-CONNECT:$CID:$VSOCK_PORT
  '';
|
|
|
|
# Bash completions for the vm-* commands.
# VM names are discovered at completion time from the systemd user
# sockets, so already-open shells pick up changes after a rebuild.
bashCompletionScript = pkgs.writeText "qubes-lite-completion.bash" ''
  _qubes_lite_vm_names() {
    systemctl --user list-unit-files 'qubes-lite-*.socket' --no-legend 2>/dev/null \
      | ${pkgs.gawk}/bin/awk '{print $1}' \
      | ${pkgs.gnused}/bin/sed 's/qubes-lite-\(.*\)\.socket/\1/'
  }

  # Completion for commands whose sole argument is a VM name.
  _qubes_lite_vm_only() {
    local cur="''${COMP_WORDS[COMP_CWORD]}"
    COMPREPLY=()
    if [[ $COMP_CWORD -eq 1 ]]; then
      COMPREPLY=( $(compgen -W "$(_qubes_lite_vm_names)" -- "$cur") )
    fi
  }

  # Completion for vm-run: first a VM name, then an arbitrary command.
  _qubes_lite_vm_run() {
    local cur="''${COMP_WORDS[COMP_CWORD]}"
    COMPREPLY=()
    if [[ $COMP_CWORD -eq 1 ]]; then
      COMPREPLY=( $(compgen -W "$(_qubes_lite_vm_names)" -- "$cur") )
    else
      compopt -o bashdefault -o default
    fi
  }

  complete -F _qubes_lite_vm_only vm-start-debug
  complete -F _qubes_lite_vm_only vm-stop
  complete -F _qubes_lite_vm_only vm-shell
  complete -F _qubes_lite_vm_only vm-root-shell
  complete -F _qubes_lite_vm_run vm-run
'';
|
|
|
|
# Aggregate package exposing all qubes-lite commands and completions.
vmPackage = pkgs.runCommand "qubes-lite-scripts" { } ''
  mkdir -p $out/bin $out/share/bash-completion/completions

  # Per-VM launcher scripts
  ${lib.concatMapStringsSep "\n" (
    vm: "ln -s ${mkVmScript vm} $out/bin/qubes-lite-start-${vm.name}"
  ) cfg.nixosVms}

  # Management commands
  ln -s ${vmRunScript} $out/bin/vm-run
  ln -s ${vmStartDebugScript} $out/bin/vm-start-debug
  ln -s ${vmStopScript} $out/bin/vm-stop
  ln -s ${vmShellScript} $out/bin/vm-shell
  ln -s ${vmRootShellScript} $out/bin/vm-root-shell

  # Link crosvm for convenience
  ln -s ${cfg._internal.crosvm}/bin/crosvm $out/bin/crosvm

  # Bash completions: one symlink per command name, all pointing at
  # the same completion file.
  ${lib.optionalString cfg.enableBashIntegration ''
    for cmd in vm-run vm-start-debug vm-stop vm-shell vm-root-shell; do
      ln -s ${bashCompletionScript} $out/share/bash-completion/completions/$cmd
    done
  ''}
'';
|
|
|
|
in
|
|
{
|
|
config = lib.mkIf cfg.enable {
|
|
# Validation assertions
assertions =
  let
    allUnique = xs: lib.length xs == lib.length (lib.unique xs);
    vmIds = map (vm: vm.id) cfg.nixosVms;
    vmNames = map (vm: vm.name) cfg.nixosVms;
    # Odd ids 3-255: id - 1 (even) is used for the host side of each
    # tap pair, and ids below 3 are rejected -- presumably because
    # low vsock CIDs are reserved; confirm against crosvm docs.
    perVm = vm: {
      assertion = vm.id >= 3 && vm.id <= 255 && lib.mod vm.id 2 == 1;
      message = "VM '${vm.name}' has invalid id ${toString vm.id}. Must be odd number 3-255.";
    };
  in
  [
    {
      assertion = cfg.natEnable -> cfg.natInterface != "";
      message = "programs.qubes-lite.natInterface must be set when natEnable is true";
    }
    {
      assertion = allUnique vmIds;
      message = "VM IDs must be unique";
    }
    {
      assertion = allUnique vmNames;
      message = "VM names must be unique";
    }
  ]
  ++ map perVm cfg.nixosVms;
|
|
|
|
# Enable IP forwarding
|
|
boot.kernel.sysctl."net.ipv4.ip_forward" = 1;
|
|
|
|
# One host-side TAP interface per networked VM, owned by the
# configured user so crosvm can attach without privileges.
networking.interfaces = lib.listToAttrs (
  map (vm: {
    name = "tap${vm.name}";
    value = {
      virtual = true;
      virtualOwner = cfg.user;
      # Host end of the point-to-point pair: .(id-1) <-> .(id) share a
      # /31 (VM ids are constrained elsewhere to odd values).
      ipv4.addresses = [
        {
          address = "${networkBase}.${toString (vm.id - 1)}";
          prefixLength = 31;
        }
      ];
    };
  }) networkedVms
);
|
|
|
|
# NAT configuration
# Masquerade traffic from each networked VM's /32 address out through
# the configured external interface.
networking.nat = lib.mkIf cfg.natEnable {
  enable = true;
  externalInterface = cfg.natInterface;
  internalIPs = map (vm: "${networkBase}.${toString vm.id}/32") networkedVms;
};
|
|
|
|
# Per-VM activation sockets. With Accept = true, each incoming
# connection spawns an instance of the matching template service.
systemd.user.sockets = lib.listToAttrs (
  map (vm: {
    name = "qubes-lite-${vm.name}";
    value = {
      description = "qubes-lite socket for ${vm.name}";
      wantedBy = [ "default.target" ];
      socketConfig = {
        ListenStream = "%t/qubes-lite/${vm.name}.sock";
        Accept = true;
        DirectoryMode = "0700";
      };
    };
  }) cfg.nixosVms
);
|
|
|
|
# Systemd user services for VMs
systemd.user.services = lib.listToAttrs (
  # VM services (run crosvm)
  map (
    vm:
    lib.nameValuePair "qubes-lite-${vm.name}-vm" {
      description = "qubes-lite VM: ${vm.name}";
      # Needs the graphical session: the launcher script passes
      # $WAYLAND_DISPLAY to crosvm's --wayland-sock.
      after = [ "graphical-session.target" ];
      serviceConfig = {
        Type = "simple";
        ExecStart = "${mkVmScript vm}";
      };
    }
  ) cfg.nixosVms
  ++
  # Proxy template services (per-connection)
  # An Accept=true socket named qubes-lite-<name>.socket activates
  # qubes-lite-<name>@<instance>.service, i.e. this template.
  map (
    vm:
    lib.nameValuePair "qubes-lite-${vm.name}@" {
      description = "qubes-lite proxy for ${vm.name}";
      # Each accepted connection pulls up (and orders after) the VM.
      requires = [ "qubes-lite-${vm.name}-vm.service" ];
      after = [ "qubes-lite-${vm.name}-vm.service" ];
      serviceConfig = {
        Type = "simple";
        StandardInput = "socket";
        StandardOutput = "socket";
        ExecStart = "${mkProxyScript vm}";
      };
    }
  ) cfg.nixosVms
);
|
|
|
|
# Set the package output so other modules can reference the generated
# scripts via the module's `package` option.
programs.qubes-lite.package = vmPackage;

# Add the vm-* scripts (and crosvm) to the system PATH.
environment.systemPackages = [ vmPackage ];

# Ensure /share/bash-completion is linked into the system profile so
# the completion files installed by vmPackage are picked up.
environment.pathsToLink = lib.mkIf cfg.enableBashIntegration [
  "/share/bash-completion"
];
|
|
};
|
|
}
|