vmsilo/modules/services.nix
Davíð Steinn Geirsson 2aec448e71 feat: add per-VM tray.enable option (default false)
Tray proxy was unconditionally enabled for all crosvm VMs. Add
tray.enable so it must be opted into per VM. When disabled, neither
the host-side tray service nor the guest-side tray daemon are created.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-18 19:13:32 +00:00

777 lines
29 KiB
Nix

# Systemd services for vmsilo NixOS module
# Sockets, services (VM, proxy, console relay), tmpfiles, polkit
{
config,
pkgs,
lib,
...
}:
let
cfg = config.programs.vmsilo;
helpers = import ./lib/helpers.nix { inherit lib; };
inherit (helpers) assignVmIds mkEffectiveSharedDirs;
cssColors = import ./css-colors.nix;
# NOTE: getEffectiveInterfaces is intentionally duplicated in networking.nix and scripts.nix.
# It cannot live in helpers.nix (which has no config access) and the three modules
# don't share a common let-binding scope. Keep the copies in sync.
getEffectiveInterfaces =
  vm:
  let
    # Interfaces injected by the netvm machinery; missing entry means none.
    injected = cfg._internal.netvmInjections.${vm.name}.interfaces or { };
  in
  # Injected interfaces take precedence over statically declared ones.
  vm.network.interfaces // injected;
# Map a user-supplied color to hex: "#RRGGBB" passes through, CSS named
# colors are looked up case-insensitively, anything else is an error.
resolveColor =
  color:
  let
    lowered = lib.toLower color;
  in
  if lib.hasPrefix "#" color then
    color
  else
    cssColors.${lowered}
      or (throw "Unknown VM color '${color}'. Use a CSS named color or hex (#RRGGBB).");
# VM list with numeric ids assigned; vm.id is used as the vsock CID by the
# tray proxy below — see helpers.nix (assignVmIds) for the assignment rule.
vms = assignVmIds cfg.nixosVms;
# User UID/GID/home for console relay, shared home, and tray proxy
userUid = config.users.users.${cfg.user}.uid;
userGid = config.users.groups.${config.users.users.${cfg.user}.group}.gid;
userHome = config.users.users.${cfg.user}.home;
# Whether any VM uses sharedHome (sharedHome is either `false` or enabled)
anySharedHome = lib.any (vm: vm.sharedHome != false) (lib.attrValues cfg.nixosVms);
# VMs with GPU enabled (gpu defaults to true, so filter out gpu = false)
gpuVms = lib.filter (vm: vm.gpu != false) (lib.attrValues cfg.nixosVms);
# Compute effective GPU config attrset for a VM.
# gpu = true yields the full default config, gpu = false yields null
# (GPU disabled), and an attrset is passed through unchanged.
getGpuConfig =
  vm:
  if vm.gpu == true then
    {
      wayland = true;
      opengl = true;
      allowWX = null;
      logLevel = null;
      seccompPolicy = "enforcing";
    }
  else if vm.gpu == false then
    null
  else
    vm.gpu;
# Syscall allowlist for the sandboxed crosvm GPU device backend. Applied via
# systemd SystemCallFilter when seccompPolicy = "enforcing" (violations get
# EPERM), or negated into SystemCallLog when seccompPolicy = "log".
gpuSyscallAllowlist = [
  "accept4"
  "access"
  "arch_prctl"
  "bind"
  "brk"
  "capget"
  "capset"
  "clock_gettime"
  "clock_nanosleep"
  "clone"
  "clone3"
  "close"
  "connect"
  "dup"
  "dup2"
  "epoll_create1"
  "epoll_ctl"
  "epoll_pwait"
  "epoll_wait"
  "eventfd2"
  "execve"
  "exit"
  "exit_group"
  "fallocate"
  "fcntl"
  "flock"
  "fstat"
  "fstatfs"
  "ftruncate"
  "futex"
  "getcwd"
  "getdents"
  "getdents64"
  "getegid"
  "geteuid"
  "getgid"
  "getpgrp"
  "getpid"
  "getppid"
  "getrandom"
  "getresgid"
  "getresuid"
  "getsockopt"
  "gettid"
  "gettimeofday"
  "getuid"
  "inotify_add_watch"
  "inotify_init1"
  "inotify_rm_watch"
  "io_uring_enter"
  "io_uring_register"
  "io_uring_setup"
  "ioctl"
  "kcmp"
  "kill"
  "listen"
  "lseek"
  "lstat"
  "madvise"
  "membarrier"
  "memfd_create"
  "mkdir"
  "mknodat"
  "mmap"
  "mprotect"
  "mremap"
  "munmap"
  "nanosleep"
  "newfstatat"
  "open"
  "openat"
  "pipe2"
  "poll"
  "ppoll"
  "prctl"
  "pread64"
  "prlimit64"
  "read"
  "readlink"
  "readlinkat"
  "readv"
  "recvfrom"
  "recvmsg"
  "rename"
  "restart_syscall"
  "rseq"
  "rt_sigaction"
  "rt_sigprocmask"
  "rt_sigreturn"
  "sched_get_priority_max"
  "sched_get_priority_min"
  "sched_getaffinity"
  "sched_setaffinity"
  "sched_setscheduler"
  "sched_yield"
  "sendmmsg"
  "sendmsg"
  "sendto"
  "set_robust_list"
  "set_tid_address"
  "setpriority"
  "setsockopt"
  "shutdown"
  "sigaltstack"
  "socket"
  "socketpair"
  "stat"
  "statfs"
  "statx"
  "sysinfo"
  "tgkill"
  "uname"
  "unlink"
  "unlinkat"
  "userfaultfd"
  "wait4"
  "write"
  "writev"
];
# Get all bridge names used by a VM's TAP interfaces (deduplicated).
vmBridges =
  vm:
  let
    ifaces = lib.attrValues (getEffectiveInterfaces vm);
    # An interface contributes its bridge only when it is a TAP with a
    # non-null tap config and a non-null bridge.
    bridgeOf =
      iface:
      lib.optional (
        iface.type == "tap" && iface.tap != null && iface.tap.bridge != null
      ) iface.tap.bridge;
  in
  lib.unique (lib.concatMap bridgeOf ifaces);
# vmsilo-balloond service definition (host-wide memory balloon daemon)
mkVmsiloBalloondService =
  let
    bcfg = cfg.vmsilo-balloond;
    # Full daemon command line; extraArgs are shell-escaped and appended last.
    cmdline = lib.concatStringsSep " " [
      "${cfg._internal.vmsilo-balloond}/bin/vmsilo-balloond"
      "--log-level ${bcfg.logLevel}"
      "--poll-interval ${bcfg.pollInterval}"
      "--critical-host-percent ${toString bcfg.criticalHostPercent}"
      "--critical-guest-available ${bcfg.criticalGuestAvailable}"
      "--guest-available-bias ${bcfg.guestAvailableBias}"
      "--min-poll-interval ${bcfg.minPollInterval}"
      "--psi-ceiling ${toString bcfg.psiCeiling}"
      "${lib.escapeShellArgs bcfg.extraArgs}"
    ];
  in
  {
    description = "vmsilo-balloond memory management daemon";
    after = [
      "network.target"
      "systemd-tmpfiles-setup.service"
    ];
    wants = [ "systemd-tmpfiles-setup.service" ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      Type = "simple";
      ExecStart = cmdline;
      Restart = "on-failure";
      RestartSec = "5s";
    };
  };
in
{
config = lib.mkIf cfg.enable {
# Create /run/vmsilo directory for VM sockets, plus per-VM runtime
# subdirectories and (when any VM shares a home) the /shared tree.
systemd.tmpfiles.rules =
  let
    grp = config.users.users.${cfg.user}.group;
    # Per-VM runtime dirs; virtiofs stays root-owned.
    perVmDirs = vm: [
      "d /run/vmsilo/${vm.name} 0755 ${cfg.user} ${grp} -"
      "d /run/vmsilo/${vm.name}/gpu 0755 ${cfg.user} ${grp} -"
      "d /run/vmsilo/${vm.name}/sound 0755 ${cfg.user} ${grp} -"
      "d /run/vmsilo/${vm.name}/virtiofs 0755 root root -"
    ];
    sharedHomeDirs = [
      "d /shared 0755 ${cfg.user} ${grp} -"
      "d /var/lib/vmsilo/home-template 0755 ${cfg.user} ${grp} -"
    ];
  in
  [
    "d /run/vmsilo 0755 root root -"
    "d /var/lib/vmsilo 0755 root root -"
  ]
  ++ lib.concatMap perVmDirs (lib.attrValues cfg.nixosVms)
  ++ lib.optionals anySharedHome sharedHomeDirs;
# Socket activation for VMs (system sockets with user ownership).
# Accept=true makes systemd spawn one vmsilo-<name>@ proxy instance per
# incoming connection on the command socket.
systemd.sockets = lib.listToAttrs (
  lib.forEach (lib.attrValues cfg.nixosVms) (vm: {
    name = "vmsilo-${vm.name}";
    value = {
      description = "vmsilo socket for ${vm.name}";
      wantedBy = [ "sockets.target" ];
      socketConfig = {
        ListenStream = "/run/vmsilo/${vm.name}/command.socket";
        Accept = true;
        SocketUser = cfg.user;
        SocketGroup = "root";
        SocketMode = "0600";
      };
    };
  })
);
# Systemd system services for VMs (run as root for PCI passthrough and sandboxing)
systemd.services = lib.listToAttrs (
# Prep services (create runtime directories for each VM).
# Runs before both the activation socket and the VM unit so their
# sockets/files can be created inside these directories.
map (
  vm:
  lib.nameValuePair "vmsilo-${vm.name}-prep" {
    description = "Create runtime directories for VM ${vm.name}";
    before = [
      "vmsilo-${vm.name}.socket"
      "vmsilo-${vm.name}-vm.service"
    ];
    requiredBy = [
      "vmsilo-${vm.name}.socket"
      "vmsilo-${vm.name}-vm.service"
    ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
      # gpu/sound dirs are owned by the vmsilo user (their backends run as
      # that user); the virtiofs dir is left root-owned — the virtiofsd
      # units below set no User= and so run as root.
      ExecStart = pkgs.writeShellScript "vmsilo-prep-${vm.name}" ''
        ${pkgs.coreutils}/bin/install -d -m 0755 -o ${toString userUid} -g ${toString userGid} \
          /run/vmsilo/${vm.name} \
          /run/vmsilo/${vm.name}/gpu \
          /run/vmsilo/${vm.name}/gpu/shader-cache \
          /run/vmsilo/${vm.name}/sound
        ${pkgs.coreutils}/bin/install -d -m 0755 \
          /run/vmsilo/${vm.name}/virtiofs
      '';
    };
  }
) (lib.attrValues cfg.nixosVms)
++
# VM services (run crosvm as root)
map (
  vm:
  lib.nameValuePair "vmsilo-${vm.name}-vm" {
    description = "vmsilo VM: ${vm.name}";
    # Boot-time autostart only for non-GPU VMs. GPU VMs are instead started
    # by the per-session user unit (systemd.user.services below), since they
    # attach to the session's Wayland socket.
    wantedBy = lib.optionals (vm.autoStart && vm.gpu == false) [ "multi-user.target" ];
    # Soft deps: declared dependsOn VMs, the netvm providing connectivity
    # (unless it is "host"), and the netdev units of any bridges our TAP
    # interfaces attach to.
    wants =
      map (depName: "vmsilo-${depName}-vm.service") vm.dependsOn
      ++ lib.optional (
        vm.network.netvm != null && vm.network.netvm != "host"
      ) "vmsilo-${vm.network.netvm}-vm.service"
      ++ map (brName: "${brName}-netdev.service") (vmBridges vm);
    after = [ "network.target" ] ++ map (brName: "${brName}-netdev.service") (vmBridges vm);
    serviceConfig =
      let
        # Scratch disk backing a "raw" root overlay: created fresh on every
        # start and deleted on stop, so overlay state never survives a
        # restart.
        ephemeralPath = "/var/lib/vmsilo/${vm.name}-ephemeral.raw";
        createEphemeral = pkgs.writeShellScript "create-ephemeral-${vm.name}" ''
          truncate -s ${vm.rootOverlay.size} ${ephemeralPath}
        '';
        deleteEphemeral = pkgs.writeShellScript "delete-ephemeral-${vm.name}" ''
          rm -f ${ephemeralPath}
        '';
        startPreScripts = lib.optionals (vm.rootOverlay.type == "raw") [ "${createEphemeral}" ];
        # Remove control sockets for both supported hypervisors.
        cleanupSocket = pkgs.writeShellScript "cleanup-socket-${vm.name}" ''
          rm -f /run/vmsilo/${vm.name}/crosvm-control.socket
          rm -f /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket
        '';
        # Release USB devices still recorded for this VM (crosvm only).
        usbCleanup = pkgs.writeShellScript "usb-cleanup-${vm.name}" ''
          source ${cfg._internal.usbHelperLib}
          usb_cleanup_vm "${vm.name}"
        '';
        stopPostScripts = [
          "${cleanupSocket}"
        ]
        ++ lib.optional (vm.hypervisor == "crosvm") "${usbCleanup}"
        ++ lib.optionals (vm.rootOverlay.type == "raw") [ "${deleteEphemeral}" ];
      in
      {
        Type = "simple";
        # The hypervisor launch script is built elsewhere and exposed via
        # cfg._internal.vmScripts.
        ExecStart = "${cfg._internal.vmScripts.${vm.name}}";
        ExecStopPost = stopPostScripts;
      }
      // lib.optionalAttrs (startPreScripts != [ ]) {
        ExecStartPre = startPreScripts;
      };
  }
) (lib.attrValues cfg.nixosVms)
++
# Proxy template services: one instance is spawned per accepted connection
# on the VM's command socket (Accept=true above); stdin/stdout are the
# connection itself.
lib.forEach (lib.attrValues cfg.nixosVms) (vm: {
  name = "vmsilo-${vm.name}@";
  value = {
    description = "vmsilo proxy for ${vm.name}";
    requires = [ "vmsilo-${vm.name}-vm.service" ];
    after = [ "vmsilo-${vm.name}-vm.service" ];
    serviceConfig = {
      Type = "simple";
      StandardInput = "socket";
      StandardOutput = "socket";
      ExecStart = "${cfg._internal.proxyScripts.${vm.name}}";
    };
  };
})
++
# Console relay services (one per VM)
# Uses PTY so crosvm stays connected when users disconnect
map (
  vm:
  lib.nameValuePair "vmsilo-${vm.name}-console-relay" {
    description = "Console relay for VM ${vm.name}";
    after = [ "vmsilo-${vm.name}-prep.service" ];
    before = [ "vmsilo-${vm.name}-vm.service" ];
    requiredBy = [ "vmsilo-${vm.name}-vm.service" ];
    bindsTo = [ "vmsilo-${vm.name}-vm.service" ];
    serviceConfig = {
      Type = "simple";
      # "-" prefix: ignore failure if the stale paths don't exist.
      ExecStartPre = [
        "-${pkgs.coreutils}/bin/rm -f /run/vmsilo/${vm.name}/console-backend.socket"
        "-${pkgs.coreutils}/bin/rm -f /run/vmsilo/${vm.name}/console"
      ];
      # PTY slave is created as a symlink that users can open
      # (owned by the vmsilo user's uid, mode 0600, raw mode, no echo).
      ExecStart = "${pkgs.socat}/bin/socat UNIX-LISTEN:/run/vmsilo/${vm.name}/console-backend.socket,fork,reuseaddr PTY,link=/run/vmsilo/${vm.name}/console,raw,echo=0,user=${toString userUid},mode=0600";
      Restart = "on-failure";
      RestartSec = "1s";
    };
  }
) (lib.attrValues cfg.nixosVms)
++
# virtiofsd services (one per shared directory per VM)
lib.concatMap (
  vm:
  let
    # Attrset of mount-tag -> directory config, built by helpers.nix;
    # presumably includes a "home" entry when sharedHome is enabled (the
    # tag == "home" special case below relies on that) — see helpers.nix.
    effectiveSharedDirs = mkEffectiveSharedDirs {
      inherit (vm) sharedDirectories sharedHome;
      vmName = vm.name;
      inherit userUid userGid;
    };
    sharedHomeEnabled = vm.sharedHome != false;
    # sharedHome is either a custom path (string) or `true` -> default path.
    sharedHomePath = if builtins.isString vm.sharedHome then vm.sharedHome else "/shared/${vm.name}";
    # First-run seeding: copy the host template, then hand ownership to the
    # vmsilo user. No-op once the directory exists.
    createSharedHome = pkgs.writeShellScript "create-shared-home-${vm.name}" ''
      if [ ! -d ${sharedHomePath} ]; then
        ${pkgs.coreutils}/bin/cp -a /var/lib/vmsilo/home-template ${sharedHomePath}
        chown -R ${toString userUid}:${toString userGid} ${sharedHomePath}
      fi
    '';
    # Build the virtiofsd command line for one share; `tag` is the virtiofs
    # mount tag visible to the guest, `d` the per-directory option set.
    mkVirtiofsdCmd =
      tag: d:
      lib.concatStringsSep " " (
        [
          "${pkgs.virtiofsd}/bin/virtiofsd"
          "--shared-dir ${d.path}"
          "--tag ${tag}"
          "--socket-path /run/vmsilo/${vm.name}/virtiofs/${tag}.socket"
          "--thread-pool-size ${toString d.threadPoolSize}"
          "--inode-file-handles=${d.inodeFileHandles}"
          "--cache ${d.cache}"
          "--log-level ${d.logLevel}"
        ]
        # Boolean flags are emitted only when set (readdirplus is inverted:
        # the flag disables it).
        ++ lib.optional d.xattr "--xattr"
        ++ lib.optional d.posixAcl "--posix-acl"
        ++ lib.optional d.readonly "--readonly"
        ++ lib.optional d.allowMmap "--allow-mmap"
        ++ lib.optional (!d.enableReaddirplus) "--no-readdirplus"
        ++ lib.optional d.writeback "--writeback"
        ++ lib.optional d.allowDirectIo "--allow-direct-io"
        ++ lib.optional d.killprivV2 "--killpriv-v2"
        ++ lib.optional d.preserveNoatime "--preserve-noatime"
        ++ lib.optional (d.uidMap != null) "--uid-map ${d.uidMap}"
        ++ lib.optional (d.gidMap != null) "--gid-map ${d.gidMap}"
        ++ lib.optional (d.translateUid != null) "--translate-uid ${d.translateUid}"
        ++ lib.optional (d.translateGid != null) "--translate-gid ${d.translateGid}"
      );
  in
  lib.mapAttrsToList (
    tag: dirConfig:
    lib.nameValuePair "vmsilo-${vm.name}-virtiofsd-${tag}" {
      description = "virtiofsd ${tag} for VM ${vm.name}";
      after = [ "vmsilo-${vm.name}-prep.service" ];
      before = [ "vmsilo-${vm.name}-vm.service" ];
      requiredBy = [ "vmsilo-${vm.name}-vm.service" ];
      bindsTo = [ "vmsilo-${vm.name}-vm.service" ];
      serviceConfig = {
        Type = "simple";
        ExecStart = mkVirtiofsdCmd tag dirConfig;
        ExecStopPost = pkgs.writeShellScript "cleanup-virtiofsd-${vm.name}-${tag}" ''
          rm -f /run/vmsilo/${vm.name}/virtiofs/${tag}.socket
        '';
      }
      # Only the implicit home share needs first-run seeding.
      // lib.optionalAttrs (tag == "home" && sharedHomeEnabled) {
        ExecStartPre = [ "${createSharedHome}" ];
      };
    }
  ) effectiveSharedDirs
) (lib.attrValues cfg.nixosVms)
++
# vhost-device-sound services (one per VM with sound enabled)
lib.concatMap (
  vm:
  let
    soundEnabled = vm.sound.playback || vm.sound.capture;
    # Map playback/capture flags onto vhost-device-sound stream directions:
    # "output" = playback, "input" = capture. Only evaluated when
    # soundEnabled, so the bare "input" fallback is the capture-only case.
    streams =
      if vm.sound.playback && vm.sound.capture then
        "input,output"
      else if vm.sound.playback then
        "output"
      else
        "input";
  in
  lib.optional soundEnabled (
    lib.nameValuePair "vmsilo-${vm.name}-sound" {
      description = "vhost-device-sound for VM ${vm.name}";
      after = [ "vmsilo-${vm.name}-prep.service" ];
      before = [ "vmsilo-${vm.name}-vm.service" ];
      requiredBy = [ "vmsilo-${vm.name}-vm.service" ];
      bindsTo = [ "vmsilo-${vm.name}-vm.service" ];
      serviceConfig = {
        Type = "simple";
        ExecStart = lib.concatStringsSep " " [
          "${cfg._internal.vhost-device-sound}/bin/vhost-device-sound"
          "--socket /run/vmsilo/${vm.name}/sound/sound.socket"
          "--backend pipewire"
          "--streams ${streams}"
        ];
        ExecStopPost = pkgs.writeShellScript "cleanup-sound-${vm.name}" ''
          rm -f /run/vmsilo/${vm.name}/sound/sound.socket
        '';
        # Runs as the vmsilo user so the pipewire backend can reach that
        # user's session via XDG_RUNTIME_DIR.
        User = cfg.user;
        Environment = [
          "XDG_RUNTIME_DIR=/run/user/${toString userUid}"
          "RUST_BACKTRACE=full"
        ];
      };
    }
  )
) (lib.attrValues cfg.nixosVms)
++ [ (lib.nameValuePair "vmsilo-balloond" mkVmsiloBalloondService) ]
++
# Tray proxy services (crosvm VMs only — kernel vsock required).
# Opt-in per VM via tray.enable. Iterates `vms` (not cfg.nixosVms) because
# the assigned vm.id is passed as the vsock CID below.
lib.concatMap (
  vm:
  lib.optional (vm.tray.enable && vm.hypervisor == "crosvm") (
    lib.nameValuePair "vmsilo-${vm.name}-tray" {
      description = "Tray proxy for VM ${vm.name}";
      wantedBy = [ "vmsilo-${vm.name}-vm.service" ];
      bindsTo = [ "vmsilo-${vm.name}-vm.service" ];
      after = [ "vmsilo-${vm.name}-vm.service" ];
      serviceConfig =
        let
          # Named colors are resolved to hex before being handed to the host
          # tray binary.
          vmColor = resolveColor vm.color;
          kreadconfig6 = "${pkgs.kdePackages.kconfig}/bin/kreadconfig6";
          trayHost = "${cfg._internal.vmsilo-tray}/bin/vmsilo-tray-host";
        in
        {
          Type = "simple";
          # Pick an icon theme matching the user's current KDE color scheme;
          # falls back to light breeze when kreadconfig6 fails or the scheme
          # is anything other than BreezeDark.
          ExecStart = pkgs.writeShellScript "vmsilo-tray-${vm.name}" ''
            color_scheme=$(${kreadconfig6} --group General --key ColorScheme 2>/dev/null)
            case "''${color_scheme:-}" in
              BreezeDark) icon_theme=breeze-dark ;;
              *) icon_theme=breeze ;;
            esac
            exec ${trayHost} \
              --vm-name ${lib.escapeShellArg vm.name} \
              --cid ${toString vm.id} \
              --icon-theme "$icon_theme" \
              --color ${lib.escapeShellArg vmColor} \
              --log-level ${cfg.vmsilo-tray.logLevel}
          '';
          User = cfg.user;
          # Session D-Bus of the vmsilo user; XDG_CONFIG_DIRS points
          # kreadconfig6 at the user's kdedefaults tree.
          Environment = [
            "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString userUid}/bus"
            "XDG_CONFIG_DIRS=${userHome}/.config/kdedefaults"
          ];
          Restart = "on-failure";
          RestartSec = "2s";
        };
    }
  )
) vms
++
# wayland-seccontext services (one per GPU VM — runs as user, before GPU service)
lib.concatMap (
  vm:
  lib.optional (vm.gpu != false) (
    lib.nameValuePair "vmsilo-${vm.name}-wayland-seccontext" {
      description = "Wayland security context for VM ${vm.name}";
      after = [ "vmsilo-${vm.name}-prep.service" ];
      before = [ "vmsilo-${vm.name}-gpu.service" ];
      requiredBy = [ "vmsilo-${vm.name}-gpu.service" ];
      # Lifecycle follows the VM itself, not the GPU backend.
      bindsTo = [ "vmsilo-${vm.name}-vm.service" ];
      serviceConfig = {
        Type = "simple";
        ExecStartPre = [
          # Remove stale socket from previous run
          "-${pkgs.coreutils}/bin/rm -f /run/vmsilo/${vm.name}/wayland-seccontext.socket"
        ];
        # NOTE(review): the app-id embeds vm.color verbatim, unlike the tray
        # proxy which resolves it to hex via resolveColor — confirm this
        # asymmetry is intentional.
        ExecStart = pkgs.writeShellScript "vmsilo-wayland-seccontext-${vm.name}" ''
          exec ${cfg._internal.vmsilo-wayland-seccontext}/bin/vmsilo-wayland-seccontext \
            --wayland-socket /run/user/${toString userUid}/wayland-0 \
            --app-id "vmsilo:${vm.name}:${vm.color}" \
            --socket-path /run/vmsilo/${vm.name}/wayland-seccontext.socket
        '';
        ExecStopPost = pkgs.writeShellScript "cleanup-wayland-seccontext-${vm.name}" ''
          rm -f /run/vmsilo/${vm.name}/wayland-seccontext.socket
        '';
        User = cfg.user;
        Environment = [
          "XDG_RUNTIME_DIR=/run/user/${toString userUid}"
          "RUST_LOG=info"
        ];
        Restart = "on-failure";
        RestartSec = "1s";
      };
    }
  )
) (lib.attrValues cfg.nixosVms)
++
# GPU device backend services (one per GPU VM — sandboxed crosvm device gpu)
lib.concatMap (
  vm:
  let
    gpuConfig = getGpuConfig vm;
    # Per-VM overrides fall back to the global cfg.gpu / cfg.crosvm settings
    # when left at null.
    effectiveAllowWX =
      if gpuConfig != null && gpuConfig.allowWX != null then gpuConfig.allowWX else cfg.gpu.allowWX;
    effectiveGpuLogLevel =
      if gpuConfig != null && gpuConfig.logLevel != null then gpuConfig.logLevel else cfg.crosvm.logLevel;
    # crosvm gpu context types: "cross-domain" when Wayland passthrough is
    # enabled, "virgl2" when OpenGL is; joined with ":" for --params below.
    gpuContextTypes = lib.concatStringsSep ":" (
      lib.filter (x: x != null) [
        (if gpuConfig != null && gpuConfig.wayland then "cross-domain" else null)
        (if gpuConfig != null && gpuConfig.opengl then "virgl2" else null)
      ]
    );
    seccompPolicy = if gpuConfig != null then gpuConfig.seccompPolicy else "enforcing";
  in
  lib.optional (gpuConfig != null) (
    lib.nameValuePair "vmsilo-${vm.name}-gpu" {
      description = "GPU device backend for VM ${vm.name}";
      after = [
        "vmsilo-${vm.name}-prep.service"
        "vmsilo-${vm.name}-wayland-seccontext.service"
      ];
      before = [ "vmsilo-${vm.name}-vm.service" ];
      requiredBy = [ "vmsilo-${vm.name}-vm.service" ];
      bindsTo = [ "vmsilo-${vm.name}-vm.service" ];
      environment = {
        LD_LIBRARY_PATH = "${pkgs.vulkan-loader}/lib";
        # Shader disk cache (NVIDIA __GL_* variable) redirected into the
        # per-VM runtime dir, since the sandbox root below is read-only.
        __GL_SHADER_DISK_CACHE_PATH = "/run/vmsilo/${vm.name}/gpu/shader-cache";
      };
      serviceConfig = {
        Type = "simple";
        ExecStartPre = [
          # "-" prefix: ignore failure when no stale socket exists.
          "-${pkgs.coreutils}/bin/rm -f /run/vmsilo/${vm.name}/gpu/gpu.socket"
        ];
        ExecStart = pkgs.writeShellScript "vmsilo-gpu-${vm.name}" ''
          exec ${cfg._internal.crosvm}/bin/crosvm \
            --no-syslog \
            --log-level=${effectiveGpuLogLevel} \
            device \
            gpu \
            --socket-path /run/vmsilo/${vm.name}/gpu/gpu.socket \
            --wayland-sock /run/vmsilo/${vm.name}/wayland-seccontext.socket \
            --params '{"context-types":"${gpuContextTypes}"}'
        '';
        ExecStopPost = pkgs.writeShellScript "cleanup-gpu-${vm.name}" ''
          rm -f /run/vmsilo/${vm.name}/gpu/gpu.socket
        '';
        User = cfg.user;
        # Sandboxing: empty tmpfs root; read-only store/proc/sysfs/driver
        # paths; write access only to DRM nodes, udmabuf, the per-VM gpu
        # dir, and the seccontext socket.
        TemporaryFileSystem = "/";
        BindReadOnlyPaths = [
          "/nix/store"
          "/proc"
          "/sys/dev/char"
          "/sys/devices"
          "/run/opengl-driver"
        ];
        BindPaths = [
          "/dev/dri"
          "/dev/udmabuf"
          "/run/vmsilo/${vm.name}/gpu"
          "/run/vmsilo/${vm.name}/wayland-seccontext.socket"
        ];
        MountAPIVFS = true;
        PrivateDevices = true;
        DeviceAllow = [
          "/dev/dri rw"
          "/dev/udmabuf rw"
        ];
        CapabilityBoundingSet = "";
        NoNewPrivileges = true;
        PrivateNetwork = true;
        MemoryDenyWriteExecute = !effectiveAllowWX;
        SystemCallArchitectures = "native";
      }
      # "enforcing": syscalls outside the allowlist return EPERM.
      // lib.optionalAttrs (seccompPolicy == "enforcing") {
        SystemCallFilter = lib.concatStringsSep " " gpuSyscallAllowlist;
        SystemCallErrorNumber = "EPERM";
      }
      # "log": journal every syscall NOT on the allowlist ("~" negates).
      // lib.optionalAttrs (seccompPolicy == "log") {
        SystemCallLog = "~${lib.concatStringsSep " " gpuSyscallAllowlist}";
      }
      // {
        LockPersonality = true;
        ProtectKernelTunables = true;
        ProtectKernelModules = true;
        ProtectKernelLogs = true;
        RestrictNamespaces = true;
      };
    }
  )
) (lib.attrValues cfg.nixosVms)
++ [
  # USB attach/detach oneshot template services (invoked by vmsilo-usb CLI).
  # The instance name (%I) carries a colon-separated device descriptor.
  (lib.nameValuePair "vmsilo-usb-attach@" {
    description = "vmsilo USB attach: %I";
    serviceConfig = {
      Type = "oneshot";
      # %I fields: vm_name:devpath:dev_file:vid:pid:serial:busnum:devnum
      ExecStart = "${pkgs.writeShellScript "vmsilo-usb-do-attach" ''
        source ${cfg._internal.usbHelperLib}
        IFS=':' read -r vm_name devpath dev_file vid pid serial busnum devnum <<< "$1"
        usb_lock
        usb_do_attach "$vm_name" "$devpath" "$dev_file" "$vid" "$pid" "$serial" "$busnum" "$devnum"
        usb_unlock
      ''} %I";
    };
  })
  (lib.nameValuePair "vmsilo-usb-detach@" {
    description = "vmsilo USB detach: %I";
    serviceConfig = {
      Type = "oneshot";
      # %I fields: vm_name:devpath
      ExecStart = "${pkgs.writeShellScript "vmsilo-usb-do-detach" ''
        source ${cfg._internal.usbHelperLib}
        IFS=':' read -r vm_name devpath <<< "$1"
        usb_lock
        usb_do_detach "$vm_name" "$devpath"
        usb_unlock
      ''} %I";
    };
  })
]
++
# Persistent USB attach services (one per VM with usbDevices).
# Waits for the hypervisor control socket, then attaches every configured
# device (matched by vendor/product id, optionally serial), best-effort.
lib.concatMap (
  vm:
  lib.optional (vm.usbDevices != [ ]) (
    lib.nameValuePair "vmsilo-${vm.name}-usb-attach" {
      description = "USB device attach for VM ${vm.name}";
      requires = [ "vmsilo-${vm.name}-vm.service" ];
      after = [ "vmsilo-${vm.name}-vm.service" ];
      wantedBy = [ "vmsilo-${vm.name}-vm.service" ];
      bindsTo = [ "vmsilo-${vm.name}-vm.service" ];
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
        ExecStart = pkgs.writeShellScript "vmsilo-usb-attach-${vm.name}" ''
          source ${cfg._internal.usbHelperLib}
          SOCKET="/run/vmsilo/${vm.name}/crosvm-control.socket"
          # Wait for control socket (up to 30s = 60 polls x 0.5s).
          # Fix: the counter counts half-second polls, so the bound must be
          # 60 — the previous bound of 30 only waited 15s despite the
          # comment promising 30s.
          ELAPSED=0
          while [ ! -S "''${SOCKET}" ] && [ "''${ELAPSED}" -lt 60 ]; do
            sleep 0.5
            ELAPSED=$((ELAPSED + 1))
          done
          if [ ! -S "''${SOCKET}" ]; then
            echo "Timeout waiting for control socket: ''${SOCKET}" >&2
            exit 1
          fi
          usb_lock
          ${lib.concatMapStringsSep "\n" (dev: ''
            devices=$(usb_find_by_vidpid "${dev.vendorId}" "${dev.productId}" "${toString (dev.serial or "")}")
            count=$(echo "''${devices}" | ${pkgs.jq}/bin/jq 'length')
            if [ "''${count}" -eq 0 ]; then
              echo "Warning: USB device ${dev.vendorId}:${dev.productId} not found" >&2
            else
              echo "''${devices}" | ${pkgs.jq}/bin/jq -r '.[] | [.devpath, .dev_file, .vid, .pid, .serial, .busnum, .devnum] | @tsv' | \
                while IFS=$'\t' read -r devpath dev_file vid pid serial busnum devnum; do
                  usb_do_attach "${vm.name}" "''${devpath}" "''${dev_file}" "''${vid}" "''${pid}" "''${serial}" "''${busnum}" "''${devnum}" || true
                done
            fi
          '') vm.usbDevices}
          usb_unlock
        '';
      };
    }
  )
) vms
);
# Session-bind user services: tie GPU VM lifecycle to the desktop session.
# GPU VMs connect to the host Wayland socket, which is destroyed on logout.
# For autoStart VMs, this also starts the VM when the session begins.
systemd.user.services = lib.listToAttrs (
  lib.forEach gpuVms (
    vm:
    let
      startsOnLogin = vm.autoStart;
    in
    {
      name = "vmsilo-${vm.name}-session-bind";
      value = {
        description =
          if startsOnLogin then
            "Bind VM ${vm.name} to session (start on login, stop on logout)"
          else
            "Stop VM ${vm.name} on session end";
        bindsTo = [ "graphical-session.target" ];
        after = [ "graphical-session.target" ];
        wantedBy = [ "graphical-session.target" ];
        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
          # Non-autoStart VMs get a no-op start; ExecStop still stops the
          # VM when the graphical session ends.
          ExecStart =
            if startsOnLogin then
              "${pkgs.systemd}/bin/systemctl start vmsilo-${vm.name}-vm.service"
            else
              "${pkgs.coreutils}/bin/true";
          ExecStop = "${pkgs.systemd}/bin/systemctl stop vmsilo-${vm.name}-vm.service";
        };
      };
    }
  )
);
# Polkit rule letting the configured vmsilo user start/stop/restart any
# systemd unit whose name begins with "vmsilo-" without sudo (used by the
# session-bind user services and the vmsilo CLI tools). The JavaScript
# below is installed verbatim into polkit's rule set.
security.polkit.extraConfig = ''
  polkit.addRule(function(action, subject) {
    if (action.id == "org.freedesktop.systemd1.manage-units") {
      var unit = action.lookup("unit");
      if (unit && unit.indexOf("vmsilo-") === 0 &&
          subject.user == "${cfg.user}") {
        return polkit.Result.YES;
      }
    }
  });
'';
};
}