diff --git a/CLAUDE.md b/CLAUDE.md index 39d807c..f1299f1 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -82,6 +82,7 @@ The configured user can manage VM services via polkit (no sudo required for `vm- - `vmsilo-balloond/` — dynamic balloon memory management daemon (equalizes host/guest free memory via virtio-balloon; run `--help` for CLI options) - `vmsilo-dbus-proxy/` — D-Bus proxy for system tray and notification forwarding between guest and host over vsock:5001 - `vmsilo-wayland-seccontext/` — creates Wayland security context socket (wp_security_context_v1); run by per-VM systemd service before the GPU device service +- `vmsilo-tools/` — Rust workspace for small utilities. Contains `vmsilo-tap-open` (opens TAP device by name, execs command with inherited FD) **Other:** - `patches/` — KWin/Plasma patches for VM window decoration colors and clipboard isolation @@ -115,6 +116,7 @@ See README.md for full usage details and options. - **GPU device backend**: `vmsilo--gpu` service runs the GPU device backend sandboxed; selectable via `gpu.backend` between `vhost-device-gpu` (default, vhost-device-gpu in rutabaga mode) and `crosvm` (crosvm device gpu). Both crosvm and cloud-hypervisor VMMs attach via vhost-user. `vmsilo--wayland-seccontext` must start first. GPU is enabled when any capability (`wayland`, `opengl`, `vulkan`) is true; `wayland` defaults true. Set `gpu.wayland = false` to disable. - **Per-VM runtime dirs**: all sockets under `/run/vmsilo//` subdirectories (not flat). - **USB passthrough**: usbip-over-vsock on port 5002. Guest runs `usbip-rs client listen`, host runs one `usbip-rs host connect` per device as `vmsilo--usb@.service`. Works with both crosvm and cloud-hypervisor. +- **CH sandboxing**: CH VMs use NixOS confinement (chroot), PrivateUsers=identity, PrivateNetwork (NOTE: currently commented out in services.nix chSandboxConfig — confirm intent), PrivatePIDs, PrivateIPC, empty CapabilityBoundingSet. TAP FDs passed via `vmsilo-tap-open` + `ch-remote add-net`. All privileged operations in ExecStartPre=+/ExecStartPost=+/ExecStopPost=+. 
Gated by `cloud-hypervisor.disableSandbox`. ### Gotchas diff --git a/README.md b/README.md index d519239..022cbda 100644 --- a/README.md +++ b/README.md @@ -206,6 +206,7 @@ There are a lot of configuration options but you don't really need to touch most | `cloud-hypervisor.logLevel` | string | `"info"` | Log level for cloud-hypervisor (error, warn, info, debug, trace) | | `cloud-hypervisor.hugepages` | bool | `false` | Use hugetlbfs-backed memory for this VM. Requires pre-allocated hugepages (`vm.nr_hugepages`). | | `cloud-hypervisor.seccompPolicy` | `"enforcing"` or `"log"` | `"enforcing"` | Seccomp policy for this VM's cloud-hypervisor instance | +| `cloud-hypervisor.disableSandbox` | bool | `false` | Disable Landlock and systemd hardening. Seccomp controlled separately by `seccompPolicy`. | | `cloud-hypervisor.extraArgs` | list of strings | `[]` | Extra args passed to cloud-hypervisor | | `cloud-hypervisor.extraConfig` | attrs | `{}` | Merged into the JSON VM config passed to cloud-hypervisor | | `rootOverlay.type` | `"raw"` or `"tmpfs"` | `"raw"` | Overlay upper layer: disk-backed (raw) or RAM-backed (tmpfs) | diff --git a/flake.lock b/flake.lock index eddc60b..9ae94f2 100644 --- a/flake.lock +++ b/flake.lock @@ -7,11 +7,11 @@ ] }, "locked": { - "lastModified": 1774181920, - "narHash": "sha256-3WCjUz8Lmd3KHaVOpQXKatHtQbS9ODOJnPP2b53mTTg=", + "lastModified": 1774303930, + "narHash": "sha256-TvpRaRdLsrJcx/0X063Gt6a+5FeUG3KdmzqrwWHjcys=", "ref": "refs/heads/main", - "rev": "7933f33fa97c3523c3e89eda338fb2ba63c3ba6e", - "revCount": 9348, + "rev": "2f77d61b526b53046e2f8c063f81888a61e3131b", + "revCount": 9350, "type": "git", "url": "https://git.dsg.is/dsg/cloud-hypervisor.git" }, diff --git a/flake.nix b/flake.nix index 0a10364..52c7d53 100644 --- a/flake.nix +++ b/flake.nix @@ -109,6 +109,21 @@ buildInputs = with pkgs; [ wayland ]; }; + # Build vmsilo-tools workspace + buildVmsiloTools = + system: + let + pkgs = nixpkgs.legacyPackages.${system}; + in + 
pkgs.rustPlatform.buildRustPackage { + pname = "vmsilo-tools"; + version = "0.1.0"; + src = ./vmsilo-tools; + cargoLock = { + lockFile = ./vmsilo-tools/Cargo.lock; + }; + }; + # treefmt configuration treefmtConfig = { projectRootFile = "flake.nix"; @@ -127,6 +142,7 @@ vmsilo-balloond = buildVmsiloBalloond system; vmsilo-dbus-proxy = buildVmsiloDbusProxy system; vmsilo-wayland-seccontext = buildVmsiloWaylandSeccontext system; + vmsilo-tools = buildVmsiloTools system; "cloud-hypervisor" = cloud-hypervisor.packages.${system}.cloud-hypervisor; decoration-tests = let @@ -221,6 +237,7 @@ vhost-device-gpu = vhost-device.packages.${pkgs.stdenv.hostPlatform.system}.vhost-device-gpu; vmsilo-balloond = buildVmsiloBalloond pkgs.stdenv.hostPlatform.system; vmsilo-dbus-proxy = buildVmsiloDbusProxy pkgs.stdenv.hostPlatform.system; + vmsilo-tools = buildVmsiloTools pkgs.stdenv.hostPlatform.system; "usbip-rs" = usbip-rs-input.packages.${pkgs.stdenv.hostPlatform.system}.default; }; }; diff --git a/modules/lib/vm-config.nix b/modules/lib/vm-config.nix index 60ae8a6..d5b51ba 100644 --- a/modules/lib/vm-config.nix +++ b/modules/lib/vm-config.nix @@ -142,7 +142,7 @@ let }; # ── Ephemeral overlay ───────────────────────────────────────────────── - ephemeralDiskPath = "/var/lib/vmsilo/${vm.name}-ephemeral.raw"; + ephemeralDiskPath = "/var/lib/vmsilo/${vm.name}/ephemeral.raw"; ephemeralDiskId = "ephemeral"; # ── vhost-user sockets ──────────────────────────────────────────────── @@ -438,7 +438,6 @@ let }; disks = chDiskEntries; } - // lib.optionalAttrs (chNetworkEntries != [ ]) { net = chNetworkEntries; } // lib.optionalAttrs (chFsEntries != [ ]) { fs = chFsEntries; } // lib.optionalAttrs gpuEnabled { gpu = [ { socket = "/run/vmsilo/${vm.name}/gpu/gpu.socket"; } ]; @@ -521,6 +520,7 @@ in configFile = chConfigFile; verbosityArgs = chVerbosityArgs; seccompArg = chSeccompArg; + disableSandbox = vm.cloud-hypervisor.disableSandbox; effectiveExtraArgs = chEffectiveExtraArgs; bin = chBin; remote 
= chRemote; diff --git a/modules/networking.nix b/modules/networking.nix index 9054ce7..512ccea 100644 --- a/modules/networking.nix +++ b/modules/networking.nix @@ -100,7 +100,7 @@ in lib.nameValuePair t.tapName ( { virtual = true; - virtualOwner = cfg.user; + virtualOwner = "root"; } // lib.optionalAttrs (t.iface.tap != null && t.iface.tap.hostAddress != null) { ipv4.addresses = diff --git a/modules/options.nix b/modules/options.nix index 7d7dd58..57c8267 100644 --- a/modules/options.nix +++ b/modules/options.nix @@ -824,6 +824,11 @@ let default = "enforcing"; description = "Seccomp policy for this VM's cloud-hypervisor instance. \"enforcing\" kills on violation, \"log\" logs violations without killing."; }; + disableSandbox = lib.mkOption { + type = lib.types.bool; + default = false; + description = "Disable Landlock and systemd hardening for this VM's cloud-hypervisor instance. Seccomp is controlled separately by seccompPolicy."; + }; extraArgs = lib.mkOption { type = lib.types.listOf lib.types.str; default = [ ]; @@ -1052,6 +1057,12 @@ in internal = true; }; + "vmsilo-tools" = lib.mkOption { + type = lib.types.package; + description = "vmsilo-tools package (injected by flake)."; + internal = true; + }; + # Generated scripts (set by scripts.nix, consumed by services.nix and package.nix) vmScripts = lib.mkOption { type = lib.types.attrsOf lib.types.path; @@ -1060,6 +1071,20 @@ in default = { }; }; + chExecStartScripts = lib.mkOption { + type = lib.types.attrsOf lib.types.path; + default = { }; + description = "CH ExecStart scripts (name -> script path)."; + internal = true; + }; + + chPostScripts = lib.mkOption { + type = lib.types.attrsOf lib.types.path; + default = { }; + description = "CH ExecStartPost scripts (name -> script path)."; + internal = true; + }; + proxyScripts = lib.mkOption { type = lib.types.attrsOf lib.types.path; description = "Proxy scripts (name -> script path)."; diff --git a/modules/scripts.nix b/modules/scripts.nix index 2e16a3d..f8bb50a 
100644 --- a/modules/scripts.nix +++ b/modules/scripts.nix @@ -114,101 +114,114 @@ let ${c.kernelPath} ''; - # Generate cloud-hypervisor VM launch script - mkCloudHypervisorVmScript = + # Generate cloud-hypervisor ExecStart script (thin wrapper for correct arg handling) + mkChExecStartScript = vm: let c = mkVmConfig vm; + landlock = lib.optionalString (!c.ch.disableSandbox) "--landlock"; in - pkgs.writeShellScript "vmsilo-start-${vm.name}" '' - set -e - - ${c.iommuValidationScript} - - # Remove stale sockets - rm -f /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket - rm -f /run/vmsilo/${vm.name}/vsock.socket - - # Step 1: Start cloud-hypervisor VMM in background - ${c.ch.bin} \ + pkgs.writeShellScript "vmsilo-ch-${vm.name}" '' + exec ${c.ch.bin} \ --api-socket /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket \ --seccomp ${c.ch.seccompArg} \ + ${landlock} \ ${lib.escapeShellArgs c.ch.effectiveExtraArgs} \ - ${lib.concatStringsSep " " c.ch.verbosityArgs} & - CH_PID=$! + ${lib.concatStringsSep " " c.ch.verbosityArgs} + ''; + + # Generate cloud-hypervisor ExecStartPost script (privileged orchestration) + mkChExecStartPostScript = + vm: + let + c = mkVmConfig vm; + apiSocket = "/run/vmsilo/${vm.name}/cloud-hypervisor-control.socket"; + chRemote = "${c.ch.remote} --api-socket ${apiSocket}"; + tapOpen = "${cfg._internal."vmsilo-tools"}/bin/vmsilo-tap-open"; + + # Build add-net commands for each TAP interface + # {TAP_FD} is replaced by vmsilo-tap-open with the actual fd number + addNetCommands = lib.concatMapStringsSep "\n" (ne: '' + ${tapOpen} ${ne.tapName} -- \ + ${c.ch.remote} --api-socket ${apiSocket} \ + add-net "fd=[{TAP_FD}],mac=${ne.mac}" + '') c.networkEntries; + in + pkgs.writeShellScript "vmsilo-post-${vm.name}" '' + set -e # Wait for API socket to appear (up to 30s) ELAPSED=0 - while [ $ELAPSED -lt 60 ] && [ ! -S /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket ]; do + while [ $ELAPSED -lt 60 ] && [ ! 
-S ${apiSocket} ]; do sleep 0.5 ELAPSED=$((ELAPSED + 1)) done - - if [ ! -S /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket ]; then + if [ ! -S ${apiSocket} ]; then echo "Timeout waiting for cloud-hypervisor API socket" >&2 - kill $CH_PID 2>/dev/null || true exit 1 fi - # Wait for API server to be ready (socket existing != API ready) + # Wait for API server to be ready while [ $ELAPSED -lt 60 ]; do - if ${c.ch.remote} --api-socket /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket ping 2>/dev/null; then + if ${chRemote} ping 2>/dev/null; then break fi sleep 0.5 ELAPSED=$((ELAPSED + 1)) done - - if ! ${c.ch.remote} --api-socket /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket ping 2>/dev/null; then - echo "Timeout waiting for cloud-hypervisor API to become ready" >&2 - kill $CH_PID 2>/dev/null || true + if ! ${chRemote} ping 2>/dev/null; then + echo "Timeout waiting for cloud-hypervisor API" >&2 exit 1 fi - ${c.socketWaitScript "kill $CH_PID 2>/dev/null || true"} + # Wait for vhost-user backend sockets + ${c.socketWaitScript ""} - # Step 2: Create VM configuration - ${c.ch.remote} \ - --api-socket /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket \ - create -- ${c.ch.configFile} + # Make vhost-user sockets connectable by CH (runs as root without CAP_DAC_OVERRIDE) + ${lib.concatMapStringsSep "\n" (sock: "chown 0 ${sock}") c.vhostUserSockets} - # Discover serial PTY allocated by cloud-hypervisor and symlink to standard path - CONSOLE_PTY=$(${c.ch.remote} \ - --api-socket /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket \ - info | ${pkgs.jq}/bin/jq -r '.config.serial.file') + # Create VM + ${chRemote} create -- ${c.ch.configFile} + + # Add TAP network interfaces via FD passing + ${addNetCommands} + + # Boot VM + ${chRemote} boot + + # Discover serial PTY and symlink + CONSOLE_PTY=$(${chRemote} info | ${pkgs.jq}/bin/jq -r '.config.serial.file') if [ -z "$CONSOLE_PTY" ] || [ "$CONSOLE_PTY" = "null" ]; then - echo "Failed to discover serial PTY 
from cloud-hypervisor API" >&2 - kill $CH_PID 2>/dev/null || true + echo "Failed to discover serial PTY" >&2 exit 1 fi ln -sf "$CONSOLE_PTY" /run/vmsilo/${vm.name}/console chown ${toString cfg._internal.userUid} /run/vmsilo/${vm.name}/console - # Step 3: Boot VM - ${c.ch.remote} \ - --api-socket /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket \ - boot - - # vsock socket is created at boot time; wait for it then chown so user-level services can connect + # Wait for vsock socket and chown ELAPSED=0 - while [ ! -S /run/vmsilo/${vm.name}/vsock.socket ] && [ "$ELAPSED" -lt 60 ]; do + while [ ! -S /run/vmsilo/${vm.name}/vsock.socket ] && [ "$ELAPSED" -lt 120 ]; do sleep 0.5 ELAPSED=$((ELAPSED + 1)) done if [ ! -S /run/vmsilo/${vm.name}/vsock.socket ]; then echo "Timeout waiting for vsock socket" >&2 - kill $CH_PID 2>/dev/null || true exit 1 fi chown ${toString cfg._internal.userUid} /run/vmsilo/${vm.name}/vsock.socket - - # Block until VMM exits (VM shutdown) - wait $CH_PID ''; # Dispatcher: generate the appropriate VM script based on hypervisor choice mkVmScript = - vm: if vm.hypervisor == "crosvm" then mkCrosvmVmScript vm else mkCloudHypervisorVmScript vm; + vm: + if vm.hypervisor == "crosvm" then + mkCrosvmVmScript vm + else + # Placeholder — CH VMs use ExecStart= directly (set in services.nix) + pkgs.writeShellScript "vmsilo-start-${vm.name}" '' + echo "ERROR: CH VM ${vm.name} should use direct ExecStart, not this script" >&2 + exit 1 + ''; # ── Proxy and user-facing scripts ──────────────────────────────────────── @@ -471,6 +484,17 @@ in proxyScripts = lib.listToAttrs (map (vm: lib.nameValuePair vm.name (mkProxyScript vm)) vms); + chExecStartScripts = lib.listToAttrs ( + map (vm: lib.nameValuePair vm.name (mkChExecStartScript vm)) ( + lib.filter (vm: vm.hypervisor == "cloud-hypervisor") vms + ) + ); + chPostScripts = lib.listToAttrs ( + map (vm: lib.nameValuePair vm.name (mkChExecStartPostScript vm)) ( + lib.filter (vm: vm.hypervisor == "cloud-hypervisor") 
vms + ) + ); + userScripts = { vm-run = vmRunScript; vm-start = vmStartScript; diff --git a/modules/services.nix b/modules/services.nix index ea3f8a6..309734b 100644 --- a/modules/services.nix +++ b/modules/services.nix @@ -77,8 +77,9 @@ let Type = "oneshot"; RemainAfterExit = true; ExecStart = pkgs.writeShellScript "vmsilo-prep-${vm.name}" '' + ${pkgs.coreutils}/bin/install -d -m 0775 -g ${toString userGid} \ + /run/vmsilo/${vm.name} ${pkgs.coreutils}/bin/install -d -m 0755 -o ${toString userUid} -g ${toString userGid} \ - /run/vmsilo/${vm.name} \ /run/vmsilo/${vm.name}/gpu \ /run/vmsilo/${vm.name}/gpu/shader-cache \ /run/vmsilo/${vm.name}/sound @@ -91,6 +92,92 @@ let mkVmServices = map ( vm: + let + c = mkVmConfig vm; + isCh = vm.hypervisor == "cloud-hypervisor"; + ephemeralPath = "/var/lib/vmsilo/${vm.name}/ephemeral.raw"; + + createEphemeral = pkgs.writeShellScript "create-ephemeral-${vm.name}" '' + truncate -s ${vm.rootOverlay.size} ${ephemeralPath} + ''; + deleteEphemeral = pkgs.writeShellScript "delete-ephemeral-${vm.name}" '' + rm -f ${ephemeralPath} + ''; + cleanupSocket = pkgs.writeShellScript "cleanup-socket-${vm.name}" '' + rm -f /run/vmsilo/${vm.name}/crosvm-control.socket + rm -f /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket + rm -f /run/vmsilo/${vm.name}/vsock.socket + ${lib.optionalString isCh '' + rm -f /run/vmsilo/${vm.name}/console + ''} + ''; + + # CH-specific: privileged pre/post scripts with + prefix + chStartPreScripts = lib.optionals (vm.rootOverlay.type == "raw") [ "+${createEphemeral}" ] ++ [ + "+${pkgs.writeShellScript "ch-pre-${vm.name}" '' + ${c.iommuValidationScript} + rm -f /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket + rm -f /run/vmsilo/${vm.name}/vsock.socket + ''}" + ]; + + chStopPostScripts = [ + "+${cleanupSocket}" + ] + ++ lib.optionals (vm.rootOverlay.type == "raw") [ "+${deleteEphemeral}" ]; + + # crosvm: existing behavior (unchanged) + crosvmStartPreScripts = lib.optionals (vm.rootOverlay.type == "raw") [ 
"${createEphemeral}" ]; + crosvmStopPostScripts = [ + "${cleanupSocket}" + ] + ++ lib.optionals (vm.rootOverlay.type == "raw") [ "${deleteEphemeral}" ]; + + # CH sandbox directives + chSandboxConfig = lib.optionalAttrs (isCh && !c.ch.disableSandbox) { + TemporaryFileSystem = "/"; + PrivateUsers = "identity"; + CapabilityBoundingSet = ""; + NoNewPrivileges = true; + #PrivateNetwork = true; + PrivatePIDs = true; + PrivateIPC = true; + LimitMEMLOCK = "infinity"; + ProtectKernelTunables = true; + ProtectKernelModules = true; + ProtectKernelLogs = true; + RestrictNamespaces = true; + LockPersonality = true; + SystemCallArchitectures = "native"; + DevicePolicy = "closed"; + DeviceAllow = [ + "/dev/kvm rw" + "/dev/net/tun rw" + "char-pts rw" + ] + ++ lib.optionals (c.vmPciDevicePaths != [ ]) [ + "/dev/vfio/vfio rw" + "/dev/vfio/* rw" + ]; + BindReadOnlyPaths = [ + "/nix/store" + c.kernelPath + c.initramfsPath + ] + ++ lib.optional c.rootDiskReadonly c.rootDiskPath; + BindPaths = [ + "/run/vmsilo/${vm.name}" + "/dev/net/tun" + "/dev/pts" + ] + ++ lib.optional (!c.rootDiskReadonly) c.rootDiskPath + ++ lib.optional (vm.rootOverlay.type == "raw") "/var/lib/vmsilo/${vm.name}" + ++ lib.optionals (c.vmPciDevicePaths != [ ]) ( + [ "/dev/vfio" ] ++ map (p: "/sys/bus/pci/devices/${p}") c.vmPciDevicePaths + ) + ++ lib.optional vm.cloud-hypervisor.hugepages "/dev/hugepages"; + }; + in lib.nameValuePair "vmsilo-${vm.name}-vm" { description = "vmsilo VM: ${vm.name}"; wantedBy = lib.optionals (vm.autoStart && !(vm.gpu.wayland || vm.gpu.opengl || vm.gpu.vulkan)) [ @@ -103,40 +190,27 @@ let ) "vmsilo-${vm.network.netvm}-vm.service" ++ map (brName: "${brName}-netdev.service") (vmBridges vm); after = [ "network.target" ] ++ map (brName: "${brName}-netdev.service") (vmBridges vm); - serviceConfig = - let - ephemeralPath = "/var/lib/vmsilo/${vm.name}-ephemeral.raw"; - createEphemeral = pkgs.writeShellScript "create-ephemeral-${vm.name}" '' - truncate -s ${vm.rootOverlay.size} ${ephemeralPath} 
- ''; - deleteEphemeral = pkgs.writeShellScript "delete-ephemeral-${vm.name}" '' - rm -f ${ephemeralPath} - ''; - startPreScripts = lib.optionals (vm.rootOverlay.type == "raw") [ "${createEphemeral}" ]; - cleanupSocket = pkgs.writeShellScript "cleanup-socket-${vm.name}" '' - rm -f /run/vmsilo/${vm.name}/crosvm-control.socket - rm -f /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket - rm -f /run/vmsilo/${vm.name}/vsock.socket - ${lib.optionalString (vm.hypervisor == "cloud-hypervisor") '' - rm -f /run/vmsilo/${vm.name}/console - ''} - ''; - stopPostScripts = [ - "${cleanupSocket}" - ] - ++ lib.optionals (vm.rootOverlay.type == "raw") [ "${deleteEphemeral}" ]; - in - { - Type = "simple"; - ExecStart = "${cfg._internal.vmScripts.${vm.name}}"; - ExecStopPost = stopPostScripts; - Environment = [ - "RUST_BACKTRACE=full" - ]; - } - // lib.optionalAttrs (startPreScripts != [ ]) { - ExecStartPre = startPreScripts; - }; + + serviceConfig = { + Type = "simple"; + ExecStart = + if isCh then + "${cfg._internal.chExecStartScripts.${vm.name}}" + else + "${cfg._internal.vmScripts.${vm.name}}"; + ExecStopPost = if isCh then chStopPostScripts else crosvmStopPostScripts; + Environment = [ + "RUST_BACKTRACE=full" + ]; + } + // lib.optionalAttrs isCh { + ExecStartPre = chStartPreScripts; + ExecStartPost = [ "+${cfg._internal.chPostScripts.${vm.name}}" ]; + } + // lib.optionalAttrs (!isCh && crosvmStartPreScripts != [ ]) { + ExecStartPre = crosvmStartPreScripts; + } + // chSandboxConfig; } ) allVms; @@ -417,16 +491,16 @@ let serviceConfig = { Type = "simple"; ExecStartPre = [ - "-${pkgs.coreutils}/bin/rm -f /run/vmsilo/${vm.name}/wayland-seccontext.socket" + "-${pkgs.coreutils}/bin/rm -f /run/vmsilo/${vm.name}/gpu/wayland-seccontext.socket" ]; ExecStart = pkgs.writeShellScript "vmsilo-wayland-seccontext-${vm.name}" '' exec ${cfg._internal.vmsilo-wayland-seccontext}/bin/vmsilo-wayland-seccontext \ --wayland-socket /run/user/${toString userUid}/wayland-0 \ --app-id 
"vmsilo:${vm.name}:${vm.color}" \ - --socket-path /run/vmsilo/${vm.name}/wayland-seccontext.socket + --socket-path /run/vmsilo/${vm.name}/gpu/wayland-seccontext.socket ''; ExecStopPost = pkgs.writeShellScript "cleanup-wayland-seccontext-${vm.name}" '' - rm -f /run/vmsilo/${vm.name}/wayland-seccontext.socket + rm -f /run/vmsilo/${vm.name}/gpu/wayland-seccontext.socket ''; User = cfg.user; Environment = [ @@ -488,7 +562,7 @@ let device \ gpu \ --socket-path /run/vmsilo/${vm.name}/gpu/gpu.socket \ - --wayland-sock /run/vmsilo/${vm.name}/wayland-seccontext.socket \ + --wayland-sock /run/vmsilo/${vm.name}/gpu/wayland-seccontext.socket \ --params '{"context-types":"${c.gpuContextTypes}"}' '' else @@ -498,7 +572,7 @@ let --headless \ --gpu-mode rutabaga \ --capset ${lib.concatStringsSep "," c.gpuCapsets} \ - ${lib.optionalString vm.gpu.wayland "--wayland-socket /run/vmsilo/${vm.name}/wayland-seccontext.socket"} + ${lib.optionalString vm.gpu.wayland "--wayland-socket /run/vmsilo/${vm.name}/gpu/wayland-seccontext.socket"} ''; ExecStopPost = pkgs.writeShellScript "cleanup-gpu-${vm.name}" '' rm -f /run/vmsilo/${vm.name}/gpu/gpu.socket @@ -518,7 +592,6 @@ let "/dev/dri" "/dev/udmabuf" "/run/vmsilo/${vm.name}/gpu" - "/run/vmsilo/${vm.name}/wayland-seccontext.socket" ]; MountAPIVFS = true; DeviceAllow = [ @@ -706,10 +779,11 @@ in "d /var/lib/vmsilo 0755 root root -" ] ++ lib.concatMap (vm: [ - "d /run/vmsilo/${vm.name} 0755 ${cfg.user} ${config.users.users.${cfg.user}.group} -" + "d /run/vmsilo/${vm.name} 0775 root ${config.users.users.${cfg.user}.group} -" "d /run/vmsilo/${vm.name}/gpu 0755 ${cfg.user} ${config.users.users.${cfg.user}.group} -" "d /run/vmsilo/${vm.name}/sound 0755 ${cfg.user} ${config.users.users.${cfg.user}.group} -" "d /run/vmsilo/${vm.name}/virtiofs 0755 root root -" + "d /var/lib/vmsilo/${vm.name} 0755 root root -" ]) allVms ++ lib.optionals anySharedHome [ "d /shared 0755 ${cfg.user} ${config.users.users.${cfg.user}.group} -"