Sandbox cloud-hypervisor VMs with namespaces, landlock, and capability dropping

Harden CH VM services with a whitelist approach:

- TemporaryFileSystem=/ with explicit BindPaths/BindReadOnlyPaths
- PrivateUsers=identity (1:1 UID mapping, zero host capabilities)
- PrivatePIDs, PrivateIPC
- Empty CapabilityBoundingSet, NoNewPrivileges
- DevicePolicy=closed with specific DeviceAllow
- ProtectKernel{Tunables,Modules,Logs}, RestrictNamespaces, LockPersonality
- CH landlock enabled via --landlock flag

Launch flow restructured: ExecStart runs the CH binary directly inside
the sandbox. ExecStartPost=+ (privileged) handles VM creation, TAP FD
passing via vmsilo-tap-open + ch-remote add-net, boot, and socket chown.
ExecStartPre=+/ExecStopPost=+ handle ephemeral disk and socket cleanup.

Network config removed from CH JSON — TAP interfaces added via ch-remote
add-net with FD passing (SCM_RIGHTS). TAP and runtime directory ownership
changed to root. Wayland-seccontext socket moved to gpu/ subdirectory.
Ephemeral disk moved to per-VM /var/lib/vmsilo/{name}/ directory.

New option: cloud-hypervisor.disableSandbox (default false) disables all
hardening except seccomp.

PrivateNetwork=true is not yet enabled — TAP FD passing works but CH
gets EIO writing to the TAP from inside a private network namespace.
To be investigated separately.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Davíð Steinn Geirsson 2026-03-23 22:52:20 +00:00
parent 483a71cc92
commit cd6a22b0c3
9 changed files with 240 additions and 97 deletions

View file

@@ -82,6 +82,7 @@ The configured user can manage VM services via polkit (no sudo required for `vm-
- `vmsilo-balloond/` — dynamic balloon memory management daemon (equalizes host/guest free memory via virtio-balloon; run `--help` for CLI options)
- `vmsilo-dbus-proxy/` — D-Bus proxy for system tray and notification forwarding between guest and host over vsock:5001
- `vmsilo-wayland-seccontext/` — creates Wayland security context socket (wp_security_context_v1); run by per-VM systemd service before the GPU device service
- `vmsilo-tools/` — Rust workspace for small utilities. Contains `tap-open` (opens TAP device by name, execs command with inherited FD)
**Other:**
- `patches/` — KWin/Plasma patches for VM window decoration colors and clipboard isolation
@@ -115,6 +116,7 @@ See README.md for full usage details and options.
- **GPU device backend**: `vmsilo-<name>-gpu` service runs the GPU device backend sandboxed; selectable via `gpu.backend` between `vhost-device-gpu` (default, vhost-device-gpu in rutabaga mode) and `crosvm` (crosvm device gpu). Both crosvm and cloud-hypervisor VMMs attach via vhost-user. `vmsilo-<name>-wayland-seccontext` must start first. GPU is enabled when any capability (`wayland`, `opengl`, `vulkan`) is true; `wayland` defaults true. Set `gpu.wayland = false` to disable.
- **Per-VM runtime dirs**: all sockets under `/run/vmsilo/<vmname>/` subdirectories (not flat).
- **USB passthrough**: usbip-over-vsock on port 5002. Guest runs `usbip-rs client listen`, host runs one `usbip-rs host connect` per device as `vmsilo-<vm>-usb@<devpath>.service`. Works with both crosvm and cloud-hypervisor.
- **CH sandboxing**: CH VMs use NixOS confinement (chroot), PrivateUsers=identity, PrivateNetwork, PrivatePIDs, PrivateIPC, empty CapabilityBoundingSet. TAP FDs passed via `vmsilo-tap-open` + `ch-remote add-net`. All privileged operations in ExecStartPre=+/ExecStartPost=+/ExecStopPost=+. Gated by `cloud-hypervisor.disableSandbox`.
### Gotchas

View file

@@ -206,6 +206,7 @@ There are a lot of configuration options but you don't really need to touch most
| `cloud-hypervisor.logLevel` | string | `"info"` | Log level for cloud-hypervisor (error, warn, info, debug, trace) |
| `cloud-hypervisor.hugepages` | bool | `false` | Use hugetlbfs-backed memory for this VM. Requires pre-allocated hugepages (`vm.nr_hugepages`). |
| `cloud-hypervisor.seccompPolicy` | `"enforcing"` or `"log"` | `"enforcing"` | Seccomp policy for this VM's cloud-hypervisor instance |
| `cloud-hypervisor.disableSandbox` | bool | `false` | Disable Landlock and systemd hardening. Seccomp controlled separately by `seccompPolicy`. |
| `cloud-hypervisor.extraArgs` | list of strings | `[]` | Extra args passed to cloud-hypervisor |
| `cloud-hypervisor.extraConfig` | attrs | `{}` | Merged into the JSON VM config passed to cloud-hypervisor |
| `rootOverlay.type` | `"raw"` or `"tmpfs"` | `"raw"` | Overlay upper layer: disk-backed (raw) or RAM-backed (tmpfs) |

8
flake.lock generated
View file

@@ -7,11 +7,11 @@
]
},
"locked": {
"lastModified": 1774181920,
"narHash": "sha256-3WCjUz8Lmd3KHaVOpQXKatHtQbS9ODOJnPP2b53mTTg=",
"lastModified": 1774303930,
"narHash": "sha256-TvpRaRdLsrJcx/0X063Gt6a+5FeUG3KdmzqrwWHjcys=",
"ref": "refs/heads/main",
"rev": "7933f33fa97c3523c3e89eda338fb2ba63c3ba6e",
"revCount": 9348,
"rev": "2f77d61b526b53046e2f8c063f81888a61e3131b",
"revCount": 9350,
"type": "git",
"url": "https://git.dsg.is/dsg/cloud-hypervisor.git"
},

View file

@@ -109,6 +109,21 @@
buildInputs = with pkgs; [ wayland ];
};
# Build vmsilo-tools workspace
buildVmsiloTools =
system:
let
pkgs = nixpkgs.legacyPackages.${system};
in
pkgs.rustPlatform.buildRustPackage {
pname = "vmsilo-tools";
version = "0.1.0";
src = ./vmsilo-tools;
cargoLock = {
lockFile = ./vmsilo-tools/Cargo.lock;
};
};
# treefmt configuration
treefmtConfig = {
projectRootFile = "flake.nix";
@@ -127,6 +142,7 @@
vmsilo-balloond = buildVmsiloBalloond system;
vmsilo-dbus-proxy = buildVmsiloDbusProxy system;
vmsilo-wayland-seccontext = buildVmsiloWaylandSeccontext system;
vmsilo-tools = buildVmsiloTools system;
"cloud-hypervisor" = cloud-hypervisor.packages.${system}.cloud-hypervisor;
decoration-tests =
let
@@ -221,6 +237,7 @@
vhost-device-gpu = vhost-device.packages.${pkgs.stdenv.hostPlatform.system}.vhost-device-gpu;
vmsilo-balloond = buildVmsiloBalloond pkgs.stdenv.hostPlatform.system;
vmsilo-dbus-proxy = buildVmsiloDbusProxy pkgs.stdenv.hostPlatform.system;
vmsilo-tools = buildVmsiloTools pkgs.stdenv.hostPlatform.system;
"usbip-rs" = usbip-rs-input.packages.${pkgs.stdenv.hostPlatform.system}.default;
};
};

View file

@@ -142,7 +142,7 @@ let
};
# ── Ephemeral overlay ─────────────────────────────────────────────────
ephemeralDiskPath = "/var/lib/vmsilo/${vm.name}-ephemeral.raw";
ephemeralDiskPath = "/var/lib/vmsilo/${vm.name}/ephemeral.raw";
ephemeralDiskId = "ephemeral";
# ── vhost-user sockets ────────────────────────────────────────────────
@@ -438,7 +438,6 @@ let
};
disks = chDiskEntries;
}
// lib.optionalAttrs (chNetworkEntries != [ ]) { net = chNetworkEntries; }
// lib.optionalAttrs (chFsEntries != [ ]) { fs = chFsEntries; }
// lib.optionalAttrs gpuEnabled {
gpu = [ { socket = "/run/vmsilo/${vm.name}/gpu/gpu.socket"; } ];
@@ -521,6 +520,7 @@ in
configFile = chConfigFile;
verbosityArgs = chVerbosityArgs;
seccompArg = chSeccompArg;
disableSandbox = vm.cloud-hypervisor.disableSandbox;
effectiveExtraArgs = chEffectiveExtraArgs;
bin = chBin;
remote = chRemote;

View file

@@ -100,7 +100,7 @@ in
lib.nameValuePair t.tapName (
{
virtual = true;
virtualOwner = cfg.user;
virtualOwner = "root";
}
// lib.optionalAttrs (t.iface.tap != null && t.iface.tap.hostAddress != null) {
ipv4.addresses =

View file

@@ -824,6 +824,11 @@ let
default = "enforcing";
description = "Seccomp policy for this VM's cloud-hypervisor instance. \"enforcing\" kills on violation, \"log\" logs violations without killing.";
};
disableSandbox = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Disable Landlock and systemd hardening for this VM's cloud-hypervisor instance. Seccomp is controlled separately by seccompPolicy.";
};
extraArgs = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
@@ -1052,6 +1057,12 @@ in
internal = true;
};
"vmsilo-tools" = lib.mkOption {
type = lib.types.package;
description = "vmsilo-tools package (injected by flake).";
internal = true;
};
# Generated scripts (set by scripts.nix, consumed by services.nix and package.nix)
vmScripts = lib.mkOption {
type = lib.types.attrsOf lib.types.path;
@@ -1060,6 +1071,20 @@ in
default = { };
};
chExecStartScripts = lib.mkOption {
type = lib.types.attrsOf lib.types.path;
default = { };
description = "CH ExecStart scripts (name -> script path).";
internal = true;
};
chPostScripts = lib.mkOption {
type = lib.types.attrsOf lib.types.path;
default = { };
description = "CH ExecStartPost scripts (name -> script path).";
internal = true;
};
proxyScripts = lib.mkOption {
type = lib.types.attrsOf lib.types.path;
description = "Proxy scripts (name -> script path).";

View file

@@ -114,101 +114,114 @@ let
${c.kernelPath}
'';
# Generate cloud-hypervisor VM launch script
mkCloudHypervisorVmScript =
# Generate cloud-hypervisor ExecStart script (thin wrapper for correct arg handling)
mkChExecStartScript =
vm:
let
c = mkVmConfig vm;
landlock = lib.optionalString (!c.ch.disableSandbox) "--landlock";
in
pkgs.writeShellScript "vmsilo-start-${vm.name}" ''
set -e
${c.iommuValidationScript}
# Remove stale sockets
rm -f /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket
rm -f /run/vmsilo/${vm.name}/vsock.socket
# Step 1: Start cloud-hypervisor VMM in background
${c.ch.bin} \
pkgs.writeShellScript "vmsilo-ch-${vm.name}" ''
exec ${c.ch.bin} \
--api-socket /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket \
--seccomp ${c.ch.seccompArg} \
${landlock} \
${lib.escapeShellArgs c.ch.effectiveExtraArgs} \
${lib.concatStringsSep " " c.ch.verbosityArgs} &
CH_PID=$!
${lib.concatStringsSep " " c.ch.verbosityArgs}
'';
# Generate cloud-hypervisor ExecStartPost script (privileged orchestration)
mkChExecStartPostScript =
vm:
let
c = mkVmConfig vm;
apiSocket = "/run/vmsilo/${vm.name}/cloud-hypervisor-control.socket";
chRemote = "${c.ch.remote} --api-socket ${apiSocket}";
tapOpen = "${cfg._internal."vmsilo-tools"}/bin/vmsilo-tap-open";
# Build add-net commands for each TAP interface
# {TAP_FD} is replaced by vmsilo-tap-open with the actual fd number
addNetCommands = lib.concatMapStringsSep "\n" (ne: ''
${tapOpen} ${ne.tapName} -- \
${c.ch.remote} --api-socket ${apiSocket} \
add-net "fd=[{TAP_FD}],mac=${ne.mac}"
'') c.networkEntries;
in
pkgs.writeShellScript "vmsilo-post-${vm.name}" ''
set -e
# Wait for API socket to appear (up to 30s)
ELAPSED=0
while [ $ELAPSED -lt 60 ] && [ ! -S /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket ]; do
while [ $ELAPSED -lt 60 ] && [ ! -S ${apiSocket} ]; do
sleep 0.5
ELAPSED=$((ELAPSED + 1))
done
if [ ! -S /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket ]; then
if [ ! -S ${apiSocket} ]; then
echo "Timeout waiting for cloud-hypervisor API socket" >&2
kill $CH_PID 2>/dev/null || true
exit 1
fi
# Wait for API server to be ready (socket existing != API ready)
# Wait for API server to be ready
while [ $ELAPSED -lt 60 ]; do
if ${c.ch.remote} --api-socket /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket ping 2>/dev/null; then
if ${chRemote} ping 2>/dev/null; then
break
fi
sleep 0.5
ELAPSED=$((ELAPSED + 1))
done
if ! ${c.ch.remote} --api-socket /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket ping 2>/dev/null; then
echo "Timeout waiting for cloud-hypervisor API to become ready" >&2
kill $CH_PID 2>/dev/null || true
if ! ${chRemote} ping 2>/dev/null; then
echo "Timeout waiting for cloud-hypervisor API" >&2
exit 1
fi
${c.socketWaitScript "kill $CH_PID 2>/dev/null || true"}
# Wait for vhost-user backend sockets
${c.socketWaitScript ""}
# Step 2: Create VM configuration
${c.ch.remote} \
--api-socket /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket \
create -- ${c.ch.configFile}
# Make vhost-user sockets connectable by CH (runs as root without CAP_DAC_OVERRIDE)
${lib.concatMapStringsSep "\n" (sock: "chown 0 ${sock}") c.vhostUserSockets}
# Discover serial PTY allocated by cloud-hypervisor and symlink to standard path
CONSOLE_PTY=$(${c.ch.remote} \
--api-socket /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket \
info | ${pkgs.jq}/bin/jq -r '.config.serial.file')
# Create VM
${chRemote} create -- ${c.ch.configFile}
# Add TAP network interfaces via FD passing
${addNetCommands}
# Boot VM
${chRemote} boot
# Discover serial PTY and symlink
CONSOLE_PTY=$(${chRemote} info | ${pkgs.jq}/bin/jq -r '.config.serial.file')
if [ -z "$CONSOLE_PTY" ] || [ "$CONSOLE_PTY" = "null" ]; then
echo "Failed to discover serial PTY from cloud-hypervisor API" >&2
kill $CH_PID 2>/dev/null || true
echo "Failed to discover serial PTY" >&2
exit 1
fi
ln -sf "$CONSOLE_PTY" /run/vmsilo/${vm.name}/console
chown ${toString cfg._internal.userUid} /run/vmsilo/${vm.name}/console
# Step 3: Boot VM
${c.ch.remote} \
--api-socket /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket \
boot
# vsock socket is created at boot time; wait for it then chown so user-level services can connect
# Wait for vsock socket and chown
ELAPSED=0
while [ ! -S /run/vmsilo/${vm.name}/vsock.socket ] && [ "$ELAPSED" -lt 60 ]; do
while [ ! -S /run/vmsilo/${vm.name}/vsock.socket ] && [ "$ELAPSED" -lt 120 ]; do
sleep 0.5
ELAPSED=$((ELAPSED + 1))
done
if [ ! -S /run/vmsilo/${vm.name}/vsock.socket ]; then
echo "Timeout waiting for vsock socket" >&2
kill $CH_PID 2>/dev/null || true
exit 1
fi
chown ${toString cfg._internal.userUid} /run/vmsilo/${vm.name}/vsock.socket
# Block until VMM exits (VM shutdown)
wait $CH_PID
'';
# Dispatcher: generate the appropriate VM script based on hypervisor choice
mkVmScript =
vm: if vm.hypervisor == "crosvm" then mkCrosvmVmScript vm else mkCloudHypervisorVmScript vm;
vm:
if vm.hypervisor == "crosvm" then
mkCrosvmVmScript vm
else
# Placeholder — CH VMs use ExecStart=<binary> directly (set in services.nix)
pkgs.writeShellScript "vmsilo-start-${vm.name}" ''
echo "ERROR: CH VM ${vm.name} should use direct ExecStart, not this script" >&2
exit 1
'';
# ── Proxy and user-facing scripts ────────────────────────────────────────
@@ -471,6 +484,17 @@ in
proxyScripts = lib.listToAttrs (map (vm: lib.nameValuePair vm.name (mkProxyScript vm)) vms);
chExecStartScripts = lib.listToAttrs (
map (vm: lib.nameValuePair vm.name (mkChExecStartScript vm)) (
lib.filter (vm: vm.hypervisor == "cloud-hypervisor") vms
)
);
chPostScripts = lib.listToAttrs (
map (vm: lib.nameValuePair vm.name (mkChExecStartPostScript vm)) (
lib.filter (vm: vm.hypervisor == "cloud-hypervisor") vms
)
);
userScripts = {
vm-run = vmRunScript;
vm-start = vmStartScript;

View file

@@ -77,8 +77,9 @@ let
Type = "oneshot";
RemainAfterExit = true;
ExecStart = pkgs.writeShellScript "vmsilo-prep-${vm.name}" ''
${pkgs.coreutils}/bin/install -d -m 0775 -g ${toString userGid} \
/run/vmsilo/${vm.name}
${pkgs.coreutils}/bin/install -d -m 0755 -o ${toString userUid} -g ${toString userGid} \
/run/vmsilo/${vm.name} \
/run/vmsilo/${vm.name}/gpu \
/run/vmsilo/${vm.name}/gpu/shader-cache \
/run/vmsilo/${vm.name}/sound
@@ -91,6 +92,92 @@ let
mkVmServices = map (
vm:
let
c = mkVmConfig vm;
isCh = vm.hypervisor == "cloud-hypervisor";
ephemeralPath = "/var/lib/vmsilo/${vm.name}/ephemeral.raw";
createEphemeral = pkgs.writeShellScript "create-ephemeral-${vm.name}" ''
truncate -s ${vm.rootOverlay.size} ${ephemeralPath}
'';
deleteEphemeral = pkgs.writeShellScript "delete-ephemeral-${vm.name}" ''
rm -f ${ephemeralPath}
'';
cleanupSocket = pkgs.writeShellScript "cleanup-socket-${vm.name}" ''
rm -f /run/vmsilo/${vm.name}/crosvm-control.socket
rm -f /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket
rm -f /run/vmsilo/${vm.name}/vsock.socket
${lib.optionalString isCh ''
rm -f /run/vmsilo/${vm.name}/console
''}
'';
# CH-specific: privileged pre/post scripts with + prefix
chStartPreScripts = lib.optionals (vm.rootOverlay.type == "raw") [ "+${createEphemeral}" ] ++ [
"+${pkgs.writeShellScript "ch-pre-${vm.name}" ''
${c.iommuValidationScript}
rm -f /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket
rm -f /run/vmsilo/${vm.name}/vsock.socket
''}"
];
chStopPostScripts = [
"+${cleanupSocket}"
]
++ lib.optionals (vm.rootOverlay.type == "raw") [ "+${deleteEphemeral}" ];
# crosvm: existing behavior (unchanged)
crosvmStartPreScripts = lib.optionals (vm.rootOverlay.type == "raw") [ "${createEphemeral}" ];
crosvmStopPostScripts = [
"${cleanupSocket}"
]
++ lib.optionals (vm.rootOverlay.type == "raw") [ "${deleteEphemeral}" ];
# CH sandbox directives
chSandboxConfig = lib.optionalAttrs (isCh && !c.ch.disableSandbox) {
TemporaryFileSystem = "/";
PrivateUsers = "identity";
CapabilityBoundingSet = "";
NoNewPrivileges = true;
#PrivateNetwork = true;
PrivatePIDs = true;
PrivateIPC = true;
LimitMEMLOCK = "infinity";
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
RestrictNamespaces = true;
LockPersonality = true;
SystemCallArchitectures = "native";
DevicePolicy = "closed";
DeviceAllow = [
"/dev/kvm rw"
"/dev/net/tun rw"
"char-pts rw"
]
++ lib.optionals (c.vmPciDevicePaths != [ ]) [
"/dev/vfio/vfio rw"
"/dev/vfio/* rw"
];
BindReadOnlyPaths = [
"/nix/store"
c.kernelPath
c.initramfsPath
]
++ lib.optional c.rootDiskReadonly c.rootDiskPath;
BindPaths = [
"/run/vmsilo/${vm.name}"
"/dev/net/tun"
"/dev/pts"
]
++ lib.optional (!c.rootDiskReadonly) c.rootDiskPath
++ lib.optional (vm.rootOverlay.type == "raw") "/var/lib/vmsilo/${vm.name}"
++ lib.optionals (c.vmPciDevicePaths != [ ]) (
[ "/dev/vfio" ] ++ map (p: "/sys/bus/pci/devices/${p}") c.vmPciDevicePaths
)
++ lib.optional vm.cloud-hypervisor.hugepages "/dev/hugepages";
};
in
lib.nameValuePair "vmsilo-${vm.name}-vm" {
description = "vmsilo VM: ${vm.name}";
wantedBy = lib.optionals (vm.autoStart && !(vm.gpu.wayland || vm.gpu.opengl || vm.gpu.vulkan)) [
@@ -103,40 +190,27 @@ let
) "vmsilo-${vm.network.netvm}-vm.service"
++ map (brName: "${brName}-netdev.service") (vmBridges vm);
after = [ "network.target" ] ++ map (brName: "${brName}-netdev.service") (vmBridges vm);
serviceConfig =
let
ephemeralPath = "/var/lib/vmsilo/${vm.name}-ephemeral.raw";
createEphemeral = pkgs.writeShellScript "create-ephemeral-${vm.name}" ''
truncate -s ${vm.rootOverlay.size} ${ephemeralPath}
'';
deleteEphemeral = pkgs.writeShellScript "delete-ephemeral-${vm.name}" ''
rm -f ${ephemeralPath}
'';
startPreScripts = lib.optionals (vm.rootOverlay.type == "raw") [ "${createEphemeral}" ];
cleanupSocket = pkgs.writeShellScript "cleanup-socket-${vm.name}" ''
rm -f /run/vmsilo/${vm.name}/crosvm-control.socket
rm -f /run/vmsilo/${vm.name}/cloud-hypervisor-control.socket
rm -f /run/vmsilo/${vm.name}/vsock.socket
${lib.optionalString (vm.hypervisor == "cloud-hypervisor") ''
rm -f /run/vmsilo/${vm.name}/console
''}
'';
stopPostScripts = [
"${cleanupSocket}"
]
++ lib.optionals (vm.rootOverlay.type == "raw") [ "${deleteEphemeral}" ];
in
{
Type = "simple";
ExecStart = "${cfg._internal.vmScripts.${vm.name}}";
ExecStopPost = stopPostScripts;
Environment = [
"RUST_BACKTRACE=full"
];
}
// lib.optionalAttrs (startPreScripts != [ ]) {
ExecStartPre = startPreScripts;
};
serviceConfig = {
Type = "simple";
ExecStart =
if isCh then
"${cfg._internal.chExecStartScripts.${vm.name}}"
else
"${cfg._internal.vmScripts.${vm.name}}";
ExecStopPost = if isCh then chStopPostScripts else crosvmStopPostScripts;
Environment = [
"RUST_BACKTRACE=full"
];
}
// lib.optionalAttrs isCh {
ExecStartPre = chStartPreScripts;
ExecStartPost = [ "+${cfg._internal.chPostScripts.${vm.name}}" ];
}
// lib.optionalAttrs (!isCh && crosvmStartPreScripts != [ ]) {
ExecStartPre = crosvmStartPreScripts;
}
// chSandboxConfig;
}
) allVms;
@@ -417,16 +491,16 @@ let
serviceConfig = {
Type = "simple";
ExecStartPre = [
"-${pkgs.coreutils}/bin/rm -f /run/vmsilo/${vm.name}/wayland-seccontext.socket"
"-${pkgs.coreutils}/bin/rm -f /run/vmsilo/${vm.name}/gpu/wayland-seccontext.socket"
];
ExecStart = pkgs.writeShellScript "vmsilo-wayland-seccontext-${vm.name}" ''
exec ${cfg._internal.vmsilo-wayland-seccontext}/bin/vmsilo-wayland-seccontext \
--wayland-socket /run/user/${toString userUid}/wayland-0 \
--app-id "vmsilo:${vm.name}:${vm.color}" \
--socket-path /run/vmsilo/${vm.name}/wayland-seccontext.socket
--socket-path /run/vmsilo/${vm.name}/gpu/wayland-seccontext.socket
'';
ExecStopPost = pkgs.writeShellScript "cleanup-wayland-seccontext-${vm.name}" ''
rm -f /run/vmsilo/${vm.name}/wayland-seccontext.socket
rm -f /run/vmsilo/${vm.name}/gpu/wayland-seccontext.socket
'';
User = cfg.user;
Environment = [
@@ -488,7 +562,7 @@ let
device \
gpu \
--socket-path /run/vmsilo/${vm.name}/gpu/gpu.socket \
--wayland-sock /run/vmsilo/${vm.name}/wayland-seccontext.socket \
--wayland-sock /run/vmsilo/${vm.name}/gpu/wayland-seccontext.socket \
--params '{"context-types":"${c.gpuContextTypes}"}'
''
else
@@ -498,7 +572,7 @@ let
--headless \
--gpu-mode rutabaga \
--capset ${lib.concatStringsSep "," c.gpuCapsets} \
${lib.optionalString vm.gpu.wayland "--wayland-socket /run/vmsilo/${vm.name}/wayland-seccontext.socket"}
${lib.optionalString vm.gpu.wayland "--wayland-socket /run/vmsilo/${vm.name}/gpu/wayland-seccontext.socket"}
'';
ExecStopPost = pkgs.writeShellScript "cleanup-gpu-${vm.name}" ''
rm -f /run/vmsilo/${vm.name}/gpu/gpu.socket
@@ -518,7 +592,6 @@ let
"/dev/dri"
"/dev/udmabuf"
"/run/vmsilo/${vm.name}/gpu"
"/run/vmsilo/${vm.name}/wayland-seccontext.socket"
];
MountAPIVFS = true;
DeviceAllow = [
@@ -706,10 +779,11 @@ in
"d /var/lib/vmsilo 0755 root root -"
]
++ lib.concatMap (vm: [
"d /run/vmsilo/${vm.name} 0755 ${cfg.user} ${config.users.users.${cfg.user}.group} -"
"d /run/vmsilo/${vm.name} 0775 root ${config.users.users.${cfg.user}.group} -"
"d /run/vmsilo/${vm.name}/gpu 0755 ${cfg.user} ${config.users.users.${cfg.user}.group} -"
"d /run/vmsilo/${vm.name}/sound 0755 ${cfg.user} ${config.users.users.${cfg.user}.group} -"
"d /run/vmsilo/${vm.name}/virtiofs 0755 root root -"
"d /var/lib/vmsilo/${vm.name} 0755 root root -"
]) allVms
++ lib.optionals anySharedHome [
"d /shared 0755 ${cfg.user} ${config.users.users.${cfg.user}.group} -"