Shares a host directory as /home/user in guest VMs via virtiofs; enabled by default. Set it to true to use /shared/<vmname>, to a custom path string to choose the host location, or to false to disable sharing. The host directory is created with the correct uid:gid ownership when the VM starts. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
548 lines
18 KiB
Nix
548 lines
18 KiB
Nix
# Script generation for vmsilo NixOS module
|
|
# VM launcher scripts and user-facing scripts (vm-run, vm-start, vm-stop, vm-shell)
|
|
{
|
|
config,
|
|
pkgs,
|
|
lib,
|
|
...
|
|
}:
|
|
|
|
let
  # Shorthand for this module's option subtree.
  cfg = config.programs.vmsilo;

  # Pure helper functions shared across the vmsilo modules.
  helpers = import ./lib/helpers.nix { inherit lib; };
  inherit (helpers)
    resolveOptionalConfig
    formatKVArgs
    formatPositionalKVArgs
    parseCIDR
    prefixToNetmask
    isVmSwitchIface
    idxToIfIndex
    ifIndexToPciAddr
    normalizeBdf
    isBdf
    generateMac
    sortedInterfaceList
    sanitizeName
    makeTapName
    ;

  # User UID/GID and runtime directory for explicit paths (system services need these)
  # NOTE(review): users.users.<name>.uid is null for dynamically-allocated users;
  # this assumes cfg.user has an explicit uid — confirm an assertion exists elsewhere.
  userUid = config.users.users.${cfg.user}.uid;
  userGid = config.users.groups.${config.users.users.${cfg.user}.group}.gid;
  userRuntimeDir = "/run/user/${toString userUid}";
|
|
|
|
  # Default PulseAudio sound configuration
  # Used when a VM sets `sound = true`; rendered into crosvm's --virtio-snd flag.
  # Keys are crosvm parameter names — do not rename.
  defaultSoundConfig = {
    backend = "pulse";
    capture = false;
    pulse_socket_path = "${userRuntimeDir}/pulse/native";
    pulse_cookie_path = "/home/${cfg.user}/.config/pulse/cookie";
  };

  # Default GPU configuration
  # Used when a VM sets `gpu = true`; rendered into crosvm's --gpu flag.
  defaultGpuConfig = {
    context-types = "cross-domain:virgl2";
  };
|
|
|
|
# Get effective MAC for an interface (uses user-specified interface name)
|
|
getEffectiveIfaceMac =
|
|
vm: ifName: iface:
|
|
if iface.macAddress != null then iface.macAddress else generateMac vm.name ifName;
|
|
|
|
# Enrich VM config with interface names (keys become _name field)
|
|
enrichVmConfig =
|
|
vm:
|
|
vm
|
|
// {
|
|
network = vm.network // {
|
|
interfaces = lib.mapAttrs (name: iface: iface // { _name = name; }) vm.network.interfaces;
|
|
};
|
|
};
|
|
|
|
# Resolve guestConfig - call if function, use as-is if attrs
|
|
resolveGuestConfig =
|
|
vm:
|
|
let
|
|
enrichedVm = enrichVmConfig vm;
|
|
in
|
|
if builtins.isFunction vm.guestConfig then vm.guestConfig enrichedVm else vm.guestConfig;
|
|
|
|
  # Build rootfs for a VM
  #
  # Produces the guest image derivation via ../rootfs-nixos (provides
  # nixos.qcow2, bzImage and initrd outputs used by mkVmScript). When the VM
  # has sound enabled, pipewire defaults are merged into the guest config via
  # mkDefault so the user's own guestConfig can still override them.
  buildRootfs =
    vm:
    let
      # Enable pipewire in guest when sound is enabled
      soundConfig = lib.optionalAttrs (vm.sound != false) {
        services.pipewire = {
          enable = lib.mkDefault true;
          pulse.enable = lib.mkDefault true;
          # Pipewire volume defaults to 40%, likely because the ALSA device has no mixer controls.
          wireplumber.extraConfig."50-default-volume"."wireplumber.settings" = {
            "device.routes.default-sink-volume" = 1.0;
          };
        };
      };
    in
    pkgs.callPackage ../rootfs-nixos {
      inherit (cfg._internal) wayland-proxy-virtwl sommelier;
      waylandProxy = vm.waylandProxy;
      guestPrograms = vm.guestPrograms;
      # recursiveUpdate: the resolved user guestConfig wins on key collisions.
      guestConfig = lib.recursiveUpdate soundConfig (resolveGuestConfig vm);
    };
|
|
|
|
  # Format a disk configuration as --block argument
  # "path" is emitted positionally (bare value first); remaining attrs become
  # comma-separated key=value pairs.
  formatBlockArg = disk: "--block ${formatPositionalKVArgs [ "path" ] "," "=" disk}";

  # Normalize all isolated devices
  # NOTE(review): this binding is not referenced anywhere else in this file —
  # either dead code or intended for a future consumer; verify before removing.
  normalizedIsolatedDevices = map normalizeBdf cfg.isolatedPciDevices;
|
|
|
|
  # Generate VM launcher script
  #
  # Returns a shell script (one per VM) that validates PCI-passthrough IOMMU
  # preconditions, removes a stale control socket, and exec's crosvm with the
  # full argument list derived from the VM's configuration. Intended to run as
  # root (from the per-VM systemd service or vm-start-debug).
  mkVmScript =
    vm:
    let
      # Only build rootfs if we need it (no custom root/kernel/initramfs)
      needsBuiltRootfs = vm.rootDisk == null || vm.kernel == null || vm.initramfs == null;
      rootfs = if needsBuiltRootfs then buildRootfs vm else null;

      # Determine root disk config: use user's as-is, or built rootfs with rootDiskReadonly
      rootDiskConfig =
        if vm.rootDisk != null then
          vm.rootDisk
        else
          {
            path = "${rootfs}/nixos.qcow2";
            ro = vm.rootDiskReadonly;
          };
      # Fall back to the kernel/initrd from the built rootfs when not overridden.
      kernelPath = if vm.kernel != null then vm.kernel else "${rootfs}/bzImage";
      initramfsPath = if vm.initramfs != null then vm.initramfs else "${rootfs}/initrd";

      # One --block flag per extra disk.
      additionalDisksArgs = lib.concatMapStringsSep " " formatBlockArg vm.additionalDisks;

      # Ephemeral overlay disk (qcow2 mode only)
      # NOTE(review): the qcow2 at ephemeralDiskPath is assumed to exist at
      # start — presumably created by the service's pre-start; confirm in the
      # systemd module.
      sanitizedName = sanitizeName vm.name;
      ephemeralDiskPath = "/var/lib/vmsilo/${vm.name}-ephemeral.qcow2";
      ephemeralDiskId = "${sanitizedName}_ephemeral";
      ephemeralDiskArg = lib.optionalString (
        vm.rootOverlay.type == "qcow2"
      ) "--block path=${ephemeralDiskPath},ro=false,id=${ephemeralDiskId}";

      # Kernel param for overlay type
      # Tells the guest init which overlay strategy to mount (qcow2 disk id or tmpfs).
      rootOverlayKernelParam =
        if vm.rootOverlay.type == "qcow2" then
          ''-p "vmsilo.rootOverlay=qcow2,${ephemeralDiskId}"''
        else
          ''-p "vmsilo.rootOverlay=tmpfs"'';

      # Shared home directory
      # sharedHome: true -> /shared/<name>, string -> custom path, false -> disabled.
      sharedHomePath = if builtins.isString vm.sharedHome then vm.sharedHome else "/shared/${vm.name}";
      sharedHomeEnabled = vm.sharedHome != false;
      effectiveSharedDirs =
        vm.sharedDirectories
        ++ lib.optionals sharedHomeEnabled [
          {
            path = sharedHomePath;
            tag = "home";
            type = "fs";
            uid = userUid;
            gid = userGid;
            uidmap = "${toString userUid} ${toString userUid} 1";
            gidmap = "${toString userGid} ${toString userGid} 1";
          }
        ];

      # Shared directories: path:tag:key=value:...
      # Quoted because uidmap values contain spaces (e.g. uidmap=1000 1000 1)
      sharedDirArgs = lib.concatMapStringsSep " " (
        d: "--shared-dir '${formatPositionalKVArgs [ "path" "tag" ] ":" "=" d}'"
      ) effectiveSharedDirs;
      # User-supplied extra kernel params, each wrapped as -p "...".
      extraKernelParams = lib.concatMapStringsSep " " (p: "-p \"${p}\"") vm.kernelParams;

      # GPU config: false = disabled, true = default config, attrset = custom config
      effectiveGpu = resolveOptionalConfig defaultGpuConfig vm.gpu;

      # Sound config: false = disabled, true = default pulse config, attrset = custom config
      effectiveSound = resolveOptionalConfig defaultSoundConfig vm.sound;

      # Convert BDF to sysfs path
      bdfToSysfs = bdf: "/sys/bus/pci/devices/${normalizeBdf bdf}";

      # PCI devices for this VM (extract path from attrset, normalize BDF)
      vmPciDevicePaths = map (
        dev: if isBdf dev.path then normalizeBdf dev.path else dev.path
      ) vm.pciDevices;
      # Format --vfio arguments with optional kv pairs
      vfioArgs = lib.concatMapStringsSep " " (
        dev:
        let
          sysfsPath = if isBdf dev.path then bdfToSysfs dev.path else dev.path;
          # Everything except "path" (and nulls) is forwarded as key=value.
          remaining = lib.filterAttrs (k: v: k != "path" && v != null) dev;
          kvPart = formatKVArgs "," remaining;
        in
        if kvPart == "" then "--vfio ${sysfsPath}" else "--vfio ${sysfsPath},${kvPart}"
      ) vm.pciDevices;

      # vhost-user arguments (manual entries only, vm-switch handled via networkArgs)
      vhostUserArgs = lib.concatMapStringsSep " " (
        vu: "--vhost-user ${formatKVArgs "," vu}"
      ) vm.vhostUser;

      # Network interface crosvm arguments (tap and vm-switch)
      # Sorted alphabetically by interface name for deterministic PCI slot assignment
      networkArgs = lib.concatStringsSep " \\\n " (
        lib.imap0 (
          idx: entry:
          let
            ifName = entry.name;
            iface = entry.value;
            # List position -> stable interface index -> guest PCI address.
            ifIndex = idxToIfIndex idx;
            pciAddr = ifIndexToPciAddr ifIndex;
            mac = getEffectiveIfaceMac vm ifName iface;
          in
          if iface.type == "tap" then
            let
              tapName = if iface.tap.name != null then iface.tap.name else makeTapName vm.name vm.id ifIndex;
            in
            "--net tap-name=${tapName},mac=${mac},pci-address=${pciAddr}"
          else
            # Non-tap interfaces attach to a vm-switch vhost-user socket.
            let
              socket = "/run/vm-switch/${iface.vmNetwork.name}/${vm.name}/client.sock";
            in
            "--vhost-user type=net,socket=${socket},pci-address=${pciAddr}"
        ) (sortedInterfaceList vm.network.interfaces)
      );

      # Kernel params for network configuration (uses user-specified interface names)
      # Emits kernel "ip=" / "rd.route=" parameters per interface.
      networkKernelParams = lib.concatLists (
        map (
          entry:
          let
            ifName = entry.name;
            iface = entry.value;
          in
          if iface.dhcp then
            [ ''-p "ip=:::::${ifName}:dhcp"'' ]
          else
            # Static IPv4 addresses
            (map (
              addr:
              let
                parsed = parseCIDR addr;
              in
              ''-p "ip=${parsed.ip}:::${prefixToNetmask parsed.prefix}::${ifName}:none"''
            ) iface.addresses)
            # Static IPv6 addresses
            ++ (map (addr: ''-p "ip=[${addr}]:::::${ifName}:none"'') iface.v6Addresses)
            # IPv4 routes
            ++ (lib.mapAttrsToList (dest: r: ''-p "rd.route=${dest}:${r.via}:${ifName}"'') iface.routes)
            # IPv6 routes
            ++ (lib.mapAttrsToList (dest: r: ''-p "rd.route=[${dest}]:[${r.via}]:${ifName}"'') iface.v6Routes)
        ) (sortedInterfaceList vm.network.interfaces)
      );

      # Kernel params for interface naming (vmsilo.ifname=<name>,<mac>)
      # Lets the guest rename its NICs to the user-chosen names by MAC.
      interfaceNameKernelParams = map (
        entry:
        let
          ifName = entry.name;
          mac = getEffectiveIfaceMac vm ifName entry.value;
        in
        ''-p "vmsilo.ifname=${ifName},${mac}"''
      ) (sortedInterfaceList vm.network.interfaces);

      # Nameserver params
      nameserverParams = map (ns: ''-p "nameserver=${ns}"'') vm.network.nameservers;

      # All network kernel params
      allNetworkKernelParams = interfaceNameKernelParams ++ networkKernelParams ++ nameserverParams;

      # Crosvm configuration (per-VM overrides global)
      effectiveLogLevel = if vm.crosvm.logLevel != null then vm.crosvm.logLevel else cfg.crosvm.logLevel;
      allExtraArgs = cfg.crosvm.extraArgs ++ vm.crosvm.extraArgs;
      allExtraRunArgs = cfg.crosvm.extraRunArgs ++ vm.crosvm.extraRunArgs;
    in
    # NOTE(review): writeShellScript emits its own shebang, so the "#!/bin/sh"
    # below lands mid-file and is just a comment line in the generated script.
    pkgs.writeShellScript "vmsilo-start-${vm.name}" ''
      #!/bin/sh
      set -e

      ${lib.optionalString (vm.pciDevices != [ ]) ''
        # IOMMU group validation
        check_iommu_group() {
          local dev="$1"
          local group_path="/sys/bus/pci/devices/$dev/iommu_group/devices"

          if [ ! -d "$group_path" ]; then
            echo "Error: IOMMU not enabled or device $dev not found" >&2
            echo "Ensure IOMMU is enabled (intel_iommu=on or amd_iommu=on)" >&2
            exit 1
          fi

          for peer in "$group_path"/*; do
            peer_bdf=$(basename "$peer")
            [ "$peer_bdf" = "$dev" ] && continue

            # Check if peer is in our passthrough list
            case "$peer_bdf" in
              ${lib.concatStringsSep "|" vmPciDevicePaths})
                # Peer is being passed to this VM, OK
                ;;
              *)
                # Check if peer is unbound (no driver)
                if [ -L "/sys/bus/pci/devices/$peer_bdf/driver" ]; then
                  peer_driver=$(basename "$(readlink "/sys/bus/pci/devices/$peer_bdf/driver")")
                  if [ "$peer_driver" != "vfio-pci" ]; then
                    # Check if peer is a PCI bridge (class 0x0604xx) - safe to leave bound
                    peer_class=$(cat "/sys/bus/pci/devices/$peer_bdf/class" 2>/dev/null || echo "")
                    case "$peer_class" in
                      0x0604*)
                        # PCI-to-PCI bridge, safe to leave bound to pcieport driver
                        ;;
                      *)
                        echo "Error: Device $dev shares IOMMU group with $peer_bdf (bound to $peer_driver)" >&2
                        echo "All devices in an IOMMU group must be passed to the same VM or unbound" >&2
                        exit 1
                        ;;
                    esac
                  fi
                fi
                ;;
            esac
          done
        }

        # Check all PCI devices
        for dev in ${lib.concatStringsSep " " vmPciDevicePaths}; do
          check_iommu_group "$dev"
        done
      ''}

      # Clean up stale socket
      rm -f /run/vmsilo/${vm.name}-crosvm-control.socket

      exec ${cfg._internal.crosvm}/bin/crosvm \
        --log-level=${effectiveLogLevel} \
        --no-syslog \
        --no-timestamps \
        ${lib.escapeShellArgs allExtraArgs} \
        run \
        --name ${vm.name} \
        -m ${toString vm.memory} \
        --initrd=${initramfsPath} \
        --serial=hardware=virtio-console,type=unix-stream,path=/run/vmsilo/${vm.name}-console-backend.socket,console,input-unix-stream \
        ${formatBlockArg rootDiskConfig} \
        ${additionalDisksArgs} \
        ${ephemeralDiskArg} \
        ${lib.optionalString (rootfs != null) ''-p "init=${rootfs.config.system.build.toplevel}/init"''} \
        -p "systemd.hostname=${vm.name}" \
        ${lib.concatStringsSep " \\\n " allNetworkKernelParams} \
        ${lib.optionalString vm.autoShutdown.enable ''
          -p "autoShutdown.enable=1" \
          -p "autoShutdown.after=${toString vm.autoShutdown.after}" \
        ''} \
        ${rootOverlayKernelParam} \
        ${lib.optionalString sharedHomeEnabled ''-p "systemd.mount-extra=home:/home/user:virtiofs:"''} \
        ${extraKernelParams} \
        ${sharedDirArgs} \
        --cid ${toString vm.id} \
        --cpus ${toString vm.cpus} \
        ${lib.optionalString (effectiveGpu != null) "--gpu=${formatKVArgs "," effectiveGpu}"} \
        ${lib.optionalString (effectiveSound != null) "--virtio-snd=${formatKVArgs "," effectiveSound}"} \
        -s /run/vmsilo/${vm.name}-crosvm-control.socket \
        --wayland-security-context wayland_socket=${userRuntimeDir}/wayland-0,app_id=vmsilo:${vm.name}:${vm.color} \
        ${vfioArgs} \
        ${networkArgs} \
        ${vhostUserArgs} \
        ${lib.escapeShellArgs allExtraRunArgs} \
        ${kernelPath}
    '';
|
|
|
|
# Generate proxy script for a VM
|
|
mkProxyScript =
|
|
vm:
|
|
pkgs.writeShellScript "vmsilo-proxy-${vm.name}" ''
|
|
CID=${toString vm.id}
|
|
VSOCK_PORT=5000
|
|
TIMEOUT=30
|
|
|
|
# Wait for vsock to become available
|
|
ELAPSED=0
|
|
while [ $ELAPSED -lt $TIMEOUT ]; do
|
|
if ${pkgs.socat}/bin/socat -u OPEN:/dev/null VSOCK-CONNECT:$CID:$VSOCK_PORT 2>/dev/null; then
|
|
break
|
|
fi
|
|
sleep 0.5
|
|
ELAPSED=$((ELAPSED + 1))
|
|
done
|
|
|
|
if [ $ELAPSED -ge $TIMEOUT ]; then
|
|
echo "Timeout waiting for VM ${vm.name} to start" >&2
|
|
exit 1
|
|
fi
|
|
|
|
# Forward stdin/stdout to vsock
|
|
exec ${pkgs.socat}/bin/socat - VSOCK-CONNECT:$CID:$VSOCK_PORT
|
|
'';
|
|
|
|
  # Generate shell case statement for VM dispatch
  #
  # makeCase: vm -> "name) <command> ;;" — one arm per configured VM. The
  # fallback arm reports the unknown name plus the list of available VMs and
  # exits 1. Callers splice the result into a script where $VM_NAME is set.
  mkVmCase = makeCase: ''
    case "$VM_NAME" in
      ${lib.concatMapStringsSep "\n " makeCase cfg.nixosVms}
      *)
        echo "Unknown VM: $VM_NAME" >&2
        echo "Available VMs: ${lib.concatMapStringsSep ", " (vm: vm.name) cfg.nixosVms}" >&2
        exit 1
        ;;
    esac
  '';
|
|
|
|
  # vm-run: Run command in VM (socket-activated)
  #
  # Writes the command line to the VM's command socket; connecting triggers
  # systemd socket activation when the VM is not yet running.
  # NOTE(review): `echo "$@"` joins arguments with single spaces, so argument
  # boundaries / embedded whitespace are not preserved — the guest side appears
  # to consume a single text line; confirm before passing quoted arguments.
  vmRunScript = pkgs.writeShellScript "vm-run" ''
    if [ $# -lt 2 ]; then
      echo "Usage: vm-run <vm-name> <command> [args...]" >&2
      exit 1
    fi

    VM_NAME="$1"
    shift

    SOCKET="/run/vmsilo/$VM_NAME-command.socket"

    if [ ! -S "$SOCKET" ]; then
      echo "Unknown VM or socket not active: $VM_NAME" >&2
      echo "Available VMs: ${lib.concatMapStringsSep ", " (vm: vm.name) cfg.nixosVms}" >&2
      exit 1
    fi

    # Send command via socket (triggers activation if needed)
    echo "$@" | ${pkgs.socat}/bin/socat - UNIX-CONNECT:"$SOCKET"
  '';
|
|
|
|
  # vm-start-debug: Start VM directly (bypasses socket activation, requires root)
  #
  # Exec's the per-VM launcher script in the foreground so crosvm output goes
  # straight to the terminal — useful for debugging boot problems without
  # systemd in between. Refuses to run without uid 0.
  vmStartDebugScript = pkgs.writeShellScript "vm-start-debug" ''
    if [ $# -ne 1 ]; then
      echo "Usage: vm-start-debug <vm-name>" >&2
      echo "Note: Requires root privileges (use sudo)" >&2
      exit 1
    fi

    if [ "$(id -u)" -ne 0 ]; then
      echo "Error: vm-start-debug requires root privileges" >&2
      echo "Run: sudo vm-start-debug $1" >&2
      exit 1
    fi

    VM_NAME="$1"

    ${mkVmCase (vm: "${vm.name}) exec ${mkVmScript vm} ;;")}
  '';
|
|
|
|
  # vm-start: Start VM via systemd (uses polkit for authorization)
  # Dispatches to `systemctl start` of the per-VM service unit.
  vmStartScript = pkgs.writeShellScript "vm-start" ''
    if [ $# -ne 1 ]; then
      echo "Usage: vm-start <vm-name>" >&2
      exit 1
    fi

    VM_NAME="$1"

    ${mkVmCase (vm: "${vm.name}) systemctl start vmsilo-${vm.name}-vm.service ;;")}
  '';
|
|
|
|
  # vm-stop: Stop VM via systemd (uses polkit for authorization)
  # Dispatches to `systemctl stop` of the per-VM service unit.
  vmStopScript = pkgs.writeShellScript "vm-stop" ''
    if [ $# -ne 1 ]; then
      echo "Usage: vm-stop <vm-name>" >&2
      exit 1
    fi

    VM_NAME="$1"

    ${mkVmCase (vm: "${vm.name}) systemctl stop vmsilo-${vm.name}-vm.service ;;")}
  '';
|
|
|
|
  # vm-shell: Connect to VM (serial console by default, SSH with --ssh)
  #
  # Serial path attaches socat to the console socket under /run/vmsilo
  # (CTRL+] detaches); the SSH path uses OpenSSH's vsock addressing
  # (user@vsock/<cid>). --root is only meaningful together with --ssh.
  vmShellScript = pkgs.writeShellScript "vm-shell" ''
    usage() {
      echo "Usage: vm-shell [--ssh [--root]] <vm-name>" >&2
      echo "" >&2
      echo "Options:" >&2
      echo " --ssh Use SSH over vsock (requires SSH keys configured)" >&2
      echo " --root Connect as root (only with --ssh)" >&2
      echo "" >&2
      echo "Without --ssh, connects to serial console." >&2
      echo "Escape character is CTRL+]" >&2
      exit 1
    }

    USE_SSH=0
    USE_ROOT=0

    while [ $# -gt 0 ]; do
      case "$1" in
        --ssh)
          USE_SSH=1
          shift
          ;;
        --root)
          USE_ROOT=1
          shift
          ;;
        -*)
          usage
          ;;
        *)
          break
          ;;
      esac
    done

    if [ $# -ne 1 ]; then
      usage
    fi

    VM_NAME="$1"

    if [ $USE_ROOT -eq 1 ] && [ $USE_SSH -eq 0 ]; then
      echo "Error: --root requires --ssh" >&2
      exit 1
    fi

    if [ $USE_SSH -eq 1 ]; then
      if [ $USE_ROOT -eq 1 ]; then
        USER_NAME="root"
      else
        USER_NAME="user"
      fi
      ${mkVmCase (vm: "${vm.name}) exec ${pkgs.openssh}/bin/ssh $USER_NAME@vsock/${toString vm.id} ;;")}
    else
      CONSOLE="/run/vmsilo/$VM_NAME-console"
      if [ ! -e "$CONSOLE" ]; then
        echo "Console not found: $CONSOLE" >&2
        echo "Is the VM running? Use: vm-start $VM_NAME" >&2
        exit 1
      fi
      echo "Escape character is CTRL+]"
      exec ${pkgs.socat}/bin/socat -,raw,echo=0,escape=0x1d "$CONSOLE"
    fi
  '';
|
|
in
{
  config = lib.mkIf cfg.enable {
    # Set internal options for other modules to consume
    programs.vmsilo._internal = {
      # Per-VM launcher scripts keyed by VM name (consumed by the systemd
      # service definitions).
      vmScripts = lib.listToAttrs (map (vm: lib.nameValuePair vm.name (mkVmScript vm)) cfg.nixosVms);

      # Per-VM vsock proxy scripts keyed by VM name.
      proxyScripts = lib.listToAttrs (
        map (vm: lib.nameValuePair vm.name (mkProxyScript vm)) cfg.nixosVms
      );

      # User-facing CLI entry points, installed by another module.
      userScripts = {
        vm-run = vmRunScript;
        vm-start = vmStartScript;
        vm-start-debug = vmStartDebugScript;
        vm-stop = vmStopScript;
        vm-shell = vmShellScript;
      };
    };
  };
}
|