vmsilo/modules/netvm.nix
Davíð Steinn Geirsson 3b640b1662 netvm: support network.netvm = "host" for host-routed networking
Route VM traffic through the host directly instead of requiring a
separate netvm VM. Uses the same nftables NAT and forward firewall
rules as VM-based netvms, applied on the host using TAP interface
names. Removes the hostNetworking.nat options in favor of the
unified netvm approach.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-07 15:28:47 +00:00

232 lines
7.4 KiB
Nix

# Auto-configuration for netvm links
# For each (netvm, client) pair derived from network.netvm options, synthesizes:
# - Host bridge + TAP interfaces (via _internal.netvmInjections, picked up by networking.nix)
# - Client VM: "upstream" interface with IP and default route
# - Netvm VM: "<clientname>" interface with IP, nftables NAT, forward firewall rules
#
# When network.netvm = "host", the host machine acts as the gateway:
# - Client VM: "upstream" interface with tap.hostAddress (no bridge)
# - Host: nftables NAT + forward rules (via _internal.hostNetvmConfig — note
#   that this module only emits the client-side interface injections; the
#   host-side NAT/forward rules are expected to be produced elsewhere)
#
# We do NOT write back to programs.vmsilo.nixosVms to avoid a self-referential cycle:
# reading nixosVms while also defining it via mkMerge causes infinite recursion in the
# module system's attrsOf submodule merge. Instead we use a separate internal option
# (_internal.netvmInjections) that networking.nix and scripts.nix merge in explicitly.
{
config,
options,
lib,
...
}:
let
  # Shorthand for this module's option values.
  cfg = config.programs.vmsilo;
  # Pure helper library (CIDR parsing, subnet allocation, bridge naming).
  helpers = import ./lib/helpers.nix { inherit lib; };
  parseCIDR = helpers.parseCIDR;
  makeBridgeName = helpers.makeBridgeName;
  allocateNetvmSubnet = helpers.allocateNetvmSubnet;
  ipToInt = helpers.ipToInt;
  intToIp = helpers.intToIp;
# Read raw user-provided VM declarations to avoid infinite recursion.
# cfg.nixosVms is an attrsOf submodule whose merge cycle would be triggered if we
# read it here and also wrote to it below. Instead we use
# options.definitionsWithLocations which gives the unprocessed raw Nix attrsets
# directly from each module definition, before submodule type evaluation.
#
# Only the three network.* fields this module needs are extracted; every other
# field of the raw VM definitions is ignored here.
#
# NOTE(review): foldl + recursiveUpdate means a later module definition of the
# same VM overwrites earlier ones field-by-field, and a definition that omits
# network.netvm contributes an explicit null that clobbers an earlier non-null
# value — confirm no configuration splits one VM's network attrs across
# multiple modules.
rawVmAttrs = lib.foldl (
  acc: def:
  lib.recursiveUpdate acc (
    # Each definition value is an attrset of VM name -> partial VM config (plain Nix attrset).
    # The `or` fallbacks also cover a definition with no `network` attrset at all.
    lib.mapAttrs (_: vmDef: {
      network.netvm = vmDef.network.netvm or null;
      network.netvmSubnet = vmDef.network.netvmSubnet or null;
      network.isNetvm = vmDef.network.isNetvm or false;
    }) def.value
  )
) { } options.programs.vmsilo.nixosVms.definitionsWithLocations;
# Deterministic VM ids: names sorted ascending, ids starting at 3
# (mirrors helpers.assignVmIds, but computed over rawVmAttrs).
vms =
  lib.imap1 (
    n: vmName:
    rawVmAttrs.${vmName}
    // {
      # imap1 counts from 1, so the first VM gets id 3.
      id = n + 2;
      name = vmName;
    }
  ) (lib.sort (x: y: x < y) (lib.attrNames rawVmAttrs));
# VMs that requested a gateway at all (network.netvm set).
clientVms = lib.filter (vm: vm.network.netvm != null) vms;
# Split clients by gateway kind: "host" means routed directly by the host
# machine; any other value names a netvm VM to route through.
clientSplit = lib.partition (vm: vm.network.netvm == "host") clientVms;
hostClientVms = clientSplit.right;
vmClientVms = clientSplit.wrong;
# Global address pool from which /31 point-to-point links are carved.
parsedRange = parseCIDR cfg.netvmRange;
# Allocation for one (netvm VM, client VM) link.
# Returns { netvmName, clientName, bridgeName, clientIp, netvmIp }: a /31
# whose lower address belongs to the client and upper address to the netvm.
mkPair =
  clientVm:
  let
    netvmName = clientVm.network.netvm;
    clientName = clientVm.name;
    manualSubnet = clientVm.network.netvmSubnet;
    # Lower /31 address: a user-specified subnet wins over the automatic pool.
    # clientIp is always the lower (even) address of the /31:
    # - auto path: guaranteed by allocateNetvmSubnet
    # - manual path: enforced by assertions (assertions.nix)
    clientIp =
      if manualSubnet == null then
        (allocateNetvmSubnet netvmName clientName parsedRange.ip parsedRange.prefix).clientIp
      else
        (parseCIDR manualSubnet).ip;
  in
  {
    inherit netvmName clientName clientIp;
    bridgeName = makeBridgeName netvmName clientName clientVm.id;
    # Upper address of the /31 — the netvm's side of the link.
    netvmIp = intToIp (ipToInt clientIp + 1);
  };
# One allocation record per VM-routed client.
allPairs = map mkPair vmClientVms;
# Allocation for one (host, client VM) link — host-routed, so no bridge.
# Returns { clientName, clientIp, netvmIp } where netvmIp is the host's side
# of the /31 (the client's default gateway).
mkHostPair =
  clientVm:
  let
    clientName = clientVm.name;
    manualSubnet = clientVm.network.netvmSubnet;
    # Lower /31 address: a user-specified subnet wins over the automatic pool.
    clientIp =
      if manualSubnet == null then
        (allocateNetvmSubnet "host" clientName parsedRange.ip parsedRange.prefix).clientIp
      else
        (parseCIDR manualSubnet).ip;
  in
  {
    inherit clientName clientIp;
    netvmIp = intToIp (ipToInt clientIp + 1);
  };
# One allocation record per host-routed client.
allHostPairs = lib.forEach hostClientVms mkHostPair;
# Pairs indexed by their serving netvm, so each netvm's nftables config can
# cover all of its clients at once.
pairsByNetvm = lib.groupBy (pair: pair.netvmName) allPairs;
# Build the guest NixOS config injected into a netvm, given the names of the
# client-facing interfaces it serves. Enables IPv4 forwarding, masquerades
# traffic arriving on client interfaces, and drops direct client-to-client
# forwarding while letting established/related return traffic through.
mkNetvmGuestConfig =
  clientIfNames:
  let
    # nftables set literal — e.g. { "client1", "client2" }
    ifSet = "{ ${lib.concatMapStringsSep ", " (n: ''"${n}"'') clientIfNames} }";
  in
  {
    # Required for the netvm to route packets between its interfaces.
    boot.kernel.sysctl."net.ipv4.ip_forward" = 1;
    networking.nftables.enable = true;
    networking.nftables.tables = {
      # Source NAT: anything entering on a client interface is masqueraded on
      # the way out (loopback excluded). NOTE(review): family "ip" means IPv6
      # client traffic is not NATed — confirm that is intended.
      vmsilo-netvm-nat = {
        family = "ip";
        content = ''
          chain postrouting {
            type nat hook postrouting priority srcnat;
            iifname ${ifSet} oifname != "lo" masquerade
          }
        '';
      };
      # Forward policy stays accept; only client-to-client traffic is dropped,
      # after established/related flows have already been accepted.
      vmsilo-netvm-filter = {
        family = "inet";
        content = ''
          chain forward {
            type filter hook forward priority filter;
            policy accept;
            ct state established,related accept
            iifname ${ifSet} oifname ${ifSet} drop
          }
        '';
      };
    };
  };
# Expand a partial interface spec into the full record shape required by
# scripts.nix and networking.nix (mirrors the networkInterfaceSubmodule
# defaults), so consumer code never needs to special-case injected
# interfaces. The nested `tap` set is merged separately because `//` is a
# shallow merge and would otherwise replace it wholesale.
mkIface =
  overrides:
  let
    tapDefaults = {
      name = null;
      hostAddress = null;
      bridge = null;
    };
    ifaceDefaults = {
      type = "tap";
      macAddress = null;
      tap = tapDefaults;
      dhcp = false;
      addresses = [ ];
      routes = { };
      v6Addresses = [ ];
      v6Routes = { };
    };
  in
  ifaceDefaults
  // overrides
  // {
    tap = tapDefaults // (overrides.tap or { });
  };
# VM name -> { interfaces.<name> = iface; guestConfig = [ ... ]; }, merged
# from all per-pair fragments. Published on _internal.netvmInjections and
# consumed by networking.nix and scripts.nix.
buildInjections =
  let
    # One fragment per VM-routed pair: the client gains an "upstream"
    # interface with a default route via the netvm, the serving netvm gains
    # an interface named after the client; both sit on the same host bridge.
    vmPairFragments = map (
      pair: {
        ${pair.clientName}.interfaces.upstream = mkIface {
          tap.bridge = pair.bridgeName;
          addresses = [ "${pair.clientIp}/31" ];
          routes."0.0.0.0/0".via = pair.netvmIp;
        };
        ${pair.netvmName}.interfaces.${pair.clientName} = mkIface {
          tap.bridge = pair.bridgeName;
          addresses = [ "${pair.netvmIp}/31" ];
        };
      }
    ) allPairs;
    # One fragment per netvm: its NAT/forwarding guest config, covering every
    # client interface it serves.
    netvmGuestFragments = lib.mapAttrsToList (
      netvmName: pairs: {
        ${netvmName}.guestConfig = [ (mkNetvmGuestConfig (map (p: p.clientName) pairs)) ];
      }
    ) pairsByNetvm;
    # One fragment per host-routed client: tap.hostAddress carries the host's
    # side of the /31, so no bridge is involved.
    hostPairFragments = map (
      pair: {
        ${pair.clientName}.interfaces.upstream = mkIface {
          tap.hostAddress = "${pair.netvmIp}/31";
          addresses = [ "${pair.clientIp}/31" ];
          routes."0.0.0.0/0".via = pair.netvmIp;
        };
      }
    ) allHostPairs;
    allFragments = vmPairFragments ++ netvmGuestFragments ++ hostPairFragments;
  in
  lib.foldl lib.recursiveUpdate { } allFragments;
in
{
  # Publish the computed injections on the internal option; networking.nix and
  # scripts.nix merge these into the real VM configs explicitly, avoiding the
  # nixosVms self-reference cycle described at the top of this file.
  config = lib.mkIf cfg.enable {
    programs.vmsilo._internal.netvmInjections = buildInjections;
  };
}