build: Switch vhost/vhost-user-backend to dsg fork

Use vhost 0.15.0 and vhost-user-backend 0.21.0 from
https://git.dsg.is/dsg/vhost.git instead of crates.io.

This required bumping several rust-vmm dependencies to maintain
compatibility with the newer vm-memory 0.17.x and vmm-sys-util 0.15.x
APIs used by the fork:

- kvm-bindings: 0.12.1 → 0.14.0
- kvm-ioctls: 0.22.1 → 0.24.0 (nested_state renamed)
- linux-loader: 0.13.1 → 0.13.2
- vfio-bindings: 0.6.0 → 0.6.1
- vfio-ioctls: 0.5.1 → 0.5.2 (VfioDevice::new takes Arc<dyn VfioOps>)
- vfio_user: 0.1.1 → 0.1.2
- virtio-queue: 0.16.0 → 0.17.0
- vm-memory: 0.16.1 → 0.17.1 (Error split, Bytes trait, VolatileSlice)
- vmm-sys-util: 0.14.0 → 0.15.0 (EventConsumer/EventNotifier API)
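
The most invasive of these is the vmm-sys-util event change: exit_event()
now returns an (EventConsumer, EventNotifier) pair instead of a single
EventFd (see the backend diffs below). Each half of the pair owns its own
duplicated descriptor. A minimal std-only sketch of that fd-splitting
pattern, with std::fs::File standing in for the fork's event types:

    use std::fs::File;
    use std::io;
    use std::os::unix::io::{FromRawFd, IntoRawFd};

    // Illustrative only: File stands in for EventConsumer/EventNotifier.
    // Each half gets its own duplicated descriptor, so dropping one side
    // cannot invalidate the other.
    fn split_event(evt: &File) -> io::Result<(File, File)> {
        let consumer_fd = evt.try_clone()?.into_raw_fd();
        let notifier_fd = evt.try_clone()?.into_raw_fd();
        // SAFETY: both descriptors were just duplicated from a valid open
        // file and ownership is transferred exactly once.
        unsafe { Ok((File::from_raw_fd(consumer_fd), File::from_raw_fd(notifier_fd))) }
    }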

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Author: Davíð Steinn Geirsson
Date:   2026-03-18 17:15:59 +00:00
Commit: e3372a22f6 (parent e36096db3e)

14 changed files with 410 additions and 304 deletions

Cargo.lock (generated): 499 changed lines; diff suppressed because it is too large.

Cargo.toml

@@ -46,22 +46,22 @@ resolver = "3"
 [workspace.dependencies]
 # rust-vmm crates
 acpi_tables = "0.2.0"
-kvm-bindings = "0.12.1"
-kvm-ioctls = "0.22.1"
-linux-loader = "0.13.1"
+kvm-bindings = "0.14.0"
+kvm-ioctls = "0.24.0"
+linux-loader = "0.13.2"
 mshv-bindings = "0.6.7"
 mshv-ioctls = "0.6.7"
 seccompiler = "0.5.0"
-vfio-bindings = { version = "0.6.0", default-features = false }
-vfio-ioctls = { version = "0.5.1", default-features = false }
-vfio_user = { version = "0.1.1", default-features = false }
-vhost = { version = "0.14.0", default-features = false }
-vhost-user-backend = { version = "0.20.0", default-features = false }
+vfio-bindings = { version = "0.6.1", default-features = false }
+vfio-ioctls = { version = "0.5.2", default-features = false }
+vfio_user = { version = "0.1.2", default-features = false }
+vhost = { git = "https://git.dsg.is/dsg/vhost.git", default-features = false }
+vhost-user-backend = { git = "https://git.dsg.is/dsg/vhost.git", default-features = false }
 virtio-bindings = "0.2.6"
-virtio-queue = "0.16.0"
+virtio-queue = "0.17.0"
 vm-fdt = "0.3.0"
-vm-memory = "0.16.1"
-vmm-sys-util = "0.14.0"
+vm-memory = "0.17.1"
+vmm-sys-util = "0.15.0"
 # igvm crates
 igvm = "0.4.0"

flake.lock (generated, new file): 27 lines

@@ -0,0 +1,27 @@
{
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1773734432,
        "narHash": "sha256-IF5ppUWh6gHGHYDbtVUyhwy/i7D261P7fWD1bPefOsw=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "cda48547b432e8d3b18b4180ba07473762ec8558",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}

flake.nix (new file): 83 lines

@@ -0,0 +1,83 @@
{
  description = "Cloud Hypervisor DSG fork";

  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";

  outputs =
    { self, nixpkgs }:
    let
      supportedSystems = [
        "aarch64-linux"
        "riscv64-linux"
        "x86_64-linux"
      ];
      forAllSystems = nixpkgs.lib.genAttrs supportedSystems;
    in
    {
      packages = forAllSystems (
        system:
        let
          pkgs = nixpkgs.legacyPackages.${system};
          inherit (pkgs) lib;
        in
        {
          default = self.packages.${system}.cloud-hypervisor;
          cloud-hypervisor = pkgs.rustPlatform.buildRustPackage (finalAttrs: {
            pname = "cloud-hypervisor";
            version = "50.0.0";
            src = self;

            # After updating Cargo.lock with `cargo update`, fill in the hashes
            # for the git-sourced vhost packages by running `nix build` and
            # replacing lib.fakeHash with the hash reported in the error message.
            cargoLock = {
              lockFile = ./Cargo.lock;
              outputHashes = {
                "micro_http-0.1.0" = "sha256-XemdzwS25yKWEXJcRX2l6QzD7lrtroMeJNOUEWGR7WQ=";
                "vhost-0.15.0" = "sha256-iH5Zr9lSlkWhhdOQTSiPQ/mHBaKJ/oamTJZgY68tnGg=";
                "vhost-user-backend-0.21.0" = "sha256-iH5Zr9lSlkWhhdOQTSiPQ/mHBaKJ/oamTJZgY68tnGg=";
              };
            };

            separateDebugInfo = true;

            nativeBuildInputs = [ pkgs.pkg-config ];
            buildInputs =
              [ pkgs.openssl ]
              ++ lib.optional pkgs.stdenv.hostPlatform.isAarch64 pkgs.dtc;

            env.OPENSSL_NO_VENDOR = true;

            cargoTestFlags = [
              "--workspace"
              "--exclude"
              "hypervisor" # /dev/kvm
              "--exclude"
              "net_util" # /dev/net/tun
              "--exclude"
              "virtio-devices" # seccomp
              "--exclude"
              "vmm" # /dev/kvm
            ];

            meta = {
              homepage = "https://git.dsg.is/dsg/cloud-hypervisor";
              description = "Open source Virtual Machine Monitor (VMM) that runs on top of KVM";
              license = with lib.licenses; [
                asl20
                bsd3
              ];
              mainProgram = "cloud-hypervisor";
              platforms = [
                "aarch64-linux"
                "riscv64-linux"
                "x86_64-linux"
              ];
            };
          });
        }
      );
    };
}

@@ -2994,7 +2994,7 @@ impl KvmVcpu {
         let maybe_size = self
             .fd
-            .get_nested_state(&mut buffer)
+            .nested_state(&mut buffer)
             .map_err(|e| cpu::HypervisorCpuError::GetNestedState(e.into()))?;

         if let Some(_size) = maybe_size {

@@ -12,6 +12,7 @@ use std::fs::{File, OpenOptions};
 use std::io::{Read, Seek, SeekFrom, Write};
 use std::ops::{Deref, DerefMut};
 use std::os::unix::fs::OpenOptionsExt;
+use std::os::unix::io::{FromRawFd, IntoRawFd};
 use std::path::PathBuf;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, Mutex, RwLock, RwLockWriteGuard};
@@ -34,6 +35,7 @@ use virtio_bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
 use virtio_queue::QueueT;
 use vm_memory::{ByteValued, Bytes, GuestAddressSpace, GuestMemoryAtomic};
 use vmm_sys_util::epoll::EventSet;
+use vmm_sys_util::event::{EventConsumer, EventNotifier};
 use vmm_sys_util::eventfd::EventFd;

 type GuestMemoryMmap = vm_memory::GuestMemoryMmap<BitmapMmapRegion>;
@@ -423,15 +425,15 @@ impl VhostUserBackendMut for VhostUserBlkBackend {
         Ok(())
     }

-    fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
-        Some(
-            self.threads[thread_index]
-                .lock()
-                .unwrap()
-                .kill_evt
-                .try_clone()
-                .unwrap(),
-        )
+    fn exit_event(&self, thread_index: usize) -> Option<(EventConsumer, EventNotifier)> {
+        let thread = self.threads[thread_index].lock().unwrap();
+        // SAFETY: Safe because we duplicate valid file descriptors from the EventFd.
+        unsafe {
+            Some((
+                EventConsumer::from_raw_fd(thread.kill_evt.try_clone().unwrap().into_raw_fd()),
+                EventNotifier::from_raw_fd(thread.kill_evt.try_clone().unwrap().into_raw_fd()),
+            ))
+        }
     }

     fn queues_per_thread(&self) -> Vec<u64> {
@@ -533,14 +535,14 @@ pub fn start_block_backend(backend_command: &str) {
     debug!("blk_backend is created!\n");

-    let listener = Listener::new(&backend_config.socket, true).unwrap();
+    let mut listener = Listener::new(&backend_config.socket, true).unwrap();

     let name = "vhost-user-blk-backend";
     let mut blk_daemon = VhostUserDaemon::new(name.to_string(), blk_backend.clone(), mem).unwrap();
     debug!("blk_daemon is created!\n");

-    if let Err(e) = blk_daemon.start(listener) {
+    if let Err(e) = blk_daemon.start(&mut listener) {
         error!("Failed to start daemon for vhost-user-block with error: {e:?}\n");
         process::exit(1);
     }

@@ -8,7 +8,7 @@
 use std::net::{IpAddr, Ipv4Addr};
 use std::ops::Deref;
-use std::os::unix::io::{AsRawFd, RawFd};
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
 use std::sync::{Arc, Mutex, RwLock};
 use std::{io, process};
@@ -27,6 +27,7 @@ use virtio_bindings::virtio_config::{VIRTIO_F_NOTIFY_ON_EMPTY, VIRTIO_F_VERSION_
 use virtio_bindings::virtio_net::*;
 use vm_memory::{GuestAddressSpace, GuestMemoryAtomic};
 use vmm_sys_util::epoll::EventSet;
+use vmm_sys_util::event::{EventConsumer, EventNotifier};
 use vmm_sys_util::eventfd::EventFd;

 type GuestMemoryMmap = vm_memory::GuestMemoryMmap<BitmapMmapRegion>;
@@ -249,15 +250,15 @@ impl VhostUserBackendMut for VhostUserNetBackend {
         Ok(())
     }

-    fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
-        Some(
-            self.threads[thread_index]
-                .lock()
-                .unwrap()
-                .kill_evt
-                .try_clone()
-                .unwrap(),
-        )
+    fn exit_event(&self, thread_index: usize) -> Option<(EventConsumer, EventNotifier)> {
+        let thread = self.threads[thread_index].lock().unwrap();
+        // SAFETY: Safe because we duplicate valid file descriptors from the EventFd.
+        unsafe {
+            Some((
+                EventConsumer::from_raw_fd(thread.kill_evt.try_clone().unwrap().into_raw_fd()),
+                EventNotifier::from_raw_fd(thread.kill_evt.try_clone().unwrap().into_raw_fd()),
+            ))
+        }
     }

     fn queues_per_thread(&self) -> Vec<u64> {
@@ -394,7 +395,7 @@ pub fn start_net_backend(backend_command: &str) {
     if let Err(e) = if backend_config.client {
         net_daemon.start_client(&backend_config.socket)
     } else {
-        net_daemon.start(Listener::new(&backend_config.socket, true).unwrap())
+        net_daemon.start(&mut Listener::new(&backend_config.socket, true).unwrap())
     } {
         error!("failed to start daemon for vhost-user-net with error: {e:?}");
         process::exit(1);

@@ -167,9 +167,7 @@ pub fn get_host_address_range<M: GuestMemory + ?Sized>(
     if mem.check_range(addr, size) {
         let slice = mem.get_slice(addr, size).unwrap();
         assert!(slice.len() >= size);
-        // TODO: return a VolatileSlice and fix all callers.
-        #[allow(deprecated)]
-        Some(slice.as_ptr())
+        Some(slice.ptr_guard_mut().as_ptr())
     } else {
         None
     }
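
For context: vm-memory deprecated taking a raw pointer straight off a
VolatileSlice in favour of the pointer-guard API used in the hunk above. A
minimal sketch of the replacement pattern against vm-memory 0.17 (the helper
name is hypothetical):

    use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};

    // Hypothetical helper mirroring the hunk above: fetch a raw host pointer
    // for a guest range through ptr_guard_mut() rather than the deprecated
    // as_ptr() on the slice itself.
    fn host_ptr(mem: &GuestMemoryMmap, addr: GuestAddress, size: usize) -> Option<*mut u8> {
        let slice = mem.get_slice(addr, size).ok()?;
        Some(slice.ptr_guard_mut().as_ptr())
    }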

@@ -17,7 +17,7 @@ use seccompiler::SeccompAction;
 use serde::{Deserialize, Serialize};
 use thiserror::Error;
 use virtio_queue::{Queue, QueueT};
-use vm_memory::{GuestAddressSpace, GuestMemory, GuestMemoryAtomic};
+use vm_memory::{Bytes, GuestAddressSpace, GuestMemory, GuestMemoryAtomic};
 use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
 use vm_virtio::{AccessPlatform, Translatable};
 use vmm_sys_util::eventfd::EventFd;

@@ -18,7 +18,9 @@ use vhost::vhost_user::message::{
 use vhost::vhost_user::{FrontendReqHandler, VhostUserFrontendReqHandler};
 use virtio_queue::{Error as QueueError, Queue};
 use vm_memory::mmap::MmapRegionError;
-use vm_memory::{Address, Error as MmapError, GuestAddressSpace, GuestMemory, GuestMemoryAtomic};
+use vm_memory::{
+    Address, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestRegionCollectionError,
+};
 use vm_migration::protocol::MemoryRangeTable;
 use vm_migration::{MigratableError, Snapshot};
 use vmm_sys_util::eventfd::EventFd;
@@ -60,7 +62,7 @@ pub enum Error {
     #[error("Signal used queue failed")]
     FailedSignalingUsedQueue(#[source] io::Error),
     #[error("Failed to read vhost eventfd")]
-    MemoryRegions(#[source] MmapError),
+    MemoryRegions(#[source] GuestRegionCollectionError),
     #[error("Failed removing socket path")]
     RemoveSocketPath(#[source] io::Error),
     #[error("Failed to create frontend")]
@@ -106,7 +108,7 @@ pub enum Error {
     #[error("Failed to read vhost eventfd")]
     VhostIrqRead(#[source] io::Error),
     #[error("Failed to read vhost eventfd")]
-    VhostUserMemoryRegion(#[source] MmapError),
+    VhostUserMemoryRegion(#[source] GuestRegionCollectionError),
     #[error("Failed to create the frontend request handler from backend")]
     FrontendReqHandlerCreation(#[source] vhost::vhost_user::Error),
     #[error("Set backend request fd failed")]

@@ -22,7 +22,8 @@ use vhost::{VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, Vr
 use virtio_queue::desc::RawDescriptor;
 use virtio_queue::{Queue, QueueT};
 use vm_memory::{
-    Address, Error as MmapError, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion,
+    Address, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion,
+    GuestRegionCollectionError,
 };
 use vm_migration::protocol::MemoryRangeTable;
 use vmm_sys_util::eventfd::EventFd;
@@ -67,7 +68,7 @@ impl VhostUserHandle {
         for region in mem.iter() {
             let (mmap_handle, mmap_offset) = match region.file_offset() {
                 Some(_file_offset) => (_file_offset.file().as_raw_fd(), _file_offset.start()),
-                None => return Err(Error::VhostUserMemoryRegion(MmapError::NoMemoryRegion)),
+                None => return Err(Error::VhostUserMemoryRegion(GuestRegionCollectionError::NoMemoryRegion)),
             };

             let vhost_user_net_reg = VhostUserMemoryRegionInfo {

@@ -89,7 +89,7 @@ use seccompiler::SeccompAction;
 use serde::{Deserialize, Serialize};
 use thiserror::Error;
 use tracer::trace_scoped;
-use vfio_ioctls::{VfioContainer, VfioDevice, VfioDeviceFd};
+use vfio_ioctls::{VfioContainer, VfioDevice, VfioDeviceFd, VfioOps};
 use virtio_devices::transport::{VirtioPciDevice, VirtioPciDeviceActivator, VirtioTransport};
 use virtio_devices::vhost_user::VhostUserConfig;
 use virtio_devices::{
@@ -3754,7 +3754,7 @@ impl DeviceManager {
             vfio_container
         };

-        let vfio_device = VfioDevice::new(&device_cfg.path, Arc::clone(&vfio_container))
+        let vfio_device = VfioDevice::new(&device_cfg.path, Arc::clone(&vfio_container) as Arc<dyn VfioOps>)
             .map_err(DeviceManagerError::VfioCreate)?;

         if needs_dma_mapping {
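
For context: the added `as Arc<dyn VfioOps>` is an explicit unsizing
coercion from the concrete container type to a trait object, which the
updated VfioDevice::new signature expects. A std-only sketch of the same
pattern, with stand-in names:

    use std::sync::Arc;

    trait Ops {}          // stand-in for vfio_ioctls::VfioOps
    struct Container;     // stand-in for vfio_ioctls::VfioContainer
    impl Ops for Container {}

    fn main() {
        let container = Arc::new(Container);
        // Clone the Arc and coerce it to a trait object, keeping the
        // original concrete handle alive for later use.
        let ops: Arc<dyn Ops> = Arc::clone(&container) as Arc<dyn Ops>;
        drop(ops);
    }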

@@ -38,8 +38,9 @@ use vm_memory::bitmap::AtomicBitmap;
 use vm_memory::guest_memory::FileOffset;
 use vm_memory::mmap::MmapRegionError;
 use vm_memory::{
-    Address, Error as MmapError, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic,
-    GuestMemoryError, GuestMemoryRegion, GuestUsize, MmapRegion, ReadVolatile,
+    Address, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic,
+    GuestMemoryError, GuestMemoryRegion, GuestRegionCollectionError, GuestUsize, MmapRegion,
+    ReadVolatile, WriteVolatile,
 };
 use vm_migration::protocol::{MemoryRange, MemoryRangeTable};
 use vm_migration::{
@@ -205,7 +206,7 @@ pub enum Error {
     /// Mmap backed guest memory error
     #[error("Mmap backed guest memory error")]
-    GuestMemory(#[source] MmapError),
+    GuestMemory(#[source] GuestRegionCollectionError),

     /// Failed to allocate a memory range.
     #[error("Failed to allocate a memory range")]
@@ -1536,7 +1537,7 @@ impl MemoryManager {
         )?;

         Ok(Arc::new(
-            GuestRegionMmap::new(r, start_addr).map_err(Error::GuestMemory)?,
+            GuestRegionMmap::new(r, start_addr).ok_or(Error::GuestMemory(GuestRegionCollectionError::NoMemoryRegion))?,
         ))
     }
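
For context: in the vm-memory 0.17 used here, GuestRegionMmap::new returns
Option rather than Result, hence the switch from map_err to ok_or above. A
minimal sketch of the new constructor shape (helper name hypothetical):

    use vm_memory::{GuestAddress, GuestRegionMmap, MmapRegion};

    // Hypothetical helper: build an anonymous guest region, propagating None
    // when GuestRegionMmap::new rejects the placement.
    fn make_region(start: GuestAddress, len: usize) -> Option<GuestRegionMmap> {
        let mapping = MmapRegion::new(len).ok()?;
        GuestRegionMmap::new(mapping, start)
    }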

@@ -67,7 +67,7 @@ use vm_device::Bus;
 #[cfg(feature = "tdx")]
 use vm_memory::{Address, ByteValued, GuestMemoryRegion, ReadVolatile};
 use vm_memory::{
-    Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, WriteVolatile,
+    Bytes, GuestAddress, GuestAddressSpace, GuestMemoryAtomic, WriteVolatile,
 };
 use vm_migration::protocol::{MemoryRangeTable, Request, Response};
 use vm_migration::{