virtio-devices: add a GPU device
Some checks are pending
Cloud Hypervisor Tests (Metrics) / Tests (Metrics) (push) Waiting to run
REUSE Compliance Check / REUSE Compliance Check (push) Waiting to run
Shell scripts check / Check shell scripts (push) Waiting to run

This adds support for exposing a virtio-gpu device to guest by
implementing a vhost-user frontend compatible with crosvm's GPU device
backend.

Note that this is not the same as the "vhost-user-gpu" protocol
implemented by QEMU.

Adding a GPU device from the command line looks like this:

    --gpu socket=/path/to/crosvm-gpu-vhost-user.sock

Signed-off-by: Alyssa Ross <alyssa.ross@unikie.com>
Co-authored-by: Alyssa Ross <hi@alyssa.is>
Signed-off-by: Alyssa Ross <hi@alyssa.is>
This commit is contained in:
Alyssa Ross 2022-09-07 14:16:29 +00:00 committed by Davíð Steinn Geirsson
parent e36096db3e
commit f19a28d12b
19 changed files with 967 additions and 29 deletions

View file

@ -22,8 +22,8 @@ use option_parser::{ByteSized, ByteSizedParseError};
use thiserror::Error;
use vmm::config::RestoreConfig;
use vmm::vm_config::{
DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, UserDeviceConfig, VdpaConfig,
VsockConfig,
DeviceConfig, DiskConfig, FsConfig, GpuConfig, NetConfig, PmemConfig, UserDeviceConfig,
VdpaConfig, VsockConfig,
};
#[cfg(feature = "dbus_api")]
use zbus::{proxy, zvariant::Optional};
@ -49,6 +49,8 @@ enum Error {
AddDiskConfig(#[source] vmm::config::Error),
#[error("Error parsing filesystem syntax")]
AddFsConfig(#[source] vmm::config::Error),
#[error("Error parsing GPU syntax: {0}")]
AddGpuConfig(#[source] vmm::config::Error),
#[error("Error parsing persistent memory syntax")]
AddPmemConfig(#[source] vmm::config::Error),
#[error("Error parsing network syntax")]
@ -83,6 +85,7 @@ trait DBusApi1 {
fn vm_add_device(&self, device_config: &str) -> zbus::Result<Optional<String>>;
fn vm_add_disk(&self, disk_config: &str) -> zbus::Result<Optional<String>>;
fn vm_add_fs(&self, fs_config: &str) -> zbus::Result<Optional<String>>;
fn vm_add_gpu(&self, gpu_config: &str) -> zbus::Result<Optional<String>>;
fn vm_add_net(&self, net_config: &str) -> zbus::Result<Optional<String>>;
fn vm_add_pmem(&self, pmem_config: &str) -> zbus::Result<Optional<String>>;
fn vm_add_user_device(&self, vm_add_user_device: &str) -> zbus::Result<Optional<String>>;
@ -155,6 +158,10 @@ impl<'a> DBusApi1ProxyBlocking<'a> {
self.print_response(self.vm_add_fs(fs_config))
}
    // Forward an add-gpu request to the D-Bus API and print the response.
    fn api_vm_add_gpu(&self, gpu_config: &str) -> ApiResult {
        self.print_response(self.vm_add_gpu(gpu_config))
    }
fn api_vm_add_net(&self, net_config: &str) -> ApiResult {
self.print_response(self.vm_add_net(net_config))
}
@ -398,6 +405,17 @@ fn rest_api_do_command(matches: &ArgMatches, socket: &mut UnixStream) -> ApiResu
simple_api_command(socket, "PUT", "add-fs", Some(&fs_config))
.map_err(Error::HttpApiClient)
}
Some("add-gpu") => {
let gpu_config = add_gpu_config(
matches
.subcommand_matches("add-gpu")
.unwrap()
.get_one::<String>("gpu_config")
.unwrap(),
)?;
simple_api_command(socket, "PUT", "add-gpu", Some(&gpu_config))
.map_err(Error::HttpApiClient)
}
Some("add-pmem") => {
let pmem_config = add_pmem_config(
matches
@ -620,6 +638,16 @@ fn dbus_api_do_command(matches: &ArgMatches, proxy: &DBusApi1ProxyBlocking<'_>)
)?;
proxy.api_vm_add_fs(&fs_config)
}
Some("add-gpu") => {
let gpu_config = add_gpu_config(
matches
.subcommand_matches("add-gpu")
.unwrap()
.get_one::<String>("gpu_config")
.unwrap(),
)?;
proxy.api_vm_add_gpu(&gpu_config)
}
Some("add-pmem") => {
let pmem_config = add_pmem_config(
matches
@ -835,6 +863,13 @@ fn add_fs_config(config: &str) -> Result<String, Error> {
Ok(fs_config)
}
fn add_gpu_config(config: &str) -> Result<String, Error> {
let gpu_config = GpuConfig::parse(config).map_err(Error::AddGpuConfig)?;
let gpu_config = serde_json::to_string(&gpu_config).unwrap();
Ok(gpu_config)
}
fn add_pmem_config(config: &str) -> Result<String, Error> {
let pmem_config = PmemConfig::parse(config).map_err(Error::AddPmemConfig)?;
let pmem_config = serde_json::to_string(&pmem_config).unwrap();
@ -981,6 +1016,13 @@ fn get_cli_commands_sorted() -> Box<[Command]> {
.index(1)
.help(vmm::vm_config::FsConfig::SYNTAX),
),
Command::new("add-gpu")
.about("Add virtio-gpu backed gpu device")
.arg(
Arg::new("gpu_config")
.index(1)
.help(vmm::vm_config::GpuConfig::SYNTAX),
),
Command::new("add-net")
.about("Add network device")
.arg(Arg::new("net_config").index(1).help(NetConfig::SYNTAX)),

View file

@ -32,9 +32,9 @@ use vmm::vm_config::FwCfgConfig;
#[cfg(feature = "ivshmem")]
use vmm::vm_config::IvshmemConfig;
use vmm::vm_config::{
BalloonConfig, DeviceConfig, DiskConfig, FsConfig, LandlockConfig, NetConfig, NumaConfig,
PciSegmentConfig, PmemConfig, RateLimiterGroupConfig, TpmConfig, UserDeviceConfig, VdpaConfig,
VmConfig, VsockConfig,
BalloonConfig, DeviceConfig, DiskConfig, FsConfig, GpuConfig, LandlockConfig, NetConfig,
NumaConfig, PciSegmentConfig, PmemConfig, RateLimiterGroupConfig, TpmConfig, UserDeviceConfig,
VdpaConfig, VmConfig, VsockConfig,
};
use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::signal::block_signal;
@ -280,6 +280,11 @@ fn get_cli_options_sorted(
.help("GDB socket (UNIX domain socket): path=</path/to/a/file>")
.num_args(1)
.group("vmm-config"),
Arg::new("gpu")
.long("gpu")
.help(GpuConfig::SYNTAX)
.num_args(1..)
.group("vm-config"),
#[cfg(feature = "igvm")]
Arg::new("igvm")
.long("igvm")
@ -998,6 +1003,7 @@ mod unit_tests {
},
balloon: None,
fs: None,
gpu: None,
pmem: None,
serial: ConsoleConfig {
file: None,

View file

@ -6,7 +6,7 @@
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
use std::collections::HashMap;
use std::collections::{BTreeMap, HashMap};
use std::io::Write;
use std::num::Wrapping;
use std::sync::atomic::{AtomicBool, Ordering};
@ -50,7 +50,7 @@ pub struct VirtioSharedMemoryList {
pub mem_slot: u32,
pub addr: GuestAddress,
pub mapping: Arc<MmapRegion>,
pub region_list: Vec<VirtioSharedMemory>,
pub region_list: BTreeMap<u8, VirtioSharedMemory>,
}
/// Trait for virtio devices to be driven by a virtio transport.

View file

@ -43,7 +43,7 @@ pub use self::block::{Block, BlockState};
pub use self::console::{Console, ConsoleResizer, Endpoint};
pub use self::device::{
DmaRemapping, VirtioCommon, VirtioDevice, VirtioInterrupt, VirtioInterruptType,
VirtioSharedMemoryList,
VirtioSharedMemory, VirtioSharedMemoryList,
};
pub use self::epoll_helper::{
EPOLL_HELPER_EVENT_LAST, EpollHelper, EpollHelperError, EpollHelperHandler,
@ -91,6 +91,8 @@ pub enum ActivateError {
#[error("Failed to setup vhost-user daemon")]
VhostUserSetup(#[source] vhost_user::Error),
#[error("Failed to create seccomp filter")]
VhostUserGpuSetup(#[source] vhost_user::Error),
#[error("Failed to create seccomp filter: {0}")]
CreateSeccompFilter(#[source] seccompiler::Error),
#[error("Failed to create rate limiter")]
CreateRateLimiter(#[source] std::io::Error),

View file

@ -24,6 +24,7 @@ pub enum Thread {
VirtioRng,
VirtioVhostBlock,
VirtioVhostFs,
VirtioVhostGpu,
VirtioVhostNet,
VirtioVhostNetCtl,
VirtioVsock,
@ -192,6 +193,20 @@ fn virtio_vhost_fs_thread_rules() -> Vec<(i64, Vec<SeccompRule>)> {
]
}
/// Seccomp rules for the virtio vhost-user GPU worker thread.
///
/// Each entry allows the named syscall unconditionally (empty rule vector).
/// The list previously contained duplicate `recvmsg`/`sendmsg` entries, which
/// were redundant; each syscall is now listed exactly once.
fn virtio_vhost_gpu_thread_rules() -> Vec<(i64, Vec<SeccompRule>)> {
    vec![
        (libc::SYS_clock_nanosleep, vec![]),
        (libc::SYS_connect, vec![]),
        (libc::SYS_getcwd, vec![]),
        (libc::SYS_nanosleep, vec![]),
        (libc::SYS_recvmsg, vec![]),
        (libc::SYS_sendmsg, vec![]),
        (libc::SYS_socket, vec![]),
    ]
}
fn virtio_vhost_net_ctl_thread_rules() -> Vec<(i64, Vec<SeccompRule>)> {
vec![]
}
@ -271,6 +286,7 @@ fn get_seccomp_rules(thread_type: Thread) -> Vec<(i64, Vec<SeccompRule>)> {
Thread::VirtioRng => virtio_rng_thread_rules(),
Thread::VirtioVhostBlock => virtio_vhost_block_thread_rules(),
Thread::VirtioVhostFs => virtio_vhost_fs_thread_rules(),
Thread::VirtioVhostGpu => virtio_vhost_gpu_thread_rules(),
Thread::VirtioVhostNet => virtio_vhost_net_thread_rules(),
Thread::VirtioVhostNetCtl => virtio_vhost_net_ctl_thread_rules(),
Thread::VirtioVsock => virtio_vsock_thread_rules(),

View file

@ -1036,11 +1036,11 @@ impl PciDevice for VirtioPciDevice {
PciDeviceError::IoRegistrationFailed(shm_list.addr.raw_value(), e)
})?;
for (idx, shm) in shm_list.region_list.iter().enumerate() {
for (shmid, shm) in shm_list.region_list.iter() {
let shm_cap = VirtioPciCap64::new(
PciCapabilityType::SharedMemory,
VIRTIO_SHM_BAR_INDEX as u8,
idx as u8,
*shmid,
shm.offset,
shm.len,
);

View file

@ -0,0 +1,411 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// Copyright 2022 Unikie
// Copyright 2023 Alyssa Ross <hi@alyssa.is>
// SPDX-License-Identifier: Apache-2.0
use std::io::{self, Write};
use std::os::unix::io::AsRawFd;
use std::sync::{Arc, Barrier, Mutex};
use std::{result, thread};
use event_monitor::event;
use log::error;
use seccompiler::SeccompAction;
use vhost::vhost_user::message::{
VhostSharedMemoryRegion, VhostUserConfigFlags, VhostUserProtocolFeatures, VhostUserShmemMapMsg,
VhostUserShmemUnmapMsg, VhostUserVirtioFeatures,
};
use vhost::vhost_user::{
FrontendReqHandler, HandlerResult, VhostUserFrontend, VhostUserFrontendReqHandler,
};
use virtio_bindings::virtio_gpu::{
VIRTIO_GPU_F_CONTEXT_INIT, VIRTIO_GPU_F_RESOURCE_BLOB, VIRTIO_GPU_F_RESOURCE_UUID,
VIRTIO_GPU_F_VIRGL,
};
use virtio_queue::Queue;
use vm_device::UserspaceMapping;
use vm_memory::volatile_memory::PtrGuardMut;
use vm_memory::{GuestMemoryAtomic, VolatileMemory};
use vm_migration::{MigratableError, Pausable};
use vmm_sys_util::eventfd::EventFd;
use super::vu_common_ctrl::VhostUserHandle;
use super::{Error, Result};
use crate::seccomp_filters::Thread;
use crate::thread_helper::spawn_virtio_thread;
use crate::vhost_user::VhostUserCommon;
use crate::{
ActivateError, ActivateResult, GuestMemoryMmap, GuestRegionMmap, MmapRegion,
VIRTIO_F_IOMMU_PLATFORM, VIRTIO_F_VERSION_1, VirtioCommon, VirtioDevice, VirtioDeviceType,
VirtioInterrupt, VirtioSharedMemoryList,
};
const QUEUE_SIZES: &[u16] = &[256, 16];
const NUM_QUEUES: u16 = QUEUE_SIZES.len() as _;
/// Handler for requests sent by the vhost-user backend to the frontend.
/// Holds the mmap'd shared-memory region into which the backend asks us to
/// map and unmap file ranges (see the `VhostUserFrontendReqHandler` impl).
struct BackendReqHandler {
    // Shared-memory region backing the device's host-visible cache BAR.
    mapping: Arc<MmapRegion>,
}
impl BackendReqHandler {
fn ptr_guard_mut(&self, offset: u64, len: u64) -> io::Result<PtrGuardMut> {
let shm_offset = offset
.try_into()
.map_err(|_| io::Error::from_raw_os_error(libc::EINVAL))?;
let len = len
.try_into()
.map_err(|_| io::Error::from_raw_os_error(libc::EINVAL))?;
Ok(self
.mapping
.get_slice(shm_offset, len)
.map_err(|_| io::Error::from_raw_os_error(libc::EINVAL))?
.ptr_guard_mut())
}
}
impl VhostUserFrontendReqHandler for BackendReqHandler {
    /// Handle a shared-memory map request from the backend: map `req.len`
    /// bytes of `fd` (starting at `req.fd_offset`) into the shared-memory
    /// region at `req.shm_offset`, replacing whatever was mapped there.
    fn shmem_map(&self, req: &VhostUserShmemMapMsg, fd: &dyn AsRawFd) -> HandlerResult<u64> {
        // Validates that the requested range lies within the region.
        let target = self.ptr_guard_mut(req.shm_offset, req.len)?;

        // SAFETY: we've checked we're only giving addr and length
        // within the region, and are passing MAP_FIXED to ensure they
        // are respected.
        let ret = unsafe {
            libc::mmap(
                target.as_ptr().cast(),
                target.len(),
                // NOTE(review): the request flags are used directly as mmap
                // prot bits — assumes the wire flag values line up with
                // PROT_READ/PROT_WRITE; confirm against the vhost crate.
                req.flags.bits() as i32,
                // https://bugzilla.kernel.org/show_bug.cgi?id=217238
                if req.flags.bits() as i32 & libc::PROT_WRITE != 0 {
                    libc::MAP_SHARED
                } else {
                    libc::MAP_PRIVATE
                } | libc::MAP_FIXED,
                fd.as_raw_fd(),
                req.fd_offset as libc::off_t,
            )
        };
        if ret == libc::MAP_FAILED {
            return Err(io::Error::last_os_error());
        }

        Ok(0)
    }

    /// Handle a shared-memory unmap request from the backend: overwrite the
    /// given range with a fresh anonymous PROT_NONE mapping so the previously
    /// mapped backend pages are no longer reachable from the guest.
    fn shmem_unmap(&self, req: &VhostUserShmemUnmapMsg) -> HandlerResult<u64> {
        let target = self.ptr_guard_mut(req.shm_offset, req.len)?;

        // SAFETY: we've checked we're only giving addr and length
        // within the region, and are passing MAP_FIXED to ensure they
        // are respected.
        let ret = unsafe {
            libc::mmap(
                target.as_ptr().cast(),
                target.len(),
                libc::PROT_NONE,
                libc::MAP_ANONYMOUS | libc::MAP_PRIVATE | libc::MAP_FIXED,
                -1,
                0,
            )
        };
        if ret == libc::MAP_FAILED {
            return Err(io::Error::last_os_error());
        }

        Ok(0)
    }
}
/// A virtio-gpu device implemented as a vhost-user frontend, compatible with
/// crosvm's GPU device backend.
pub struct Gpu {
    // Shared virtio device state (features, queues, pause/kill eventfds).
    common: VirtioCommon,
    // Shared vhost-user frontend state (socket handle, protocol features).
    vu_common: VhostUserCommon,
    // Device identifier used in events and thread naming.
    id: String,
    // Hold ownership of the memory that is allocated for the device
    // which will be automatically dropped when the device is dropped
    cache: Option<VirtioSharedMemoryList>,
    // Whether the backend negotiated BACKEND_REQ, enabling the
    // backend-to-frontend request channel (shmem map/unmap).
    backend_req_support: bool,
    // Seccomp policy applied to the spawned worker thread.
    seccomp_action: SeccompAction,
    // Guest memory handle, captured at activation time for later
    // add_memory_region() calls.
    guest_memory: Option<GuestMemoryAtomic<GuestMemoryMmap>>,
    // Worker thread handle; unparked on resume.
    epoll_thread: Option<thread::JoinHandle<()>>,
    // EventFd used to signal the VMM on unrecoverable thread errors.
    exit_evt: EventFd,
    // Whether to advertise VIRTIO_F_IOMMU_PLATFORM to the guest.
    iommu: bool,
}
impl Gpu {
    /// Create a new virtio-gpu device.
    ///
    /// Connects to the vhost-user backend at `path`, negotiates virtio and
    /// vhost-user protocol features, and fetches the backend's shared memory
    /// region descriptors (exactly one region is required). Returns the
    /// device together with that region descriptor.
    pub fn new(
        id: String,
        path: &str,
        seccomp_action: SeccompAction,
        exit_evt: EventFd,
        iommu: bool,
    ) -> Result<(Gpu, VhostSharedMemoryRegion)> {
        // Connect to the vhost-user socket.
        let mut vu = VhostUserHandle::connect_vhost_user(false, path, NUM_QUEUES as u64, false)?;

        // Virtio features offered to the backend for negotiation.
        let avail_features = 1 << VIRTIO_F_VERSION_1
            | 1 << VIRTIO_GPU_F_VIRGL
            | 1 << VIRTIO_GPU_F_RESOURCE_UUID
            | 1 << VIRTIO_GPU_F_RESOURCE_BLOB
            | 1 << VIRTIO_GPU_F_CONTEXT_INIT
            | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();

        // Vhost-user protocol features required for config-space access,
        // the backend request channel, and shared memory regions.
        let avail_protocol_features = VhostUserProtocolFeatures::CONFIG
            | VhostUserProtocolFeatures::BACKEND_REQ
            | VhostUserProtocolFeatures::SHARED_MEMORY_REGIONS;

        let (acked_features, acked_protocol_features) =
            vu.negotiate_features_vhost_user(avail_features, avail_protocol_features)?;

        // The GPU device is expected to expose exactly one shared memory
        // region (the host-visible cache).
        let shm_regions = vu.get_shared_memory_regions()?;
        if shm_regions.len() != 1 {
            return Err(Error::VhostUserUnexpectedSharedMemoryRegionsCount(
                1,
                shm_regions.len(),
            ));
        }
        let shm_region = shm_regions[0];

        Ok((
            Gpu {
                common: VirtioCommon {
                    device_type: VirtioDeviceType::Gpu as u32,
                    avail_features: acked_features,
                    // If part of the available features that have been acked, the
                    // PROTOCOL_FEATURES bit must be already set through the VIRTIO
                    // acked features as we know the guest would never ack it, thus
                    // the feature would be lost.
                    acked_features: acked_features
                        & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits(),
                    paused_sync: Some(Arc::new(Barrier::new(NUM_QUEUES as usize))),
                    queue_sizes: QUEUE_SIZES.to_vec(),
                    min_queues: NUM_QUEUES,
                    ..Default::default()
                },
                vu_common: VhostUserCommon {
                    vu: Some(Arc::new(Mutex::new(vu))),
                    acked_protocol_features,
                    socket_path: path.to_string(),
                    vu_num_queues: NUM_QUEUES as usize,
                    ..Default::default()
                },
                id,
                cache: None,
                backend_req_support: acked_protocol_features
                    & VhostUserProtocolFeatures::BACKEND_REQ.bits()
                    != 0,
                seccomp_action,
                guest_memory: None,
                epoll_thread: None,
                exit_evt,
                iommu,
            },
            shm_region,
        ))
    }

    /// Install the shared-memory list ("cache") backing the device; the Gpu
    /// takes ownership so the memory lives as long as the device does.
    pub fn set_cache(&mut self, cache: VirtioSharedMemoryList) {
        self.cache = Some(cache);
    }
}
impl Drop for Gpu {
    /// Ask the worker thread to stop when the device goes away.
    fn drop(&mut self) {
        let Some(kill_evt) = self.common.kill_evt.take() else {
            return;
        };
        // A failed write is ignored: there is nothing we can do about it
        // while dropping.
        let _ = kill_evt.write(1);
    }
}
impl VirtioDevice for Gpu {
    fn device_type(&self) -> u32 {
        self.common.device_type
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.common.queue_sizes
    }

    /// Features negotiated with the backend, plus VIRTIO_F_IOMMU_PLATFORM
    /// when the device is placed behind an IOMMU.
    fn features(&self) -> u64 {
        let mut features = self.common.avail_features;
        if self.iommu {
            features |= 1u64 << VIRTIO_F_IOMMU_PLATFORM;
        }
        features
    }

    fn ack_features(&mut self, value: u64) {
        self.common.ack_features(value);
    }

    /// Read device config space by forwarding the request to the vhost-user
    /// backend and copying the returned bytes into `data`. Failures are
    /// logged rather than propagated.
    fn read_config(&self, offset: u64, mut data: &mut [u8]) {
        if let Some(vu) = &self.vu_common.vu
            && let Err(e) = vu
                .lock()
                .unwrap()
                .socket_handle()
                .get_config(
                    offset as u32,
                    data.len() as u32,
                    VhostUserConfigFlags::WRITABLE,
                    data,
                )
                .map_err(|e| format!("{e:?}"))
                .and_then(|(_, config)| data.write_all(&config).map_err(|e| format!("{e:?}")))
        {
            error!("Failed getting vhost-user-gpu configuration: {e:?}");
        }
    }

    /// Activate the device: wire up the backend request channel (when
    /// negotiated and a cache region exists), hand queues and memory to the
    /// vhost-user backend, and spawn the worker thread.
    fn activate(
        &mut self,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        interrupt_cb: Arc<dyn VirtioInterrupt>,
        queues: Vec<(usize, Queue, EventFd)>,
    ) -> ActivateResult {
        self.common.activate(&queues, interrupt_cb.clone())?;
        self.guest_memory = Some(mem.clone());

        // Initialize backend communication. Only set up the request handler
        // when BACKEND_REQ was negotiated AND a cache region was installed,
        // since the handler maps into that region.
        let backend_req_handler = if self.backend_req_support {
            if let Some(cache) = self.cache.as_ref() {
                let vu_frontend_req_handler = Arc::new(BackendReqHandler {
                    mapping: cache.mapping.clone(),
                });
                let mut req_handler =
                    FrontendReqHandler::new(vu_frontend_req_handler).map_err(|e| {
                        ActivateError::VhostUserGpuSetup(Error::FrontendReqHandlerCreation(e))
                    })?;
                if self.vu_common.acked_protocol_features
                    & VhostUserProtocolFeatures::REPLY_ACK.bits()
                    != 0
                {
                    req_handler.set_reply_ack_flag(true);
                }
                Some(req_handler)
            } else {
                None
            }
        } else {
            None
        };

        // Run a dedicated thread for handling potential reconnections with
        // the backend.
        let (kill_evt, pause_evt) = self.common.dup_eventfds();
        let mut handler = self.vu_common.activate(
            mem,
            &queues,
            interrupt_cb,
            self.common.acked_features,
            backend_req_handler,
            kill_evt,
            pause_evt,
        )?;

        let paused = self.common.paused.clone();
        let paused_sync = self.common.paused_sync.clone();

        let mut epoll_threads = Vec::new();
        spawn_virtio_thread(
            &self.id,
            &self.seccomp_action,
            Thread::VirtioVhostGpu,
            &mut epoll_threads,
            &self.exit_evt,
            move || handler.run(&paused, paused_sync.as_ref().unwrap()),
        )?;
        self.epoll_thread = Some(epoll_threads.remove(0));

        event!("virtio-device", "activated", "id", &self.id);
        Ok(())
    }

    /// Reset the device: resume if paused, reset the vhost-user session,
    /// stop the worker thread, and hand the interrupt callback back to the
    /// transport for reuse.
    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        // We first must resume the virtio thread if it was paused.
        if self.common.pause_evt.take().is_some() {
            self.common.resume().ok()?;
        }

        if let Some(vu) = &self.vu_common.vu
            && let Err(e) = vu.lock().unwrap().reset_vhost_user()
        {
            error!("Failed to reset vhost-user daemon: {e:?}");
            return None;
        }

        if let Some(kill_evt) = self.common.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }

        event!("virtio-device", "reset", "id", &self.id);

        // Return the interrupt
        Some(self.common.interrupt_cb.take().unwrap())
    }

    fn shutdown(&mut self) {
        self.vu_common.shutdown();
    }

    /// The shared-memory region list exposed to the guest (the GPU cache),
    /// if one was installed via set_cache().
    fn get_shm_regions(&self) -> Option<VirtioSharedMemoryList> {
        self.cache.clone()
    }

    /// Replace the shared-memory region list; only valid when a cache was
    /// previously installed.
    fn set_shm_regions(
        &mut self,
        shm_regions: VirtioSharedMemoryList,
    ) -> std::result::Result<(), crate::Error> {
        if let Some(cache) = self.cache.as_mut() {
            *cache = shm_regions;
            Ok(())
        } else {
            Err(crate::Error::SetShmRegionsNotSupported)
        }
    }

    /// Forward hot-plugged guest memory regions to the vhost-user backend.
    fn add_memory_region(
        &mut self,
        region: &Arc<GuestRegionMmap>,
    ) -> std::result::Result<(), crate::Error> {
        self.vu_common.add_memory_region(&self.guest_memory, region)
    }

    /// Userspace mappings the VMM must track for the cache region (if any).
    fn userspace_mappings(&self) -> Vec<UserspaceMapping> {
        let mut mappings = Vec::new();
        if let Some(cache) = self.cache.as_ref() {
            mappings.push(UserspaceMapping {
                mapping: cache.mapping.clone(),
                mem_slot: cache.mem_slot,
                addr: cache.addr,
                mergeable: false,
            });
        }
        mappings
    }
}
impl Pausable for Gpu {
    /// Pause the vhost-user side first, then the common virtio state.
    fn pause(&mut self) -> result::Result<(), MigratableError> {
        self.vu_common.pause()?;
        self.common.pause()
    }

    /// Resume in the opposite order, waking the worker thread so it can
    /// observe the state change.
    fn resume(&mut self) -> result::Result<(), MigratableError> {
        self.common.resume()?;

        if let Some(worker) = self.epoll_thread.as_ref() {
            worker.thread().unpark();
        }

        self.vu_common.resume()
    }
}

View file

@ -33,11 +33,13 @@ use crate::{
pub mod blk;
pub mod fs;
pub mod gpu;
pub mod net;
pub mod vu_common_ctrl;
pub use self::blk::Blk;
pub use self::fs::*;
pub use self::gpu::*;
pub use self::net::Net;
pub use self::vu_common_ctrl::VhostUserConfig;
@ -75,6 +77,8 @@ pub enum Error {
VhostUserGetQueueMaxNum(#[source] VhostError),
#[error("Get protocol features failed")]
VhostUserGetProtocolFeatures(#[source] VhostError),
#[error("Get shared memory regions failed")]
VhostUserGetSharedMemoryRegions(#[source] VhostError),
#[error("Get vring base failed")]
VhostUserGetVringBase(#[source] VhostError),
#[error("Vhost-user Backend not support vhost-user protocol")]
@ -123,6 +127,8 @@ pub enum Error {
VhostUserSetInflight(#[source] VhostError),
#[error("Failed setting the log base")]
VhostUserSetLogBase(#[source] VhostError),
#[error("Expected {0} shared memory regions; got {1}")]
VhostUserUnexpectedSharedMemoryRegionsCount(usize, usize),
#[error("Invalid used address")]
UsedAddress,
#[error("Invalid features provided from vhost-user backend")]

View file

@ -13,7 +13,8 @@ use std::time::{Duration, Instant};
use log::{error, info};
use vhost::vhost_kern::vhost_binding::{VHOST_F_LOG_ALL, VHOST_VRING_F_LOG};
use vhost::vhost_user::message::{
VhostUserHeaderFlag, VhostUserInflight, VhostUserProtocolFeatures, VhostUserVirtioFeatures,
VhostSharedMemoryRegion, VhostUserHeaderFlag, VhostUserInflight, VhostUserProtocolFeatures,
VhostUserVirtioFeatures,
};
use vhost::vhost_user::{
Frontend, FrontendReqHandler, VhostUserFrontend, VhostUserFrontendReqHandler,
@ -107,6 +108,12 @@ impl VhostUserHandle {
.map_err(Error::VhostUserAddMemReg)
}
    /// Fetch the shared memory region descriptors advertised by the
    /// vhost-user backend.
    pub fn get_shared_memory_regions(&self) -> Result<Vec<VhostSharedMemoryRegion>> {
        self.vu
            .get_shared_memory_regions()
            .map_err(Error::VhostUserGetSharedMemoryRegions)
    }
pub fn negotiate_features_vhost_user(
&mut self,
avail_features: u64,

View file

@ -22,7 +22,7 @@ use super::{ApiAction, ApiRequest};
#[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
use crate::api::VmCoredump;
use crate::api::{
AddDisk, Body, VmAddDevice, VmAddFs, VmAddNet, VmAddPmem, VmAddUserDevice, VmAddVdpa,
AddDisk, Body, VmAddDevice, VmAddFs, VmAddGpu, VmAddNet, VmAddPmem, VmAddUserDevice, VmAddVdpa,
VmAddVsock, VmBoot, VmCounters, VmCreate, VmDelete, VmInfo, VmPause, VmPowerButton, VmReboot,
VmReceiveMigration, VmRemoveDevice, VmResize, VmResizeZone, VmRestore, VmResume,
VmSendMigration, VmShutdown, VmSnapshot, VmmPing, VmmShutdown,
@ -144,6 +144,11 @@ impl DBusApi {
self.vm_action(&VmAddFs, fs_config).await
}
    // Deserialize the JSON-encoded GPU config and dispatch it as a VmAddGpu
    // API action.
    async fn vm_add_gpu(&self, gpu_config: String) -> Result<Optional<String>> {
        let gpu_config = serde_json::from_str(&gpu_config).map_err(api_error)?;
        self.vm_action(&VmAddGpu, gpu_config).await
    }
async fn vm_add_net(&self, net_config: String) -> Result<Optional<String>> {
let mut net_config: NetConfig = serde_json::from_str(&net_config).map_err(api_error)?;
if net_config.fds.is_some() {

View file

@ -45,10 +45,10 @@ use crate::api::VmCoredump;
use crate::api::http::http_endpoint::fds_helper::{attach_fds_to_cfg, attach_fds_to_cfgs};
use crate::api::http::{EndpointHandler, HttpError, error_response};
use crate::api::{
AddDisk, ApiAction, ApiError, ApiRequest, NetConfig, VmAddDevice, VmAddFs, VmAddNet, VmAddPmem,
VmAddUserDevice, VmAddVdpa, VmAddVsock, VmBoot, VmConfig, VmCounters, VmDelete, VmNmi, VmPause,
VmPowerButton, VmReboot, VmReceiveMigration, VmRemoveDevice, VmResize, VmResizeDisk,
VmResizeZone, VmRestore, VmResume, VmSendMigration, VmShutdown, VmSnapshot,
AddDisk, ApiAction, ApiError, ApiRequest, NetConfig, VmAddDevice, VmAddFs, VmAddGpu, VmAddNet,
VmAddPmem, VmAddUserDevice, VmAddVdpa, VmAddVsock, VmBoot, VmConfig, VmCounters, VmDelete,
VmNmi, VmPause, VmPowerButton, VmReboot, VmReceiveMigration, VmRemoveDevice, VmResize,
VmResizeDisk, VmResizeZone, VmRestore, VmResume, VmSendMigration, VmShutdown, VmSnapshot,
};
use crate::config::RestoreConfig;
use crate::cpu::Error as CpuError;
@ -419,6 +419,7 @@ vm_action_put_handler!(VmNmi);
vm_action_put_handler_body!(VmAddDevice);
vm_action_put_handler_body!(AddDisk);
vm_action_put_handler_body!(VmAddFs);
vm_action_put_handler_body!(VmAddGpu);
vm_action_put_handler_body!(VmAddPmem);
vm_action_put_handler_body!(VmAddVdpa);
vm_action_put_handler_body!(VmAddVsock);

View file

@ -28,10 +28,10 @@ use self::http_endpoint::{VmActionHandler, VmCreate, VmInfo, VmmPing, VmmShutdow
#[cfg(all(target_arch = "x86_64", feature = "guest_debug"))]
use crate::api::VmCoredump;
use crate::api::{
AddDisk, ApiError, ApiRequest, VmAddDevice, VmAddFs, VmAddNet, VmAddPmem, VmAddUserDevice,
VmAddVdpa, VmAddVsock, VmBoot, VmCounters, VmDelete, VmNmi, VmPause, VmPowerButton, VmReboot,
VmReceiveMigration, VmRemoveDevice, VmResize, VmResizeDisk, VmResizeZone, VmRestore, VmResume,
VmSendMigration, VmShutdown, VmSnapshot,
AddDisk, ApiError, ApiRequest, VmAddDevice, VmAddFs, VmAddGpu, VmAddNet, VmAddPmem,
VmAddUserDevice, VmAddVdpa, VmAddVsock, VmBoot, VmCounters, VmDelete, VmNmi, VmPause,
VmPowerButton, VmReboot, VmReceiveMigration, VmRemoveDevice, VmResize, VmResizeDisk,
VmResizeZone, VmRestore, VmResume, VmSendMigration, VmShutdown, VmSnapshot,
};
use crate::landlock::Landlock;
use crate::seccomp_filters::{Thread, get_seccomp_filter};
@ -196,6 +196,10 @@ pub static HTTP_ROUTES: LazyLock<HttpRoutes> = LazyLock::new(|| {
endpoint!("/vm.add-fs"),
Box::new(VmActionHandler::new(&VmAddFs)),
);
r.routes.insert(
endpoint!("/vm.add-gpu"),
Box::new(VmActionHandler::new(&VmAddGpu)),
);
r.routes.insert(
endpoint!("/vm.add-net"),
Box::new(VmActionHandler::new(&VmAddNet)),

View file

@ -51,8 +51,8 @@ use crate::config::RestoreConfig;
use crate::device_tree::DeviceTree;
use crate::vm::{Error as VmError, VmState};
use crate::vm_config::{
DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, UserDeviceConfig, VdpaConfig,
VmConfig, VsockConfig,
DeviceConfig, DiskConfig, FsConfig, GpuConfig, NetConfig, PmemConfig, UserDeviceConfig,
VdpaConfig, VmConfig, VsockConfig,
};
/// API errors are sent back from the VMM API server through the ApiResponse.
@ -170,6 +170,10 @@ pub enum ApiError {
#[error("The fs could not be added to the VM")]
VmAddFs(#[source] VmError),
/// The gpu could not be added to the VM.
#[error("The GPU could not be added to the VM: {0}")]
VmAddGpu(#[source] VmError),
/// The pmem device could not be added to the VM.
#[error("The pmem device could not be added to the VM")]
VmAddPmem(#[source] VmError),
@ -340,6 +344,8 @@ pub trait RequestHandler {
fn vm_add_fs(&mut self, fs_cfg: FsConfig) -> Result<Option<Vec<u8>>, VmError>;
fn vm_add_gpu(&mut self, gpu_cfg: GpuConfig) -> Result<Option<Vec<u8>>, VmError>;
fn vm_add_pmem(&mut self, pmem_cfg: PmemConfig) -> Result<Option<Vec<u8>>, VmError>;
fn vm_add_net(&mut self, net_cfg: NetConfig) -> Result<Option<Vec<u8>>, VmError>;
@ -539,6 +545,43 @@ impl ApiAction for VmAddFs {
}
}
/// API action that hot-adds a virtio-gpu device to a running VM.
pub struct VmAddGpu;

impl ApiAction for VmAddGpu {
    type RequestBody = GpuConfig;
    type ResponseBody = Option<Body>;

    /// Build the deferred request closure executed on the VMM thread; the
    /// outcome is reported back through `response_sender`.
    fn request(
        &self,
        config: Self::RequestBody,
        response_sender: Sender<ApiResponse>,
    ) -> ApiRequest {
        Box::new(move |vmm| {
            info!("API request event: VmAddGpu {config:?}");

            let response = vmm
                .vm_add_gpu(config)
                .map_err(ApiError::VmAddGpu)
                .map(ApiResponsePayload::VmAction);

            response_sender
                .send(response)
                .map_err(VmmError::ApiResponseSend)?;

            // Returning false: this request does not shut the VMM down.
            Ok(false)
        })
    }

    /// Send the request through the API channel and wait for the response
    /// body.
    fn send(
        &self,
        api_evt: EventFd,
        api_sender: Sender<ApiRequest>,
        data: Self::RequestBody,
    ) -> ApiResult<Self::ResponseBody> {
        get_response_body(self, api_evt, api_sender, data)
    }
}
pub struct VmAddPmem;
impl ApiAction for VmAddPmem {

View file

@ -277,6 +277,28 @@ paths:
500:
description: The new device could not be added to the VM instance.
/vm.add-gpu:
put:
summary: Add a new virtio-gpu device to the VM
requestBody:
description: The details of the new virtio-gpu
content:
application/json:
schema:
$ref: "#/components/schemas/GpuConfig"
required: true
responses:
200:
description: The new device was successfully added to the VM instance.
content:
application/json:
schema:
$ref: "#/components/schemas/PciDeviceInfo"
204:
description: The new device was successfully (cold) added to the VM instance.
500:
description: The new device could not be added to the VM instance.
/vm.add-pmem:
put:
summary: Add a new pmem device to the VM
@ -603,6 +625,10 @@ components:
type: array
items:
$ref: "#/components/schemas/FsConfig"
gpu:
type: array
items:
$ref: "#/components/schemas/GpuConfig"
pmem:
type: array
items:
@ -1053,6 +1079,19 @@ components:
id:
type: string
GpuConfig:
required:
- socket
type: object
properties:
socket:
type: string
pci_segment:
type: integer
format: int16
id:
type: string
PmemConfig:
required:
- file

View file

@ -45,6 +45,9 @@ pub enum Error {
/// Filesystem socket is missing
#[error("Error parsing --fs: socket missing")]
ParseFsSockMissing,
/// GPU socket is missing
#[error("Error parsing --gpu: socket missing")]
ParseGpuSockMissing,
/// Missing persistent memory file parameter.
#[error("Error parsing --pmem: file missing")]
ParsePmemFileMissing,
@ -90,6 +93,9 @@ pub enum Error {
/// Error parsing filesystem parameters
#[error("Error parsing --fs")]
ParseFileSystem(#[source] OptionParserError),
/// Error parsing GPU parameters
#[error("Error parsing --gpu")]
ParseGpu(#[source] OptionParserError),
/// Error parsing persistent memory parameters
#[error("Error parsing --pmem")]
ParsePersistentMemory(#[source] OptionParserError),
@ -393,6 +399,7 @@ pub struct VmParams<'a> {
pub rng: &'a str,
pub balloon: Option<&'a str>,
pub fs: Option<Vec<&'a str>>,
pub gpu: Option<Vec<&'a str>>,
pub pmem: Option<Vec<&'a str>>,
pub serial: &'a str,
pub console: &'a str,
@ -454,6 +461,9 @@ impl<'a> VmParams<'a> {
let fs: Option<Vec<&str>> = args
.get_many::<String>("fs")
.map(|x| x.map(|y| y as &str).collect());
let gpu: Option<Vec<&str>> = args
.get_many::<String>("gpu")
.map(|x| x.map(|y| y as &str).collect());
let pmem: Option<Vec<&str>> = args
.get_many::<String>("pmem")
.map(|x| x.map(|y| y as &str).collect());
@ -508,6 +518,7 @@ impl<'a> VmParams<'a> {
rng,
balloon,
fs,
gpu,
pmem,
serial,
console,
@ -1797,6 +1808,49 @@ impl FwCfgItem {
}
}
impl GpuConfig {
pub const SYNTAX: &'static str = "virtio-gpu parameters \
\"socket=<socket_path>,id=<device_id>,pci_segment=<segment_id>\"";
pub fn parse(gpu: &str) -> Result<Self> {
let mut parser = OptionParser::new();
parser.add("socket").add("id").add("pci_segment");
parser.parse(gpu).map_err(Error::ParseGpu)?;
let socket = PathBuf::from(parser.get("socket").ok_or(Error::ParseGpuSockMissing)?);
let id = parser.get("id");
let pci_segment = parser
.convert("pci_segment")
.map_err(Error::ParseGpu)?
.unwrap_or_default();
Ok(GpuConfig {
socket,
id,
pci_segment,
})
}
pub fn validate(&self, vm_config: &VmConfig) -> ValidationResult<()> {
if let Some(platform_config) = vm_config.platform.as_ref() {
if self.pci_segment >= platform_config.num_pci_segments {
return Err(ValidationError::InvalidPciSegment(self.pci_segment));
}
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref()
&& iommu_segments.contains(&self.pci_segment)
{
return Err(ValidationError::IommuNotSupportedOnSegment(
self.pci_segment,
));
}
}
Ok(())
}
}
impl PmemConfig {
pub const SYNTAX: &'static str = "Persistent memory parameters \
\"file=<backing_file_path>,size=<persistent_memory_size>,iommu=on|off,\
@ -2723,6 +2777,17 @@ impl VmConfig {
}
}
if let Some(gpus) = &self.gpu {
if !gpus.is_empty() && !self.memory.shared {
return Err(ValidationError::VhostUserRequiresSharedMemory);
}
for gpu in gpus {
gpu.validate(self)?;
Self::validate_identifier(&mut id_list, &gpu.id)?;
}
}
if let Some(pmems) = &self.pmem {
for pmem in pmems {
pmem.validate(self)?;
@ -2976,6 +3041,15 @@ impl VmConfig {
fs = Some(fs_config_list);
}
let mut gpu: Option<Vec<GpuConfig>> = None;
if let Some(gpu_list) = &vm_params.gpu {
let mut gpu_config_list = Vec::new();
for item in gpu_list.iter() {
gpu_config_list.push(GpuConfig::parse(item)?);
}
gpu = Some(gpu_config_list);
}
let mut pmem: Option<Vec<PmemConfig>> = None;
if let Some(pmem_list) = &vm_params.pmem {
let mut pmem_config_list = Vec::new();
@ -3112,6 +3186,7 @@ impl VmConfig {
rng,
balloon,
fs,
gpu,
pmem,
serial,
console,
@ -3173,6 +3248,13 @@ impl VmConfig {
removed |= fs.len() != len;
}
// Remove if gpu device
if let Some(gpu) = self.gpu.as_mut() {
let len = gpu.len();
gpu.retain(|dev| dev.id.as_ref().map(|id| id.as_ref()) != Some(id));
removed |= gpu.len() != len;
}
// Remove if net device
if let Some(net) = self.net.as_mut() {
let len = net.len();
@ -3245,6 +3327,7 @@ impl Clone for VmConfig {
#[cfg(feature = "pvmemcontrol")]
pvmemcontrol: self.pvmemcontrol.clone(),
fs: self.fs.clone(),
gpu: self.gpu.clone(),
pmem: self.pmem.clone(),
serial: self.serial.clone(),
console: self.console.clone(),
@ -3767,6 +3850,23 @@ mod unit_tests {
Ok(())
}
    /// Baseline GpuConfig the parsing test compares against.
    fn gpu_fixture() -> GpuConfig {
        GpuConfig {
            socket: PathBuf::from("/tmp/sock"),
            id: None,
            pci_segment: 0,
        }
    }

    #[test]
    fn test_parse_gpu() -> Result<()> {
        // "socket" must be supplied
        assert!(GpuConfig::parse("").is_err());
        // A socket alone is sufficient; other fields take their defaults.
        assert_eq!(GpuConfig::parse("socket=/tmp/sock")?, gpu_fixture());
        Ok(())
    }
fn pmem_fixture() -> PmemConfig {
PmemConfig {
file: PathBuf::from("/tmp/pmem"),
@ -4153,6 +4253,7 @@ mod unit_tests {
rng: RngConfig::default(),
balloon: None,
fs: None,
gpu: None,
pmem: None,
serial: default_serial(),
console: default_console(),
@ -4356,6 +4457,7 @@ mod unit_tests {
},
balloon: None,
fs: None,
gpu: None,
pmem: None,
serial: ConsoleConfig {
file: None,
@ -4552,6 +4654,13 @@ mod unit_tests {
Err(ValidationError::VhostUserRequiresSharedMemory)
);
let mut invalid_config = valid_config.clone();
invalid_config.gpu = Some(vec![gpu_fixture()]);
assert_eq!(
invalid_config.validate(),
Err(ValidationError::VhostUserRequiresSharedMemory)
);
let mut still_valid_config = valid_config.clone();
still_valid_config.memory.shared = true;
still_valid_config.validate().unwrap();

View file

@ -12,6 +12,7 @@
use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::fs::{File, OpenOptions};
use std::io::{self, IsTerminal, Seek, SeekFrom, stdout};
use std::iter::once;
use std::num::Wrapping;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::{AsRawFd, FromRawFd};
@ -94,7 +95,7 @@ use virtio_devices::transport::{VirtioPciDevice, VirtioPciDeviceActivator, Virti
use virtio_devices::vhost_user::VhostUserConfig;
use virtio_devices::{
AccessPlatformMapping, ActivateError, Block, Endpoint, IommuMapping, VdpaDmaMapping,
VirtioMemMappingSource,
VirtioMemMappingSource, VirtioSharedMemory, VirtioSharedMemoryList,
};
use vm_allocator::{AddressAllocator, SystemAllocator};
use vm_device::dma_mapping::ExternalDmaMapping;
@ -127,8 +128,8 @@ use crate::serial_manager::{Error as SerialManagerError, SerialManager};
use crate::vm_config::IvshmemConfig;
use crate::vm_config::{
ConsoleOutputMode, DEFAULT_IOMMU_ADDRESS_WIDTH_BITS, DEFAULT_PCI_SEGMENT_APERTURE_WEIGHT,
DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, UserDeviceConfig, VdpaConfig,
VhostMode, VmConfig, VsockConfig,
DeviceConfig, DiskConfig, FsConfig, GpuConfig, NetConfig, PmemConfig, UserDeviceConfig,
VdpaConfig, VhostMode, VmConfig, VsockConfig,
};
use crate::{DEVICE_MANAGER_SNAPSHOT_ID, GuestRegionMmap, PciDeviceInfo, device_node};
@ -157,6 +158,7 @@ const IVSHMEM_DEVICE_NAME: &str = "__ivshmem";
// identifiers if the user doesn't give one
const DISK_DEVICE_NAME_PREFIX: &str = "_disk";
const FS_DEVICE_NAME_PREFIX: &str = "_fs";
const GPU_DEVICE_NAME_PREFIX: &str = "_gpu";
const NET_DEVICE_NAME_PREFIX: &str = "_net";
const PMEM_DEVICE_NAME_PREFIX: &str = "_pmem";
const VDPA_DEVICE_NAME_PREFIX: &str = "_vdpa";
@ -201,10 +203,18 @@ pub enum DeviceManagerError {
#[error("Cannot create virtio-fs device")]
CreateVirtioFs(#[source] virtio_devices::vhost_user::Error),
/// Cannot create virtio-gpu device
#[error("Cannot create virtio-gpu device: {0}")]
CreateVirtioGpu(#[source] virtio_devices::vhost_user::Error),
/// Virtio-fs device was created without a socket.
#[error("Virtio-fs device was created without a socket")]
NoVirtioFsSock,
/// Virtio-gpu device was created without a socket.
#[error("Virtio-gpu device was created without a socket")]
NoVirtioGpuSock,
/// Cannot create vhost-user-blk device
#[error("Cannot create vhost-user-blk device")]
CreateVhostUserBlk(#[source] virtio_devices::vhost_user::Error),
@ -322,6 +332,10 @@ pub enum DeviceManagerError {
#[error("Cannot find a memory range for virtio-fs")]
FsRangeAllocation,
/// Cannot find a memory range for virtio-gpu
#[error("Cannot find a memory range for virtio-gpu")]
GpuRangeAllocation,
/// Error creating serial output file
#[error("Error creating serial output file")]
SerialOutputFileOpen(#[source] io::Error),
@ -2548,6 +2562,9 @@ impl DeviceManager {
// Add virtio-fs if required
self.make_virtio_fs_devices()?;
// Add virtio-gpu if required
self.make_virtio_gpu_devices()?;
// Add virtio-pmem if required
self.make_virtio_pmem_devices()?;
@ -3146,6 +3163,119 @@ impl DeviceManager {
Ok(())
}
/// Create a single vhost-user virtio-gpu device from `gpu_cfg`.
///
/// Assigns an id when the config carries none (and writes it back so the
/// same name survives a reboot), reserves a 64-bit PCI memory range for
/// the device's shared-memory region, backs it with a placeholder
/// `PROT_NONE` anonymous mapping, and records the device in the device
/// tree.
fn make_virtio_gpu_device(
    &mut self,
    gpu_cfg: &mut GpuConfig,
) -> DeviceManagerResult<MetaVirtioDevice> {
    // Reuse the configured id, or generate one and persist it in the
    // config so the device keeps its name across reboots.
    let id = if let Some(id) = &gpu_cfg.id {
        id.clone()
    } else {
        let id = self.next_device_name(GPU_DEVICE_NAME_PREFIX)?;
        gpu_cfg.id = Some(id.clone());
        id
    };

    info!("Creating virtio-gpu device: {gpu_cfg:?}");

    let mut node = device_node!(id);

    // The vhost-user frontend needs the socket path as a &str; a
    // non-UTF-8 path is reported as a missing socket. Bail out early
    // rather than nesting the whole construction in an if-let.
    let Some(gpu_socket) = gpu_cfg.socket.to_str() else {
        return Err(DeviceManagerError::NoVirtioGpuSock);
    };

    let (mut virtio_gpu_device, region) = virtio_devices::vhost_user::Gpu::new(
        id.clone(),
        gpu_socket,
        self.seccomp_action.clone(),
        self.exit_evt
            .try_clone()
            .map_err(DeviceManagerError::EventFd)?,
        self.force_iommu,
    )
    .map_err(DeviceManagerError::CreateVirtioGpu)?;

    // Reserve guest-physical space for the shared-memory region on the
    // requested PCI segment. In crosvm, the 8 GiB bar is 8 GiB-aligned,
    // hence the allocation is aligned to the region's own length.
    let cache_base = self.pci_segments[gpu_cfg.pci_segment as usize]
        .mem64_allocator
        .lock()
        .unwrap()
        .allocate(None, region.length as GuestUsize, Some(region.length))
        .ok_or(DeviceManagerError::GpuRangeAllocation)?
        .raw_value();

    // Update the node with correct resource information.
    node.resources.push(Resource::MmioAddressRange {
        base: cache_base,
        size: region.length,
    });

    // Back the reserved range with an inaccessible anonymous mapping;
    // the backend maps real buffers into it later.
    let mmap_region = MmapRegion::build(
        None,
        region.length as usize,
        libc::PROT_NONE,
        libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
    )
    .map_err(DeviceManagerError::NewMmapRegion)?;

    // SAFETY: `mmap_region.size()` and `mmap_region.as_ptr()` refer to an allocation.
    // We remove the userspace mapping before dropping the device if the device is
    // ejected.
    let mem_slot = unsafe {
        self.memory_manager
            .lock()
            .unwrap()
            .create_userspace_mapping(
                cache_base,
                mmap_region.size(),
                mmap_region.as_ptr(),
                false,
                false,
                false,
            )
            .map_err(DeviceManagerError::MemoryManager)?
    };

    // Expose the whole mapping as a single virtio shared-memory entry.
    let region_list = once((
        region.id,
        VirtioSharedMemory {
            offset: 0,
            len: region.length,
        },
    ))
    .collect();
    virtio_gpu_device.set_cache(VirtioSharedMemoryList {
        mapping: Arc::new(mmap_region),
        mem_slot,
        addr: GuestAddress(cache_base),
        region_list,
    });

    self.device_tree.lock().unwrap().insert(id.clone(), node);

    Ok(MetaVirtioDevice {
        virtio_device: Arc::new(Mutex::new(virtio_gpu_device))
            as Arc<Mutex<dyn virtio_devices::VirtioDevice>>,
        iommu: false,
        id,
        pci_segment: gpu_cfg.pci_segment,
        dma_handler: None,
    })
}
/// Create every virtio-gpu device listed in the VM configuration.
fn make_virtio_gpu_devices(&mut self) -> DeviceManagerResult<()> {
    // Work on a snapshot of the GPU list so that ids generated during
    // device creation can be written back to the config afterwards.
    let mut gpu_devices = self.config.lock().unwrap().gpu.clone();
    for gpu_cfg in gpu_devices.iter_mut().flatten() {
        let device = self.make_virtio_gpu_device(gpu_cfg)?;
        self.virtio_devices.push(device);
    }
    self.config.lock().unwrap().gpu = gpu_devices;
    Ok(())
}
fn make_virtio_pmem_device(
&mut self,
pmem_cfg: &mut PmemConfig,
@ -4585,6 +4715,7 @@ impl DeviceManager {
VirtioDeviceType::Block
| VirtioDeviceType::Pmem
| VirtioDeviceType::Fs
| VirtioDeviceType::Gpu
| VirtioDeviceType::Vsock => {}
_ => return Err(DeviceManagerError::RemovalNotAllowed(device_type)),
}
@ -4876,6 +5007,13 @@ impl DeviceManager {
self.hotplug_virtio_pci_device(device)
}
/// Hotplug a virtio-gpu device described by `gpu_cfg`.
pub fn add_gpu(&mut self, gpu_cfg: &mut GpuConfig) -> DeviceManagerResult<PciDeviceInfo> {
    // Reject duplicate identifiers before any resources get allocated.
    self.validate_identifier(&gpu_cfg.id)?;

    // Build the device, then attach it to the PCI bus at runtime.
    let gpu_device = self.make_virtio_gpu_device(gpu_cfg)?;
    self.hotplug_virtio_pci_device(gpu_device)
}
pub fn add_pmem(&mut self, pmem_cfg: &mut PmemConfig) -> DeviceManagerResult<PciDeviceInfo> {
self.validate_identifier(&pmem_cfg.id)?;

View file

@ -59,8 +59,8 @@ use crate::migration::{recv_vm_config, recv_vm_state};
use crate::seccomp_filters::{Thread, get_seccomp_filter};
use crate::vm::{Error as VmError, Vm, VmState};
use crate::vm_config::{
DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, UserDeviceConfig, VdpaConfig,
VmConfig, VsockConfig,
DeviceConfig, DiskConfig, FsConfig, GpuConfig, NetConfig, PmemConfig, UserDeviceConfig,
VdpaConfig, VmConfig, VsockConfig,
};
mod acpi;
@ -2125,6 +2125,31 @@ impl RequestHandler for Vmm {
}
}
fn vm_add_gpu(&mut self, gpu_cfg: GpuConfig) -> result::Result<Option<Vec<u8>>, VmError> {
self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
{
// Validate the configuration change in a cloned configuration
let mut config = self.vm_config.as_ref().unwrap().lock().unwrap().clone();
add_to_config(&mut config.gpu, gpu_cfg.clone());
config.validate().map_err(VmError::ConfigValidation)?;
}
if let Some(ref mut vm) = self.vm {
let info = vm.add_gpu(gpu_cfg).inspect_err(|e| {
error!("Error when adding new gpu to the VM: {e:?}");
})?;
serde_json::to_vec(&info)
.map(Some)
.map_err(VmError::SerializeJson)
} else {
// Update VmConfig by adding the new device.
let mut config = self.vm_config.as_ref().unwrap().lock().unwrap();
add_to_config(&mut config.gpu, gpu_cfg);
Ok(None)
}
}
fn vm_add_pmem(&mut self, pmem_cfg: PmemConfig) -> result::Result<Option<Vec<u8>>, VmError> {
self.vm_config.as_ref().ok_or(VmError::VmNotCreated)?;
@ -2443,6 +2468,7 @@ mod unit_tests {
},
balloon: None,
fs: None,
gpu: None,
pmem: None,
serial: ConsoleConfig {
file: None,
@ -2679,6 +2705,55 @@ mod unit_tests {
);
}
#[test]
fn test_vmm_vm_cold_add_gpu() {
    let mut vmm = create_dummy_vmm();
    let gpu_config = GpuConfig::parse("socket=/tmp/sock").unwrap();

    // Adding a device before the VM has been created must fail.
    assert!(matches!(
        vmm.vm_add_gpu(gpu_config.clone()),
        Err(VmError::VmNotCreated)
    ));

    let _ = vmm.vm_create(create_dummy_vm_config());

    // The dummy configuration starts without any GPU device.
    assert!(
        vmm.vm_config
            .as_ref()
            .unwrap()
            .lock()
            .unwrap()
            .gpu
            .is_none()
    );

    // A cold add returns Ok(None): nothing is hotplugged, the device is
    // only recorded in the configuration.
    let result = vmm.vm_add_gpu(gpu_config.clone());
    assert!(result.unwrap().is_none());

    // Exactly the device we added must now be present in the config.
    let recorded = vmm
        .vm_config
        .as_ref()
        .unwrap()
        .lock()
        .unwrap()
        .gpu
        .clone()
        .unwrap();
    assert_eq!(recorded.len(), 1);
    assert_eq!(recorded[0], gpu_config);
}
#[test]
fn test_vmm_vm_cold_add_pmem() {
let mut vmm = create_dummy_vmm();

View file

@ -100,8 +100,8 @@ use crate::migration::{SNAPSHOT_CONFIG_FILE, SNAPSHOT_STATE_FILE, url_to_path};
#[cfg(feature = "fw_cfg")]
use crate::vm_config::FwCfgConfig;
use crate::vm_config::{
DeviceConfig, DiskConfig, FsConfig, HotplugMethod, NetConfig, NumaConfig, PayloadConfig,
PmemConfig, UserDeviceConfig, VdpaConfig, VmConfig, VsockConfig,
DeviceConfig, DiskConfig, FsConfig, GpuConfig, HotplugMethod, NetConfig, NumaConfig,
PayloadConfig, PmemConfig, UserDeviceConfig, VdpaConfig, VmConfig, VsockConfig,
};
use crate::{
CPU_MANAGER_SNAPSHOT_ID, DEVICE_MANAGER_SNAPSHOT_ID, GuestMemoryMmap,
@ -2136,6 +2136,30 @@ impl Vm {
Ok(pci_device_info)
}
pub fn add_gpu(&mut self, mut gpu_cfg: GpuConfig) -> Result<PciDeviceInfo> {
let pci_device_info = self
.device_manager
.lock()
.unwrap()
.add_gpu(&mut gpu_cfg)
.map_err(Error::DeviceManager)?;
// Update VmConfig by adding the new device. This is important to
// ensure the device would be created in case of a reboot.
{
let mut config = self.config.lock().unwrap();
add_to_config(&mut config.gpu, gpu_cfg);
}
self.device_manager
.lock()
.unwrap()
.notify_hotplug(AcpiNotificationFlags::PCI_DEVICES_CHANGED)
.map_err(Error::DeviceManager)?;
Ok(pci_device_info)
}
pub fn add_pmem(&mut self, mut pmem_cfg: PmemConfig) -> Result<PciDeviceInfo> {
let pci_device_info = self
.device_manager

View file

@ -469,6 +469,15 @@ impl ApplyLandlock for FsConfig {
}
}
/// User-supplied configuration for a vhost-user virtio-gpu device
/// (frontend for a backend such as crosvm's GPU device).
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct GpuConfig {
    // Path to the backend's vhost-user Unix socket.
    pub socket: PathBuf,
    // Optional device identifier; one is generated (with the "_gpu"
    // prefix) when left unset.
    #[serde(default)]
    pub id: Option<String>,
    // PCI segment the device is placed on; defaults to segment 0.
    #[serde(default)]
    pub pci_segment: u16,
}
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct PmemConfig {
pub file: PathBuf,
@ -922,6 +931,7 @@ pub struct VmConfig {
pub rng: RngConfig,
pub balloon: Option<BalloonConfig>,
pub fs: Option<Vec<FsConfig>>,
pub gpu: Option<Vec<GpuConfig>>,
pub pmem: Option<Vec<PmemConfig>>,
#[serde(default = "default_serial")]
pub serial: ConsoleConfig,