Support guest memory dirty page tracking

Add a generic type parameter `B: Bitmap` to several key structs to
support guest memory dirty page tracking.

Signed-off-by: Liu Jiang <gerry@linux.alibaba.com>
This commit is contained in:
Liu Jiang 2021-08-09 23:03:26 +08:00
parent ca2b434900
commit 5dc56732f8
4 changed files with 68 additions and 62 deletions

View file

@@ -25,16 +25,16 @@ use std::sync::{Arc, Mutex, RwLock};
use vhost::vhost_user::message::VhostUserProtocolFeatures;
use vhost::vhost_user::SlaveFsCacheReq;
use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};
use vm_memory::bitmap::Bitmap;
use vmm_sys_util::eventfd::EventFd;
use super::Vring;
use super::{Vring, GM};
/// Trait with interior mutability for vhost user backend servers to implement concrete services.
///
/// To support multi-threading and asynchronous IO, we enforce the `Send + Sync + 'static` bound.
/// So there's no plan to support "Rc<T>" and "RefCell<T>".
pub trait VhostUserBackend: Send + Sync + 'static {
pub trait VhostUserBackend<B: Bitmap + 'static>: Send + Sync + 'static {
/// Get number of queues supported.
fn num_queues(&self) -> usize;
@@ -70,10 +70,7 @@ pub trait VhostUserBackend: Send + Sync + 'static {
}
/// Update guest memory regions.
fn update_memory(
&self,
atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>,
) -> result::Result<(), io::Error>;
fn update_memory(&self, mem: GM<B>) -> result::Result<(), io::Error>;
/// Set handler for communicating with the master by the slave communication channel.
///
@@ -110,13 +107,13 @@ pub trait VhostUserBackend: Send + Sync + 'static {
&self,
device_event: u16,
evset: epoll::Events,
vrings: &[Vring],
vrings: &[Vring<GM<B>>],
thread_id: usize,
) -> result::Result<bool, io::Error>;
}
/// Trait without interior mutability for vhost user backend servers to implement concrete services.
pub trait VhostUserBackendMut: Send + Sync + 'static {
pub trait VhostUserBackendMut<B: Bitmap + 'static>: Send + Sync + 'static {
/// Get number of queues supported.
fn num_queues(&self) -> usize;
@@ -152,10 +149,7 @@ pub trait VhostUserBackendMut: Send + Sync + 'static {
}
/// Update guest memory regions.
fn update_memory(
&mut self,
atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>,
) -> result::Result<(), io::Error>;
fn update_memory(&mut self, mem: GM<B>) -> result::Result<(), io::Error>;
/// Set handler for communicating with the master by the slave communication channel.
///
@@ -192,12 +186,12 @@ pub trait VhostUserBackendMut: Send + Sync + 'static {
&mut self,
device_event: u16,
evset: epoll::Events,
vrings: &[Vring],
vrings: &[Vring<GM<B>>],
thread_id: usize,
) -> result::Result<bool, io::Error>;
}
impl<T: VhostUserBackend> VhostUserBackend for Arc<T> {
impl<T: VhostUserBackend<B>, B: Bitmap + 'static> VhostUserBackend<B> for Arc<T> {
fn num_queues(&self) -> usize {
self.deref().num_queues()
}
@@ -230,11 +224,8 @@ impl<T: VhostUserBackend> VhostUserBackend for Arc<T> {
self.deref().set_config(offset, buf)
}
fn update_memory(
&self,
atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>,
) -> Result<(), io::Error> {
self.deref().update_memory(atomic_mem)
fn update_memory(&self, mem: GM<B>) -> Result<(), io::Error> {
self.deref().update_memory(mem)
}
fn set_slave_req_fd(&self, vu_req: SlaveFsCacheReq) {
@@ -253,7 +244,7 @@ impl<T: VhostUserBackend> VhostUserBackend for Arc<T> {
&self,
device_event: u16,
evset: epoll::Events,
vrings: &[Vring],
vrings: &[Vring<GM<B>>],
thread_id: usize,
) -> Result<bool, io::Error> {
self.deref()
@@ -261,7 +252,7 @@ impl<T: VhostUserBackend> VhostUserBackend for Arc<T> {
}
}
impl<T: VhostUserBackendMut> VhostUserBackend for Mutex<T> {
impl<T: VhostUserBackendMut<B>, B: Bitmap + 'static> VhostUserBackend<B> for Mutex<T> {
fn num_queues(&self) -> usize {
self.lock().unwrap().num_queues()
}
@@ -294,11 +285,8 @@ impl<T: VhostUserBackendMut> VhostUserBackend for Mutex<T> {
self.lock().unwrap().set_config(offset, buf)
}
fn update_memory(
&self,
atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>,
) -> Result<(), io::Error> {
self.lock().unwrap().update_memory(atomic_mem)
fn update_memory(&self, mem: GM<B>) -> Result<(), io::Error> {
self.lock().unwrap().update_memory(mem)
}
fn set_slave_req_fd(&self, vu_req: SlaveFsCacheReq) {
@@ -317,7 +305,7 @@ impl<T: VhostUserBackendMut> VhostUserBackend for Mutex<T> {
&self,
device_event: u16,
evset: epoll::Events,
vrings: &[Vring],
vrings: &[Vring<GM<B>>],
thread_id: usize,
) -> Result<bool, io::Error> {
self.lock()
@@ -326,7 +314,7 @@ impl<T: VhostUserBackendMut> VhostUserBackend for Mutex<T> {
}
}
impl<T: VhostUserBackendMut> VhostUserBackend for RwLock<T> {
impl<T: VhostUserBackendMut<B>, B: Bitmap + 'static> VhostUserBackend<B> for RwLock<T> {
fn num_queues(&self) -> usize {
self.read().unwrap().num_queues()
}
@@ -359,11 +347,8 @@ impl<T: VhostUserBackendMut> VhostUserBackend for RwLock<T> {
self.write().unwrap().set_config(offset, buf)
}
fn update_memory(
&self,
atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>,
) -> Result<(), io::Error> {
self.write().unwrap().update_memory(atomic_mem)
fn update_memory(&self, mem: GM<B>) -> Result<(), io::Error> {
self.write().unwrap().update_memory(mem)
}
fn set_slave_req_fd(&self, vu_req: SlaveFsCacheReq) {
@@ -382,7 +367,7 @@ impl<T: VhostUserBackendMut> VhostUserBackend for RwLock<T> {
&self,
device_event: u16,
evset: epoll::Events,
vrings: &[Vring],
vrings: &[Vring<GM<B>>],
thread_id: usize,
) -> Result<bool, io::Error> {
self.write()
@@ -397,6 +382,7 @@ mod tests {
use epoll::Events;
use std::io::Error;
use std::sync::Mutex;
use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};
struct MockVhostBackend {
events: u64,
@@ -414,7 +400,7 @@ }
}
}
impl VhostUserBackendMut for MockVhostBackend {
impl VhostUserBackendMut<()> for MockVhostBackend {
fn num_queues(&self) -> usize {
2
}

View file

@@ -9,7 +9,9 @@ use std::io;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::result;
use super::{VhostUserBackend, Vring};
use vm_memory::bitmap::Bitmap;
use super::{VhostUserBackend, Vring, GM};
/// Errors related to vring epoll event handling.
#[derive(Debug)]
@@ -53,17 +55,21 @@ pub type VringEpollResult<T> = std::result::Result<T, VringEpollError>;
/// - add file descriptors to be monitored by the epoll fd
/// - remove registered file descriptors from the epoll fd
/// - run the event loop to handle pending events on the epoll fd
pub struct VringEpollHandler<S: VhostUserBackend> {
pub struct VringEpollHandler<S: VhostUserBackend<B>, B: Bitmap + 'static> {
epoll_file: File,
backend: S,
vrings: Vec<Vring>,
vrings: Vec<Vring<GM<B>>>,
thread_id: usize,
exit_event_id: Option<u16>,
}
impl<S: VhostUserBackend> VringEpollHandler<S> {
impl<S: VhostUserBackend<B>, B: Bitmap + 'static> VringEpollHandler<S, B> {
/// Create a `VringEpollHandler` instance.
pub(crate) fn new(backend: S, vrings: Vec<Vring>, thread_id: usize) -> VringEpollResult<Self> {
pub(crate) fn new(
backend: S,
vrings: Vec<Vring<GM<B>>>,
thread_id: usize,
) -> VringEpollResult<Self> {
let epoll_fd = epoll::create(true).map_err(VringEpollError::EpollCreateFd)?;
let epoll_file = unsafe { File::from_raw_fd(epoll_fd) };
let (exit_event_fd, exit_event_id) = match backend.exit_event(thread_id) {

View file

@@ -17,7 +17,9 @@ use vhost::vhost_user::message::{
};
use vhost::vhost_user::{Error as VhostUserError, Result as VhostUserResult, SlaveFsCacheReq};
use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use vm_memory::{FileOffset, GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
use vm_memory::bitmap::Bitmap;
use vm_memory::mmap::NewBitmap;
use vm_memory::{FileOffset, GuestAddress, GuestMemoryMmap, GuestRegionMmap};
use super::event_loop::{VringEpollError, VringEpollResult};
use super::*;
@@ -60,9 +62,9 @@ struct AddrMapping {
gpa_base: u64,
}
pub struct VhostUserHandler<S: VhostUserBackend> {
pub struct VhostUserHandler<S: VhostUserBackend<B>, B: Bitmap + 'static> {
backend: S,
handlers: Vec<Arc<VringEpollHandler<S>>>,
handlers: Vec<Arc<VringEpollHandler<S, B>>>,
owned: bool,
features_acked: bool,
acked_features: u64,
@@ -71,19 +73,18 @@ pub struct VhostUserHandler<S: VhostUserBackend> {
max_queue_size: usize,
queues_per_thread: Vec<u64>,
mappings: Vec<AddrMapping>,
atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>,
vrings: Vec<Vring>,
atomic_mem: GM<B>,
vrings: Vec<Vring<GM<B>>>,
worker_threads: Vec<thread::JoinHandle<VringEpollResult<()>>>,
}
impl<S: VhostUserBackend + Clone> VhostUserHandler<S> {
pub(crate) fn new(backend: S) -> VhostUserHandlerResult<Self> {
impl<S: VhostUserBackend<B> + Clone, B: Bitmap + Clone + Send + Sync> VhostUserHandler<S, B> {
pub(crate) fn new(backend: S, atomic_mem: GM<B>) -> VhostUserHandlerResult<Self> {
let num_queues = backend.num_queues();
let max_queue_size = backend.max_queue_size();
let queues_per_thread = backend.queues_per_thread();
let atomic_mem = GuestMemoryAtomic::new(GuestMemoryMmap::new());
let mut vrings: Vec<Vring> = Vec::new();
let mut vrings = Vec::new();
for _ in 0..num_queues {
let vring = Vring::new(atomic_mem.clone(), max_queue_size as u16);
vrings.push(vring);
@@ -92,7 +93,7 @@ impl<S: VhostUserBackend + Clone> VhostUserHandler<S> {
let mut handlers = Vec::new();
let mut worker_threads = Vec::new();
for (thread_id, queues_mask) in queues_per_thread.iter().enumerate() {
let mut thread_vrings: Vec<Vring> = Vec::new();
let mut thread_vrings = Vec::new();
for (index, vring) in vrings.iter().enumerate() {
if (queues_mask >> index) & 1u64 == 1u64 {
thread_vrings.push(vring.clone());
@@ -129,8 +130,10 @@ impl<S: VhostUserBackend + Clone> VhostUserHandler<S> {
worker_threads,
})
}
}
pub(crate) fn get_epoll_handlers(&self) -> Vec<Arc<VringEpollHandler<S>>> {
impl<S: VhostUserBackend<B> + Clone, B: Bitmap> VhostUserHandler<S, B> {
pub(crate) fn get_epoll_handlers(&self) -> Vec<Arc<VringEpollHandler<S, B>>> {
self.handlers.clone()
}
@@ -145,7 +148,9 @@ }
}
}
impl<S: VhostUserBackend + Clone> VhostUserSlaveReqHandlerMut for VhostUserHandler<S> {
impl<S: VhostUserBackend<B> + Clone, B: NewBitmap + Clone> VhostUserSlaveReqHandlerMut
for VhostUserHandler<S, B>
{
fn set_owner(&mut self) -> VhostUserResult<()> {
if self.owned {
return Err(VhostUserError::InvalidOperation);
@@ -519,7 +524,7 @@ impl<S: VhostUserBackend + Clone> VhostUserSlaveReqHandlerMut for VhostUserHandl
}
}
impl<S: VhostUserBackend> Drop for VhostUserHandler<S> {
impl<S: VhostUserBackend<B>, B: Bitmap> Drop for VhostUserHandler<S, B> {
fn drop(&mut self) {
for thread in self.worker_threads.drain(..) {
if let Err(e) = thread.join() {

View file

@@ -1,5 +1,5 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// Copyright 2019 Alibaba Cloud Computing. All rights reserved.
// Copyright 2019-2021 Alibaba Cloud Computing. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
@@ -17,7 +17,9 @@ use std::thread;
use vhost::vhost_user::{
Error as VhostUserError, Listener, SlaveListener, VhostUserSlaveReqHandlerMut,
};
use vm_memory::{GuestAddressSpace, GuestRegionMmap, MmapRegion};
use vm_memory::bitmap::Bitmap;
use vm_memory::mmap::NewBitmap;
use vm_memory::{GuestAddressSpace, GuestMemoryAtomic, GuestMemoryMmap, MmapRegion};
use self::handler::VhostUserHandler;
@@ -33,6 +35,9 @@ pub use self::handler::VhostUserHandlerError;
mod vring;
pub use self::vring::{Vring, VringState};
/// An alias for `GuestMemoryAtomic<GuestMemoryMmap<B>>` to simplify code.
type GM<B> = GuestMemoryAtomic<GuestMemoryMmap<B>>;
#[derive(Debug)]
/// Errors related to vhost-user daemon.
pub enum Error {
@@ -70,21 +75,25 @@ pub type Result<T> = result::Result<T, Error>;
///
/// This structure is the public API the backend is allowed to interact with in order to run
/// a fully functional vhost-user daemon.
pub struct VhostUserDaemon<S: VhostUserBackend> {
pub struct VhostUserDaemon<S: VhostUserBackend<B>, B: Bitmap + 'static> {
name: String,
handler: Arc<Mutex<VhostUserHandler<S>>>,
handler: Arc<Mutex<VhostUserHandler<S, B>>>,
main_thread: Option<thread::JoinHandle<Result<()>>>,
}
impl<S: VhostUserBackend + Clone> VhostUserDaemon<S> {
impl<S: VhostUserBackend<B> + Clone, B: NewBitmap + Clone + Send + Sync> VhostUserDaemon<S, B> {
/// Create the daemon instance, providing the backend implementation of `VhostUserBackend`.
///
/// Under the hood, this will start a dedicated thread responsible for listening onto
/// registered event. Those events can be vring events or custom events from the backend,
/// but they get to be registered later during the sequence.
pub fn new(name: String, backend: S) -> Result<Self> {
pub fn new(
name: String,
backend: S,
atomic_mem: GuestMemoryAtomic<GuestMemoryMmap<B>>,
) -> Result<Self> {
let handler = Arc::new(Mutex::new(
VhostUserHandler::new(backend).map_err(Error::NewVhostUserHandler)?,
VhostUserHandler::new(backend, atomic_mem).map_err(Error::NewVhostUserHandler)?,
));
Ok(VhostUserDaemon {
@@ -137,7 +146,7 @@ impl<S: VhostUserBackend + Clone> VhostUserDaemon<S> {
///
/// This is necessary to perform further actions like registering and unregistering some extra
/// event file descriptors.
pub fn get_epoll_handlers(&self) -> Vec<Arc<VringEpollHandler<S>>> {
pub fn get_epoll_handlers(&self) -> Vec<Arc<VringEpollHandler<S, B>>> {
self.handler.lock().unwrap().get_epoll_handlers()
}
}