Wrap GuestMemoryMmap in GuestMemoryAtomic
GuestMemoryAtomic enables support for mutable memory maps, and also acts as a convenient wrapper around GuestMemory without its lifetime restrictions.

Signed-off-by: Sergio Lopez <slp@redhat.com>
This commit is contained in:
parent
309d82bf2b
commit
260a9e4e31
2 changed files with 14 additions and 4 deletions
|
|
@@ -10,7 +10,7 @@ epoll = ">=4.0.1"
|
|||
libc = ">=0.2.39"
|
||||
log = ">=0.4.6"
|
||||
virtio-bindings = "0.1.0"
|
||||
vm-memory = {version = ">=0.2.0", features = ["backend-mmap"]}
|
||||
vm-memory = {version = ">=0.2.0", features = ["backend-mmap", "backend-atomic"]}
|
||||
vm-virtio = { git = "https://github.com/cloud-hypervisor/vm-virtio", branch = "dragonball" }
|
||||
vmm-sys-util = ">=0.3.1"
|
||||
vhost_rs = { git = "https://github.com/cloud-hypervisor/vhost", branch = "dragonball", package = "vhost", features = ["vhost-user-slave"] }
|
||||
|
|
|
|||
16
src/lib.rs
16
src/lib.rs
|
|
@@ -25,7 +25,7 @@ use vhost_rs::vhost_user::{
|
|||
};
|
||||
use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
|
||||
use vm_memory::guest_memory::FileOffset;
|
||||
use vm_memory::{GuestAddress, GuestMemoryMmap};
|
||||
use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
|
||||
use vm_virtio::Queue;
|
||||
use vmm_sys_util::eventfd::EventFd;
|
||||
|
||||
|
|
@@ -77,7 +77,10 @@ pub trait VhostUserBackend: Send + Sync + 'static {
|
|||
fn set_event_idx(&mut self, enabled: bool);
|
||||
|
||||
/// Update guest memory regions.
|
||||
fn update_memory(&mut self, mem: GuestMemoryMmap) -> result::Result<(), io::Error>;
|
||||
fn update_memory(
|
||||
&mut self,
|
||||
atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>,
|
||||
) -> result::Result<(), io::Error>;
|
||||
|
||||
/// This function gets called if the backend registered some additional
|
||||
/// listeners onto specific file descriptors. The library can handle
|
||||
|
|
@@ -445,6 +448,7 @@ struct VhostUserHandler<S: VhostUserBackend> {
|
|||
max_queue_size: usize,
|
||||
queues_per_thread: Vec<u64>,
|
||||
memory: Option<Memory>,
|
||||
atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>,
|
||||
vrings: Vec<Arc<RwLock<Vring>>>,
|
||||
worker_threads: Vec<thread::JoinHandle<VringWorkerResult<()>>>,
|
||||
}
|
||||
|
|
@@ -522,6 +526,7 @@ impl<S: VhostUserBackend> VhostUserHandler<S> {
|
|||
max_queue_size,
|
||||
queues_per_thread,
|
||||
memory: None,
|
||||
atomic_mem: GuestMemoryAtomic::new(GuestMemoryMmap::new()),
|
||||
vrings,
|
||||
worker_threads,
|
||||
})
|
||||
|
|
@@ -633,10 +638,15 @@ impl<S: VhostUserBackend> VhostUserSlaveReqHandler for VhostUserHandler<S> {
|
|||
let mem = GuestMemoryMmap::from_ranges_with_files(regions).map_err(|e| {
|
||||
VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
|
||||
})?;
|
||||
|
||||
// Updating the inner GuestMemory object here will cause all our vrings to
|
||||
// see the new one the next time they call to `atomic_mem.memory()`.
|
||||
self.atomic_mem.lock().unwrap().replace(mem);
|
||||
|
||||
self.backend
|
||||
.write()
|
||||
.unwrap()
|
||||
.update_memory(mem)
|
||||
.update_memory(self.atomic_mem.clone())
|
||||
.map_err(|e| {
|
||||
VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
|
||||
})?;
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue