vring: formalize Vring interfaces

Move struct Vring to dedicated file vring.rs and formalize interfaces
to access `Vring` objects.

Signed-off-by: Liu Jiang <gerry@linux.alibaba.com>
This commit is contained in:
Liu Jiang 2021-08-09 13:47:20 +08:00
parent 27e29d0c36
commit 18db4634f6
5 changed files with 176 additions and 117 deletions

View file

@ -110,7 +110,7 @@ pub trait VhostUserBackend: Send + Sync + 'static {
&self,
device_event: u16,
evset: epoll::Events,
vrings: &[Arc<RwLock<Vring>>],
vrings: &[Vring],
thread_id: usize,
) -> result::Result<bool, io::Error>;
}
@ -192,7 +192,7 @@ pub trait VhostUserBackendMut: Send + Sync + 'static {
&mut self,
device_event: u16,
evset: epoll::Events,
vrings: &[Arc<RwLock<Vring>>],
vrings: &[Vring],
thread_id: usize,
) -> result::Result<bool, io::Error>;
}
@ -253,7 +253,7 @@ impl<T: VhostUserBackend> VhostUserBackend for Arc<T> {
&self,
device_event: u16,
evset: epoll::Events,
vrings: &[Arc<RwLock<Vring>>],
vrings: &[Vring],
thread_id: usize,
) -> Result<bool, io::Error> {
self.deref()
@ -317,7 +317,7 @@ impl<T: VhostUserBackendMut> VhostUserBackend for Mutex<T> {
&self,
device_event: u16,
evset: epoll::Events,
vrings: &[Arc<RwLock<Vring>>],
vrings: &[Vring],
thread_id: usize,
) -> Result<bool, io::Error> {
self.lock()
@ -382,7 +382,7 @@ impl<T: VhostUserBackendMut> VhostUserBackend for RwLock<T> {
&self,
device_event: u16,
evset: epoll::Events,
vrings: &[Arc<RwLock<Vring>>],
vrings: &[Vring],
thread_id: usize,
) -> Result<bool, io::Error> {
self.write()
@ -477,7 +477,7 @@ mod tests {
&mut self,
_device_event: u16,
_evset: Events,
_vrings: &[Arc<RwLock<Vring>>],
_vrings: &[Vring],
_thread_id: usize,
) -> Result<bool, Error> {
self.events += 1;

View file

@ -8,7 +8,6 @@ use std::fs::File;
use std::io;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::result;
use std::sync::{Arc, RwLock};
use super::{VhostUserBackend, Vring};
@ -57,18 +56,14 @@ pub type VringEpollResult<T> = std::result::Result<T, VringEpollError>;
pub struct VringEpollHandler<S: VhostUserBackend> {
epoll_file: File,
backend: S,
vrings: Vec<Arc<RwLock<Vring>>>,
vrings: Vec<Vring>,
thread_id: usize,
exit_event_id: Option<u16>,
}
impl<S: VhostUserBackend> VringEpollHandler<S> {
/// Create a `VringEpollHandler` instance.
pub(crate) fn new(
backend: S,
vrings: Vec<Arc<RwLock<Vring>>>,
thread_id: usize,
) -> VringEpollResult<Self> {
pub(crate) fn new(backend: S, vrings: Vec<Vring>, thread_id: usize) -> VringEpollResult<Self> {
let epoll_fd = epoll::create(true).map_err(VringEpollError::EpollCreateFd)?;
let epoll_file = unsafe { File::from_raw_fd(epoll_fd) };
let (exit_event_fd, exit_event_id) = match backend.exit_event(thread_id) {
@ -187,16 +182,14 @@ impl<S: VhostUserBackend> VringEpollHandler<S> {
return Ok(true);
}
let num_queues = self.vrings.len();
if (device_event as usize) < num_queues {
let vring = &self.vrings[device_event as usize].read().unwrap();
if let Some(kick) = &vring.kick {
kick.read().map_err(VringEpollError::HandleEventReadKick)?;
}
if (device_event as usize) < self.vrings.len() {
let vring = &self.vrings[device_event as usize];
let enabled = vring
.read_kick()
.map_err(VringEpollError::HandleEventReadKick)?;
// If the vring is not enabled, it should not be processed.
// The event is only read to be discarded.
if !vring.enabled {
if !enabled {
return Ok(false);
}
}

View file

@ -6,19 +6,20 @@
use std::error;
use std::fs::File;
use std::io;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
use std::sync::{Arc, RwLock};
use std::os::unix::io::AsRawFd;
use std::sync::Arc;
use std::thread;
use vhost::vhost_user::message::{
VhostUserConfigFlags, VhostUserMemoryRegion, VhostUserProtocolFeatures,
VhostUserVirtioFeatures, VhostUserVringAddrFlags, VhostUserVringState,
VhostUserSingleMemoryRegion, VhostUserVirtioFeatures, VhostUserVringAddrFlags,
VhostUserVringState,
};
use vhost::vhost_user::{Error as VhostUserError, Result as VhostUserResult, SlaveFsCacheReq};
use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use vm_memory::{FileOffset, GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
use vmm_sys_util::eventfd::EventFd;
use super::event_loop::{VringEpollError, VringEpollResult};
use super::*;
const MAX_MEM_SLOTS: u64 = 32;
@ -71,7 +72,7 @@ pub struct VhostUserHandler<S: VhostUserBackend> {
queues_per_thread: Vec<u64>,
mappings: Vec<AddrMapping>,
atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>,
vrings: Vec<Arc<RwLock<Vring>>>,
vrings: Vec<Vring>,
worker_threads: Vec<thread::JoinHandle<VringEpollResult<()>>>,
}
@ -80,22 +81,18 @@ impl<S: VhostUserBackend + Clone> VhostUserHandler<S> {
let num_queues = backend.num_queues();
let max_queue_size = backend.max_queue_size();
let queues_per_thread = backend.queues_per_thread();
let atomic_mem = GuestMemoryAtomic::new(GuestMemoryMmap::new());
let mut vrings: Vec<Arc<RwLock<Vring>>> = Vec::new();
let mut vrings: Vec<Vring> = Vec::new();
for _ in 0..num_queues {
let vring = Arc::new(RwLock::new(Vring::new(
atomic_mem.clone(),
max_queue_size as u16,
)));
let vring = Vring::new(atomic_mem.clone(), max_queue_size as u16);
vrings.push(vring);
}
let mut handlers = Vec::new();
let mut worker_threads = Vec::new();
for (thread_id, queues_mask) in queues_per_thread.iter().enumerate() {
let mut thread_vrings: Vec<Arc<RwLock<Vring>>> = Vec::new();
let mut thread_vrings: Vec<Vring> = Vec::new();
for (index, vring) in vrings.iter().enumerate() {
if (queues_mask >> index) & 1u64 == 1u64 {
thread_vrings.push(vring.clone());
@ -186,8 +183,8 @@ impl<S: VhostUserBackend + Clone> VhostUserSlaveReqHandlerMut for VhostUserHandl
// been disabled by VHOST_USER_SET_VRING_ENABLE with parameter 0.
let vring_enabled =
self.acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0;
for vring in self.vrings.iter_mut() {
vring.write().unwrap().enabled = vring_enabled;
for vring in self.vrings.iter() {
vring.set_enabled(vring_enabled);
}
self.backend.acked_features(self.acked_features);
@ -256,7 +253,7 @@ impl<S: VhostUserBackend + Clone> VhostUserSlaveReqHandlerMut for VhostUserHandl
if index as usize >= self.num_queues || num == 0 || num as usize > self.max_queue_size {
return Err(VhostUserError::InvalidParam);
}
self.vrings[index as usize].write().unwrap().queue.size = num as u16;
self.vrings[index as usize].set_queue_size(num as u16);
Ok(())
}
@ -283,17 +280,7 @@ impl<S: VhostUserBackend + Clone> VhostUserSlaveReqHandlerMut for VhostUserHandl
let used_ring = self.vmm_va_to_gpa(used).map_err(|e| {
VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
})?;
self.vrings[index as usize]
.write()
.unwrap()
.queue
.desc_table = GuestAddress(desc_table);
self.vrings[index as usize]
.write()
.unwrap()
.queue
.avail_ring = GuestAddress(avail_ring);
self.vrings[index as usize].write().unwrap().queue.used_ring = GuestAddress(used_ring);
self.vrings[index as usize].set_queue_info(desc_table, avail_ring, used_ring);
Ok(())
} else {
Err(VhostUserError::InvalidParam)
@ -301,19 +288,12 @@ impl<S: VhostUserBackend + Clone> VhostUserSlaveReqHandlerMut for VhostUserHandl
}
fn set_vring_base(&mut self, index: u32, base: u32) -> VhostUserResult<()> {
self.vrings[index as usize]
.write()
.unwrap()
.queue
.set_next_avail(base as u16);
let event_idx: bool = (self.acked_features & (1 << VIRTIO_RING_F_EVENT_IDX)) != 0;
self.vrings[index as usize]
.write()
.unwrap()
.mut_queue()
.set_event_idx(event_idx);
self.vrings[index as usize].set_queue_next_avail(base as u16);
self.vrings[index as usize].set_queue_event_idx(event_idx);
self.backend.set_event_idx(event_idx);
Ok(())
}
@ -326,8 +306,8 @@ impl<S: VhostUserBackend + Clone> VhostUserSlaveReqHandlerMut for VhostUserHandl
// that file descriptor is readable) on the descriptor specified by
// VHOST_USER_SET_VRING_KICK, and stop ring upon receiving
// VHOST_USER_GET_VRING_BASE.
self.vrings[index as usize].write().unwrap().queue.ready = false;
if let Some(fd) = self.vrings[index as usize].read().unwrap().kick.as_ref() {
self.vrings[index as usize].set_queue_ready(false);
if let Some(fd) = self.vrings[index as usize].get_ref().get_kick() {
for (thread_index, queues_mask) in self.queues_per_thread.iter().enumerate() {
let shifted_queues_mask = queues_mask >> index;
if shifted_queues_mask & 1u64 == 1u64 {
@ -344,11 +324,7 @@ impl<S: VhostUserBackend + Clone> VhostUserSlaveReqHandlerMut for VhostUserHandl
}
}
let next_avail = self.vrings[index as usize]
.read()
.unwrap()
.queue
.next_avail();
let next_avail = self.vrings[index as usize].queue_next_avail();
Ok(VhostUserVringState::new(index, u32::from(next_avail)))
}
@ -362,16 +338,15 @@ impl<S: VhostUserBackend + Clone> VhostUserSlaveReqHandlerMut for VhostUserHandl
// does File, so this is safe.
// Ideally, we'd have a generic way to refer to a uniquely-owned fd,
// such as that proposed by Rust RFC #3128.
self.vrings[index as usize].write().unwrap().kick =
file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
self.vrings[index as usize].set_kick(file);
// Quote from vhost-user specification:
// Client must start ring upon receiving a kick (that is, detecting
// that file descriptor is readable) on the descriptor specified by
// VHOST_USER_SET_VRING_KICK, and stop ring upon receiving
// VHOST_USER_GET_VRING_BASE.
self.vrings[index as usize].write().unwrap().queue.ready = true;
if let Some(fd) = self.vrings[index as usize].read().unwrap().kick.as_ref() {
self.vrings[index as usize].set_queue_ready(true);
if let Some(fd) = self.vrings[index as usize].get_ref().get_kick() {
for (thread_index, queues_mask) in self.queues_per_thread.iter().enumerate() {
let shifted_queues_mask = queues_mask >> index;
if shifted_queues_mask & 1u64 == 1u64 {
@ -396,9 +371,7 @@ impl<S: VhostUserBackend + Clone> VhostUserSlaveReqHandlerMut for VhostUserHandl
return Err(VhostUserError::InvalidParam);
}
// SAFETY: see comment in set_vring_kick()
self.vrings[index as usize].write().unwrap().call =
file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
self.vrings[index as usize].set_call(file);
Ok(())
}
@ -408,9 +381,7 @@ impl<S: VhostUserBackend + Clone> VhostUserSlaveReqHandlerMut for VhostUserHandl
return Err(VhostUserError::InvalidParam);
}
// SAFETY: see comment in set_vring_kick()
self.vrings[index as usize].write().unwrap().err =
file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
self.vrings[index as usize].set_err(file);
Ok(())
}
@ -428,7 +399,7 @@ impl<S: VhostUserBackend + Clone> VhostUserSlaveReqHandlerMut for VhostUserHandl
// enabled by VHOST_USER_SET_VRING_ENABLE with parameter 1,
// or after it has been disabled by VHOST_USER_SET_VRING_ENABLE
// with parameter 0.
self.vrings[index as usize].write().unwrap().enabled = enable;
self.vrings[index as usize].set_enabled(enable);
Ok(())
}

View file

@ -1,7 +1,6 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2019 Alibaba Cloud Computing. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
//! A simple framework to run a vhost-user backend service.
@ -14,15 +13,10 @@ use std::result;
use std::sync::{Arc, Mutex};
use std::thread;
use vhost::vhost_user::message::VhostUserSingleMemoryRegion;
use vhost::vhost_user::{
Error as VhostUserError, Listener, SlaveListener, VhostUserSlaveReqHandlerMut,
};
use virtio_queue::Queue;
use vm_memory::{
GuestAddressSpace, GuestMemoryAtomic, GuestMemoryMmap, GuestRegionMmap, MmapRegion,
};
use vmm_sys_util::eventfd::EventFd;
use vm_memory::{GuestAddressSpace, GuestRegionMmap, MmapRegion};
use self::handler::VhostUserHandler;
@ -30,11 +24,14 @@ mod backend;
pub use self::backend::{VhostUserBackend, VhostUserBackendMut};
mod event_loop;
pub use self::event_loop::{VringEpollError, VringEpollHandler, VringEpollResult};
pub use self::event_loop::VringEpollHandler;
mod handler;
pub use self::handler::VhostUserHandlerError;
mod vring;
pub use self::vring::{Vring, VringState};
#[derive(Debug)]
/// Errors related to vhost-user daemon.
pub enum Error {
@ -130,35 +127,3 @@ impl<S: VhostUserBackend + Clone> VhostUserDaemon<S> {
self.handler.lock().unwrap().get_epoll_handlers()
}
}
pub struct Vring {
queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
kick: Option<EventFd>,
call: Option<EventFd>,
err: Option<EventFd>,
enabled: bool,
}
impl Vring {
fn new(atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>, max_queue_size: u16) -> Self {
Vring {
queue: Queue::new(atomic_mem, max_queue_size),
kick: None,
call: None,
err: None,
enabled: false,
}
}
pub fn mut_queue(&mut self) -> &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>> {
&mut self.queue
}
pub fn signal_used_queue(&mut self) -> result::Result<(), io::Error> {
if let Some(call) = self.call.as_ref() {
call.write(1)
} else {
Ok(())
}
}
}

130
src/vring.rs Normal file
View file

@ -0,0 +1,130 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// Copyright 2021 Alibaba Cloud Computing. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
//! Struct to maintain state information and manipulate vhost-user queues.
use std::fs::File;
use std::io;
use std::os::unix::io::{FromRawFd, IntoRawFd};
use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard};
use virtio_queue::Queue;
use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
use vmm_sys_util::eventfd::EventFd;
/// Struct to maintain raw state information for a vhost-user queue.
pub struct VringState {
    // Underlying virtio descriptor queue, backed by guest memory.
    queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>>,
    // Eventfd signaled by the frontend to notify the backend that buffers
    // are available; consumed by `Vring::read_kick()`.
    kick: Option<EventFd>,
    // Eventfd signaled by the backend to notify the frontend that buffers
    // have been used.
    call: Option<EventFd>,
    // Error notification eventfd — presumably signaled on ring errors;
    // no writer is visible here, verify against callers. NOTE(review)
    err: Option<EventFd>,
    // Whether the ring has been enabled (VHOST_USER_SET_VRING_ENABLE);
    // starts out `false`.
    enabled: bool,
}
impl VringState {
    /// Create a new `VringState` with a queue of at most `max_queue_size` entries,
    /// no event fds registered, and the ring disabled.
    fn new(atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>, max_queue_size: u16) -> Self {
        VringState {
            queue: Queue::new(atomic_mem, max_queue_size),
            kick: None,
            call: None,
            err: None,
            enabled: false,
        }
    }

    /// Get a mutable reference to the underlying `Queue` object.
    pub fn get_queue_mut(&mut self) -> &mut Queue<GuestMemoryAtomic<GuestMemoryMmap>> {
        &mut self.queue
    }

    /// Get an immutable reference to the underlying kick event fd.
    pub fn get_kick(&self) -> &Option<EventFd> {
        &self.kick
    }
}
/// Struct to maintain state information and manipulate a vhost-user queue.
///
/// Cloning a `Vring` is cheap: all clones share the same underlying
/// `VringState` through the inner `Arc<RwLock<..>>`.
#[derive(Clone)]
pub struct Vring {
    // Shared, lock-protected raw vring state.
    state: Arc<RwLock<VringState>>,
}
impl Vring {
/// Get a immutable guard to the underlying raw `VringState` object.
pub fn get_ref(&self) -> RwLockReadGuard<VringState> {
self.state.read().unwrap()
}
/// Get a mutable guard to the underlying raw `VringState` object.
pub fn get_mut(&self) -> RwLockWriteGuard<VringState> {
self.state.write().unwrap()
}
pub(crate) fn new(mem: GuestMemoryAtomic<GuestMemoryMmap>, max_queue_size: u16) -> Self {
Vring {
state: Arc::new(RwLock::new(VringState::new(mem, max_queue_size))),
}
}
pub(crate) fn set_enabled(&self, enabled: bool) {
self.get_mut().enabled = enabled;
}
pub(crate) fn set_queue_info(&self, desc_table: u64, avail_ring: u64, used_ring: u64) {
let mut state = self.get_mut();
state.queue.desc_table = GuestAddress(desc_table);
state.queue.avail_ring = GuestAddress(avail_ring);
state.queue.used_ring = GuestAddress(used_ring);
}
pub(crate) fn queue_next_avail(&self) -> u16 {
self.get_ref().queue.next_avail()
}
pub(crate) fn set_queue_next_avail(&self, base: u16) {
self.get_mut().queue.set_next_avail(base);
}
pub(crate) fn set_queue_size(&self, num: u16) {
self.get_mut().queue.size = num;
}
pub(crate) fn set_queue_event_idx(&self, enabled: bool) {
self.get_mut().queue.set_event_idx(enabled);
}
pub(crate) fn set_queue_ready(&self, ready: bool) {
self.get_mut().queue.ready = ready;
}
pub(crate) fn set_kick(&self, file: Option<File>) {
// SAFETY:
// EventFd requires that it has sole ownership of its fd. So does File, so this is safe.
// Ideally, we'd have a generic way to refer to a uniquely-owned fd, such as that proposed
// by Rust RFC #3128.
self.get_mut().kick = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
}
pub(crate) fn read_kick(&self) -> io::Result<bool> {
let state = self.get_ref();
if let Some(kick) = &state.kick {
kick.read()?;
}
Ok(state.enabled)
}
pub(crate) fn set_call(&self, file: Option<File>) {
// SAFETY: see comment in set_kick()
self.get_mut().call = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
}
pub(crate) fn set_err(&self, file: Option<File>) {
// SAFETY: see comment in set_kick()
self.get_mut().err = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
}
}