vring: introduce trait VringT

Introduce trait VringT, and provide three implementations of it:
VringState, VringMutex, VringRwLock.

Signed-off-by: Liu Jiang <gerry@linux.alibaba.com>
This commit is contained in:
Liu Jiang 2021-08-24 20:15:55 +08:00 committed by Jiang Liu
parent f363fb2b53
commit ed929c0cbd
5 changed files with 366 additions and 115 deletions

View file

@ -28,7 +28,7 @@ use vhost::vhost_user::SlaveFsCacheReq;
use vm_memory::bitmap::Bitmap;
use vmm_sys_util::eventfd::EventFd;
use super::{Vring, GM};
use super::{VringRwLock, GM};
/// Trait with interior mutability for vhost user backend servers to implement concrete services.
///
@ -107,7 +107,7 @@ pub trait VhostUserBackend<B: Bitmap + 'static = ()>: Send + Sync + 'static {
&self,
device_event: u16,
evset: epoll::Events,
vrings: &[Vring<GM<B>>],
vrings: &[VringRwLock<GM<B>>],
thread_id: usize,
) -> result::Result<bool, io::Error>;
}
@ -186,7 +186,7 @@ pub trait VhostUserBackendMut<B: Bitmap + 'static = ()>: Send + Sync + 'static {
&mut self,
device_event: u16,
evset: epoll::Events,
vrings: &[Vring<GM<B>>],
vrings: &[VringRwLock<GM<B>>],
thread_id: usize,
) -> result::Result<bool, io::Error>;
}
@ -244,7 +244,7 @@ impl<T: VhostUserBackend<B>, B: Bitmap + 'static> VhostUserBackend<B> for Arc<T>
&self,
device_event: u16,
evset: epoll::Events,
vrings: &[Vring<GM<B>>],
vrings: &[VringRwLock<GM<B>>],
thread_id: usize,
) -> Result<bool, io::Error> {
self.deref()
@ -305,7 +305,7 @@ impl<T: VhostUserBackendMut<B>, B: Bitmap + 'static> VhostUserBackend<B> for Mut
&self,
device_event: u16,
evset: epoll::Events,
vrings: &[Vring<GM<B>>],
vrings: &[VringRwLock<GM<B>>],
thread_id: usize,
) -> Result<bool, io::Error> {
self.lock()
@ -367,7 +367,7 @@ impl<T: VhostUserBackendMut<B>, B: Bitmap + 'static> VhostUserBackend<B> for RwL
&self,
device_event: u16,
evset: epoll::Events,
vrings: &[Vring<GM<B>>],
vrings: &[VringRwLock<GM<B>>],
thread_id: usize,
) -> Result<bool, io::Error> {
self.write()
@ -463,7 +463,7 @@ pub mod tests {
&mut self,
_device_event: u16,
_evset: Events,
_vrings: &[Vring],
_vrings: &[VringRwLock],
_thread_id: usize,
) -> Result<bool, Error> {
self.events += 1;

View file

@ -12,7 +12,8 @@ use std::result;
use vm_memory::bitmap::Bitmap;
use vmm_sys_util::eventfd::EventFd;
use super::{VhostUserBackend, Vring, GM};
use super::vring::VringT;
use super::{VhostUserBackend, VringRwLock, GM};
/// Errors related to vring epoll event handling.
#[derive(Debug)]
@ -59,7 +60,7 @@ pub type VringEpollResult<T> = std::result::Result<T, VringEpollError>;
pub struct VringEpollHandler<S: VhostUserBackend<B>, B: Bitmap + 'static> {
epoll_file: File,
backend: S,
vrings: Vec<Vring<GM<B>>>,
vrings: Vec<VringRwLock<GM<B>>>,
thread_id: usize,
exit_event_fd: Option<EventFd>,
exit_event_id: Option<u16>,
@ -69,7 +70,7 @@ impl<S: VhostUserBackend<B>, B: Bitmap + 'static> VringEpollHandler<S, B> {
/// Create a `VringEpollHandler` instance.
pub(crate) fn new(
backend: S,
vrings: Vec<Vring<GM<B>>>,
vrings: Vec<VringRwLock<GM<B>>>,
thread_id: usize,
) -> VringEpollResult<Self> {
let epoll_fd = epoll::create(true).map_err(VringEpollError::EpollCreateFd)?;
@ -234,7 +235,7 @@ mod tests {
let mem = GuestMemoryAtomic::new(
GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
);
let vring = Vring::new(mem, 0x1000);
let vring = VringRwLock::new(mem, 0x1000);
let backend = Arc::new(Mutex::new(MockVhostBackend::new()));
let handler = VringEpollHandler::new(backend, vec![vring], 0x1).unwrap();

View file

@ -15,14 +15,22 @@ use vhost::vhost_user::message::{
VhostUserSingleMemoryRegion, VhostUserVirtioFeatures, VhostUserVringAddrFlags,
VhostUserVringState,
};
use vhost::vhost_user::{Error as VhostUserError, Result as VhostUserResult, SlaveFsCacheReq};
use vhost::vhost_user::{
Error as VhostUserError, Result as VhostUserResult, SlaveFsCacheReq,
VhostUserSlaveReqHandlerMut,
};
use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use vm_memory::bitmap::Bitmap;
use vm_memory::mmap::NewBitmap;
use vm_memory::{FileOffset, GuestAddress, GuestMemoryMmap, GuestRegionMmap};
use vm_memory::{
FileOffset, GuestAddress, GuestAddressSpace, GuestMemoryMmap, GuestRegionMmap, MmapRegion,
};
use super::backend::VhostUserBackend;
use super::event_loop::VringEpollHandler;
use super::event_loop::{VringEpollError, VringEpollResult};
use super::*;
use super::vring::{VringRwLock, VringT};
use super::GM;
const MAX_MEM_SLOTS: u64 = 32;
@ -74,7 +82,7 @@ pub struct VhostUserHandler<S: VhostUserBackend<B>, B: Bitmap + 'static> {
queues_per_thread: Vec<u64>,
mappings: Vec<AddrMapping>,
atomic_mem: GM<B>,
vrings: Vec<Vring<GM<B>>>,
vrings: Vec<VringRwLock<GM<B>>>,
worker_threads: Vec<thread::JoinHandle<VringEpollResult<()>>>,
}
@ -86,7 +94,7 @@ impl<S: VhostUserBackend<B> + Clone, B: Bitmap + Clone + Send + Sync> VhostUserH
let mut vrings = Vec::new();
for _ in 0..num_queues {
let vring = Vring::new(atomic_mem.clone(), max_queue_size as u16);
let vring = VringRwLock::new(atomic_mem.clone(), max_queue_size as u16);
vrings.push(vring);
}
@ -194,7 +202,7 @@ impl<S: VhostUserBackend<B>, B: NewBitmap + Clone> VhostUserSlaveReqHandlerMut
// been disabled by VHOST_USER_SET_VRING_ENABLE with parameter 0.
let vring_enabled =
self.acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0;
for vring in self.vrings.iter() {
for vring in self.vrings.iter_mut() {
vring.set_enabled(vring_enabled);
}

View file

@ -14,12 +14,10 @@ use std::result;
use std::sync::{Arc, Mutex};
use std::thread;
use vhost::vhost_user::{
Error as VhostUserError, Listener, SlaveListener, VhostUserSlaveReqHandlerMut,
};
use vhost::vhost_user::{Error as VhostUserError, Listener, SlaveListener};
use vm_memory::bitmap::Bitmap;
use vm_memory::mmap::NewBitmap;
use vm_memory::{GuestAddressSpace, GuestMemoryAtomic, GuestMemoryMmap, MmapRegion};
use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};
use self::handler::VhostUserHandler;
@ -33,7 +31,7 @@ mod handler;
pub use self::handler::VhostUserHandlerError;
mod vring;
pub use self::vring::{Vring, VringState};
pub use self::vring::{VringRwLock, VringState};
/// An alias for `GuestMemoryAtomic<GuestMemoryMmap<B>>` to simplify code.
type GM<B> = GuestMemoryAtomic<GuestMemoryMmap<B>>;

View file

@ -7,16 +7,109 @@
use std::fs::File;
use std::io;
use std::ops::Deref;
use std::os::unix::io::{FromRawFd, IntoRawFd};
use std::result::Result;
use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard};
use std::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
use virtio_queue::{Error as VirtQueError, Queue};
use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemoryAtomic, GuestMemoryMmap};
use vmm_sys_util::eventfd::EventFd;
/// Enum to hold a shared reference to the underlying `VringState` object.
///
/// Unifies the three ways a caller may obtain read access to a `VringState`:
/// a plain borrow (lock-free, single-threaded use) or a guard from one of the
/// two lock-protected wrappers. Dereferences to `VringState` in every case.
pub enum VringStateGuard<'a, M: GuestAddressSpace> {
    /// A plain borrow of a `VringState` object (no locking involved).
    StateObject(&'a VringState<M>),
    /// A `MutexGuard` holding a `VringState` object locked via `VringMutex`.
    MutexGuard(MutexGuard<'a, VringState<M>>),
    /// A read guard holding a `VringState` object locked via `VringRwLock`.
    RwLockReadGuard(RwLockReadGuard<'a, VringState<M>>),
}
impl<'a, M: GuestAddressSpace> Deref for VringStateGuard<'a, M> {
    type Target = VringState<M>;

    /// Resolve the guard to a shared `VringState` reference, whichever
    /// locking flavor produced it.
    fn deref(&self) -> &Self::Target {
        match self {
            // Plain borrow: hand it back directly.
            Self::StateObject(state) => state,
            // Lock guards: strip the guard layer to reach the state.
            Self::MutexGuard(guard) => &**guard,
            Self::RwLockReadGuard(guard) => &**guard,
        }
    }
}
/*
impl<'a, M: GuestAddressSpace> DerefMut for VringStateGuard<'a, M> {
fn deref_mut(&mut self) -> &mut Self::Target {
match self {
VringStateGuard::StateObject(v) => v,
VringStateGuard::MutexGuard(v) => v.deref_mut(),
}
}
}
*/
/// Trait for objects managing a vhost-user virtio queue (vring).
///
/// Implemented in this module by `VringState` (no locking), `VringMutex`
/// (`Mutex`-protected) and `VringRwLock` (`RwLock`-protected).
pub trait VringT<M: GuestAddressSpace> {
    /// Create a new instance of Vring.
    fn new(mem: M, max_queue_size: u16) -> Self;

    /// Get a shared guard to the underlying `VringState` object.
    fn get_ref(&self) -> VringStateGuard<M>;

    /// Add a used descriptor into the used queue.
    fn add_used(&mut self, desc_index: u16, len: u32) -> Result<(), VirtQueError>;

    /// Notify the vhost-user master that used descriptors have been put into the used queue.
    fn signal_used_queue(&self) -> io::Result<()>;

    /// Enable event notification for queue.
    fn enable_notification(&mut self) -> Result<bool, VirtQueError>;

    /// Disable event notification for queue.
    fn disable_notification(&mut self) -> Result<(), VirtQueError>;

    /// Check whether a notification to the guest is needed.
    fn needs_notification(&mut self) -> Result<bool, VirtQueError>;

    /// Set vring enabled state.
    fn set_enabled(&mut self, enabled: bool);

    /// Set queue addresses for descriptor table, available ring and used ring.
    fn set_queue_info(&mut self, desc_table: u64, avail_ring: u64, used_ring: u64);

    /// Get queue next avail head.
    fn queue_next_avail(&self) -> u16;

    /// Set queue next avail head.
    fn set_queue_next_avail(&mut self, base: u16);

    /// Set configured queue size.
    fn set_queue_size(&mut self, num: u16);

    /// Enable/disable queue event index feature.
    fn set_queue_event_idx(&mut self, enabled: bool);

    /// Set queue enabled state.
    fn set_queue_ready(&mut self, ready: bool);

    /// Set `EventFd` for kick.
    fn set_kick(&mut self, file: Option<File>);

    /// Read event from the kick `EventFd`.
    fn read_kick(&self) -> io::Result<bool>;

    /// Set `EventFd` for call.
    fn set_call(&mut self, file: Option<File>);

    /// Set `EventFd` for err.
    fn set_err(&mut self, file: Option<File>);
}
/// Struct to maintain raw state information for a vhost-user queue.
pub struct VringState<M: GuestAddressSpace> {
///
/// This struct maintains all information of a virtio queue, and could be used as a `VringT`
/// object for single-threaded context.
pub struct VringState<M: GuestAddressSpace = GuestMemoryAtomic<GuestMemoryMmap>> {
queue: Queue<M>,
kick: Option<EventFd>,
call: Option<EventFd>,
@ -25,6 +118,23 @@ pub struct VringState<M: GuestAddressSpace> {
}
impl<M: GuestAddressSpace> VringState<M> {
/// Get the `EventFd` for kick.
pub fn get_kick(&self) -> &Option<EventFd> {
&self.kick
}
/// Get an immutable reference to the underlying raw `Queue` object.
pub fn get_queue(&self) -> &Queue<M> {
&self.queue
}
/// Get a mutable reference to the underlying raw `Queue` object.
pub fn get_queue_mut(&mut self) -> &mut Queue<M> {
&mut self.queue
}
}
impl<M: GuestAddressSpace> VringT<M> for VringState<M> {
fn new(mem: M, max_queue_size: u16) -> Self {
VringState {
queue: Queue::new(mem, max_queue_size),
@ -35,110 +145,73 @@ impl<M: GuestAddressSpace> VringState<M> {
}
}
/// Get a mutable reference to the underlying raw `Queue` object.
pub fn get_queue_mut(&mut self) -> &mut Queue<M> {
&mut self.queue
fn get_ref(&self) -> VringStateGuard<M> {
VringStateGuard::StateObject(self)
}
/// Get a immutable reference to the kick event fd.
pub fn get_kick(&self) -> &Option<EventFd> {
&self.kick
}
}
/// Struct to maintain state information and manipulate a vhost-user queue.
#[derive(Clone)]
pub struct Vring<M: GuestAddressSpace = GuestMemoryAtomic<GuestMemoryMmap>> {
state: Arc<RwLock<VringState<M>>>,
}
impl<M: GuestAddressSpace> Vring<M> {
/// Get a immutable guard to the underlying raw `VringState` object.
pub fn get_ref(&self) -> RwLockReadGuard<VringState<M>> {
self.state.read().unwrap()
fn add_used(&mut self, desc_index: u16, len: u32) -> Result<(), VirtQueError> {
self.queue.add_used(desc_index, len)
}
/// Get a mutable guard to the underlying raw `VringState` object.
pub fn get_mut(&self) -> RwLockWriteGuard<VringState<M>> {
self.state.write().unwrap()
}
/// Add an used descriptor into the used queue.
pub fn add_used(&self, desc_index: u16, len: u32) -> Result<(), VirtQueError> {
self.get_mut().get_queue_mut().add_used(desc_index, len)
}
/// Notify the vhost-user master that used descriptors have been put into the used queue.
pub fn signal_used_queue(&self) -> io::Result<()> {
if let Some(call) = self.get_ref().call.as_ref() {
fn signal_used_queue(&self) -> io::Result<()> {
if let Some(call) = self.call.as_ref() {
call.write(1)
} else {
Ok(())
}
}
/// Enable event notification for queue.
pub fn enable_notification(&self) -> Result<bool, VirtQueError> {
self.get_mut().get_queue_mut().enable_notification()
fn enable_notification(&mut self) -> Result<bool, VirtQueError> {
self.queue.enable_notification()
}
/// Disable event notification for queue.
pub fn disable_notification(&self) -> Result<(), VirtQueError> {
self.get_mut().get_queue_mut().disable_notification()
fn disable_notification(&mut self) -> Result<(), VirtQueError> {
self.queue.disable_notification()
}
/// Check whether a notification to the guest is needed.
pub fn needs_notification(&self) -> Result<bool, VirtQueError> {
self.get_mut().get_queue_mut().needs_notification()
fn needs_notification(&mut self) -> Result<bool, VirtQueError> {
self.queue.needs_notification()
}
pub(crate) fn new(mem: M, max_queue_size: u16) -> Self {
Vring {
state: Arc::new(RwLock::new(VringState::new(mem, max_queue_size))),
}
fn set_enabled(&mut self, enabled: bool) {
self.enabled = enabled;
}
pub(crate) fn set_enabled(&self, enabled: bool) {
self.get_mut().enabled = enabled;
fn set_queue_info(&mut self, desc_table: u64, avail_ring: u64, used_ring: u64) {
self.queue.desc_table = GuestAddress(desc_table);
self.queue.avail_ring = GuestAddress(avail_ring);
self.queue.used_ring = GuestAddress(used_ring);
}
pub(crate) fn set_queue_info(&self, desc_table: u64, avail_ring: u64, used_ring: u64) {
let mut state = self.get_mut();
state.queue.desc_table = GuestAddress(desc_table);
state.queue.avail_ring = GuestAddress(avail_ring);
state.queue.used_ring = GuestAddress(used_ring);
fn queue_next_avail(&self) -> u16 {
self.queue.next_avail()
}
pub(crate) fn queue_next_avail(&self) -> u16 {
self.get_ref().queue.next_avail()
fn set_queue_next_avail(&mut self, base: u16) {
self.queue.set_next_avail(base);
}
pub(crate) fn set_queue_next_avail(&self, base: u16) {
self.get_mut().queue.set_next_avail(base);
fn set_queue_size(&mut self, num: u16) {
self.queue.size = num;
}
pub(crate) fn set_queue_size(&self, num: u16) {
self.get_mut().queue.size = num;
fn set_queue_event_idx(&mut self, enabled: bool) {
self.queue.set_event_idx(enabled);
}
pub(crate) fn set_queue_event_idx(&self, enabled: bool) {
self.get_mut().queue.set_event_idx(enabled);
fn set_queue_ready(&mut self, ready: bool) {
self.queue.ready = ready;
}
pub(crate) fn set_queue_ready(&self, ready: bool) {
self.get_mut().queue.ready = ready;
}
pub(crate) fn set_kick(&self, file: Option<File>) {
fn set_kick(&mut self, file: Option<File>) {
// SAFETY:
// EventFd requires that it has sole ownership of its fd. So does File, so this is safe.
// Ideally, we'd have a generic way to refer to a uniquely-owned fd, such as that proposed
// by Rust RFC #3128.
self.get_mut().kick = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
self.kick = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
}
pub(crate) fn read_kick(&self) -> io::Result<bool> {
fn read_kick(&self) -> io::Result<bool> {
let state = self.get_ref();
if let Some(kick) = &state.kick {
@ -148,14 +221,194 @@ impl<M: GuestAddressSpace> Vring<M> {
Ok(state.enabled)
}
pub(crate) fn set_call(&self, file: Option<File>) {
fn set_call(&mut self, file: Option<File>) {
// SAFETY: see comment in set_kick()
self.get_mut().call = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
self.call = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
}
pub(crate) fn set_err(&self, file: Option<File>) {
fn set_err(&mut self, file: Option<File>) {
// SAFETY: see comment in set_kick()
self.get_mut().err = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
self.err = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
}
}
/// A `VringState` object protected by Mutex for multi-threading context.
///
/// Clones are cheap and share the same underlying `Arc<Mutex<VringState>>`.
#[derive(Clone)]
pub struct VringMutex<M: GuestAddressSpace = GuestMemoryAtomic<GuestMemoryMmap>> {
    // Shared, mutex-protected queue state.
    state: Arc<Mutex<VringState<M>>>,
}
impl<M: GuestAddressSpace> VringMutex<M> {
    /// Get a mutable guard to the underlying raw `VringState` object.
    ///
    /// Panics if the mutex has been poisoned by a thread that panicked
    /// while holding the lock (the `unwrap` on `lock()`).
    fn lock(&self) -> MutexGuard<VringState<M>> {
        self.state.lock().unwrap()
    }
}
// `VringT` implementation that funnels every operation through the mutex:
// mutating calls take the lock via `self.lock()`, read-only calls go through
// the `VringStateGuard` returned by `get_ref()`.
impl<M: GuestAddressSpace> VringT<M> for VringMutex<M> {
    fn new(mem: M, max_queue_size: u16) -> Self {
        let state = VringState::new(mem, max_queue_size);
        VringMutex {
            state: Arc::new(Mutex::new(state)),
        }
    }

    fn get_ref(&self) -> VringStateGuard<M> {
        VringStateGuard::MutexGuard(self.lock())
    }

    fn add_used(&mut self, desc_index: u16, len: u32) -> Result<(), VirtQueError> {
        let mut state = self.lock();
        state.add_used(desc_index, len)
    }

    fn signal_used_queue(&self) -> io::Result<()> {
        self.get_ref().signal_used_queue()
    }

    fn enable_notification(&mut self) -> Result<bool, VirtQueError> {
        let mut state = self.lock();
        state.enable_notification()
    }

    fn disable_notification(&mut self) -> Result<(), VirtQueError> {
        let mut state = self.lock();
        state.disable_notification()
    }

    fn needs_notification(&mut self) -> Result<bool, VirtQueError> {
        let mut state = self.lock();
        state.needs_notification()
    }

    fn set_enabled(&mut self, enabled: bool) {
        let mut state = self.lock();
        state.set_enabled(enabled);
    }

    fn set_queue_info(&mut self, desc_table: u64, avail_ring: u64, used_ring: u64) {
        let mut state = self.lock();
        state.set_queue_info(desc_table, avail_ring, used_ring);
    }

    fn queue_next_avail(&self) -> u16 {
        self.get_ref().queue_next_avail()
    }

    fn set_queue_next_avail(&mut self, base: u16) {
        let mut state = self.lock();
        state.set_queue_next_avail(base);
    }

    fn set_queue_size(&mut self, num: u16) {
        let mut state = self.lock();
        state.set_queue_size(num);
    }

    fn set_queue_event_idx(&mut self, enabled: bool) {
        let mut state = self.lock();
        state.set_queue_event_idx(enabled);
    }

    fn set_queue_ready(&mut self, ready: bool) {
        let mut state = self.lock();
        state.set_queue_ready(ready);
    }

    fn set_kick(&mut self, file: Option<File>) {
        let mut state = self.lock();
        state.set_kick(file);
    }

    fn read_kick(&self) -> io::Result<bool> {
        self.get_ref().read_kick()
    }

    fn set_call(&mut self, file: Option<File>) {
        let mut state = self.lock();
        state.set_call(file);
    }

    fn set_err(&mut self, file: Option<File>) {
        let mut state = self.lock();
        state.set_err(file);
    }
}
/// A `VringState` object protected by RwLock for multi-threading context.
///
/// Clones are cheap and share the same underlying `Arc<RwLock<VringState>>`.
#[derive(Clone)]
pub struct VringRwLock<M: GuestAddressSpace = GuestMemoryAtomic<GuestMemoryMmap>> {
    // Shared, rwlock-protected queue state.
    state: Arc<RwLock<VringState<M>>>,
}
impl<M: GuestAddressSpace> VringRwLock<M> {
    /// Get a mutable guard to the underlying raw `VringState` object.
    ///
    /// Panics if the rwlock has been poisoned by a thread that panicked
    /// while holding the write lock (the `unwrap` on `write()`).
    fn write_lock(&self) -> RwLockWriteGuard<VringState<M>> {
        self.state.write().unwrap()
    }
}
// `VringT` implementation that funnels every operation through the rwlock:
// mutating calls take the write lock via `self.write_lock()`, read-only calls
// take the read lock through the `VringStateGuard` returned by `get_ref()`.
impl<M: GuestAddressSpace> VringT<M> for VringRwLock<M> {
    fn new(mem: M, max_queue_size: u16) -> Self {
        let state = VringState::new(mem, max_queue_size);
        VringRwLock {
            state: Arc::new(RwLock::new(state)),
        }
    }

    fn get_ref(&self) -> VringStateGuard<M> {
        let guard = self.state.read().unwrap();
        VringStateGuard::RwLockReadGuard(guard)
    }

    fn add_used(&mut self, desc_index: u16, len: u32) -> Result<(), VirtQueError> {
        let mut state = self.write_lock();
        state.add_used(desc_index, len)
    }

    fn signal_used_queue(&self) -> io::Result<()> {
        self.get_ref().signal_used_queue()
    }

    fn enable_notification(&mut self) -> Result<bool, VirtQueError> {
        let mut state = self.write_lock();
        state.enable_notification()
    }

    fn disable_notification(&mut self) -> Result<(), VirtQueError> {
        let mut state = self.write_lock();
        state.disable_notification()
    }

    fn needs_notification(&mut self) -> Result<bool, VirtQueError> {
        let mut state = self.write_lock();
        state.needs_notification()
    }

    fn set_enabled(&mut self, enabled: bool) {
        let mut state = self.write_lock();
        state.set_enabled(enabled);
    }

    fn set_queue_info(&mut self, desc_table: u64, avail_ring: u64, used_ring: u64) {
        let mut state = self.write_lock();
        state.set_queue_info(desc_table, avail_ring, used_ring);
    }

    fn queue_next_avail(&self) -> u16 {
        self.get_ref().queue_next_avail()
    }

    fn set_queue_next_avail(&mut self, base: u16) {
        let mut state = self.write_lock();
        state.set_queue_next_avail(base);
    }

    fn set_queue_size(&mut self, num: u16) {
        let mut state = self.write_lock();
        state.set_queue_size(num);
    }

    fn set_queue_event_idx(&mut self, enabled: bool) {
        let mut state = self.write_lock();
        state.set_queue_event_idx(enabled);
    }

    fn set_queue_ready(&mut self, ready: bool) {
        let mut state = self.write_lock();
        state.set_queue_ready(ready);
    }

    fn set_kick(&mut self, file: Option<File>) {
        let mut state = self.write_lock();
        state.set_kick(file);
    }

    fn read_kick(&self) -> io::Result<bool> {
        self.get_ref().read_kick()
    }

    fn set_call(&mut self, file: Option<File>) {
        let mut state = self.write_lock();
        state.set_call(file);
    }

    fn set_err(&mut self, file: Option<File>) {
        let mut state = self.write_lock();
        state.set_err(file);
    }
}
@ -172,42 +425,33 @@ mod tests {
GuestMemoryMmap::<AtomicBitmap>::from_ranges(&[(GuestAddress(0x100000), 0x10000)])
.unwrap(),
);
let vring = Vring::new(mem, 0x1000);
let mut vring = VringMutex::new(mem, 0x1000);
assert!(vring.get_ref().get_kick().is_none());
assert_eq!(vring.get_ref().enabled, false);
assert_eq!(vring.get_mut().get_queue_mut().ready, false);
assert_eq!(vring.get_mut().get_queue_mut().event_idx_enabled, false);
assert_eq!(vring.lock().queue.ready, false);
assert_eq!(vring.lock().queue.event_idx_enabled, false);
vring.set_enabled(true);
assert_eq!(vring.get_ref().enabled, true);
vring.set_queue_info(0x100100, 0x100200, 0x100300);
assert_eq!(
vring.get_mut().get_queue_mut().desc_table,
GuestAddress(0x100100)
);
assert_eq!(
vring.get_mut().get_queue_mut().avail_ring,
GuestAddress(0x100200)
);
assert_eq!(
vring.get_mut().get_queue_mut().used_ring,
GuestAddress(0x100300)
);
assert_eq!(vring.lock().get_queue().desc_table, GuestAddress(0x100100));
assert_eq!(vring.lock().get_queue().avail_ring, GuestAddress(0x100200));
assert_eq!(vring.lock().get_queue().used_ring, GuestAddress(0x100300));
assert_eq!(vring.queue_next_avail(), 0);
vring.set_queue_next_avail(0x20);
assert_eq!(vring.queue_next_avail(), 0x20);
vring.set_queue_size(0x200);
assert_eq!(vring.get_mut().get_queue_mut().size, 0x200);
assert_eq!(vring.lock().queue.size, 0x200);
vring.set_queue_event_idx(true);
assert_eq!(vring.get_mut().get_queue_mut().event_idx_enabled, true);
assert_eq!(vring.lock().queue.event_idx_enabled, true);
vring.set_queue_ready(true);
assert_eq!(vring.get_mut().get_queue_mut().ready, true);
assert_eq!(vring.lock().queue.ready, true);
}
#[test]
@ -215,7 +459,7 @@ mod tests {
let mem = GuestMemoryAtomic::new(
GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
);
let vring = Vring::new(mem, 0x1000);
let mut vring = VringMutex::new(mem, 0x1000);
vring.set_enabled(true);
assert_eq!(vring.get_ref().enabled, true);