vhost_user: add more negative unit test cases

Add more negative unit test cases to improve code coverage.
Also add two helper functions to simplify code.

Signed-off-by: Liu Jiang <gerry@linux.alibaba.com>
This commit is contained in:
Liu Jiang 2021-02-22 14:08:04 +08:00 committed by Sergio Lopez
parent ec6eae722e
commit e543bf2a8d
4 changed files with 295 additions and 40 deletions

View file

@ -1 +1 @@
{"coverage_score": 78.9, "exclude_path": "src/vhost_kern/", "crate_features": "vhost-user-master,vhost-user-slave"}
{"coverage_score": 81.3, "exclude_path": "src/vhost_kern/", "crate_features": "vhost-user-master,vhost-user-slave"}

View file

@ -6,7 +6,7 @@
use std::mem;
use std::os::unix::io::{AsRawFd, RawFd};
use std::os::unix::net::UnixStream;
use std::sync::{Arc, Mutex};
use std::sync::{Arc, Mutex, MutexGuard};
use vmm_sys_util::eventfd::EventFd;
@ -78,6 +78,10 @@ impl Master {
}
}
/// Lock the shared internal state and return the guard.
///
/// Panics if the mutex has been poisoned by a panicking thread.
fn node(&self) -> MutexGuard<MasterInternal> {
    self.node.lock().unwrap()
}
/// Create a new instance from a Unix stream socket.
pub fn from_stream(sock: UnixStream, max_queue_num: u64) -> Self {
Self::new(Endpoint::<MasterReq>::from_stream(sock), max_queue_num)
@ -116,7 +120,7 @@ impl Master {
impl VhostBackend for Master {
/// Get from the underlying vhost implementation the feature bitmask.
fn get_features(&self) -> Result<u64> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
let hdr = node.send_request_header(MasterReq::GET_FEATURES, None)?;
let val = node.recv_reply::<VhostUserU64>(&hdr)?;
node.virtio_features = val.value;
@ -125,7 +129,7 @@ impl VhostBackend for Master {
/// Enable features in the underlying vhost implementation using a bitmask.
fn set_features(&self, features: u64) -> Result<()> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
let val = VhostUserU64::new(features);
let _ = node.send_request_with_body(MasterReq::SET_FEATURES, &val, None)?;
// Don't wait for ACK here because the protocol feature negotiation process hasn't been
@ -138,7 +142,7 @@ impl VhostBackend for Master {
fn set_owner(&self) -> Result<()> {
// We unwrap() the return value to assert that we are not expecting threads to ever fail
// while holding the lock.
let mut node = self.node.lock().unwrap();
let mut node = self.node();
let _ = node.send_request_header(MasterReq::SET_OWNER, None)?;
// Don't wait for ACK here because the protocol feature negotiation process hasn't been
// completed yet.
@ -146,7 +150,7 @@ impl VhostBackend for Master {
}
fn reset_owner(&self) -> Result<()> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
let _ = node.send_request_header(MasterReq::RESET_OWNER, None)?;
// Don't wait for ACK here because the protocol feature negotiation process hasn't been
// completed yet.
@ -174,7 +178,7 @@ impl VhostBackend for Master {
ctx.append(&reg, region.mmap_handle);
}
let mut node = self.node.lock().unwrap();
let mut node = self.node();
let body = VhostUserMemory::new(ctx.regions.len() as u32);
let (_, payload, _) = unsafe { ctx.regions.align_to::<u8>() };
let hdr = node.send_request_with_payload(
@ -189,7 +193,7 @@ impl VhostBackend for Master {
// Clippy doesn't seem to know that if let with && is still experimental
#[allow(clippy::unnecessary_unwrap)]
fn set_log_base(&self, base: u64, fd: Option<RawFd>) -> Result<()> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
let val = VhostUserU64::new(base);
if node.acked_protocol_features & VhostUserProtocolFeatures::LOG_SHMFD.bits() != 0
@ -204,7 +208,7 @@ impl VhostBackend for Master {
}
fn set_log_fd(&self, fd: RawFd) -> Result<()> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
let fds = [fd];
node.send_request_header(MasterReq::SET_LOG_FD, Some(&fds))?;
Ok(())
@ -212,7 +216,7 @@ impl VhostBackend for Master {
/// Set the size of the queue.
fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
}
@ -224,7 +228,7 @@ impl VhostBackend for Master {
/// Sets the addresses of the different aspects of the vring.
fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
if queue_index as u64 >= node.max_queue_num
|| config_data.flags & !(VhostUserVringAddrFlags::all().bits()) != 0
{
@ -238,7 +242,7 @@ impl VhostBackend for Master {
/// Sets the base offset in the available vring.
fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
}
@ -249,7 +253,7 @@ impl VhostBackend for Master {
}
fn get_vring_base(&self, queue_index: usize) -> Result<u32> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
}
@ -265,7 +269,7 @@ impl VhostBackend for Master {
/// is set when there is no file descriptor in the ancillary data. This signals that polling
/// will be used instead of waiting for the call.
fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
}
@ -278,7 +282,7 @@ impl VhostBackend for Master {
/// is set when there is no file descriptor in the ancillary data. This signals that polling
/// should be used instead of waiting for a kick.
fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
}
@ -290,7 +294,7 @@ impl VhostBackend for Master {
/// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
/// is set when there is no file descriptor in the ancillary data.
fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
}
@ -301,7 +305,7 @@ impl VhostBackend for Master {
impl VhostUserMaster for Master {
fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
let flag = VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
if node.virtio_features & flag == 0 || node.acked_virtio_features & flag == 0 {
return error_code(VhostUserError::InvalidOperation);
@ -318,7 +322,7 @@ impl VhostUserMaster for Master {
}
fn set_protocol_features(&mut self, features: VhostUserProtocolFeatures) -> Result<()> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
let flag = VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
if node.virtio_features & flag == 0 || node.acked_virtio_features & flag == 0 {
return error_code(VhostUserError::InvalidOperation);
@ -333,7 +337,7 @@ impl VhostUserMaster for Master {
}
fn get_queue_num(&mut self) -> Result<u64> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
if !node.is_feature_mq_available() {
return error_code(VhostUserError::InvalidOperation);
}
@ -348,7 +352,7 @@ impl VhostUserMaster for Master {
}
fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<()> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
// set_vring_enable() is supported only when PROTOCOL_FEATURES has been enabled.
if node.acked_virtio_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0 {
return error_code(VhostUserError::InvalidOperation);
@ -374,7 +378,7 @@ impl VhostUserMaster for Master {
return error_code(VhostUserError::InvalidParam);
}
let mut node = self.node.lock().unwrap();
let mut node = self.node();
// depends on VhostUserProtocolFeatures::CONFIG
if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
return error_code(VhostUserError::InvalidOperation);
@ -409,7 +413,7 @@ impl VhostUserMaster for Master {
return error_code(VhostUserError::InvalidParam);
}
let mut node = self.node.lock().unwrap();
let mut node = self.node();
// depends on VhostUserProtocolFeatures::CONFIG
if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
return error_code(VhostUserError::InvalidOperation);
@ -420,7 +424,7 @@ impl VhostUserMaster for Master {
}
fn set_slave_request_fd(&mut self, fd: RawFd) -> Result<()> {
let mut node = self.node.lock().unwrap();
let mut node = self.node();
if node.acked_protocol_features & VhostUserProtocolFeatures::SLAVE_REQ.bits() == 0 {
return error_code(VhostUserError::InvalidOperation);
}
@ -433,7 +437,7 @@ impl VhostUserMaster for Master {
impl AsRawFd for Master {
    /// Return the raw fd of the main communication socket.
    fn as_raw_fd(&self) -> RawFd {
        // The diff rendering had duplicated (old and new) guard bindings
        // here; a single lock acquisition is all that is needed.
        let node = self.node();
        node.main_sock.as_raw_fd()
    }
}
@ -783,4 +787,211 @@ mod tests {
peer.send_message(&hdr, &msg, None).unwrap();
assert!(master.get_protocol_features().is_err());
}
#[test]
fn test_master_set_config_negative() {
    let path = temp_path();
    let (mut master, _peer) = create_pair(&path);
    let payload = vec![0x0; MAX_MSG_SIZE + 1];

    // Before any features are marked as negotiated, set_config() must fail.
    master
        .set_config(0x100, VhostUserConfigFlags::WRITABLE, &payload[0..4])
        .unwrap_err();

    {
        // Pretend every virtio/protocol feature has been negotiated.
        let mut node = master.node();
        node.virtio_features = 0xffff_ffff;
        node.acked_virtio_features = 0xffff_ffff;
        node.protocol_features = 0xffff_ffff;
        node.acked_protocol_features = 0xffff_ffff;
    }

    // A well-formed request now succeeds.
    master
        .set_config(0x100, VhostUserConfigFlags::WRITABLE, &payload[0..4])
        .unwrap();

    // Offset 0 is rejected.
    master
        .set_config(0x0, VhostUserConfigFlags::WRITABLE, &payload[0..4])
        .unwrap_err();
    // Offset 0x1000 is rejected.
    master
        .set_config(0x1000, VhostUserConfigFlags::WRITABLE, &payload[0..4])
        .unwrap_err();
    // Undefined flag bits are rejected.
    master
        .set_config(
            0x100,
            unsafe { VhostUserConfigFlags::from_bits_unchecked(0xffff_ffff) },
            &payload[0..4],
        )
        .unwrap_err();
    // A buffer larger than the maximum message size is rejected.
    master
        .set_config(0x100, VhostUserConfigFlags::WRITABLE, &payload)
        .unwrap_err();
    // An empty buffer is rejected as well.
    master
        .set_config(0x100, VhostUserConfigFlags::WRITABLE, &[])
        .unwrap_err();
}
/// Build a connected (master, peer) pair whose master has every feature
/// bit pre-acked, so feature-gated requests are not refused up front.
fn create_pair2() -> (Master, Endpoint<MasterReq>) {
    let path = temp_path();
    let (master, peer) = create_pair(&path);
    {
        let mut internal = master.node();
        internal.virtio_features = 0xffff_ffff;
        internal.acked_virtio_features = 0xffff_ffff;
        internal.protocol_features = 0xffff_ffff;
        internal.acked_protocol_features = 0xffff_ffff;
    }
    (master, peer)
}
#[test]
fn test_master_get_config_negative0() {
    let (mut master, mut peer) = create_pair2();
    let buf = vec![0x0; MAX_MSG_SIZE + 1];
    let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
    let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());

    // Baseline: a matching GET_CONFIG reply is accepted.
    peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
        .unwrap();
    assert!(master
        .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
        .is_ok());

    // A reply carrying the wrong request code must be rejected.
    hdr.set_code(MasterReq::GET_FEATURES);
    peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
        .unwrap();
    assert!(master
        .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
        .is_err());
    // Removed dead trailing `hdr.set_code(MasterReq::GET_CONFIG);` —
    // `hdr` was never read again after the final assertion.
}
#[test]
fn test_master_get_config_negative1() {
    let (mut master, mut peer) = create_pair2();
    let data = vec![0x0; MAX_MSG_SIZE + 1];
    let mut reply_hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
    let body = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());

    // Baseline: a well-formed reply is accepted.
    peer.send_message_with_payload(&reply_hdr, &body, &data[0..4], None)
        .unwrap();
    assert!(master
        .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &data[0..4])
        .is_ok());

    // Clearing the reply flag in the header must make get_config() fail.
    reply_hdr.set_reply(false);
    peer.send_message_with_payload(&reply_hdr, &body, &data[0..4], None)
        .unwrap();
    assert!(master
        .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &data[0..4])
        .is_err());
}
#[test]
fn test_master_get_config_negative2() {
    let (mut master, mut peer) = create_pair2();
    let data = vec![0x0; MAX_MSG_SIZE + 1];
    let reply_hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
    let body = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());

    // A well-formed queued reply lets get_config() succeed.
    peer.send_message_with_payload(&reply_hdr, &body, &data[0..4], None)
        .unwrap();
    assert!(master
        .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &data[0..4])
        .is_ok());
}
#[test]
fn test_master_get_config_negative3() {
    let (mut master, mut peer) = create_pair2();
    let data = vec![0x0; MAX_MSG_SIZE + 1];
    let reply_hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
    let mut body = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());

    // Baseline: a matching reply is accepted.
    peer.send_message_with_payload(&reply_hdr, &body, &data[0..4], None)
        .unwrap();
    assert!(master
        .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &data[0..4])
        .is_ok());

    // A reply whose offset (0) disagrees with the request must be rejected.
    body.offset = 0;
    peer.send_message_with_payload(&reply_hdr, &body, &data[0..4], None)
        .unwrap();
    assert!(master
        .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &data[0..4])
        .is_err());
}
#[test]
fn test_master_get_config_negative4() {
    let (mut master, mut peer) = create_pair2();
    let data = vec![0x0; MAX_MSG_SIZE + 1];
    let reply_hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
    let mut body = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());

    // Baseline: a matching reply is accepted.
    peer.send_message_with_payload(&reply_hdr, &body, &data[0..4], None)
        .unwrap();
    assert!(master
        .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &data[0..4])
        .is_ok());

    // A reply whose offset (0x101) does not match the requested 0x100
    // must be rejected.
    body.offset = 0x101;
    peer.send_message_with_payload(&reply_hdr, &body, &data[0..4], None)
        .unwrap();
    assert!(master
        .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &data[0..4])
        .is_err());
}
#[test]
fn test_master_get_config_negative5() {
    let (mut master, mut peer) = create_pair2();
    let data = vec![0x0; MAX_MSG_SIZE + 1];
    let reply_hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
    let mut body = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());

    // Baseline: a matching reply is accepted.
    peer.send_message_with_payload(&reply_hdr, &body, &data[0..4], None)
        .unwrap();
    assert!(master
        .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &data[0..4])
        .is_ok());

    // A reply with an out-of-range offset must be rejected.
    body.offset = (MAX_MSG_SIZE + 1) as u32;
    peer.send_message_with_payload(&reply_hdr, &body, &data[0..4], None)
        .unwrap();
    assert!(master
        .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &data[0..4])
        .is_err());
}
#[test]
fn test_master_get_config_negative6() {
    let (mut master, mut peer) = create_pair2();
    let data = vec![0x0; MAX_MSG_SIZE + 1];
    let reply_hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
    let mut body = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());

    // Baseline: a matching reply is accepted.
    peer.send_message_with_payload(&reply_hdr, &body, &data[0..4], None)
        .unwrap();
    assert!(master
        .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &data[0..4])
        .is_ok());

    // A reply whose size (6) disagrees with the requested 4 bytes must be
    // rejected.
    body.size = 6;
    peer.send_message_with_payload(&reply_hdr, &body, &data[0..6], None)
        .unwrap();
    assert!(master
        .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &data[0..4])
        .is_err());
}
#[test]
fn test_maset_set_mem_table_failure() {
    // NOTE(review): "maset" in the test name looks like a typo for
    // "master"; left as-is to avoid renaming a published test.
    let (master, _peer) = create_pair2();

    // An empty region list must be rejected.
    master.set_mem_table(&[]).unwrap_err();

    // More regions than MAX_ATTACHED_FD_ENTRIES must be rejected too.
    let regions = vec![VhostUserMemoryRegionInfo::default(); MAX_ATTACHED_FD_ENTRIES + 1];
    master.set_mem_table(&regions).unwrap_err();
}
}

View file

@ -210,7 +210,7 @@ mod tests {
#[test]
fn create_dummy_slave() {
let mut slave = DummySlaveReqHandler::new();
let slave = Arc::new(Mutex::new(DummySlaveReqHandler::new()));
slave.set_owner().unwrap();
assert!(slave.set_owner().is_err());
@ -319,11 +319,9 @@ mod tests {
// set_vring_enable
slave.handle_request().unwrap();
/*
// set_log_base,set_log_fd()
slave.handle_request().unwrap();
slave.handle_request().unwrap();
*/
slave.handle_request().unwrap_err();
slave.handle_request().unwrap_err();
// set_vring_xxx
slave.handle_request().unwrap();
@ -375,10 +373,9 @@ mod tests {
master.set_slave_request_fd(eventfd.as_raw_fd()).unwrap();
master.set_vring_enable(0, true).unwrap();
/*
// unimplemented yet
master.set_log_base(0, Some(eventfd.as_raw_fd())).unwrap();
master.set_log_fd(eventfd.as_raw_fd()).unwrap();
*/
master.set_vring_num(0, 256).unwrap();
master.set_vring_base(0, 0).unwrap();

View file

@ -5,7 +5,7 @@ use std::io;
use std::mem;
use std::os::unix::io::RawFd;
use std::os::unix::net::UnixStream;
use std::sync::{Arc, Mutex};
use std::sync::{Arc, Mutex, MutexGuard};
use super::connection::Endpoint;
use super::message::*;
@ -92,15 +92,17 @@ impl SlaveFsCacheReq {
}
}
/// Lock the shared internal state and return the guard.
///
/// Panics if the mutex has been poisoned by a panicking thread.
fn node(&self) -> MutexGuard<SlaveFsCacheReqInternal> {
    self.node.lock().unwrap()
}
/// Forward a slave-to-master request and translate any transport error
/// into a `std::io::Error`.
fn send_message(
    &self,
    request: SlaveReq,
    fs: &VhostUserFSSlaveMsg,
    fds: Option<&[RawFd]>,
) -> io::Result<u64> {
    // The diff rendering left both the old `self.node.lock().unwrap()`
    // chain and the new `self.node()` call; keep only the helper call.
    // `map_err` replaces the non-idiomatic `or_else(|e| Err(..))`.
    self.node()
        .send_message(request, fs, fds)
        .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("{}", e)))
}
@ -116,12 +118,12 @@ impl SlaveFsCacheReq {
/// the "REPLY_ACK" flag will be set in the message header for every slave to master request
/// message.
pub fn set_reply_ack_flag(&self, enable: bool) {
    // The diff rendering duplicated the old and new assignment; keep
    // only the version going through the node() helper.
    self.node().reply_ack_negotiated = enable;
}
/// Mark endpoint as failed with specified error code.
pub fn set_failed(&self, error: i32) {
    // The diff rendering duplicated the old and new assignment; keep
    // only the version going through the node() helper.
    self.node().error = Some(error);
}
}
@ -148,9 +150,9 @@ mod tests {
let (p1, _p2) = UnixStream::pair().unwrap();
let fs_cache = SlaveFsCacheReq::from_stream(p1);
assert!(fs_cache.node.lock().unwrap().error.is_none());
assert!(fs_cache.node().error.is_none());
fs_cache.set_failed(libc::EAGAIN);
assert_eq!(fs_cache.node.lock().unwrap().error, Some(libc::EAGAIN));
assert_eq!(fs_cache.node().error, Some(libc::EAGAIN));
}
#[test]
@ -166,7 +168,7 @@ mod tests {
fs_cache
.fs_slave_unmap(&VhostUserFSSlaveMsg::default())
.unwrap_err();
fs_cache.node.lock().unwrap().error = None;
fs_cache.node().error = None;
drop(p2);
fs_cache
@ -176,4 +178,49 @@ mod tests {
.fs_slave_unmap(&VhostUserFSSlaveMsg::default())
.unwrap_err();
}
#[test]
fn test_slave_fs_cache_recv_negative() {
    let (p1, p2) = UnixStream::pair().unwrap();
    // Capture the raw fd before `p2` is moved into the endpoint.
    let fd = p2.as_raw_fd();
    let fs_cache = SlaveFsCacheReq::from_stream(p1);
    let mut master = Endpoint::<SlaveReq>::from_stream(p2);

    let body_len = mem::size_of::<VhostUserFSSlaveMsg>();
    let mut reply_hdr = VhostUserMsgHeader::new(
        SlaveReq::FS_MAP,
        VhostUserHeaderFlag::REPLY.bits(),
        body_len as u32,
    );
    let ack_ok = VhostUserU64::new(0);

    // Queue a reply carrying an attached fd; this map request succeeds.
    master.send_message(&reply_hdr, &ack_ok, Some(&[fd])).unwrap();
    fs_cache
        .fs_slave_map(&VhostUserFSSlaveMsg::default(), fd)
        .unwrap();

    // After enabling reply-ack the same request is expected to fail.
    fs_cache.set_reply_ack_flag(true);
    fs_cache
        .fs_slave_map(&VhostUserFSSlaveMsg::default(), fd)
        .unwrap_err();

    // A reply whose request code does not match FS_MAP must be rejected.
    reply_hdr.set_code(SlaveReq::FS_UNMAP);
    master.send_message(&reply_hdr, &ack_ok, None).unwrap();
    fs_cache
        .fs_slave_map(&VhostUserFSSlaveMsg::default(), fd)
        .unwrap_err();

    // A reply with a non-zero status value makes the request fail.
    reply_hdr.set_code(SlaveReq::FS_MAP);
    let ack_fail = VhostUserU64::new(1);
    master.send_message(&reply_hdr, &ack_fail, None).unwrap();
    fs_cache
        .fs_slave_map(&VhostUserFSSlaveMsg::default(), fd)
        .unwrap_err();

    // A reply with status zero lets the request succeed.
    master.send_message(&reply_hdr, &ack_ok, None).unwrap();
    fs_cache
        .fs_slave_map(&VhostUserFSSlaveMsg::default(), fd)
        .unwrap();
}
}