From 3acf9dfcf3d3c1de38708c59595f5d25417832fd Mon Sep 17 00:00:00 2001 From: Sebastien Boeuf Date: Tue, 8 Oct 2019 12:50:05 -0700 Subject: [PATCH] vfio: Don't map guest memory for VFIO devices attached to vIOMMU In case a VFIO device is being attached behind a virtual IOMMU, we should not automatically map the entire guest memory for the specific device. A VFIO device attached to the virtual IOMMU will be driven with IOVAs, hence we should simply wait for the requests coming from the virtual IOMMU. Signed-off-by: Sebastien Boeuf --- vfio/src/vfio_device.rs | 27 +++++++++++++++++---------- vmm/src/device_manager.rs | 10 +++++++--- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/vfio/src/vfio_device.rs b/vfio/src/vfio_device.rs index dc2df5c81..93144eb9b 100644 --- a/vfio/src/vfio_device.rs +++ b/vfio/src/vfio_device.rs @@ -578,6 +578,7 @@ pub struct VfioDevice { regions: Vec, irqs: HashMap, mem: Arc>, + iommu_attached: bool, } impl VfioDevice { @@ -588,6 +589,7 @@ impl VfioDevice { sysfspath: &Path, device_fd: Arc, mem: Arc>, + iommu_attached: bool, ) -> Result { let uuid_path: PathBuf = [sysfspath, Path::new("iommu_group")].iter().collect(); let group_path = uuid_path.read_link().map_err(|_| VfioError::InvalidPath)?; @@ -609,6 +611,7 @@ impl VfioDevice { regions, irqs, mem, + iommu_attached, }) } @@ -841,22 +844,26 @@ impl VfioDevice { /// Add all guest memory regions into vfio container's iommu table, /// then vfio kernel driver could access guest memory from gfn pub fn setup_dma_map(&self) -> Result<()> { - self.mem.read().unwrap().with_regions(|_index, region| { - self.vfio_dma_map( - region.start_addr().raw_value(), - region.len() as u64, - region.as_ptr() as u64, - ) - })?; + if !self.iommu_attached { + self.mem.read().unwrap().with_regions(|_index, region| { - self.vfio_dma_map( + self.vfio_dma_map( - region.start_addr().raw_value(), + region.start_addr().raw_value(), - region.len() as u64, + region.len() as u64, - region.as_ptr() as u64, + region.as_ptr() as u64, - ) + ) - })?; + })?; + } Ok(()) } /// remove all guest memory regions from vfio containers 
iommu table /// then vfio kernel driver couldn't access this guest memory pub fn unset_dma_map(&self) -> Result<()> { - self.mem.read().unwrap().with_regions(|_index, region| { - self.vfio_dma_unmap(region.start_addr().raw_value(), region.len() as u64) - })?; + if !self.iommu_attached { + self.mem.read().unwrap().with_regions(|_index, region| { + self.vfio_dma_unmap(region.start_addr().raw_value(), region.len() as u64) + })?; + } Ok(()) } diff --git a/vmm/src/device_manager.rs b/vmm/src/device_manager.rs index 88270e4cf..8a8ba7fd0 100644 --- a/vmm/src/device_manager.rs +++ b/vmm/src/device_manager.rs @@ -999,9 +999,13 @@ impl DeviceManager { // global device ID. let device_id = pci.next_device_id() << 3; - let vfio_device = - VfioDevice::new(&device_cfg.path, device_fd.clone(), vm_info.memory.clone()) - .map_err(DeviceManagerError::VfioCreate)?; + let vfio_device = VfioDevice::new( + &device_cfg.path, + device_fd.clone(), + vm_info.memory.clone(), + device_cfg.iommu, + ) + .map_err(DeviceManagerError::VfioCreate)?; if device_cfg.iommu { if let Some(iommu) = iommu_device {