diff --git a/virtio-devices/src/vhost_user/vu_common_ctrl.rs b/virtio-devices/src/vhost_user/vu_common_ctrl.rs
index 628662d8e..21caffe98 100644
--- a/virtio-devices/src/vhost_user/vu_common_ctrl.rs
+++ b/virtio-devices/src/vhost_user/vu_common_ctrl.rs
@@ -27,10 +27,10 @@ pub struct VhostUserConfig {
 
 pub fn update_mem_table(vu: &mut Master, mem: &GuestMemoryMmap) -> Result<()> {
     let mut regions: Vec<VhostUserMemoryRegionInfo> = Vec::new();
-    mem.with_regions_mut(|_, region| {
+    for region in mem.iter() {
         let (mmap_handle, mmap_offset) = match region.file_offset() {
             Some(_file_offset) => (_file_offset.file().as_raw_fd(), _file_offset.start()),
-            None => return Err(MmapError::NoMemoryRegion),
+            None => return Err(Error::VhostUserMemoryRegion(MmapError::NoMemoryRegion)),
         };
 
         let vhost_user_net_reg = VhostUserMemoryRegionInfo {
@@ -42,10 +42,7 @@ pub fn update_mem_table(vu: &mut Master, mem: &GuestMemoryMmap) -> Result<()> {
         };
 
         regions.push(vhost_user_net_reg);
-
-        Ok(())
-    })
-    .map_err(Error::VhostUserMemoryRegion)?;
+    }
 
     vu.set_mem_table(regions.as_slice())
         .map_err(Error::VhostUserSetMemTable)?;
diff --git a/vmm/src/memory_manager.rs b/vmm/src/memory_manager.rs
index 690c1e3a6..0ff58dc35 100644
--- a/vmm/src/memory_manager.rs
+++ b/vmm/src/memory_manager.rs
@@ -773,7 +773,7 @@ impl MemoryManager {
             log_dirty,
         }));
 
-        guest_memory.memory().with_regions(|_, region| {
+        for region in guest_memory.memory().iter() {
             let mut mm = memory_manager.lock().unwrap();
             let slot = mm.create_userspace_mapping(
                 region.start_addr().raw_value(),
@@ -788,9 +788,7 @@ impl MemoryManager {
                 size: region.len(),
                 slot,
             });
-
-            Ok(())
-        })?;
+        }
 
         for region in virtio_mem_regions.drain(..) {
             let mut mm = memory_manager.lock().unwrap();
@@ -1942,7 +1940,7 @@ impl Snapshottable for MemoryManager {
 
         let mut memory_regions: Vec<MemoryRegion> = Vec::new();
 
-        guest_memory.with_regions_mut(|index, region| {
+        for (index, region) in guest_memory.iter().enumerate() {
             if region.len() == 0 {
                 return Err(MigratableError::Snapshot(anyhow!("Zero length region")));
             }
@@ -1970,9 +1968,7 @@ impl Snapshottable for MemoryManager {
                 start_addr: region.start_addr().0,
                 size: region.len(),
             });
-
-            Ok(())
-        })?;
+        }
 
         // Store locally this list of regions as it will be used through the
         // Transportable::send() implementation. The point is to avoid the
diff --git a/vmm/src/vm.rs b/vmm/src/vm.rs
index 97b1f062c..b156c1788 100644
--- a/vmm/src/vm.rs
+++ b/vmm/src/vm.rs
@@ -2019,13 +2019,12 @@ impl Vm {
         let mut table = MemoryRangeTable::default();
         let guest_memory = self.memory_manager.lock().as_ref().unwrap().guest_memory();
 
-        guest_memory.memory().with_regions_mut(|_, region| {
+        for region in guest_memory.memory().iter() {
             table.push(MemoryRange {
                 gpa: region.start_addr().raw_value(),
                 length: region.len() as u64,
             });
-            Ok(())
-        })?;
+        }
 
         Ok(table)
     }
@@ -2520,7 +2519,7 @@ pub fn test_vm() {
     let hv = hypervisor::new().unwrap();
    let vm = hv.create_vm().expect("new VM creation failed");
 
-    mem.with_regions(|index, region| {
+    for (index, region) in mem.iter().enumerate() {
        let mem_region = vm.make_user_memory_region(
            index as u32,
            region.start_addr().raw_value(),
@@ -2531,8 +2530,8 @@ pub fn test_vm() {
        );
 
        vm.set_user_memory_region(mem_region)
-    })
-    .expect("Cannot configure guest memory");
+            .expect("Cannot configure guest memory");
+    }
 
    mem.write_slice(&code, load_addr)
        .expect("Writing code to memory failed");
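
Note: every hunk above applies the same mechanical conversion, away from
vm-memory's fallible visitor callbacks (with_regions / with_regions_mut)
and onto plain iteration with iter(). The one semantic point to check is
error propagation: an Err that used to escape the closure and be wrapped
by map_err must now be built with the enclosing function's error type
and returned directly, which is exactly what the first hunk does with
Error::VhostUserMemoryRegion(MmapError::NoMemoryRegion). Below is a
minimal, self-contained sketch of the pattern; Region, MemError, and
collect_regions are hypothetical names for illustration only, not
cloud-hypervisor or vm-memory APIs.

    // Hypothetical stand-ins for a guest memory region and the caller's
    // error type; illustration only.
    struct Region {
        start: u64,
        len: u64,
        file_offset: Option<u64>,
    }

    enum MemError {
        NoMemoryRegion,
    }

    // Before, the loop body lived in a fallible closure:
    //     mem.with_regions_mut(|_, region| { ...; Ok(()) })
    //         .map_err(/* wrap into the outer error type */)?;
    // After, it is a plain for loop. The early-exit behavior is kept,
    // because the Err now returns from the enclosing function itself.
    fn collect_regions(mem: &[Region]) -> Result<Vec<(u64, u64, u64)>, MemError> {
        let mut out = Vec::new();
        for region in mem.iter() {
            let offset = match region.file_offset {
                Some(offset) => offset,
                // The outer error type is constructed at the return
                // site instead of being added later by map_err.
                None => return Err(MemError::NoMemoryRegion),
            };
            out.push((region.start, region.len, offset));
        }
        Ok(out)
    }

A second nuance shows up in the test_vm() hunks: the single
.expect("Cannot configure guest memory") that previously covered the
whole with_regions() traversal now runs once per iteration, so a failure
is reported for the specific region that could not be mapped rather than
for the traversal as a whole.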