From 6a86c157afde79e734bc35e1065403d532c51f34 Mon Sep 17 00:00:00 2001 From: Philipp Schuster Date: Tue, 25 Nov 2025 14:29:36 +0100 Subject: [PATCH] misc: clippy: add needless_pass_by_value (partially) This helps to uncover expensive and needless clones in the code base. For example, I prevented extensive clones in the snapshot path where (nested) BTreeMaps have been cloned over and over again. Further, the lint helps devs reason much better about the ownership of parameters. All of these changes have been done manually with the necessary caution. A few structs that are cheap to clone are now `Copy` so that this lint won't trigger for them. I didn't enable the lint so far as it is a massive rabbit hole and needs many more fixes. Nevertheless, it is very useful. Signed-off-by: Philipp Schuster On-behalf-of: SAP philipp.schuster@sap.com --- Cargo.toml | 2 + api_client/src/lib.rs | 20 +++----- arch/src/x86_64/mod.rs | 6 +-- block/src/fcntl.rs | 6 +-- block/src/lib.rs | 1 + block/src/qcow/mod.rs | 23 ++++----- block/src/qcow/qcow_raw_file.rs | 2 +- devices/src/ioapic.rs | 2 +- devices/src/ivshmem.rs | 14 +++--- devices/src/pvpanic.rs | 14 +++--- devices/src/tpm.rs | 4 +- pci/src/vfio.rs | 54 ++++++++++---------- pci/src/vfio_user.rs | 12 +++-- rate_limiter/src/lib.rs | 1 + src/bin/ch-remote.rs | 4 +- tpm/src/emulator.rs | 2 +- tpm/src/socket.rs | 4 +- vhost_user_block/src/lib.rs | 6 +-- virtio-devices/src/block.rs | 7 +-- virtio-devices/src/transport/pci_device.rs | 30 +++++------- virtio-devices/src/vdpa.rs | 4 +- vm-allocator/src/gsi.rs | 4 +- vm-allocator/src/system.rs | 8 +-- vm-migration/src/lib.rs | 4 +- vmm/src/cpu.rs | 12 +++-- vmm/src/device_manager.rs | 8 +-- vmm/src/lib.rs | 4 +- vmm/src/memory_manager.rs | 2 +- vmm/src/vm.rs | 57 +++++++++++----------- 29 files changed, 162 insertions(+), 155 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 4b15a67a0..155e2fc17 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -174,6 +174,8 @@ 
assertions_on_result_states = "deny" if_not_else = "deny" manual_string_new = "deny" map_unwrap_or = "deny" +# Very helpful to uncover costly clones, but unfortunately also a rabbit hole. +#needless_pass_by_value = "deny" redundant_else = "deny" semicolon_if_nothing_returned = "deny" undocumented_unsafe_blocks = "deny" diff --git a/api_client/src/lib.rs b/api_client/src/lib.rs index 8ced48f7c..f5bdd1302 100644 --- a/api_client/src/lib.rs +++ b/api_client/src/lib.rs @@ -142,7 +142,7 @@ pub fn simple_api_full_command_with_fds_and_response, - request_fds: Vec, + request_fds: &[RawFd], ) -> Result, Error> { socket .send_with_fds( @@ -150,7 +150,7 @@ pub fn simple_api_full_command_with_fds_and_response( method: &str, full_command: &str, request_body: Option<&str>, - request_fds: Vec, + request_fds: &[RawFd], ) -> Result<(), Error> { let response = simple_api_full_command_with_fds_and_response( socket, @@ -201,7 +201,7 @@ pub fn simple_api_full_command( full_command: &str, request_body: Option<&str>, ) -> Result<(), Error> { - simple_api_full_command_with_fds(socket, method, full_command, request_body, Vec::new()) + simple_api_full_command_with_fds(socket, method, full_command, request_body, &[]) } pub fn simple_api_full_command_and_response( @@ -210,13 +210,7 @@ pub fn simple_api_full_command_and_response( full_command: &str, request_body: Option<&str>, ) -> Result, Error> { - simple_api_full_command_with_fds_and_response( - socket, - method, - full_command, - request_body, - Vec::new(), - ) + simple_api_full_command_with_fds_and_response(socket, method, full_command, request_body, &[]) } pub fn simple_api_command_with_fds( @@ -224,7 +218,7 @@ pub fn simple_api_command_with_fds( method: &str, c: &str, request_body: Option<&str>, - request_fds: Vec, + request_fds: &[RawFd], ) -> Result<(), Error> { // Create the full VM command. For VMM commands, use // simple_api_full_command(). 
@@ -239,5 +233,5 @@ pub fn simple_api_command( c: &str, request_body: Option<&str>, ) -> Result<(), Error> { - simple_api_command_with_fds(socket, method, c, request_body, Vec::new()) + simple_api_command_with_fds(socket, method, c, request_body, &[]) } diff --git a/arch/src/x86_64/mod.rs b/arch/src/x86_64/mod.rs index e2cb1eab2..ab3835993 100644 --- a/arch/src/x86_64/mod.rs +++ b/arch/src/x86_64/mod.rs @@ -283,7 +283,7 @@ impl CpuidPatch { } } - pub fn patch_cpuid(cpuid: &mut [CpuIdEntry], patches: Vec) { + pub fn patch_cpuid(cpuid: &mut [CpuIdEntry], patches: &[CpuidPatch]) { for entry in cpuid { for patch in patches.iter() { if entry.function == patch.function && entry.index == patch.index { @@ -620,7 +620,7 @@ pub fn generate_common_cpuid( .get_supported_cpuid() .map_err(Error::CpuidGetSupported)?; - CpuidPatch::patch_cpuid(&mut cpuid, cpuid_patches); + CpuidPatch::patch_cpuid(&mut cpuid, &cpuid_patches); #[cfg(feature = "tdx")] let tdx_capabilities = if config.tdx { @@ -1421,7 +1421,7 @@ fn update_cpuid_topology( edx_bit: Some(28), }, ]; - CpuidPatch::patch_cpuid(cpuid, cpuid_patches); + CpuidPatch::patch_cpuid(cpuid, &cpuid_patches); CpuidPatch::set_cpuid_reg( cpuid, 0x8000_0008, diff --git a/block/src/fcntl.rs b/block/src/fcntl.rs index 3687288a6..a2a684f32 100644 --- a/block/src/fcntl.rs +++ b/block/src/fcntl.rs @@ -163,7 +163,7 @@ const fn get_flock(lock_type: LockType, granularity: LockGranularity) -> libc::f /// - `lock_type`: The [`LockType`] /// - `granularity`: The [`LockGranularity`]. pub fn try_acquire_lock( - file: Fd, + file: &Fd, lock_type: LockType, granularity: LockGranularity, ) -> Result<(), LockError> { @@ -191,7 +191,7 @@ pub fn try_acquire_lock( /// # Parameters /// - `file`: The file to clear all locks for [`LockType`]. /// - `granularity`: The [`LockGranularity`]. 
-pub fn clear_lock(file: Fd, granularity: LockGranularity) -> Result<(), LockError> { +pub fn clear_lock(file: &Fd, granularity: LockGranularity) -> Result<(), LockError> { try_acquire_lock(file, LockType::Unlock, granularity) } @@ -202,7 +202,7 @@ pub fn clear_lock(file: Fd, granularity: LockGranularity) -> Result /// - `file`: The file for which to get the lock state. /// - `granularity`: The [`LockGranularity`]. pub fn get_lock_state( - file: Fd, + file: &Fd, granularity: LockGranularity, ) -> Result { let mut flock = get_flock(LockType::Write, granularity); diff --git a/block/src/lib.rs b/block/src/lib.rs index 997a2f341..1996c9ba6 100644 --- a/block/src/lib.rs +++ b/block/src/lib.rs @@ -861,6 +861,7 @@ ioctl_io_nr!(BLKPBSZGET, 0x12, 123); ioctl_io_nr!(BLKIOMIN, 0x12, 120); ioctl_io_nr!(BLKIOOPT, 0x12, 121); +#[derive(Copy, Clone)] enum BlockSize { LogicalBlock, PhysicalBlock, diff --git a/block/src/qcow/mod.rs b/block/src/qcow/mod.rs index d713d5693..251c1b772 100644 --- a/block/src/qcow/mod.rs +++ b/block/src/qcow/mod.rs @@ -125,6 +125,7 @@ pub enum Error { pub type Result = std::result::Result; +#[derive(Copy, Clone)] pub enum ImageType { Raw, Qcow2, @@ -683,7 +684,7 @@ impl QcowFile { /// Creates a new QcowFile at the given path. pub fn new(file: RawFile, version: u32, virtual_size: u64) -> Result { let header = QcowHeader::create_for_size_and_path(version, virtual_size, None)?; - QcowFile::new_from_header(file, header) + QcowFile::new_from_header(file, &header) } /// Creates a new QcowFile at the given path. 
@@ -705,12 +706,12 @@ impl QcowFile { .map_err(|e| Error::BackingFileOpen(Box::new(e)))?; let size = backing_file.virtual_size(); let header = QcowHeader::create_for_size_and_path(version, size, Some(backing_file_name))?; - let mut result = QcowFile::new_from_header(file, header)?; + let mut result = QcowFile::new_from_header(file, &header)?; result.backing_file = Some(Box::new(backing_file)); Ok(result) } - fn new_from_header(mut file: RawFile, header: QcowHeader) -> Result { + fn new_from_header(mut file: RawFile, header: &QcowHeader) -> Result { file.rewind().map_err(Error::SeekingFile)?; header.write_to(&mut file)?; @@ -854,7 +855,7 @@ impl QcowFile { // Add references to the L1 table clusters. fn set_l1_refcounts( refcounts: &mut [u16], - header: QcowHeader, + header: &QcowHeader, cluster_size: u64, ) -> Result<()> { let entries_per_cluster = cluster_size / size_of::() as u64; @@ -869,7 +870,7 @@ impl QcowFile { // Traverse the L1 and L2 tables to find all reachable data clusters. fn set_data_refcounts( refcounts: &mut [u16], - header: QcowHeader, + header: &QcowHeader, cluster_size: u64, raw_file: &mut QcowRawFile, ) -> Result<()> { @@ -908,7 +909,7 @@ impl QcowFile { // Add references to the top-level refcount table clusters. fn set_refcount_table_refcounts( refcounts: &mut [u16], - header: QcowHeader, + header: &QcowHeader, cluster_size: u64, ) -> Result<()> { let refcount_table_offset = header.refcount_table_offset; @@ -1046,9 +1047,9 @@ impl QcowFile { // Find all references clusters and rebuild refcounts. 
set_header_refcount(&mut refcounts, cluster_size)?; - set_l1_refcounts(&mut refcounts, header.clone(), cluster_size)?; - set_data_refcounts(&mut refcounts, header.clone(), cluster_size, raw_file)?; - set_refcount_table_refcounts(&mut refcounts, header.clone(), cluster_size)?; + set_l1_refcounts(&mut refcounts, &header, cluster_size)?; + set_data_refcounts(&mut refcounts, &header, cluster_size, raw_file)?; + set_refcount_table_refcounts(&mut refcounts, &header, cluster_size)?; // Allocate clusters to store the new reference count blocks. let ref_table = alloc_refblocks(&mut refcounts, cluster_size, refblock_clusters)?; @@ -1276,7 +1277,7 @@ impl QcowFile { // First use a pre allocated cluster if one is available. if let Some(free_cluster) = self.avail_clusters.pop() { if let Some(initial_data) = initial_data { - self.raw_file.write_cluster(free_cluster, initial_data)?; + self.raw_file.write_cluster(free_cluster, &initial_data)?; } else { self.raw_file.zero_cluster(free_cluster)?; } @@ -1286,7 +1287,7 @@ impl QcowFile { let max_valid_cluster_offset = self.refcounts.max_valid_cluster_offset(); if let Some(new_cluster) = self.raw_file.add_cluster_end(max_valid_cluster_offset)? 
{ if let Some(initial_data) = initial_data { - self.raw_file.write_cluster(new_cluster, initial_data)?; + self.raw_file.write_cluster(new_cluster, &initial_data)?; } Ok(new_cluster) } else { diff --git a/block/src/qcow/qcow_raw_file.rs b/block/src/qcow/qcow_raw_file.rs index bb4f849ac..ceebfd1a9 100644 --- a/block/src/qcow/qcow_raw_file.rs +++ b/block/src/qcow/qcow_raw_file.rs @@ -144,7 +144,7 @@ impl QcowRawFile { } /// Writes - pub fn write_cluster(&mut self, address: u64, data: Vec) -> io::Result<()> { + pub fn write_cluster(&mut self, address: u64, data: &[u8]) -> io::Result<()> { let cluster_size = self.cluster_size as usize; self.file.seek(SeekFrom::Start(address))?; self.file.write_all(&data[0..cluster_size]) diff --git a/devices/src/ioapic.rs b/devices/src/ioapic.rs index c43f629d8..660531eb6 100644 --- a/devices/src/ioapic.rs +++ b/devices/src/ioapic.rs @@ -191,7 +191,7 @@ impl Ioapic { pub fn new( id: String, apic_address: GuestAddress, - interrupt_manager: Arc>, + interrupt_manager: &dyn InterruptManager, state: Option, ) -> Result { let interrupt_source_group = interrupt_manager diff --git a/devices/src/ivshmem.rs b/devices/src/ivshmem.rs index 4e1a01bf3..fff48a72c 100644 --- a/devices/src/ivshmem.rs +++ b/devices/src/ivshmem.rs @@ -114,14 +114,14 @@ impl IvshmemDevice { region_size: u64, backend_file: Option, ivshmem_ops: Arc>, - snapshot: Option, + snapshot: Option<&Snapshot>, ) -> Result { - let pci_configuration_state = - vm_migration::state_from_id(snapshot.as_ref(), PCI_CONFIGURATION_ID).map_err(|e| { - IvshmemError::RetrievePciConfigurationState(anyhow!( - "Failed to get PciConfigurationState from Snapshot: {e}", - )) - })?; + let pci_configuration_state = vm_migration::state_from_id(snapshot, PCI_CONFIGURATION_ID) + .map_err(|e| { + IvshmemError::RetrievePciConfigurationState(anyhow!( + "Failed to get PciConfigurationState from Snapshot: {e}", + )) + })?; let state: Option = snapshot .as_ref() diff --git a/devices/src/pvpanic.rs 
b/devices/src/pvpanic.rs index ef42b0361..0451da1a0 100644 --- a/devices/src/pvpanic.rs +++ b/devices/src/pvpanic.rs @@ -66,13 +66,13 @@ pub struct PvPanicDeviceState { } impl PvPanicDevice { - pub fn new(id: String, snapshot: Option) -> Result { - let pci_configuration_state = - vm_migration::state_from_id(snapshot.as_ref(), PCI_CONFIGURATION_ID).map_err(|e| { - PvPanicError::RetrievePciConfigurationState(anyhow!( - "Failed to get PciConfigurationState from Snapshot: {e}" - )) - })?; + pub fn new(id: String, snapshot: Option<&Snapshot>) -> Result { + let pci_configuration_state = vm_migration::state_from_id(snapshot, PCI_CONFIGURATION_ID) + .map_err(|e| { + PvPanicError::RetrievePciConfigurationState(anyhow!( + "Failed to get PciConfigurationState from Snapshot: {e}" + )) + })?; let mut configuration = PciConfiguration::new( PVPANIC_VENDOR_ID, diff --git a/devices/src/tpm.rs b/devices/src/tpm.rs index 532b2c4a9..15494e323 100644 --- a/devices/src/tpm.rs +++ b/devices/src/tpm.rs @@ -27,6 +27,7 @@ pub enum Error { type Result = anyhow::Result; #[allow(dead_code)] +#[derive(Copy, Clone)] enum LocStateFields { TpmEstablished, LocAssigned, @@ -63,6 +64,7 @@ enum IntfId2Fields { Did, } +#[derive(Copy, Clone)] enum CtrlStsFields { TpmSts, TpmIdle, @@ -220,7 +222,7 @@ pub struct Tpm { } impl Tpm { - pub fn new(path: String) -> Result { + pub fn new(path: &str) -> Result { let emulator = Emulator::new(path) .map_err(|e| Error::Init(anyhow!("Failed while initializing tpm Emulator: {e:?}")))?; let mut tpm = Tpm { diff --git a/pci/src/vfio.rs b/pci/src/vfio.rs index 76c5d72e3..914feaffd 100644 --- a/pci/src/vfio.rs +++ b/pci/src/vfio.rs @@ -493,15 +493,15 @@ impl VfioCommon { vfio_wrapper: Arc, subclass: &dyn PciSubclass, bdf: PciBdf, - snapshot: Option, + snapshot: Option<&Snapshot>, x_nv_gpudirect_clique: Option, ) -> Result { - let pci_configuration_state = - vm_migration::state_from_id(snapshot.as_ref(), PCI_CONFIGURATION_ID).map_err(|e| { - 
VfioPciError::RetrievePciConfigurationState(anyhow!( - "Failed to get PciConfigurationState from Snapshot: {e}" - )) - })?; + let pci_configuration_state = vm_migration::state_from_id(snapshot, PCI_CONFIGURATION_ID) + .map_err(|e| { + VfioPciError::RetrievePciConfigurationState(anyhow!( + "Failed to get PciConfigurationState from Snapshot: {e}" + )) + })?; let configuration = PciConfiguration::new( 0, @@ -541,18 +541,16 @@ impl VfioCommon { "Failed to get VfioCommonState from Snapshot: {e}" )) })?; - let msi_state = - vm_migration::state_from_id(snapshot.as_ref(), MSI_CONFIG_ID).map_err(|e| { - VfioPciError::RetrieveMsiConfigState(anyhow!( - "Failed to get MsiConfigState from Snapshot: {e}" - )) - })?; - let msix_state = - vm_migration::state_from_id(snapshot.as_ref(), MSIX_CONFIG_ID).map_err(|e| { - VfioPciError::RetrieveMsixConfigState(anyhow!( - "Failed to get MsixConfigState from Snapshot: {e}" - )) - })?; + let msi_state = vm_migration::state_from_id(snapshot, MSI_CONFIG_ID).map_err(|e| { + VfioPciError::RetrieveMsiConfigState(anyhow!( + "Failed to get MsiConfigState from Snapshot: {e}" + )) + })?; + let msix_state = vm_migration::state_from_id(snapshot, MSIX_CONFIG_ID).map_err(|e| { + VfioPciError::RetrieveMsixConfigState(anyhow!( + "Failed to get MsixConfigState from Snapshot: {e}" + )) + })?; if let Some(state) = state.as_ref() { vfio_common.set_state(state, msi_state, msix_state)?; @@ -598,7 +596,7 @@ impl VfioCommon { allocator: &Arc>, mmio32_allocator: &mut AddressAllocator, mmio64_allocator: &mut AddressAllocator, - resources: Option>, + resources: Option<&[Resource]>, ) -> Result, PciDeviceError> { let mut bars = Vec::new(); let mut bar_id = VFIO_PCI_BAR0_REGION_INDEX; @@ -614,7 +612,7 @@ impl VfioCommon { let mut flags: u32 = 0; let mut restored_bar_addr = None; - if let Some(resources) = &resources { + if let Some(resources) = resources { for resource in resources { if let Resource::PciBar { index, @@ -1480,7 +1478,7 @@ impl VfioPciDevice { 
iommu_attached: bool, bdf: PciBdf, memory_slot_allocator: MemorySlotAllocator, - snapshot: Option, + snapshot: Option<&Snapshot>, x_nv_gpudirect_clique: Option, device_path: PathBuf, ) -> Result { @@ -1495,7 +1493,7 @@ impl VfioPciDevice { Arc::new(vfio_wrapper) as Arc, &PciVfioSubclass::VfioSubclass, bdf, - vm_migration::snapshot_from_id(snapshot.as_ref(), VFIO_COMMON_ID), + vm_migration::snapshot_from_id(snapshot, VFIO_COMMON_ID), x_nv_gpudirect_clique, )?; @@ -1508,7 +1506,7 @@ impl VfioPciDevice { iommu_attached, memory_slot_allocator, bdf, - device_path: device_path.clone(), + device_path, }; Ok(vfio_pci_device) @@ -1832,8 +1830,12 @@ impl PciDevice for VfioPciDevice { mmio64_allocator: &mut AddressAllocator, resources: Option>, ) -> Result, PciDeviceError> { - self.common - .allocate_bars(allocator, mmio32_allocator, mmio64_allocator, resources) + self.common.allocate_bars( + allocator, + mmio32_allocator, + mmio64_allocator, + resources.as_deref(), + ) } fn free_bars( diff --git a/pci/src/vfio_user.rs b/pci/src/vfio_user.rs index 5aa2266ca..248f87272 100644 --- a/pci/src/vfio_user.rs +++ b/pci/src/vfio_user.rs @@ -79,7 +79,7 @@ impl VfioUserPciDevice { legacy_interrupt_group: Option>, bdf: PciBdf, memory_slot_allocator: MemorySlotAllocator, - snapshot: Option, + snapshot: Option<&Snapshot>, ) -> Result { let resettable = client.lock().unwrap().resettable(); if resettable { @@ -100,7 +100,7 @@ impl VfioUserPciDevice { Arc::new(vfio_wrapper) as Arc, &PciVfioUserSubclass::VfioUserSubclass, bdf, - vm_migration::snapshot_from_id(snapshot.as_ref(), VFIO_COMMON_ID), + vm_migration::snapshot_from_id(snapshot, VFIO_COMMON_ID), None, ) .map_err(VfioUserPciDeviceError::CreateVfioCommon)?; @@ -396,8 +396,12 @@ impl PciDevice for VfioUserPciDevice { mmio64_allocator: &mut AddressAllocator, resources: Option>, ) -> Result, PciDeviceError> { - self.common - .allocate_bars(allocator, mmio32_allocator, mmio64_allocator, resources) + self.common.allocate_bars( + allocator, + 
mmio32_allocator, + mmio64_allocator, + resources.as_deref(), + ) } fn free_bars( diff --git a/rate_limiter/src/lib.rs b/rate_limiter/src/lib.rs index 029288ad2..0239bd4ef 100644 --- a/rate_limiter/src/lib.rs +++ b/rate_limiter/src/lib.rs @@ -245,6 +245,7 @@ impl TokenBucket { } /// Enum that describes the type of token used. +#[derive(Copy, Clone)] pub enum TokenType { /// Token type used for bandwidth limiting. Bytes, diff --git a/src/bin/ch-remote.rs b/src/bin/ch-remote.rs index 8d89b607b..39bfaeecd 100644 --- a/src/bin/ch-remote.rs +++ b/src/bin/ch-remote.rs @@ -399,7 +399,7 @@ fn rest_api_do_command(matches: &ArgMatches, socket: &mut UnixStream) -> ApiResu .get_one::("net_config") .unwrap(), )?; - simple_api_command_with_fds(socket, "PUT", "add-net", Some(&net_config), fds) + simple_api_command_with_fds(socket, "PUT", "add-net", Some(&net_config), &fds) .map_err(Error::HttpApiClient) } Some("add-user-device") => { @@ -454,7 +454,7 @@ fn rest_api_do_command(matches: &ArgMatches, socket: &mut UnixStream) -> ApiResu .get_one::("restore_config") .unwrap(), )?; - simple_api_command_with_fds(socket, "PUT", "restore", Some(&restore_config), fds) + simple_api_command_with_fds(socket, "PUT", "restore", Some(&restore_config), &fds) .map_err(Error::HttpApiClient) } Some("coredump") => { diff --git a/tpm/src/emulator.rs b/tpm/src/emulator.rs index a0e9ab6ec..3699c3da7 100644 --- a/tpm/src/emulator.rs +++ b/tpm/src/emulator.rs @@ -86,7 +86,7 @@ impl Emulator { /// /// * `path` - A path to the Unix Domain Socket swtpm is listening on /// - pub fn new(path: String) -> Result { + pub fn new(path: &str) -> Result { if !Path::new(&path).exists() { return Err(Error::InitializeEmulator(anyhow!( "The input TPM Socket path: {path:?} does not exist" diff --git a/tpm/src/socket.rs b/tpm/src/socket.rs index ca4ae5435..4fe5b6370 100644 --- a/tpm/src/socket.rs +++ b/tpm/src/socket.rs @@ -58,8 +58,8 @@ impl SocketDev { } } - pub fn init(&mut self, path: String) -> Result<()> { - 
self.connect(&path)?; + pub fn init(&mut self, path: &str) -> Result<()> { + self.connect(path)?; Ok(()) } diff --git a/vhost_user_block/src/lib.rs b/vhost_user_block/src/lib.rs index 5ad48608e..0e28377b6 100644 --- a/vhost_user_block/src/lib.rs +++ b/vhost_user_block/src/lib.rs @@ -203,7 +203,7 @@ struct VhostUserBlkBackend { impl VhostUserBlkBackend { fn new( - image_path: String, + image_path: &str, num_queues: usize, rdonly: bool, direct: bool, @@ -217,7 +217,7 @@ impl VhostUserBlkBackend { if direct { options.custom_flags(libc::O_DIRECT); } - let image: File = options.open(&image_path).unwrap(); + let image: File = options.open(image_path).unwrap(); let mut raw_img: qcow::RawFile = qcow::RawFile::new(image, direct); let serial = build_serial(&PathBuf::from(&image_path)); @@ -510,7 +510,7 @@ pub fn start_block_backend(backend_command: &str) { let blk_backend = Arc::new(RwLock::new( VhostUserBlkBackend::new( - backend_config.path, + &backend_config.path, backend_config.num_queues, backend_config.readonly, backend_config.direct, diff --git a/virtio-devices/src/block.rs b/virtio-devices/src/block.rs index 93c07763e..898b13616 100644 --- a/virtio-devices/src/block.rs +++ b/virtio-devices/src/block.rs @@ -800,8 +800,8 @@ impl Block { self.disk_path.display() ); let fd = self.disk_image.fd(); - fcntl::try_acquire_lock(fd, lock_type, granularity).map_err(|error| { - let current_lock = get_lock_state(fd, granularity); + fcntl::try_acquire_lock(&fd, lock_type, granularity).map_err(|error| { + let current_lock = get_lock_state(&fd, granularity); // Don't propagate the error to the outside, as it is not useful at all. Instead, // we try to log additional help to the user. if let Ok(current_lock) = current_lock { @@ -830,7 +830,8 @@ impl Block { // It is very unlikely that this fails; // Should we remove the Result to simplify the error propagation on // higher levels? 
- fcntl::clear_lock(self.disk_image.fd(), granularity).map_err(|error| Error::LockDiskImage { + let fd = self.disk_image.fd(); + fcntl::clear_lock(&fd, granularity).map_err(|error| Error::LockDiskImage { path: self.disk_path.clone(), error, lock_type: LockType::Unlock, diff --git a/virtio-devices/src/transport/pci_device.rs b/virtio-devices/src/transport/pci_device.rs index 0e1eb2cce..408611e29 100644 --- a/virtio-devices/src/transport/pci_device.rs +++ b/virtio-devices/src/transport/pci_device.rs @@ -389,7 +389,7 @@ impl VirtioPciDevice { use_64bit_bar: bool, dma_handler: Option>, pending_activations: Arc>>, - snapshot: Option, + snapshot: Option<&Snapshot>, ) -> Result { let mut locked_device = device.lock().unwrap(); let mut queue_evts = Vec::new(); @@ -423,8 +423,8 @@ impl VirtioPciDevice { )) })?; - let msix_state = vm_migration::state_from_id(snapshot.as_ref(), pci::MSIX_CONFIG_ID) - .map_err(|e| { + let msix_state = + vm_migration::state_from_id(snapshot, pci::MSIX_CONFIG_ID).map_err(|e| { VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!( "Failed to get MsixConfigState from Snapshot: {e}" )) @@ -462,13 +462,11 @@ impl VirtioPciDevice { }; let pci_configuration_state = - vm_migration::state_from_id(snapshot.as_ref(), pci::PCI_CONFIGURATION_ID).map_err( - |e| { - VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!( - "Failed to get PciConfigurationState from Snapshot: {e}" - )) - }, - )?; + vm_migration::state_from_id(snapshot, pci::PCI_CONFIGURATION_ID).map_err(|e| { + VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!( + "Failed to get PciConfigurationState from Snapshot: {e}" + )) + })?; let configuration = PciConfiguration::new( VIRTIO_PCI_VENDOR_ID, @@ -485,13 +483,11 @@ impl VirtioPciDevice { ); let common_config_state = - vm_migration::state_from_id(snapshot.as_ref(), VIRTIO_PCI_COMMON_CONFIG_ID).map_err( - |e| { - VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!( - "Failed to get VirtioPciCommonConfigState from Snapshot: {e}" - )) - }, - )?; + 
vm_migration::state_from_id(snapshot, VIRTIO_PCI_COMMON_CONFIG_ID).map_err(|e| { + VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!( + "Failed to get VirtioPciCommonConfigState from Snapshot: {e}" + )) + })?; let common_config = if let Some(common_config_state) = common_config_state { VirtioPciCommonConfig::new(common_config_state, access_platform) diff --git a/virtio-devices/src/vdpa.rs b/virtio-devices/src/vdpa.rs index 49248a678..725f215c7 100644 --- a/virtio-devices/src/vdpa.rs +++ b/virtio-devices/src/vdpa.rs @@ -219,7 +219,7 @@ impl Vdpa { &mut self, mem: &GuestMemoryMmap, virtio_interrupt: &dyn VirtioInterrupt, - queues: Vec<(usize, Queue, EventFd)>, + queues: &[(usize, Queue, EventFd)], ) -> Result<()> { assert!(self.vhost.is_some()); self.vhost @@ -434,7 +434,7 @@ impl VirtioDevice for Vdpa { virtio_interrupt: Arc, queues: Vec<(usize, Queue, EventFd)>, ) -> ActivateResult { - self.activate_vdpa(&mem.memory(), virtio_interrupt.as_ref(), queues) + self.activate_vdpa(&mem.memory(), virtio_interrupt.as_ref(), &queues) .map_err(ActivateError::ActivateVdpa)?; // Store the virtio interrupt handler as we need to return it on reset diff --git a/vm-allocator/src/gsi.rs b/vm-allocator/src/gsi.rs index 8e0a34021..d58670a43 100644 --- a/vm-allocator/src/gsi.rs +++ b/vm-allocator/src/gsi.rs @@ -40,14 +40,14 @@ pub struct GsiAllocator { impl GsiAllocator { #[cfg(target_arch = "x86_64")] /// New GSI allocator - pub fn new(apics: Vec) -> Self { + pub fn new(apics: &[GsiApic]) -> Self { let mut allocator = GsiAllocator { apics: BTreeMap::new(), next_irq: 0xffff_ffff, next_gsi: 0, }; - for apic in &apics { + for apic in apics { if apic.base < allocator.next_irq { allocator.next_irq = apic.base; } diff --git a/vm-allocator/src/system.rs b/vm-allocator/src/system.rs index bad0272c3..02ea86c6a 100644 --- a/vm-allocator/src/system.rs +++ b/vm-allocator/src/system.rs @@ -19,7 +19,7 @@ use crate::page_size::get_page_size; /// /// # Example - Use the `SystemAddress` builder. 
/// -/// ``` +/// ```rust /// # #[cfg(target_arch = "x86_64")] /// # use vm_allocator::{GsiApic, SystemAllocator}; /// # #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))] @@ -29,7 +29,7 @@ use crate::page_size::get_page_size; /// GuestAddress(0x1000), /// 0x10000, /// GuestAddress(0x10000000), 0x10000000, -/// #[cfg(target_arch = "x86_64")] vec![GsiApic::new(5, 19)]).unwrap(); +/// #[cfg(target_arch = "x86_64")] &[GsiApic::new(5, 19)]).unwrap(); /// #[cfg(target_arch = "x86_64")] /// assert_eq!(allocator.allocate_irq(), Some(5)); /// #[cfg(target_arch = "aarch64")] @@ -59,14 +59,14 @@ impl SystemAllocator { /// * `io_size` - (X86) The size of IO memory. /// * `platform_mmio_base` - The starting address of platform MMIO memory. /// * `platform_mmio_size` - The size of platform MMIO memory. - /// * `apics` - (X86) Vector of APIC's. + /// * `apics` - (X86) slice of APIC's. /// pub fn new( io_base: GuestAddress, io_size: GuestUsize, platform_mmio_base: GuestAddress, platform_mmio_size: GuestUsize, - #[cfg(target_arch = "x86_64")] apics: Vec, + #[cfg(target_arch = "x86_64")] apics: &[GsiApic], ) -> Option { Some(SystemAllocator { io_address_space: AddressAllocator::new(io_base, io_size)?, diff --git a/vm-migration/src/lib.rs b/vm-migration/src/lib.rs index 16c0a6df8..921ae5b3d 100644 --- a/vm-migration/src/lib.rs +++ b/vm-migration/src/lib.rs @@ -156,8 +156,8 @@ impl Snapshot { } } -pub fn snapshot_from_id(snapshot: Option<&Snapshot>, id: &str) -> Option { - snapshot.and_then(|s| s.snapshots.get(id).cloned()) +pub fn snapshot_from_id<'a>(snapshot: Option<&'a Snapshot>, id: &str) -> Option<&'a Snapshot> { + snapshot.and_then(|s| s.snapshots.get(id)) } pub fn state_from_id<'a, T>(s: Option<&'a Snapshot>, id: &str) -> Result, MigratableError> diff --git a/vmm/src/cpu.rs b/vmm/src/cpu.rs index ee5186e6d..b3d7f71ef 100644 --- a/vmm/src/cpu.rs +++ b/vmm/src/cpu.rs @@ -901,7 +901,11 @@ impl CpuManager { Ok(()) } - fn create_vcpu(&mut self, cpu_id: u32, snapshot: 
Option) -> Result>> { + fn create_vcpu( + &mut self, + cpu_id: u32, + snapshot: Option<&Snapshot>, + ) -> Result>> { info!("Creating vCPU: cpu_id = {cpu_id}"); #[cfg(target_arch = "x86_64")] @@ -1006,7 +1010,7 @@ impl CpuManager { fn create_vcpus( &mut self, desired_vcpus: u32, - snapshot: Option, + snapshot: Option<&Snapshot>, ) -> Result>>> { let mut vcpus: Vec>> = vec![]; info!( @@ -1027,7 +1031,7 @@ impl CpuManager { cpu_id, // TODO: The special format of the CPU id can be removed once // ready to break live upgrade. - snapshot_from_id(snapshot.as_ref(), cpu_id.to_string().as_str()), + snapshot_from_id(snapshot, cpu_id.to_string().as_str()), )?); } @@ -1386,7 +1390,7 @@ impl CpuManager { pub fn create_boot_vcpus( &mut self, - snapshot: Option, + snapshot: Option<&Snapshot>, ) -> Result>>> { trace_scoped!("create_boot_vcpus"); diff --git a/vmm/src/device_manager.rs b/vmm/src/device_manager.rs index 7766caa80..245be2c85 100644 --- a/vmm/src/device_manager.rs +++ b/vmm/src/device_manager.rs @@ -1155,7 +1155,7 @@ impl DeviceManager { force_iommu: bool, boot_id_list: BTreeSet, #[cfg(not(target_arch = "riscv64"))] timestamp: Instant, - snapshot: Option, + snapshot: Option<&Snapshot>, dynamic: bool, ) -> DeviceManagerResult>> { trace_scoped!("DeviceManager::new"); @@ -1367,7 +1367,7 @@ impl DeviceManager { timestamp, pending_activations: Arc::new(Mutex::new(Vec::default())), acpi_platform_addresses: AcpiPlatformAddresses::default(), - snapshot, + snapshot: snapshot.cloned(), rate_limit_groups, mmio_regions: Arc::new(Mutex::new(Vec::new())), #[cfg(feature = "fw_cfg")] @@ -1795,7 +1795,7 @@ impl DeviceManager { ioapic::Ioapic::new( id.clone(), APIC_START, - Arc::clone(&self.msi_interrupt_manager), + self.msi_interrupt_manager.as_ref(), state_from_id(self.snapshot.as_ref(), id.as_str()) .map_err(DeviceManagerError::RestoreGetState)?, ) @@ -2489,7 +2489,7 @@ impl DeviceManager { tpm_path: PathBuf, ) -> DeviceManagerResult>> { // Create TPM Device - let tpm = 
devices::tpm::Tpm::new(tpm_path.to_str().unwrap().to_string()).map_err(|e| { + let tpm = devices::tpm::Tpm::new(tpm_path.to_str().unwrap()).map_err(|e| { DeviceManagerError::CreateTpmDevice(anyhow!("Failed to create TPM Device : {e:?}")) })?; let tpm = Arc::new(Mutex::new(tpm)); diff --git a/vmm/src/lib.rs b/vmm/src/lib.rs index cb1ce9d54..1c1eca79b 100644 --- a/vmm/src/lib.rs +++ b/vmm/src/lib.rs @@ -965,7 +965,7 @@ impl Vmm { self.console_info.clone(), self.console_resize_pipe.clone(), Arc::clone(&self.original_termios_opt), - Some(snapshot), + Some(&snapshot), ) .map_err(|e| { MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {e:?}")) @@ -1334,7 +1334,7 @@ impl Vmm { self.console_info.clone(), self.console_resize_pipe.clone(), Arc::clone(&self.original_termios_opt), - Some(snapshot), + Some(&snapshot), Some(source_url), Some(prefault), )?; diff --git a/vmm/src/memory_manager.rs b/vmm/src/memory_manager.rs index 8bfcd6d82..8f4ff649b 100644 --- a/vmm/src/memory_manager.rs +++ b/vmm/src/memory_manager.rs @@ -1169,7 +1169,7 @@ impl MemoryManager { start_of_platform_device_area, PLATFORM_DEVICE_AREA_SIZE, #[cfg(target_arch = "x86_64")] - vec![GsiApic::new( + &[GsiApic::new( X86_64_IRQ_BASE, ioapic::NUM_IOAPIC_PINS as u32 - X86_64_IRQ_BASE, )], diff --git a/vmm/src/vm.rs b/vmm/src/vm.rs index 526649c57..fcd6fbfbe 100644 --- a/vmm/src/vm.rs +++ b/vmm/src/vm.rs @@ -543,7 +543,7 @@ impl Vm { console_info: Option, console_resize_pipe: Option>, original_termios: Arc>>, - snapshot: Option, + snapshot: Option<&Snapshot>, ) -> Result { trace_scoped!("Vm::new_from_memory_manager"); @@ -658,7 +658,7 @@ impl Vm { boot_id_list, #[cfg(not(target_arch = "riscv64"))] timestamp, - snapshot_from_id(snapshot.as_ref(), DEVICE_MANAGER_SNAPSHOT_ID), + snapshot_from_id(snapshot, DEVICE_MANAGER_SNAPSHOT_ID), dynamic, ) .map_err(Error::DeviceManager)?; @@ -723,7 +723,7 @@ impl Vm { cpu_manager .lock() .unwrap() - .create_boot_vcpus(snapshot_from_id(snapshot.as_ref(), 
CPU_MANAGER_SNAPSHOT_ID)) + .create_boot_vcpus(snapshot_from_id(snapshot, CPU_MANAGER_SNAPSHOT_ID)) .map_err(Error::CpuManager)?; // For KVM, we need to create interrupt controller after we create boot vcpus. @@ -992,7 +992,7 @@ impl Vm { console_info: Option, console_resize_pipe: Option>, original_termios: Arc>>, - snapshot: Option, + snapshot: Option<&Snapshot>, source_url: Option<&str>, prefault: Option, ) -> Result { @@ -1035,31 +1035,30 @@ impl Vm { vm_config.lock().unwrap().cpus.max_phys_bits, ); - let memory_manager = if let Some(snapshot) = - snapshot_from_id(snapshot.as_ref(), MEMORY_MANAGER_SNAPSHOT_ID) - { - MemoryManager::new_from_snapshot( - &snapshot, - vm.clone(), - &vm_config.lock().unwrap().memory.clone(), - source_url, - prefault.unwrap(), - phys_bits, - ) - .map_err(Error::MemoryManager)? - } else { - MemoryManager::new( - vm.clone(), - &vm_config.lock().unwrap().memory.clone(), - None, - phys_bits, - #[cfg(feature = "tdx")] - tdx_enabled, - None, - None, - ) - .map_err(Error::MemoryManager)? - }; + let memory_manager = + if let Some(snapshot) = snapshot_from_id(snapshot, MEMORY_MANAGER_SNAPSHOT_ID) { + MemoryManager::new_from_snapshot( + snapshot, + vm.clone(), + &vm_config.lock().unwrap().memory.clone(), + source_url, + prefault.unwrap(), + phys_bits, + ) + .map_err(Error::MemoryManager)? + } else { + MemoryManager::new( + vm.clone(), + &vm_config.lock().unwrap().memory.clone(), + None, + phys_bits, + #[cfg(feature = "tdx")] + tdx_enabled, + None, + None, + ) + .map_err(Error::MemoryManager)? + }; Vm::new_from_memory_manager( vm_config,