misc: clippy: add needless_pass_by_value (partially)
This helps to uncover expensive and needless clones in the code base. For example, I prevented expensive clones in the snapshot path, where (nested) `BTreeMap`s had been cloned over and over again. Further, the lint helps devs reason much better about the ownership of parameters. All of these changes have been made manually with the necessary caution. A few structs that are cheap to clone are now `Copy` so that this lint won't trigger for them. I haven't enabled the lint yet, as it is a massive rabbit hole and needs many more fixes. Nevertheless, it is very useful. Signed-off-by: Philipp Schuster <philipp.schuster@cyberus-technology.de> On-behalf-of: SAP philipp.schuster@sap.com
This commit is contained in:
parent
0a07c96d17
commit
6a86c157af
29 changed files with 162 additions and 155 deletions
|
|
@ -174,6 +174,8 @@ assertions_on_result_states = "deny"
|
|||
if_not_else = "deny"
|
||||
manual_string_new = "deny"
|
||||
map_unwrap_or = "deny"
|
||||
# Very helpful to uncover costly clones, but unfortunately also a rabbit hole.
|
||||
#needless_pass_by_value = "deny"
|
||||
redundant_else = "deny"
|
||||
semicolon_if_nothing_returned = "deny"
|
||||
undocumented_unsafe_blocks = "deny"
|
||||
|
|
|
|||
|
|
@ -142,7 +142,7 @@ pub fn simple_api_full_command_with_fds_and_response<T: Read + Write + ScmSocket
|
|||
method: &str,
|
||||
full_command: &str,
|
||||
request_body: Option<&str>,
|
||||
request_fds: Vec<RawFd>,
|
||||
request_fds: &[RawFd],
|
||||
) -> Result<Option<String>, Error> {
|
||||
socket
|
||||
.send_with_fds(
|
||||
|
|
@ -150,7 +150,7 @@ pub fn simple_api_full_command_with_fds_and_response<T: Read + Write + ScmSocket
|
|||
"{method} /api/v1/{full_command} HTTP/1.1\r\nHost: localhost\r\nAccept: */*\r\n"
|
||||
)
|
||||
.as_bytes()],
|
||||
&request_fds,
|
||||
request_fds,
|
||||
)
|
||||
.map_err(Error::SocketSendFds)?;
|
||||
|
||||
|
|
@ -178,7 +178,7 @@ pub fn simple_api_full_command_with_fds<T: Read + Write + ScmSocket>(
|
|||
method: &str,
|
||||
full_command: &str,
|
||||
request_body: Option<&str>,
|
||||
request_fds: Vec<RawFd>,
|
||||
request_fds: &[RawFd],
|
||||
) -> Result<(), Error> {
|
||||
let response = simple_api_full_command_with_fds_and_response(
|
||||
socket,
|
||||
|
|
@ -201,7 +201,7 @@ pub fn simple_api_full_command<T: Read + Write + ScmSocket>(
|
|||
full_command: &str,
|
||||
request_body: Option<&str>,
|
||||
) -> Result<(), Error> {
|
||||
simple_api_full_command_with_fds(socket, method, full_command, request_body, Vec::new())
|
||||
simple_api_full_command_with_fds(socket, method, full_command, request_body, &[])
|
||||
}
|
||||
|
||||
pub fn simple_api_full_command_and_response<T: Read + Write + ScmSocket>(
|
||||
|
|
@ -210,13 +210,7 @@ pub fn simple_api_full_command_and_response<T: Read + Write + ScmSocket>(
|
|||
full_command: &str,
|
||||
request_body: Option<&str>,
|
||||
) -> Result<Option<String>, Error> {
|
||||
simple_api_full_command_with_fds_and_response(
|
||||
socket,
|
||||
method,
|
||||
full_command,
|
||||
request_body,
|
||||
Vec::new(),
|
||||
)
|
||||
simple_api_full_command_with_fds_and_response(socket, method, full_command, request_body, &[])
|
||||
}
|
||||
|
||||
pub fn simple_api_command_with_fds<T: Read + Write + ScmSocket>(
|
||||
|
|
@ -224,7 +218,7 @@ pub fn simple_api_command_with_fds<T: Read + Write + ScmSocket>(
|
|||
method: &str,
|
||||
c: &str,
|
||||
request_body: Option<&str>,
|
||||
request_fds: Vec<RawFd>,
|
||||
request_fds: &[RawFd],
|
||||
) -> Result<(), Error> {
|
||||
// Create the full VM command. For VMM commands, use
|
||||
// simple_api_full_command().
|
||||
|
|
@ -239,5 +233,5 @@ pub fn simple_api_command<T: Read + Write + ScmSocket>(
|
|||
c: &str,
|
||||
request_body: Option<&str>,
|
||||
) -> Result<(), Error> {
|
||||
simple_api_command_with_fds(socket, method, c, request_body, Vec::new())
|
||||
simple_api_command_with_fds(socket, method, c, request_body, &[])
|
||||
}
|
||||
|
|
|
|||
|
|
@ -283,7 +283,7 @@ impl CpuidPatch {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn patch_cpuid(cpuid: &mut [CpuIdEntry], patches: Vec<CpuidPatch>) {
|
||||
pub fn patch_cpuid(cpuid: &mut [CpuIdEntry], patches: &[CpuidPatch]) {
|
||||
for entry in cpuid {
|
||||
for patch in patches.iter() {
|
||||
if entry.function == patch.function && entry.index == patch.index {
|
||||
|
|
@ -620,7 +620,7 @@ pub fn generate_common_cpuid(
|
|||
.get_supported_cpuid()
|
||||
.map_err(Error::CpuidGetSupported)?;
|
||||
|
||||
CpuidPatch::patch_cpuid(&mut cpuid, cpuid_patches);
|
||||
CpuidPatch::patch_cpuid(&mut cpuid, &cpuid_patches);
|
||||
|
||||
#[cfg(feature = "tdx")]
|
||||
let tdx_capabilities = if config.tdx {
|
||||
|
|
@ -1421,7 +1421,7 @@ fn update_cpuid_topology(
|
|||
edx_bit: Some(28),
|
||||
},
|
||||
];
|
||||
CpuidPatch::patch_cpuid(cpuid, cpuid_patches);
|
||||
CpuidPatch::patch_cpuid(cpuid, &cpuid_patches);
|
||||
CpuidPatch::set_cpuid_reg(
|
||||
cpuid,
|
||||
0x8000_0008,
|
||||
|
|
|
|||
|
|
@ -163,7 +163,7 @@ const fn get_flock(lock_type: LockType, granularity: LockGranularity) -> libc::f
|
|||
/// - `lock_type`: The [`LockType`]
|
||||
/// - `granularity`: The [`LockGranularity`].
|
||||
pub fn try_acquire_lock<Fd: AsRawFd>(
|
||||
file: Fd,
|
||||
file: &Fd,
|
||||
lock_type: LockType,
|
||||
granularity: LockGranularity,
|
||||
) -> Result<(), LockError> {
|
||||
|
|
@ -191,7 +191,7 @@ pub fn try_acquire_lock<Fd: AsRawFd>(
|
|||
/// # Parameters
|
||||
/// - `file`: The file to clear all locks for [`LockType`].
|
||||
/// - `granularity`: The [`LockGranularity`].
|
||||
pub fn clear_lock<Fd: AsRawFd>(file: Fd, granularity: LockGranularity) -> Result<(), LockError> {
|
||||
pub fn clear_lock<Fd: AsRawFd>(file: &Fd, granularity: LockGranularity) -> Result<(), LockError> {
|
||||
try_acquire_lock(file, LockType::Unlock, granularity)
|
||||
}
|
||||
|
||||
|
|
@ -202,7 +202,7 @@ pub fn clear_lock<Fd: AsRawFd>(file: Fd, granularity: LockGranularity) -> Result
|
|||
/// - `file`: The file for which to get the lock state.
|
||||
/// - `granularity`: The [`LockGranularity`].
|
||||
pub fn get_lock_state<Fd: AsRawFd>(
|
||||
file: Fd,
|
||||
file: &Fd,
|
||||
granularity: LockGranularity,
|
||||
) -> Result<LockState, LockError> {
|
||||
let mut flock = get_flock(LockType::Write, granularity);
|
||||
|
|
|
|||
|
|
@ -861,6 +861,7 @@ ioctl_io_nr!(BLKPBSZGET, 0x12, 123);
|
|||
ioctl_io_nr!(BLKIOMIN, 0x12, 120);
|
||||
ioctl_io_nr!(BLKIOOPT, 0x12, 121);
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
enum BlockSize {
|
||||
LogicalBlock,
|
||||
PhysicalBlock,
|
||||
|
|
|
|||
|
|
@ -125,6 +125,7 @@ pub enum Error {
|
|||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
pub enum ImageType {
|
||||
Raw,
|
||||
Qcow2,
|
||||
|
|
@ -683,7 +684,7 @@ impl QcowFile {
|
|||
/// Creates a new QcowFile at the given path.
|
||||
pub fn new(file: RawFile, version: u32, virtual_size: u64) -> Result<QcowFile> {
|
||||
let header = QcowHeader::create_for_size_and_path(version, virtual_size, None)?;
|
||||
QcowFile::new_from_header(file, header)
|
||||
QcowFile::new_from_header(file, &header)
|
||||
}
|
||||
|
||||
/// Creates a new QcowFile at the given path.
|
||||
|
|
@ -705,12 +706,12 @@ impl QcowFile {
|
|||
.map_err(|e| Error::BackingFileOpen(Box::new(e)))?;
|
||||
let size = backing_file.virtual_size();
|
||||
let header = QcowHeader::create_for_size_and_path(version, size, Some(backing_file_name))?;
|
||||
let mut result = QcowFile::new_from_header(file, header)?;
|
||||
let mut result = QcowFile::new_from_header(file, &header)?;
|
||||
result.backing_file = Some(Box::new(backing_file));
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn new_from_header(mut file: RawFile, header: QcowHeader) -> Result<QcowFile> {
|
||||
fn new_from_header(mut file: RawFile, header: &QcowHeader) -> Result<QcowFile> {
|
||||
file.rewind().map_err(Error::SeekingFile)?;
|
||||
header.write_to(&mut file)?;
|
||||
|
||||
|
|
@ -854,7 +855,7 @@ impl QcowFile {
|
|||
// Add references to the L1 table clusters.
|
||||
fn set_l1_refcounts(
|
||||
refcounts: &mut [u16],
|
||||
header: QcowHeader,
|
||||
header: &QcowHeader,
|
||||
cluster_size: u64,
|
||||
) -> Result<()> {
|
||||
let entries_per_cluster = cluster_size / size_of::<u64>() as u64;
|
||||
|
|
@ -869,7 +870,7 @@ impl QcowFile {
|
|||
// Traverse the L1 and L2 tables to find all reachable data clusters.
|
||||
fn set_data_refcounts(
|
||||
refcounts: &mut [u16],
|
||||
header: QcowHeader,
|
||||
header: &QcowHeader,
|
||||
cluster_size: u64,
|
||||
raw_file: &mut QcowRawFile,
|
||||
) -> Result<()> {
|
||||
|
|
@ -908,7 +909,7 @@ impl QcowFile {
|
|||
// Add references to the top-level refcount table clusters.
|
||||
fn set_refcount_table_refcounts(
|
||||
refcounts: &mut [u16],
|
||||
header: QcowHeader,
|
||||
header: &QcowHeader,
|
||||
cluster_size: u64,
|
||||
) -> Result<()> {
|
||||
let refcount_table_offset = header.refcount_table_offset;
|
||||
|
|
@ -1046,9 +1047,9 @@ impl QcowFile {
|
|||
|
||||
// Find all references clusters and rebuild refcounts.
|
||||
set_header_refcount(&mut refcounts, cluster_size)?;
|
||||
set_l1_refcounts(&mut refcounts, header.clone(), cluster_size)?;
|
||||
set_data_refcounts(&mut refcounts, header.clone(), cluster_size, raw_file)?;
|
||||
set_refcount_table_refcounts(&mut refcounts, header.clone(), cluster_size)?;
|
||||
set_l1_refcounts(&mut refcounts, &header, cluster_size)?;
|
||||
set_data_refcounts(&mut refcounts, &header, cluster_size, raw_file)?;
|
||||
set_refcount_table_refcounts(&mut refcounts, &header, cluster_size)?;
|
||||
|
||||
// Allocate clusters to store the new reference count blocks.
|
||||
let ref_table = alloc_refblocks(&mut refcounts, cluster_size, refblock_clusters)?;
|
||||
|
|
@ -1276,7 +1277,7 @@ impl QcowFile {
|
|||
// First use a pre allocated cluster if one is available.
|
||||
if let Some(free_cluster) = self.avail_clusters.pop() {
|
||||
if let Some(initial_data) = initial_data {
|
||||
self.raw_file.write_cluster(free_cluster, initial_data)?;
|
||||
self.raw_file.write_cluster(free_cluster, &initial_data)?;
|
||||
} else {
|
||||
self.raw_file.zero_cluster(free_cluster)?;
|
||||
}
|
||||
|
|
@ -1286,7 +1287,7 @@ impl QcowFile {
|
|||
let max_valid_cluster_offset = self.refcounts.max_valid_cluster_offset();
|
||||
if let Some(new_cluster) = self.raw_file.add_cluster_end(max_valid_cluster_offset)? {
|
||||
if let Some(initial_data) = initial_data {
|
||||
self.raw_file.write_cluster(new_cluster, initial_data)?;
|
||||
self.raw_file.write_cluster(new_cluster, &initial_data)?;
|
||||
}
|
||||
Ok(new_cluster)
|
||||
} else {
|
||||
|
|
|
|||
|
|
@ -144,7 +144,7 @@ impl QcowRawFile {
|
|||
}
|
||||
|
||||
/// Writes
|
||||
pub fn write_cluster(&mut self, address: u64, data: Vec<u8>) -> io::Result<()> {
|
||||
pub fn write_cluster(&mut self, address: u64, data: &[u8]) -> io::Result<()> {
|
||||
let cluster_size = self.cluster_size as usize;
|
||||
self.file.seek(SeekFrom::Start(address))?;
|
||||
self.file.write_all(&data[0..cluster_size])
|
||||
|
|
|
|||
|
|
@ -191,7 +191,7 @@ impl Ioapic {
|
|||
pub fn new(
|
||||
id: String,
|
||||
apic_address: GuestAddress,
|
||||
interrupt_manager: Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
|
||||
interrupt_manager: &dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>,
|
||||
state: Option<IoapicState>,
|
||||
) -> Result<Ioapic> {
|
||||
let interrupt_source_group = interrupt_manager
|
||||
|
|
|
|||
|
|
@ -114,14 +114,14 @@ impl IvshmemDevice {
|
|||
region_size: u64,
|
||||
backend_file: Option<PathBuf>,
|
||||
ivshmem_ops: Arc<Mutex<dyn IvshmemOps>>,
|
||||
snapshot: Option<Snapshot>,
|
||||
snapshot: Option<&Snapshot>,
|
||||
) -> Result<Self, IvshmemError> {
|
||||
let pci_configuration_state =
|
||||
vm_migration::state_from_id(snapshot.as_ref(), PCI_CONFIGURATION_ID).map_err(|e| {
|
||||
IvshmemError::RetrievePciConfigurationState(anyhow!(
|
||||
"Failed to get PciConfigurationState from Snapshot: {e}",
|
||||
))
|
||||
})?;
|
||||
let pci_configuration_state = vm_migration::state_from_id(snapshot, PCI_CONFIGURATION_ID)
|
||||
.map_err(|e| {
|
||||
IvshmemError::RetrievePciConfigurationState(anyhow!(
|
||||
"Failed to get PciConfigurationState from Snapshot: {e}",
|
||||
))
|
||||
})?;
|
||||
|
||||
let state: Option<IvshmemDeviceState> = snapshot
|
||||
.as_ref()
|
||||
|
|
|
|||
|
|
@ -66,13 +66,13 @@ pub struct PvPanicDeviceState {
|
|||
}
|
||||
|
||||
impl PvPanicDevice {
|
||||
pub fn new(id: String, snapshot: Option<Snapshot>) -> Result<Self, PvPanicError> {
|
||||
let pci_configuration_state =
|
||||
vm_migration::state_from_id(snapshot.as_ref(), PCI_CONFIGURATION_ID).map_err(|e| {
|
||||
PvPanicError::RetrievePciConfigurationState(anyhow!(
|
||||
"Failed to get PciConfigurationState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
pub fn new(id: String, snapshot: Option<&Snapshot>) -> Result<Self, PvPanicError> {
|
||||
let pci_configuration_state = vm_migration::state_from_id(snapshot, PCI_CONFIGURATION_ID)
|
||||
.map_err(|e| {
|
||||
PvPanicError::RetrievePciConfigurationState(anyhow!(
|
||||
"Failed to get PciConfigurationState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
let mut configuration = PciConfiguration::new(
|
||||
PVPANIC_VENDOR_ID,
|
||||
|
|
|
|||
|
|
@ -27,6 +27,7 @@ pub enum Error {
|
|||
type Result<T> = anyhow::Result<T, Error>;
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[derive(Copy, Clone)]
|
||||
enum LocStateFields {
|
||||
TpmEstablished,
|
||||
LocAssigned,
|
||||
|
|
@ -63,6 +64,7 @@ enum IntfId2Fields {
|
|||
Did,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
enum CtrlStsFields {
|
||||
TpmSts,
|
||||
TpmIdle,
|
||||
|
|
@ -220,7 +222,7 @@ pub struct Tpm {
|
|||
}
|
||||
|
||||
impl Tpm {
|
||||
pub fn new(path: String) -> Result<Self> {
|
||||
pub fn new(path: &str) -> Result<Self> {
|
||||
let emulator = Emulator::new(path)
|
||||
.map_err(|e| Error::Init(anyhow!("Failed while initializing tpm Emulator: {e:?}")))?;
|
||||
let mut tpm = Tpm {
|
||||
|
|
|
|||
|
|
@ -493,15 +493,15 @@ impl VfioCommon {
|
|||
vfio_wrapper: Arc<dyn Vfio>,
|
||||
subclass: &dyn PciSubclass,
|
||||
bdf: PciBdf,
|
||||
snapshot: Option<Snapshot>,
|
||||
snapshot: Option<&Snapshot>,
|
||||
x_nv_gpudirect_clique: Option<u8>,
|
||||
) -> Result<Self, VfioPciError> {
|
||||
let pci_configuration_state =
|
||||
vm_migration::state_from_id(snapshot.as_ref(), PCI_CONFIGURATION_ID).map_err(|e| {
|
||||
VfioPciError::RetrievePciConfigurationState(anyhow!(
|
||||
"Failed to get PciConfigurationState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
let pci_configuration_state = vm_migration::state_from_id(snapshot, PCI_CONFIGURATION_ID)
|
||||
.map_err(|e| {
|
||||
VfioPciError::RetrievePciConfigurationState(anyhow!(
|
||||
"Failed to get PciConfigurationState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
let configuration = PciConfiguration::new(
|
||||
0,
|
||||
|
|
@ -541,18 +541,16 @@ impl VfioCommon {
|
|||
"Failed to get VfioCommonState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
let msi_state =
|
||||
vm_migration::state_from_id(snapshot.as_ref(), MSI_CONFIG_ID).map_err(|e| {
|
||||
VfioPciError::RetrieveMsiConfigState(anyhow!(
|
||||
"Failed to get MsiConfigState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
let msix_state =
|
||||
vm_migration::state_from_id(snapshot.as_ref(), MSIX_CONFIG_ID).map_err(|e| {
|
||||
VfioPciError::RetrieveMsixConfigState(anyhow!(
|
||||
"Failed to get MsixConfigState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
let msi_state = vm_migration::state_from_id(snapshot, MSI_CONFIG_ID).map_err(|e| {
|
||||
VfioPciError::RetrieveMsiConfigState(anyhow!(
|
||||
"Failed to get MsiConfigState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
let msix_state = vm_migration::state_from_id(snapshot, MSIX_CONFIG_ID).map_err(|e| {
|
||||
VfioPciError::RetrieveMsixConfigState(anyhow!(
|
||||
"Failed to get MsixConfigState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
if let Some(state) = state.as_ref() {
|
||||
vfio_common.set_state(state, msi_state, msix_state)?;
|
||||
|
|
@ -598,7 +596,7 @@ impl VfioCommon {
|
|||
allocator: &Arc<Mutex<SystemAllocator>>,
|
||||
mmio32_allocator: &mut AddressAllocator,
|
||||
mmio64_allocator: &mut AddressAllocator,
|
||||
resources: Option<Vec<Resource>>,
|
||||
resources: Option<&[Resource]>,
|
||||
) -> Result<Vec<PciBarConfiguration>, PciDeviceError> {
|
||||
let mut bars = Vec::new();
|
||||
let mut bar_id = VFIO_PCI_BAR0_REGION_INDEX;
|
||||
|
|
@ -614,7 +612,7 @@ impl VfioCommon {
|
|||
let mut flags: u32 = 0;
|
||||
|
||||
let mut restored_bar_addr = None;
|
||||
if let Some(resources) = &resources {
|
||||
if let Some(resources) = resources {
|
||||
for resource in resources {
|
||||
if let Resource::PciBar {
|
||||
index,
|
||||
|
|
@ -1480,7 +1478,7 @@ impl VfioPciDevice {
|
|||
iommu_attached: bool,
|
||||
bdf: PciBdf,
|
||||
memory_slot_allocator: MemorySlotAllocator,
|
||||
snapshot: Option<Snapshot>,
|
||||
snapshot: Option<&Snapshot>,
|
||||
x_nv_gpudirect_clique: Option<u8>,
|
||||
device_path: PathBuf,
|
||||
) -> Result<Self, VfioPciError> {
|
||||
|
|
@ -1495,7 +1493,7 @@ impl VfioPciDevice {
|
|||
Arc::new(vfio_wrapper) as Arc<dyn Vfio>,
|
||||
&PciVfioSubclass::VfioSubclass,
|
||||
bdf,
|
||||
vm_migration::snapshot_from_id(snapshot.as_ref(), VFIO_COMMON_ID),
|
||||
vm_migration::snapshot_from_id(snapshot, VFIO_COMMON_ID),
|
||||
x_nv_gpudirect_clique,
|
||||
)?;
|
||||
|
||||
|
|
@ -1508,7 +1506,7 @@ impl VfioPciDevice {
|
|||
iommu_attached,
|
||||
memory_slot_allocator,
|
||||
bdf,
|
||||
device_path: device_path.clone(),
|
||||
device_path,
|
||||
};
|
||||
|
||||
Ok(vfio_pci_device)
|
||||
|
|
@ -1832,8 +1830,12 @@ impl PciDevice for VfioPciDevice {
|
|||
mmio64_allocator: &mut AddressAllocator,
|
||||
resources: Option<Vec<Resource>>,
|
||||
) -> Result<Vec<PciBarConfiguration>, PciDeviceError> {
|
||||
self.common
|
||||
.allocate_bars(allocator, mmio32_allocator, mmio64_allocator, resources)
|
||||
self.common.allocate_bars(
|
||||
allocator,
|
||||
mmio32_allocator,
|
||||
mmio64_allocator,
|
||||
resources.as_deref(),
|
||||
)
|
||||
}
|
||||
|
||||
fn free_bars(
|
||||
|
|
|
|||
|
|
@ -79,7 +79,7 @@ impl VfioUserPciDevice {
|
|||
legacy_interrupt_group: Option<Arc<dyn InterruptSourceGroup>>,
|
||||
bdf: PciBdf,
|
||||
memory_slot_allocator: MemorySlotAllocator,
|
||||
snapshot: Option<Snapshot>,
|
||||
snapshot: Option<&Snapshot>,
|
||||
) -> Result<Self, VfioUserPciDeviceError> {
|
||||
let resettable = client.lock().unwrap().resettable();
|
||||
if resettable {
|
||||
|
|
@ -100,7 +100,7 @@ impl VfioUserPciDevice {
|
|||
Arc::new(vfio_wrapper) as Arc<dyn Vfio>,
|
||||
&PciVfioUserSubclass::VfioUserSubclass,
|
||||
bdf,
|
||||
vm_migration::snapshot_from_id(snapshot.as_ref(), VFIO_COMMON_ID),
|
||||
vm_migration::snapshot_from_id(snapshot, VFIO_COMMON_ID),
|
||||
None,
|
||||
)
|
||||
.map_err(VfioUserPciDeviceError::CreateVfioCommon)?;
|
||||
|
|
@ -396,8 +396,12 @@ impl PciDevice for VfioUserPciDevice {
|
|||
mmio64_allocator: &mut AddressAllocator,
|
||||
resources: Option<Vec<Resource>>,
|
||||
) -> Result<Vec<PciBarConfiguration>, PciDeviceError> {
|
||||
self.common
|
||||
.allocate_bars(allocator, mmio32_allocator, mmio64_allocator, resources)
|
||||
self.common.allocate_bars(
|
||||
allocator,
|
||||
mmio32_allocator,
|
||||
mmio64_allocator,
|
||||
resources.as_deref(),
|
||||
)
|
||||
}
|
||||
|
||||
fn free_bars(
|
||||
|
|
|
|||
|
|
@ -245,6 +245,7 @@ impl TokenBucket {
|
|||
}
|
||||
|
||||
/// Enum that describes the type of token used.
|
||||
#[derive(Copy, Clone)]
|
||||
pub enum TokenType {
|
||||
/// Token type used for bandwidth limiting.
|
||||
Bytes,
|
||||
|
|
|
|||
|
|
@ -399,7 +399,7 @@ fn rest_api_do_command(matches: &ArgMatches, socket: &mut UnixStream) -> ApiResu
|
|||
.get_one::<String>("net_config")
|
||||
.unwrap(),
|
||||
)?;
|
||||
simple_api_command_with_fds(socket, "PUT", "add-net", Some(&net_config), fds)
|
||||
simple_api_command_with_fds(socket, "PUT", "add-net", Some(&net_config), &fds)
|
||||
.map_err(Error::HttpApiClient)
|
||||
}
|
||||
Some("add-user-device") => {
|
||||
|
|
@ -454,7 +454,7 @@ fn rest_api_do_command(matches: &ArgMatches, socket: &mut UnixStream) -> ApiResu
|
|||
.get_one::<String>("restore_config")
|
||||
.unwrap(),
|
||||
)?;
|
||||
simple_api_command_with_fds(socket, "PUT", "restore", Some(&restore_config), fds)
|
||||
simple_api_command_with_fds(socket, "PUT", "restore", Some(&restore_config), &fds)
|
||||
.map_err(Error::HttpApiClient)
|
||||
}
|
||||
Some("coredump") => {
|
||||
|
|
|
|||
|
|
@ -86,7 +86,7 @@ impl Emulator {
|
|||
///
|
||||
/// * `path` - A path to the Unix Domain Socket swtpm is listening on
|
||||
///
|
||||
pub fn new(path: String) -> Result<Self> {
|
||||
pub fn new(path: &str) -> Result<Self> {
|
||||
if !Path::new(&path).exists() {
|
||||
return Err(Error::InitializeEmulator(anyhow!(
|
||||
"The input TPM Socket path: {path:?} does not exist"
|
||||
|
|
|
|||
|
|
@ -58,8 +58,8 @@ impl SocketDev {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn init(&mut self, path: String) -> Result<()> {
|
||||
self.connect(&path)?;
|
||||
pub fn init(&mut self, path: &str) -> Result<()> {
|
||||
self.connect(path)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -203,7 +203,7 @@ struct VhostUserBlkBackend {
|
|||
|
||||
impl VhostUserBlkBackend {
|
||||
fn new(
|
||||
image_path: String,
|
||||
image_path: &str,
|
||||
num_queues: usize,
|
||||
rdonly: bool,
|
||||
direct: bool,
|
||||
|
|
@ -217,7 +217,7 @@ impl VhostUserBlkBackend {
|
|||
if direct {
|
||||
options.custom_flags(libc::O_DIRECT);
|
||||
}
|
||||
let image: File = options.open(&image_path).unwrap();
|
||||
let image: File = options.open(image_path).unwrap();
|
||||
let mut raw_img: qcow::RawFile = qcow::RawFile::new(image, direct);
|
||||
|
||||
let serial = build_serial(&PathBuf::from(&image_path));
|
||||
|
|
@ -510,7 +510,7 @@ pub fn start_block_backend(backend_command: &str) {
|
|||
|
||||
let blk_backend = Arc::new(RwLock::new(
|
||||
VhostUserBlkBackend::new(
|
||||
backend_config.path,
|
||||
&backend_config.path,
|
||||
backend_config.num_queues,
|
||||
backend_config.readonly,
|
||||
backend_config.direct,
|
||||
|
|
|
|||
|
|
@ -800,8 +800,8 @@ impl Block {
|
|||
self.disk_path.display()
|
||||
);
|
||||
let fd = self.disk_image.fd();
|
||||
fcntl::try_acquire_lock(fd, lock_type, granularity).map_err(|error| {
|
||||
let current_lock = get_lock_state(fd, granularity);
|
||||
fcntl::try_acquire_lock(&fd, lock_type, granularity).map_err(|error| {
|
||||
let current_lock = get_lock_state(&fd, granularity);
|
||||
// Don't propagate the error to the outside, as it is not useful at all. Instead,
|
||||
// we try to log additional help to the user.
|
||||
if let Ok(current_lock) = current_lock {
|
||||
|
|
@ -830,7 +830,8 @@ impl Block {
|
|||
// It is very unlikely that this fails;
|
||||
// Should we remove the Result to simplify the error propagation on
|
||||
// higher levels?
|
||||
fcntl::clear_lock(self.disk_image.fd(), granularity).map_err(|error| Error::LockDiskImage {
|
||||
let fd = self.disk_image.fd();
|
||||
fcntl::clear_lock(&fd, granularity).map_err(|error| Error::LockDiskImage {
|
||||
path: self.disk_path.clone(),
|
||||
error,
|
||||
lock_type: LockType::Unlock,
|
||||
|
|
|
|||
|
|
@ -389,7 +389,7 @@ impl VirtioPciDevice {
|
|||
use_64bit_bar: bool,
|
||||
dma_handler: Option<Arc<dyn ExternalDmaMapping>>,
|
||||
pending_activations: Arc<Mutex<Vec<VirtioPciDeviceActivator>>>,
|
||||
snapshot: Option<Snapshot>,
|
||||
snapshot: Option<&Snapshot>,
|
||||
) -> Result<Self> {
|
||||
let mut locked_device = device.lock().unwrap();
|
||||
let mut queue_evts = Vec::new();
|
||||
|
|
@ -423,8 +423,8 @@ impl VirtioPciDevice {
|
|||
))
|
||||
})?;
|
||||
|
||||
let msix_state = vm_migration::state_from_id(snapshot.as_ref(), pci::MSIX_CONFIG_ID)
|
||||
.map_err(|e| {
|
||||
let msix_state =
|
||||
vm_migration::state_from_id(snapshot, pci::MSIX_CONFIG_ID).map_err(|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed to get MsixConfigState from Snapshot: {e}"
|
||||
))
|
||||
|
|
@ -462,13 +462,11 @@ impl VirtioPciDevice {
|
|||
};
|
||||
|
||||
let pci_configuration_state =
|
||||
vm_migration::state_from_id(snapshot.as_ref(), pci::PCI_CONFIGURATION_ID).map_err(
|
||||
|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed to get PciConfigurationState from Snapshot: {e}"
|
||||
))
|
||||
},
|
||||
)?;
|
||||
vm_migration::state_from_id(snapshot, pci::PCI_CONFIGURATION_ID).map_err(|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed to get PciConfigurationState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
let configuration = PciConfiguration::new(
|
||||
VIRTIO_PCI_VENDOR_ID,
|
||||
|
|
@ -485,13 +483,11 @@ impl VirtioPciDevice {
|
|||
);
|
||||
|
||||
let common_config_state =
|
||||
vm_migration::state_from_id(snapshot.as_ref(), VIRTIO_PCI_COMMON_CONFIG_ID).map_err(
|
||||
|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed to get VirtioPciCommonConfigState from Snapshot: {e}"
|
||||
))
|
||||
},
|
||||
)?;
|
||||
vm_migration::state_from_id(snapshot, VIRTIO_PCI_COMMON_CONFIG_ID).map_err(|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed to get VirtioPciCommonConfigState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
let common_config = if let Some(common_config_state) = common_config_state {
|
||||
VirtioPciCommonConfig::new(common_config_state, access_platform)
|
||||
|
|
|
|||
|
|
@ -219,7 +219,7 @@ impl Vdpa {
|
|||
&mut self,
|
||||
mem: &GuestMemoryMmap,
|
||||
virtio_interrupt: &dyn VirtioInterrupt,
|
||||
queues: Vec<(usize, Queue, EventFd)>,
|
||||
queues: &[(usize, Queue, EventFd)],
|
||||
) -> Result<()> {
|
||||
assert!(self.vhost.is_some());
|
||||
self.vhost
|
||||
|
|
@ -434,7 +434,7 @@ impl VirtioDevice for Vdpa {
|
|||
virtio_interrupt: Arc<dyn VirtioInterrupt>,
|
||||
queues: Vec<(usize, Queue, EventFd)>,
|
||||
) -> ActivateResult {
|
||||
self.activate_vdpa(&mem.memory(), virtio_interrupt.as_ref(), queues)
|
||||
self.activate_vdpa(&mem.memory(), virtio_interrupt.as_ref(), &queues)
|
||||
.map_err(ActivateError::ActivateVdpa)?;
|
||||
|
||||
// Store the virtio interrupt handler as we need to return it on reset
|
||||
|
|
|
|||
|
|
@ -40,14 +40,14 @@ pub struct GsiAllocator {
|
|||
impl GsiAllocator {
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
/// New GSI allocator
|
||||
pub fn new(apics: Vec<GsiApic>) -> Self {
|
||||
pub fn new(apics: &[GsiApic]) -> Self {
|
||||
let mut allocator = GsiAllocator {
|
||||
apics: BTreeMap::new(),
|
||||
next_irq: 0xffff_ffff,
|
||||
next_gsi: 0,
|
||||
};
|
||||
|
||||
for apic in &apics {
|
||||
for apic in apics {
|
||||
if apic.base < allocator.next_irq {
|
||||
allocator.next_irq = apic.base;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ use crate::page_size::get_page_size;
|
|||
///
|
||||
/// # Example - Use the `SystemAddress` builder.
|
||||
///
|
||||
/// ```
|
||||
/// ```rust
|
||||
/// # #[cfg(target_arch = "x86_64")]
|
||||
/// # use vm_allocator::{GsiApic, SystemAllocator};
|
||||
/// # #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
|
||||
|
|
@ -29,7 +29,7 @@ use crate::page_size::get_page_size;
|
|||
/// GuestAddress(0x1000),
|
||||
/// 0x10000,
|
||||
/// GuestAddress(0x10000000), 0x10000000,
|
||||
/// #[cfg(target_arch = "x86_64")] vec![GsiApic::new(5, 19)]).unwrap();
|
||||
/// #[cfg(target_arch = "x86_64")] &[GsiApic::new(5, 19)]).unwrap();
|
||||
/// #[cfg(target_arch = "x86_64")]
|
||||
/// assert_eq!(allocator.allocate_irq(), Some(5));
|
||||
/// #[cfg(target_arch = "aarch64")]
|
||||
|
|
@ -59,14 +59,14 @@ impl SystemAllocator {
|
|||
/// * `io_size` - (X86) The size of IO memory.
|
||||
/// * `platform_mmio_base` - The starting address of platform MMIO memory.
|
||||
/// * `platform_mmio_size` - The size of platform MMIO memory.
|
||||
/// * `apics` - (X86) Vector of APIC's.
|
||||
/// * `apics` - (X86) slice of APIC's.
|
||||
///
|
||||
pub fn new(
|
||||
io_base: GuestAddress,
|
||||
io_size: GuestUsize,
|
||||
platform_mmio_base: GuestAddress,
|
||||
platform_mmio_size: GuestUsize,
|
||||
#[cfg(target_arch = "x86_64")] apics: Vec<GsiApic>,
|
||||
#[cfg(target_arch = "x86_64")] apics: &[GsiApic],
|
||||
) -> Option<Self> {
|
||||
Some(SystemAllocator {
|
||||
io_address_space: AddressAllocator::new(io_base, io_size)?,
|
||||
|
|
|
|||
|
|
@ -156,8 +156,8 @@ impl Snapshot {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn snapshot_from_id(snapshot: Option<&Snapshot>, id: &str) -> Option<Snapshot> {
|
||||
snapshot.and_then(|s| s.snapshots.get(id).cloned())
|
||||
pub fn snapshot_from_id<'a>(snapshot: Option<&'a Snapshot>, id: &str) -> Option<&'a Snapshot> {
|
||||
snapshot.and_then(|s| s.snapshots.get(id))
|
||||
}
|
||||
|
||||
pub fn state_from_id<'a, T>(s: Option<&'a Snapshot>, id: &str) -> Result<Option<T>, MigratableError>
|
||||
|
|
|
|||
|
|
@ -901,7 +901,11 @@ impl CpuManager {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
fn create_vcpu(&mut self, cpu_id: u32, snapshot: Option<Snapshot>) -> Result<Arc<Mutex<Vcpu>>> {
|
||||
fn create_vcpu(
|
||||
&mut self,
|
||||
cpu_id: u32,
|
||||
snapshot: Option<&Snapshot>,
|
||||
) -> Result<Arc<Mutex<Vcpu>>> {
|
||||
info!("Creating vCPU: cpu_id = {cpu_id}");
|
||||
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
|
|
@ -1006,7 +1010,7 @@ impl CpuManager {
|
|||
fn create_vcpus(
|
||||
&mut self,
|
||||
desired_vcpus: u32,
|
||||
snapshot: Option<Snapshot>,
|
||||
snapshot: Option<&Snapshot>,
|
||||
) -> Result<Vec<Arc<Mutex<Vcpu>>>> {
|
||||
let mut vcpus: Vec<Arc<Mutex<Vcpu>>> = vec![];
|
||||
info!(
|
||||
|
|
@ -1027,7 +1031,7 @@ impl CpuManager {
|
|||
cpu_id,
|
||||
// TODO: The special format of the CPU id can be removed once
|
||||
// ready to break live upgrade.
|
||||
snapshot_from_id(snapshot.as_ref(), cpu_id.to_string().as_str()),
|
||||
snapshot_from_id(snapshot, cpu_id.to_string().as_str()),
|
||||
)?);
|
||||
}
|
||||
|
||||
|
|
@ -1386,7 +1390,7 @@ impl CpuManager {
|
|||
|
||||
pub fn create_boot_vcpus(
|
||||
&mut self,
|
||||
snapshot: Option<Snapshot>,
|
||||
snapshot: Option<&Snapshot>,
|
||||
) -> Result<Vec<Arc<Mutex<Vcpu>>>> {
|
||||
trace_scoped!("create_boot_vcpus");
|
||||
|
||||
|
|
|
|||
|
|
@ -1155,7 +1155,7 @@ impl DeviceManager {
|
|||
force_iommu: bool,
|
||||
boot_id_list: BTreeSet<String>,
|
||||
#[cfg(not(target_arch = "riscv64"))] timestamp: Instant,
|
||||
snapshot: Option<Snapshot>,
|
||||
snapshot: Option<&Snapshot>,
|
||||
dynamic: bool,
|
||||
) -> DeviceManagerResult<Arc<Mutex<Self>>> {
|
||||
trace_scoped!("DeviceManager::new");
|
||||
|
|
@ -1367,7 +1367,7 @@ impl DeviceManager {
|
|||
timestamp,
|
||||
pending_activations: Arc::new(Mutex::new(Vec::default())),
|
||||
acpi_platform_addresses: AcpiPlatformAddresses::default(),
|
||||
snapshot,
|
||||
snapshot: snapshot.cloned(),
|
||||
rate_limit_groups,
|
||||
mmio_regions: Arc::new(Mutex::new(Vec::new())),
|
||||
#[cfg(feature = "fw_cfg")]
|
||||
|
|
@ -1795,7 +1795,7 @@ impl DeviceManager {
|
|||
ioapic::Ioapic::new(
|
||||
id.clone(),
|
||||
APIC_START,
|
||||
Arc::clone(&self.msi_interrupt_manager),
|
||||
self.msi_interrupt_manager.as_ref(),
|
||||
state_from_id(self.snapshot.as_ref(), id.as_str())
|
||||
.map_err(DeviceManagerError::RestoreGetState)?,
|
||||
)
|
||||
|
|
@ -2489,7 +2489,7 @@ impl DeviceManager {
|
|||
tpm_path: PathBuf,
|
||||
) -> DeviceManagerResult<Arc<Mutex<devices::tpm::Tpm>>> {
|
||||
// Create TPM Device
|
||||
let tpm = devices::tpm::Tpm::new(tpm_path.to_str().unwrap().to_string()).map_err(|e| {
|
||||
let tpm = devices::tpm::Tpm::new(tpm_path.to_str().unwrap()).map_err(|e| {
|
||||
DeviceManagerError::CreateTpmDevice(anyhow!("Failed to create TPM Device : {e:?}"))
|
||||
})?;
|
||||
let tpm = Arc::new(Mutex::new(tpm));
|
||||
|
|
|
|||
|
|
@ -965,7 +965,7 @@ impl Vmm {
|
|||
self.console_info.clone(),
|
||||
self.console_resize_pipe.clone(),
|
||||
Arc::clone(&self.original_termios_opt),
|
||||
Some(snapshot),
|
||||
Some(&snapshot),
|
||||
)
|
||||
.map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {e:?}"))
|
||||
|
|
@ -1334,7 +1334,7 @@ impl Vmm {
|
|||
self.console_info.clone(),
|
||||
self.console_resize_pipe.clone(),
|
||||
Arc::clone(&self.original_termios_opt),
|
||||
Some(snapshot),
|
||||
Some(&snapshot),
|
||||
Some(source_url),
|
||||
Some(prefault),
|
||||
)?;
|
||||
|
|
|
|||
|
|
@ -1169,7 +1169,7 @@ impl MemoryManager {
|
|||
start_of_platform_device_area,
|
||||
PLATFORM_DEVICE_AREA_SIZE,
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
vec![GsiApic::new(
|
||||
&[GsiApic::new(
|
||||
X86_64_IRQ_BASE,
|
||||
ioapic::NUM_IOAPIC_PINS as u32 - X86_64_IRQ_BASE,
|
||||
)],
|
||||
|
|
|
|||
|
|
@ -543,7 +543,7 @@ impl Vm {
|
|||
console_info: Option<ConsoleInfo>,
|
||||
console_resize_pipe: Option<Arc<File>>,
|
||||
original_termios: Arc<Mutex<Option<termios>>>,
|
||||
snapshot: Option<Snapshot>,
|
||||
snapshot: Option<&Snapshot>,
|
||||
) -> Result<Self> {
|
||||
trace_scoped!("Vm::new_from_memory_manager");
|
||||
|
||||
|
|
@ -658,7 +658,7 @@ impl Vm {
|
|||
boot_id_list,
|
||||
#[cfg(not(target_arch = "riscv64"))]
|
||||
timestamp,
|
||||
snapshot_from_id(snapshot.as_ref(), DEVICE_MANAGER_SNAPSHOT_ID),
|
||||
snapshot_from_id(snapshot, DEVICE_MANAGER_SNAPSHOT_ID),
|
||||
dynamic,
|
||||
)
|
||||
.map_err(Error::DeviceManager)?;
|
||||
|
|
@ -723,7 +723,7 @@ impl Vm {
|
|||
cpu_manager
|
||||
.lock()
|
||||
.unwrap()
|
||||
.create_boot_vcpus(snapshot_from_id(snapshot.as_ref(), CPU_MANAGER_SNAPSHOT_ID))
|
||||
.create_boot_vcpus(snapshot_from_id(snapshot, CPU_MANAGER_SNAPSHOT_ID))
|
||||
.map_err(Error::CpuManager)?;
|
||||
|
||||
// For KVM, we need to create interrupt controller after we create boot vcpus.
|
||||
|
|
@ -992,7 +992,7 @@ impl Vm {
|
|||
console_info: Option<ConsoleInfo>,
|
||||
console_resize_pipe: Option<Arc<File>>,
|
||||
original_termios: Arc<Mutex<Option<termios>>>,
|
||||
snapshot: Option<Snapshot>,
|
||||
snapshot: Option<&Snapshot>,
|
||||
source_url: Option<&str>,
|
||||
prefault: Option<bool>,
|
||||
) -> Result<Self> {
|
||||
|
|
@ -1035,31 +1035,30 @@ impl Vm {
|
|||
vm_config.lock().unwrap().cpus.max_phys_bits,
|
||||
);
|
||||
|
||||
let memory_manager = if let Some(snapshot) =
|
||||
snapshot_from_id(snapshot.as_ref(), MEMORY_MANAGER_SNAPSHOT_ID)
|
||||
{
|
||||
MemoryManager::new_from_snapshot(
|
||||
&snapshot,
|
||||
vm.clone(),
|
||||
&vm_config.lock().unwrap().memory.clone(),
|
||||
source_url,
|
||||
prefault.unwrap(),
|
||||
phys_bits,
|
||||
)
|
||||
.map_err(Error::MemoryManager)?
|
||||
} else {
|
||||
MemoryManager::new(
|
||||
vm.clone(),
|
||||
&vm_config.lock().unwrap().memory.clone(),
|
||||
None,
|
||||
phys_bits,
|
||||
#[cfg(feature = "tdx")]
|
||||
tdx_enabled,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
.map_err(Error::MemoryManager)?
|
||||
};
|
||||
let memory_manager =
|
||||
if let Some(snapshot) = snapshot_from_id(snapshot, MEMORY_MANAGER_SNAPSHOT_ID) {
|
||||
MemoryManager::new_from_snapshot(
|
||||
snapshot,
|
||||
vm.clone(),
|
||||
&vm_config.lock().unwrap().memory.clone(),
|
||||
source_url,
|
||||
prefault.unwrap(),
|
||||
phys_bits,
|
||||
)
|
||||
.map_err(Error::MemoryManager)?
|
||||
} else {
|
||||
MemoryManager::new(
|
||||
vm.clone(),
|
||||
&vm_config.lock().unwrap().memory.clone(),
|
||||
None,
|
||||
phys_bits,
|
||||
#[cfg(feature = "tdx")]
|
||||
tdx_enabled,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
.map_err(Error::MemoryManager)?
|
||||
};
|
||||
|
||||
Vm::new_from_memory_manager(
|
||||
vm_config,
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue