misc: clippy: add if_not_else

Enabling the `if_not_else` lint removes cognitive load when reading `if` statements:
the positive condition and its branch come first, instead of a negated condition
followed by an `else`. All changes were applied automatically by clippy via `--fix`.

Signed-off-by: Philipp Schuster <philipp.schuster@cyberus-technology.de>
On-behalf-of: SAP philipp.schuster@sap.com
This commit is contained in:
Philipp Schuster 2025-11-21 11:08:27 +01:00 committed by Rob Bradford
parent a0b72dce22
commit 0a07c96d17
16 changed files with 139 additions and 144 deletions

View file

@ -171,6 +171,7 @@ suspicious = "deny"
# Individual Lints
assertions_on_result_states = "deny"
if_not_else = "deny"
manual_string_new = "deny"
map_unwrap_or = "deny"
redundant_else = "deny"

View file

@ -111,9 +111,7 @@ pub fn get_cache_size(cache_level: CacheLevel) -> u32 {
}
let file_path = Path::new(&file_directory);
if !file_path.exists() {
0
} else {
if file_path.exists() {
let src = fs::read_to_string(file_directory).expect("File not exists or file corrupted.");
// The content of the file is as simple as a size, like: "32K"
let src = src.trim();
@ -127,6 +125,8 @@ pub fn get_cache_size(cache_level: CacheLevel) -> u32 {
"G" => 1024u32.pow(3),
_ => 1,
}
} else {
0
}
}
@ -142,11 +142,11 @@ pub fn get_cache_coherency_line_size(cache_level: CacheLevel) -> u32 {
}
let file_path = Path::new(&file_directory);
if !file_path.exists() {
0
} else {
if file_path.exists() {
let src = fs::read_to_string(file_directory).expect("File not exists or file corrupted.");
src.trim().parse::<u32>().unwrap()
} else {
0
}
}
@ -162,11 +162,11 @@ pub fn get_cache_number_of_sets(cache_level: CacheLevel) -> u32 {
}
let file_path = Path::new(&file_directory);
if !file_path.exists() {
0
} else {
if file_path.exists() {
let src = fs::read_to_string(file_directory).expect("File not exists or file corrupted.");
src.trim().parse::<u32>().unwrap()
} else {
0
}
}
@ -187,9 +187,7 @@ pub fn get_cache_shared(cache_level: CacheLevel) -> bool {
}
let file_path = Path::new(&file_directory);
if !file_path.exists() {
result = false;
} else {
if file_path.exists() {
let src = fs::read_to_string(file_directory).expect("File not exists or file corrupted.");
let src = src.trim();
if src.is_empty() {
@ -197,6 +195,8 @@ pub fn get_cache_shared(cache_level: CacheLevel) -> bool {
} else {
result = src.contains('-') || src.contains(',');
}
} else {
result = false;
}
result
@ -312,9 +312,7 @@ fn create_cpu_nodes(
let cache_path = Path::new("/sys/devices/system/cpu/cpu0/cache");
let cache_exist: bool = cache_path.exists();
if !cache_exist {
warn!("cache sysfs system does not exist.");
} else {
if cache_exist {
// L1 Data Cache Info.
l1_d_cache_size = get_cache_size(CacheLevel::L1D);
l1_d_cache_line_size = get_cache_coherency_line_size(CacheLevel::L1D);
@ -342,6 +340,8 @@ fn create_cpu_nodes(
if l3_cache_size != 0 {
l3_cache_shared = get_cache_shared(CacheLevel::L3);
}
} else {
warn!("cache sysfs system does not exist.");
}
for (cpu_id, mpidr) in vcpu_mpidr.iter().enumerate().take(num_cpus) {

View file

@ -293,14 +293,7 @@ impl Request {
error!("Only head descriptor present: request = {req:?}");
})?;
if !desc.has_next() {
status_desc = desc;
// Only flush requests are allowed to skip the data descriptor.
if req.request_type != RequestType::Flush {
error!("Need a data descriptor: request = {req:?}");
return Err(Error::DescriptorChainTooShort);
}
} else {
if desc.has_next() {
req.data_descriptors.reserve_exact(1);
while desc.has_next() {
if desc.is_write_only() && req.request_type == RequestType::Out {
@ -326,6 +319,13 @@ impl Request {
})?;
}
status_desc = desc;
} else {
status_desc = desc;
// Only flush requests are allowed to skip the data descriptor.
if req.request_type != RequestType::Flush {
error!("Need a data descriptor: request = {req:?}");
return Err(Error::DescriptorChainTooShort);
}
}
// The status MUST always be writable.
@ -446,7 +446,9 @@ impl Request {
// In case it's not properly aligned, an intermediate buffer is
// created with the correct alignment, and a copy from/to the
// origin buffer is performed, depending on the type of operation.
let iov_base = if !(origin_ptr.as_ptr() as u64).is_multiple_of(SECTOR_SIZE) {
let iov_base = if (origin_ptr.as_ptr() as u64).is_multiple_of(SECTOR_SIZE) {
origin_ptr.as_ptr() as *mut libc::c_void
} else {
let layout = Layout::from_size_align(data_len, SECTOR_SIZE as usize).unwrap();
// SAFETY: layout has non-zero size
let aligned_ptr = unsafe { alloc_zeroed(layout) };
@ -474,8 +476,6 @@ impl Request {
});
aligned_ptr as *mut libc::c_void
} else {
origin_ptr.as_ptr() as *mut libc::c_void
};
let iovec = libc::iovec {

View file

@ -119,7 +119,9 @@ impl RefCount {
}
// Unwrap is safe here as the entry was filled directly above.
let dropped_cluster = if !self.refblock_cache.get(table_index).unwrap().dirty() {
let dropped_cluster = if self.refblock_cache.get(table_index).unwrap().dirty() {
None
} else {
// Free the previously used block and use a new one. Writing modified counts to new
// blocks keeps the on-disk state consistent even if it's out of date.
if let Some((addr, _)) = new_cluster.take() {
@ -128,8 +130,6 @@ impl RefCount {
} else {
return Err(Error::NeedNewCluster);
}
} else {
None
};
self.refblock_cache.get_mut(table_index).unwrap()[block_index] = refcount;

View file

@ -2364,7 +2364,9 @@ impl cpu::Vcpu for KvmVcpu {
let expected_num_msrs = msr_entries.len();
let num_msrs = self.get_msrs(&mut msr_entries)?;
let msrs = if num_msrs != expected_num_msrs {
let msrs = if num_msrs == expected_num_msrs {
msr_entries
} else {
let mut faulty_msr_index = num_msrs;
let mut msr_entries_tmp = msr_entries[..faulty_msr_index].to_vec();
@ -2390,8 +2392,6 @@ impl cpu::Vcpu for KvmVcpu {
}
msr_entries_tmp
} else {
msr_entries
};
let vcpu_events = self.get_vcpu_events()?;

View file

@ -111,10 +111,7 @@ impl CtrlQueue {
.memory()
.read_obj::<u64>(data_desc_addr)
.map_err(Error::GuestMemory)?;
if u32::from(ctrl_hdr.cmd) != VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET {
warn!("Unsupported command: {}", ctrl_hdr.cmd);
false
} else {
if u32::from(ctrl_hdr.cmd) == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET {
let mut ok = true;
for tap in self.taps.iter_mut() {
info!("Reprogramming tap offload with features: {features}");
@ -126,6 +123,9 @@ impl CtrlQueue {
.ok();
}
ok
} else {
warn!("Unsupported command: {}", ctrl_hdr.cmd);
false
}
}
_ => {

View file

@ -80,16 +80,14 @@ fn open_tap_rx_q_0(
None => Tap::new(num_rx_q).map_err(Error::TapOpen)?,
};
// Don't overwrite ip configuration of existing interfaces:
if !tap_exists {
if let Some(ip) = ip_addr {
tap.set_ip_addr(ip, netmask)
.map_err(Error::TapSetIpNetmask)?;
}
} else {
if tap_exists {
warn!(
"Tap {} already exists. IP configuration will not be overwritten.",
if_name.unwrap_or_default()
);
} else if let Some(ip) = ip_addr {
tap.set_ip_addr(ip, netmask)
.map_err(Error::TapSetIpNetmask)?;
}
if let Some(mac) = host_mac {
tap.set_mac_addr(*mac).map_err(Error::TapSetMac)?;

View file

@ -88,7 +88,9 @@ impl TxVirtio {
next_desc = desc_chain.next();
}
let len = if !iovecs.is_empty() {
let len = if iovecs.is_empty() {
0
} else {
// SAFETY: FFI call with correct arguments
let result = unsafe {
libc::writev(
@ -119,8 +121,6 @@ impl TxVirtio {
self.counter_frames += Wrapping(1);
result as u32
} else {
0
};
// For the sake of simplicity (similar to the RX rate limiting), we always
@ -230,7 +230,9 @@ impl RxVirtio {
next_desc = desc_chain.next();
}
let len = if !iovecs.is_empty() {
let len = if iovecs.is_empty() {
0
} else {
// SAFETY: FFI call with correct arguments
let result = unsafe {
libc::readv(
@ -268,8 +270,6 @@ impl RxVirtio {
self.counter_frames += Wrapping(1);
result as u32
} else {
0
};
// For the sake of simplicity (keeping the handling of RX_QUEUE_EVENT and

View file

@ -180,11 +180,11 @@ impl PciBus {
pub fn get_device_id(&mut self, id: usize) -> Result<()> {
if id < NUM_DEVICE_IDS {
if !self.device_ids[id] {
if self.device_ids[id] {
Err(PciRootError::AlreadyInUsePciDeviceSlot(id))
} else {
self.device_ids[id] = true;
Ok(())
} else {
Err(PciRootError::AlreadyInUsePciDeviceSlot(id))
}
} else {
Err(PciRootError::InvalidPciDeviceSlot(id))

View file

@ -646,20 +646,20 @@ impl VfioCommon {
flags = self.vfio_wrapper.read_config_dword(bar_offset);
// Is this an IO BAR?
let io_bar = if bar_id != VFIO_PCI_ROM_REGION_INDEX {
matches!(flags & PCI_CONFIG_IO_BAR, PCI_CONFIG_IO_BAR)
} else {
let io_bar = if bar_id == VFIO_PCI_ROM_REGION_INDEX {
false
} else {
matches!(flags & PCI_CONFIG_IO_BAR, PCI_CONFIG_IO_BAR)
};
// Is this a 64-bit BAR?
let is_64bit_bar = if bar_id != VFIO_PCI_ROM_REGION_INDEX {
let is_64bit_bar = if bar_id == VFIO_PCI_ROM_REGION_INDEX {
false
} else {
matches!(
flags & PCI_CONFIG_MEMORY_BAR_64BIT,
PCI_CONFIG_MEMORY_BAR_64BIT
)
} else {
false
};
if matches!(
@ -915,11 +915,10 @@ impl VfioCommon {
& PCI_CONFIG_CAPABILITY_PTR_MASK;
// See parse_capabilities below for an explanation.
if cap_ptr != cap_next {
cap_next = cap_ptr;
} else {
if cap_ptr == cap_next {
break;
}
cap_next = cap_ptr;
}
None

View file

@ -757,12 +757,11 @@ pub fn ssh_command_ip(
pub fn exec_host_command_with_retries(command: &str, retries: u32, interval: Duration) -> bool {
for _ in 0..retries {
let s = exec_host_command_output(command).status;
if !s.success() {
eprintln!("\n\n==== retrying in {interval:?} ===\n\n");
thread::sleep(interval);
} else {
if s.success() {
return true;
}
eprintln!("\n\n==== retrying in {interval:?} ===\n\n");
thread::sleep(interval);
}
false
@ -1686,17 +1685,17 @@ pub fn measure_virtio_net_throughput(
failed = true;
}
if !failed {
// Safe to unwrap as we know the child has terminated successfully
let output = c.wait_with_output().unwrap();
results.push(parse_iperf3_output(&output.stdout, receive, bandwidth)?);
} else {
if failed {
let _ = c.kill();
let output = c.wait_with_output().unwrap();
println!(
"=============== Client output [Error] ===============\n\n{}\n\n===========end============\n\n",
String::from_utf8_lossy(&output.stdout)
);
} else {
// Safe to unwrap as we know the child has terminated successfully
let output = c.wait_with_output().unwrap();
results.push(parse_iperf3_output(&output.stdout, receive, bandwidth)?);
}
}

View file

@ -463,12 +463,12 @@ impl VirtioDevice for Vdpa {
impl Pausable for Vdpa {
fn pause(&mut self) -> std::result::Result<(), MigratableError> {
if !self.migrating {
if self.migrating {
Ok(())
} else {
Err(MigratableError::Pause(anyhow!(
"Can't pause a vDPA device outside live migration"
)))
} else {
Ok(())
}
}
@ -477,12 +477,12 @@ impl Pausable for Vdpa {
return Ok(());
}
if !self.migrating {
if self.migrating {
Ok(())
} else {
Err(MigratableError::Resume(anyhow!(
"Can't resume a vDPA device outside live migration"
)))
} else {
Ok(())
}
}
}

View file

@ -68,10 +68,10 @@ impl AddressAllocator {
}
fn align_address(&self, address: GuestAddress, alignment: GuestUsize) -> GuestAddress {
let align_adjust = if !address.raw_value().is_multiple_of(alignment) {
alignment - (address.raw_value() % alignment)
} else {
let align_adjust = if address.raw_value().is_multiple_of(alignment) {
0
} else {
alignment - (address.raw_value() % alignment)
};
address.unchecked_add(align_adjust)

View file

@ -2733,16 +2733,15 @@ impl VmConfig {
for numa_node in numa.iter() {
if let Some(memory_zones) = numa_node.memory_zones.clone() {
for memory_zone in memory_zones.iter() {
if !used_numa_node_memory_zones.contains_key(memory_zone) {
used_numa_node_memory_zones
.insert(memory_zone.to_string(), numa_node.guest_numa_id);
} else {
if used_numa_node_memory_zones.contains_key(memory_zone) {
return Err(ValidationError::MemoryZoneReused(
memory_zone.to_string(),
*used_numa_node_memory_zones.get(memory_zone).unwrap(),
numa_node.guest_numa_id,
));
}
used_numa_node_memory_zones
.insert(memory_zone.to_string(), numa_node.guest_numa_id);
}
}
@ -2756,15 +2755,14 @@ impl VmConfig {
numa_node.guest_numa_id,
));
}
if !used_pci_segments.contains_key(pci_segment) {
used_pci_segments.insert(*pci_segment, numa_node.guest_numa_id);
} else {
if used_pci_segments.contains_key(pci_segment) {
return Err(ValidationError::PciSegmentReused(
*pci_segment,
*used_pci_segments.get(pci_segment).unwrap(),
numa_node.guest_numa_id,
));
}
used_pci_segments.insert(*pci_segment, numa_node.guest_numa_id);
}
}
}

View file

@ -5246,15 +5246,15 @@ impl Aml for DeviceManager {
let serial_irq = 4;
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
let serial_irq =
if self.config.lock().unwrap().serial.clone().mode != ConsoleOutputMode::Off {
if self.config.lock().unwrap().serial.clone().mode == ConsoleOutputMode::Off {
// If serial is turned off, add a fake device with invalid irq.
31
} else {
self.get_device_info()
.clone()
.get(&(DeviceType::Serial, DeviceType::Serial.to_string()))
.unwrap()
.irq()
} else {
// If serial is turned off, add a fake device with invalid irq.
31
};
if self.config.lock().unwrap().serial.mode != ConsoleOutputMode::Off {
aml::Device::new(

View file

@ -766,61 +766,7 @@ impl MemoryManager {
) -> Result<(u64, Vec<MemoryZoneConfig>, bool), Error> {
let mut allow_mem_hotplug = false;
if !user_provided_zones {
if config.zones.is_some() {
error!(
"User defined memory regions can't be provided if the \
memory size is not 0"
);
return Err(Error::InvalidMemoryParameters);
}
if config.hotplug_size.is_some() {
allow_mem_hotplug = true;
}
if let Some(hotplugged_size) = config.hotplugged_size {
if let Some(hotplug_size) = config.hotplug_size {
if hotplugged_size > hotplug_size {
error!(
"'hotplugged_size' {hotplugged_size} can't be bigger than \
'hotplug_size' {hotplug_size}",
);
return Err(Error::InvalidMemoryParameters);
}
} else {
error!(
"Invalid to define 'hotplugged_size' when there is\
no 'hotplug_size'"
);
return Err(Error::InvalidMemoryParameters);
}
if config.hotplug_method == HotplugMethod::Acpi {
error!(
"Invalid to define 'hotplugged_size' with hotplug \
method 'acpi'"
);
return Err(Error::InvalidMemoryParameters);
}
}
// Create a single zone from the global memory config. This lets
// us reuse the codepath for user defined memory zones.
let zones = vec![MemoryZoneConfig {
id: String::from(DEFAULT_MEMORY_ZONE),
size: config.size,
file: None,
shared: config.shared,
hugepages: config.hugepages,
hugepage_size: config.hugepage_size,
host_numa_node: None,
hotplug_size: config.hotplug_size,
hotplugged_size: config.hotplugged_size,
prefault: config.prefault,
}];
Ok((config.size, zones, allow_mem_hotplug))
} else {
if user_provided_zones {
if config.zones.is_none() {
error!(
"User defined memory regions must be provided if the \
@ -880,6 +826,60 @@ impl MemoryManager {
}
Ok((total_ram_size, zones, allow_mem_hotplug))
} else {
if config.zones.is_some() {
error!(
"User defined memory regions can't be provided if the \
memory size is not 0"
);
return Err(Error::InvalidMemoryParameters);
}
if config.hotplug_size.is_some() {
allow_mem_hotplug = true;
}
if let Some(hotplugged_size) = config.hotplugged_size {
if let Some(hotplug_size) = config.hotplug_size {
if hotplugged_size > hotplug_size {
error!(
"'hotplugged_size' {hotplugged_size} can't be bigger than \
'hotplug_size' {hotplug_size}",
);
return Err(Error::InvalidMemoryParameters);
}
} else {
error!(
"Invalid to define 'hotplugged_size' when there is\
no 'hotplug_size'"
);
return Err(Error::InvalidMemoryParameters);
}
if config.hotplug_method == HotplugMethod::Acpi {
error!(
"Invalid to define 'hotplugged_size' with hotplug \
method 'acpi'"
);
return Err(Error::InvalidMemoryParameters);
}
}
// Create a single zone from the global memory config. This lets
// us reuse the codepath for user defined memory zones.
let zones = vec![MemoryZoneConfig {
id: String::from(DEFAULT_MEMORY_ZONE),
size: config.size,
file: None,
shared: config.shared,
hugepages: config.hugepages,
hugepage_size: config.hugepage_size,
host_numa_node: None,
hotplug_size: config.hotplug_size,
hotplugged_size: config.hotplugged_size,
prefault: config.prefault,
}];
Ok((config.size, zones, allow_mem_hotplug))
}
}