build: treewide: clippy: collapse nested ifs, use let chains

This bumps the MSRV to 1.88 (let chains stabilized in Rust 1.88 and require edition 2024, so Rust edition 2024 is mandatory).

Signed-off-by: Philipp Schuster <philipp.schuster@cyberus-technology.de>
On-behalf-of: SAP philipp.schuster@sap.com
This commit was authored by Philipp Schuster on 2025-08-15 09:00:01 +02:00 and committed by Bo Chen.
parent f73a6c8d8e
commit c995b72384
40 changed files with 574 additions and 608 deletions

View file

@ -118,14 +118,13 @@ fn parse_http_response(socket: &mut dyn Read) -> Result<Option<String>, Error> {
}
}
if let Some(body_offset) = body_offset {
if let Some(content_length) = content_length {
if res.len() >= content_length + body_offset {
if let Some(body_offset) = body_offset
&& let Some(content_length) = content_length
&& res.len() >= content_length + body_offset
{
break;
}
}
}
}
let body_string = content_length.and(body_offset.map(|o| String::from(&res[o..])));
let status_code = get_status_code(&res)?;

View file

@ -999,8 +999,9 @@ fn create_pci_nodes(
fdt.property_array_u32("msi-map", &msi_map)?;
fdt.property_u32("msi-parent", MSI_PHANDLE)?;
if pci_device_info_elem.pci_segment_id == 0 {
if let Some(virtio_iommu_bdf) = virtio_iommu_bdf {
if pci_device_info_elem.pci_segment_id == 0
&& let Some(virtio_iommu_bdf) = virtio_iommu_bdf
{
// See kernel document Documentation/devicetree/bindings/pci/pci-iommu.txt
// for 'iommu-map' attribute setting.
let iommu_map = [
@ -1032,7 +1033,6 @@ fn create_pci_nodes(
fdt.end_node(virtio_iommu_node)?;
}
}
fdt.end_node(pci_node)?;
}

View file

@ -810,21 +810,13 @@ pub fn configure_vcpu(
);
// The TSC frequency CPUID leaf should not be included when running with HyperV emulation
if !kvm_hyperv {
if let Some(tsc_khz) = vcpu.tsc_khz().map_err(Error::GetTscFrequency)? {
if !kvm_hyperv && let Some(tsc_khz) = vcpu.tsc_khz().map_err(Error::GetTscFrequency)? {
// Need to check that the TSC doesn't vary with dynamic frequency
// SAFETY: cpuid called with valid leaves
if unsafe { std::arch::x86_64::__cpuid(0x8000_0007) }.edx
& (1u32 << INVARIANT_TSC_EDX_BIT)
if unsafe { std::arch::x86_64::__cpuid(0x8000_0007) }.edx & (1u32 << INVARIANT_TSC_EDX_BIT)
> 0
{
CpuidPatch::set_cpuid_reg(
&mut cpuid,
0x4000_0000,
None,
CpuidReg::EAX,
0x4000_0010,
);
CpuidPatch::set_cpuid_reg(&mut cpuid, 0x4000_0000, None, CpuidReg::EAX, 0x4000_0010);
cpuid.retain(|c| c.function != 0x4000_0010);
cpuid.push(CpuIdEntry {
function: 0x4000_0010,
@ -835,7 +827,6 @@ pub fn configure_vcpu(
});
};
}
}
for c in &cpuid {
debug!("{}", c);
@ -932,11 +923,11 @@ pub fn configure_system(
mptable::setup_mptable(offset, guest_mem, _num_cpus, topology).map_err(Error::MpTableSetup)?;
// Check that the RAM is not smaller than the RSDP start address
if let Some(rsdp_addr) = rsdp_addr {
if rsdp_addr.0 > guest_mem.last_addr().0 {
if let Some(rsdp_addr) = rsdp_addr
&& rsdp_addr.0 > guest_mem.last_addr().0
{
return Err(super::Error::RsdpPastRamEnd);
}
}
match setup_header {
Some(hdr) => configure_32bit_entry(

View file

@ -287,11 +287,12 @@ impl QcowHeader {
let cluster_bits: u32 = DEFAULT_CLUSTER_BITS;
let cluster_size: u32 = 0x01 << cluster_bits;
let max_length: usize = (cluster_size - header_size) as usize;
if let Some(path) = backing_file {
if path.len() > max_length {
if let Some(path) = backing_file
&& path.len() > max_length
{
return Err(Error::BackingFileTooLong(path.len() - max_length));
}
}
// L2 blocks are always one cluster long. They contain cluster_size/sizeof(u64) addresses.
let entries_per_cluster: u32 = cluster_size / size_of::<u64>() as u32;
let num_clusters: u32 = div_round_up_u64(size, u64::from(cluster_size)) as u32;
@ -589,16 +590,14 @@ impl QcowFile {
// Check for compressed blocks
for l2_addr_disk in l1_table.get_values() {
if *l2_addr_disk != 0 {
if let Err(e) = Self::read_l2_cluster(&mut raw_file, *l2_addr_disk) {
if let Some(os_error) = e.raw_os_error() {
if os_error == ENOTSUP {
if *l2_addr_disk != 0
&& let Err(e) = Self::read_l2_cluster(&mut raw_file, *l2_addr_disk)
&& let Some(os_error) = e.raw_os_error()
&& os_error == ENOTSUP
{
return Err(Error::CompressedBlocksNotSupported);
}
}
}
}
}
let mut qcow = QcowFile {
raw_file,
@ -1584,12 +1583,12 @@ impl Seek for QcowFile {
}
};
if let Some(o) = new_offset {
if o <= self.virtual_size() {
if let Some(o) = new_offset
&& o <= self.virtual_size()
{
self.current_offset = o;
return Ok(o);
}
}
Err(std::io::Error::from_raw_os_error(EINVAL))
}
}

View file

@ -123,12 +123,12 @@ impl<T: Cacheable> CacheMap<T> {
if self.map.len() == self.capacity {
// TODO(dgreid) - smarter eviction strategy.
let to_evict = *self.map.iter().next().unwrap().0;
if let Some(evicted) = self.map.remove(&to_evict) {
if evicted.dirty() {
if let Some(evicted) = self.map.remove(&to_evict)
&& evicted.dirty()
{
write_callback(to_evict, evicted)?;
}
}
}
self.map.insert(index, block);
Ok(())
}

View file

@ -187,12 +187,12 @@ impl Seek for Vhdx {
}
};
if let Some(o) = new_offset {
if o <= self.virtual_disk_size() {
if let Some(o) = new_offset
&& o <= self.virtual_disk_size()
{
self.current_offset = o;
return Ok(o);
}
}
Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,

View file

@ -9,15 +9,14 @@ use std::process::Command;
fn main() {
let mut version = "v".to_owned() + env!("CARGO_PKG_VERSION");
if let Ok(git_out) = Command::new("git").args(["describe", "--dirty"]).output() {
if git_out.status.success() {
if let Ok(git_out_str) = String::from_utf8(git_out.stdout) {
if let Ok(git_out) = Command::new("git").args(["describe", "--dirty"]).output()
&& git_out.status.success()
&& let Ok(git_out_str) = String::from_utf8(git_out.stdout)
{
version = git_out_str;
// Pop the trailing newline.
version.pop();
}
}
}
// Append CH_EXTRA_VERSION to version if it is set.
if let Ok(extra_version) = env::var("CH_EXTRA_VERSION") {

View file

@ -458,12 +458,11 @@ impl BusDevice for Tpm {
CRB_CTRL_CANCEL => {
if v == CRB_CANCEL_INVOKE
&& (self.regs[CRB_CTRL_START as usize] & CRB_START_INVOKE != 0)
&& let Err(e) = self.emulator.cancel_cmd()
{
if let Err(e) = self.emulator.cancel_cmd() {
error!("Failed to run cancel command. Error: {:?}", e);
}
}
}
CRB_CTRL_START => {
if v == CRB_START_INVOKE
&& ((self.regs[CRB_CTRL_START as usize] & CRB_START_INVOKE) == 0)

View file

@ -624,13 +624,13 @@ impl<T: CpuStateManager> Emulator<'_, T> {
last_decoded_ip = decoder.ip();
num_insn_emulated += 1;
if let Some(num_insn) = num_insn {
if num_insn_emulated >= num_insn {
if let Some(num_insn) = num_insn
&& num_insn_emulated >= num_insn
{
// Exit the decoding loop, do not decode the next instruction.
stop_emulation = true;
}
}
}
state.set_ip(decoder.ip());
Ok(state)

View file

@ -44,13 +44,13 @@ impl MshvEmulatorContext<'_> {
gpa
);
if let Some(vm_ops) = &self.vcpu.vm_ops {
if vm_ops.guest_mem_read(gpa, data).is_err() {
if let Some(vm_ops) = &self.vcpu.vm_ops
&& vm_ops.guest_mem_read(gpa, data).is_err()
{
vm_ops
.mmio_read(gpa, data)
.map_err(|e| PlatformError::MemoryReadFailure(e.into()))?;
}
}
Ok(())
}
@ -94,13 +94,13 @@ impl MshvEmulatorContext<'_> {
gpa
);
if let Some(vm_ops) = &self.vcpu.vm_ops {
if vm_ops.guest_mem_write(gpa, data).is_err() {
if let Some(vm_ops) = &self.vcpu.vm_ops
&& vm_ops.guest_mem_write(gpa, data).is_err()
{
vm_ops
.mmio_write(gpa, data)
.map_err(|e| PlatformError::MemoryWriteFailure(e.into()))?;
}
}
Ok(())
}

View file

@ -866,13 +866,13 @@ mod tests {
let p = ParsedPkt::new(buf);
p.print();
if let Some(ref udp) = p.udp {
if payload == udp.payload() {
if let Some(ref udp) = p.udp
&& payload == udp.payload()
{
channel_tx.send(true).unwrap();
break;
}
}
}
});
// We wait for at most SLEEP_MILLIS * SLEEP_ITERS milliseconds for the reception of the

View file

@ -828,11 +828,11 @@ impl PciConfiguration {
let mut addr = u64::from(self.bars[bar_num].addr & self.writable_bits[bar_idx]);
if let Some(bar_type) = self.bars[bar_num].r#type {
if bar_type == PciBarRegionType::Memory64BitRegion {
if let Some(bar_type) = self.bars[bar_num].r#type
&& bar_type == PciBarRegionType::Memory64BitRegion
{
addr |= u64::from(self.bars[bar_num + 1].addr) << 32;
}
}
addr
}
@ -907,8 +907,9 @@ impl PciConfiguration {
}
// Handle potential write to MSI-X message control register
if let Some(msix_cap_reg_idx) = self.msix_cap_reg_idx {
if let Some(msix_config) = &self.msix_config {
if let Some(msix_cap_reg_idx) = self.msix_cap_reg_idx
&& let Some(msix_config) = &self.msix_config
{
if msix_cap_reg_idx == reg_idx && offset == 2 && data.len() == 2 {
msix_config
.lock()
@ -921,7 +922,6 @@ impl PciConfiguration {
.set_msg_ctl((LittleEndian::read_u32(data) >> 16) as u16);
}
}
}
match data.len() {
1 => self.write_byte(reg_idx * 4 + offset as usize, data[0]),

View file

@ -271,17 +271,13 @@ impl MsiConfig {
}
}
if !old_enabled {
if let Err(e) = self.interrupt_source_group.enable() {
if !old_enabled && let Err(e) = self.interrupt_source_group.enable() {
error!("Failed enabling irq_fd: {:?}", e);
}
}
} else if old_enabled {
if let Err(e) = self.interrupt_source_group.disable() {
} else if old_enabled && let Err(e) = self.interrupt_source_group.disable() {
error!("Failed disabling irq_fd: {:?}", e);
}
}
}
}
impl Pausable for MsiConfig {}

View file

@ -208,8 +208,8 @@ impl Interrupt {
}
fn accessed(&self, offset: u64) -> Option<(PciCapabilityId, u64)> {
if let Some(msi) = &self.msi {
if offset >= u64::from(msi.cap_offset)
if let Some(msi) = &self.msi
&& offset >= u64::from(msi.cap_offset)
&& offset < u64::from(msi.cap_offset) + msi.cfg.size()
{
return Some((
@ -217,13 +217,12 @@ impl Interrupt {
u64::from(msi.cap_offset),
));
}
}
if let Some(msix) = &self.msix {
if offset == u64::from(msix.cap_offset) {
if let Some(msix) = &self.msix
&& offset == u64::from(msix.cap_offset)
{
return Some((PciCapabilityId::MsiX, u64::from(msix.cap_offset)));
}
}
None
}
@ -603,15 +602,14 @@ impl VfioCommon {
type_,
..
} = resource
&& *index == bar_id as usize
{
if *index == bar_id as usize {
restored_bar_addr = Some(GuestAddress(*base));
region_size = *size;
region_type = PciBarRegionType::from(*type_);
break;
}
}
}
if restored_bar_addr.is_none() {
bar_id += 1;
continue;
@ -925,26 +923,25 @@ impl VfioCommon {
match PciCapabilityId::from(cap_id) {
PciCapabilityId::MessageSignalledInterrupts => {
if let Some(irq_info) = self.vfio_wrapper.get_irq_info(VFIO_PCI_MSI_IRQ_INDEX) {
if irq_info.count > 0 {
if let Some(irq_info) = self.vfio_wrapper.get_irq_info(VFIO_PCI_MSI_IRQ_INDEX)
&& irq_info.count > 0
{
// Parse capability only if the VFIO device
// supports MSI.
let msg_ctl = self.parse_msi_capabilities(cap_iter);
self.initialize_msi(msg_ctl, cap_iter as u32, None);
}
}
}
PciCapabilityId::MsiX => {
if let Some(irq_info) = self.vfio_wrapper.get_irq_info(VFIO_PCI_MSIX_IRQ_INDEX)
&& irq_info.count > 0
{
if irq_info.count > 0 {
// Parse capability only if the VFIO device
// supports MSI-X.
let msix_cap = self.parse_msix_capabilities(cap_iter);
self.initialize_msix(msix_cap, cap_iter as u32, bdf, None);
}
}
}
PciCapabilityId::PciExpress => pci_express_cap_found = true,
PciCapabilityId::PowerManagement => power_management_cap_found = true,
_ => {}
@ -1038,8 +1035,9 @@ impl VfioCommon {
}
pub(crate) fn enable_intx(&mut self) -> Result<(), VfioPciError> {
if let Some(intx) = &mut self.interrupt.intx {
if !intx.enabled {
if let Some(intx) = &mut self.interrupt.intx
&& !intx.enabled
{
if let Some(eventfd) = intx.interrupt_source_group.notifier(0) {
self.vfio_wrapper
.enable_irq(VFIO_PCI_INTX_IRQ_INDEX, vec![&eventfd])
@ -1050,14 +1048,14 @@ impl VfioCommon {
return Err(VfioPciError::MissingNotifier);
}
}
}
Ok(())
}
pub(crate) fn disable_intx(&mut self) {
if let Some(intx) = &mut self.interrupt.intx {
if intx.enabled {
if let Some(intx) = &mut self.interrupt.intx
&& intx.enabled
{
if let Err(e) = self.vfio_wrapper.disable_irq(VFIO_PCI_INTX_IRQ_INDEX) {
error!("Could not disable INTx: {}", e);
} else {
@ -1065,7 +1063,6 @@ impl VfioCommon {
}
}
}
}
pub(crate) fn enable_msi(&self) -> Result<(), VfioPciError> {
if let Some(msi) = &self.interrupt.msi {
@ -1118,13 +1115,13 @@ impl VfioCommon {
}
fn initialize_legacy_interrupt(&mut self) -> Result<(), VfioPciError> {
if let Some(irq_info) = self.vfio_wrapper.get_irq_info(VFIO_PCI_INTX_IRQ_INDEX) {
if irq_info.count == 0 {
if let Some(irq_info) = self.vfio_wrapper.get_irq_info(VFIO_PCI_INTX_IRQ_INDEX)
&& irq_info.count == 0
{
// A count of 0 means the INTx IRQ is not supported, therefore
// it shouldn't be initialized.
return Ok(());
}
}
if let Some(interrupt_source_group) = self.legacy_interrupt_group.clone() {
self.interrupt.intx = Some(VfioIntx {
@ -1200,12 +1197,12 @@ impl VfioCommon {
// INTx EOI
// The guest reading from the BAR potentially means the interrupt has
// been received and can be acknowledged.
if self.interrupt.intx_in_use() {
if let Err(e) = self.vfio_wrapper.unmask_irq(VFIO_PCI_INTX_IRQ_INDEX) {
if self.interrupt.intx_in_use()
&& let Err(e) = self.vfio_wrapper.unmask_irq(VFIO_PCI_INTX_IRQ_INDEX)
{
error!("Failed unmasking INTx IRQ: {}", e);
}
}
}
pub(crate) fn write_bar(
&mut self,
@ -1228,11 +1225,11 @@ impl VfioCommon {
// INTx EOI
// The guest writing to the BAR potentially means the interrupt has
// been received and can be acknowledged.
if self.interrupt.intx_in_use() {
if let Err(e) = self.vfio_wrapper.unmask_irq(VFIO_PCI_INTX_IRQ_INDEX) {
if self.interrupt.intx_in_use()
&& let Err(e) = self.vfio_wrapper.unmask_irq(VFIO_PCI_INTX_IRQ_INDEX)
{
error!("Failed unmasking INTx IRQ: {}", e);
}
}
None
}
@ -1619,13 +1616,12 @@ impl VfioPciDevice {
// Don't try to mmap the region if it contains MSI-X table or
// MSI-X PBA subregion, and if we couldn't find MSIX_MAPPABLE
// in the list of supported capabilities.
if let Some(msix) = self.common.interrupt.msix.as_ref() {
if (region.index == msix.cap.table_bir() || region.index == msix.cap.pba_bir())
if let Some(msix) = self.common.interrupt.msix.as_ref()
&& (region.index == msix.cap.table_bir() || region.index == msix.cap.pba_bir())
&& !caps.contains(&VfioRegionInfoCap::MsixMappable)
{
continue;
}
}
let mmap_size = self.device.get_region_size(region.index);
let mmap_offset = self.device.get_region_offset(region.index);
@ -1713,8 +1709,8 @@ impl VfioPciDevice {
for region in self.common.mmio_regions.iter() {
for user_memory_region in region.user_memory_regions.iter() {
// Unmap from vfio container
if !self.iommu_attached {
if let Err(e) = self
if !self.iommu_attached
&& let Err(e) = self
.container
.vfio_dma_unmap(user_memory_region.start, user_memory_region.size)
.map_err(|e| VfioPciError::DmaUnmap(e, self.device_path.clone(), self.bdf))
@ -1725,7 +1721,6 @@ impl VfioPciDevice {
user_memory_region.start, user_memory_region.size, e
);
}
}
// Remove region
let r = self.vm.make_user_memory_region(
@ -1791,17 +1786,17 @@ impl Drop for VfioPciDevice {
fn drop(&mut self) {
self.unmap_mmio_regions();
if let Some(msix) = &self.common.interrupt.msix {
if msix.bar.enabled() {
if let Some(msix) = &self.common.interrupt.msix
&& msix.bar.enabled()
{
self.common.disable_msix();
}
}
if let Some(msi) = &self.common.interrupt.msi {
if msi.cfg.enabled() {
if let Some(msi) = &self.common.interrupt.msi
&& msi.cfg.enabled()
{
self.common.disable_msi()
}
}
if self.common.interrupt.intx_in_use() {
self.common.disable_intx();
@ -1898,8 +1893,8 @@ impl PciDevice for VfioPciDevice {
for user_memory_region in region.user_memory_regions.iter_mut() {
// Unmap the old MMIO region from vfio container
if !self.iommu_attached {
if let Err(e) = self
if !self.iommu_attached
&& let Err(e) = self
.container
.vfio_dma_unmap(user_memory_region.start, user_memory_region.size)
.map_err(|e| {
@ -1912,7 +1907,6 @@ impl PciDevice for VfioPciDevice {
user_memory_region.start, user_memory_region.size, e
);
}
}
// Remove old region
let old_mem_region = self.vm.make_user_memory_region(

View file

@ -505,17 +505,17 @@ impl Drop for VfioUserPciDevice {
fn drop(&mut self) {
self.unmap_mmio_regions();
if let Some(msix) = &self.common.interrupt.msix {
if msix.bar.enabled() {
if let Some(msix) = &self.common.interrupt.msix
&& msix.bar.enabled()
{
self.common.disable_msix();
}
}
if let Some(msi) = &self.common.interrupt.msi {
if msi.cfg.enabled() {
if let Some(msi) = &self.common.interrupt.msi
&& msi.cfg.enabled()
{
self.common.disable_msi()
}
}
if self.common.interrupt.intx_in_use() {
self.common.disable_intx();

View file

@ -288,12 +288,12 @@ impl Drop for RateLimiterGroup {
fn drop(&mut self) {
self.kill_evt.write(1).unwrap();
if let Some(t) = self.epoll_thread.take() {
if let Err(e) = t.join() {
if let Some(t) = self.epoll_thread.take()
&& let Err(e) = t.join()
{
error!("Error joining thread: {:?}", e);
}
}
}
}
#[cfg(test)]

View file

@ -754,11 +754,11 @@ fn start_vmm(cmd_arguments: ArgMatches) -> Result<Option<String>, Error> {
Ok(())
})();
if r.is_err() {
if let Err(e) = exit_evt.write(1) {
if r.is_err()
&& let Err(e) = exit_evt.write(1)
{
warn!("writing to exit EventFd: {e}");
}
}
if landlock_enable {
Landlock::new()

View file

@ -481,13 +481,14 @@ pub fn rate_limited_copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::
match fs::copy(&from, &to) {
Err(e) => {
if let Some(errno) = e.raw_os_error() {
if errno == libc::ENOSPC {
if let Some(errno) = e.raw_os_error()
&& errno == libc::ENOSPC
{
eprintln!("Copy returned ENOSPC. Attempt {i} of 10. Sleeping.");
thread::sleep(std::time::Duration::new(60, 0));
continue;
}
}
return Err(e);
}
Ok(i) => return Ok(i),
@ -1094,14 +1095,13 @@ impl Guest {
let vendors: Vec<&str> = vendors.split('\n').collect();
for (index, d_id) in devices.iter().enumerate() {
if *d_id == device_id {
if let Some(v_id) = vendors.get(index) {
if *v_id == vendor_id {
if *d_id == device_id
&& let Some(v_id) = vendors.get(index)
&& *v_id == vendor_id
{
return Ok(true);
}
}
}
}
Ok(false)
}

View file

@ -454,12 +454,11 @@ impl EpollHelperHandler for ConsoleEpollHandler {
}
if self.endpoint.is_pty() {
self.file_event_registered = false;
if event.events & libc::EPOLLHUP as u32 != 0 {
if let Some(pty_write_out) = &self.write_out {
if pty_write_out.load(Ordering::Acquire) {
if event.events & libc::EPOLLHUP as u32 != 0
&& let Some(pty_write_out) = &self.write_out
&& pty_write_out.load(Ordering::Acquire)
{
pty_write_out.store(false, Ordering::Release);
}
}
} else {
// If the EPOLLHUP flag is not up on the associated event, we
// can assume the other end of the PTY is connected and therefore
@ -731,11 +730,11 @@ impl VirtioDevice for Console {
.acked_features
.store(self.common.acked_features, Ordering::Relaxed);
if self.common.feature_acked(VIRTIO_CONSOLE_F_SIZE) {
if let Err(e) = interrupt_cb.trigger(VirtioInterruptType::Config) {
if self.common.feature_acked(VIRTIO_CONSOLE_F_SIZE)
&& let Err(e) = interrupt_cb.trigger(VirtioInterruptType::Config)
{
error!("Failed to signal console driver: {:?}", e);
}
}
let (kill_evt, pause_evt) = self.common.dup_eventfds();

View file

@ -421,15 +421,14 @@ impl Request {
// If any other mappings exist in the domain for other containers,
// make sure to issue these mappings for the new endpoint/container
if let Some(domain_mappings) = &mapping.domains.read().unwrap().get(&domain_id)
&& let Some(ext_map) = ext_mapping.get(&endpoint)
{
if let Some(ext_map) = ext_mapping.get(&endpoint) {
for (virt_start, addr_map) in &domain_mappings.mappings {
ext_map
.map(*virt_start, addr_map.gpa, addr_map.size)
.map_err(Error::ExternalUnmapping)?;
}
}
}
// Add new domain with no mapping if the entry didn't exist yet
let mut domains = mapping.domains.write().unwrap();
@ -654,15 +653,15 @@ fn detach_endpoint_from_domain(
mapping.endpoints.write().unwrap().remove(&endpoint);
// Trigger external unmapping for the endpoint if necessary.
if let Some(domain_mappings) = &mapping.domains.read().unwrap().get(&domain_id) {
if let Some(ext_map) = ext_mapping.get(&endpoint) {
if let Some(domain_mappings) = &mapping.domains.read().unwrap().get(&domain_id)
&& let Some(ext_map) = ext_mapping.get(&endpoint)
{
for (virt_start, addr_map) in &domain_mappings.mappings {
ext_map
.unmap(*virt_start, addr_map.size)
.map_err(Error::ExternalUnmapping)?;
}
}
}
if mapping
.endpoints

View file

@ -475,12 +475,10 @@ impl MemEpollHandler {
return VIRTIO_MEM_RESP_ERROR;
}
if !plug {
if let Err(e) = self.discard_memory_range(offset, size) {
if !plug && let Err(e) = self.discard_memory_range(offset, size) {
error!("failed discarding memory range: {:?}", e);
return VIRTIO_MEM_RESP_ERROR;
}
}
self.blocks_state
.lock()

View file

@ -667,12 +667,12 @@ impl Drop for Net {
}
// Needed to ensure all references to tap FDs are dropped (#4868)
self.common.wait_for_epoll_threads();
if let Some(thread) = self.ctrl_queue_epoll_thread.take() {
if let Err(e) = thread.join() {
if let Some(thread) = self.ctrl_queue_epoll_thread.take()
&& let Err(e) = thread.join()
{
error!("Error joining thread: {:?}", e);
}
}
}
}
impl VirtioDevice for Net {

View file

@ -36,13 +36,13 @@ where
thread::Builder::new()
.name(name.to_string())
.spawn(move || {
if !seccomp_filter.is_empty() {
if let Err(e) = apply_filter(&seccomp_filter) {
if !seccomp_filter.is_empty()
&& let Err(e) = apply_filter(&seccomp_filter)
{
error!("Error applying seccomp filter: {:?}", e);
thread_exit_evt.write(1).ok();
return;
}
}
match std::panic::catch_unwind(AssertUnwindSafe(f)) {
Err(_) => {
error!("{} thread panicked", thread_name);

View file

@ -261,19 +261,12 @@ impl VirtioPciCommonConfig {
let ready = value == 1;
q.set_ready(ready);
// Translate address of descriptor table and vrings.
if let Some(access_platform) = &self.access_platform {
if ready {
if ready && let Some(access_platform) = &self.access_platform {
let desc_table = access_platform
.translate_gva(
q.desc_table(),
get_vring_size(VringType::Desc, q.size()),
)
.translate_gva(q.desc_table(), get_vring_size(VringType::Desc, q.size()))
.unwrap();
let avail_ring = access_platform
.translate_gva(
q.avail_ring(),
get_vring_size(VringType::Avail, q.size()),
)
.translate_gva(q.avail_ring(), get_vring_size(VringType::Avail, q.size()))
.unwrap();
let used_ring = access_platform
.translate_gva(q.used_ring(), get_vring_size(VringType::Used, q.size()))
@ -291,7 +284,6 @@ impl VirtioPciCommonConfig {
Some((used_ring >> 32) as u32),
);
}
}
}),
_ => {
warn!("invalid virtio register word write: 0x{:x}", offset);

View file

@ -968,8 +968,8 @@ impl PciDevice for VirtioPciDevice {
if let Resource::PciBar {
index, base, type_, ..
} = resource
&& index == VIRTIO_COMMON_BAR_INDEX
{
if index == VIRTIO_COMMON_BAR_INDEX {
settings_bar_addr = Some(GuestAddress(base));
use_64bit_bar = match type_ {
PciBarType::Io => {
@ -981,7 +981,6 @@ impl PciDevice for VirtioPciDevice {
break;
}
}
}
// Error out if no resource was matching the BAR id.
if settings_bar_addr.is_none() {
return Err(PciDeviceError::MissingResource);

View file

@ -213,18 +213,18 @@ impl Blk {
impl Drop for Blk {
fn drop(&mut self) {
if let Some(kill_evt) = self.common.kill_evt.take() {
if let Err(e) = kill_evt.write(1) {
if let Some(kill_evt) = self.common.kill_evt.take()
&& let Err(e) = kill_evt.write(1)
{
error!("failed to kill vhost-user-blk: {:?}", e);
}
}
self.common.wait_for_epoll_threads();
if let Some(thread) = self.epoll_thread.take() {
if let Err(e) = thread.join() {
if let Some(thread) = self.epoll_thread.take()
&& let Err(e) = thread.join()
{
error!("Error joining thread: {:?}", e);
}
}
}
}
impl VirtioDevice for Blk {
@ -267,8 +267,8 @@ impl VirtioDevice for Blk {
}
self.config.writeback = data[0];
if let Some(vu) = &self.vu_common.vu {
if let Err(e) = vu
if let Some(vu) = &self.vu_common.vu
&& let Err(e) = vu
.lock()
.unwrap()
.socket_handle()
@ -278,7 +278,6 @@ impl VirtioDevice for Blk {
error!("Failed setting vhost-user-blk configuration: {:?}", e);
}
}
}
fn activate(
&mut self,
@ -329,12 +328,12 @@ impl VirtioDevice for Blk {
self.common.resume().ok()?;
}
if let Some(vu) = &self.vu_common.vu {
if let Err(e) = vu.lock().unwrap().reset_vhost_user() {
if let Some(vu) = &self.vu_common.vu
&& let Err(e) = vu.lock().unwrap().reset_vhost_user()
{
error!("Failed to reset vhost-user daemon: {:?}", e);
return None;
}
}
if let Some(kill_evt) = self.common.kill_evt.take() {
// Ignore the result because there is nothing we can do about it.

View file

@ -227,12 +227,12 @@ impl Drop for Fs {
let _ = kill_evt.write(1);
}
self.common.wait_for_epoll_threads();
if let Some(thread) = self.epoll_thread.take() {
if let Err(e) = thread.join() {
if let Some(thread) = self.epoll_thread.take()
&& let Err(e) = thread.join()
{
error!("Error joining thread: {:?}", e);
}
}
}
}
impl VirtioDevice for Fs {
@ -308,12 +308,12 @@ impl VirtioDevice for Fs {
self.common.resume().ok()?;
}
if let Some(vu) = &self.vu_common.vu {
if let Err(e) = vu.lock().unwrap().reset_vhost_user() {
if let Some(vu) = &self.vu_common.vu
&& let Err(e) = vu.lock().unwrap().reset_vhost_user()
{
error!("Failed to reset vhost-user daemon: {:?}", e);
return None;
}
}
if let Some(kill_evt) = self.common.kill_evt.take() {
// Ignore the result because there is nothing we can do about it.

View file

@ -243,25 +243,26 @@ impl Net {
impl Drop for Net {
fn drop(&mut self) {
if let Some(kill_evt) = self.common.kill_evt.take() {
if let Err(e) = kill_evt.write(1) {
if let Some(kill_evt) = self.common.kill_evt.take()
&& let Err(e) = kill_evt.write(1)
{
error!("failed to kill vhost-user-net: {:?}", e);
}
}
self.common.wait_for_epoll_threads();
if let Some(thread) = self.epoll_thread.take() {
if let Err(e) = thread.join() {
if let Some(thread) = self.epoll_thread.take()
&& let Err(e) = thread.join()
{
error!("Error joining thread: {:?}", e);
}
}
if let Some(thread) = self.ctrl_queue_epoll_thread.take() {
if let Err(e) = thread.join() {
if let Some(thread) = self.ctrl_queue_epoll_thread.take()
&& let Err(e) = thread.join()
{
error!("Error joining thread: {:?}", e);
}
}
}
}
impl VirtioDevice for Net {
@ -382,12 +383,12 @@ impl VirtioDevice for Net {
self.common.resume().ok()?;
}
if let Some(vu) = &self.vu_common.vu {
if let Err(e) = vu.lock().unwrap().reset_vhost_user() {
if let Some(vu) = &self.vu_common.vu
&& let Err(e) = vu.lock().unwrap().reset_vhost_user()
{
error!("Failed to reset vhost-user daemon: {:?}", e);
return None;
}
}
if let Some(kill_evt) = self.common.kill_evt.take() {
// Ignore the result because there is nothing we can do about it.

View file

@ -317,8 +317,8 @@ impl VhostUserHandle {
.get_features()
.map_err(Error::VhostUserGetFeatures)?;
if acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() != 0 {
if let Some(acked_protocol_features) =
if acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() != 0
&& let Some(acked_protocol_features) =
VhostUserProtocolFeatures::from_bits(acked_protocol_features)
{
self.vu
@ -329,7 +329,6 @@ impl VhostUserHandle {
self.vu.set_hdr_flags(VhostUserHeaderFlag::NEED_REPLY);
}
}
}
self.update_supports_migration(acked_features, acked_protocol_features);

View file

@ -437,11 +437,11 @@ impl VsockMuxer {
if let Some(EpollListener::LocalStream(stream)) = self.listener_map.get_mut(&fd) {
let port = Self::read_local_stream_port(&mut self.partial_command_map, stream);
if let Err(Error::UnixRead(ref e)) = port {
if e.kind() == ErrorKind::WouldBlock {
if let Err(Error::UnixRead(ref e)) = port
&& e.kind() == ErrorKind::WouldBlock
{
return;
}
}
let stream = match self.remove_listener(fd) {
Some(EpollListener::LocalStream(s)) => s,

View file

@ -111,11 +111,12 @@ impl MuxerKillQ {
/// the queue has expired. Otherwise, `None` is returned.
///
pub fn pop(&mut self) -> Option<ConnMapKey> {
if let Some(item) = self.q.front() {
if Instant::now() > item.kill_time {
if let Some(item) = self.q.front()
&& Instant::now() > item.kill_time
{
return Some(self.q.pop_front().unwrap().key);
}
}
None
}

View file

@ -196,12 +196,12 @@ impl AddressAllocator {
/// Free an already allocated address range.
/// We can only free a range if it matches exactly an already allocated range.
pub fn free(&mut self, address: GuestAddress, size: GuestUsize) {
if let Some(&range_size) = self.ranges.get(&address) {
if size == range_size {
if let Some(&range_size) = self.ranges.get(&address)
&& size == range_size
{
self.ranges.remove(&address);
}
}
}
/// Start address of the allocator
pub fn base(&self) -> GuestAddress {

View file

@ -1248,26 +1248,27 @@ impl DiskConfig {
return Err(ValidationError::InvalidPciSegment(self.pci_segment));
}
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref() {
if iommu_segments.contains(&self.pci_segment) && !self.iommu {
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref()
&& iommu_segments.contains(&self.pci_segment)
&& !self.iommu
{
return Err(ValidationError::OnIommuSegment(self.pci_segment));
}
}
}
if self.rate_limiter_config.is_some() && self.rate_limit_group.is_some() {
return Err(ValidationError::InvalidRateLimiterGroup);
}
// Check Block device serial length
if let Some(ref serial) = self.serial {
if serial.len() > VIRTIO_BLK_ID_BYTES as usize {
if let Some(ref serial) = self.serial
&& serial.len() > VIRTIO_BLK_ID_BYTES as usize
{
return Err(ValidationError::InvalidSerialLength(
serial.len(),
VIRTIO_BLK_ID_BYTES as usize,
));
}
}
Ok(())
}
@ -1496,18 +1497,19 @@ impl NetConfig {
return Err(ValidationError::InvalidPciSegment(self.pci_segment));
}
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref() {
if iommu_segments.contains(&self.pci_segment) && !self.iommu {
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref()
&& iommu_segments.contains(&self.pci_segment)
&& !self.iommu
{
return Err(ValidationError::OnIommuSegment(self.pci_segment));
}
}
}
if let Some(mtu) = self.mtu {
if mtu < virtio_devices::net::MIN_MTU {
if let Some(mtu) = self.mtu
&& mtu < virtio_devices::net::MIN_MTU
{
return Err(ValidationError::InvalidMtu(mtu));
}
}
if !self.offload_csum && (self.offload_tso || self.offload_ufo) {
return Err(ValidationError::NoHardwareChecksumOffload);
@ -1633,14 +1635,14 @@ impl FsConfig {
return Err(ValidationError::InvalidPciSegment(self.pci_segment));
}
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref() {
if iommu_segments.contains(&self.pci_segment) {
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref()
&& iommu_segments.contains(&self.pci_segment)
{
return Err(ValidationError::IommuNotSupportedOnSegment(
self.pci_segment,
));
}
}
}
Ok(())
}
@ -1795,12 +1797,13 @@ impl PmemConfig {
return Err(ValidationError::InvalidPciSegment(self.pci_segment));
}
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref() {
if iommu_segments.contains(&self.pci_segment) && !self.iommu {
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref()
&& iommu_segments.contains(&self.pci_segment)
&& !self.iommu
{
return Err(ValidationError::OnIommuSegment(self.pci_segment));
}
}
}
Ok(())
}
@ -1895,18 +1898,19 @@ impl DebugConsoleConfig {
return Err(Error::ParseConsoleInvalidModeGiven);
}
if parser.is_set("iobase") {
if let Some(iobase_opt) = parser.get("iobase") {
if parser.is_set("iobase")
&& let Some(iobase_opt) = parser.get("iobase")
{
if !iobase_opt.starts_with("0x") {
return Err(Error::Validation(ValidationError::InvalidIoPortHex(
iobase_opt,
)));
}
iobase = Some(u16::from_str_radix(&iobase_opt[2..], 16).map_err(|_| {
iobase =
Some(u16::from_str_radix(&iobase_opt[2..], 16).map_err(|_| {
Error::Validation(ValidationError::InvalidIoPortHex(iobase_opt))
})?);
}
}
Ok(Self { file, mode, iobase })
}
@ -1957,12 +1961,13 @@ impl DeviceConfig {
return Err(ValidationError::InvalidPciSegment(self.pci_segment));
}
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref() {
if iommu_segments.contains(&self.pci_segment) && !self.iommu {
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref()
&& iommu_segments.contains(&self.pci_segment)
&& !self.iommu
{
return Err(ValidationError::OnIommuSegment(self.pci_segment));
}
}
}
Ok(())
}
@ -2000,14 +2005,14 @@ impl UserDeviceConfig {
return Err(ValidationError::InvalidPciSegment(self.pci_segment));
}
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref() {
if iommu_segments.contains(&self.pci_segment) {
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref()
&& iommu_segments.contains(&self.pci_segment)
{
return Err(ValidationError::IommuNotSupportedOnSegment(
self.pci_segment,
));
}
}
}
Ok(())
}
@ -2062,12 +2067,13 @@ impl VdpaConfig {
return Err(ValidationError::InvalidPciSegment(self.pci_segment));
}
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref() {
if iommu_segments.contains(&self.pci_segment) && !self.iommu {
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref()
&& iommu_segments.contains(&self.pci_segment)
&& !self.iommu
{
return Err(ValidationError::OnIommuSegment(self.pci_segment));
}
}
}
Ok(())
}
@ -2121,12 +2127,13 @@ impl VsockConfig {
return Err(ValidationError::InvalidPciSegment(self.pci_segment));
}
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref() {
if iommu_segments.contains(&self.pci_segment) && !self.iommu {
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref()
&& iommu_segments.contains(&self.pci_segment)
&& !self.iommu
{
return Err(ValidationError::OnIommuSegment(self.pci_segment));
}
}
}
Ok(())
}
@ -2493,12 +2500,12 @@ impl VmConfig {
{
let host_data_opt = &self.payload.as_ref().unwrap().host_data;
if let Some(host_data) = host_data_opt {
if host_data.len() != 64 {
if let Some(host_data) = host_data_opt
&& host_data.len() != 64
{
return Err(ValidationError::InvalidHostData);
}
}
}
// The 'conflict' check is introduced in commit 24438e0390d3
// (vm-virtio: Enable the vmm support for virtio-console).
//
@ -2675,11 +2682,11 @@ impl VmConfig {
}
}
if let Some(vsock) = &self.vsock {
if [!0, 0, 1, 2].contains(&vsock.cid) {
if let Some(vsock) = &self.vsock
&& [!0, 0, 1, 2].contains(&vsock.cid)
{
return Err(ValidationError::VsockSpecialCid(vsock.cid));
}
}
if let Some(balloon) = &self.balloon {
let mut ram_size = self.memory.size;
@ -3080,12 +3087,12 @@ impl VmConfig {
}
// Remove if vsock device
if let Some(vsock) = self.vsock.as_ref() {
if vsock.id.as_ref().map(|id| id.as_ref()) == Some(id) {
if let Some(vsock) = self.vsock.as_ref()
&& vsock.id.as_ref().map(|id| id.as_ref()) == Some(id)
{
self.vsock = None;
removed = true;
}
}
removed
}

View file

@ -601,11 +601,11 @@ impl BusDevice for CpuManager {
state.removing = false;
}
// Trigger removal of vCPU
if data[0] & (1 << CPU_EJECT_FLAG) == 1 << CPU_EJECT_FLAG {
if let Err(e) = self.remove_vcpu(self.selected_cpu as u32) {
if data[0] & (1 << CPU_EJECT_FLAG) == 1 << CPU_EJECT_FLAG
&& let Err(e) = self.remove_vcpu(self.selected_cpu as u32)
{
error!("Error removing vCPU: {:?}", e);
}
}
} else {
warn!("Out of range vCPU id: {}", self.selected_cpu);
}
@ -1059,14 +1059,13 @@ impl CpuManager {
}
// Apply seccomp filter for vcpu thread.
if !vcpu_seccomp_filter.is_empty() {
if let Err(e) =
if !vcpu_seccomp_filter.is_empty() && let Err(e) =
apply_filter(&vcpu_seccomp_filter).map_err(Error::ApplySeccompFilter)
{
error!("Error applying seccomp filter: {:?}", e);
return;
}
}
extern "C" fn handle_signal(_: i32, _: *mut siginfo_t, _: *mut c_void) {}
// This uses an async signal safe handler to kill the vcpu handles.
register_signal_handler(SIGRTMIN(), handle_signal)

View file

@ -772,14 +772,15 @@ impl DeviceRelocation for AddressManager {
if let Some(node) = self.device_tree.lock().unwrap().get_mut(&id) {
let mut resource_updated = false;
for resource in node.resources.iter_mut() {
if let Resource::PciBar { base, type_, .. } = resource {
if PciBarRegionType::from(*type_) == region_type && *base == old_base {
if let Resource::PciBar { base, type_, .. } = resource
&& PciBarRegionType::from(*type_) == region_type
&& *base == old_base
{
*base = new_base;
resource_updated = true;
break;
}
}
}
if !resource_updated {
return Err(io::Error::other(format!(
@ -814,8 +815,9 @@ impl DeviceRelocation for AddressManager {
} else {
let virtio_dev = virtio_pci_dev.virtio_device();
let mut virtio_dev = virtio_dev.lock().unwrap();
if let Some(mut shm_regions) = virtio_dev.get_shm_regions() {
if shm_regions.addr.raw_value() == old_base {
if let Some(mut shm_regions) = virtio_dev.get_shm_regions()
&& shm_regions.addr.raw_value() == old_base
{
let mem_region = self.vm.make_user_memory_region(
shm_regions.mem_slot,
old_base,
@ -846,14 +848,11 @@ impl DeviceRelocation for AddressManager {
// Update shared memory regions to reflect the new mapping.
shm_regions.addr = GuestAddress(new_base);
virtio_dev.set_shm_regions(shm_regions).map_err(|e| {
io::Error::other(format!(
"failed to update shared memory regions: {e:?}"
))
io::Error::other(format!("failed to update shared memory regions: {e:?}"))
})?;
}
}
}
}
pci_dev.move_bar(old_base, new_base)
}
@ -1655,8 +1654,9 @@ impl DeviceManager {
iommu_attached_devices.append(&mut vfio_user_iommu_device_ids);
// Add all devices from forced iommu segments
if let Some(platform_config) = self.config.lock().unwrap().platform.as_ref() {
if let Some(iommu_segments) = platform_config.iommu_segments.as_ref() {
if let Some(platform_config) = self.config.lock().unwrap().platform.as_ref()
&& let Some(iommu_segments) = platform_config.iommu_segments.as_ref()
{
for segment in iommu_segments {
for device in 0..32 {
let bdf = PciBdf::new(*segment, 0, device, 0);
@ -1666,7 +1666,6 @@ impl DeviceManager {
}
}
}
}
if let Some(iommu_device) = iommu_device {
let dev_id = self.add_virtio_pci_device(iommu_device, &None, iommu_id, 0, None)?;
@ -4350,8 +4349,9 @@ impl DeviceManager {
.add_memory_region(new_region)
.map_err(DeviceManagerError::UpdateMemoryForVirtioDevice)?;
if let Some(dma_handler) = &handle.dma_handler {
if !handle.iommu {
if let Some(dma_handler) = &handle.dma_handler
&& !handle.iommu
{
let gpa = new_region.start_addr().0;
let size = new_region.len();
dma_handler
@ -4359,7 +4359,6 @@ impl DeviceManager {
.map_err(DeviceManagerError::VirtioDmaMap)?;
}
}
}
// Take care of updating the memory for VFIO PCI devices.
if let Some(vfio_container) = &self.vfio_container {
@ -4576,11 +4575,11 @@ impl DeviceManager {
};
let mut iommu_attached = false;
if let Some((_, iommu_attached_devices)) = &self.iommu_attached_devices {
if iommu_attached_devices.contains(&pci_device_bdf) {
if let Some((_, iommu_attached_devices)) = &self.iommu_attached_devices
&& iommu_attached_devices.contains(&pci_device_bdf)
{
iommu_attached = true;
}
}
let (pci_device, bus_device, virtio_device, remove_dma_handler) = match pci_device_handle {
// No need to remove any virtio-mem mapping here as the container outlives all devices
@ -4610,8 +4609,9 @@ impl DeviceManager {
.map_err(|e| DeviceManagerError::UnRegisterIoevent(e.into()))?;
}
if let Some(dma_handler) = dev.dma_handler() {
if !iommu_attached {
if let Some(dma_handler) = dev.dma_handler()
&& !iommu_attached
{
for (_, zone) in self.memory_manager.lock().unwrap().memory_zones().iter() {
for region in zone.regions() {
let iova = region.start_addr().0;
@ -4622,7 +4622,6 @@ impl DeviceManager {
}
}
}
}
(
Arc::clone(&virtio_pci_device) as Arc<Mutex<dyn PciDevice>>,

View file

@ -428,12 +428,12 @@ pub fn load_igvm(
let gpas_grouped = gpas
.iter()
.fold(Vec::<Vec<GpaPages>>::new(), |mut acc, gpa| {
if let Some(last_vec) = acc.last_mut() {
if last_vec[0].page_type == gpa.page_type {
if let Some(last_vec) = acc.last_mut()
&& last_vec[0].page_type == gpa.page_type
{
last_vec.push(*gpa);
return acc;
}
}
acc.push(vec![*gpa]);
acc
});

View file

@ -729,15 +729,14 @@ impl Vmm {
thread::Builder::new()
.name("vmm_signal_handler".to_string())
.spawn(move || {
if !signal_handler_seccomp_filter.is_empty() {
if let Err(e) = apply_filter(&signal_handler_seccomp_filter)
if !signal_handler_seccomp_filter.is_empty() && let Err(e) = apply_filter(&signal_handler_seccomp_filter)
.map_err(Error::ApplySeccompFilter)
{
error!("Error applying seccomp filter: {:?}", e);
exit_evt.write(1).ok();
return;
}
}
if landlock_enable{
match Landlock::new() {
Ok(landlock) => {
@ -1834,11 +1833,11 @@ impl RequestHandler for Vmm {
if let Some(desired_ram) = desired_ram {
config.memory.size = desired_ram;
}
if let Some(desired_balloon) = desired_balloon {
if let Some(balloon_config) = &mut config.balloon {
if let Some(desired_balloon) = desired_balloon
&& let Some(balloon_config) = &mut config.balloon
{
balloon_config.size = desired_balloon;
}
}
Ok(())
}
}
@ -2306,17 +2305,17 @@ impl RequestHandler for Vmm {
error!("Migration failed: {:?}", migration_err);
// Stop logging dirty pages only for non-local migrations
if !send_data_migration.local {
if let Err(e) = vm.stop_dirty_log() {
if !send_data_migration.local
&& let Err(e) = vm.stop_dirty_log()
{
return e;
}
}
if vm.get_state().unwrap() == VmState::Paused {
if let Err(e) = vm.resume() {
if vm.get_state().unwrap() == VmState::Paused
&& let Err(e) = vm.resume()
{
return e;
}
}
migration_err
})?;

View file

@ -1959,9 +1959,9 @@ impl MemoryManager {
}
for region in memory_zone.regions() {
if snapshot {
if let Some(file_offset) = region.file_offset() {
if (region.flags() & libc::MAP_SHARED == libc::MAP_SHARED)
if snapshot
&& let Some(file_offset) = region.file_offset()
&& (region.flags() & libc::MAP_SHARED == libc::MAP_SHARED)
&& Self::is_hardlink(file_offset.file())
{
// In this very specific case, we know the memory
@ -1975,8 +1975,6 @@ impl MemoryManager {
// the backing file already.
continue;
}
}
}
table.push(MemoryRange {
gpa: region.start_addr().raw_value(),

View file

@ -432,12 +432,12 @@ impl Drop for SerialManager {
if let Some(handle) = self.handle.take() {
handle.join().ok();
}
if let ConsoleOutput::Socket(_) = self.in_file {
if let Some(socket_path) = self.socket_path.as_ref() {
if let ConsoleOutput::Socket(_) = self.in_file
&& let Some(socket_path) = self.socket_path.as_ref()
{
std::fs::remove_file(socket_path.as_os_str())
.map_err(Error::RemoveUnixSocket)
.ok();
}
}
}
}

View file

@ -3123,14 +3123,14 @@ impl GuestDebuggable for Vm {
#[cfg(feature = "tdx")]
{
if let Some(ref platform) = self.config.lock().unwrap().platform {
if platform.tdx {
if let Some(ref platform) = self.config.lock().unwrap().platform
&& platform.tdx
{
return Err(GuestDebuggableError::Coredump(anyhow!(
"Coredump not possible with TDX VM"
)));
}
}
}
match self.get_state().unwrap() {
VmState::Running => {