misc: Use variables directly in format! string
Fix clippy warning `uninlined_format_args` reported by rustc
1.89.0 (29483883e 2025-08-04).
```console
warning: variables can be used directly in the `format!` string
--> block/src/lib.rs:649:17
|
649 | info!("{} failed to create io_uring instance: {}", error_msg, e);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
= note: `#[warn(clippy::uninlined_format_args)]` on by default
help: change this to
|
649 - info!("{} failed to create io_uring instance: {}", error_msg, e);
649 + info!("{error_msg} failed to create io_uring instance: {e}");
|
```
Signed-off-by: Ruoqing He <heruoqing@iscas.ac.cn>
This commit is contained in:
parent
ea83fe314c
commit
f2dfa7f6e0
56 changed files with 470 additions and 679 deletions
|
|
@ -829,7 +829,7 @@ pub fn configure_vcpu(
|
|||
}
|
||||
|
||||
for c in &cpuid {
|
||||
debug!("{}", c);
|
||||
debug!("{c}");
|
||||
}
|
||||
|
||||
vcpu.set_cpuid2(&cpuid)
|
||||
|
|
@ -995,17 +995,15 @@ pub fn generate_ram_ranges(guest_mem: &GuestMemoryMmap) -> super::Result<Vec<Ram
|
|||
&& (first_region_end <= &mem_32bit_reserved_start))
|
||||
{
|
||||
error!(
|
||||
"Unexpected first memory region layout: (start: 0x{:08x}, end: 0x{:08x}).
|
||||
high_ram_start: 0x{:08x}, mem_32bit_reserved_start: 0x{:08x}",
|
||||
first_region_start, first_region_end, high_ram_start, mem_32bit_reserved_start
|
||||
"Unexpected first memory region layout: (start: 0x{first_region_start:08x}, end: 0x{first_region_end:08x}).
|
||||
high_ram_start: 0x{high_ram_start:08x}, mem_32bit_reserved_start: 0x{mem_32bit_reserved_start:08x}"
|
||||
);
|
||||
|
||||
return Err(super::Error::MemmapTableSetup);
|
||||
}
|
||||
|
||||
info!(
|
||||
"first usable physical memory range, start: 0x{:08x}, end: 0x{:08x}",
|
||||
high_ram_start, first_region_end
|
||||
"first usable physical memory range, start: 0x{high_ram_start:08x}, end: 0x{first_region_end:08x}"
|
||||
);
|
||||
|
||||
(high_ram_start, *first_region_end)
|
||||
|
|
|
|||
|
|
@ -108,7 +108,7 @@ fn tdvf_descriptor_offset(file: &mut File) -> Result<(SeekFrom, bool), TdvfError
|
|||
u16::from_le_bytes(table[offset - 18..offset - 16].try_into().unwrap()) as usize;
|
||||
debug!(
|
||||
"Entry GUID = {}, size = {}",
|
||||
entry_uuid.hyphenated().to_string(),
|
||||
entry_uuid.hyphenated(),
|
||||
entry_size
|
||||
);
|
||||
|
||||
|
|
|
|||
|
|
@ -293,14 +293,14 @@ impl Request {
|
|||
.next()
|
||||
.ok_or(Error::DescriptorChainTooShort)
|
||||
.inspect_err(|_| {
|
||||
error!("Only head descriptor present: request = {:?}", req);
|
||||
error!("Only head descriptor present: request = {req:?}");
|
||||
})?;
|
||||
|
||||
if !desc.has_next() {
|
||||
status_desc = desc;
|
||||
// Only flush requests are allowed to skip the data descriptor.
|
||||
if req.request_type != RequestType::Flush {
|
||||
error!("Need a data descriptor: request = {:?}", req);
|
||||
error!("Need a data descriptor: request = {req:?}");
|
||||
return Err(Error::DescriptorChainTooShort);
|
||||
}
|
||||
} else {
|
||||
|
|
@ -325,7 +325,7 @@ impl Request {
|
|||
.next()
|
||||
.ok_or(Error::DescriptorChainTooShort)
|
||||
.inspect_err(|_| {
|
||||
error!("DescriptorChain corrupted: request = {:?}", req);
|
||||
error!("DescriptorChain corrupted: request = {req:?}");
|
||||
})?;
|
||||
}
|
||||
status_desc = desc;
|
||||
|
|
|
|||
|
|
@ -51,7 +51,7 @@ impl BusDevice for AcpiShutdownDevice {
|
|||
if data[0] == 1 {
|
||||
info!("ACPI Reboot signalled");
|
||||
if let Err(e) = self.reset_evt.write(1) {
|
||||
error!("Error triggering ACPI reset event: {}", e);
|
||||
error!("Error triggering ACPI reset event: {e}");
|
||||
}
|
||||
// Spin until we are sure the reset_evt has been handled and that when
|
||||
// we return from the KVM_RUN we will exit rather than re-enter the guest.
|
||||
|
|
@ -68,7 +68,7 @@ impl BusDevice for AcpiShutdownDevice {
|
|||
if data[0] == (S5_SLEEP_VALUE << SLEEP_VALUE_BIT) | (1 << SLEEP_STATUS_EN_BIT) {
|
||||
info!("ACPI Shutdown signalled");
|
||||
if let Err(e) = self.exit_evt.write(1) {
|
||||
error!("Error triggering ACPI shutdown event: {}", e);
|
||||
error!("Error triggering ACPI shutdown event: {e}");
|
||||
}
|
||||
// Spin until we are sure the reset_evt has been handled and that when
|
||||
// we return from the KVM_RUN we will exit rather than re-enter the guest.
|
||||
|
|
|
|||
|
|
@ -151,13 +151,13 @@ impl BusDevice for Ioapic {
|
|||
return;
|
||||
}
|
||||
|
||||
debug!("IOAPIC_R @ offset 0x{:x}", offset);
|
||||
debug!("IOAPIC_R @ offset 0x{offset:x}");
|
||||
|
||||
let value: u32 = match offset as u8 {
|
||||
IOREGSEL_OFF => self.reg_sel,
|
||||
IOWIN_OFF => self.ioapic_read(),
|
||||
_ => {
|
||||
error!("IOAPIC: failed reading at offset {}", offset);
|
||||
error!("IOAPIC: failed reading at offset {offset}");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
|
@ -171,7 +171,7 @@ impl BusDevice for Ioapic {
|
|||
return None;
|
||||
}
|
||||
|
||||
debug!("IOAPIC_W @ offset 0x{:x}", offset);
|
||||
debug!("IOAPIC_W @ offset 0x{offset:x}");
|
||||
|
||||
let value = LittleEndian::read_u32(data);
|
||||
|
||||
|
|
@ -179,7 +179,7 @@ impl BusDevice for Ioapic {
|
|||
IOREGSEL_OFF => self.reg_sel = value,
|
||||
IOWIN_OFF => self.ioapic_write(value),
|
||||
_ => {
|
||||
error!("IOAPIC: failed writing at offset {}", offset);
|
||||
error!("IOAPIC: failed writing at offset {offset}");
|
||||
}
|
||||
}
|
||||
None
|
||||
|
|
@ -266,7 +266,7 @@ impl Ioapic {
|
|||
IOWIN_OFF..=REG_MAX_OFFSET => {
|
||||
let (index, is_high_bits) = decode_irq_from_selector(self.reg_sel as u8);
|
||||
if index > NUM_IOAPIC_PINS {
|
||||
warn!("IOAPIC index out of range: {}", index);
|
||||
warn!("IOAPIC index out of range: {index}");
|
||||
return;
|
||||
}
|
||||
if is_high_bits {
|
||||
|
|
@ -282,7 +282,7 @@ impl Ioapic {
|
|||
// The entry must be updated through the interrupt source
|
||||
// group.
|
||||
if let Err(e) = self.update_entry(index, true) {
|
||||
error!("Failed updating IOAPIC entry: {:?}", e);
|
||||
error!("Failed updating IOAPIC entry: {e:?}");
|
||||
}
|
||||
// Store the information this IRQ is now being used.
|
||||
self.used_entries[index] = true;
|
||||
|
|
@ -303,7 +303,7 @@ impl Ioapic {
|
|||
IOWIN_OFF..=REG_MAX_OFFSET => {
|
||||
let (index, is_high_bits) = decode_irq_from_selector(self.reg_sel as u8);
|
||||
if index > NUM_IOAPIC_PINS {
|
||||
warn!("IOAPIC index out of range: {}", index);
|
||||
warn!("IOAPIC index out of range: {index}");
|
||||
return 0;
|
||||
}
|
||||
if is_high_bits {
|
||||
|
|
|
|||
|
|
@ -90,7 +90,7 @@ impl BusDevice for Cmos {
|
|||
self.data[(self.index & INDEX_MASK) as usize] = data[0]
|
||||
}
|
||||
}
|
||||
o => warn!("bad write offset on CMOS device: {}", o),
|
||||
o => warn!("bad write offset on CMOS device: {o}"),
|
||||
};
|
||||
None
|
||||
}
|
||||
|
|
@ -164,7 +164,7 @@ impl BusDevice for Cmos {
|
|||
}
|
||||
}
|
||||
o => {
|
||||
warn!("bad read offset on CMOS device: {}", o);
|
||||
warn!("bad read offset on CMOS device: {o}");
|
||||
0
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -45,7 +45,7 @@ impl BusDevice for I8042Device {
|
|||
if data.len() == 1 && data[0] == 0xfe && offset == 3 {
|
||||
info!("i8042 reset signalled");
|
||||
if let Err(e) = self.reset_evt.write(1) {
|
||||
error!("Error triggering i8042 reset event: {}", e);
|
||||
error!("Error triggering i8042 reset event: {e}");
|
||||
}
|
||||
// Spin until we are sure the reset_evt has been handled and that when
|
||||
// we return from the KVM_RUN we will exit rather than re-enter the guest.
|
||||
|
|
|
|||
|
|
@ -68,8 +68,7 @@ impl PvPanicDevice {
|
|||
let pci_configuration_state =
|
||||
vm_migration::state_from_id(snapshot.as_ref(), PCI_CONFIGURATION_ID).map_err(|e| {
|
||||
PvPanicError::RetrievePciConfigurationState(anyhow!(
|
||||
"Failed to get PciConfigurationState from Snapshot: {}",
|
||||
e
|
||||
"Failed to get PciConfigurationState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -100,8 +99,7 @@ impl PvPanicDevice {
|
|||
.transpose()
|
||||
.map_err(|e| {
|
||||
PvPanicError::CreatePvPanicDevice(anyhow!(
|
||||
"Failed to get PvPanicDeviceState from Snapshot: {}",
|
||||
e
|
||||
"Failed to get PvPanicDeviceState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
let events = if let Some(state) = state {
|
||||
|
|
@ -148,7 +146,7 @@ impl BusDevice for PvPanicDevice {
|
|||
|
||||
fn write(&mut self, _base: u64, _offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
|
||||
let event = self.event_to_string(data[0]);
|
||||
info!("pvpanic got guest event {}", event);
|
||||
info!("pvpanic got guest event {event}");
|
||||
event!("guest", "panic", "event", &event);
|
||||
None
|
||||
}
|
||||
|
|
|
|||
|
|
@ -221,7 +221,7 @@ pub struct Tpm {
|
|||
impl Tpm {
|
||||
pub fn new(path: String) -> Result<Self> {
|
||||
let emulator = Emulator::new(path)
|
||||
.map_err(|e| Error::Init(anyhow!("Failed while initializing tpm Emulator: {:?}", e)))?;
|
||||
.map_err(|e| Error::Init(anyhow!("Failed while initializing tpm Emulator: {e:?}")))?;
|
||||
let mut tpm = Tpm {
|
||||
emulator,
|
||||
regs: [0; TPM_CRB_R_MAX],
|
||||
|
|
@ -331,8 +331,7 @@ impl Tpm {
|
|||
|
||||
if let Err(e) = self.emulator.startup_tpm(self.backend_buff_size) {
|
||||
return Err(Error::Init(anyhow!(
|
||||
"Failed while running Startup TPM. Error: {:?}",
|
||||
e
|
||||
"Failed while running Startup TPM. Error: {e:?}"
|
||||
)));
|
||||
}
|
||||
Ok(())
|
||||
|
|
@ -460,7 +459,7 @@ impl BusDevice for Tpm {
|
|||
&& (self.regs[CRB_CTRL_START as usize] & CRB_START_INVOKE != 0)
|
||||
&& let Err(e) = self.emulator.cancel_cmd()
|
||||
{
|
||||
error!("Failed to run cancel command. Error: {:?}", e);
|
||||
error!("Failed to run cancel command. Error: {e:?}");
|
||||
}
|
||||
}
|
||||
CRB_CTRL_START => {
|
||||
|
|
@ -481,10 +480,7 @@ impl BusDevice for Tpm {
|
|||
}
|
||||
}
|
||||
CRB_LOC_CTRL => {
|
||||
warn!(
|
||||
"CRB_LOC_CTRL locality to write = {:?} val = {:?}",
|
||||
locality, v
|
||||
);
|
||||
warn!("CRB_LOC_CTRL locality to write = {locality:?} val = {v:?}");
|
||||
match v {
|
||||
CRB_LOC_CTRL_RESET_ESTABLISHMENT_BIT => {}
|
||||
CRB_LOC_CTRL_RELINQUISH => {
|
||||
|
|
@ -517,7 +513,7 @@ impl BusDevice for Tpm {
|
|||
);
|
||||
}
|
||||
_ => {
|
||||
error!("Invalid value to write in CRB_LOC_CTRL {:#X} ", v);
|
||||
error!("Invalid value to write in CRB_LOC_CTRL {v:#X} ");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -633,7 +633,7 @@ impl vm::Vm for KvmVm {
|
|||
///
|
||||
fn create_vaia(&self, config: VaiaConfig) -> vm::Result<Arc<Mutex<dyn Vaia>>> {
|
||||
let aia_device = KvmAiaImsics::new(self, config)
|
||||
.map_err(|e| vm::HypervisorVmError::CreateVaia(anyhow!("Vaia error {:?}", e)))?;
|
||||
.map_err(|e| vm::HypervisorVmError::CreateVaia(anyhow!("Vaia error {e:?}")))?;
|
||||
Ok(Arc::new(Mutex::new(aia_device)))
|
||||
}
|
||||
|
||||
|
|
@ -1997,8 +1997,7 @@ impl cpu::Vcpu for KvmVcpu {
|
|||
// tr.valid is set if the GVA is mapped to valid GPA.
|
||||
match tr.valid {
|
||||
0 => Err(cpu::HypervisorCpuError::TranslateVirtualAddress(anyhow!(
|
||||
"Invalid GVA: {:#x}",
|
||||
gva
|
||||
"Invalid GVA: {gva:#x}"
|
||||
))),
|
||||
_ => Ok((tr.physical_address, 0)),
|
||||
}
|
||||
|
|
@ -2081,16 +2080,14 @@ impl cpu::Vcpu for KvmVcpu {
|
|||
VcpuExit::Debug(_) => Ok(cpu::VmExit::Debug),
|
||||
|
||||
r => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
|
||||
"Unexpected exit reason on vcpu run: {:?}",
|
||||
r
|
||||
"Unexpected exit reason on vcpu run: {r:?}"
|
||||
))),
|
||||
},
|
||||
|
||||
Err(ref e) => match e.errno() {
|
||||
libc::EAGAIN | libc::EINTR => Ok(cpu::VmExit::Ignore),
|
||||
_ => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
|
||||
"VCPU error {:?}",
|
||||
e
|
||||
"VCPU error {e:?}"
|
||||
))),
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -100,10 +100,10 @@ impl CtrlQueue {
|
|||
} else if (queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN as u16)
|
||||
|| (queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX as u16)
|
||||
{
|
||||
warn!("Number of MQ pairs out of range: {}", queue_pairs);
|
||||
warn!("Number of MQ pairs out of range: {queue_pairs}");
|
||||
false
|
||||
} else {
|
||||
info!("Number of MQ pairs requested: {}", queue_pairs);
|
||||
info!("Number of MQ pairs requested: {queue_pairs}");
|
||||
true
|
||||
}
|
||||
}
|
||||
|
|
@ -118,10 +118,10 @@ impl CtrlQueue {
|
|||
} else {
|
||||
let mut ok = true;
|
||||
for tap in self.taps.iter_mut() {
|
||||
info!("Reprogramming tap offload with features: {}", features);
|
||||
info!("Reprogramming tap offload with features: {features}");
|
||||
tap.set_offload(virtio_features_to_tap_offload(features))
|
||||
.map_err(|e| {
|
||||
error!("Error programming tap offload: {:?}", e);
|
||||
error!("Error programming tap offload: {e:?}");
|
||||
ok = false
|
||||
})
|
||||
.ok();
|
||||
|
|
@ -130,7 +130,7 @@ impl CtrlQueue {
|
|||
}
|
||||
}
|
||||
_ => {
|
||||
warn!("Unsupported command {:?}", ctrl_hdr);
|
||||
warn!("Unsupported command {ctrl_hdr:?}");
|
||||
false
|
||||
}
|
||||
};
|
||||
|
|
|
|||
|
|
@ -83,10 +83,7 @@ impl MacAddr {
|
|||
// Generate a fully random MAC
|
||||
let mut random_bytes = [0u8; MAC_ADDR_LEN];
|
||||
if let Err(e) = getrandom::fill(&mut random_bytes) {
|
||||
error!(
|
||||
"Error populating MAC address with random data: {}",
|
||||
e.to_string()
|
||||
);
|
||||
error!("Error populating MAC address with random data: {e}");
|
||||
}
|
||||
|
||||
// Set the first byte to make the OUI a locally administered OUI
|
||||
|
|
|
|||
|
|
@ -105,7 +105,7 @@ impl TxVirtio {
|
|||
retry_write = true;
|
||||
break;
|
||||
}
|
||||
error!("net: tx: failed writing to tap: {}", e);
|
||||
error!("net: tx: failed writing to tap: {e}");
|
||||
return Err(NetQueuePairError::WriteTap(e));
|
||||
}
|
||||
|
||||
|
|
@ -246,7 +246,7 @@ impl RxVirtio {
|
|||
break;
|
||||
}
|
||||
|
||||
error!("net: rx: failed reading from tap: {}", e);
|
||||
error!("net: rx: failed reading from tap: {e}");
|
||||
return Err(NetQueuePairError::ReadTap(e));
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -662,7 +662,7 @@ impl PciConfiguration {
|
|||
if let Some(r) = self.registers.get_mut(reg_idx) {
|
||||
*r = (*r & !self.writable_bits[reg_idx]) | (value & mask);
|
||||
} else {
|
||||
warn!("bad PCI register write {}", reg_idx);
|
||||
warn!("bad PCI register write {reg_idx}");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -672,7 +672,7 @@ impl PciConfiguration {
|
|||
0 => 0,
|
||||
2 => 16,
|
||||
_ => {
|
||||
warn!("bad PCI config write offset {}", offset);
|
||||
warn!("bad PCI config write offset {offset}");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
|
@ -684,7 +684,7 @@ impl PciConfiguration {
|
|||
let shifted_value = (u32::from(value) << shift) & writable_mask;
|
||||
*r = *r & !mask | shifted_value;
|
||||
} else {
|
||||
warn!("bad PCI config write offset {}", offset);
|
||||
warn!("bad PCI config write offset {offset}");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -708,7 +708,7 @@ impl PciConfiguration {
|
|||
let shifted_value = (u32::from(value) << shift) & writable_mask;
|
||||
*r = *r & !mask | shifted_value;
|
||||
} else {
|
||||
warn!("bad PCI config write offset {}", offset);
|
||||
warn!("bad PCI config write offset {offset}");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -267,15 +267,15 @@ impl MsiConfig {
|
|||
self.cap.vector_masked(idx),
|
||||
true,
|
||||
) {
|
||||
error!("Failed updating vector: {:?}", e);
|
||||
error!("Failed updating vector: {e:?}");
|
||||
}
|
||||
}
|
||||
|
||||
if !old_enabled && let Err(e) = self.interrupt_source_group.enable() {
|
||||
error!("Failed enabling irq_fd: {:?}", e);
|
||||
error!("Failed enabling irq_fd: {e:?}");
|
||||
}
|
||||
} else if old_enabled && let Err(e) = self.interrupt_source_group.disable() {
|
||||
error!("Failed disabling irq_fd: {:?}", e);
|
||||
error!("Failed disabling irq_fd: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -186,13 +186,13 @@ impl MsixConfig {
|
|||
table_entry.masked(),
|
||||
true,
|
||||
) {
|
||||
error!("Failed updating vector: {:?}", e);
|
||||
error!("Failed updating vector: {e:?}");
|
||||
}
|
||||
}
|
||||
} else if old_enabled || !old_masked {
|
||||
debug!("MSI-X disabled for device 0x{:x}", self.devid);
|
||||
if let Err(e) = self.interrupt_source_group.disable() {
|
||||
error!("Failed disabling irq_fd: {:?}", e);
|
||||
error!("Failed disabling irq_fd: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -235,7 +235,7 @@ impl MsixConfig {
|
|||
}
|
||||
};
|
||||
|
||||
debug!("MSI_R TABLE offset 0x{:x} data 0x{:x}", offset, value);
|
||||
debug!("MSI_R TABLE offset 0x{offset:x} data 0x{value:x}");
|
||||
LittleEndian::write_u32(data, value);
|
||||
}
|
||||
8 => {
|
||||
|
|
@ -254,7 +254,7 @@ impl MsixConfig {
|
|||
}
|
||||
};
|
||||
|
||||
debug!("MSI_R TABLE offset 0x{:x} data 0x{:x}", offset, value);
|
||||
debug!("MSI_R TABLE offset 0x{offset:x} data 0x{value:x}");
|
||||
LittleEndian::write_u64(data, value);
|
||||
}
|
||||
_ => {
|
||||
|
|
@ -290,7 +290,7 @@ impl MsixConfig {
|
|||
_ => error!("invalid offset"),
|
||||
};
|
||||
|
||||
debug!("MSI_W TABLE offset 0x{:x} data 0x{:x}", offset, value);
|
||||
debug!("MSI_W TABLE offset 0x{offset:x} data 0x{value:x}");
|
||||
}
|
||||
8 => {
|
||||
let value = LittleEndian::read_u64(data);
|
||||
|
|
@ -306,7 +306,7 @@ impl MsixConfig {
|
|||
_ => error!("invalid offset"),
|
||||
};
|
||||
|
||||
debug!("MSI_W TABLE offset 0x{:x} data 0x{:x}", offset, value);
|
||||
debug!("MSI_W TABLE offset 0x{offset:x} data 0x{value:x}");
|
||||
}
|
||||
_ => error!("invalid data length"),
|
||||
};
|
||||
|
|
@ -336,7 +336,7 @@ impl MsixConfig {
|
|||
table_entry.masked(),
|
||||
true,
|
||||
) {
|
||||
error!("Failed updating vector: {:?}", e);
|
||||
error!("Failed updating vector: {e:?}");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -382,7 +382,7 @@ impl MsixConfig {
|
|||
}
|
||||
};
|
||||
|
||||
debug!("MSI_R PBA offset 0x{:x} data 0x{:x}", offset, value);
|
||||
debug!("MSI_R PBA offset 0x{offset:x} data 0x{value:x}");
|
||||
LittleEndian::write_u32(data, value);
|
||||
}
|
||||
8 => {
|
||||
|
|
@ -394,7 +394,7 @@ impl MsixConfig {
|
|||
}
|
||||
};
|
||||
|
||||
debug!("MSI_R PBA offset 0x{:x} data 0x{:x}", offset, value);
|
||||
debug!("MSI_R PBA offset 0x{offset:x} data 0x{value:x}");
|
||||
LittleEndian::write_u64(data, value);
|
||||
}
|
||||
_ => {
|
||||
|
|
@ -438,7 +438,7 @@ impl MsixConfig {
|
|||
.trigger(vector as InterruptIndex)
|
||||
{
|
||||
Ok(_) => debug!("MSI-X injected on vector control flip"),
|
||||
Err(e) => error!("failed to inject MSI-X: {}", e),
|
||||
Err(e) => error!("failed to inject MSI-X: {e}"),
|
||||
}
|
||||
|
||||
// Clear the bit from PBA
|
||||
|
|
|
|||
|
|
@ -474,8 +474,7 @@ impl VfioCommon {
|
|||
let pci_configuration_state =
|
||||
vm_migration::state_from_id(snapshot.as_ref(), PCI_CONFIGURATION_ID).map_err(|e| {
|
||||
VfioPciError::RetrievePciConfigurationState(anyhow!(
|
||||
"Failed to get PciConfigurationState from Snapshot: {}",
|
||||
e
|
||||
"Failed to get PciConfigurationState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -514,22 +513,19 @@ impl VfioCommon {
|
|||
.transpose()
|
||||
.map_err(|e| {
|
||||
VfioPciError::RetrieveVfioCommonState(anyhow!(
|
||||
"Failed to get VfioCommonState from Snapshot: {}",
|
||||
e
|
||||
"Failed to get VfioCommonState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
let msi_state =
|
||||
vm_migration::state_from_id(snapshot.as_ref(), MSI_CONFIG_ID).map_err(|e| {
|
||||
VfioPciError::RetrieveMsiConfigState(anyhow!(
|
||||
"Failed to get MsiConfigState from Snapshot: {}",
|
||||
e
|
||||
"Failed to get MsiConfigState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
let msix_state =
|
||||
vm_migration::state_from_id(snapshot.as_ref(), MSIX_CONFIG_ID).map_err(|e| {
|
||||
VfioPciError::RetrieveMsixConfigState(anyhow!(
|
||||
"Failed to get MsixConfigState from Snapshot: {}",
|
||||
e
|
||||
"Failed to get MsixConfigState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -1057,7 +1053,7 @@ impl VfioCommon {
|
|||
&& intx.enabled
|
||||
{
|
||||
if let Err(e) = self.vfio_wrapper.disable_irq(VFIO_PCI_INTX_IRQ_INDEX) {
|
||||
error!("Could not disable INTx: {}", e);
|
||||
error!("Could not disable INTx: {e}");
|
||||
} else {
|
||||
intx.enabled = false;
|
||||
}
|
||||
|
|
@ -1085,7 +1081,7 @@ impl VfioCommon {
|
|||
|
||||
pub(crate) fn disable_msi(&self) {
|
||||
if let Err(e) = self.vfio_wrapper.disable_msi() {
|
||||
error!("Could not disable MSI: {}", e);
|
||||
error!("Could not disable MSI: {e}");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1110,7 +1106,7 @@ impl VfioCommon {
|
|||
|
||||
pub(crate) fn disable_msix(&self) {
|
||||
if let Err(e) = self.vfio_wrapper.disable_msix() {
|
||||
error!("Could not disable MSI-X: {}", e);
|
||||
error!("Could not disable MSI-X: {e}");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1200,7 +1196,7 @@ impl VfioCommon {
|
|||
if self.interrupt.intx_in_use()
|
||||
&& let Err(e) = self.vfio_wrapper.unmask_irq(VFIO_PCI_INTX_IRQ_INDEX)
|
||||
{
|
||||
error!("Failed unmasking INTx IRQ: {}", e);
|
||||
error!("Failed unmasking INTx IRQ: {e}");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1228,7 +1224,7 @@ impl VfioCommon {
|
|||
if self.interrupt.intx_in_use()
|
||||
&& let Err(e) = self.vfio_wrapper.unmask_irq(VFIO_PCI_INTX_IRQ_INDEX)
|
||||
{
|
||||
error!("Failed unmasking INTx IRQ: {}", e);
|
||||
error!("Failed unmasking INTx IRQ: {e}");
|
||||
}
|
||||
|
||||
None
|
||||
|
|
@ -1267,12 +1263,12 @@ impl VfioCommon {
|
|||
match cap_id {
|
||||
PciCapabilityId::MessageSignalledInterrupts => {
|
||||
if let Err(e) = self.update_msi_capabilities(cap_offset, data) {
|
||||
error!("Could not update MSI capabilities: {}", e);
|
||||
error!("Could not update MSI capabilities: {e}");
|
||||
}
|
||||
}
|
||||
PciCapabilityId::MsiX => {
|
||||
if let Err(e) = self.update_msix_capabilities(cap_offset, data) {
|
||||
error!("Could not update MSI-X capabilities: {}", e);
|
||||
error!("Could not update MSI-X capabilities: {e}");
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
|
|
@ -1296,12 +1292,11 @@ impl VfioCommon {
|
|||
& crate::configuration::COMMAND_REG_MEMORY_SPACE_MASK
|
||||
== crate::configuration::COMMAND_REG_MEMORY_SPACE_MASK
|
||||
{
|
||||
info!("BAR reprogramming parameter is returned: {:x?}", ret_param);
|
||||
info!("BAR reprogramming parameter is returned: {ret_param:x?}");
|
||||
self.configuration.clear_pending_bar_reprogram();
|
||||
} else {
|
||||
info!(
|
||||
"MSE bit is disabled. No BAR reprogramming parameter is returned: {:x?}",
|
||||
ret_param
|
||||
"MSE bit is disabled. No BAR reprogramming parameter is returned: {ret_param:x?}"
|
||||
);
|
||||
|
||||
ret_param = Vec::new();
|
||||
|
|
@ -1513,16 +1508,12 @@ impl VfioPciDevice {
|
|||
VfioRegionInfoCap::MsixMappable => {
|
||||
if !is_4k_aligned(region_start) {
|
||||
error!(
|
||||
"Region start address 0x{:x} must be at least aligned on 4KiB",
|
||||
region_start
|
||||
"Region start address 0x{region_start:x} must be at least aligned on 4KiB"
|
||||
);
|
||||
return Err(VfioPciError::RegionAlignment);
|
||||
}
|
||||
if !is_4k_multiple(region_size) {
|
||||
error!(
|
||||
"Region size 0x{:x} must be at least a multiple of 4KiB",
|
||||
region_size
|
||||
);
|
||||
error!("Region size 0x{region_size:x} must be at least a multiple of 4KiB");
|
||||
return Err(VfioPciError::RegionSize);
|
||||
}
|
||||
|
||||
|
|
@ -1733,7 +1724,7 @@ impl VfioPciDevice {
|
|||
);
|
||||
|
||||
if let Err(e) = self.vm.remove_user_memory_region(r) {
|
||||
error!("Could not remove the userspace memory region: {}", e);
|
||||
error!("Could not remove the userspace memory region: {e}");
|
||||
}
|
||||
|
||||
self.memory_slot_allocator
|
||||
|
|
|
|||
|
|
@ -219,7 +219,7 @@ impl VfioUserPciDevice {
|
|||
);
|
||||
|
||||
if let Err(e) = self.vm.remove_user_memory_region(r) {
|
||||
error!("Could not remove the userspace memory region: {}", e);
|
||||
error!("Could not remove the userspace memory region: {e}");
|
||||
}
|
||||
|
||||
self.memory_slot_allocator
|
||||
|
|
@ -370,7 +370,7 @@ impl Vfio for VfioUserClientWrapper {
|
|||
}
|
||||
|
||||
fn disable_irq(&self, irq_index: u32) -> Result<(), VfioError> {
|
||||
info!("Disabling IRQ {:x}", irq_index);
|
||||
info!("Disabling IRQ {irq_index:x}");
|
||||
self.client
|
||||
.lock()
|
||||
.unwrap()
|
||||
|
|
@ -385,7 +385,7 @@ impl Vfio for VfioUserClientWrapper {
|
|||
}
|
||||
|
||||
fn unmask_irq(&self, irq_index: u32) -> Result<(), VfioError> {
|
||||
info!("Unmasking IRQ {:x}", irq_index);
|
||||
info!("Unmasking IRQ {irq_index:x}");
|
||||
self.client
|
||||
.lock()
|
||||
.unwrap()
|
||||
|
|
@ -448,7 +448,7 @@ impl PciDevice for VfioUserPciDevice {
|
|||
}
|
||||
|
||||
fn move_bar(&mut self, old_base: u64, new_base: u64) -> Result<(), std::io::Error> {
|
||||
info!("Moving BAR 0x{:x} -> 0x{:x}", old_base, new_base);
|
||||
info!("Moving BAR 0x{old_base:x} -> 0x{new_base:x}");
|
||||
for mmio_region in self.common.mmio_regions.iter_mut() {
|
||||
if mmio_region.start.raw_value() == old_base {
|
||||
mmio_region.start = GuestAddress(new_base);
|
||||
|
|
@ -489,7 +489,7 @@ impl PciDevice for VfioUserPciDevice {
|
|||
.create_user_memory_region(new_region)
|
||||
.map_err(std::io::Error::other)?;
|
||||
}
|
||||
info!("Moved bar 0x{:x} -> 0x{:x}", old_base, new_base);
|
||||
info!("Moved bar 0x{old_base:x} -> 0x{new_base:x}");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -522,7 +522,7 @@ impl Drop for VfioUserPciDevice {
|
|||
}
|
||||
|
||||
if let Err(e) = self.client.lock().unwrap().shutdown() {
|
||||
error!("Failed shutting down vfio-user client: {}", e);
|
||||
error!("Failed shutting down vfio-user client: {e}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -244,7 +244,7 @@ impl RateLimiterGroup {
|
|||
match dispatch_event {
|
||||
EpollDispatch::Unknown => {
|
||||
let event = event.data;
|
||||
warn!("Unknown rate-limiter loop event: {}", event);
|
||||
warn!("Unknown rate-limiter loop event: {event}");
|
||||
}
|
||||
EpollDispatch::Unblocked => {
|
||||
inner.rate_limiter.event_handler().unwrap();
|
||||
|
|
@ -267,7 +267,7 @@ impl RateLimiterGroup {
|
|||
match res {
|
||||
Ok(res) => {
|
||||
if let Err(e) = res {
|
||||
error!("Error running rate-limit-group worker: {:?}", e);
|
||||
error!("Error running rate-limit-group worker: {e:?}");
|
||||
exit_evt.write(1).unwrap();
|
||||
}
|
||||
}
|
||||
|
|
@ -291,7 +291,7 @@ impl Drop for RateLimiterGroup {
|
|||
if let Some(t) = self.epoll_thread.take()
|
||||
&& let Err(e) = t.join()
|
||||
{
|
||||
error!("Error joining thread: {:?}", e);
|
||||
error!("Error joining thread: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -136,7 +136,7 @@ impl log::Log for Logger {
|
|||
let duration_s = duration.as_secs_f32();
|
||||
|
||||
let location = if let (Some(file), Some(line)) = (record.file(), record.line()) {
|
||||
format!("{}:{}", file, line)
|
||||
format!("{file}:{line}")
|
||||
} else {
|
||||
record.target().to_string()
|
||||
};
|
||||
|
|
|
|||
|
|
@ -88,13 +88,12 @@ impl Emulator {
|
|||
pub fn new(path: String) -> Result<Self> {
|
||||
if !Path::new(&path).exists() {
|
||||
return Err(Error::InitializeEmulator(anyhow!(
|
||||
"The input TPM Socket path: {:?} does not exist",
|
||||
path
|
||||
"The input TPM Socket path: {path:?} does not exist"
|
||||
)));
|
||||
}
|
||||
let mut socket = SocketDev::new();
|
||||
socket.init(path).map_err(|e| {
|
||||
Error::InitializeEmulator(anyhow!("Failed while initializing tpm emulator: {:?}", e))
|
||||
Error::InitializeEmulator(anyhow!("Failed while initializing tpm emulator: {e:?}"))
|
||||
})?;
|
||||
|
||||
let mut emulator = Self {
|
||||
|
|
@ -217,41 +216,36 @@ impl Emulator {
|
|||
msg_len_in: usize,
|
||||
msg_len_out: usize,
|
||||
) -> Result<()> {
|
||||
debug!("Control Cmd to send : {:02X?}", cmd);
|
||||
debug!("Control Cmd to send : {cmd:02X?}");
|
||||
|
||||
let cmd_no = (cmd as u32).to_be_bytes();
|
||||
let n = mem::size_of::<u32>() + msg_len_in;
|
||||
|
||||
let converted_req = msg.ptm_to_request();
|
||||
debug!("converted request: {:02X?}", converted_req);
|
||||
debug!("converted request: {converted_req:02X?}");
|
||||
|
||||
let mut buf = Vec::<u8>::with_capacity(n);
|
||||
|
||||
buf.extend(cmd_no);
|
||||
buf.extend(converted_req);
|
||||
debug!("full Control request {:02X?}", buf);
|
||||
debug!("full Control request {buf:02X?}");
|
||||
|
||||
let written = self.control_socket.write(&buf).map_err(|e| {
|
||||
Error::RunControlCmd(anyhow!(
|
||||
"Failed while running {:02X?} Control Cmd. Error: {:?}",
|
||||
cmd,
|
||||
e
|
||||
"Failed while running {cmd:02X?} Control Cmd. Error: {e:?}"
|
||||
))
|
||||
})?;
|
||||
|
||||
if written < buf.len() {
|
||||
return Err(Error::RunControlCmd(anyhow!(
|
||||
"Truncated write while running {:02X?} Control Cmd",
|
||||
cmd,
|
||||
"Truncated write while running {cmd:02X?} Control Cmd",
|
||||
)));
|
||||
}
|
||||
|
||||
// The largest response is 16 bytes so far.
|
||||
if msg_len_out > 16 {
|
||||
return Err(Error::RunControlCmd(anyhow!(
|
||||
"Response size is too large for Cmd {:02X?}, max 16 wanted {}",
|
||||
cmd,
|
||||
msg_len_out
|
||||
"Response size is too large for Cmd {cmd:02X?}, max 16 wanted {msg_len_out}"
|
||||
)));
|
||||
}
|
||||
|
||||
|
|
@ -260,9 +254,7 @@ impl Emulator {
|
|||
// Every Control Cmd gets at least a result code in response. Read it
|
||||
let read_size = self.control_socket.read(&mut output).map_err(|e| {
|
||||
Error::RunControlCmd(anyhow!(
|
||||
"Failed while reading response for Control Cmd: {:02X?}. Error: {:?}",
|
||||
cmd,
|
||||
e
|
||||
"Failed while reading response for Control Cmd: {cmd:02X?}. Error: {e:?}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -270,9 +262,7 @@ impl Emulator {
|
|||
msg.update_ptm_with_response(&output[0..read_size])
|
||||
.map_err(|e| {
|
||||
Error::RunControlCmd(anyhow!(
|
||||
"Failed while converting response of Control Cmd: {:02X?} to PTM. Error: {:?}",
|
||||
cmd,
|
||||
e
|
||||
"Failed while converting response of Control Cmd: {cmd:02X?} to PTM. Error: {e:?}"
|
||||
))
|
||||
})?;
|
||||
} else {
|
||||
|
|
@ -303,10 +293,7 @@ impl Emulator {
|
|||
0,
|
||||
2 * mem::size_of::<u32>(),
|
||||
) {
|
||||
error!(
|
||||
"Failed to run CmdGetTpmEstablished Control Cmd. Error: {:?}",
|
||||
e
|
||||
);
|
||||
error!("Failed to run CmdGetTpmEstablished Control Cmd. Error: {e:?}");
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
@ -379,8 +366,7 @@ impl Emulator {
|
|||
|
||||
if isselftest && output_len < 10 {
|
||||
return Err(Error::SelfTest(anyhow!(
|
||||
"Self test response should have 10 bytes. Only {:?} returned",
|
||||
output_len
|
||||
"Self test response should have 10 bytes. Only {output_len:?} returned"
|
||||
)));
|
||||
}
|
||||
|
||||
|
|
@ -426,7 +412,7 @@ impl Emulator {
|
|||
|
||||
if buffersize != 0 {
|
||||
let actual_size = self.set_buffer_size(buffersize)?;
|
||||
debug!("set tpm buffersize to {:?} during Startup", actual_size);
|
||||
debug!("set tpm buffersize to {actual_size:?} during Startup");
|
||||
}
|
||||
|
||||
self.run_control_cmd(
|
||||
|
|
|
|||
|
|
@ -66,12 +66,12 @@ impl SocketDev {
|
|||
self.state = SocketDevState::Connecting;
|
||||
|
||||
let s = UnixStream::connect(socket_path).map_err(|e| {
|
||||
Error::ConnectToSocket(anyhow!("Failed to connect to tpm Socket. Error: {:?}", e))
|
||||
Error::ConnectToSocket(anyhow!("Failed to connect to tpm Socket. Error: {e:?}"))
|
||||
})?;
|
||||
self.control_fd = s.as_raw_fd();
|
||||
self.stream = Some(s);
|
||||
self.state = SocketDevState::Connected;
|
||||
debug!("Connected to tpm socket path : {:?}", socket_path);
|
||||
debug!("Connected to tpm socket path : {socket_path:?}");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
|
@ -92,7 +92,7 @@ impl SocketDev {
|
|||
.unwrap()
|
||||
.send_with_fd(buf, write_fd)
|
||||
.map_err(|e| {
|
||||
Error::WriteToSocket(anyhow!("Failed to write to Socket. Error: {:?}", e))
|
||||
Error::WriteToSocket(anyhow!("Failed to write to Socket. Error: {e:?}"))
|
||||
})?;
|
||||
|
||||
Ok(size)
|
||||
|
|
@ -129,7 +129,7 @@ impl SocketDev {
|
|||
}
|
||||
let mut socket = self.stream.as_ref().unwrap();
|
||||
let size: usize = socket.read(buf).map_err(|e| {
|
||||
Error::ReadFromSocket(anyhow!("Failed to read from socket. Error Code {:?}", e))
|
||||
Error::ReadFromSocket(anyhow!("Failed to read from socket. Error Code {e:?}"))
|
||||
})?;
|
||||
Ok(size)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -152,7 +152,7 @@ impl VhostUserBlkThread {
|
|||
.unwrap();
|
||||
}
|
||||
Err(err) => {
|
||||
error!("failed to parse available descriptor chain: {:?}", err);
|
||||
error!("failed to parse available descriptor chain: {err:?}");
|
||||
len = 0;
|
||||
}
|
||||
}
|
||||
|
|
@ -350,7 +350,7 @@ impl VhostUserBackendMut for VhostUserBlkBackend {
|
|||
return Err(Error::HandleEventNotEpollIn.into());
|
||||
}
|
||||
|
||||
debug!("event received: {:?}", device_event);
|
||||
debug!("event received: {device_event:?}");
|
||||
|
||||
let thread = self.threads[thread_id].get_mut().unwrap();
|
||||
match device_event {
|
||||
|
|
@ -531,20 +531,17 @@ pub fn start_block_backend(backend_command: &str) {
|
|||
debug!("blk_daemon is created!\n");
|
||||
|
||||
if let Err(e) = blk_daemon.start(listener) {
|
||||
error!(
|
||||
"Failed to start daemon for vhost-user-block with error: {:?}\n",
|
||||
e
|
||||
);
|
||||
error!("Failed to start daemon for vhost-user-block with error: {e:?}\n");
|
||||
process::exit(1);
|
||||
}
|
||||
|
||||
if let Err(e) = blk_daemon.wait() {
|
||||
error!("Error from the main thread: {:?}", e);
|
||||
error!("Error from the main thread: {e:?}");
|
||||
}
|
||||
|
||||
for thread in blk_backend.read().unwrap().threads.iter() {
|
||||
if let Err(e) = thread.lock().unwrap().kill_evt.write(1) {
|
||||
error!("Error shutting down worker thread: {:?}", e)
|
||||
error!("Error shutting down worker thread: {e:?}")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -396,20 +396,17 @@ pub fn start_net_backend(backend_command: &str) {
|
|||
} else {
|
||||
net_daemon.start(Listener::new(&backend_config.socket, true).unwrap())
|
||||
} {
|
||||
error!(
|
||||
"failed to start daemon for vhost-user-net with error: {:?}",
|
||||
e
|
||||
);
|
||||
error!("failed to start daemon for vhost-user-net with error: {e:?}");
|
||||
process::exit(1);
|
||||
}
|
||||
|
||||
if let Err(e) = net_daemon.wait() {
|
||||
error!("Error from the main thread: {:?}", e);
|
||||
error!("Error from the main thread: {e:?}");
|
||||
}
|
||||
|
||||
for thread in net_backend.read().unwrap().threads.iter() {
|
||||
if let Err(e) = thread.lock().unwrap().kill_evt.write(1) {
|
||||
error!("Error shutting down worker thread: {:?}", e)
|
||||
error!("Error shutting down worker thread: {e:?}")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -163,7 +163,7 @@ struct BalloonEpollHandler {
|
|||
impl BalloonEpollHandler {
|
||||
fn signal(&self, int_type: VirtioInterruptType) -> result::Result<(), Error> {
|
||||
self.interrupt_cb.trigger(int_type).map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
Error::FailedSignal(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -363,28 +363,24 @@ impl EpollHelperHandler for BalloonEpollHandler {
|
|||
INFLATE_QUEUE_EVENT => {
|
||||
self.inflate_queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to get inflate queue event: {:?}",
|
||||
e
|
||||
"Failed to get inflate queue event: {e:?}"
|
||||
))
|
||||
})?;
|
||||
self.process_queue(0).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used inflate queue: {:?}",
|
||||
e
|
||||
"Failed to signal used inflate queue: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
DEFLATE_QUEUE_EVENT => {
|
||||
self.deflate_queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to get deflate queue event: {:?}",
|
||||
e
|
||||
"Failed to get deflate queue event: {e:?}"
|
||||
))
|
||||
})?;
|
||||
self.process_queue(1).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used deflate queue: {:?}",
|
||||
e
|
||||
"Failed to signal used deflate queue: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
@ -392,14 +388,12 @@ impl EpollHelperHandler for BalloonEpollHandler {
|
|||
if let Some(reporting_queue_evt) = self.reporting_queue_evt.as_ref() {
|
||||
reporting_queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to get reporting queue event: {:?}",
|
||||
e
|
||||
"Failed to get reporting queue event: {e:?}"
|
||||
))
|
||||
})?;
|
||||
self.process_reporting_queue(2).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used inflate queue: {:?}",
|
||||
e
|
||||
"Failed to signal used inflate queue: {e:?}"
|
||||
))
|
||||
})?;
|
||||
} else {
|
||||
|
|
@ -450,7 +444,7 @@ impl Balloon {
|
|||
let mut queue_sizes = vec![QUEUE_SIZE; MIN_NUM_QUEUES];
|
||||
|
||||
let (avail_features, acked_features, config, paused) = if let Some(state) = state {
|
||||
info!("Restoring virtio-balloon {}", id);
|
||||
info!("Restoring virtio-balloon {id}");
|
||||
(
|
||||
state.avail_features,
|
||||
state.acked_features,
|
||||
|
|
|
|||
|
|
@ -178,7 +178,7 @@ impl BlockEpollHandler {
|
|||
// "A device MUST set the status byte to VIRTIO_BLK_S_IOERR for a write request
|
||||
// if the VIRTIO_BLK_F_RO feature if offered, and MUST NOT write any data."
|
||||
if let Err(e) = Self::check_request(self.acked_features, request.request_type) {
|
||||
warn!("Request check failed: {:x?} {:?}", request, e);
|
||||
warn!("Request check failed: {request:x?} {e:?}");
|
||||
desc_chain
|
||||
.memory()
|
||||
.write_obj(VIRTIO_BLK_S_IOERR, request.status_addr)
|
||||
|
|
@ -257,7 +257,7 @@ impl BlockEpollHandler {
|
|||
let status = match result {
|
||||
Ok(_) => VIRTIO_BLK_S_OK,
|
||||
Err(e) => {
|
||||
warn!("Request failed: {:x?} {:?}", request, e);
|
||||
warn!("Request failed: {request:x?} {e:?}");
|
||||
VIRTIO_BLK_S_IOERR
|
||||
}
|
||||
};
|
||||
|
|
@ -285,10 +285,7 @@ impl BlockEpollHandler {
|
|||
Err(e) => {
|
||||
// If batch submission fails, report VIRTIO_BLK_S_IOERR for all requests.
|
||||
for (user_data, request) in batch_inflight_requests {
|
||||
warn!(
|
||||
"Request failed with batch submission: {:x?} {:?}",
|
||||
request, e
|
||||
);
|
||||
warn!("Request failed with batch submission: {request:x?} {e:?}");
|
||||
let desc_index = user_data;
|
||||
let mem = self.mem.memory();
|
||||
mem.write_obj(VIRTIO_BLK_S_IOERR as u8, request.status_addr)
|
||||
|
|
@ -311,14 +308,11 @@ impl BlockEpollHandler {
|
|||
.queue
|
||||
.needs_notification(self.mem.memory().deref())
|
||||
.map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to check needs_notification: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to check needs_notification: {e:?}"))
|
||||
})?
|
||||
{
|
||||
self.signal_used_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
|
||||
|
|
@ -327,7 +321,7 @@ impl BlockEpollHandler {
|
|||
|
||||
fn process_queue_submit_and_signal(&mut self) -> result::Result<(), EpollHelperError> {
|
||||
self.process_queue_submit().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue (submit): {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue (submit): {e:?}"))
|
||||
})?;
|
||||
|
||||
self.try_signal_used_queue()
|
||||
|
|
@ -493,7 +487,7 @@ impl BlockEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(self.queue_index))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -561,7 +555,7 @@ impl EpollHelperHandler for BlockEpollHandler {
|
|||
match ev_type {
|
||||
QUEUE_AVAIL_EVENT => {
|
||||
self.queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
|
||||
let rate_limit_reached = self.rate_limiter.as_ref().is_some_and(|r| r.is_blocked());
|
||||
|
|
@ -573,13 +567,12 @@ impl EpollHelperHandler for BlockEpollHandler {
|
|||
}
|
||||
COMPLETION_EVENT => {
|
||||
self.disk_image.notifier().read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
|
||||
self.process_queue_complete().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process queue (complete): {:?}",
|
||||
e
|
||||
"Failed to process queue (complete): {e:?}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -589,8 +582,7 @@ impl EpollHelperHandler for BlockEpollHandler {
|
|||
if !rate_limit_reached {
|
||||
self.process_queue_submit().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process queue (submit): {:?}",
|
||||
e
|
||||
"Failed to process queue (submit): {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
@ -602,8 +594,7 @@ impl EpollHelperHandler for BlockEpollHandler {
|
|||
// and restart processing the queue.
|
||||
rate_limiter.event_handler().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process rate limiter event: {:?}",
|
||||
e
|
||||
"Failed to process rate limiter event: {e:?}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -616,8 +607,7 @@ impl EpollHelperHandler for BlockEpollHandler {
|
|||
}
|
||||
_ => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Unexpected event: {}",
|
||||
ev_type
|
||||
"Unexpected event: {ev_type}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
|
@ -671,7 +661,7 @@ impl Block {
|
|||
) -> io::Result<Self> {
|
||||
let (disk_nsectors, avail_features, acked_features, config, paused) =
|
||||
if let Some(state) = state {
|
||||
info!("Restoring virtio-block {}", id);
|
||||
info!("Restoring virtio-block {id}");
|
||||
(
|
||||
state.disk_nsectors,
|
||||
state.avail_features,
|
||||
|
|
@ -685,9 +675,8 @@ impl Block {
|
|||
.map_err(|e| io::Error::other(format!("Failed getting disk size: {e}")))?;
|
||||
if disk_size % SECTOR_SIZE != 0 {
|
||||
warn!(
|
||||
"Disk size {} is not a multiple of sector size {}; \
|
||||
the remainder will not be visible to the guest.",
|
||||
disk_size, SECTOR_SIZE
|
||||
"Disk size {disk_size} is not a multiple of sector size {SECTOR_SIZE}; \
|
||||
the remainder will not be visible to the guest."
|
||||
);
|
||||
}
|
||||
|
||||
|
|
@ -708,7 +697,7 @@ impl Block {
|
|||
}
|
||||
|
||||
let topology = disk_image.topology();
|
||||
info!("Disk topology: {:?}", topology);
|
||||
info!("Disk topology: {topology:?}");
|
||||
|
||||
let logical_block_size = if topology.logical_block_size > 512 {
|
||||
topology.logical_block_size
|
||||
|
|
@ -939,7 +928,7 @@ impl VirtioDevice for Block {
|
|||
.disk_image
|
||||
.new_async_io(queue_size as u32)
|
||||
.map_err(|e| {
|
||||
error!("failed to create new AsyncIo: {}", e);
|
||||
error!("failed to create new AsyncIo: {e}");
|
||||
ActivateError::BadActivate
|
||||
})?,
|
||||
disk_nsectors: self.disk_nsectors,
|
||||
|
|
|
|||
|
|
@ -275,7 +275,7 @@ impl ConsoleEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(queue_index))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -328,7 +328,7 @@ impl ConsoleEpollHandler {
|
|||
}
|
||||
pty_write_out.store(true, Ordering::Release);
|
||||
out.flush()
|
||||
.map_err(|e| anyhow!("Failed to flush PTY: {:?}", e))
|
||||
.map_err(|e| anyhow!("Failed to flush PTY: {e:?}"))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
|
|
@ -365,52 +365,39 @@ impl EpollHelperHandler for ConsoleEpollHandler {
|
|||
match ev_type {
|
||||
INPUT_QUEUE_EVENT => {
|
||||
self.input_queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
let needs_notification = self.process_input_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process input queue : {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process input queue : {e:?}"))
|
||||
})?;
|
||||
if needs_notification {
|
||||
self.signal_used_queue(0).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
OUTPUT_QUEUE_EVENT => {
|
||||
self.output_queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
let needs_notification = self.process_output_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process output queue : {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process output queue : {e:?}"))
|
||||
})?;
|
||||
if needs_notification {
|
||||
self.signal_used_queue(1).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
CONFIG_EVENT => {
|
||||
self.config_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get config event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get config event: {e:?}"))
|
||||
})?;
|
||||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Config)
|
||||
.map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal console driver: {:?}",
|
||||
e
|
||||
"Failed to signal console driver: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
@ -420,10 +407,7 @@ impl EpollHelperHandler for ConsoleEpollHandler {
|
|||
.unwrap()
|
||||
.read_exact(&mut [0])
|
||||
.map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to get resize event: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get resize event: {e:?}"))
|
||||
})?;
|
||||
self.resizer.update_console_size();
|
||||
}
|
||||
|
|
@ -438,15 +422,13 @@ impl EpollHelperHandler for ConsoleEpollHandler {
|
|||
|
||||
let needs_notification = self.process_input_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process input queue : {:?}",
|
||||
e
|
||||
"Failed to process input queue : {e:?}"
|
||||
))
|
||||
})?;
|
||||
if needs_notification {
|
||||
self.signal_used_queue(0).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
"Failed to signal used queue: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
@ -614,7 +596,7 @@ impl Console {
|
|||
) -> io::Result<(Console, Arc<ConsoleResizer>)> {
|
||||
let (avail_features, acked_features, config, in_buffer, paused) = if let Some(state) = state
|
||||
{
|
||||
info!("Restoring virtio-console {}", id);
|
||||
info!("Restoring virtio-console {id}");
|
||||
(
|
||||
state.avail_features,
|
||||
state.acked_features,
|
||||
|
|
@ -733,7 +715,7 @@ impl VirtioDevice for Console {
|
|||
if self.common.feature_acked(VIRTIO_CONSOLE_F_SIZE)
|
||||
&& let Err(e) = interrupt_cb.trigger(VirtioInterruptType::Config)
|
||||
{
|
||||
error!("Failed to signal console driver: {:?}", e);
|
||||
error!("Failed to signal console driver: {e:?}");
|
||||
}
|
||||
|
||||
let (kill_evt, pause_evt) = self.common.dup_eventfds();
|
||||
|
|
|
|||
|
|
@ -233,13 +233,13 @@ impl VirtioCommon {
|
|||
}
|
||||
|
||||
let kill_evt = EventFd::new(EFD_NONBLOCK).map_err(|e| {
|
||||
error!("failed creating kill EventFd: {}", e);
|
||||
error!("failed creating kill EventFd: {e}");
|
||||
ActivateError::BadActivate
|
||||
})?;
|
||||
self.kill_evt = Some(kill_evt);
|
||||
|
||||
let pause_evt = EventFd::new(EFD_NONBLOCK).map_err(|e| {
|
||||
error!("failed creating pause EventFd: {}", e);
|
||||
error!("failed creating pause EventFd: {e}");
|
||||
ActivateError::BadActivate
|
||||
})?;
|
||||
self.pause_evt = Some(pause_evt);
|
||||
|
|
@ -265,7 +265,7 @@ impl VirtioCommon {
|
|||
if let Some(mut threads) = self.epoll_threads.take() {
|
||||
for t in threads.drain(..) {
|
||||
if let Err(e) = t.join() {
|
||||
error!("Error joining thread: {:?}", e);
|
||||
error!("Error joining thread: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -279,7 +279,7 @@ impl VirtioCommon {
|
|||
if let Some(mut threads) = self.epoll_threads.take() {
|
||||
for t in threads.drain(..) {
|
||||
if let Err(e) = t.join() {
|
||||
error!("Error joining thread: {:?}", e);
|
||||
error!("Error joining thread: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -394,7 +394,7 @@ impl Request {
|
|||
.memory()
|
||||
.read_obj(req_addr as GuestAddress)
|
||||
.map_err(Error::GuestMemory)?;
|
||||
debug!("Attach request 0x{:x?}", req);
|
||||
debug!("Attach request 0x{req:x?}");
|
||||
|
||||
// Copy the value to use it as a proper reference.
|
||||
let domain_id = req.domain;
|
||||
|
|
@ -448,7 +448,7 @@ impl Request {
|
|||
.memory()
|
||||
.read_obj(req_addr as GuestAddress)
|
||||
.map_err(Error::GuestMemory)?;
|
||||
debug!("Detach request 0x{:x?}", req);
|
||||
debug!("Detach request 0x{req:x?}");
|
||||
|
||||
// Copy the value to use it as a proper reference.
|
||||
let domain_id = req.domain;
|
||||
|
|
@ -467,7 +467,7 @@ impl Request {
|
|||
.memory()
|
||||
.read_obj(req_addr as GuestAddress)
|
||||
.map_err(Error::GuestMemory)?;
|
||||
debug!("Map request 0x{:x?}", req);
|
||||
debug!("Map request 0x{req:x?}");
|
||||
|
||||
// Copy the value to use it as a proper reference.
|
||||
let domain_id = req.domain;
|
||||
|
|
@ -530,7 +530,7 @@ impl Request {
|
|||
.memory()
|
||||
.read_obj(req_addr as GuestAddress)
|
||||
.map_err(Error::GuestMemory)?;
|
||||
debug!("Unmap request 0x{:x?}", req);
|
||||
debug!("Unmap request 0x{req:x?}");
|
||||
|
||||
// Copy the value to use it as a proper reference.
|
||||
let domain_id = req.domain;
|
||||
|
|
@ -586,7 +586,7 @@ impl Request {
|
|||
.memory()
|
||||
.read_obj(req_addr as GuestAddress)
|
||||
.map_err(Error::GuestMemory)?;
|
||||
debug!("Probe request 0x{:x?}", req);
|
||||
debug!("Probe request 0x{req:x?}");
|
||||
|
||||
let probe_prop = VirtioIommuProbeProperty {
|
||||
type_: VIRTIO_IOMMU_PROBE_T_RESV_MEM,
|
||||
|
|
@ -718,7 +718,7 @@ impl IommuEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(queue_index))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -746,28 +746,23 @@ impl EpollHelperHandler for IommuEpollHandler {
|
|||
match ev_type {
|
||||
REQUEST_Q_EVENT => {
|
||||
self.request_queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
|
||||
let needs_notification = self.request_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process request queue : {:?}",
|
||||
e
|
||||
"Failed to process request queue : {e:?}"
|
||||
))
|
||||
})?;
|
||||
if needs_notification {
|
||||
self.signal_used_queue(0).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Unexpected event: {}",
|
||||
ev_type
|
||||
"Unexpected event: {ev_type}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
|
@ -800,7 +795,7 @@ pub struct IommuMapping {
|
|||
|
||||
impl DmaRemapping for IommuMapping {
|
||||
fn translate_gva(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error> {
|
||||
debug!("Translate GVA addr 0x{:x}", addr);
|
||||
debug!("Translate GVA addr 0x{addr:x}");
|
||||
if let Some(domain_id) = self.endpoints.read().unwrap().get(&id) {
|
||||
if let Some(domain) = self.domains.read().unwrap().get(domain_id) {
|
||||
// Directly return identity mapping in case the domain is in
|
||||
|
|
@ -812,7 +807,7 @@ impl DmaRemapping for IommuMapping {
|
|||
for (&key, &value) in domain.mappings.iter() {
|
||||
if addr >= key && addr < key + value.size {
|
||||
let new_addr = addr - key + value.gpa;
|
||||
debug!("Into GPA addr 0x{:x}", new_addr);
|
||||
debug!("Into GPA addr 0x{new_addr:x}");
|
||||
return Ok(new_addr);
|
||||
}
|
||||
}
|
||||
|
|
@ -827,7 +822,7 @@ impl DmaRemapping for IommuMapping {
|
|||
}
|
||||
|
||||
fn translate_gpa(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error> {
|
||||
debug!("Translate GPA addr 0x{:x}", addr);
|
||||
debug!("Translate GPA addr 0x{addr:x}");
|
||||
if let Some(domain_id) = self.endpoints.read().unwrap().get(&id) {
|
||||
if let Some(domain) = self.domains.read().unwrap().get(domain_id) {
|
||||
// Directly return identity mapping in case the domain is in
|
||||
|
|
@ -839,7 +834,7 @@ impl DmaRemapping for IommuMapping {
|
|||
for (&key, &value) in domain.mappings.iter() {
|
||||
if addr >= value.gpa && addr < value.gpa + value.size {
|
||||
let new_addr = addr - value.gpa + key;
|
||||
debug!("Into GVA addr 0x{:x}", new_addr);
|
||||
debug!("Into GVA addr 0x{new_addr:x}");
|
||||
return Ok(new_addr);
|
||||
}
|
||||
}
|
||||
|
|
@ -908,7 +903,7 @@ impl Iommu {
|
|||
) -> io::Result<(Self, Arc<IommuMapping>)> {
|
||||
let (mut avail_features, acked_features, endpoints, domains, paused) =
|
||||
if let Some(state) = state {
|
||||
info!("Restoring virtio-iommu {}", id);
|
||||
info!("Restoring virtio-iommu {id}");
|
||||
(
|
||||
state.avail_features,
|
||||
state.acked_features,
|
||||
|
|
@ -1015,7 +1010,7 @@ impl Iommu {
|
|||
}
|
||||
|
||||
let bypass = self.config.bypass == 1;
|
||||
info!("Updating bypass mode to {}", bypass);
|
||||
info!("Updating bypass mode to {bypass}");
|
||||
self.mapping.bypass.store(bypass, Ordering::Release);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -235,8 +235,7 @@ impl VirtioMemConfig {
|
|||
fn resize(&mut self, size: u64) -> result::Result<(), Error> {
|
||||
if self.requested_size == size {
|
||||
return Err(Error::ResizeError(anyhow!(
|
||||
"new size 0x{:x} and requested_size are identical",
|
||||
size
|
||||
"new size 0x{size:x} and requested_size are identical"
|
||||
)));
|
||||
} else if size > self.region_size {
|
||||
return Err(Error::ResizeError(anyhow!(
|
||||
|
|
@ -426,7 +425,7 @@ impl MemEpollHandler {
|
|||
};
|
||||
if res != 0 {
|
||||
let err = io::Error::last_os_error();
|
||||
error!("Deallocating file space failed: {}", err);
|
||||
error!("Deallocating file space failed: {err}");
|
||||
return Err(Error::DiscardMemoryRange(err));
|
||||
}
|
||||
}
|
||||
|
|
@ -444,7 +443,7 @@ impl MemEpollHandler {
|
|||
};
|
||||
if res != 0 {
|
||||
let err = io::Error::last_os_error();
|
||||
error!("Advising kernel about pages range failed: {}", err);
|
||||
error!("Advising kernel about pages range failed: {err}");
|
||||
return Err(Error::DiscardMemoryRange(err));
|
||||
}
|
||||
}
|
||||
|
|
@ -476,7 +475,7 @@ impl MemEpollHandler {
|
|||
}
|
||||
|
||||
if !plug && let Err(e) = self.discard_memory_range(offset, size) {
|
||||
error!("failed discarding memory range: {:?}", e);
|
||||
error!("failed discarding memory range: {e:?}");
|
||||
return VIRTIO_MEM_RESP_ERROR;
|
||||
}
|
||||
|
||||
|
|
@ -506,10 +505,7 @@ impl MemEpollHandler {
|
|||
} else {
|
||||
for (_, handler) in handlers.iter() {
|
||||
if let Err(e) = handler.unmap(addr, size) {
|
||||
error!(
|
||||
"failed DMA unmapping addr 0x{:x} size 0x{:x}: {}",
|
||||
addr, size, e
|
||||
);
|
||||
error!("failed DMA unmapping addr 0x{addr:x} size 0x{size:x}: {e}");
|
||||
return VIRTIO_MEM_RESP_ERROR;
|
||||
}
|
||||
}
|
||||
|
|
@ -523,7 +519,7 @@ impl MemEpollHandler {
|
|||
fn unplug_all(&mut self) -> u16 {
|
||||
let mut config = self.config.lock().unwrap();
|
||||
if let Err(e) = self.discard_memory_range(0, config.region_size) {
|
||||
error!("failed discarding memory range: {:?}", e);
|
||||
error!("failed discarding memory range: {e:?}");
|
||||
return VIRTIO_MEM_RESP_ERROR;
|
||||
}
|
||||
|
||||
|
|
@ -592,7 +588,7 @@ impl MemEpollHandler {
|
|||
|
||||
fn signal(&self, int_type: VirtioInterruptType) -> result::Result<(), DeviceError> {
|
||||
self.interrupt_cb.trigger(int_type).map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -650,25 +646,21 @@ impl EpollHelperHandler for MemEpollHandler {
|
|||
match ev_type {
|
||||
QUEUE_AVAIL_EVENT => {
|
||||
self.queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
|
||||
let needs_notification = self.process_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {e:?}"))
|
||||
})?;
|
||||
if needs_notification {
|
||||
self.signal(VirtioInterruptType::Queue(0)).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Unexpected event: {}",
|
||||
ev_type
|
||||
"Unexpected event: {ev_type}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
|
@ -727,7 +719,7 @@ impl Mem {
|
|||
}
|
||||
|
||||
let (avail_features, acked_features, config, paused) = if let Some(state) = state {
|
||||
info!("Restoring virtio-mem {}", id);
|
||||
info!("Restoring virtio-mem {id}");
|
||||
*(blocks_state.lock().unwrap()) = state.blocks_state.clone();
|
||||
(
|
||||
state.avail_features,
|
||||
|
|
@ -801,14 +793,14 @@ impl Mem {
|
|||
pub fn resize(&mut self, size: u64) -> result::Result<(), Error> {
|
||||
let mut config = self.config.lock().unwrap();
|
||||
config.resize(size).map_err(|e| {
|
||||
Error::ResizeError(anyhow!("Failed to update virtio configuration: {:?}", e))
|
||||
Error::ResizeError(anyhow!("Failed to update virtio configuration: {e:?}"))
|
||||
})?;
|
||||
|
||||
if let Some(interrupt_cb) = self.interrupt_cb.as_ref() {
|
||||
interrupt_cb
|
||||
.trigger(VirtioInterruptType::Config)
|
||||
.map_err(|e| {
|
||||
Error::ResizeError(anyhow!("Failed to signal the guest about resize: {:?}", e))
|
||||
Error::ResizeError(anyhow!("Failed to signal the guest about resize: {e:?}"))
|
||||
})
|
||||
} else {
|
||||
Ok(())
|
||||
|
|
|
|||
|
|
@ -66,7 +66,7 @@ impl NetCtrlEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(queue_index))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -96,32 +96,28 @@ impl EpollHelperHandler for NetCtrlEpollHandler {
|
|||
let mem = self.mem.memory();
|
||||
self.queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to get control queue event: {:?}",
|
||||
e
|
||||
"Failed to get control queue event: {e:?}"
|
||||
))
|
||||
})?;
|
||||
self.ctrl_q
|
||||
.process(mem.deref(), &mut self.queue, self.access_platform.as_ref())
|
||||
.map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process control queue: {:?}",
|
||||
e
|
||||
"Failed to process control queue: {e:?}"
|
||||
))
|
||||
})?;
|
||||
match self.queue.needs_notification(mem.deref()) {
|
||||
Ok(true) => {
|
||||
self.signal_used_queue(self.queue_index).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Error signalling that control queue was used: {:?}",
|
||||
e
|
||||
"Error signalling that control queue was used: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
Ok(false) => {}
|
||||
Err(e) => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Error getting notification state of control queue: {}",
|
||||
e
|
||||
"Error getting notification state of control queue: {e}"
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
|
@ -184,7 +180,7 @@ impl NetEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(queue_index))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -192,7 +188,7 @@ impl NetEpollHandler {
|
|||
fn handle_rx_event(&mut self) -> result::Result<(), DeviceError> {
|
||||
let queue_evt = &self.queue_evt_pair.0;
|
||||
if let Err(e) = queue_evt.read() {
|
||||
error!("Failed to get rx queue event: {:?}", e);
|
||||
error!("Failed to get rx queue event: {e:?}");
|
||||
}
|
||||
|
||||
self.net.rx_desc_avail = true;
|
||||
|
|
@ -316,30 +312,29 @@ impl EpollHelperHandler for NetEpollHandler {
|
|||
RX_QUEUE_EVENT => {
|
||||
self.driver_awake = true;
|
||||
self.handle_rx_event().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing RX queue: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing RX queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
TX_QUEUE_EVENT => {
|
||||
let queue_evt = &self.queue_evt_pair.1;
|
||||
if let Err(e) = queue_evt.read() {
|
||||
error!("Failed to get tx queue event: {:?}", e);
|
||||
error!("Failed to get tx queue event: {e:?}");
|
||||
}
|
||||
self.driver_awake = true;
|
||||
self.handle_tx_event().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing TX queue: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing TX queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
TX_TAP_EVENT => {
|
||||
self.handle_tx_event().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Error processing TX queue (TAP event): {:?}",
|
||||
e
|
||||
"Error processing TX queue (TAP event): {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
RX_TAP_EVENT => {
|
||||
self.handle_rx_tap_event().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing tap queue: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing tap queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
RX_RATE_LIMITER_EVENT => {
|
||||
|
|
@ -348,8 +343,7 @@ impl EpollHelperHandler for NetEpollHandler {
|
|||
// TAP fd for further processing if some RX buffers are available
|
||||
rate_limiter.event_handler().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Error from 'rate_limiter.event_handler()': {:?}",
|
||||
e
|
||||
"Error from 'rate_limiter.event_handler()': {e:?}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -362,8 +356,7 @@ impl EpollHelperHandler for NetEpollHandler {
|
|||
)
|
||||
.map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Error register_listener with `RX_RATE_LIMITER_EVENT`: {:?}",
|
||||
e
|
||||
"Error register_listener with `RX_RATE_LIMITER_EVENT`: {e:?}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -381,14 +374,13 @@ impl EpollHelperHandler for NetEpollHandler {
|
|||
// and restart processing the queue.
|
||||
rate_limiter.event_handler().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Error from 'rate_limiter.event_handler()': {:?}",
|
||||
e
|
||||
"Error from 'rate_limiter.event_handler()': {e:?}"
|
||||
))
|
||||
})?;
|
||||
|
||||
self.driver_awake = true;
|
||||
self.process_tx().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing TX queue: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing TX queue: {e:?}"))
|
||||
})?;
|
||||
} else {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
|
|
@ -398,8 +390,7 @@ impl EpollHelperHandler for NetEpollHandler {
|
|||
}
|
||||
_ => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Unexpected event: {}",
|
||||
ev_type
|
||||
"Unexpected event: {ev_type}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
|
@ -452,7 +443,7 @@ impl Net {
|
|||
let (avail_features, acked_features, config, queue_sizes, paused) = if let Some(state) =
|
||||
state
|
||||
{
|
||||
info!("Restoring virtio-net {}", id);
|
||||
info!("Restoring virtio-net {id}");
|
||||
(
|
||||
state.avail_features,
|
||||
state.acked_features,
|
||||
|
|
@ -670,7 +661,7 @@ impl Drop for Net {
|
|||
if let Some(thread) = self.ctrl_queue_epoll_thread.take()
|
||||
&& let Err(e) = thread.join()
|
||||
{
|
||||
error!("Error joining thread: {:?}", e);
|
||||
error!("Error joining thread: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -777,7 +768,7 @@ impl VirtioDevice for Net {
|
|||
#[cfg(not(fuzzing))]
|
||||
tap.set_offload(virtio_features_to_tap_offload(self.common.acked_features))
|
||||
.map_err(|e| {
|
||||
error!("Error programming tap offload: {:?}", e);
|
||||
error!("Error programming tap offload: {e:?}");
|
||||
ActivateError::BadActivate
|
||||
})?;
|
||||
|
||||
|
|
|
|||
|
|
@ -170,7 +170,7 @@ impl PmemEpollHandler {
|
|||
let status_code = match self.disk.sync_all() {
|
||||
Ok(()) => VIRTIO_PMEM_RESP_TYPE_OK,
|
||||
Err(e) => {
|
||||
error!("failed flushing disk image: {}", e);
|
||||
error!("failed flushing disk image: {e}");
|
||||
VIRTIO_PMEM_RESP_TYPE_EIO
|
||||
}
|
||||
};
|
||||
|
|
@ -179,7 +179,7 @@ impl PmemEpollHandler {
|
|||
match desc_chain.memory().write_obj(resp, req.status_addr) {
|
||||
Ok(_) => size_of::<VirtioPmemResp>() as u32,
|
||||
Err(e) => {
|
||||
error!("bad guest memory address: {}", e);
|
||||
error!("bad guest memory address: {e}");
|
||||
0
|
||||
}
|
||||
}
|
||||
|
|
@ -190,7 +190,7 @@ impl PmemEpollHandler {
|
|||
0
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to parse available descriptor chain: {:?}", e);
|
||||
error!("Failed to parse available descriptor chain: {e:?}");
|
||||
0
|
||||
}
|
||||
};
|
||||
|
|
@ -208,7 +208,7 @@ impl PmemEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(0))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -236,26 +236,22 @@ impl EpollHelperHandler for PmemEpollHandler {
|
|||
match ev_type {
|
||||
QUEUE_AVAIL_EVENT => {
|
||||
self.queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
|
||||
let needs_notification = self.process_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {e:?}"))
|
||||
})?;
|
||||
|
||||
if needs_notification {
|
||||
self.signal_used_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Unexpected event: {}",
|
||||
ev_type
|
||||
"Unexpected event: {ev_type}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
|
@ -298,7 +294,7 @@ impl Pmem {
|
|||
state: Option<PmemState>,
|
||||
) -> io::Result<Pmem> {
|
||||
let (avail_features, acked_features, config, paused) = if let Some(state) = state {
|
||||
info!("Restoring virtio-pmem {}", id);
|
||||
info!("Restoring virtio-pmem {id}");
|
||||
(
|
||||
state.avail_features,
|
||||
state.acked_features,
|
||||
|
|
@ -395,7 +391,7 @@ impl VirtioDevice for Pmem {
|
|||
let (kill_evt, pause_evt) = self.common.dup_eventfds();
|
||||
if let Some(disk) = self.disk.as_ref() {
|
||||
let disk = disk.try_clone().map_err(|e| {
|
||||
error!("failed cloning pmem disk: {}", e);
|
||||
error!("failed cloning pmem disk: {e}");
|
||||
ActivateError::BadActivate
|
||||
})?;
|
||||
|
||||
|
|
|
|||
|
|
@ -95,7 +95,7 @@ impl RngEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(0))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -123,24 +123,20 @@ impl EpollHelperHandler for RngEpollHandler {
|
|||
match ev_type {
|
||||
QUEUE_AVAIL_EVENT => {
|
||||
self.queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
let needs_notification = self.process_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {e:?}"))
|
||||
})?;
|
||||
if needs_notification {
|
||||
self.signal_used_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Unexpected event: {}",
|
||||
ev_type
|
||||
"Unexpected event: {ev_type}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
|
@ -176,7 +172,7 @@ impl Rng {
|
|||
let random_file = File::open(path)?;
|
||||
|
||||
let (avail_features, acked_features, paused) = if let Some(state) = state {
|
||||
info!("Restoring virtio-rng {}", id);
|
||||
info!("Restoring virtio-rng {id}");
|
||||
(state.avail_features, state.acked_features, true)
|
||||
} else {
|
||||
let mut avail_features = 1u64 << VIRTIO_F_VERSION_1;
|
||||
|
|
@ -257,7 +253,7 @@ impl VirtioDevice for Rng {
|
|||
|
||||
if let Some(file) = self.random_file.as_ref() {
|
||||
let random_file = file.try_clone().map_err(|e| {
|
||||
error!("failed cloning rng source: {}", e);
|
||||
error!("failed cloning rng source: {e}");
|
||||
ActivateError::BadActivate
|
||||
})?;
|
||||
|
||||
|
|
|
|||
|
|
@ -39,18 +39,18 @@ where
|
|||
if !seccomp_filter.is_empty()
|
||||
&& let Err(e) = apply_filter(&seccomp_filter)
|
||||
{
|
||||
error!("Error applying seccomp filter: {:?}", e);
|
||||
error!("Error applying seccomp filter: {e:?}");
|
||||
thread_exit_evt.write(1).ok();
|
||||
return;
|
||||
}
|
||||
match std::panic::catch_unwind(AssertUnwindSafe(f)) {
|
||||
Err(_) => {
|
||||
error!("{} thread panicked", thread_name);
|
||||
error!("{thread_name} thread panicked");
|
||||
thread_exit_evt.write(1).ok();
|
||||
}
|
||||
Ok(r) => {
|
||||
if let Err(e) = r {
|
||||
error!("Error running worker: {:?}", e);
|
||||
error!("Error running worker: {e:?}");
|
||||
thread_exit_evt.write(1).ok();
|
||||
}
|
||||
}
|
||||
|
|
@ -58,7 +58,7 @@ where
|
|||
})
|
||||
.map(|thread| epoll_threads.push(thread))
|
||||
.map_err(|e| {
|
||||
error!("Failed to spawn thread for {}: {}", name, e);
|
||||
error!("Failed to spawn thread for {name}: {e}");
|
||||
ActivateError::ThreadSpawn(e)
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -211,30 +211,30 @@ impl VirtioPciCommonConfig {
|
|||
}
|
||||
|
||||
fn read_common_config_byte(&self, offset: u64) -> u8 {
|
||||
debug!("read_common_config_byte: offset 0x{:x}", offset);
|
||||
debug!("read_common_config_byte: offset 0x{offset:x}");
|
||||
// The driver is only allowed to do aligned, properly sized access.
|
||||
match offset {
|
||||
0x14 => self.driver_status,
|
||||
0x15 => self.config_generation,
|
||||
_ => {
|
||||
warn!("invalid virtio config byte read: 0x{:x}", offset);
|
||||
warn!("invalid virtio config byte read: 0x{offset:x}");
|
||||
0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn write_common_config_byte(&mut self, offset: u64, value: u8) {
|
||||
debug!("write_common_config_byte: offset 0x{:x}", offset);
|
||||
debug!("write_common_config_byte: offset 0x{offset:x}");
|
||||
match offset {
|
||||
0x14 => self.driver_status = value,
|
||||
_ => {
|
||||
warn!("invalid virtio config byte write: 0x{:x}", offset);
|
||||
warn!("invalid virtio config byte write: 0x{offset:x}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn read_common_config_word(&self, offset: u64, queues: &[Queue]) -> u16 {
|
||||
debug!("read_common_config_word: offset 0x{:x}", offset);
|
||||
debug!("read_common_config_word: offset 0x{offset:x}");
|
||||
match offset {
|
||||
0x10 => self.msix_config.load(Ordering::Acquire),
|
||||
0x12 => queues.len() as u16, // num_queues
|
||||
|
|
@ -244,14 +244,14 @@ impl VirtioPciCommonConfig {
|
|||
0x1c => u16::from(self.with_queue(queues, |q| q.ready()).unwrap_or(false)),
|
||||
0x1e => self.queue_select, // notify_off
|
||||
_ => {
|
||||
warn!("invalid virtio register word read: 0x{:x}", offset);
|
||||
warn!("invalid virtio register word read: 0x{offset:x}");
|
||||
0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn write_common_config_word(&mut self, offset: u64, value: u16, queues: &mut [Queue]) {
|
||||
debug!("write_common_config_word: offset 0x{:x}", offset);
|
||||
debug!("write_common_config_word: offset 0x{offset:x}");
|
||||
match offset {
|
||||
0x10 => self.msix_config.store(value, Ordering::Release),
|
||||
0x16 => self.queue_select = value,
|
||||
|
|
@ -286,13 +286,13 @@ impl VirtioPciCommonConfig {
|
|||
}
|
||||
}),
|
||||
_ => {
|
||||
warn!("invalid virtio register word write: 0x{:x}", offset);
|
||||
warn!("invalid virtio register word write: 0x{offset:x}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn read_common_config_dword(&self, offset: u64, device: Arc<Mutex<dyn VirtioDevice>>) -> u32 {
|
||||
debug!("read_common_config_dword: offset 0x{:x}", offset);
|
||||
debug!("read_common_config_dword: offset 0x{offset:x}");
|
||||
match offset {
|
||||
0x00 => self.device_feature_select,
|
||||
0x04 => {
|
||||
|
|
@ -307,7 +307,7 @@ impl VirtioPciCommonConfig {
|
|||
}
|
||||
0x08 => self.driver_feature_select,
|
||||
_ => {
|
||||
warn!("invalid virtio register dword read: 0x{:x}", offset);
|
||||
warn!("invalid virtio register dword read: 0x{offset:x}");
|
||||
0
|
||||
}
|
||||
}
|
||||
|
|
@ -320,7 +320,7 @@ impl VirtioPciCommonConfig {
|
|||
queues: &mut [Queue],
|
||||
device: Arc<Mutex<dyn VirtioDevice>>,
|
||||
) {
|
||||
debug!("write_common_config_dword: offset 0x{:x}", offset);
|
||||
debug!("write_common_config_dword: offset 0x{offset:x}");
|
||||
|
||||
match offset {
|
||||
0x00 => self.device_feature_select = value,
|
||||
|
|
@ -344,18 +344,18 @@ impl VirtioPciCommonConfig {
|
|||
0x30 => self.with_queue_mut(queues, |q| q.set_used_ring_address(Some(value), None)),
|
||||
0x34 => self.with_queue_mut(queues, |q| q.set_used_ring_address(None, Some(value))),
|
||||
_ => {
|
||||
warn!("invalid virtio register dword write: 0x{:x}", offset);
|
||||
warn!("invalid virtio register dword write: 0x{offset:x}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn read_common_config_qword(&self, _offset: u64) -> u64 {
|
||||
debug!("read_common_config_qword: offset 0x{:x}", _offset);
|
||||
debug!("read_common_config_qword: offset 0x{_offset:x}");
|
||||
0 // Assume the guest has no reason to read write-only registers.
|
||||
}
|
||||
|
||||
fn write_common_config_qword(&mut self, offset: u64, value: u64, queues: &mut [Queue]) {
|
||||
debug!("write_common_config_qword: offset 0x{:x}", offset);
|
||||
debug!("write_common_config_qword: offset 0x{offset:x}");
|
||||
|
||||
let low = Some((value & 0xffff_ffff) as u32);
|
||||
let high = Some((value >> 32) as u32);
|
||||
|
|
@ -365,7 +365,7 @@ impl VirtioPciCommonConfig {
|
|||
0x28 => self.with_queue_mut(queues, |q| q.set_avail_ring_address(low, high)),
|
||||
0x30 => self.with_queue_mut(queues, |q| q.set_used_ring_address(low, high)),
|
||||
_ => {
|
||||
warn!("invalid virtio register qword write: 0x{:x}", offset);
|
||||
warn!("invalid virtio register qword write: 0x{offset:x}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -394,10 +394,7 @@ impl VirtioPciDevice {
|
|||
let mut queue_evts = Vec::new();
|
||||
for _ in locked_device.queue_max_sizes().iter() {
|
||||
queue_evts.push(EventFd::new(EFD_NONBLOCK).map_err(|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed creating eventfd: {}",
|
||||
e
|
||||
))
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!("Failed creating eventfd: {e}"))
|
||||
})?)
|
||||
}
|
||||
let num_queues = locked_device.queue_max_sizes().len();
|
||||
|
|
@ -421,16 +418,14 @@ impl VirtioPciDevice {
|
|||
})
|
||||
.map_err(|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed creating MSI interrupt group: {}",
|
||||
e
|
||||
"Failed creating MSI interrupt group: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
let msix_state = vm_migration::state_from_id(snapshot.as_ref(), pci::MSIX_CONFIG_ID)
|
||||
.map_err(|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed to get MsixConfigState from Snapshot: {}",
|
||||
e
|
||||
"Failed to get MsixConfigState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -469,8 +464,7 @@ impl VirtioPciDevice {
|
|||
vm_migration::state_from_id(snapshot.as_ref(), pci::PCI_CONFIGURATION_ID).map_err(
|
||||
|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed to get PciConfigurationState from Snapshot: {}",
|
||||
e
|
||||
"Failed to get PciConfigurationState from Snapshot: {e}"
|
||||
))
|
||||
},
|
||||
)?;
|
||||
|
|
@ -493,8 +487,7 @@ impl VirtioPciDevice {
|
|||
vm_migration::state_from_id(snapshot.as_ref(), VIRTIO_PCI_COMMON_CONFIG_ID).map_err(
|
||||
|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed to get VirtioPciCommonConfigState from Snapshot: {}",
|
||||
e
|
||||
"Failed to get VirtioPciCommonConfigState from Snapshot: {e}"
|
||||
))
|
||||
},
|
||||
)?;
|
||||
|
|
@ -522,8 +515,7 @@ impl VirtioPciDevice {
|
|||
.transpose()
|
||||
.map_err(|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed to get VirtioPciDeviceState from Snapshot: {}",
|
||||
e
|
||||
"Failed to get VirtioPciDeviceState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -613,8 +605,7 @@ impl VirtioPciDevice {
|
|||
{
|
||||
virtio_pci_device.activate().map_err(|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed activating the device: {}",
|
||||
e
|
||||
"Failed activating the device: {e}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
@ -795,7 +786,7 @@ impl VirtioPciDevice {
|
|||
}
|
||||
|
||||
if !queue.is_valid(self.memory.memory().deref()) {
|
||||
error!("Queue {} is not valid", queue_index);
|
||||
error!("Queue {queue_index} is not valid");
|
||||
}
|
||||
|
||||
queues.push((
|
||||
|
|
@ -1182,7 +1173,7 @@ impl PciDevice for VirtioPciDevice {
|
|||
}
|
||||
// Handled with ioeventfds.
|
||||
#[cfg(not(feature = "sev_snp"))]
|
||||
error!("Unexpected write to notification BAR: offset = 0x{:x}", o);
|
||||
error!("Unexpected write to notification BAR: offset = 0x{o:x}");
|
||||
}
|
||||
o if (MSIX_TABLE_BAR_OFFSET..MSIX_TABLE_BAR_OFFSET + MSIX_TABLE_SIZE).contains(&o) => {
|
||||
if let Some(msix_config) = &self.msix_config {
|
||||
|
|
|
|||
|
|
@ -132,7 +132,7 @@ impl Vdpa {
|
|||
backend_features,
|
||||
paused,
|
||||
) = if let Some(state) = state {
|
||||
info!("Restoring vDPA {}", id);
|
||||
info!("Restoring vDPA {id}");
|
||||
|
||||
vhost.set_backend_features_acked(state.backend_features);
|
||||
vhost
|
||||
|
|
@ -404,14 +404,14 @@ impl VirtioDevice for Vdpa {
|
|||
fn read_config(&self, offset: u64, data: &mut [u8]) {
|
||||
assert!(self.vhost.is_some());
|
||||
if let Err(e) = self.vhost.as_ref().unwrap().get_config(offset as u32, data) {
|
||||
error!("Failed reading virtio config: {}", e);
|
||||
error!("Failed reading virtio config: {e}");
|
||||
}
|
||||
}
|
||||
|
||||
fn write_config(&mut self, offset: u64, data: &[u8]) {
|
||||
assert!(self.vhost.is_some());
|
||||
if let Err(e) = self.vhost.as_ref().unwrap().set_config(offset as u32, data) {
|
||||
error!("Failed writing virtio config: {}", e);
|
||||
error!("Failed writing virtio config: {e}");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -433,7 +433,7 @@ impl VirtioDevice for Vdpa {
|
|||
|
||||
fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
|
||||
if let Err(e) = self.reset_vdpa() {
|
||||
error!("Failed to reset vhost-vdpa: {:?}", e);
|
||||
error!("Failed to reset vhost-vdpa: {e:?}");
|
||||
return None;
|
||||
}
|
||||
|
||||
|
|
@ -487,7 +487,7 @@ impl Snapshottable for Vdpa {
|
|||
}
|
||||
|
||||
let snapshot = Snapshot::new_from_state(&self.state().map_err(|e| {
|
||||
MigratableError::Snapshot(anyhow!("Error snapshotting vDPA device: {:?}", e))
|
||||
MigratableError::Snapshot(anyhow!("Error snapshotting vDPA device: {e:?}"))
|
||||
})?)?;
|
||||
|
||||
// Force the vhost handler to be dropped in order to close the vDPA
|
||||
|
|
@ -509,7 +509,7 @@ impl Migratable for Vdpa {
|
|||
if self.backend_features & (1 << VHOST_BACKEND_F_SUSPEND) != 0 {
|
||||
assert!(self.vhost.is_some());
|
||||
self.vhost.as_ref().unwrap().suspend().map_err(|e| {
|
||||
MigratableError::StartMigration(anyhow!("Error suspending vDPA device: {:?}", e))
|
||||
MigratableError::StartMigration(anyhow!("Error suspending vDPA device: {e:?}"))
|
||||
})
|
||||
} else {
|
||||
Err(MigratableError::StartMigration(anyhow!(
|
||||
|
|
@ -565,7 +565,7 @@ impl<M: GuestAddressSpace + Sync + Send> ExternalDmaMapping for VdpaDmaMapping<M
|
|||
}
|
||||
|
||||
fn unmap(&self, iova: u64, size: u64) -> std::result::Result<(), std::io::Error> {
|
||||
debug!("DMA unmap iova 0x{:x} size 0x{:x}", iova, size);
|
||||
debug!("DMA unmap iova 0x{iova:x} size 0x{size:x}");
|
||||
self.device
|
||||
.lock()
|
||||
.unwrap()
|
||||
|
|
|
|||
|
|
@ -81,7 +81,7 @@ impl Blk {
|
|||
config,
|
||||
paused,
|
||||
) = if let Some(state) = state {
|
||||
info!("Restoring vhost-user-block {}", id);
|
||||
info!("Restoring vhost-user-block {id}");
|
||||
|
||||
vu.set_protocol_features_vhost_user(
|
||||
state.acked_features,
|
||||
|
|
@ -135,8 +135,7 @@ impl Blk {
|
|||
|
||||
if num_queues > backend_num_queues {
|
||||
error!(
|
||||
"vhost-user-blk requested too many queues ({}) since the backend only supports {}\n",
|
||||
num_queues, backend_num_queues
|
||||
"vhost-user-blk requested too many queues ({num_queues}) since the backend only supports {backend_num_queues}\n"
|
||||
);
|
||||
return Err(Error::BadQueueNum);
|
||||
}
|
||||
|
|
@ -216,13 +215,13 @@ impl Drop for Blk {
|
|||
if let Some(kill_evt) = self.common.kill_evt.take()
|
||||
&& let Err(e) = kill_evt.write(1)
|
||||
{
|
||||
error!("failed to kill vhost-user-blk: {:?}", e);
|
||||
error!("failed to kill vhost-user-blk: {e:?}");
|
||||
}
|
||||
self.common.wait_for_epoll_threads();
|
||||
if let Some(thread) = self.epoll_thread.take()
|
||||
&& let Err(e) = thread.join()
|
||||
{
|
||||
error!("Error joining thread: {:?}", e);
|
||||
error!("Error joining thread: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -275,7 +274,7 @@ impl VirtioDevice for Blk {
|
|||
.set_config(offset as u32, VhostUserConfigFlags::WRITABLE, data)
|
||||
.map_err(Error::VhostUserSetConfig)
|
||||
{
|
||||
error!("Failed setting vhost-user-blk configuration: {:?}", e);
|
||||
error!("Failed setting vhost-user-blk configuration: {e:?}");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -331,7 +330,7 @@ impl VirtioDevice for Blk {
|
|||
if let Some(vu) = &self.vu_common.vu
|
||||
&& let Err(e) = vu.lock().unwrap().reset_vhost_user()
|
||||
{
|
||||
error!("Failed to reset vhost-user daemon: {:?}", e);
|
||||
error!("Failed to reset vhost-user daemon: {e:?}");
|
||||
return None;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -109,7 +109,7 @@ impl Fs {
|
|||
config,
|
||||
paused,
|
||||
) = if let Some(state) = state {
|
||||
info!("Restoring vhost-user-fs {}", id);
|
||||
info!("Restoring vhost-user-fs {id}");
|
||||
|
||||
vu.set_protocol_features_vhost_user(
|
||||
state.acked_features,
|
||||
|
|
@ -148,8 +148,7 @@ impl Fs {
|
|||
|
||||
if num_queues > backend_num_queues {
|
||||
error!(
|
||||
"vhost-user-fs requested too many queues ({}) since the backend only supports {}\n",
|
||||
num_queues, backend_num_queues
|
||||
"vhost-user-fs requested too many queues ({num_queues}) since the backend only supports {backend_num_queues}\n"
|
||||
);
|
||||
return Err(Error::BadQueueNum);
|
||||
}
|
||||
|
|
@ -230,7 +229,7 @@ impl Drop for Fs {
|
|||
if let Some(thread) = self.epoll_thread.take()
|
||||
&& let Err(e) = thread.join()
|
||||
{
|
||||
error!("Error joining thread: {:?}", e);
|
||||
error!("Error joining thread: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -311,7 +310,7 @@ impl VirtioDevice for Fs {
|
|||
if let Some(vu) = &self.vu_common.vu
|
||||
&& let Err(e) = vu.lock().unwrap().reset_vhost_user()
|
||||
{
|
||||
error!("Failed to reset vhost-user daemon: {:?}", e);
|
||||
error!("Failed to reset vhost-user daemon: {e:?}");
|
||||
return None;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -265,8 +265,7 @@ impl<S: VhostUserFrontendReqHandler> EpollHelperHandler for VhostUserEpollHandle
|
|||
HUP_CONNECTION_EVENT => {
|
||||
self.reconnect(helper).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"failed to reconnect vhost-user backend: {:?}",
|
||||
e
|
||||
"failed to reconnect vhost-user backend: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
@ -274,8 +273,7 @@ impl<S: VhostUserFrontendReqHandler> EpollHelperHandler for VhostUserEpollHandle
|
|||
if let Some(backend_req_handler) = self.backend_req_handler.as_mut() {
|
||||
backend_req_handler.handle_request().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to handle request from vhost-user backend: {:?}",
|
||||
e
|
||||
"Failed to handle request from vhost-user backend: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
@ -412,7 +410,7 @@ impl VhostUserCommon {
|
|||
pub fn pause(&mut self) -> std::result::Result<(), MigratableError> {
|
||||
if let Some(vu) = &self.vu {
|
||||
vu.lock().unwrap().pause_vhost_user().map_err(|e| {
|
||||
MigratableError::Pause(anyhow!("Error pausing vhost-user backend: {:?}", e))
|
||||
MigratableError::Pause(anyhow!("Error pausing vhost-user backend: {e:?}"))
|
||||
})
|
||||
} else {
|
||||
Ok(())
|
||||
|
|
@ -422,7 +420,7 @@ impl VhostUserCommon {
|
|||
pub fn resume(&mut self) -> std::result::Result<(), MigratableError> {
|
||||
if let Some(vu) = &self.vu {
|
||||
vu.lock().unwrap().resume_vhost_user().map_err(|e| {
|
||||
MigratableError::Resume(anyhow!("Error resuming vhost-user backend: {:?}", e))
|
||||
MigratableError::Resume(anyhow!("Error resuming vhost-user backend: {e:?}"))
|
||||
})
|
||||
} else {
|
||||
Ok(())
|
||||
|
|
@ -454,8 +452,7 @@ impl VhostUserCommon {
|
|||
.start_dirty_log(last_ram_addr)
|
||||
.map_err(|e| {
|
||||
MigratableError::StartDirtyLog(anyhow!(
|
||||
"Error starting migration for vhost-user backend: {:?}",
|
||||
e
|
||||
"Error starting migration for vhost-user backend: {e:?}"
|
||||
))
|
||||
})
|
||||
} else {
|
||||
|
|
@ -472,8 +469,7 @@ impl VhostUserCommon {
|
|||
if let Some(vu) = &self.vu {
|
||||
vu.lock().unwrap().stop_dirty_log().map_err(|e| {
|
||||
MigratableError::StopDirtyLog(anyhow!(
|
||||
"Error stopping migration for vhost-user backend: {:?}",
|
||||
e
|
||||
"Error stopping migration for vhost-user backend: {e:?}"
|
||||
))
|
||||
})
|
||||
} else {
|
||||
|
|
@ -490,8 +486,7 @@ impl VhostUserCommon {
|
|||
let last_ram_addr = guest_memory.memory().last_addr().raw_value();
|
||||
vu.lock().unwrap().dirty_log(last_ram_addr).map_err(|e| {
|
||||
MigratableError::DirtyLog(anyhow!(
|
||||
"Error retrieving dirty ranges from vhost-user backend: {:?}",
|
||||
e
|
||||
"Error retrieving dirty ranges from vhost-user backend: {e:?}"
|
||||
))
|
||||
})
|
||||
} else {
|
||||
|
|
@ -518,8 +513,7 @@ impl VhostUserCommon {
|
|||
if let Some(kill_evt) = kill_evt {
|
||||
kill_evt.write(1).map_err(|e| {
|
||||
MigratableError::CompleteMigration(anyhow!(
|
||||
"Error killing vhost-user thread: {:?}",
|
||||
e
|
||||
"Error killing vhost-user thread: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -90,7 +90,7 @@ impl Net {
|
|||
config,
|
||||
paused,
|
||||
) = if let Some(state) = state {
|
||||
info!("Restoring vhost-user-net {}", id);
|
||||
info!("Restoring vhost-user-net {id}");
|
||||
|
||||
// The backend acknowledged features must not contain
|
||||
// VIRTIO_NET_F_MAC since we don't expect the backend
|
||||
|
|
@ -169,8 +169,7 @@ impl Net {
|
|||
|
||||
if num_queues > backend_num_queues {
|
||||
error!(
|
||||
"vhost-user-net requested too many queues ({}) since the backend only supports {}\n",
|
||||
num_queues, backend_num_queues
|
||||
"vhost-user-net requested too many queues ({num_queues}) since the backend only supports {backend_num_queues}\n"
|
||||
);
|
||||
return Err(Error::BadQueueNum);
|
||||
}
|
||||
|
|
@ -246,7 +245,7 @@ impl Drop for Net {
|
|||
if let Some(kill_evt) = self.common.kill_evt.take()
|
||||
&& let Err(e) = kill_evt.write(1)
|
||||
{
|
||||
error!("failed to kill vhost-user-net: {:?}", e);
|
||||
error!("failed to kill vhost-user-net: {e:?}");
|
||||
}
|
||||
|
||||
self.common.wait_for_epoll_threads();
|
||||
|
|
@ -254,13 +253,13 @@ impl Drop for Net {
|
|||
if let Some(thread) = self.epoll_thread.take()
|
||||
&& let Err(e) = thread.join()
|
||||
{
|
||||
error!("Error joining thread: {:?}", e);
|
||||
error!("Error joining thread: {e:?}");
|
||||
}
|
||||
|
||||
if let Some(thread) = self.ctrl_queue_epoll_thread.take()
|
||||
&& let Err(e) = thread.join()
|
||||
{
|
||||
error!("Error joining thread: {:?}", e);
|
||||
error!("Error joining thread: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -386,7 +385,7 @@ impl VirtioDevice for Net {
|
|||
if let Some(vu) = &self.vu_common.vu
|
||||
&& let Err(e) = vu.lock().unwrap().reset_vhost_user()
|
||||
{
|
||||
error!("Failed to reset vhost-user daemon: {:?}", e);
|
||||
error!("Failed to reset vhost-user daemon: {e:?}");
|
||||
return None;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -409,10 +409,7 @@ impl VhostUserHandle {
|
|||
}
|
||||
};
|
||||
|
||||
error!(
|
||||
"Failed connecting the backend after trying for 1 minute: {:?}",
|
||||
err
|
||||
);
|
||||
error!("Failed connecting the backend after trying for 1 minute: {err:?}");
|
||||
Err(Error::VhostUserConnect)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -108,7 +108,7 @@ where
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(queue_index))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -137,7 +137,7 @@ where
|
|||
}
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("vsock: RX queue error: {:?}", e);
|
||||
warn!("vsock: RX queue error: {e:?}");
|
||||
0
|
||||
}
|
||||
};
|
||||
|
|
@ -170,7 +170,7 @@ where
|
|||
) {
|
||||
Ok(pkt) => pkt,
|
||||
Err(e) => {
|
||||
error!("vsock: error reading TX packet: {:?}", e);
|
||||
error!("vsock: error reading TX packet: {e:?}");
|
||||
self.queues[1]
|
||||
.add_used(desc_chain.memory(), desc_chain.head_index(), 0)
|
||||
.map_err(DeviceError::QueueAddUsed)?;
|
||||
|
|
@ -226,7 +226,7 @@ where
|
|||
Some(evset) => evset,
|
||||
None => {
|
||||
let evbits = event.events;
|
||||
warn!("epoll: ignoring unknown event set: 0x{:x}", evbits);
|
||||
warn!("epoll: ignoring unknown event set: 0x{evbits:x}");
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
|
@ -236,25 +236,22 @@ where
|
|||
RX_QUEUE_EVENT => {
|
||||
debug!("vsock: RX queue event");
|
||||
self.queue_evts[0].read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get RX queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get RX queue event: {e:?}"))
|
||||
})?;
|
||||
if self.backend.read().unwrap().has_pending_rx() {
|
||||
self.process_rx().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process RX queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process RX queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
TX_QUEUE_EVENT => {
|
||||
debug!("vsock: TX queue event");
|
||||
self.queue_evts[1].read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get TX queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get TX queue event: {e:?}"))
|
||||
})?;
|
||||
|
||||
self.process_tx().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process TX queue: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process TX queue: {e:?}"))
|
||||
})?;
|
||||
|
||||
// The backend may have queued up responses to the packets we sent during TX queue
|
||||
|
|
@ -262,17 +259,14 @@ where
|
|||
// into RX buffers.
|
||||
if self.backend.read().unwrap().has_pending_rx() {
|
||||
self.process_rx().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process RX queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process RX queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
EVT_QUEUE_EVENT => {
|
||||
debug!("vsock: EVT queue event");
|
||||
self.queue_evts[2].read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get EVT queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get EVT queue event: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
BACKEND_EVENT => {
|
||||
|
|
@ -284,14 +278,11 @@ where
|
|||
// returning an error) at some point in the past, now is the time to try walking the
|
||||
// TX queue again.
|
||||
self.process_tx().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process TX queue: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process TX queue: {e:?}"))
|
||||
})?;
|
||||
if self.backend.read().unwrap().has_pending_rx() {
|
||||
self.process_rx().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process RX queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process RX queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
|
|
@ -341,7 +332,7 @@ where
|
|||
state: Option<VsockState>,
|
||||
) -> io::Result<Vsock<B>> {
|
||||
let (avail_features, acked_features, paused) = if let Some(state) = state {
|
||||
info!("Restoring virtio-vsock {}", id);
|
||||
info!("Restoring virtio-vsock {id}");
|
||||
(state.avail_features, state.acked_features, true)
|
||||
} else {
|
||||
let mut avail_features = (1u64 << VIRTIO_F_VERSION_1) | (1u64 << VIRTIO_F_IN_ORDER);
|
||||
|
|
|
|||
|
|
@ -335,7 +335,7 @@ impl VsockEpollListener for VsockMuxer {
|
|||
// appropriate to retry, by calling into epoll_wait().
|
||||
continue;
|
||||
}
|
||||
warn!("vsock: failed to consume muxer epoll event: {}", e);
|
||||
warn!("vsock: failed to consume muxer epoll event: {e}");
|
||||
}
|
||||
}
|
||||
break 'epoll;
|
||||
|
|
@ -383,10 +383,7 @@ impl VsockMuxer {
|
|||
/// Handle/dispatch an epoll event to its listener.
|
||||
///
|
||||
fn handle_event(&mut self, fd: RawFd, event_set: epoll::Events) {
|
||||
debug!(
|
||||
"vsock: muxer processing event: fd={}, event_set={:?}",
|
||||
fd, event_set
|
||||
);
|
||||
debug!("vsock: muxer processing event: fd={fd}, event_set={event_set:?}");
|
||||
|
||||
match self.listener_map.get_mut(&fd) {
|
||||
// This event needs to be forwarded to a `MuxerConnection` that is listening for
|
||||
|
|
@ -430,7 +427,7 @@ impl VsockMuxer {
|
|||
self.add_listener(stream.as_raw_fd(), EpollListener::LocalStream(stream))
|
||||
})
|
||||
.unwrap_or_else(|err| {
|
||||
warn!("vsock: unable to accept local connection: {:?}", err);
|
||||
warn!("vsock: unable to accept local connection: {err:?}");
|
||||
});
|
||||
}
|
||||
|
||||
|
|
@ -478,16 +475,13 @@ impl VsockMuxer {
|
|||
)
|
||||
})
|
||||
.unwrap_or_else(|err| {
|
||||
info!("vsock: error adding local-init connection: {:?}", err);
|
||||
info!("vsock: error adding local-init connection: {err:?}");
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
_ => {
|
||||
info!(
|
||||
"vsock: unexpected event: fd={:?}, event_set={:?}",
|
||||
fd, event_set
|
||||
);
|
||||
info!("vsock: unexpected event: fd={fd:?}, event_set={event_set:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -662,10 +656,7 @@ impl VsockMuxer {
|
|||
epoll::Event::new(epoll::Events::empty(), 0),
|
||||
)
|
||||
.unwrap_or_else(|err| {
|
||||
warn!(
|
||||
"vosck muxer: error removing epoll listener for fd {:?}: {:?}",
|
||||
fd, err
|
||||
);
|
||||
warn!("vosck muxer: error removing epoll listener for fd {fd:?}: {err:?}");
|
||||
});
|
||||
}
|
||||
|
||||
|
|
@ -760,7 +751,7 @@ impl VsockMuxer {
|
|||
}
|
||||
Err(err) => {
|
||||
conn.kill();
|
||||
warn!("vsock: unable to ack host connection: {:?}", err);
|
||||
warn!("vsock: unable to ack host connection: {err:?}");
|
||||
}
|
||||
};
|
||||
}
|
||||
|
|
@ -870,10 +861,7 @@ impl VsockMuxer {
|
|||
peer_port,
|
||||
});
|
||||
if !pushed {
|
||||
warn!(
|
||||
"vsock: muxer.rxq full; dropping RST packet for lp={}, pp={}",
|
||||
local_port, peer_port
|
||||
);
|
||||
warn!("vsock: muxer.rxq full; dropping RST packet for lp={local_port}, pp={peer_port}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -93,8 +93,7 @@ impl WatchdogEpollHandler {
|
|||
// If this is the first "ping" then setup the timer
|
||||
if self.last_ping_time.lock().unwrap().is_none() {
|
||||
info!(
|
||||
"First ping received. Starting timer (every {} seconds)",
|
||||
WATCHDOG_TIMER_INTERVAL
|
||||
"First ping received. Starting timer (every {WATCHDOG_TIMER_INTERVAL} seconds)"
|
||||
);
|
||||
timerfd_setup(&self.timer, WATCHDOG_TIMER_INTERVAL).map_err(Error::TimerfdSetup)?;
|
||||
}
|
||||
|
|
@ -113,7 +112,7 @@ impl WatchdogEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(0))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -142,18 +141,15 @@ impl EpollHelperHandler for WatchdogEpollHandler {
|
|||
match ev_type {
|
||||
QUEUE_AVAIL_EVENT => {
|
||||
self.queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
|
||||
let needs_notification = self.process_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {e:?}"))
|
||||
})?;
|
||||
if needs_notification {
|
||||
self.signal_used_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
|
|
@ -162,22 +158,21 @@ impl EpollHelperHandler for WatchdogEpollHandler {
|
|||
// the number of times this event has elapsed since the last read.
|
||||
let mut buf = vec![0; 8];
|
||||
self.timer.read_exact(&mut buf).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Error reading from timer fd: {:}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Error reading from timer fd: {e:}"))
|
||||
})?;
|
||||
|
||||
if let Some(last_ping_time) = self.last_ping_time.lock().unwrap().as_ref() {
|
||||
let now = Instant::now();
|
||||
let gap = now.duration_since(*last_ping_time).as_secs();
|
||||
if gap > WATCHDOG_TIMEOUT {
|
||||
error!("Watchdog triggered: {} seconds since last ping", gap);
|
||||
error!("Watchdog triggered: {gap} seconds since last ping");
|
||||
self.reset_evt.write(1).ok();
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Unexpected event: {}",
|
||||
ev_type
|
||||
"Unexpected event: {ev_type}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
|
@ -214,7 +209,7 @@ impl Watchdog {
|
|||
) -> io::Result<Watchdog> {
|
||||
let mut last_ping_time = None;
|
||||
let (avail_features, acked_features, paused) = if let Some(state) = state {
|
||||
info!("Restoring virtio-watchdog {}", id);
|
||||
info!("Restoring virtio-watchdog {id}");
|
||||
|
||||
// When restoring enable the watchdog if it was previously enabled.
|
||||
// We reset the timer to ensure that we don't unnecessarily reboot
|
||||
|
|
@ -229,7 +224,7 @@ impl Watchdog {
|
|||
};
|
||||
|
||||
let timer_fd = timerfd_create().map_err(|e| {
|
||||
error!("Failed to create timer fd {}", e);
|
||||
error!("Failed to create timer fd {e}");
|
||||
e
|
||||
})?;
|
||||
// SAFETY: timer_fd is a valid fd
|
||||
|
|
@ -339,12 +334,12 @@ impl VirtioDevice for Watchdog {
|
|||
let (kill_evt, pause_evt) = self.common.dup_eventfds();
|
||||
|
||||
let reset_evt = self.reset_evt.try_clone().map_err(|e| {
|
||||
error!("Failed to clone reset_evt eventfd: {}", e);
|
||||
error!("Failed to clone reset_evt eventfd: {e}");
|
||||
ActivateError::BadActivate
|
||||
})?;
|
||||
|
||||
let timer = self.timer.try_clone().map_err(|e| {
|
||||
error!("Failed to clone timer fd: {}", e);
|
||||
error!("Failed to clone timer fd: {e}");
|
||||
ActivateError::BadActivate
|
||||
})?;
|
||||
|
||||
|
|
@ -392,20 +387,17 @@ impl Pausable for Watchdog {
|
|||
fn pause(&mut self) -> result::Result<(), MigratableError> {
|
||||
info!("Watchdog paused - disabling timer");
|
||||
timerfd_setup(&self.timer, 0)
|
||||
.map_err(|e| MigratableError::Pause(anyhow!("Error clearing timer: {:?}", e)))?;
|
||||
.map_err(|e| MigratableError::Pause(anyhow!("Error clearing timer: {e:?}")))?;
|
||||
self.common.pause()
|
||||
}
|
||||
|
||||
fn resume(&mut self) -> result::Result<(), MigratableError> {
|
||||
// Reset the timer on pause if it was previously used
|
||||
if self.last_ping_time.lock().unwrap().is_some() {
|
||||
info!(
|
||||
"Watchdog resumed - enabling timer (every {} seconds)",
|
||||
WATCHDOG_TIMER_INTERVAL
|
||||
);
|
||||
info!("Watchdog resumed - enabling timer (every {WATCHDOG_TIMER_INTERVAL} seconds)");
|
||||
self.last_ping_time.lock().unwrap().replace(Instant::now());
|
||||
timerfd_setup(&self.timer, WATCHDOG_TIMER_INTERVAL)
|
||||
.map_err(|e| MigratableError::Resume(anyhow!("Error setting timer: {:?}", e)))?;
|
||||
.map_err(|e| MigratableError::Resume(anyhow!("Error setting timer: {e:?}")))?;
|
||||
}
|
||||
self.common.resume()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -84,7 +84,7 @@ impl SnapshotData {
|
|||
T: Deserialize<'a>,
|
||||
{
|
||||
serde_json::from_str(&self.state)
|
||||
.map_err(|e| MigratableError::Restore(anyhow!("Error deserialising: {}", e)))
|
||||
.map_err(|e| MigratableError::Restore(anyhow!("Error deserialising: {e}")))
|
||||
}
|
||||
|
||||
/// Create from state that can be serialized
|
||||
|
|
@ -93,7 +93,7 @@ impl SnapshotData {
|
|||
T: Serialize,
|
||||
{
|
||||
let state = serde_json::to_string(state)
|
||||
.map_err(|e| MigratableError::Snapshot(anyhow!("Error serialising: {}", e)))?;
|
||||
.map_err(|e| MigratableError::Snapshot(anyhow!("Error serialising: {e}")))?;
|
||||
|
||||
Ok(SnapshotData { state })
|
||||
}
|
||||
|
|
|
|||
|
|
@ -338,7 +338,7 @@ fn start_http_thread(
|
|||
apply_filter(&api_seccomp_filter)
|
||||
.map_err(VmmError::ApplySeccompFilter)
|
||||
.map_err(|e| {
|
||||
error!("Error applying seccomp filter: {:?}", e);
|
||||
error!("Error applying seccomp filter: {e:?}");
|
||||
exit_evt.write(1).ok();
|
||||
e
|
||||
})?;
|
||||
|
|
@ -350,7 +350,7 @@ fn start_http_thread(
|
|||
.restrict_self()
|
||||
.map_err(VmmError::ApplyLandlock)
|
||||
.map_err(|e| {
|
||||
error!("Error applying landlock to http-server thread: {:?}", e);
|
||||
error!("Error applying landlock to http-server thread: {e:?}");
|
||||
exit_evt.write(1).ok();
|
||||
e
|
||||
})?;
|
||||
|
|
@ -365,7 +365,7 @@ fn start_http_thread(
|
|||
if let Err(e) = server.respond(server_request.process(|request| {
|
||||
handle_http_request(request, &api_notifier, &api_sender)
|
||||
})) {
|
||||
error!("HTTP server error on response: {}", e);
|
||||
error!("HTTP server error on response: {e}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -374,10 +374,7 @@ fn start_http_thread(
|
|||
return;
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
"HTTP server error on retrieving incoming request. Error: {}",
|
||||
e
|
||||
);
|
||||
error!("HTTP server error on retrieving incoming request. Error: {e}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -426,7 +426,7 @@ impl ApiAction for VmAddDevice {
|
|||
response_sender: Sender<ApiResponse>,
|
||||
) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmAddDevice {:?}", config);
|
||||
info!("API request event: VmAddDevice {config:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_add_device(config)
|
||||
|
|
@ -463,7 +463,7 @@ impl ApiAction for AddDisk {
|
|||
response_sender: Sender<ApiResponse>,
|
||||
) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: AddDisk {:?}", config);
|
||||
info!("API request event: AddDisk {config:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_add_disk(config)
|
||||
|
|
@ -500,7 +500,7 @@ impl ApiAction for VmAddFs {
|
|||
response_sender: Sender<ApiResponse>,
|
||||
) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmAddFs {:?}", config);
|
||||
info!("API request event: VmAddFs {config:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_add_fs(config)
|
||||
|
|
@ -537,7 +537,7 @@ impl ApiAction for VmAddPmem {
|
|||
response_sender: Sender<ApiResponse>,
|
||||
) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmAddPmem {:?}", config);
|
||||
info!("API request event: VmAddPmem {config:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_add_pmem(config)
|
||||
|
|
@ -574,7 +574,7 @@ impl ApiAction for VmAddNet {
|
|||
response_sender: Sender<ApiResponse>,
|
||||
) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmAddNet {:?}", config);
|
||||
info!("API request event: VmAddNet {config:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_add_net(config)
|
||||
|
|
@ -611,7 +611,7 @@ impl ApiAction for VmAddVdpa {
|
|||
response_sender: Sender<ApiResponse>,
|
||||
) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmAddVdpa {:?}", config);
|
||||
info!("API request event: VmAddVdpa {config:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_add_vdpa(config)
|
||||
|
|
@ -648,7 +648,7 @@ impl ApiAction for VmAddVsock {
|
|||
response_sender: Sender<ApiResponse>,
|
||||
) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmAddVsock {:?}", config);
|
||||
info!("API request event: VmAddVsock {config:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_add_vsock(config)
|
||||
|
|
@ -685,7 +685,7 @@ impl ApiAction for VmAddUserDevice {
|
|||
response_sender: Sender<ApiResponse>,
|
||||
) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmAddUserDevice {:?}", config);
|
||||
info!("API request event: VmAddUserDevice {config:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_add_user_device(config)
|
||||
|
|
@ -827,7 +827,7 @@ impl ApiAction for VmCreate {
|
|||
response_sender: Sender<ApiResponse>,
|
||||
) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmCreate {:?}", config);
|
||||
info!("API request event: VmCreate {config:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_create(config)
|
||||
|
|
@ -1032,7 +1032,7 @@ impl ApiAction for VmReceiveMigration {
|
|||
|
||||
fn request(&self, data: Self::RequestBody, response_sender: Sender<ApiResponse>) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmReceiveMigration {:?}", data);
|
||||
info!("API request event: VmReceiveMigration {data:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_receive_migration(data)
|
||||
|
|
@ -1069,7 +1069,7 @@ impl ApiAction for VmRemoveDevice {
|
|||
response_sender: Sender<ApiResponse>,
|
||||
) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmRemoveDevice {:?}", remove_device_data);
|
||||
info!("API request event: VmRemoveDevice {remove_device_data:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_remove_device(remove_device_data.id)
|
||||
|
|
@ -1106,7 +1106,7 @@ impl ApiAction for VmResize {
|
|||
response_sender: Sender<ApiResponse>,
|
||||
) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmResize {:?}", resize_data);
|
||||
info!("API request event: VmResize {resize_data:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_resize(
|
||||
|
|
@ -1147,7 +1147,7 @@ impl ApiAction for VmResizeZone {
|
|||
response_sender: Sender<ApiResponse>,
|
||||
) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmResizeZone {:?}", resize_zone_data);
|
||||
info!("API request event: VmResizeZone {resize_zone_data:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_resize_zone(resize_zone_data.id, resize_zone_data.desired_ram)
|
||||
|
|
@ -1184,7 +1184,7 @@ impl ApiAction for VmRestore {
|
|||
response_sender: Sender<ApiResponse>,
|
||||
) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmRestore {:?}", config);
|
||||
info!("API request event: VmRestore {config:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_restore(config)
|
||||
|
|
@ -1250,7 +1250,7 @@ impl ApiAction for VmSendMigration {
|
|||
|
||||
fn request(&self, data: Self::RequestBody, response_sender: Sender<ApiResponse>) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmSendMigration {:?}", data);
|
||||
info!("API request event: VmSendMigration {data:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_send_migration(data)
|
||||
|
|
@ -1287,7 +1287,7 @@ impl ApiAction for VmShutdown {
|
|||
response_sender: Sender<ApiResponse>,
|
||||
) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmShutdown {:?}", config);
|
||||
info!("API request event: VmShutdown {config:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_shutdown()
|
||||
|
|
@ -1324,7 +1324,7 @@ impl ApiAction for VmSnapshot {
|
|||
response_sender: Sender<ApiResponse>,
|
||||
) -> ApiRequest {
|
||||
Box::new(move |vmm| {
|
||||
info!("API request event: VmSnapshot {:?}", config);
|
||||
info!("API request event: VmSnapshot {config:?}");
|
||||
|
||||
let response = vmm
|
||||
.vm_snapshot(&config.destination_url)
|
||||
|
|
|
|||
|
|
@ -2523,7 +2523,7 @@ impl VmConfig {
|
|||
tty_consoles.push("debug-console");
|
||||
};
|
||||
if tty_consoles.len() > 1 {
|
||||
warn!("Using TTY output for multiple consoles: {:?}", tty_consoles);
|
||||
warn!("Using TTY output for multiple consoles: {tty_consoles:?}");
|
||||
}
|
||||
|
||||
if self.console.mode == ConsoleOutputMode::File && self.console.file.is_none() {
|
||||
|
|
|
|||
|
|
@ -497,7 +497,7 @@ impl Snapshottable for Vcpu {
|
|||
let saved_state = self
|
||||
.vcpu
|
||||
.state()
|
||||
.map_err(|e| MigratableError::Snapshot(anyhow!("Could not get vCPU state {:?}", e)))?;
|
||||
.map_err(|e| MigratableError::Snapshot(anyhow!("Could not get vCPU state {e:?}")))?;
|
||||
|
||||
self.saved_state = Some(saved_state.clone());
|
||||
|
||||
|
|
@ -574,10 +574,7 @@ impl BusDevice for CpuManager {
|
|||
}
|
||||
}
|
||||
_ => {
|
||||
warn!(
|
||||
"Unexpected offset for accessing CPU manager device: {:#}",
|
||||
offset
|
||||
);
|
||||
warn!("Unexpected offset for accessing CPU manager device: {offset:#}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -608,17 +605,14 @@ impl BusDevice for CpuManager {
|
|||
if data[0] & (1 << CPU_EJECT_FLAG) == 1 << CPU_EJECT_FLAG
|
||||
&& let Err(e) = self.remove_vcpu(self.selected_cpu)
|
||||
{
|
||||
error!("Error removing vCPU: {:?}", e);
|
||||
error!("Error removing vCPU: {e:?}");
|
||||
}
|
||||
} else {
|
||||
warn!("Out of range vCPU id: {}", self.selected_cpu);
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
warn!(
|
||||
"Unexpected offset for accessing CPU manager device: {:#}",
|
||||
offset
|
||||
);
|
||||
warn!("Unexpected offset for accessing CPU manager device: {offset:#}");
|
||||
}
|
||||
}
|
||||
None
|
||||
|
|
@ -827,7 +821,7 @@ impl CpuManager {
|
|||
}
|
||||
|
||||
fn create_vcpu(&mut self, cpu_id: u32, snapshot: Option<Snapshot>) -> Result<Arc<Mutex<Vcpu>>> {
|
||||
info!("Creating vCPU: cpu_id = {}", cpu_id);
|
||||
info!("Creating vCPU: cpu_id = {cpu_id}");
|
||||
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
let topology = self.get_vcpu_topology();
|
||||
|
|
@ -851,11 +845,11 @@ impl CpuManager {
|
|||
vcpu.init(&self.vm)?;
|
||||
|
||||
let state: CpuState = snapshot.to_state().map_err(|e| {
|
||||
Error::VcpuCreate(anyhow!("Could not get vCPU state from snapshot {:?}", e))
|
||||
Error::VcpuCreate(anyhow!("Could not get vCPU state from snapshot {e:?}"))
|
||||
})?;
|
||||
vcpu.vcpu
|
||||
.set_state(&state)
|
||||
.map_err(|e| Error::VcpuCreate(anyhow!("Could not set the vCPU state {:?}", e)))?;
|
||||
.map_err(|e| Error::VcpuCreate(anyhow!("Could not set the vCPU state {e:?}")))?;
|
||||
|
||||
vcpu.saved_state = Some(state);
|
||||
}
|
||||
|
|
@ -1035,7 +1029,7 @@ impl CpuManager {
|
|||
#[cfg(target_arch = "x86_64")]
|
||||
let interrupt_controller_clone = self.interrupt_controller.as_ref().cloned();
|
||||
|
||||
info!("Starting vCPU: cpu_id = {}", vcpu_id);
|
||||
info!("Starting vCPU: cpu_id = {vcpu_id}");
|
||||
|
||||
let handle = Some(
|
||||
thread::Builder::new()
|
||||
|
|
@ -1066,7 +1060,7 @@ impl CpuManager {
|
|||
if !vcpu_seccomp_filter.is_empty() && let Err(e) =
|
||||
apply_filter(&vcpu_seccomp_filter).map_err(Error::ApplySeccompFilter)
|
||||
{
|
||||
error!("Error applying seccomp filter: {:?}", e);
|
||||
error!("Error applying seccomp filter: {e:?}");
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -1132,7 +1126,7 @@ impl CpuManager {
|
|||
match vcpu.lock().as_ref().unwrap().vcpu.nmi() {
|
||||
Ok(()) => {},
|
||||
Err(e) => {
|
||||
error!("Error when inject nmi {}", e);
|
||||
error!("Error when inject nmi {e}");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
@ -1303,7 +1297,7 @@ impl CpuManager {
|
|||
}
|
||||
|
||||
fn remove_vcpu(&mut self, cpu_id: u32) -> Result<()> {
|
||||
info!("Removing vCPU: cpu_id = {}", cpu_id);
|
||||
info!("Removing vCPU: cpu_id = {cpu_id}");
|
||||
let state = &mut self.vcpu_states[usize::try_from(cpu_id).unwrap()];
|
||||
state.kill.store(true, Ordering::SeqCst);
|
||||
state.signal_thread();
|
||||
|
|
@ -1335,7 +1329,7 @@ impl CpuManager {
|
|||
pub fn start_restored_vcpus(&mut self) -> Result<()> {
|
||||
self.activate_vcpus(self.vcpus.len() as u32, false, Some(true))
|
||||
.map_err(|e| {
|
||||
Error::StartRestoreVcpu(anyhow!("Failed to start restored vCPUs: {:#?}", e))
|
||||
Error::StartRestoreVcpu(anyhow!("Failed to start restored vCPUs: {e:#?}"))
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
|
|
@ -2324,8 +2318,7 @@ impl Pausable for CpuManager {
|
|||
if !self.config.kvm_hyperv {
|
||||
vcpu.vcpu.notify_guest_clock_paused().map_err(|e| {
|
||||
MigratableError::Pause(anyhow!(
|
||||
"Could not notify guest it has been paused {:?}",
|
||||
e
|
||||
"Could not notify guest it has been paused {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2484,7 +2484,7 @@ impl DeviceManager {
|
|||
) -> DeviceManagerResult<Arc<Mutex<devices::tpm::Tpm>>> {
|
||||
// Create TPM Device
|
||||
let tpm = devices::tpm::Tpm::new(tpm_path.to_str().unwrap().to_string()).map_err(|e| {
|
||||
DeviceManagerError::CreateTpmDevice(anyhow!("Failed to create TPM Device : {:?}", e))
|
||||
DeviceManagerError::CreateTpmDevice(anyhow!("Failed to create TPM Device : {e:?}"))
|
||||
})?;
|
||||
let tpm = Arc::new(Mutex::new(tpm));
|
||||
|
||||
|
|
@ -2606,7 +2606,7 @@ impl DeviceManager {
|
|||
id
|
||||
};
|
||||
|
||||
info!("Creating virtio-block device: {:?}", disk_cfg);
|
||||
info!("Creating virtio-block device: {disk_cfg:?}");
|
||||
|
||||
let (virtio_device, migratable_device) = if disk_cfg.vhost_user {
|
||||
if is_hotplug {
|
||||
|
|
@ -2854,7 +2854,7 @@ impl DeviceManager {
|
|||
net_cfg.id = Some(id.clone());
|
||||
id
|
||||
};
|
||||
info!("Creating virtio-net device: {:?}", net_cfg);
|
||||
info!("Creating virtio-net device: {net_cfg:?}");
|
||||
|
||||
let (virtio_device, migratable_device) = if net_cfg.vhost_user {
|
||||
let socket = net_cfg.vhost_socket.as_ref().unwrap().clone();
|
||||
|
|
@ -3020,7 +3020,7 @@ impl DeviceManager {
|
|||
// Add virtio-rng if required
|
||||
let rng_config = self.config.lock().unwrap().rng.clone();
|
||||
if let Some(rng_path) = rng_config.src.to_str() {
|
||||
info!("Creating virtio-rng device: {:?}", rng_config);
|
||||
info!("Creating virtio-rng device: {rng_config:?}");
|
||||
let id = String::from(RNG_DEVICE_NAME);
|
||||
|
||||
let virtio_rng_device = Arc::new(Mutex::new(
|
||||
|
|
@ -3070,7 +3070,7 @@ impl DeviceManager {
|
|||
id
|
||||
};
|
||||
|
||||
info!("Creating virtio-fs device: {:?}", fs_cfg);
|
||||
info!("Creating virtio-fs device: {fs_cfg:?}");
|
||||
|
||||
let mut node = device_node!(id);
|
||||
|
||||
|
|
@ -3137,14 +3137,14 @@ impl DeviceManager {
|
|||
id
|
||||
};
|
||||
|
||||
info!("Creating virtio-pmem device: {:?}", pmem_cfg);
|
||||
info!("Creating virtio-pmem device: {pmem_cfg:?}");
|
||||
|
||||
let mut node = device_node!(id);
|
||||
|
||||
// Look for the id in the device tree. If it can be found, that means
|
||||
// the device is being restored, otherwise it's created from scratch.
|
||||
let region_range = if let Some(node) = self.device_tree.lock().unwrap().get(&id) {
|
||||
info!("Restoring virtio-pmem {} resources", id);
|
||||
info!("Restoring virtio-pmem {id} resources");
|
||||
|
||||
let mut region_range: Option<(u64, u64)> = None;
|
||||
for resource in node.resources.iter() {
|
||||
|
|
@ -3157,7 +3157,7 @@ impl DeviceManager {
|
|||
region_range = Some((*base, *size));
|
||||
}
|
||||
_ => {
|
||||
error!("Unexpected resource {:?} for {}", resource, id);
|
||||
error!("Unexpected resource {resource:?} for {id}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -3323,7 +3323,7 @@ impl DeviceManager {
|
|||
id
|
||||
};
|
||||
|
||||
info!("Creating virtio-vsock device: {:?}", vsock_cfg);
|
||||
info!("Creating virtio-vsock device: {vsock_cfg:?}");
|
||||
|
||||
let socket_path = vsock_cfg
|
||||
.socket
|
||||
|
|
@ -3387,7 +3387,7 @@ impl DeviceManager {
|
|||
let mut mm = mm.lock().unwrap();
|
||||
for (memory_zone_id, memory_zone) in mm.memory_zones_mut().iter_mut() {
|
||||
if let Some(virtio_mem_zone) = memory_zone.virtio_mem_zone_mut() {
|
||||
info!("Creating virtio-mem device: id = {}", memory_zone_id);
|
||||
info!("Creating virtio-mem device: id = {memory_zone_id}");
|
||||
|
||||
let node_id = numa_node_id_from_memory_zone_id(&self.numa_nodes, memory_zone_id)
|
||||
.map(|i| i as u16);
|
||||
|
|
@ -3486,7 +3486,7 @@ impl DeviceManager {
|
|||
|
||||
if let Some(balloon_config) = &self.config.lock().unwrap().balloon {
|
||||
let id = String::from(BALLOON_DEVICE_NAME);
|
||||
info!("Creating virtio-balloon device: id = {}", id);
|
||||
info!("Creating virtio-balloon device: id = {id}");
|
||||
|
||||
let virtio_balloon_device = Arc::new(Mutex::new(
|
||||
virtio_devices::Balloon::new(
|
||||
|
|
@ -3532,7 +3532,7 @@ impl DeviceManager {
|
|||
}
|
||||
|
||||
let id = String::from(WATCHDOG_DEVICE_NAME);
|
||||
info!("Creating virtio-watchdog device: id = {}", id);
|
||||
info!("Creating virtio-watchdog device: id = {id}");
|
||||
|
||||
let virtio_watchdog_device = Arc::new(Mutex::new(
|
||||
virtio_devices::Watchdog::new(
|
||||
|
|
@ -3576,7 +3576,7 @@ impl DeviceManager {
|
|||
id
|
||||
};
|
||||
|
||||
info!("Creating vDPA device: {:?}", vdpa_cfg);
|
||||
info!("Creating vDPA device: {vdpa_cfg:?}");
|
||||
|
||||
let device_path = vdpa_cfg
|
||||
.path
|
||||
|
|
@ -4189,7 +4189,7 @@ impl DeviceManager {
|
|||
let id = String::from(PVPANIC_DEVICE_NAME);
|
||||
let pci_segment_id = 0x0_u16;
|
||||
|
||||
info!("Creating pvpanic device {}", id);
|
||||
info!("Creating pvpanic device {id}");
|
||||
|
||||
let (pci_segment_id, pci_device_bdf, resources) =
|
||||
self.pci_resources(&id, pci_segment_id)?;
|
||||
|
|
@ -4280,7 +4280,7 @@ impl DeviceManager {
|
|||
// the device is being restored, otherwise it's created from scratch.
|
||||
let (pci_device_bdf, resources) =
|
||||
if let Some(node) = self.device_tree.lock().unwrap().get(id) {
|
||||
info!("Restoring virtio-pci {} resources", id);
|
||||
info!("Restoring virtio-pci {id} resources");
|
||||
let pci_device_bdf: PciBdf = node
|
||||
.pci_bdf
|
||||
.ok_or(DeviceManagerError::MissingDeviceNodePciBdf)?;
|
||||
|
|
@ -4529,10 +4529,7 @@ impl DeviceManager {
|
|||
}
|
||||
|
||||
pub fn eject_device(&mut self, pci_segment_id: u16, device_id: u8) -> DeviceManagerResult<()> {
|
||||
info!(
|
||||
"Ejecting device_id = {} on segment_id={}",
|
||||
device_id, pci_segment_id
|
||||
);
|
||||
info!("Ejecting device_id = {device_id} on segment_id={pci_segment_id}");
|
||||
|
||||
// Convert the device ID into the corresponding b/d/f.
|
||||
let pci_device_bdf = PciBdf::new(pci_segment_id, 0, device_id, 0);
|
||||
|
|
@ -5400,16 +5397,10 @@ impl BusDevice for DeviceManager {
|
|||
assert_eq!(data.len(), PSEG_FIELD_SIZE);
|
||||
data.copy_from_slice(&(self.selected_segment as u32).to_le_bytes());
|
||||
}
|
||||
_ => error!(
|
||||
"Accessing unknown location at base 0x{:x}, offset 0x{:x}",
|
||||
base, offset
|
||||
),
|
||||
_ => error!("Accessing unknown location at base 0x{base:x}, offset 0x{offset:x}"),
|
||||
}
|
||||
|
||||
debug!(
|
||||
"PCI_HP_REG_R: base 0x{:x}, offset 0x{:x}, data {:?}",
|
||||
base, offset, data
|
||||
)
|
||||
debug!("PCI_HP_REG_R: base 0x{base:x}, offset 0x{offset:x}, data {data:?}")
|
||||
}
|
||||
|
||||
fn write(&mut self, base: u64, offset: u64, data: &[u8]) -> Option<Arc<std::sync::Barrier>> {
|
||||
|
|
@ -5423,7 +5414,7 @@ impl BusDevice for DeviceManager {
|
|||
while slot_bitmap > 0 {
|
||||
let slot_id = slot_bitmap.trailing_zeros();
|
||||
if let Err(e) = self.eject_device(self.selected_segment as u16, slot_id as u8) {
|
||||
error!("Failed ejecting device {}: {:?}", slot_id, e);
|
||||
error!("Failed ejecting device {slot_id}: {e:?}");
|
||||
}
|
||||
slot_bitmap &= !(1 << slot_id);
|
||||
}
|
||||
|
|
@ -5443,16 +5434,10 @@ impl BusDevice for DeviceManager {
|
|||
}
|
||||
self.selected_segment = selected_segment;
|
||||
}
|
||||
_ => error!(
|
||||
"Accessing unknown location at base 0x{:x}, offset 0x{:x}",
|
||||
base, offset
|
||||
),
|
||||
_ => error!("Accessing unknown location at base 0x{base:x}, offset 0x{offset:x}"),
|
||||
}
|
||||
|
||||
debug!(
|
||||
"PCI_HP_REG_W: base 0x{:x}, offset 0x{:x}, data {:?}",
|
||||
base, offset, data
|
||||
);
|
||||
debug!("PCI_HP_REG_W: base 0x{base:x}, offset 0x{offset:x}, data {data:?}");
|
||||
|
||||
None
|
||||
}
|
||||
|
|
@ -5463,7 +5448,7 @@ impl Drop for DeviceManager {
|
|||
// Wake up the DeviceManager threads (mainly virtio device workers),
|
||||
// to avoid deadlock on waiting for paused/parked worker threads.
|
||||
if let Err(e) = self.resume() {
|
||||
error!("Error resuming DeviceManager: {:?}", e);
|
||||
error!("Error resuming DeviceManager: {e:?}");
|
||||
}
|
||||
|
||||
for handle in self.virtio_devices.drain(..) {
|
||||
|
|
|
|||
110
vmm/src/lib.rs
110
vmm/src/lib.rs
|
|
@ -449,7 +449,7 @@ pub fn start_event_monitor_thread(
|
|||
apply_filter(&seccomp_filter)
|
||||
.map_err(Error::ApplySeccompFilter)
|
||||
.map_err(|e| {
|
||||
error!("Error applying seccomp filter: {:?}", e);
|
||||
error!("Error applying seccomp filter: {e:?}");
|
||||
exit_event.write(1).ok();
|
||||
e
|
||||
})?;
|
||||
|
|
@ -460,7 +460,7 @@ pub fn start_event_monitor_thread(
|
|||
.restrict_self()
|
||||
.map_err(Error::ApplyLandlock)
|
||||
.map_err(|e| {
|
||||
error!("Error applying landlock to event monitor thread: {:?}", e);
|
||||
error!("Error applying landlock to event monitor thread: {e:?}");
|
||||
exit_event.write(1).ok();
|
||||
e
|
||||
})?;
|
||||
|
|
@ -732,7 +732,7 @@ impl Vmm {
|
|||
if !signal_handler_seccomp_filter.is_empty() && let Err(e) = apply_filter(&signal_handler_seccomp_filter)
|
||||
.map_err(Error::ApplySeccompFilter)
|
||||
{
|
||||
error!("Error applying seccomp filter: {:?}", e);
|
||||
error!("Error applying seccomp filter: {e:?}");
|
||||
exit_evt.write(1).ok();
|
||||
return;
|
||||
}
|
||||
|
|
@ -741,12 +741,12 @@ impl Vmm {
|
|||
match Landlock::new() {
|
||||
Ok(landlock) => {
|
||||
let _ = landlock.restrict_self().map_err(Error::ApplyLandlock).map_err(|e| {
|
||||
error!("Error applying Landlock to signal handler thread: {:?}", e);
|
||||
error!("Error applying Landlock to signal handler thread: {e:?}");
|
||||
exit_evt.write(1).ok();
|
||||
});
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Error creating Landlock object: {:?}", e);
|
||||
error!("Error creating Landlock object: {e:?}");
|
||||
exit_evt.write(1).ok();
|
||||
}
|
||||
};
|
||||
|
|
@ -764,7 +764,7 @@ impl Vmm {
|
|||
.map_err(Error::SignalHandlerSpawn)?,
|
||||
);
|
||||
}
|
||||
Err(e) => error!("Signal not found {}", e),
|
||||
Err(e) => error!("Signal not found {e}"),
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
|
@ -845,7 +845,7 @@ impl Vmm {
|
|||
|
||||
let vm_migration_config: VmMigrationConfig =
|
||||
serde_json::from_slice(&data).map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!("Error deserialising config: {}", e))
|
||||
MigratableError::MigrateReceive(anyhow!("Error deserialising config: {e}"))
|
||||
})?;
|
||||
|
||||
#[cfg(all(feature = "kvm", target_arch = "x86_64"))]
|
||||
|
|
@ -857,7 +857,7 @@ impl Vmm {
|
|||
let config = vm_migration_config.vm_config.clone();
|
||||
self.vm_config = Some(vm_migration_config.vm_config);
|
||||
self.console_info = Some(pre_create_console_devices(self).map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!("Error creating console devices: {:?}", e))
|
||||
MigratableError::MigrateReceive(anyhow!("Error creating console devices: {e:?}"))
|
||||
})?);
|
||||
|
||||
if self
|
||||
|
|
@ -869,7 +869,7 @@ impl Vmm {
|
|||
.landlock_enable
|
||||
{
|
||||
apply_landlock(self.vm_config.as_ref().unwrap().clone()).map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!("Error applying landlock: {:?}", e))
|
||||
MigratableError::MigrateReceive(anyhow!("Error applying landlock: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
|
||||
|
|
@ -884,8 +884,7 @@ impl Vmm {
|
|||
)
|
||||
.map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!(
|
||||
"Error creating hypervisor VM from snapshot: {:?}",
|
||||
e
|
||||
"Error creating hypervisor VM from snapshot: {e:?}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -909,8 +908,7 @@ impl Vmm {
|
|||
)
|
||||
.map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!(
|
||||
"Error creating MemoryManager from snapshot: {:?}",
|
||||
e
|
||||
"Error creating MemoryManager from snapshot: {e:?}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -935,21 +933,21 @@ impl Vmm {
|
|||
.read_exact(&mut data)
|
||||
.map_err(MigratableError::MigrateSocket)?;
|
||||
let snapshot: Snapshot = serde_json::from_slice(&data).map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!("Error deserialising snapshot: {}", e))
|
||||
MigratableError::MigrateReceive(anyhow!("Error deserialising snapshot: {e}"))
|
||||
})?;
|
||||
|
||||
let exit_evt = self.exit_evt.try_clone().map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!("Error cloning exit EventFd: {}", e))
|
||||
MigratableError::MigrateReceive(anyhow!("Error cloning exit EventFd: {e}"))
|
||||
})?;
|
||||
let reset_evt = self.reset_evt.try_clone().map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!("Error cloning reset EventFd: {}", e))
|
||||
MigratableError::MigrateReceive(anyhow!("Error cloning reset EventFd: {e}"))
|
||||
})?;
|
||||
#[cfg(feature = "guest_debug")]
|
||||
let debug_evt = self.vm_debug_evt.try_clone().map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!("Error cloning debug EventFd: {}", e))
|
||||
})?;
|
||||
let activate_evt = self.activate_evt.try_clone().map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!("Error cloning activate EventFd: {}", e))
|
||||
MigratableError::MigrateReceive(anyhow!("Error cloning activate EventFd: {e}"))
|
||||
})?;
|
||||
|
||||
#[cfg(not(target_arch = "riscv64"))]
|
||||
|
|
@ -974,13 +972,13 @@ impl Vmm {
|
|||
Some(snapshot),
|
||||
)
|
||||
.map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {:?}", e))
|
||||
MigratableError::MigrateReceive(anyhow!("Error creating VM from snapshot: {e:?}"))
|
||||
})?;
|
||||
|
||||
// Create VM
|
||||
vm.restore().map_err(|e| {
|
||||
Response::error().write_to(socket).ok();
|
||||
MigratableError::MigrateReceive(anyhow!("Failed restoring the Vm: {}", e))
|
||||
MigratableError::MigrateReceive(anyhow!("Failed restoring the Vm: {e}"))
|
||||
})?;
|
||||
self.vm = Some(vm);
|
||||
|
||||
|
|
@ -1014,7 +1012,7 @@ impl Vmm {
|
|||
fn socket_url_to_path(url: &str) -> result::Result<PathBuf, MigratableError> {
|
||||
url.strip_prefix("unix:")
|
||||
.ok_or_else(|| {
|
||||
MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {}", url))
|
||||
MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {url}"))
|
||||
})
|
||||
.map(|s| s.into())
|
||||
}
|
||||
|
|
@ -1023,19 +1021,19 @@ impl Vmm {
|
|||
destination_url: &str,
|
||||
) -> std::result::Result<SocketStream, MigratableError> {
|
||||
if let Some(address) = destination_url.strip_prefix("tcp:") {
|
||||
info!("Connecting to TCP socket at {}", address);
|
||||
info!("Connecting to TCP socket at {address}");
|
||||
|
||||
let socket = TcpStream::connect(address).map_err(|e| {
|
||||
MigratableError::MigrateSend(anyhow!("Error connecting to TCP socket: {}", e))
|
||||
MigratableError::MigrateSend(anyhow!("Error connecting to TCP socket: {e}"))
|
||||
})?;
|
||||
|
||||
Ok(SocketStream::Tcp(socket))
|
||||
} else {
|
||||
let path = Vmm::socket_url_to_path(destination_url)?;
|
||||
info!("Connecting to UNIX socket at {:?}", path);
|
||||
info!("Connecting to UNIX socket at {path:?}");
|
||||
|
||||
let socket = UnixStream::connect(&path).map_err(|e| {
|
||||
MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {}", e))
|
||||
MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {e}"))
|
||||
})?;
|
||||
|
||||
Ok(SocketStream::Unix(socket))
|
||||
|
|
@ -1047,13 +1045,12 @@ impl Vmm {
|
|||
) -> std::result::Result<SocketStream, MigratableError> {
|
||||
if let Some(address) = receiver_url.strip_prefix("tcp:") {
|
||||
let listener = TcpListener::bind(address).map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!("Error binding to TCP socket: {}", e))
|
||||
MigratableError::MigrateReceive(anyhow!("Error binding to TCP socket: {e}"))
|
||||
})?;
|
||||
|
||||
let (socket, _addr) = listener.accept().map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!(
|
||||
"Error accepting connection on TCP socket: {}",
|
||||
e
|
||||
"Error accepting connection on TCP socket: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -1061,19 +1058,18 @@ impl Vmm {
|
|||
} else {
|
||||
let path = Vmm::socket_url_to_path(receiver_url)?;
|
||||
let listener = UnixListener::bind(&path).map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!("Error binding to UNIX socket: {}", e))
|
||||
MigratableError::MigrateReceive(anyhow!("Error binding to UNIX socket: {e}"))
|
||||
})?;
|
||||
|
||||
let (socket, _addr) = listener.accept().map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!(
|
||||
"Error accepting connection on UNIX socket: {}",
|
||||
e
|
||||
"Error accepting connection on UNIX socket: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
// Remove the UNIX socket file after accepting the connection
|
||||
std::fs::remove_file(&path).map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!("Error removing UNIX socket file: {}", e))
|
||||
MigratableError::MigrateReceive(anyhow!("Error removing UNIX socket file: {e}"))
|
||||
})?;
|
||||
|
||||
Ok(SocketStream::Unix(socket))
|
||||
|
|
@ -1147,7 +1143,7 @@ impl Vmm {
|
|||
},
|
||||
)
|
||||
.map_err(|e| {
|
||||
MigratableError::MigrateSend(anyhow!("Error generating common cpuid': {:?}", e))
|
||||
MigratableError::MigrateSend(anyhow!("Error generating common cpuid': {e:?}"))
|
||||
})?
|
||||
};
|
||||
|
||||
|
|
@ -1207,7 +1203,7 @@ impl Vmm {
|
|||
// Try at most 5 passes of dirty memory sending
|
||||
const MAX_DIRTY_MIGRATIONS: usize = 5;
|
||||
for i in 0..MAX_DIRTY_MIGRATIONS {
|
||||
info!("Dirty memory migration {} of {}", i, MAX_DIRTY_MIGRATIONS);
|
||||
info!("Dirty memory migration {i} of {MAX_DIRTY_MIGRATIONS}");
|
||||
if !Self::vm_maybe_send_dirty_pages(vm, &mut socket)? {
|
||||
break;
|
||||
}
|
||||
|
|
@ -1285,13 +1281,12 @@ impl Vmm {
|
|||
},
|
||||
)
|
||||
.map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e))
|
||||
MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {e:?}"))
|
||||
})?
|
||||
};
|
||||
arch::CpuidFeatureEntry::check_cpuid_compatibility(src_vm_cpuid, dest_cpuid).map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!(
|
||||
"Error checking cpu feature compatibility': {:?}",
|
||||
e
|
||||
"Error checking cpu feature compatibility': {e:?}"
|
||||
))
|
||||
})
|
||||
}
|
||||
|
|
@ -1399,7 +1394,7 @@ impl Vmm {
|
|||
match dispatch_event {
|
||||
EpollDispatch::Unknown => {
|
||||
let event = event.data;
|
||||
warn!("Unknown VMM loop event: {}", event);
|
||||
warn!("Unknown VMM loop event: {event}");
|
||||
}
|
||||
EpollDispatch::Exit => {
|
||||
info!("VM exit event");
|
||||
|
|
@ -1418,10 +1413,7 @@ impl Vmm {
|
|||
EpollDispatch::ActivateVirtioDevices => {
|
||||
if let Some(ref vm) = self.vm {
|
||||
let count = self.activate_evt.read().map_err(Error::EventFdRead)?;
|
||||
info!(
|
||||
"Trying to activate pending virtio devices: count = {}",
|
||||
count
|
||||
);
|
||||
info!("Trying to activate pending virtio devices: count = {count}");
|
||||
vm.activate_virtio_devices()
|
||||
.map_err(Error::ActivateVirtioDevices)?;
|
||||
}
|
||||
|
|
@ -1641,7 +1633,7 @@ impl RequestHandler for Vmm {
|
|||
|
||||
self.vm_restore(source_url, vm_config, restore_cfg.prefault)
|
||||
.map_err(|vm_restore_err| {
|
||||
error!("VM Restore failed: {:?}", vm_restore_err);
|
||||
error!("VM Restore failed: {vm_restore_err:?}");
|
||||
|
||||
// Cleanup the VM being created while vm restore
|
||||
if let Err(e) = self.vm_delete() {
|
||||
|
|
@ -1820,7 +1812,7 @@ impl RequestHandler for Vmm {
|
|||
|
||||
if let Some(ref mut vm) = self.vm {
|
||||
if let Err(e) = vm.resize(desired_vcpus, desired_ram, desired_balloon) {
|
||||
error!("Error when resizing VM: {:?}", e);
|
||||
error!("Error when resizing VM: {e:?}");
|
||||
Err(e)
|
||||
} else {
|
||||
Ok(())
|
||||
|
|
@ -1847,7 +1839,7 @@ impl RequestHandler for Vmm {
|
|||
|
||||
if let Some(ref mut vm) = self.vm {
|
||||
if let Err(e) = vm.resize_zone(id, desired_ram) {
|
||||
error!("Error when resizing VM: {:?}", e);
|
||||
error!("Error when resizing VM: {e:?}");
|
||||
Err(e)
|
||||
} else {
|
||||
Ok(())
|
||||
|
|
@ -1865,7 +1857,7 @@ impl RequestHandler for Vmm {
|
|||
}
|
||||
}
|
||||
|
||||
error!("Could not find the memory zone {} for the resize", id);
|
||||
error!("Could not find the memory zone {id} for the resize");
|
||||
Err(VmError::ResizeZone)
|
||||
}
|
||||
}
|
||||
|
|
@ -1885,7 +1877,7 @@ impl RequestHandler for Vmm {
|
|||
|
||||
if let Some(ref mut vm) = self.vm {
|
||||
let info = vm.add_device(device_cfg).map_err(|e| {
|
||||
error!("Error when adding new device to the VM: {:?}", e);
|
||||
error!("Error when adding new device to the VM: {e:?}");
|
||||
e
|
||||
})?;
|
||||
serde_json::to_vec(&info)
|
||||
|
|
@ -1914,7 +1906,7 @@ impl RequestHandler for Vmm {
|
|||
|
||||
if let Some(ref mut vm) = self.vm {
|
||||
let info = vm.add_user_device(device_cfg).map_err(|e| {
|
||||
error!("Error when adding new user device to the VM: {:?}", e);
|
||||
error!("Error when adding new user device to the VM: {e:?}");
|
||||
e
|
||||
})?;
|
||||
serde_json::to_vec(&info)
|
||||
|
|
@ -1931,7 +1923,7 @@ impl RequestHandler for Vmm {
|
|||
fn vm_remove_device(&mut self, id: String) -> result::Result<(), VmError> {
|
||||
if let Some(ref mut vm) = self.vm {
|
||||
if let Err(e) = vm.remove_device(id) {
|
||||
error!("Error when removing device from the VM: {:?}", e);
|
||||
error!("Error when removing device from the VM: {e:?}");
|
||||
Err(e)
|
||||
} else {
|
||||
Ok(())
|
||||
|
|
@ -1960,7 +1952,7 @@ impl RequestHandler for Vmm {
|
|||
|
||||
if let Some(ref mut vm) = self.vm {
|
||||
let info = vm.add_disk(disk_cfg).map_err(|e| {
|
||||
error!("Error when adding new disk to the VM: {:?}", e);
|
||||
error!("Error when adding new disk to the VM: {e:?}");
|
||||
e
|
||||
})?;
|
||||
serde_json::to_vec(&info)
|
||||
|
|
@ -1986,7 +1978,7 @@ impl RequestHandler for Vmm {
|
|||
|
||||
if let Some(ref mut vm) = self.vm {
|
||||
let info = vm.add_fs(fs_cfg).map_err(|e| {
|
||||
error!("Error when adding new fs to the VM: {:?}", e);
|
||||
error!("Error when adding new fs to the VM: {e:?}");
|
||||
e
|
||||
})?;
|
||||
serde_json::to_vec(&info)
|
||||
|
|
@ -2012,7 +2004,7 @@ impl RequestHandler for Vmm {
|
|||
|
||||
if let Some(ref mut vm) = self.vm {
|
||||
let info = vm.add_pmem(pmem_cfg).map_err(|e| {
|
||||
error!("Error when adding new pmem device to the VM: {:?}", e);
|
||||
error!("Error when adding new pmem device to the VM: {e:?}");
|
||||
e
|
||||
})?;
|
||||
serde_json::to_vec(&info)
|
||||
|
|
@ -2038,7 +2030,7 @@ impl RequestHandler for Vmm {
|
|||
|
||||
if let Some(ref mut vm) = self.vm {
|
||||
let info = vm.add_net(net_cfg).map_err(|e| {
|
||||
error!("Error when adding new network device to the VM: {:?}", e);
|
||||
error!("Error when adding new network device to the VM: {e:?}");
|
||||
e
|
||||
})?;
|
||||
serde_json::to_vec(&info)
|
||||
|
|
@ -2064,7 +2056,7 @@ impl RequestHandler for Vmm {
|
|||
|
||||
if let Some(ref mut vm) = self.vm {
|
||||
let info = vm.add_vdpa(vdpa_cfg).map_err(|e| {
|
||||
error!("Error when adding new vDPA device to the VM: {:?}", e);
|
||||
error!("Error when adding new vDPA device to the VM: {e:?}");
|
||||
e
|
||||
})?;
|
||||
serde_json::to_vec(&info)
|
||||
|
|
@ -2095,7 +2087,7 @@ impl RequestHandler for Vmm {
|
|||
|
||||
if let Some(ref mut vm) = self.vm {
|
||||
let info = vm.add_vsock(vsock_cfg).map_err(|e| {
|
||||
error!("Error when adding new vsock device to the VM: {:?}", e);
|
||||
error!("Error when adding new vsock device to the VM: {e:?}");
|
||||
e
|
||||
})?;
|
||||
serde_json::to_vec(&info)
|
||||
|
|
@ -2112,7 +2104,7 @@ impl RequestHandler for Vmm {
|
|||
fn vm_counters(&mut self) -> result::Result<Option<Vec<u8>>, VmError> {
|
||||
if let Some(ref mut vm) = self.vm {
|
||||
let info = vm.counters().map_err(|e| {
|
||||
error!("Error when getting counters from the VM: {:?}", e);
|
||||
error!("Error when getting counters from the VM: {e:?}");
|
||||
e
|
||||
})?;
|
||||
serde_json::to_vec(&info)
|
||||
|
|
@ -2222,8 +2214,7 @@ impl RequestHandler for Vmm {
|
|||
let mut buf = [0u8; 4];
|
||||
let (_, file) = unix_socket.recv_with_fd(&mut buf).map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!(
|
||||
"Error receiving slot from socket: {}",
|
||||
e
|
||||
"Error receiving slot from socket: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -2302,7 +2293,7 @@ impl RequestHandler for Vmm {
|
|||
send_data_migration.clone(),
|
||||
)
|
||||
.map_err(|migration_err| {
|
||||
error!("Migration failed: {:?}", migration_err);
|
||||
error!("Migration failed: {migration_err:?}");
|
||||
|
||||
// Stop logging dirty pages only for non-local migrations
|
||||
if !send_data_migration.local
|
||||
|
|
@ -2323,8 +2314,7 @@ impl RequestHandler for Vmm {
|
|||
// Shutdown the VM after the migration succeeded
|
||||
self.exit_evt.write(1).map_err(|e| {
|
||||
MigratableError::MigrateSend(anyhow!(
|
||||
"Failed shutting down the VM after migration: {:?}",
|
||||
e
|
||||
"Failed shutting down the VM after migration: {e:?}"
|
||||
))
|
||||
})
|
||||
} else {
|
||||
|
|
|
|||
|
|
@ -472,10 +472,7 @@ impl BusDevice for MemoryManager {
|
|||
}
|
||||
}
|
||||
_ => {
|
||||
warn!(
|
||||
"Unexpected offset for accessing memory manager device: {:#}",
|
||||
offset
|
||||
);
|
||||
warn!("Unexpected offset for accessing memory manager device: {offset:#}");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
|
@ -508,10 +505,7 @@ impl BusDevice for MemoryManager {
|
|||
}
|
||||
}
|
||||
_ => {
|
||||
warn!(
|
||||
"Unexpected offset for accessing memory manager device: {:#}",
|
||||
offset
|
||||
);
|
||||
warn!("Unexpected offset for accessing memory manager device: {offset:#}");
|
||||
}
|
||||
};
|
||||
None
|
||||
|
|
@ -786,9 +780,8 @@ impl MemoryManager {
|
|||
if let Some(hotplug_size) = config.hotplug_size {
|
||||
if hotplugged_size > hotplug_size {
|
||||
error!(
|
||||
"'hotplugged_size' {} can't be bigger than \
|
||||
'hotplug_size' {}",
|
||||
hotplugged_size, hotplug_size,
|
||||
"'hotplugged_size' {hotplugged_size} can't be bigger than \
|
||||
'hotplug_size' {hotplug_size}",
|
||||
);
|
||||
return Err(Error::InvalidMemoryParameters);
|
||||
}
|
||||
|
|
@ -861,9 +854,8 @@ impl MemoryManager {
|
|||
if let Some(hotplug_size) = zone.hotplug_size {
|
||||
if hotplugged_size > hotplug_size {
|
||||
error!(
|
||||
"'hotplugged_size' {} can't be bigger than \
|
||||
'hotplug_size' {}",
|
||||
hotplugged_size, hotplug_size,
|
||||
"'hotplugged_size' {hotplugged_size} can't be bigger than \
|
||||
'hotplug_size' {hotplug_size}",
|
||||
);
|
||||
return Err(Error::InvalidMemoryParameters);
|
||||
}
|
||||
|
|
@ -1452,10 +1444,7 @@ impl MemoryManager {
|
|||
Self::get_prefault_align_size(backing_file, hugepages, hugepage_size)? as usize;
|
||||
|
||||
if !is_aligned(size, page_size) {
|
||||
warn!(
|
||||
"Prefaulting memory size {} misaligned with page size {}",
|
||||
size, page_size
|
||||
);
|
||||
warn!("Prefaulting memory size {size} misaligned with page size {page_size}");
|
||||
}
|
||||
|
||||
let num_pages = size / page_size;
|
||||
|
|
@ -1484,7 +1473,7 @@ impl MemoryManager {
|
|||
};
|
||||
if ret != 0 {
|
||||
let e = io::Error::last_os_error();
|
||||
warn!("Failed to prefault pages: {}", e);
|
||||
warn!("Failed to prefault pages: {e}");
|
||||
}
|
||||
});
|
||||
}
|
||||
|
|
@ -1501,7 +1490,7 @@ impl MemoryManager {
|
|||
let ret = unsafe { libc::madvise(region.as_ptr() as _, size, libc::MADV_HUGEPAGE) };
|
||||
if ret != 0 {
|
||||
let e = io::Error::last_os_error();
|
||||
warn!("Failed to mark pages as THP eligible: {}", e);
|
||||
warn!("Failed to mark pages as THP eligible: {e}");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1642,7 +1631,7 @@ impl MemoryManager {
|
|||
}
|
||||
|
||||
fn hotplug_ram_region(&mut self, size: usize) -> Result<Arc<GuestRegionMmap>, Error> {
|
||||
info!("Hotplugging new RAM: {}", size);
|
||||
info!("Hotplugging new RAM: {size}");
|
||||
|
||||
// Check that there is a free slot
|
||||
if self.next_hotplug_slot >= HOTPLUG_COUNT {
|
||||
|
|
@ -1739,8 +1728,7 @@ impl MemoryManager {
|
|||
);
|
||||
|
||||
info!(
|
||||
"Creating userspace mapping: {:x} -> {:x} {:x}, slot {}",
|
||||
guest_phys_addr, userspace_addr, memory_size, slot
|
||||
"Creating userspace mapping: {guest_phys_addr:x} -> {userspace_addr:x} {memory_size:x}, slot {slot}"
|
||||
);
|
||||
|
||||
self.vm
|
||||
|
|
@ -1758,7 +1746,7 @@ impl MemoryManager {
|
|||
};
|
||||
if ret != 0 {
|
||||
let e = io::Error::last_os_error();
|
||||
warn!("Failed to mark mapping as MADV_DONTDUMP: {}", e);
|
||||
warn!("Failed to mark mapping as MADV_DONTDUMP: {e}");
|
||||
}
|
||||
|
||||
// Mark the pages as mergeable if explicitly asked for.
|
||||
|
|
@ -1780,15 +1768,14 @@ impl MemoryManager {
|
|||
if errno == libc::EINVAL {
|
||||
warn!("kernel not configured with CONFIG_KSM");
|
||||
} else {
|
||||
warn!("madvise error: {}", err);
|
||||
warn!("madvise error: {err}");
|
||||
}
|
||||
warn!("failed to mark pages as mergeable");
|
||||
}
|
||||
}
|
||||
|
||||
info!(
|
||||
"Created userspace mapping: {:x} -> {:x} {:x}",
|
||||
guest_phys_addr, userspace_addr, memory_size
|
||||
"Created userspace mapping: {guest_phys_addr:x} -> {userspace_addr:x} {memory_size:x}"
|
||||
);
|
||||
|
||||
Ok(slot)
|
||||
|
|
@ -1835,15 +1822,14 @@ impl MemoryManager {
|
|||
if errno == libc::EINVAL {
|
||||
warn!("kernel not configured with CONFIG_KSM");
|
||||
} else {
|
||||
warn!("madvise error: {}", err);
|
||||
warn!("madvise error: {err}");
|
||||
}
|
||||
warn!("failed to mark pages as unmergeable");
|
||||
}
|
||||
}
|
||||
|
||||
info!(
|
||||
"Removed userspace mapping: {:x} -> {:x} {:x}",
|
||||
guest_phys_addr, userspace_addr, memory_size
|
||||
"Removed userspace mapping: {guest_phys_addr:x} -> {userspace_addr:x} {memory_size:x}"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
|
|
@ -2119,8 +2105,7 @@ impl MemoryManager {
|
|||
)
|
||||
.map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!(
|
||||
"Error receiving memory from socket: {}",
|
||||
e
|
||||
"Error receiving memory from socket: {e}"
|
||||
))
|
||||
})?;
|
||||
offset += bytes_read as u64;
|
||||
|
|
@ -2574,7 +2559,7 @@ impl Migratable for MemoryManager {
|
|||
// pages touched during our bulk copy are tracked.
|
||||
fn start_dirty_log(&mut self) -> std::result::Result<(), MigratableError> {
|
||||
self.vm.start_dirty_log().map_err(|e| {
|
||||
MigratableError::MigrateSend(anyhow!("Error starting VM dirty log {}", e))
|
||||
MigratableError::MigrateSend(anyhow!("Error starting VM dirty log {e}"))
|
||||
})?;
|
||||
|
||||
for r in self.guest_memory.memory().iter() {
|
||||
|
|
@ -2586,7 +2571,7 @@ impl Migratable for MemoryManager {
|
|||
|
||||
fn stop_dirty_log(&mut self) -> std::result::Result<(), MigratableError> {
|
||||
self.vm.stop_dirty_log().map_err(|e| {
|
||||
MigratableError::MigrateSend(anyhow!("Error stopping VM dirty log {}", e))
|
||||
MigratableError::MigrateSend(anyhow!("Error stopping VM dirty log {e}"))
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
|
|
@ -2598,7 +2583,7 @@ impl Migratable for MemoryManager {
|
|||
let mut table = MemoryRangeTable::default();
|
||||
for r in &self.guest_ram_mappings {
|
||||
let vm_dirty_bitmap = self.vm.get_dirty_log(r.slot, r.gpa, r.size).map_err(|e| {
|
||||
MigratableError::MigrateSend(anyhow!("Error getting VM dirty log {}", e))
|
||||
MigratableError::MigrateSend(anyhow!("Error getting VM dirty log {e}"))
|
||||
})?;
|
||||
let vmm_dirty_bitmap = match self.guest_memory.memory().find_region(GuestAddress(r.gpa))
|
||||
{
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ pub fn url_to_path(url: &str) -> std::result::Result<PathBuf, MigratableError> {
|
|||
let path: PathBuf = url
|
||||
.strip_prefix("file://")
|
||||
.ok_or_else(|| {
|
||||
MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {}", url))
|
||||
MigratableError::MigrateSend(anyhow!("Could not extract path from URL: {url}"))
|
||||
})
|
||||
.map(|s| s.into())?;
|
||||
|
||||
|
|
|
|||
|
|
@ -308,7 +308,7 @@ impl SerialManager {
|
|||
match dispatch_event {
|
||||
EpollDispatch::Unknown => {
|
||||
let event = event.data;
|
||||
warn!("Unknown serial manager loop event: {}", event);
|
||||
warn!("Unknown serial manager loop event: {event}");
|
||||
}
|
||||
EpollDispatch::Socket => {
|
||||
// New connection request arrived.
|
||||
|
|
|
|||
|
|
@ -448,7 +448,7 @@ impl VmOps for VmOpsHandler {
|
|||
|
||||
fn mmio_read(&self, gpa: u64, data: &mut [u8]) -> result::Result<(), HypervisorVmError> {
|
||||
if let Err(vm_device::BusError::MissingAddressRange) = self.mmio_bus.read(gpa, data) {
|
||||
info!("Guest MMIO read to unregistered address 0x{:x}", gpa);
|
||||
info!("Guest MMIO read to unregistered address 0x{gpa:x}");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
|
@ -456,7 +456,7 @@ impl VmOps for VmOpsHandler {
|
|||
fn mmio_write(&self, gpa: u64, data: &[u8]) -> result::Result<(), HypervisorVmError> {
|
||||
match self.mmio_bus.write(gpa, data) {
|
||||
Err(vm_device::BusError::MissingAddressRange) => {
|
||||
info!("Guest MMIO write to unregistered address 0x{:x}", gpa);
|
||||
info!("Guest MMIO write to unregistered address 0x{gpa:x}");
|
||||
}
|
||||
Ok(Some(barrier)) => {
|
||||
info!("Waiting for barrier");
|
||||
|
|
@ -471,7 +471,7 @@ impl VmOps for VmOpsHandler {
|
|||
#[cfg(target_arch = "x86_64")]
|
||||
fn pio_read(&self, port: u64, data: &mut [u8]) -> result::Result<(), HypervisorVmError> {
|
||||
if let Err(vm_device::BusError::MissingAddressRange) = self.io_bus.read(port, data) {
|
||||
info!("Guest PIO read to unregistered address 0x{:x}", port);
|
||||
info!("Guest PIO read to unregistered address 0x{port:x}");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
|
@ -480,7 +480,7 @@ impl VmOps for VmOpsHandler {
|
|||
fn pio_write(&self, port: u64, data: &[u8]) -> result::Result<(), HypervisorVmError> {
|
||||
match self.io_bus.write(port, data) {
|
||||
Err(vm_device::BusError::MissingAddressRange) => {
|
||||
info!("Guest PIO write to unregistered address 0x{:x}", port);
|
||||
info!("Guest PIO write to unregistered address 0x{port:x}");
|
||||
}
|
||||
Ok(Some(barrier)) => {
|
||||
info!("Waiting for barrier");
|
||||
|
|
@ -939,7 +939,7 @@ impl Vm {
|
|||
}
|
||||
node.memory_zones.push(memory_zone.clone());
|
||||
} else {
|
||||
error!("Unknown memory zone '{}'", memory_zone);
|
||||
error!("Unknown memory zone '{memory_zone}'");
|
||||
return Err(Error::InvalidNumaConfig);
|
||||
}
|
||||
}
|
||||
|
|
@ -959,12 +959,12 @@ impl Vm {
|
|||
let dist = distance.distance;
|
||||
|
||||
if !configs.iter().any(|cfg| cfg.guest_numa_id == dest) {
|
||||
error!("Unknown destination NUMA node {}", dest);
|
||||
error!("Unknown destination NUMA node {dest}");
|
||||
return Err(Error::InvalidNumaConfig);
|
||||
}
|
||||
|
||||
if node.distances.contains_key(&dest) {
|
||||
error!("Destination NUMA node {} has been already set", dest);
|
||||
error!("Destination NUMA node {dest} has been already set");
|
||||
return Err(Error::InvalidNumaConfig);
|
||||
}
|
||||
|
||||
|
|
@ -1751,7 +1751,7 @@ impl Vm {
|
|||
}
|
||||
}
|
||||
|
||||
error!("Could not find the memory zone {} for the resize", id);
|
||||
error!("Could not find the memory zone {id} for the resize");
|
||||
Err(Error::ResizeZone)
|
||||
}
|
||||
|
||||
|
|
@ -2532,12 +2532,12 @@ impl Vm {
|
|||
Request::memory_fd(std::mem::size_of_val(&slot) as u64)
|
||||
.write_to(socket)
|
||||
.map_err(|e| {
|
||||
MigratableError::MigrateSend(anyhow!("Error sending memory fd request: {}", e))
|
||||
MigratableError::MigrateSend(anyhow!("Error sending memory fd request: {e}"))
|
||||
})?;
|
||||
socket
|
||||
.send_with_fd(&slot.to_le_bytes()[..], fd)
|
||||
.map_err(|e| {
|
||||
MigratableError::MigrateSend(anyhow!("Error sending memory fd: {}", e))
|
||||
MigratableError::MigrateSend(anyhow!("Error sending memory fd: {e}"))
|
||||
})?;
|
||||
|
||||
Response::read_from(socket)?.ok_or_abandon(
|
||||
|
|
@ -2576,8 +2576,7 @@ impl Vm {
|
|||
)
|
||||
.map_err(|e| {
|
||||
MigratableError::MigrateSend(anyhow!(
|
||||
"Error transferring memory to socket: {}",
|
||||
e
|
||||
"Error transferring memory to socket: {e}"
|
||||
))
|
||||
})?;
|
||||
offset += bytes_written as u64;
|
||||
|
|
@ -2766,19 +2765,19 @@ impl Pausable for Vm {
|
|||
let mut state = self
|
||||
.state
|
||||
.try_write()
|
||||
.map_err(|e| MigratableError::Pause(anyhow!("Could not get VM state: {}", e)))?;
|
||||
.map_err(|e| MigratableError::Pause(anyhow!("Could not get VM state: {e}")))?;
|
||||
let new_state = VmState::Paused;
|
||||
|
||||
state
|
||||
.valid_transition(new_state)
|
||||
.map_err(|e| MigratableError::Pause(anyhow!("Invalid transition: {:?}", e)))?;
|
||||
.map_err(|e| MigratableError::Pause(anyhow!("Invalid transition: {e:?}")))?;
|
||||
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
{
|
||||
let mut clock = self
|
||||
.vm
|
||||
.get_clock()
|
||||
.map_err(|e| MigratableError::Pause(anyhow!("Could not get VM clock: {}", e)))?;
|
||||
.map_err(|e| MigratableError::Pause(anyhow!("Could not get VM clock: {e}")))?;
|
||||
clock.reset_flags();
|
||||
self.saved_clock = Some(clock);
|
||||
}
|
||||
|
|
@ -2786,7 +2785,7 @@ impl Pausable for Vm {
|
|||
// Before pausing the vCPUs activate any pending virtio devices that might
|
||||
// need activation between starting the pause (or e.g. a migration it's part of)
|
||||
self.activate_virtio_devices().map_err(|e| {
|
||||
MigratableError::Pause(anyhow!("Error activating pending virtio devices: {:?}", e))
|
||||
MigratableError::Pause(anyhow!("Error activating pending virtio devices: {e:?}"))
|
||||
})?;
|
||||
|
||||
self.cpu_manager.lock().unwrap().pause()?;
|
||||
|
|
@ -2794,7 +2793,7 @@ impl Pausable for Vm {
|
|||
|
||||
self.vm
|
||||
.pause()
|
||||
.map_err(|e| MigratableError::Pause(anyhow!("Could not pause the VM: {}", e)))?;
|
||||
.map_err(|e| MigratableError::Pause(anyhow!("Could not pause the VM: {e}")))?;
|
||||
|
||||
*state = new_state;
|
||||
|
||||
|
|
@ -2808,27 +2807,27 @@ impl Pausable for Vm {
|
|||
let mut state = self
|
||||
.state
|
||||
.try_write()
|
||||
.map_err(|e| MigratableError::Resume(anyhow!("Could not get VM state: {}", e)))?;
|
||||
.map_err(|e| MigratableError::Resume(anyhow!("Could not get VM state: {e}")))?;
|
||||
let new_state = VmState::Running;
|
||||
|
||||
state
|
||||
.valid_transition(new_state)
|
||||
.map_err(|e| MigratableError::Resume(anyhow!("Invalid transition: {:?}", e)))?;
|
||||
.map_err(|e| MigratableError::Resume(anyhow!("Invalid transition: {e:?}")))?;
|
||||
|
||||
self.cpu_manager.lock().unwrap().resume()?;
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
{
|
||||
if let Some(clock) = &self.saved_clock {
|
||||
self.vm.set_clock(clock).map_err(|e| {
|
||||
MigratableError::Resume(anyhow!("Could not set VM clock: {}", e))
|
||||
})?;
|
||||
self.vm
|
||||
.set_clock(clock)
|
||||
.map_err(|e| MigratableError::Resume(anyhow!("Could not set VM clock: {e}")))?;
|
||||
}
|
||||
}
|
||||
|
||||
if current_state == VmState::Paused {
|
||||
self.vm
|
||||
.resume()
|
||||
.map_err(|e| MigratableError::Resume(anyhow!("Could not resume the VM: {}", e)))?;
|
||||
.map_err(|e| MigratableError::Resume(anyhow!("Could not resume the VM: {e}")))?;
|
||||
}
|
||||
|
||||
self.device_manager.lock().unwrap().resume()?;
|
||||
|
|
@ -2891,7 +2890,7 @@ impl Snapshottable for Vm {
|
|||
},
|
||||
)
|
||||
.map_err(|e| {
|
||||
MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e))
|
||||
MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {e:?}"))
|
||||
})?
|
||||
};
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue