misc: Use variables directly in format! string
Fix clippy warning `uninlined_format_args` reported by rustc
1.89.0 (29483883e 2025-08-04).
```console
warning: variables can be used directly in the `format!` string
--> block/src/lib.rs:649:17
|
649 | info!("{} failed to create io_uring instance: {}", error_msg, e);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
= note: `#[warn(clippy::uninlined_format_args)]` on by default
help: change this to
|
649 - info!("{} failed to create io_uring instance: {}", error_msg, e);
649 + info!("{error_msg} failed to create io_uring instance: {e}");
|
```
Signed-off-by: Ruoqing He <heruoqing@iscas.ac.cn>
This commit is contained in:
parent
ea83fe314c
commit
f2dfa7f6e0
56 changed files with 470 additions and 679 deletions
|
|
@ -163,7 +163,7 @@ struct BalloonEpollHandler {
|
|||
impl BalloonEpollHandler {
|
||||
fn signal(&self, int_type: VirtioInterruptType) -> result::Result<(), Error> {
|
||||
self.interrupt_cb.trigger(int_type).map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
Error::FailedSignal(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -363,28 +363,24 @@ impl EpollHelperHandler for BalloonEpollHandler {
|
|||
INFLATE_QUEUE_EVENT => {
|
||||
self.inflate_queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to get inflate queue event: {:?}",
|
||||
e
|
||||
"Failed to get inflate queue event: {e:?}"
|
||||
))
|
||||
})?;
|
||||
self.process_queue(0).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used inflate queue: {:?}",
|
||||
e
|
||||
"Failed to signal used inflate queue: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
DEFLATE_QUEUE_EVENT => {
|
||||
self.deflate_queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to get deflate queue event: {:?}",
|
||||
e
|
||||
"Failed to get deflate queue event: {e:?}"
|
||||
))
|
||||
})?;
|
||||
self.process_queue(1).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used deflate queue: {:?}",
|
||||
e
|
||||
"Failed to signal used deflate queue: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
@ -392,14 +388,12 @@ impl EpollHelperHandler for BalloonEpollHandler {
|
|||
if let Some(reporting_queue_evt) = self.reporting_queue_evt.as_ref() {
|
||||
reporting_queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to get reporting queue event: {:?}",
|
||||
e
|
||||
"Failed to get reporting queue event: {e:?}"
|
||||
))
|
||||
})?;
|
||||
self.process_reporting_queue(2).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used inflate queue: {:?}",
|
||||
e
|
||||
"Failed to signal used inflate queue: {e:?}"
|
||||
))
|
||||
})?;
|
||||
} else {
|
||||
|
|
@ -450,7 +444,7 @@ impl Balloon {
|
|||
let mut queue_sizes = vec![QUEUE_SIZE; MIN_NUM_QUEUES];
|
||||
|
||||
let (avail_features, acked_features, config, paused) = if let Some(state) = state {
|
||||
info!("Restoring virtio-balloon {}", id);
|
||||
info!("Restoring virtio-balloon {id}");
|
||||
(
|
||||
state.avail_features,
|
||||
state.acked_features,
|
||||
|
|
|
|||
|
|
@ -178,7 +178,7 @@ impl BlockEpollHandler {
|
|||
// "A device MUST set the status byte to VIRTIO_BLK_S_IOERR for a write request
|
||||
// if the VIRTIO_BLK_F_RO feature if offered, and MUST NOT write any data."
|
||||
if let Err(e) = Self::check_request(self.acked_features, request.request_type) {
|
||||
warn!("Request check failed: {:x?} {:?}", request, e);
|
||||
warn!("Request check failed: {request:x?} {e:?}");
|
||||
desc_chain
|
||||
.memory()
|
||||
.write_obj(VIRTIO_BLK_S_IOERR, request.status_addr)
|
||||
|
|
@ -257,7 +257,7 @@ impl BlockEpollHandler {
|
|||
let status = match result {
|
||||
Ok(_) => VIRTIO_BLK_S_OK,
|
||||
Err(e) => {
|
||||
warn!("Request failed: {:x?} {:?}", request, e);
|
||||
warn!("Request failed: {request:x?} {e:?}");
|
||||
VIRTIO_BLK_S_IOERR
|
||||
}
|
||||
};
|
||||
|
|
@ -285,10 +285,7 @@ impl BlockEpollHandler {
|
|||
Err(e) => {
|
||||
// If batch submission fails, report VIRTIO_BLK_S_IOERR for all requests.
|
||||
for (user_data, request) in batch_inflight_requests {
|
||||
warn!(
|
||||
"Request failed with batch submission: {:x?} {:?}",
|
||||
request, e
|
||||
);
|
||||
warn!("Request failed with batch submission: {request:x?} {e:?}");
|
||||
let desc_index = user_data;
|
||||
let mem = self.mem.memory();
|
||||
mem.write_obj(VIRTIO_BLK_S_IOERR as u8, request.status_addr)
|
||||
|
|
@ -311,14 +308,11 @@ impl BlockEpollHandler {
|
|||
.queue
|
||||
.needs_notification(self.mem.memory().deref())
|
||||
.map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to check needs_notification: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to check needs_notification: {e:?}"))
|
||||
})?
|
||||
{
|
||||
self.signal_used_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
|
||||
|
|
@ -327,7 +321,7 @@ impl BlockEpollHandler {
|
|||
|
||||
fn process_queue_submit_and_signal(&mut self) -> result::Result<(), EpollHelperError> {
|
||||
self.process_queue_submit().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue (submit): {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue (submit): {e:?}"))
|
||||
})?;
|
||||
|
||||
self.try_signal_used_queue()
|
||||
|
|
@ -493,7 +487,7 @@ impl BlockEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(self.queue_index))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -561,7 +555,7 @@ impl EpollHelperHandler for BlockEpollHandler {
|
|||
match ev_type {
|
||||
QUEUE_AVAIL_EVENT => {
|
||||
self.queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
|
||||
let rate_limit_reached = self.rate_limiter.as_ref().is_some_and(|r| r.is_blocked());
|
||||
|
|
@ -573,13 +567,12 @@ impl EpollHelperHandler for BlockEpollHandler {
|
|||
}
|
||||
COMPLETION_EVENT => {
|
||||
self.disk_image.notifier().read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
|
||||
self.process_queue_complete().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process queue (complete): {:?}",
|
||||
e
|
||||
"Failed to process queue (complete): {e:?}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -589,8 +582,7 @@ impl EpollHelperHandler for BlockEpollHandler {
|
|||
if !rate_limit_reached {
|
||||
self.process_queue_submit().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process queue (submit): {:?}",
|
||||
e
|
||||
"Failed to process queue (submit): {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
@ -602,8 +594,7 @@ impl EpollHelperHandler for BlockEpollHandler {
|
|||
// and restart processing the queue.
|
||||
rate_limiter.event_handler().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process rate limiter event: {:?}",
|
||||
e
|
||||
"Failed to process rate limiter event: {e:?}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -616,8 +607,7 @@ impl EpollHelperHandler for BlockEpollHandler {
|
|||
}
|
||||
_ => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Unexpected event: {}",
|
||||
ev_type
|
||||
"Unexpected event: {ev_type}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
|
@ -671,7 +661,7 @@ impl Block {
|
|||
) -> io::Result<Self> {
|
||||
let (disk_nsectors, avail_features, acked_features, config, paused) =
|
||||
if let Some(state) = state {
|
||||
info!("Restoring virtio-block {}", id);
|
||||
info!("Restoring virtio-block {id}");
|
||||
(
|
||||
state.disk_nsectors,
|
||||
state.avail_features,
|
||||
|
|
@ -685,9 +675,8 @@ impl Block {
|
|||
.map_err(|e| io::Error::other(format!("Failed getting disk size: {e}")))?;
|
||||
if disk_size % SECTOR_SIZE != 0 {
|
||||
warn!(
|
||||
"Disk size {} is not a multiple of sector size {}; \
|
||||
the remainder will not be visible to the guest.",
|
||||
disk_size, SECTOR_SIZE
|
||||
"Disk size {disk_size} is not a multiple of sector size {SECTOR_SIZE}; \
|
||||
the remainder will not be visible to the guest."
|
||||
);
|
||||
}
|
||||
|
||||
|
|
@ -708,7 +697,7 @@ impl Block {
|
|||
}
|
||||
|
||||
let topology = disk_image.topology();
|
||||
info!("Disk topology: {:?}", topology);
|
||||
info!("Disk topology: {topology:?}");
|
||||
|
||||
let logical_block_size = if topology.logical_block_size > 512 {
|
||||
topology.logical_block_size
|
||||
|
|
@ -939,7 +928,7 @@ impl VirtioDevice for Block {
|
|||
.disk_image
|
||||
.new_async_io(queue_size as u32)
|
||||
.map_err(|e| {
|
||||
error!("failed to create new AsyncIo: {}", e);
|
||||
error!("failed to create new AsyncIo: {e}");
|
||||
ActivateError::BadActivate
|
||||
})?,
|
||||
disk_nsectors: self.disk_nsectors,
|
||||
|
|
|
|||
|
|
@ -275,7 +275,7 @@ impl ConsoleEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(queue_index))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -328,7 +328,7 @@ impl ConsoleEpollHandler {
|
|||
}
|
||||
pty_write_out.store(true, Ordering::Release);
|
||||
out.flush()
|
||||
.map_err(|e| anyhow!("Failed to flush PTY: {:?}", e))
|
||||
.map_err(|e| anyhow!("Failed to flush PTY: {e:?}"))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
|
|
@ -365,52 +365,39 @@ impl EpollHelperHandler for ConsoleEpollHandler {
|
|||
match ev_type {
|
||||
INPUT_QUEUE_EVENT => {
|
||||
self.input_queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
let needs_notification = self.process_input_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process input queue : {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process input queue : {e:?}"))
|
||||
})?;
|
||||
if needs_notification {
|
||||
self.signal_used_queue(0).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
OUTPUT_QUEUE_EVENT => {
|
||||
self.output_queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
let needs_notification = self.process_output_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process output queue : {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process output queue : {e:?}"))
|
||||
})?;
|
||||
if needs_notification {
|
||||
self.signal_used_queue(1).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
CONFIG_EVENT => {
|
||||
self.config_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get config event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get config event: {e:?}"))
|
||||
})?;
|
||||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Config)
|
||||
.map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal console driver: {:?}",
|
||||
e
|
||||
"Failed to signal console driver: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
@ -420,10 +407,7 @@ impl EpollHelperHandler for ConsoleEpollHandler {
|
|||
.unwrap()
|
||||
.read_exact(&mut [0])
|
||||
.map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to get resize event: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get resize event: {e:?}"))
|
||||
})?;
|
||||
self.resizer.update_console_size();
|
||||
}
|
||||
|
|
@ -438,15 +422,13 @@ impl EpollHelperHandler for ConsoleEpollHandler {
|
|||
|
||||
let needs_notification = self.process_input_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process input queue : {:?}",
|
||||
e
|
||||
"Failed to process input queue : {e:?}"
|
||||
))
|
||||
})?;
|
||||
if needs_notification {
|
||||
self.signal_used_queue(0).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
"Failed to signal used queue: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
@ -614,7 +596,7 @@ impl Console {
|
|||
) -> io::Result<(Console, Arc<ConsoleResizer>)> {
|
||||
let (avail_features, acked_features, config, in_buffer, paused) = if let Some(state) = state
|
||||
{
|
||||
info!("Restoring virtio-console {}", id);
|
||||
info!("Restoring virtio-console {id}");
|
||||
(
|
||||
state.avail_features,
|
||||
state.acked_features,
|
||||
|
|
@ -733,7 +715,7 @@ impl VirtioDevice for Console {
|
|||
if self.common.feature_acked(VIRTIO_CONSOLE_F_SIZE)
|
||||
&& let Err(e) = interrupt_cb.trigger(VirtioInterruptType::Config)
|
||||
{
|
||||
error!("Failed to signal console driver: {:?}", e);
|
||||
error!("Failed to signal console driver: {e:?}");
|
||||
}
|
||||
|
||||
let (kill_evt, pause_evt) = self.common.dup_eventfds();
|
||||
|
|
|
|||
|
|
@ -233,13 +233,13 @@ impl VirtioCommon {
|
|||
}
|
||||
|
||||
let kill_evt = EventFd::new(EFD_NONBLOCK).map_err(|e| {
|
||||
error!("failed creating kill EventFd: {}", e);
|
||||
error!("failed creating kill EventFd: {e}");
|
||||
ActivateError::BadActivate
|
||||
})?;
|
||||
self.kill_evt = Some(kill_evt);
|
||||
|
||||
let pause_evt = EventFd::new(EFD_NONBLOCK).map_err(|e| {
|
||||
error!("failed creating pause EventFd: {}", e);
|
||||
error!("failed creating pause EventFd: {e}");
|
||||
ActivateError::BadActivate
|
||||
})?;
|
||||
self.pause_evt = Some(pause_evt);
|
||||
|
|
@ -265,7 +265,7 @@ impl VirtioCommon {
|
|||
if let Some(mut threads) = self.epoll_threads.take() {
|
||||
for t in threads.drain(..) {
|
||||
if let Err(e) = t.join() {
|
||||
error!("Error joining thread: {:?}", e);
|
||||
error!("Error joining thread: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -279,7 +279,7 @@ impl VirtioCommon {
|
|||
if let Some(mut threads) = self.epoll_threads.take() {
|
||||
for t in threads.drain(..) {
|
||||
if let Err(e) = t.join() {
|
||||
error!("Error joining thread: {:?}", e);
|
||||
error!("Error joining thread: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -394,7 +394,7 @@ impl Request {
|
|||
.memory()
|
||||
.read_obj(req_addr as GuestAddress)
|
||||
.map_err(Error::GuestMemory)?;
|
||||
debug!("Attach request 0x{:x?}", req);
|
||||
debug!("Attach request 0x{req:x?}");
|
||||
|
||||
// Copy the value to use it as a proper reference.
|
||||
let domain_id = req.domain;
|
||||
|
|
@ -448,7 +448,7 @@ impl Request {
|
|||
.memory()
|
||||
.read_obj(req_addr as GuestAddress)
|
||||
.map_err(Error::GuestMemory)?;
|
||||
debug!("Detach request 0x{:x?}", req);
|
||||
debug!("Detach request 0x{req:x?}");
|
||||
|
||||
// Copy the value to use it as a proper reference.
|
||||
let domain_id = req.domain;
|
||||
|
|
@ -467,7 +467,7 @@ impl Request {
|
|||
.memory()
|
||||
.read_obj(req_addr as GuestAddress)
|
||||
.map_err(Error::GuestMemory)?;
|
||||
debug!("Map request 0x{:x?}", req);
|
||||
debug!("Map request 0x{req:x?}");
|
||||
|
||||
// Copy the value to use it as a proper reference.
|
||||
let domain_id = req.domain;
|
||||
|
|
@ -530,7 +530,7 @@ impl Request {
|
|||
.memory()
|
||||
.read_obj(req_addr as GuestAddress)
|
||||
.map_err(Error::GuestMemory)?;
|
||||
debug!("Unmap request 0x{:x?}", req);
|
||||
debug!("Unmap request 0x{req:x?}");
|
||||
|
||||
// Copy the value to use it as a proper reference.
|
||||
let domain_id = req.domain;
|
||||
|
|
@ -586,7 +586,7 @@ impl Request {
|
|||
.memory()
|
||||
.read_obj(req_addr as GuestAddress)
|
||||
.map_err(Error::GuestMemory)?;
|
||||
debug!("Probe request 0x{:x?}", req);
|
||||
debug!("Probe request 0x{req:x?}");
|
||||
|
||||
let probe_prop = VirtioIommuProbeProperty {
|
||||
type_: VIRTIO_IOMMU_PROBE_T_RESV_MEM,
|
||||
|
|
@ -718,7 +718,7 @@ impl IommuEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(queue_index))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -746,28 +746,23 @@ impl EpollHelperHandler for IommuEpollHandler {
|
|||
match ev_type {
|
||||
REQUEST_Q_EVENT => {
|
||||
self.request_queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
|
||||
let needs_notification = self.request_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process request queue : {:?}",
|
||||
e
|
||||
"Failed to process request queue : {e:?}"
|
||||
))
|
||||
})?;
|
||||
if needs_notification {
|
||||
self.signal_used_queue(0).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Unexpected event: {}",
|
||||
ev_type
|
||||
"Unexpected event: {ev_type}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
|
@ -800,7 +795,7 @@ pub struct IommuMapping {
|
|||
|
||||
impl DmaRemapping for IommuMapping {
|
||||
fn translate_gva(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error> {
|
||||
debug!("Translate GVA addr 0x{:x}", addr);
|
||||
debug!("Translate GVA addr 0x{addr:x}");
|
||||
if let Some(domain_id) = self.endpoints.read().unwrap().get(&id) {
|
||||
if let Some(domain) = self.domains.read().unwrap().get(domain_id) {
|
||||
// Directly return identity mapping in case the domain is in
|
||||
|
|
@ -812,7 +807,7 @@ impl DmaRemapping for IommuMapping {
|
|||
for (&key, &value) in domain.mappings.iter() {
|
||||
if addr >= key && addr < key + value.size {
|
||||
let new_addr = addr - key + value.gpa;
|
||||
debug!("Into GPA addr 0x{:x}", new_addr);
|
||||
debug!("Into GPA addr 0x{new_addr:x}");
|
||||
return Ok(new_addr);
|
||||
}
|
||||
}
|
||||
|
|
@ -827,7 +822,7 @@ impl DmaRemapping for IommuMapping {
|
|||
}
|
||||
|
||||
fn translate_gpa(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error> {
|
||||
debug!("Translate GPA addr 0x{:x}", addr);
|
||||
debug!("Translate GPA addr 0x{addr:x}");
|
||||
if let Some(domain_id) = self.endpoints.read().unwrap().get(&id) {
|
||||
if let Some(domain) = self.domains.read().unwrap().get(domain_id) {
|
||||
// Directly return identity mapping in case the domain is in
|
||||
|
|
@ -839,7 +834,7 @@ impl DmaRemapping for IommuMapping {
|
|||
for (&key, &value) in domain.mappings.iter() {
|
||||
if addr >= value.gpa && addr < value.gpa + value.size {
|
||||
let new_addr = addr - value.gpa + key;
|
||||
debug!("Into GVA addr 0x{:x}", new_addr);
|
||||
debug!("Into GVA addr 0x{new_addr:x}");
|
||||
return Ok(new_addr);
|
||||
}
|
||||
}
|
||||
|
|
@ -908,7 +903,7 @@ impl Iommu {
|
|||
) -> io::Result<(Self, Arc<IommuMapping>)> {
|
||||
let (mut avail_features, acked_features, endpoints, domains, paused) =
|
||||
if let Some(state) = state {
|
||||
info!("Restoring virtio-iommu {}", id);
|
||||
info!("Restoring virtio-iommu {id}");
|
||||
(
|
||||
state.avail_features,
|
||||
state.acked_features,
|
||||
|
|
@ -1015,7 +1010,7 @@ impl Iommu {
|
|||
}
|
||||
|
||||
let bypass = self.config.bypass == 1;
|
||||
info!("Updating bypass mode to {}", bypass);
|
||||
info!("Updating bypass mode to {bypass}");
|
||||
self.mapping.bypass.store(bypass, Ordering::Release);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -235,8 +235,7 @@ impl VirtioMemConfig {
|
|||
fn resize(&mut self, size: u64) -> result::Result<(), Error> {
|
||||
if self.requested_size == size {
|
||||
return Err(Error::ResizeError(anyhow!(
|
||||
"new size 0x{:x} and requested_size are identical",
|
||||
size
|
||||
"new size 0x{size:x} and requested_size are identical"
|
||||
)));
|
||||
} else if size > self.region_size {
|
||||
return Err(Error::ResizeError(anyhow!(
|
||||
|
|
@ -426,7 +425,7 @@ impl MemEpollHandler {
|
|||
};
|
||||
if res != 0 {
|
||||
let err = io::Error::last_os_error();
|
||||
error!("Deallocating file space failed: {}", err);
|
||||
error!("Deallocating file space failed: {err}");
|
||||
return Err(Error::DiscardMemoryRange(err));
|
||||
}
|
||||
}
|
||||
|
|
@ -444,7 +443,7 @@ impl MemEpollHandler {
|
|||
};
|
||||
if res != 0 {
|
||||
let err = io::Error::last_os_error();
|
||||
error!("Advising kernel about pages range failed: {}", err);
|
||||
error!("Advising kernel about pages range failed: {err}");
|
||||
return Err(Error::DiscardMemoryRange(err));
|
||||
}
|
||||
}
|
||||
|
|
@ -476,7 +475,7 @@ impl MemEpollHandler {
|
|||
}
|
||||
|
||||
if !plug && let Err(e) = self.discard_memory_range(offset, size) {
|
||||
error!("failed discarding memory range: {:?}", e);
|
||||
error!("failed discarding memory range: {e:?}");
|
||||
return VIRTIO_MEM_RESP_ERROR;
|
||||
}
|
||||
|
||||
|
|
@ -506,10 +505,7 @@ impl MemEpollHandler {
|
|||
} else {
|
||||
for (_, handler) in handlers.iter() {
|
||||
if let Err(e) = handler.unmap(addr, size) {
|
||||
error!(
|
||||
"failed DMA unmapping addr 0x{:x} size 0x{:x}: {}",
|
||||
addr, size, e
|
||||
);
|
||||
error!("failed DMA unmapping addr 0x{addr:x} size 0x{size:x}: {e}");
|
||||
return VIRTIO_MEM_RESP_ERROR;
|
||||
}
|
||||
}
|
||||
|
|
@ -523,7 +519,7 @@ impl MemEpollHandler {
|
|||
fn unplug_all(&mut self) -> u16 {
|
||||
let mut config = self.config.lock().unwrap();
|
||||
if let Err(e) = self.discard_memory_range(0, config.region_size) {
|
||||
error!("failed discarding memory range: {:?}", e);
|
||||
error!("failed discarding memory range: {e:?}");
|
||||
return VIRTIO_MEM_RESP_ERROR;
|
||||
}
|
||||
|
||||
|
|
@ -592,7 +588,7 @@ impl MemEpollHandler {
|
|||
|
||||
fn signal(&self, int_type: VirtioInterruptType) -> result::Result<(), DeviceError> {
|
||||
self.interrupt_cb.trigger(int_type).map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -650,25 +646,21 @@ impl EpollHelperHandler for MemEpollHandler {
|
|||
match ev_type {
|
||||
QUEUE_AVAIL_EVENT => {
|
||||
self.queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
|
||||
let needs_notification = self.process_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {e:?}"))
|
||||
})?;
|
||||
if needs_notification {
|
||||
self.signal(VirtioInterruptType::Queue(0)).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Unexpected event: {}",
|
||||
ev_type
|
||||
"Unexpected event: {ev_type}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
|
@ -727,7 +719,7 @@ impl Mem {
|
|||
}
|
||||
|
||||
let (avail_features, acked_features, config, paused) = if let Some(state) = state {
|
||||
info!("Restoring virtio-mem {}", id);
|
||||
info!("Restoring virtio-mem {id}");
|
||||
*(blocks_state.lock().unwrap()) = state.blocks_state.clone();
|
||||
(
|
||||
state.avail_features,
|
||||
|
|
@ -801,14 +793,14 @@ impl Mem {
|
|||
pub fn resize(&mut self, size: u64) -> result::Result<(), Error> {
|
||||
let mut config = self.config.lock().unwrap();
|
||||
config.resize(size).map_err(|e| {
|
||||
Error::ResizeError(anyhow!("Failed to update virtio configuration: {:?}", e))
|
||||
Error::ResizeError(anyhow!("Failed to update virtio configuration: {e:?}"))
|
||||
})?;
|
||||
|
||||
if let Some(interrupt_cb) = self.interrupt_cb.as_ref() {
|
||||
interrupt_cb
|
||||
.trigger(VirtioInterruptType::Config)
|
||||
.map_err(|e| {
|
||||
Error::ResizeError(anyhow!("Failed to signal the guest about resize: {:?}", e))
|
||||
Error::ResizeError(anyhow!("Failed to signal the guest about resize: {e:?}"))
|
||||
})
|
||||
} else {
|
||||
Ok(())
|
||||
|
|
|
|||
|
|
@ -66,7 +66,7 @@ impl NetCtrlEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(queue_index))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -96,32 +96,28 @@ impl EpollHelperHandler for NetCtrlEpollHandler {
|
|||
let mem = self.mem.memory();
|
||||
self.queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to get control queue event: {:?}",
|
||||
e
|
||||
"Failed to get control queue event: {e:?}"
|
||||
))
|
||||
})?;
|
||||
self.ctrl_q
|
||||
.process(mem.deref(), &mut self.queue, self.access_platform.as_ref())
|
||||
.map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process control queue: {:?}",
|
||||
e
|
||||
"Failed to process control queue: {e:?}"
|
||||
))
|
||||
})?;
|
||||
match self.queue.needs_notification(mem.deref()) {
|
||||
Ok(true) => {
|
||||
self.signal_used_queue(self.queue_index).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Error signalling that control queue was used: {:?}",
|
||||
e
|
||||
"Error signalling that control queue was used: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
Ok(false) => {}
|
||||
Err(e) => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Error getting notification state of control queue: {}",
|
||||
e
|
||||
"Error getting notification state of control queue: {e}"
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
|
@ -184,7 +180,7 @@ impl NetEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(queue_index))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -192,7 +188,7 @@ impl NetEpollHandler {
|
|||
fn handle_rx_event(&mut self) -> result::Result<(), DeviceError> {
|
||||
let queue_evt = &self.queue_evt_pair.0;
|
||||
if let Err(e) = queue_evt.read() {
|
||||
error!("Failed to get rx queue event: {:?}", e);
|
||||
error!("Failed to get rx queue event: {e:?}");
|
||||
}
|
||||
|
||||
self.net.rx_desc_avail = true;
|
||||
|
|
@ -316,30 +312,29 @@ impl EpollHelperHandler for NetEpollHandler {
|
|||
RX_QUEUE_EVENT => {
|
||||
self.driver_awake = true;
|
||||
self.handle_rx_event().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing RX queue: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing RX queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
TX_QUEUE_EVENT => {
|
||||
let queue_evt = &self.queue_evt_pair.1;
|
||||
if let Err(e) = queue_evt.read() {
|
||||
error!("Failed to get tx queue event: {:?}", e);
|
||||
error!("Failed to get tx queue event: {e:?}");
|
||||
}
|
||||
self.driver_awake = true;
|
||||
self.handle_tx_event().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing TX queue: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing TX queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
TX_TAP_EVENT => {
|
||||
self.handle_tx_event().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Error processing TX queue (TAP event): {:?}",
|
||||
e
|
||||
"Error processing TX queue (TAP event): {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
RX_TAP_EVENT => {
|
||||
self.handle_rx_tap_event().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing tap queue: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing tap queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
RX_RATE_LIMITER_EVENT => {
|
||||
|
|
@ -348,8 +343,7 @@ impl EpollHelperHandler for NetEpollHandler {
|
|||
// TAP fd for further processing if some RX buffers are available
|
||||
rate_limiter.event_handler().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Error from 'rate_limiter.event_handler()': {:?}",
|
||||
e
|
||||
"Error from 'rate_limiter.event_handler()': {e:?}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -362,8 +356,7 @@ impl EpollHelperHandler for NetEpollHandler {
|
|||
)
|
||||
.map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Error register_listener with `RX_RATE_LIMITER_EVENT`: {:?}",
|
||||
e
|
||||
"Error register_listener with `RX_RATE_LIMITER_EVENT`: {e:?}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -381,14 +374,13 @@ impl EpollHelperHandler for NetEpollHandler {
|
|||
// and restart processing the queue.
|
||||
rate_limiter.event_handler().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Error from 'rate_limiter.event_handler()': {:?}",
|
||||
e
|
||||
"Error from 'rate_limiter.event_handler()': {e:?}"
|
||||
))
|
||||
})?;
|
||||
|
||||
self.driver_awake = true;
|
||||
self.process_tx().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing TX queue: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Error processing TX queue: {e:?}"))
|
||||
})?;
|
||||
} else {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
|
|
@ -398,8 +390,7 @@ impl EpollHelperHandler for NetEpollHandler {
|
|||
}
|
||||
_ => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Unexpected event: {}",
|
||||
ev_type
|
||||
"Unexpected event: {ev_type}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
|
@ -452,7 +443,7 @@ impl Net {
|
|||
let (avail_features, acked_features, config, queue_sizes, paused) = if let Some(state) =
|
||||
state
|
||||
{
|
||||
info!("Restoring virtio-net {}", id);
|
||||
info!("Restoring virtio-net {id}");
|
||||
(
|
||||
state.avail_features,
|
||||
state.acked_features,
|
||||
|
|
@ -670,7 +661,7 @@ impl Drop for Net {
|
|||
if let Some(thread) = self.ctrl_queue_epoll_thread.take()
|
||||
&& let Err(e) = thread.join()
|
||||
{
|
||||
error!("Error joining thread: {:?}", e);
|
||||
error!("Error joining thread: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -777,7 +768,7 @@ impl VirtioDevice for Net {
|
|||
#[cfg(not(fuzzing))]
|
||||
tap.set_offload(virtio_features_to_tap_offload(self.common.acked_features))
|
||||
.map_err(|e| {
|
||||
error!("Error programming tap offload: {:?}", e);
|
||||
error!("Error programming tap offload: {e:?}");
|
||||
ActivateError::BadActivate
|
||||
})?;
|
||||
|
||||
|
|
|
|||
|
|
@ -170,7 +170,7 @@ impl PmemEpollHandler {
|
|||
let status_code = match self.disk.sync_all() {
|
||||
Ok(()) => VIRTIO_PMEM_RESP_TYPE_OK,
|
||||
Err(e) => {
|
||||
error!("failed flushing disk image: {}", e);
|
||||
error!("failed flushing disk image: {e}");
|
||||
VIRTIO_PMEM_RESP_TYPE_EIO
|
||||
}
|
||||
};
|
||||
|
|
@ -179,7 +179,7 @@ impl PmemEpollHandler {
|
|||
match desc_chain.memory().write_obj(resp, req.status_addr) {
|
||||
Ok(_) => size_of::<VirtioPmemResp>() as u32,
|
||||
Err(e) => {
|
||||
error!("bad guest memory address: {}", e);
|
||||
error!("bad guest memory address: {e}");
|
||||
0
|
||||
}
|
||||
}
|
||||
|
|
@ -190,7 +190,7 @@ impl PmemEpollHandler {
|
|||
0
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to parse available descriptor chain: {:?}", e);
|
||||
error!("Failed to parse available descriptor chain: {e:?}");
|
||||
0
|
||||
}
|
||||
};
|
||||
|
|
@ -208,7 +208,7 @@ impl PmemEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(0))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -236,26 +236,22 @@ impl EpollHelperHandler for PmemEpollHandler {
|
|||
match ev_type {
|
||||
QUEUE_AVAIL_EVENT => {
|
||||
self.queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
|
||||
let needs_notification = self.process_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {e:?}"))
|
||||
})?;
|
||||
|
||||
if needs_notification {
|
||||
self.signal_used_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Unexpected event: {}",
|
||||
ev_type
|
||||
"Unexpected event: {ev_type}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
|
@ -298,7 +294,7 @@ impl Pmem {
|
|||
state: Option<PmemState>,
|
||||
) -> io::Result<Pmem> {
|
||||
let (avail_features, acked_features, config, paused) = if let Some(state) = state {
|
||||
info!("Restoring virtio-pmem {}", id);
|
||||
info!("Restoring virtio-pmem {id}");
|
||||
(
|
||||
state.avail_features,
|
||||
state.acked_features,
|
||||
|
|
@ -395,7 +391,7 @@ impl VirtioDevice for Pmem {
|
|||
let (kill_evt, pause_evt) = self.common.dup_eventfds();
|
||||
if let Some(disk) = self.disk.as_ref() {
|
||||
let disk = disk.try_clone().map_err(|e| {
|
||||
error!("failed cloning pmem disk: {}", e);
|
||||
error!("failed cloning pmem disk: {e}");
|
||||
ActivateError::BadActivate
|
||||
})?;
|
||||
|
||||
|
|
|
|||
|
|
@ -95,7 +95,7 @@ impl RngEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(0))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -123,24 +123,20 @@ impl EpollHelperHandler for RngEpollHandler {
|
|||
match ev_type {
|
||||
QUEUE_AVAIL_EVENT => {
|
||||
self.queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
let needs_notification = self.process_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {e:?}"))
|
||||
})?;
|
||||
if needs_notification {
|
||||
self.signal_used_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Unexpected event: {}",
|
||||
ev_type
|
||||
"Unexpected event: {ev_type}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
|
@ -176,7 +172,7 @@ impl Rng {
|
|||
let random_file = File::open(path)?;
|
||||
|
||||
let (avail_features, acked_features, paused) = if let Some(state) = state {
|
||||
info!("Restoring virtio-rng {}", id);
|
||||
info!("Restoring virtio-rng {id}");
|
||||
(state.avail_features, state.acked_features, true)
|
||||
} else {
|
||||
let mut avail_features = 1u64 << VIRTIO_F_VERSION_1;
|
||||
|
|
@ -257,7 +253,7 @@ impl VirtioDevice for Rng {
|
|||
|
||||
if let Some(file) = self.random_file.as_ref() {
|
||||
let random_file = file.try_clone().map_err(|e| {
|
||||
error!("failed cloning rng source: {}", e);
|
||||
error!("failed cloning rng source: {e}");
|
||||
ActivateError::BadActivate
|
||||
})?;
|
||||
|
||||
|
|
|
|||
|
|
@ -39,18 +39,18 @@ where
|
|||
if !seccomp_filter.is_empty()
|
||||
&& let Err(e) = apply_filter(&seccomp_filter)
|
||||
{
|
||||
error!("Error applying seccomp filter: {:?}", e);
|
||||
error!("Error applying seccomp filter: {e:?}");
|
||||
thread_exit_evt.write(1).ok();
|
||||
return;
|
||||
}
|
||||
match std::panic::catch_unwind(AssertUnwindSafe(f)) {
|
||||
Err(_) => {
|
||||
error!("{} thread panicked", thread_name);
|
||||
error!("{thread_name} thread panicked");
|
||||
thread_exit_evt.write(1).ok();
|
||||
}
|
||||
Ok(r) => {
|
||||
if let Err(e) = r {
|
||||
error!("Error running worker: {:?}", e);
|
||||
error!("Error running worker: {e:?}");
|
||||
thread_exit_evt.write(1).ok();
|
||||
}
|
||||
}
|
||||
|
|
@ -58,7 +58,7 @@ where
|
|||
})
|
||||
.map(|thread| epoll_threads.push(thread))
|
||||
.map_err(|e| {
|
||||
error!("Failed to spawn thread for {}: {}", name, e);
|
||||
error!("Failed to spawn thread for {name}: {e}");
|
||||
ActivateError::ThreadSpawn(e)
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -211,30 +211,30 @@ impl VirtioPciCommonConfig {
|
|||
}
|
||||
|
||||
fn read_common_config_byte(&self, offset: u64) -> u8 {
|
||||
debug!("read_common_config_byte: offset 0x{:x}", offset);
|
||||
debug!("read_common_config_byte: offset 0x{offset:x}");
|
||||
// The driver is only allowed to do aligned, properly sized access.
|
||||
match offset {
|
||||
0x14 => self.driver_status,
|
||||
0x15 => self.config_generation,
|
||||
_ => {
|
||||
warn!("invalid virtio config byte read: 0x{:x}", offset);
|
||||
warn!("invalid virtio config byte read: 0x{offset:x}");
|
||||
0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn write_common_config_byte(&mut self, offset: u64, value: u8) {
|
||||
debug!("write_common_config_byte: offset 0x{:x}", offset);
|
||||
debug!("write_common_config_byte: offset 0x{offset:x}");
|
||||
match offset {
|
||||
0x14 => self.driver_status = value,
|
||||
_ => {
|
||||
warn!("invalid virtio config byte write: 0x{:x}", offset);
|
||||
warn!("invalid virtio config byte write: 0x{offset:x}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn read_common_config_word(&self, offset: u64, queues: &[Queue]) -> u16 {
|
||||
debug!("read_common_config_word: offset 0x{:x}", offset);
|
||||
debug!("read_common_config_word: offset 0x{offset:x}");
|
||||
match offset {
|
||||
0x10 => self.msix_config.load(Ordering::Acquire),
|
||||
0x12 => queues.len() as u16, // num_queues
|
||||
|
|
@ -244,14 +244,14 @@ impl VirtioPciCommonConfig {
|
|||
0x1c => u16::from(self.with_queue(queues, |q| q.ready()).unwrap_or(false)),
|
||||
0x1e => self.queue_select, // notify_off
|
||||
_ => {
|
||||
warn!("invalid virtio register word read: 0x{:x}", offset);
|
||||
warn!("invalid virtio register word read: 0x{offset:x}");
|
||||
0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn write_common_config_word(&mut self, offset: u64, value: u16, queues: &mut [Queue]) {
|
||||
debug!("write_common_config_word: offset 0x{:x}", offset);
|
||||
debug!("write_common_config_word: offset 0x{offset:x}");
|
||||
match offset {
|
||||
0x10 => self.msix_config.store(value, Ordering::Release),
|
||||
0x16 => self.queue_select = value,
|
||||
|
|
@ -286,13 +286,13 @@ impl VirtioPciCommonConfig {
|
|||
}
|
||||
}),
|
||||
_ => {
|
||||
warn!("invalid virtio register word write: 0x{:x}", offset);
|
||||
warn!("invalid virtio register word write: 0x{offset:x}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn read_common_config_dword(&self, offset: u64, device: Arc<Mutex<dyn VirtioDevice>>) -> u32 {
|
||||
debug!("read_common_config_dword: offset 0x{:x}", offset);
|
||||
debug!("read_common_config_dword: offset 0x{offset:x}");
|
||||
match offset {
|
||||
0x00 => self.device_feature_select,
|
||||
0x04 => {
|
||||
|
|
@ -307,7 +307,7 @@ impl VirtioPciCommonConfig {
|
|||
}
|
||||
0x08 => self.driver_feature_select,
|
||||
_ => {
|
||||
warn!("invalid virtio register dword read: 0x{:x}", offset);
|
||||
warn!("invalid virtio register dword read: 0x{offset:x}");
|
||||
0
|
||||
}
|
||||
}
|
||||
|
|
@ -320,7 +320,7 @@ impl VirtioPciCommonConfig {
|
|||
queues: &mut [Queue],
|
||||
device: Arc<Mutex<dyn VirtioDevice>>,
|
||||
) {
|
||||
debug!("write_common_config_dword: offset 0x{:x}", offset);
|
||||
debug!("write_common_config_dword: offset 0x{offset:x}");
|
||||
|
||||
match offset {
|
||||
0x00 => self.device_feature_select = value,
|
||||
|
|
@ -344,18 +344,18 @@ impl VirtioPciCommonConfig {
|
|||
0x30 => self.with_queue_mut(queues, |q| q.set_used_ring_address(Some(value), None)),
|
||||
0x34 => self.with_queue_mut(queues, |q| q.set_used_ring_address(None, Some(value))),
|
||||
_ => {
|
||||
warn!("invalid virtio register dword write: 0x{:x}", offset);
|
||||
warn!("invalid virtio register dword write: 0x{offset:x}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn read_common_config_qword(&self, _offset: u64) -> u64 {
|
||||
debug!("read_common_config_qword: offset 0x{:x}", _offset);
|
||||
debug!("read_common_config_qword: offset 0x{_offset:x}");
|
||||
0 // Assume the guest has no reason to read write-only registers.
|
||||
}
|
||||
|
||||
fn write_common_config_qword(&mut self, offset: u64, value: u64, queues: &mut [Queue]) {
|
||||
debug!("write_common_config_qword: offset 0x{:x}", offset);
|
||||
debug!("write_common_config_qword: offset 0x{offset:x}");
|
||||
|
||||
let low = Some((value & 0xffff_ffff) as u32);
|
||||
let high = Some((value >> 32) as u32);
|
||||
|
|
@ -365,7 +365,7 @@ impl VirtioPciCommonConfig {
|
|||
0x28 => self.with_queue_mut(queues, |q| q.set_avail_ring_address(low, high)),
|
||||
0x30 => self.with_queue_mut(queues, |q| q.set_used_ring_address(low, high)),
|
||||
_ => {
|
||||
warn!("invalid virtio register qword write: 0x{:x}", offset);
|
||||
warn!("invalid virtio register qword write: 0x{offset:x}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -394,10 +394,7 @@ impl VirtioPciDevice {
|
|||
let mut queue_evts = Vec::new();
|
||||
for _ in locked_device.queue_max_sizes().iter() {
|
||||
queue_evts.push(EventFd::new(EFD_NONBLOCK).map_err(|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed creating eventfd: {}",
|
||||
e
|
||||
))
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!("Failed creating eventfd: {e}"))
|
||||
})?)
|
||||
}
|
||||
let num_queues = locked_device.queue_max_sizes().len();
|
||||
|
|
@ -421,16 +418,14 @@ impl VirtioPciDevice {
|
|||
})
|
||||
.map_err(|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed creating MSI interrupt group: {}",
|
||||
e
|
||||
"Failed creating MSI interrupt group: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
let msix_state = vm_migration::state_from_id(snapshot.as_ref(), pci::MSIX_CONFIG_ID)
|
||||
.map_err(|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed to get MsixConfigState from Snapshot: {}",
|
||||
e
|
||||
"Failed to get MsixConfigState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -469,8 +464,7 @@ impl VirtioPciDevice {
|
|||
vm_migration::state_from_id(snapshot.as_ref(), pci::PCI_CONFIGURATION_ID).map_err(
|
||||
|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed to get PciConfigurationState from Snapshot: {}",
|
||||
e
|
||||
"Failed to get PciConfigurationState from Snapshot: {e}"
|
||||
))
|
||||
},
|
||||
)?;
|
||||
|
|
@ -493,8 +487,7 @@ impl VirtioPciDevice {
|
|||
vm_migration::state_from_id(snapshot.as_ref(), VIRTIO_PCI_COMMON_CONFIG_ID).map_err(
|
||||
|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed to get VirtioPciCommonConfigState from Snapshot: {}",
|
||||
e
|
||||
"Failed to get VirtioPciCommonConfigState from Snapshot: {e}"
|
||||
))
|
||||
},
|
||||
)?;
|
||||
|
|
@ -522,8 +515,7 @@ impl VirtioPciDevice {
|
|||
.transpose()
|
||||
.map_err(|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed to get VirtioPciDeviceState from Snapshot: {}",
|
||||
e
|
||||
"Failed to get VirtioPciDeviceState from Snapshot: {e}"
|
||||
))
|
||||
})?;
|
||||
|
||||
|
|
@ -613,8 +605,7 @@ impl VirtioPciDevice {
|
|||
{
|
||||
virtio_pci_device.activate().map_err(|e| {
|
||||
VirtioPciDeviceError::CreateVirtioPciDevice(anyhow!(
|
||||
"Failed activating the device: {}",
|
||||
e
|
||||
"Failed activating the device: {e}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
@ -795,7 +786,7 @@ impl VirtioPciDevice {
|
|||
}
|
||||
|
||||
if !queue.is_valid(self.memory.memory().deref()) {
|
||||
error!("Queue {} is not valid", queue_index);
|
||||
error!("Queue {queue_index} is not valid");
|
||||
}
|
||||
|
||||
queues.push((
|
||||
|
|
@ -1182,7 +1173,7 @@ impl PciDevice for VirtioPciDevice {
|
|||
}
|
||||
// Handled with ioeventfds.
|
||||
#[cfg(not(feature = "sev_snp"))]
|
||||
error!("Unexpected write to notification BAR: offset = 0x{:x}", o);
|
||||
error!("Unexpected write to notification BAR: offset = 0x{o:x}");
|
||||
}
|
||||
o if (MSIX_TABLE_BAR_OFFSET..MSIX_TABLE_BAR_OFFSET + MSIX_TABLE_SIZE).contains(&o) => {
|
||||
if let Some(msix_config) = &self.msix_config {
|
||||
|
|
|
|||
|
|
@ -132,7 +132,7 @@ impl Vdpa {
|
|||
backend_features,
|
||||
paused,
|
||||
) = if let Some(state) = state {
|
||||
info!("Restoring vDPA {}", id);
|
||||
info!("Restoring vDPA {id}");
|
||||
|
||||
vhost.set_backend_features_acked(state.backend_features);
|
||||
vhost
|
||||
|
|
@ -404,14 +404,14 @@ impl VirtioDevice for Vdpa {
|
|||
fn read_config(&self, offset: u64, data: &mut [u8]) {
|
||||
assert!(self.vhost.is_some());
|
||||
if let Err(e) = self.vhost.as_ref().unwrap().get_config(offset as u32, data) {
|
||||
error!("Failed reading virtio config: {}", e);
|
||||
error!("Failed reading virtio config: {e}");
|
||||
}
|
||||
}
|
||||
|
||||
fn write_config(&mut self, offset: u64, data: &[u8]) {
|
||||
assert!(self.vhost.is_some());
|
||||
if let Err(e) = self.vhost.as_ref().unwrap().set_config(offset as u32, data) {
|
||||
error!("Failed writing virtio config: {}", e);
|
||||
error!("Failed writing virtio config: {e}");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -433,7 +433,7 @@ impl VirtioDevice for Vdpa {
|
|||
|
||||
fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
|
||||
if let Err(e) = self.reset_vdpa() {
|
||||
error!("Failed to reset vhost-vdpa: {:?}", e);
|
||||
error!("Failed to reset vhost-vdpa: {e:?}");
|
||||
return None;
|
||||
}
|
||||
|
||||
|
|
@ -487,7 +487,7 @@ impl Snapshottable for Vdpa {
|
|||
}
|
||||
|
||||
let snapshot = Snapshot::new_from_state(&self.state().map_err(|e| {
|
||||
MigratableError::Snapshot(anyhow!("Error snapshotting vDPA device: {:?}", e))
|
||||
MigratableError::Snapshot(anyhow!("Error snapshotting vDPA device: {e:?}"))
|
||||
})?)?;
|
||||
|
||||
// Force the vhost handler to be dropped in order to close the vDPA
|
||||
|
|
@ -509,7 +509,7 @@ impl Migratable for Vdpa {
|
|||
if self.backend_features & (1 << VHOST_BACKEND_F_SUSPEND) != 0 {
|
||||
assert!(self.vhost.is_some());
|
||||
self.vhost.as_ref().unwrap().suspend().map_err(|e| {
|
||||
MigratableError::StartMigration(anyhow!("Error suspending vDPA device: {:?}", e))
|
||||
MigratableError::StartMigration(anyhow!("Error suspending vDPA device: {e:?}"))
|
||||
})
|
||||
} else {
|
||||
Err(MigratableError::StartMigration(anyhow!(
|
||||
|
|
@ -565,7 +565,7 @@ impl<M: GuestAddressSpace + Sync + Send> ExternalDmaMapping for VdpaDmaMapping<M
|
|||
}
|
||||
|
||||
fn unmap(&self, iova: u64, size: u64) -> std::result::Result<(), std::io::Error> {
|
||||
debug!("DMA unmap iova 0x{:x} size 0x{:x}", iova, size);
|
||||
debug!("DMA unmap iova 0x{iova:x} size 0x{size:x}");
|
||||
self.device
|
||||
.lock()
|
||||
.unwrap()
|
||||
|
|
|
|||
|
|
@ -81,7 +81,7 @@ impl Blk {
|
|||
config,
|
||||
paused,
|
||||
) = if let Some(state) = state {
|
||||
info!("Restoring vhost-user-block {}", id);
|
||||
info!("Restoring vhost-user-block {id}");
|
||||
|
||||
vu.set_protocol_features_vhost_user(
|
||||
state.acked_features,
|
||||
|
|
@ -135,8 +135,7 @@ impl Blk {
|
|||
|
||||
if num_queues > backend_num_queues {
|
||||
error!(
|
||||
"vhost-user-blk requested too many queues ({}) since the backend only supports {}\n",
|
||||
num_queues, backend_num_queues
|
||||
"vhost-user-blk requested too many queues ({num_queues}) since the backend only supports {backend_num_queues}\n"
|
||||
);
|
||||
return Err(Error::BadQueueNum);
|
||||
}
|
||||
|
|
@ -216,13 +215,13 @@ impl Drop for Blk {
|
|||
if let Some(kill_evt) = self.common.kill_evt.take()
|
||||
&& let Err(e) = kill_evt.write(1)
|
||||
{
|
||||
error!("failed to kill vhost-user-blk: {:?}", e);
|
||||
error!("failed to kill vhost-user-blk: {e:?}");
|
||||
}
|
||||
self.common.wait_for_epoll_threads();
|
||||
if let Some(thread) = self.epoll_thread.take()
|
||||
&& let Err(e) = thread.join()
|
||||
{
|
||||
error!("Error joining thread: {:?}", e);
|
||||
error!("Error joining thread: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -275,7 +274,7 @@ impl VirtioDevice for Blk {
|
|||
.set_config(offset as u32, VhostUserConfigFlags::WRITABLE, data)
|
||||
.map_err(Error::VhostUserSetConfig)
|
||||
{
|
||||
error!("Failed setting vhost-user-blk configuration: {:?}", e);
|
||||
error!("Failed setting vhost-user-blk configuration: {e:?}");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -331,7 +330,7 @@ impl VirtioDevice for Blk {
|
|||
if let Some(vu) = &self.vu_common.vu
|
||||
&& let Err(e) = vu.lock().unwrap().reset_vhost_user()
|
||||
{
|
||||
error!("Failed to reset vhost-user daemon: {:?}", e);
|
||||
error!("Failed to reset vhost-user daemon: {e:?}");
|
||||
return None;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -109,7 +109,7 @@ impl Fs {
|
|||
config,
|
||||
paused,
|
||||
) = if let Some(state) = state {
|
||||
info!("Restoring vhost-user-fs {}", id);
|
||||
info!("Restoring vhost-user-fs {id}");
|
||||
|
||||
vu.set_protocol_features_vhost_user(
|
||||
state.acked_features,
|
||||
|
|
@ -148,8 +148,7 @@ impl Fs {
|
|||
|
||||
if num_queues > backend_num_queues {
|
||||
error!(
|
||||
"vhost-user-fs requested too many queues ({}) since the backend only supports {}\n",
|
||||
num_queues, backend_num_queues
|
||||
"vhost-user-fs requested too many queues ({num_queues}) since the backend only supports {backend_num_queues}\n"
|
||||
);
|
||||
return Err(Error::BadQueueNum);
|
||||
}
|
||||
|
|
@ -230,7 +229,7 @@ impl Drop for Fs {
|
|||
if let Some(thread) = self.epoll_thread.take()
|
||||
&& let Err(e) = thread.join()
|
||||
{
|
||||
error!("Error joining thread: {:?}", e);
|
||||
error!("Error joining thread: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -311,7 +310,7 @@ impl VirtioDevice for Fs {
|
|||
if let Some(vu) = &self.vu_common.vu
|
||||
&& let Err(e) = vu.lock().unwrap().reset_vhost_user()
|
||||
{
|
||||
error!("Failed to reset vhost-user daemon: {:?}", e);
|
||||
error!("Failed to reset vhost-user daemon: {e:?}");
|
||||
return None;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -265,8 +265,7 @@ impl<S: VhostUserFrontendReqHandler> EpollHelperHandler for VhostUserEpollHandle
|
|||
HUP_CONNECTION_EVENT => {
|
||||
self.reconnect(helper).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"failed to reconnect vhost-user backend: {:?}",
|
||||
e
|
||||
"failed to reconnect vhost-user backend: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
@ -274,8 +273,7 @@ impl<S: VhostUserFrontendReqHandler> EpollHelperHandler for VhostUserEpollHandle
|
|||
if let Some(backend_req_handler) = self.backend_req_handler.as_mut() {
|
||||
backend_req_handler.handle_request().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to handle request from vhost-user backend: {:?}",
|
||||
e
|
||||
"Failed to handle request from vhost-user backend: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
@ -412,7 +410,7 @@ impl VhostUserCommon {
|
|||
pub fn pause(&mut self) -> std::result::Result<(), MigratableError> {
|
||||
if let Some(vu) = &self.vu {
|
||||
vu.lock().unwrap().pause_vhost_user().map_err(|e| {
|
||||
MigratableError::Pause(anyhow!("Error pausing vhost-user backend: {:?}", e))
|
||||
MigratableError::Pause(anyhow!("Error pausing vhost-user backend: {e:?}"))
|
||||
})
|
||||
} else {
|
||||
Ok(())
|
||||
|
|
@ -422,7 +420,7 @@ impl VhostUserCommon {
|
|||
pub fn resume(&mut self) -> std::result::Result<(), MigratableError> {
|
||||
if let Some(vu) = &self.vu {
|
||||
vu.lock().unwrap().resume_vhost_user().map_err(|e| {
|
||||
MigratableError::Resume(anyhow!("Error resuming vhost-user backend: {:?}", e))
|
||||
MigratableError::Resume(anyhow!("Error resuming vhost-user backend: {e:?}"))
|
||||
})
|
||||
} else {
|
||||
Ok(())
|
||||
|
|
@ -454,8 +452,7 @@ impl VhostUserCommon {
|
|||
.start_dirty_log(last_ram_addr)
|
||||
.map_err(|e| {
|
||||
MigratableError::StartDirtyLog(anyhow!(
|
||||
"Error starting migration for vhost-user backend: {:?}",
|
||||
e
|
||||
"Error starting migration for vhost-user backend: {e:?}"
|
||||
))
|
||||
})
|
||||
} else {
|
||||
|
|
@ -472,8 +469,7 @@ impl VhostUserCommon {
|
|||
if let Some(vu) = &self.vu {
|
||||
vu.lock().unwrap().stop_dirty_log().map_err(|e| {
|
||||
MigratableError::StopDirtyLog(anyhow!(
|
||||
"Error stopping migration for vhost-user backend: {:?}",
|
||||
e
|
||||
"Error stopping migration for vhost-user backend: {e:?}"
|
||||
))
|
||||
})
|
||||
} else {
|
||||
|
|
@ -490,8 +486,7 @@ impl VhostUserCommon {
|
|||
let last_ram_addr = guest_memory.memory().last_addr().raw_value();
|
||||
vu.lock().unwrap().dirty_log(last_ram_addr).map_err(|e| {
|
||||
MigratableError::DirtyLog(anyhow!(
|
||||
"Error retrieving dirty ranges from vhost-user backend: {:?}",
|
||||
e
|
||||
"Error retrieving dirty ranges from vhost-user backend: {e:?}"
|
||||
))
|
||||
})
|
||||
} else {
|
||||
|
|
@ -518,8 +513,7 @@ impl VhostUserCommon {
|
|||
if let Some(kill_evt) = kill_evt {
|
||||
kill_evt.write(1).map_err(|e| {
|
||||
MigratableError::CompleteMigration(anyhow!(
|
||||
"Error killing vhost-user thread: {:?}",
|
||||
e
|
||||
"Error killing vhost-user thread: {e:?}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -90,7 +90,7 @@ impl Net {
|
|||
config,
|
||||
paused,
|
||||
) = if let Some(state) = state {
|
||||
info!("Restoring vhost-user-net {}", id);
|
||||
info!("Restoring vhost-user-net {id}");
|
||||
|
||||
// The backend acknowledged features must not contain
|
||||
// VIRTIO_NET_F_MAC since we don't expect the backend
|
||||
|
|
@ -169,8 +169,7 @@ impl Net {
|
|||
|
||||
if num_queues > backend_num_queues {
|
||||
error!(
|
||||
"vhost-user-net requested too many queues ({}) since the backend only supports {}\n",
|
||||
num_queues, backend_num_queues
|
||||
"vhost-user-net requested too many queues ({num_queues}) since the backend only supports {backend_num_queues}\n"
|
||||
);
|
||||
return Err(Error::BadQueueNum);
|
||||
}
|
||||
|
|
@ -246,7 +245,7 @@ impl Drop for Net {
|
|||
if let Some(kill_evt) = self.common.kill_evt.take()
|
||||
&& let Err(e) = kill_evt.write(1)
|
||||
{
|
||||
error!("failed to kill vhost-user-net: {:?}", e);
|
||||
error!("failed to kill vhost-user-net: {e:?}");
|
||||
}
|
||||
|
||||
self.common.wait_for_epoll_threads();
|
||||
|
|
@ -254,13 +253,13 @@ impl Drop for Net {
|
|||
if let Some(thread) = self.epoll_thread.take()
|
||||
&& let Err(e) = thread.join()
|
||||
{
|
||||
error!("Error joining thread: {:?}", e);
|
||||
error!("Error joining thread: {e:?}");
|
||||
}
|
||||
|
||||
if let Some(thread) = self.ctrl_queue_epoll_thread.take()
|
||||
&& let Err(e) = thread.join()
|
||||
{
|
||||
error!("Error joining thread: {:?}", e);
|
||||
error!("Error joining thread: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -386,7 +385,7 @@ impl VirtioDevice for Net {
|
|||
if let Some(vu) = &self.vu_common.vu
|
||||
&& let Err(e) = vu.lock().unwrap().reset_vhost_user()
|
||||
{
|
||||
error!("Failed to reset vhost-user daemon: {:?}", e);
|
||||
error!("Failed to reset vhost-user daemon: {e:?}");
|
||||
return None;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -409,10 +409,7 @@ impl VhostUserHandle {
|
|||
}
|
||||
};
|
||||
|
||||
error!(
|
||||
"Failed connecting the backend after trying for 1 minute: {:?}",
|
||||
err
|
||||
);
|
||||
error!("Failed connecting the backend after trying for 1 minute: {err:?}");
|
||||
Err(Error::VhostUserConnect)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -108,7 +108,7 @@ where
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(queue_index))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -137,7 +137,7 @@ where
|
|||
}
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("vsock: RX queue error: {:?}", e);
|
||||
warn!("vsock: RX queue error: {e:?}");
|
||||
0
|
||||
}
|
||||
};
|
||||
|
|
@ -170,7 +170,7 @@ where
|
|||
) {
|
||||
Ok(pkt) => pkt,
|
||||
Err(e) => {
|
||||
error!("vsock: error reading TX packet: {:?}", e);
|
||||
error!("vsock: error reading TX packet: {e:?}");
|
||||
self.queues[1]
|
||||
.add_used(desc_chain.memory(), desc_chain.head_index(), 0)
|
||||
.map_err(DeviceError::QueueAddUsed)?;
|
||||
|
|
@ -226,7 +226,7 @@ where
|
|||
Some(evset) => evset,
|
||||
None => {
|
||||
let evbits = event.events;
|
||||
warn!("epoll: ignoring unknown event set: 0x{:x}", evbits);
|
||||
warn!("epoll: ignoring unknown event set: 0x{evbits:x}");
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
|
@ -236,25 +236,22 @@ where
|
|||
RX_QUEUE_EVENT => {
|
||||
debug!("vsock: RX queue event");
|
||||
self.queue_evts[0].read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get RX queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get RX queue event: {e:?}"))
|
||||
})?;
|
||||
if self.backend.read().unwrap().has_pending_rx() {
|
||||
self.process_rx().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process RX queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process RX queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
TX_QUEUE_EVENT => {
|
||||
debug!("vsock: TX queue event");
|
||||
self.queue_evts[1].read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get TX queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get TX queue event: {e:?}"))
|
||||
})?;
|
||||
|
||||
self.process_tx().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process TX queue: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process TX queue: {e:?}"))
|
||||
})?;
|
||||
|
||||
// The backend may have queued up responses to the packets we sent during TX queue
|
||||
|
|
@ -262,17 +259,14 @@ where
|
|||
// into RX buffers.
|
||||
if self.backend.read().unwrap().has_pending_rx() {
|
||||
self.process_rx().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process RX queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process RX queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
EVT_QUEUE_EVENT => {
|
||||
debug!("vsock: EVT queue event");
|
||||
self.queue_evts[2].read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get EVT queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get EVT queue event: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
BACKEND_EVENT => {
|
||||
|
|
@ -284,14 +278,11 @@ where
|
|||
// returning an error) at some point in the past, now is the time to try walking the
|
||||
// TX queue again.
|
||||
self.process_tx().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process TX queue: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process TX queue: {e:?}"))
|
||||
})?;
|
||||
if self.backend.read().unwrap().has_pending_rx() {
|
||||
self.process_rx().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to process RX queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process RX queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
|
|
@ -341,7 +332,7 @@ where
|
|||
state: Option<VsockState>,
|
||||
) -> io::Result<Vsock<B>> {
|
||||
let (avail_features, acked_features, paused) = if let Some(state) = state {
|
||||
info!("Restoring virtio-vsock {}", id);
|
||||
info!("Restoring virtio-vsock {id}");
|
||||
(state.avail_features, state.acked_features, true)
|
||||
} else {
|
||||
let mut avail_features = (1u64 << VIRTIO_F_VERSION_1) | (1u64 << VIRTIO_F_IN_ORDER);
|
||||
|
|
|
|||
|
|
@ -335,7 +335,7 @@ impl VsockEpollListener for VsockMuxer {
|
|||
// appropriate to retry, by calling into epoll_wait().
|
||||
continue;
|
||||
}
|
||||
warn!("vsock: failed to consume muxer epoll event: {}", e);
|
||||
warn!("vsock: failed to consume muxer epoll event: {e}");
|
||||
}
|
||||
}
|
||||
break 'epoll;
|
||||
|
|
@ -383,10 +383,7 @@ impl VsockMuxer {
|
|||
/// Handle/dispatch an epoll event to its listener.
|
||||
///
|
||||
fn handle_event(&mut self, fd: RawFd, event_set: epoll::Events) {
|
||||
debug!(
|
||||
"vsock: muxer processing event: fd={}, event_set={:?}",
|
||||
fd, event_set
|
||||
);
|
||||
debug!("vsock: muxer processing event: fd={fd}, event_set={event_set:?}");
|
||||
|
||||
match self.listener_map.get_mut(&fd) {
|
||||
// This event needs to be forwarded to a `MuxerConnection` that is listening for
|
||||
|
|
@ -430,7 +427,7 @@ impl VsockMuxer {
|
|||
self.add_listener(stream.as_raw_fd(), EpollListener::LocalStream(stream))
|
||||
})
|
||||
.unwrap_or_else(|err| {
|
||||
warn!("vsock: unable to accept local connection: {:?}", err);
|
||||
warn!("vsock: unable to accept local connection: {err:?}");
|
||||
});
|
||||
}
|
||||
|
||||
|
|
@ -478,16 +475,13 @@ impl VsockMuxer {
|
|||
)
|
||||
})
|
||||
.unwrap_or_else(|err| {
|
||||
info!("vsock: error adding local-init connection: {:?}", err);
|
||||
info!("vsock: error adding local-init connection: {err:?}");
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
_ => {
|
||||
info!(
|
||||
"vsock: unexpected event: fd={:?}, event_set={:?}",
|
||||
fd, event_set
|
||||
);
|
||||
info!("vsock: unexpected event: fd={fd:?}, event_set={event_set:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -662,10 +656,7 @@ impl VsockMuxer {
|
|||
epoll::Event::new(epoll::Events::empty(), 0),
|
||||
)
|
||||
.unwrap_or_else(|err| {
|
||||
warn!(
|
||||
"vosck muxer: error removing epoll listener for fd {:?}: {:?}",
|
||||
fd, err
|
||||
);
|
||||
warn!("vosck muxer: error removing epoll listener for fd {fd:?}: {err:?}");
|
||||
});
|
||||
}
|
||||
|
||||
|
|
@ -760,7 +751,7 @@ impl VsockMuxer {
|
|||
}
|
||||
Err(err) => {
|
||||
conn.kill();
|
||||
warn!("vsock: unable to ack host connection: {:?}", err);
|
||||
warn!("vsock: unable to ack host connection: {err:?}");
|
||||
}
|
||||
};
|
||||
}
|
||||
|
|
@ -870,10 +861,7 @@ impl VsockMuxer {
|
|||
peer_port,
|
||||
});
|
||||
if !pushed {
|
||||
warn!(
|
||||
"vsock: muxer.rxq full; dropping RST packet for lp={}, pp={}",
|
||||
local_port, peer_port
|
||||
);
|
||||
warn!("vsock: muxer.rxq full; dropping RST packet for lp={local_port}, pp={peer_port}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -93,8 +93,7 @@ impl WatchdogEpollHandler {
|
|||
// If this is the first "ping" then setup the timer
|
||||
if self.last_ping_time.lock().unwrap().is_none() {
|
||||
info!(
|
||||
"First ping received. Starting timer (every {} seconds)",
|
||||
WATCHDOG_TIMER_INTERVAL
|
||||
"First ping received. Starting timer (every {WATCHDOG_TIMER_INTERVAL} seconds)"
|
||||
);
|
||||
timerfd_setup(&self.timer, WATCHDOG_TIMER_INTERVAL).map_err(Error::TimerfdSetup)?;
|
||||
}
|
||||
|
|
@ -113,7 +112,7 @@ impl WatchdogEpollHandler {
|
|||
self.interrupt_cb
|
||||
.trigger(VirtioInterruptType::Queue(0))
|
||||
.map_err(|e| {
|
||||
error!("Failed to signal used queue: {:?}", e);
|
||||
error!("Failed to signal used queue: {e:?}");
|
||||
DeviceError::FailedSignalingUsedQueue(e)
|
||||
})
|
||||
}
|
||||
|
|
@ -142,18 +141,15 @@ impl EpollHelperHandler for WatchdogEpollHandler {
|
|||
match ev_type {
|
||||
QUEUE_AVAIL_EVENT => {
|
||||
self.queue_evt.read().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to get queue event: {e:?}"))
|
||||
})?;
|
||||
|
||||
let needs_notification = self.process_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {:?}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to process queue : {e:?}"))
|
||||
})?;
|
||||
if needs_notification {
|
||||
self.signal_used_queue().map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!(
|
||||
"Failed to signal used queue: {:?}",
|
||||
e
|
||||
))
|
||||
EpollHelperError::HandleEvent(anyhow!("Failed to signal used queue: {e:?}"))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
|
|
@ -162,22 +158,21 @@ impl EpollHelperHandler for WatchdogEpollHandler {
|
|||
// the number of times this event has elapsed since the last read.
|
||||
let mut buf = vec![0; 8];
|
||||
self.timer.read_exact(&mut buf).map_err(|e| {
|
||||
EpollHelperError::HandleEvent(anyhow!("Error reading from timer fd: {:}", e))
|
||||
EpollHelperError::HandleEvent(anyhow!("Error reading from timer fd: {e:}"))
|
||||
})?;
|
||||
|
||||
if let Some(last_ping_time) = self.last_ping_time.lock().unwrap().as_ref() {
|
||||
let now = Instant::now();
|
||||
let gap = now.duration_since(*last_ping_time).as_secs();
|
||||
if gap > WATCHDOG_TIMEOUT {
|
||||
error!("Watchdog triggered: {} seconds since last ping", gap);
|
||||
error!("Watchdog triggered: {gap} seconds since last ping");
|
||||
self.reset_evt.write(1).ok();
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(EpollHelperError::HandleEvent(anyhow!(
|
||||
"Unexpected event: {}",
|
||||
ev_type
|
||||
"Unexpected event: {ev_type}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
|
@ -214,7 +209,7 @@ impl Watchdog {
|
|||
) -> io::Result<Watchdog> {
|
||||
let mut last_ping_time = None;
|
||||
let (avail_features, acked_features, paused) = if let Some(state) = state {
|
||||
info!("Restoring virtio-watchdog {}", id);
|
||||
info!("Restoring virtio-watchdog {id}");
|
||||
|
||||
// When restoring enable the watchdog if it was previously enabled.
|
||||
// We reset the timer to ensure that we don't unnecessarily reboot
|
||||
|
|
@ -229,7 +224,7 @@ impl Watchdog {
|
|||
};
|
||||
|
||||
let timer_fd = timerfd_create().map_err(|e| {
|
||||
error!("Failed to create timer fd {}", e);
|
||||
error!("Failed to create timer fd {e}");
|
||||
e
|
||||
})?;
|
||||
// SAFETY: timer_fd is a valid fd
|
||||
|
|
@ -339,12 +334,12 @@ impl VirtioDevice for Watchdog {
|
|||
let (kill_evt, pause_evt) = self.common.dup_eventfds();
|
||||
|
||||
let reset_evt = self.reset_evt.try_clone().map_err(|e| {
|
||||
error!("Failed to clone reset_evt eventfd: {}", e);
|
||||
error!("Failed to clone reset_evt eventfd: {e}");
|
||||
ActivateError::BadActivate
|
||||
})?;
|
||||
|
||||
let timer = self.timer.try_clone().map_err(|e| {
|
||||
error!("Failed to clone timer fd: {}", e);
|
||||
error!("Failed to clone timer fd: {e}");
|
||||
ActivateError::BadActivate
|
||||
})?;
|
||||
|
||||
|
|
@ -392,20 +387,17 @@ impl Pausable for Watchdog {
|
|||
fn pause(&mut self) -> result::Result<(), MigratableError> {
|
||||
info!("Watchdog paused - disabling timer");
|
||||
timerfd_setup(&self.timer, 0)
|
||||
.map_err(|e| MigratableError::Pause(anyhow!("Error clearing timer: {:?}", e)))?;
|
||||
.map_err(|e| MigratableError::Pause(anyhow!("Error clearing timer: {e:?}")))?;
|
||||
self.common.pause()
|
||||
}
|
||||
|
||||
fn resume(&mut self) -> result::Result<(), MigratableError> {
|
||||
// Reset the timer on pause if it was previously used
|
||||
if self.last_ping_time.lock().unwrap().is_some() {
|
||||
info!(
|
||||
"Watchdog resumed - enabling timer (every {} seconds)",
|
||||
WATCHDOG_TIMER_INTERVAL
|
||||
);
|
||||
info!("Watchdog resumed - enabling timer (every {WATCHDOG_TIMER_INTERVAL} seconds)");
|
||||
self.last_ping_time.lock().unwrap().replace(Instant::now());
|
||||
timerfd_setup(&self.timer, WATCHDOG_TIMER_INTERVAL)
|
||||
.map_err(|e| MigratableError::Resume(anyhow!("Error setting timer: {:?}", e)))?;
|
||||
.map_err(|e| MigratableError::Resume(anyhow!("Error setting timer: {e:?}")))?;
|
||||
}
|
||||
self.common.resume()
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue