misc: clean up &Arc<dyn T> -> &dyn T

Taking `&Arc<T>` as an argument is almost always an antipattern: it
hides whether the callee takes over (shared) ownership (via
`.clone()`) or not. It is more explicit to take `&dyn T` when the
callee only borrows the object, or `Arc<dyn T>` when it takes over
(shared) ownership. This commit cleans up the code base accordingly.
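
For illustration, a minimal sketch of the difference (the `Backend`
trait and all function names below are made up for this message;
they are not part of the code base):

    use std::sync::Arc;

    trait Backend: Send + Sync {
        fn name(&self) -> String;
    }

    // Antipattern: the signature alone does not tell whether the
    // callee keeps a clone of the Arc or merely reads through it.
    fn log_backend_opaque(backend: &Arc<dyn Backend>) {
        println!("{}", backend.name());
    }

    // Explicit: a plain borrow; the callee cannot take ownership.
    fn log_backend(backend: &dyn Backend) {
        println!("{}", backend.name());
    }

    // Explicit: the callee takes over (shared) ownership; callers
    // that want to keep a handle must clone() visibly.
    fn keep_backend(backend: Arc<dyn Backend>) -> Arc<dyn Backend> {
        backend
    }

    struct Null;
    impl Backend for Null {
        fn name(&self) -> String {
            "null".into()
        }
    }

    fn main() {
        let backend: Arc<dyn Backend> = Arc::new(Null);
        log_backend(backend.as_ref()); // borrow only
        let _owned = keep_backend(backend.clone()); // visible clone
        // Option<Arc<dyn T>> turns into Option<&dyn T> via as_deref().
        let maybe: Option<Arc<dyn Backend>> = Some(backend);
        if let Some(b) = maybe.as_deref() {
            log_backend(b);
        }
    }

At the call sites this maps to `.as_ref()` for plain borrows, an
explicit `.clone()` where ownership is handed over, and
`Option::as_deref()` to turn an `Option<Arc<dyn T>>` into an
`Option<&dyn T>`.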

The change is purely mechanical and was easy to apply across the
code base.

Signed-off-by: Philipp Schuster <philipp.schuster@cyberus-technology.de>
On-behalf-of: SAP philipp.schuster@sap.com
Philipp Schuster 2025-10-28 10:49:34 +01:00 committed by Rob Bradford
parent e295719967
commit 7536a95424
37 changed files with 121 additions and 117 deletions

@ -180,7 +180,7 @@ pub fn initramfs_load_addr(
}
}
pub fn get_host_cpu_phys_bits(hypervisor: &Arc<dyn hypervisor::Hypervisor>) -> u8 {
pub fn get_host_cpu_phys_bits(hypervisor: &dyn hypervisor::Hypervisor) -> u8 {
let host_cpu_phys_bits = hypervisor.get_host_ipa_limit().try_into().unwrap();
if host_cpu_phys_bits == 0 {
// Host kernel does not support `get_host_ipa_limit`,

@ -156,7 +156,7 @@ pub fn initramfs_load_addr(
}
}
pub fn get_host_cpu_phys_bits(_hypervisor: &Arc<dyn hypervisor::Hypervisor>) -> u8 {
pub fn get_host_cpu_phys_bits(_hypervisor: &dyn hypervisor::Hypervisor) -> u8 {
40
}

@ -6,7 +6,6 @@
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
use std::sync::Arc;
pub mod interrupts;
pub mod layout;
mod mpspec;
@ -547,7 +546,7 @@ impl CpuidFeatureEntry {
}
pub fn generate_common_cpuid(
hypervisor: &Arc<dyn hypervisor::Hypervisor>,
hypervisor: &dyn hypervisor::Hypervisor,
config: &CpuidConfig,
) -> super::Result<Vec<CpuIdEntry>> {
// SAFETY: cpuid called with valid leaves
@ -1258,7 +1257,7 @@ pub fn initramfs_load_addr(
Ok(aligned_addr)
}
pub fn get_host_cpu_phys_bits(hypervisor: &Arc<dyn hypervisor::Hypervisor>) -> u8 {
pub fn get_host_cpu_phys_bits(hypervisor: &dyn hypervisor::Hypervisor) -> u8 {
// SAFETY: call cpuid with valid leaves
unsafe {
let leaf = x86_64::__cpuid(0x8000_0000);

@ -39,7 +39,6 @@ use std::io::{self, IoSlice, IoSliceMut, Read, Seek, SeekFrom, Write};
use std::os::linux::fs::MetadataExt;
use std::os::unix::io::AsRawFd;
use std::path::Path;
use std::sync::Arc;
use std::time::Instant;
use std::{cmp, result};
@ -260,7 +259,7 @@ pub struct Request {
impl Request {
pub fn parse<B: Bitmap + 'static>(
desc_chain: &mut DescriptorChain<GuestMemoryLoadGuard<vm_memory::GuestMemoryMmap<B>>>,
access_platform: Option<&Arc<dyn AccessPlatform>>,
access_platform: Option<&dyn AccessPlatform>,
) -> result::Result<Request, Error> {
let hdr_desc = desc_chain
.next()

@ -10,9 +10,6 @@
//
//
#[cfg(target_arch = "aarch64")]
use std::sync::Arc;
use thiserror::Error;
#[cfg(not(target_arch = "riscv64"))]
use vm_memory::GuestAddress;
@ -473,7 +470,7 @@ pub trait Vcpu: Send + Sync {
#[cfg(target_arch = "aarch64")]
fn vcpu_set_processor_features(
&self,
vm: &Arc<dyn crate::Vm>,
vm: &dyn crate::Vm,
kvi: &mut VcpuInit,
id: u32,
) -> Result<()>;

@ -2098,7 +2098,7 @@ impl cpu::Vcpu for KvmVcpu {
#[cfg(target_arch = "aarch64")]
fn vcpu_set_processor_features(
&self,
vm: &Arc<dyn crate::Vm>,
vm: &dyn crate::Vm,
kvi: &mut crate::VcpuInit,
id: u32,
) -> cpu::Result<()> {

@ -1306,7 +1306,7 @@ impl cpu::Vcpu for MshvVcpu {
#[cfg(target_arch = "aarch64")]
fn vcpu_set_processor_features(
&self,
_vm: &Arc<dyn crate::Vm>,
_vm: &dyn crate::Vm,
_kvi: &mut crate::VcpuInit,
_id: u32,
) -> cpu::Result<()> {

@ -2,8 +2,6 @@
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
use std::sync::Arc;
use thiserror::Error;
use virtio_bindings::virtio_net::{
VIRTIO_NET_CTRL_GUEST_OFFLOADS, VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, VIRTIO_NET_CTRL_MQ,
@ -67,7 +65,7 @@ impl CtrlQueue {
&mut self,
mem: &GuestMemoryMmap,
queue: &mut Queue,
access_platform: Option<&Arc<dyn AccessPlatform>>,
access_platform: Option<&dyn AccessPlatform>,
) -> Result<()> {
while let Some(mut desc_chain) = queue.pop_descriptor_chain(mem) {
let ctrl_desc = desc_chain.next().ok_or(Error::NoControlHeaderDescriptor)?;

@ -45,7 +45,7 @@ impl TxVirtio {
tap: &Tap,
queue: &mut Queue,
rate_limiter: &mut Option<RateLimiter>,
access_platform: Option<&Arc<dyn AccessPlatform>>,
access_platform: Option<&dyn AccessPlatform>,
) -> Result<bool, NetQueuePairError> {
let mut retry_write = false;
let mut rate_limit_reached = false;
@ -173,7 +173,7 @@ impl RxVirtio {
tap: &Tap,
queue: &mut Queue,
rate_limiter: &mut Option<RateLimiter>,
access_platform: Option<&Arc<dyn AccessPlatform>>,
access_platform: Option<&dyn AccessPlatform>,
) -> Result<bool, NetQueuePairError> {
let mut exhausted_descs = true;
let mut rate_limit_reached = false;
@ -413,7 +413,7 @@ impl NetQueuePair {
&self.tap,
queue,
&mut self.tx_rate_limiter,
self.access_platform.as_ref(),
self.access_platform.as_deref(),
)?;
// We got told to try again when writing to the tap. Wait for the TAP to be writable
@ -463,7 +463,7 @@ impl NetQueuePair {
&self.tap,
queue,
&mut self.rx_rate_limiter,
self.access_platform.as_ref(),
self.access_platform.as_deref(),
)?;
let rate_limit_reached = self
.rx_rate_limiter

@ -1449,7 +1449,7 @@ impl VfioPciDevice {
#[allow(clippy::too_many_arguments)]
pub fn new(
id: String,
vm: &Arc<dyn hypervisor::Vm>,
vm: Arc<dyn hypervisor::Vm>,
device: VfioDevice,
container: Arc<VfioContainer>,
msi_interrupt_manager: Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
@ -1478,7 +1478,7 @@ impl VfioPciDevice {
let vfio_pci_device = VfioPciDevice {
id,
vm: vm.clone(),
vm,
device,
container,
common,

@ -69,7 +69,7 @@ impl VfioUserPciDevice {
#[allow(clippy::too_many_arguments)]
pub fn new(
id: String,
vm: &Arc<dyn hypervisor::Vm>,
vm: Arc<dyn hypervisor::Vm>,
client: Arc<Mutex<Client>>,
msi_interrupt_manager: Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
legacy_interrupt_group: Option<Arc<dyn InterruptSourceGroup>>,
@ -103,7 +103,7 @@ impl VfioUserPciDevice {
Ok(Self {
id,
vm: vm.clone(),
vm,
client,
common,
memory_slot_allocator,

@ -591,7 +591,7 @@ impl VirtioDevice for Balloon {
interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
self.common.activate(&queues, interrupt_cb.clone())?;
let (kill_evt, pause_evt) = self.common.dup_eventfds();
let mut virtqueues = Vec::new();

@ -171,7 +171,7 @@ impl BlockEpollHandler {
let mut batch_inflight_requests = Vec::new();
while let Some(mut desc_chain) = queue.pop_descriptor_chain(self.mem.memory()) {
let mut request = Request::parse(&mut desc_chain, self.access_platform.as_ref())
let mut request = Request::parse(&mut desc_chain, self.access_platform.as_deref())
.map_err(Error::RequestParsing)?;
// For virtio spec compliance
@ -905,7 +905,7 @@ impl VirtioDevice for Block {
interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
self.common.activate(&queues, interrupt_cb.clone())?;
self.update_writeback();

@ -217,7 +217,7 @@ impl ConsoleEpollHandler {
.write_slice(
&source_slice[..],
desc.addr()
.translate_gva(self.access_platform.as_ref(), desc.len() as usize),
.translate_gva(self.access_platform.as_deref(), desc.len() as usize),
)
.map_err(Error::GuestMemoryWrite)?;
@ -253,7 +253,7 @@ impl ConsoleEpollHandler {
.memory()
.write_volatile_to(
desc.addr()
.translate_gva(self.access_platform.as_ref(), desc.len() as usize),
.translate_gva(self.access_platform.as_deref(), desc.len() as usize),
&mut buf,
desc.len() as usize,
)
@ -707,7 +707,7 @@ impl VirtioDevice for Console {
interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
self.common.activate(&queues, interrupt_cb.clone())?;
self.resizer
.acked_features
.store(self.common.acked_features, Ordering::Relaxed);

@ -221,7 +221,7 @@ impl VirtioCommon {
pub fn activate(
&mut self,
queues: &[(usize, Queue, EventFd)],
interrupt_cb: &Arc<dyn VirtioInterrupt>,
interrupt_cb: Arc<dyn VirtioInterrupt>,
) -> ActivateResult {
if queues.len() < self.min_queues.into() {
error!(
@ -246,7 +246,7 @@ impl VirtioCommon {
// Save the interrupt EventFD as we need to return it on reset
// but clone it to pass into the thread.
self.interrupt_cb = Some(interrupt_cb.clone());
self.interrupt_cb = Some(interrupt_cb);
Ok(())
}

@ -1079,7 +1079,7 @@ impl VirtioDevice for Iommu {
interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
self.common.activate(&queues, interrupt_cb.clone())?;
let (kill_evt, pause_evt) = self.common.dup_eventfds();
let (_, request_queue, request_queue_evt) = queues.remove(0);

@ -912,7 +912,7 @@ impl VirtioDevice for Mem {
interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
self.common.activate(&queues, interrupt_cb.clone())?;
let (kill_evt, pause_evt) = self.common.dup_eventfds();
let (_, queue, queue_evt) = queues.remove(0);

@ -100,7 +100,11 @@ impl EpollHelperHandler for NetCtrlEpollHandler {
))
})?;
self.ctrl_q
.process(mem.deref(), &mut self.queue, self.access_platform.as_ref())
.process(
mem.deref(),
&mut self.queue,
self.access_platform.as_deref(),
)
.map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to process control queue: {e:?}"
@ -693,7 +697,7 @@ impl VirtioDevice for Net {
interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
self.common.activate(&queues, interrupt_cb.clone())?;
let num_queues = queues.len();
let event_idx = self.common.feature_acked(VIRTIO_RING_F_EVENT_IDX.into());

@ -105,7 +105,7 @@ struct Request {
impl Request {
fn parse(
desc_chain: &mut DescriptorChain<GuestMemoryLoadGuard<GuestMemoryMmap>>,
access_platform: Option<&Arc<dyn AccessPlatform>>,
access_platform: Option<&dyn AccessPlatform>,
) -> result::Result<Request, Error> {
let desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
// The descriptor contains the request type which MUST be readable.
@ -165,7 +165,7 @@ impl PmemEpollHandler {
fn process_queue(&mut self) -> result::Result<bool, Error> {
let mut used_descs = false;
while let Some(mut desc_chain) = self.queue.pop_descriptor_chain(self.mem.memory()) {
let len = match Request::parse(&mut desc_chain, self.access_platform.as_ref()) {
let len = match Request::parse(&mut desc_chain, self.access_platform.as_deref()) {
Ok(ref req) if (req.type_ == RequestType::Flush) => {
let status_code = match self.disk.sync_all() {
Ok(()) => VIRTIO_PMEM_RESP_TYPE_OK,
@ -387,7 +387,7 @@ impl VirtioDevice for Pmem {
interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
self.common.activate(&queues, interrupt_cb.clone())?;
let (kill_evt, pause_evt) = self.common.dup_eventfds();
if let Some(disk) = self.disk.as_ref() {
let disk = disk.try_clone().map_err(|e| {

@ -76,7 +76,7 @@ impl RngEpollHandler {
.memory()
.read_volatile_from(
desc.addr()
.translate_gva(self.access_platform.as_ref(), desc.len() as usize),
.translate_gva(self.access_platform.as_deref(), desc.len() as usize),
&mut self.random_file,
desc.len() as usize,
)
@ -248,7 +248,7 @@ impl VirtioDevice for Rng {
interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
self.common.activate(&queues, interrupt_cb.clone())?;
let (kill_evt, pause_evt) = self.common.dup_eventfds();
if let Some(file) = self.random_file.as_ref() {

@ -382,7 +382,7 @@ impl VirtioPciDevice {
device: Arc<Mutex<dyn VirtioDevice>>,
msix_num: u16,
access_platform: Option<Arc<dyn AccessPlatform>>,
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>>,
interrupt_manager: &dyn InterruptManager<GroupConfig = MsiIrqGroupConfig>,
pci_device_bdf: u32,
activate_evt: EventFd,
use_64bit_bar: bool,
@ -815,8 +815,8 @@ impl VirtioPciDevice {
!self.device_activated.load(Ordering::SeqCst) && self.is_driver_ready()
}
pub fn dma_handler(&self) -> Option<&Arc<dyn ExternalDmaMapping>> {
self.dma_handler.as_ref()
pub fn dma_handler(&self) -> Option<&dyn ExternalDmaMapping> {
self.dma_handler.as_deref()
}
}

@ -216,7 +216,7 @@ impl Vdpa {
fn activate_vdpa(
&mut self,
mem: &GuestMemoryMmap,
virtio_interrupt: &Arc<dyn VirtioInterrupt>,
virtio_interrupt: &dyn VirtioInterrupt,
queues: Vec<(usize, Queue, EventFd)>,
) -> Result<()> {
assert!(self.vhost.is_some());
@ -245,15 +245,15 @@ impl Vdpa {
queue_size,
flags: 0u32,
desc_table_addr: queue.desc_table().translate_gpa(
self.common.access_platform.as_ref(),
self.common.access_platform.as_deref(),
queue_size as usize * std::mem::size_of::<RawDescriptor>(),
),
used_ring_addr: queue.used_ring().translate_gpa(
self.common.access_platform.as_ref(),
self.common.access_platform.as_deref(),
4 + queue_size as usize * 8,
),
avail_ring_addr: queue.avail_ring().translate_gpa(
self.common.access_platform.as_ref(),
self.common.access_platform.as_deref(),
4 + queue_size as usize * 2,
),
log_addr: None,
@ -421,7 +421,7 @@ impl VirtioDevice for Vdpa {
virtio_interrupt: Arc<dyn VirtioInterrupt>,
queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.activate_vdpa(&mem.memory(), &virtio_interrupt, queues)
self.activate_vdpa(&mem.memory(), virtio_interrupt.as_ref(), queues)
.map_err(ActivateError::ActivateVdpa)?;
// Store the virtio interrupt handler as we need to return it on reset

@ -284,7 +284,7 @@ impl VirtioDevice for Blk {
interrupt_cb: Arc<dyn VirtioInterrupt>,
queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
self.common.activate(&queues, interrupt_cb.clone())?;
self.guest_memory = Some(mem.clone());
let backend_req_handler: Option<FrontendReqHandler<BackendReqHandler>> = None;

@ -265,7 +265,7 @@ impl VirtioDevice for Fs {
interrupt_cb: Arc<dyn VirtioInterrupt>,
queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
self.common.activate(&queues, interrupt_cb.clone())?;
self.guest_memory = Some(mem.clone());
let backend_req_handler: Option<FrontendReqHandler<BackendReqHandler>> = None;

@ -228,7 +228,7 @@ impl<S: VhostUserFrontendReqHandler> VhostUserEpollHandler<S> {
.iter()
.map(|(i, q, e)| (*i, vm_virtio::clone_queue(q), e.try_clone().unwrap()))
.collect(),
&self.virtio_interrupt,
self.virtio_interrupt.as_ref(),
self.acked_features,
self.acked_protocol_features,
&self.backend_req_handler,
@ -332,7 +332,7 @@ impl VhostUserCommon {
.iter()
.map(|(i, q, e)| (*i, vm_virtio::clone_queue(q), e.try_clone().unwrap()))
.collect(),
&interrupt_cb,
interrupt_cb.as_ref(),
acked_features,
&backend_req_handler,
inflight.as_mut(),

@ -295,7 +295,7 @@ impl VirtioDevice for Net {
interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
self.common.activate(&queues, interrupt_cb.clone())?;
self.guest_memory = Some(mem.clone());
let num_queues = queues.len();

@ -156,7 +156,7 @@ impl VhostUserHandle {
&mut self,
mem: &GuestMemoryMmap,
queues: Vec<(usize, Queue, EventFd)>,
virtio_interrupt: &Arc<dyn VirtioInterrupt>,
virtio_interrupt: &dyn VirtioInterrupt,
acked_features: u64,
backend_req_handler: &Option<FrontendReqHandler<S>>,
inflight: Option<&mut Inflight>,
@ -340,7 +340,7 @@ impl VhostUserHandle {
&mut self,
mem: &GuestMemoryMmap,
queues: Vec<(usize, Queue, EventFd)>,
virtio_interrupt: &Arc<dyn VirtioInterrupt>,
virtio_interrupt: &dyn VirtioInterrupt,
acked_features: u64,
acked_protocol_features: u64,
backend_req_handler: &Option<FrontendReqHandler<S>>,

@ -124,7 +124,7 @@ where
while let Some(mut desc_chain) = self.queues[0].pop_descriptor_chain(self.mem.memory()) {
let used_len = match VsockPacket::from_rx_virtq_head(
&mut desc_chain,
self.access_platform.as_ref(),
self.access_platform.as_deref(),
) {
Ok(mut pkt) => {
if self.backend.write().unwrap().recv_pkt(&mut pkt).is_ok() {
@ -166,7 +166,7 @@ where
while let Some(mut desc_chain) = self.queues[1].pop_descriptor_chain(self.mem.memory()) {
let pkt = match VsockPacket::from_tx_virtq_head(
&mut desc_chain,
self.access_platform.as_ref(),
self.access_platform.as_deref(),
) {
Ok(pkt) => pkt,
Err(e) => {
@ -430,7 +430,7 @@ where
interrupt_cb: Arc<dyn VirtioInterrupt>,
queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
self.common.activate(&queues, interrupt_cb.clone())?;
let (kill_evt, pause_evt) = self.common.dup_eventfds();
let mut virtqueues = Vec::new();

@ -17,7 +17,6 @@
//! to temporary buffers, before passing it on to the vsock backend.
use std::ops::Deref;
use std::sync::Arc;
use byteorder::{ByteOrder, LittleEndian};
use virtio_queue::DescriptorChain;
@ -111,7 +110,7 @@ impl VsockPacket {
///
pub fn from_tx_virtq_head<M>(
desc_chain: &mut DescriptorChain<M>,
access_platform: Option<&Arc<dyn AccessPlatform>>,
access_platform: Option<&dyn AccessPlatform>,
) -> Result<Self>
where
M: Clone + Deref,
@ -203,7 +202,7 @@ impl VsockPacket {
///
pub fn from_rx_virtq_head<M>(
desc_chain: &mut DescriptorChain<M>,
access_platform: Option<&Arc<dyn AccessPlatform>>,
access_platform: Option<&dyn AccessPlatform>,
) -> Result<Self>
where
M: Clone + Deref,

@ -330,7 +330,7 @@ impl VirtioDevice for Watchdog {
interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
self.common.activate(&queues, interrupt_cb.clone())?;
let (kill_evt, pause_evt) = self.common.dup_eventfds();
let reset_evt = self.reset_evt.try_clone().map_err(|e| {

@ -192,12 +192,13 @@ impl Bus {
}
/// Removes all entries referencing the given device.
pub fn remove_by_device(&self, device: &Arc<dyn BusDeviceSync>) -> Result<()> {
pub fn remove_by_device(&self, device: &dyn BusDeviceSync) -> Result<()> {
let mut device_list = self.devices.write().unwrap();
let mut remove_key_list = Vec::new();
for (key, value) in device_list.iter() {
if Arc::ptr_eq(&value.upgrade().unwrap(), device) {
let value = value.upgrade().unwrap();
if core::ptr::eq(Arc::as_ptr(&value), device) {
remove_key_list.push(*key);
}
}

@ -11,7 +11,6 @@
//! Implements virtio queues
use std::fmt::{self, Debug};
use std::sync::Arc;
use virtio_queue::{Queue, QueueT};
use vm_memory::GuestAddress;
@ -102,28 +101,28 @@ pub trait AccessPlatform: Send + Sync + Debug {
}
pub trait Translatable {
fn translate_gva(&self, access_platform: Option<&Arc<dyn AccessPlatform>>, len: usize) -> Self;
fn translate_gpa(&self, access_platform: Option<&Arc<dyn AccessPlatform>>, len: usize) -> Self;
fn translate_gva(&self, access_platform: Option<&dyn AccessPlatform>, len: usize) -> Self;
fn translate_gpa(&self, access_platform: Option<&dyn AccessPlatform>, len: usize) -> Self;
}
impl Translatable for GuestAddress {
fn translate_gva(&self, access_platform: Option<&Arc<dyn AccessPlatform>>, len: usize) -> Self {
fn translate_gva(&self, access_platform: Option<&dyn AccessPlatform>, len: usize) -> Self {
GuestAddress(self.0.translate_gva(access_platform, len))
}
fn translate_gpa(&self, access_platform: Option<&Arc<dyn AccessPlatform>>, len: usize) -> Self {
fn translate_gpa(&self, access_platform: Option<&dyn AccessPlatform>, len: usize) -> Self {
GuestAddress(self.0.translate_gpa(access_platform, len))
}
}
impl Translatable for u64 {
fn translate_gva(&self, access_platform: Option<&Arc<dyn AccessPlatform>>, len: usize) -> Self {
fn translate_gva(&self, access_platform: Option<&dyn AccessPlatform>, len: usize) -> Self {
if let Some(access_platform) = access_platform {
access_platform.translate_gva(*self, len as u64).unwrap()
} else {
*self
}
}
fn translate_gpa(&self, access_platform: Option<&Arc<dyn AccessPlatform>>, len: usize) -> Self {
fn translate_gpa(&self, access_platform: Option<&dyn AccessPlatform>, len: usize) -> Self {
if let Some(access_platform) = access_platform {
access_platform.translate_gpa(*self, len as u64).unwrap()
} else {

@ -413,7 +413,7 @@ impl Vcpu {
pub fn new(
id: u32,
apic_id: u32,
vm: &Arc<dyn hypervisor::Vm>,
vm: &dyn hypervisor::Vm,
vm_ops: Option<Arc<dyn VmOps>>,
#[cfg(target_arch = "x86_64")] cpu_vendor: CpuVendor,
) -> Result<Self> {
@ -441,7 +441,7 @@ impl Vcpu {
/// * `cpuid` - (x86_64) CpuId, wrapper over the `kvm_cpuid2` structure.
pub fn configure(
&mut self,
#[cfg(target_arch = "aarch64")] vm: &Arc<dyn hypervisor::Vm>,
#[cfg(target_arch = "aarch64")] vm: &dyn hypervisor::Vm,
boot_setup: Option<(EntryPoint, &GuestMemoryAtomic<GuestMemoryMmap>)>,
#[cfg(target_arch = "x86_64")] cpuid: Vec<CpuIdEntry>,
#[cfg(target_arch = "x86_64")] kvm_hyperv: bool,
@ -486,7 +486,7 @@ impl Vcpu {
/// Initializes an aarch64 specific vcpu for booting Linux.
#[cfg(target_arch = "aarch64")]
pub fn init(&self, vm: &Arc<dyn hypervisor::Vm>) -> Result<()> {
pub fn init(&self, vm: &dyn hypervisor::Vm) -> Result<()> {
use std::arch::is_aarch64_feature_detected;
#[allow(clippy::nonminimal_bool)]
let sve_supported =
@ -748,7 +748,7 @@ impl CpuManager {
exit_evt: EventFd,
reset_evt: EventFd,
#[cfg(feature = "guest_debug")] vm_debug_evt: EventFd,
hypervisor: &Arc<dyn hypervisor::Hypervisor>,
hypervisor: Arc<dyn hypervisor::Hypervisor>,
seccomp_action: SeccompAction,
vm_ops: Arc<dyn VmOps>,
#[cfg(feature = "tdx")] tdx_enabled: bool,
@ -854,7 +854,7 @@ impl CpuManager {
proximity_domain_per_cpu,
affinity,
dynamic,
hypervisor: hypervisor.clone(),
hypervisor,
#[cfg(feature = "sev_snp")]
sev_snp_enabled,
})))
@ -863,7 +863,7 @@ impl CpuManager {
#[cfg(target_arch = "x86_64")]
pub fn populate_cpuid(
&mut self,
hypervisor: &Arc<dyn hypervisor::Hypervisor>,
hypervisor: &dyn hypervisor::Hypervisor,
#[cfg(feature = "tdx")] tdx: bool,
) -> Result<()> {
self.cpuid = {
@ -897,7 +897,7 @@ impl CpuManager {
let mut vcpu = Vcpu::new(
cpu_id,
x2apic_id,
&self.vm,
self.vm.as_ref(),
Some(self.vm_ops.clone()),
#[cfg(target_arch = "x86_64")]
self.hypervisor.get_cpu_vendor(),
@ -906,7 +906,7 @@ impl CpuManager {
if let Some(snapshot) = snapshot {
// AArch64 vCPUs should be initialized after created.
#[cfg(target_arch = "aarch64")]
vcpu.init(&self.vm)?;
vcpu.init(self.vm.as_ref())?;
let state: CpuState = snapshot.to_state().map_err(|e| {
Error::VcpuCreate(anyhow!("Could not get vCPU state from snapshot {e:?}"))
@ -977,7 +977,7 @@ impl CpuManager {
)?;
#[cfg(target_arch = "aarch64")]
vcpu.configure(&self.vm, boot_setup)?;
vcpu.configure(self.vm.as_ref(), boot_setup)?;
#[cfg(target_arch = "riscv64")]
vcpu.configure(boot_setup)?;

@ -1433,11 +1433,11 @@ impl DeviceManager {
)?;
#[cfg(target_arch = "aarch64")]
self.add_legacy_devices(&legacy_interrupt_manager)?;
self.add_legacy_devices(legacy_interrupt_manager.as_ref())?;
{
self.ged_notification_device = self.add_acpi_devices(
&legacy_interrupt_manager,
legacy_interrupt_manager.as_ref(),
self.reset_evt
.try_clone()
.map_err(DeviceManagerError::EventFd)?,
@ -1450,7 +1450,7 @@ impl DeviceManager {
self.original_termios_opt = original_termios_opt;
self.console = self.add_console_devices(
&legacy_interrupt_manager,
legacy_interrupt_manager.as_ref(),
&mut virtio_devices,
console_info,
console_resize_pipe,
@ -1813,7 +1813,7 @@ impl DeviceManager {
fn add_acpi_devices(
&mut self,
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>,
interrupt_manager: &dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>,
reset_evt: EventFd,
exit_evt: EventFd,
) -> DeviceManagerResult<Option<Arc<Mutex<devices::AcpiGedDevice>>>> {
@ -1999,7 +1999,7 @@ impl DeviceManager {
#[cfg(target_arch = "aarch64")]
fn add_legacy_devices(
&mut self,
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>,
interrupt_manager: &dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>,
) -> DeviceManagerResult<()> {
// Add a RTC device
let rtc_irq = self
@ -2133,7 +2133,7 @@ impl DeviceManager {
#[cfg(target_arch = "x86_64")]
fn add_serial_device(
&mut self,
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>,
interrupt_manager: &dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>,
serial_writer: Option<Box<dyn io::Write + Send>>,
) -> DeviceManagerResult<Arc<Mutex<Serial>>> {
// Serial is tied to IRQ #4
@ -2184,7 +2184,7 @@ impl DeviceManager {
#[cfg(target_arch = "aarch64")]
fn add_serial_device(
&mut self,
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>,
interrupt_manager: &dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>,
serial_writer: Option<Box<dyn io::Write + Send>>,
) -> DeviceManagerResult<Arc<Mutex<Pl011>>> {
let id = String::from(SERIAL_DEVICE_NAME);
@ -2248,7 +2248,7 @@ impl DeviceManager {
#[cfg(target_arch = "riscv64")]
fn add_serial_device(
&mut self,
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>,
interrupt_manager: &dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>,
serial_writer: Option<Box<dyn io::Write + Send>>,
) -> DeviceManagerResult<Arc<Mutex<Serial>>> {
let id = String::from(SERIAL_DEVICE_NAME);
@ -2398,7 +2398,7 @@ impl DeviceManager {
/// - virtio-console
fn add_console_devices(
&mut self,
interrupt_manager: &Arc<dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>>,
interrupt_manager: &dyn InterruptManager<GroupConfig = LegacyIrqGroupConfig>,
virtio_devices: &mut Vec<MetaVirtioDevice>,
console_info: Option<ConsoleInfo>,
console_resize_pipe: Option<Arc<File>>,
@ -3790,7 +3790,7 @@ impl DeviceManager {
let vfio_pci_device = VfioPciDevice::new(
vfio_name.clone(),
&self.address_manager.vm,
self.address_manager.vm.clone(),
vfio_device,
vfio_container,
self.msi_interrupt_manager.clone(),
@ -3956,7 +3956,7 @@ impl DeviceManager {
let mut vfio_user_pci_device = VfioUserPciDevice::new(
vfio_user_name.clone(),
&self.address_manager.vm,
self.address_manager.vm.clone(),
client.clone(),
self.msi_interrupt_manager.clone(),
legacy_interrupt_group,
@ -4134,7 +4134,7 @@ impl DeviceManager {
virtio_device,
msix_num,
access_platform,
&self.msi_interrupt_manager,
self.msi_interrupt_manager.as_ref(),
pci_device_bdf.into(),
self.activate_evt
.try_clone()
@ -4706,12 +4706,12 @@ impl DeviceManager {
#[cfg(target_arch = "x86_64")]
// Remove the device from the IO bus
self.io_bus()
.remove_by_device(&bus_device)
.remove_by_device(bus_device.as_ref())
.map_err(DeviceManagerError::RemoveDeviceFromIoBus)?;
// Remove the device from the MMIO bus
self.mmio_bus()
.remove_by_device(&bus_device)
.remove_by_device(bus_device.as_ref())
.map_err(DeviceManagerError::RemoveDeviceFromMmioBus)?;
// Remove the device from the list of BusDevice held by the

@ -40,7 +40,7 @@ impl InterruptRoute {
})
}
pub fn enable(&self, vm: &Arc<dyn hypervisor::Vm>) -> Result<()> {
pub fn enable(&self, vm: &dyn hypervisor::Vm) -> Result<()> {
if !self.registered.load(Ordering::Acquire) {
vm.register_irqfd(&self.irq_fd, self.gsi)
.map_err(|e| io::Error::other(format!("Failed registering irq_fd: {e}")))?;
@ -52,7 +52,7 @@ impl InterruptRoute {
Ok(())
}
pub fn disable(&self, vm: &Arc<dyn hypervisor::Vm>) -> Result<()> {
pub fn disable(&self, vm: &dyn hypervisor::Vm) -> Result<()> {
if self.registered.load(Ordering::Acquire) {
vm.unregister_irqfd(&self.irq_fd, self.gsi)
.map_err(|e| io::Error::other(format!("Failed unregistering irq_fd: {e}")))?;
@ -122,7 +122,7 @@ impl MsiInterruptGroup {
impl InterruptSourceGroup for MsiInterruptGroup {
fn enable(&self) -> Result<()> {
for (_, route) in self.irq_routes.iter() {
route.enable(&self.vm)?;
route.enable(self.vm.as_ref())?;
}
Ok(())
@ -130,7 +130,7 @@ impl InterruptSourceGroup for MsiInterruptGroup {
fn disable(&self) -> Result<()> {
for (_, route) in self.irq_routes.iter() {
route.disable(&self.vm)?;
route.disable(self.vm.as_ref())?;
}
Ok(())
@ -172,7 +172,7 @@ impl InterruptSourceGroup for MsiInterruptGroup {
// So it's required to call disable() (which deassign KVM_IRQFD) before
// set_gsi_routes() to avoid kernel panic (see #3827)
if masked {
route.disable(&self.vm)?;
route.disable(self.vm.as_ref())?;
}
let mut routes = self.gsi_msi_routes.lock().unwrap();
@ -185,7 +185,7 @@ impl InterruptSourceGroup for MsiInterruptGroup {
// panic on kernel which not have commit a80ced6ea514
// (KVM: SVM: fix panic on out-of-bounds guest IRQ).
if !masked {
route.enable(&self.vm)?;
route.enable(self.vm.as_ref())?;
}
return Ok(());

@ -873,7 +873,7 @@ impl Vmm {
}
let vm = Vm::create_hypervisor_vm(
&self.hypervisor,
self.hypervisor.as_ref(),
#[cfg(feature = "tdx")]
false,
#[cfg(feature = "sev_snp")]
@ -892,8 +892,10 @@ impl Vmm {
vm.enable_x2apic_api().unwrap();
}
let phys_bits =
vm::physical_bits(&self.hypervisor, config.lock().unwrap().cpus.max_phys_bits);
let phys_bits = vm::physical_bits(
self.hypervisor.as_ref(),
config.lock().unwrap().cpus.max_phys_bits,
);
let memory_manager = MemoryManager::new(
vm,
@ -1129,10 +1131,12 @@ impl Vmm {
};
let amx = vm_config.lock().unwrap().cpus.features.amx;
let phys_bits =
vm::physical_bits(&hypervisor, vm_config.lock().unwrap().cpus.max_phys_bits);
let phys_bits = vm::physical_bits(
hypervisor.as_ref(),
vm_config.lock().unwrap().cpus.max_phys_bits,
);
arch::generate_common_cpuid(
&hypervisor,
hypervisor.as_ref(),
&arch::CpuidConfig {
phys_bits,
kvm_hyperv: vm_config.lock().unwrap().cpus.kvm_hyperv,
@ -1268,9 +1272,10 @@ impl Vmm {
let dest_cpuid = &{
let vm_config = &src_vm_config.lock().unwrap();
let phys_bits = vm::physical_bits(&self.hypervisor, vm_config.cpus.max_phys_bits);
let phys_bits =
vm::physical_bits(self.hypervisor.as_ref(), vm_config.cpus.max_phys_bits);
arch::generate_common_cpuid(
&self.hypervisor.clone(),
self.hypervisor.as_ref(),
&arch::CpuidConfig {
phys_bits,
kvm_hyperv: vm_config.cpus.kvm_hyperv,

@ -493,7 +493,7 @@ impl VmOps for VmOpsHandler {
}
}
pub fn physical_bits(hypervisor: &Arc<dyn hypervisor::Hypervisor>, max_phys_bits: u8) -> u8 {
pub fn physical_bits(hypervisor: &dyn hypervisor::Hypervisor, max_phys_bits: u8) -> u8 {
let host_phys_bits = get_host_cpu_phys_bits(hypervisor);
cmp::min(host_phys_bits, max_phys_bits)
@ -592,7 +592,7 @@ impl Vm {
reset_evt.try_clone().map_err(Error::EventFdClone)?,
#[cfg(feature = "guest_debug")]
vm_debug_evt,
&hypervisor,
hypervisor.clone(),
seccomp_action.clone(),
vm_ops,
#[cfg(feature = "tdx")]
@ -608,7 +608,7 @@ impl Vm {
.lock()
.unwrap()
.populate_cpuid(
&hypervisor,
hypervisor.as_ref(),
#[cfg(feature = "tdx")]
tdx_enabled,
)
@ -1015,7 +1015,7 @@ impl Vm {
};
let vm = Self::create_hypervisor_vm(
&hypervisor,
hypervisor.as_ref(),
#[cfg(feature = "tdx")]
tdx_enabled,
#[cfg(feature = "sev_snp")]
@ -1029,7 +1029,10 @@ impl Vm {
vm.enable_x2apic_api().unwrap();
}
let phys_bits = physical_bits(&hypervisor, vm_config.lock().unwrap().cpus.max_phys_bits);
let phys_bits = physical_bits(
hypervisor.as_ref(),
vm_config.lock().unwrap().cpus.max_phys_bits,
);
let memory_manager = if let Some(snapshot) =
snapshot_from_id(snapshot.as_ref(), MEMORY_MANAGER_SNAPSHOT_ID)
@ -1078,7 +1081,7 @@ impl Vm {
}
pub fn create_hypervisor_vm(
hypervisor: &Arc<dyn hypervisor::Hypervisor>,
hypervisor: &dyn hypervisor::Hypervisor,
#[cfg(feature = "tdx")] tdx_enabled: bool,
#[cfg(feature = "sev_snp")] sev_snp_enabled: bool,
#[cfg(feature = "sev_snp")] mem_size: u64,
@ -2876,11 +2879,11 @@ impl Snapshottable for Vm {
let common_cpuid = {
let amx = self.config.lock().unwrap().cpus.features.amx;
let phys_bits = physical_bits(
&self.hypervisor,
self.hypervisor.as_ref(),
self.config.lock().unwrap().cpus.max_phys_bits,
);
arch::generate_common_cpuid(
&self.hypervisor,
self.hypervisor.as_ref(),
&arch::CpuidConfig {
phys_bits,
kvm_hyperv: self.config.lock().unwrap().cpus.kvm_hyperv,