From 7be69edf5179e948b892693f6c88cba9626a0795 Mon Sep 17 00:00:00 2001 From: Rob Bradford Date: Thu, 25 Apr 2024 16:18:53 +0100 Subject: [PATCH] hypervisor: kvm: Introduce Mutex around VcpuFd This is required as the VcpuFd::run and VcpuFd::set_immediate_exit methods now take a &mut self. I explored alternative solutions: 1. Using RefCell for runtime interior mutability - the Vcpu trait is Sync and it's not possible to use RefCell with types that are Sync. 2. Using UnsafeCell - the mutable reference nature of ::run and ::set_kvm_immediate_exit was added for a reason so it is unwise to bypass this. 3. Adjusting the trait interface to expose the &mut self - this requires an Arc<Mutex<dyn Vcpu>> around the hypervisor::vcpu::Vcpu object and was very intrusive. Signed-off-by: Rob Bradford --- hypervisor/src/kvm/mod.rs | 144 ++++++++++++++++++++++++++++++++++---- 1 file changed, 129 insertions(+), 15 deletions(-) diff --git a/hypervisor/src/kvm/mod.rs b/hypervisor/src/kvm/mod.rs index 2f201a974..2ff93aa14 100644 --- a/hypervisor/src/kvm/mod.rs +++ b/hypervisor/src/kvm/mod.rs @@ -36,7 +36,6 @@ use std::os::unix::io::RawFd; use std::result; #[cfg(target_arch = "x86_64")] use std::sync::atomic::{AtomicBool, Ordering}; -#[cfg(target_arch = "aarch64")] use std::sync::Mutex; use std::sync::{Arc, RwLock}; use vmm_sys_util::eventfd::EventFd; @@ -455,12 +454,12 @@ impl vm::Vm for KvmVm { id: u8, vm_ops: Option>, ) -> vm::Result> { - let vc = self .fd .create_vcpu(id as u64) .map_err(|e| vm::HypervisorVmError::CreateVcpu(e.into()))?; let vcpu = KvmVcpu { - fd: vc, + fd: Arc::new(Mutex::new(fd)), #[cfg(target_arch = "x86_64")] msrs: self.msrs.clone(), vm_ops, @@ -1170,7 +1169,7 @@ impl hypervisor::Hypervisor for KvmHypervisor { /// Vcpu struct for KVM pub struct KvmVcpu { - fd: VcpuFd, + fd: Arc>, #[cfg(target_arch = "x86_64")] msrs: Vec, vm_ops: Option>, @@ -1198,6 +1197,8 @@ impl cpu::Vcpu for KvmVcpu { fn get_regs(&self) -> cpu::Result { Ok(self .fd + .lock() + .unwrap() 
.get_regs() .map_err(|e| cpu::HypervisorCpuError::GetStandardRegs(e.into()))? .into()) @@ -1219,6 +1220,8 @@ impl cpu::Vcpu for KvmVcpu { for i in 0..31 { let mut bytes = [0_u8; 8]; self.fd + .lock() + .unwrap() .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes) .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; state.regs.regs[i] = u64::from_le_bytes(bytes); @@ -1230,6 +1233,8 @@ impl cpu::Vcpu for KvmVcpu { let off = offset_of!(user_pt_regs, sp); let mut bytes = [0_u8; 8]; self.fd + .lock() + .unwrap() .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes) .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; state.regs.sp = u64::from_le_bytes(bytes); @@ -1238,6 +1243,8 @@ impl cpu::Vcpu for KvmVcpu { let off = offset_of!(user_pt_regs, pc); let mut bytes = [0_u8; 8]; self.fd + .lock() + .unwrap() .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes) .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; state.regs.pc = u64::from_le_bytes(bytes); @@ -1246,6 +1253,8 @@ impl cpu::Vcpu for KvmVcpu { let off = offset_of!(user_pt_regs, pstate); let mut bytes = [0_u8; 8]; self.fd + .lock() + .unwrap() .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes) .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; state.regs.pstate = u64::from_le_bytes(bytes); @@ -1254,6 +1263,8 @@ impl cpu::Vcpu for KvmVcpu { let off = offset_of!(kvm_regs, sp_el1); let mut bytes = [0_u8; 8]; self.fd + .lock() + .unwrap() .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes) .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; state.sp_el1 = u64::from_le_bytes(bytes); @@ -1263,6 +1274,8 @@ impl cpu::Vcpu for KvmVcpu { let off = offset_of!(kvm_regs, elr_el1); let mut bytes = [0_u8; 8]; self.fd + .lock() + .unwrap() .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes) .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; 
state.elr_el1 = u64::from_le_bytes(bytes); @@ -1272,6 +1285,8 @@ impl cpu::Vcpu for KvmVcpu { for i in 0..KVM_NR_SPSR as usize { let mut bytes = [0_u8; 8]; self.fd + .lock() + .unwrap() .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &mut bytes) .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; state.spsr[i] = u64::from_le_bytes(bytes); @@ -1284,6 +1299,8 @@ impl cpu::Vcpu for KvmVcpu { for i in 0..32 { let mut bytes = [0_u8; 16]; self.fd + .lock() + .unwrap() .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U128, off), &mut bytes) .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; state.fp_regs.vregs[i] = u128::from_le_bytes(bytes); @@ -1294,6 +1311,8 @@ impl cpu::Vcpu for KvmVcpu { let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpsr); let mut bytes = [0_u8; 4]; self.fd + .lock() + .unwrap() .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U32, off), &mut bytes) .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; state.fp_regs.fpsr = u32::from_le_bytes(bytes); @@ -1302,6 +1321,8 @@ impl cpu::Vcpu for KvmVcpu { let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpcr); let mut bytes = [0_u8; 4]; self.fd + .lock() + .unwrap() .get_one_reg(arm64_core_reg_id!(KVM_REG_SIZE_U32, off), &mut bytes) .map_err(|e| cpu::HypervisorCpuError::GetCoreRegister(e.into()))?; state.fp_regs.fpcr = u32::from_le_bytes(bytes); @@ -1315,6 +1336,8 @@ impl cpu::Vcpu for KvmVcpu { fn set_regs(&self, regs: &StandardRegisters) -> cpu::Result<()> { let regs = (*regs).into(); self.fd + .lock() + .unwrap() .set_regs(®s) .map_err(|e| cpu::HypervisorCpuError::SetStandardRegs(e.into())) } @@ -1331,6 +1354,8 @@ impl cpu::Vcpu for KvmVcpu { let mut off = offset_of!(user_pt_regs, regs); for i in 0..31 { self.fd + .lock() + .unwrap() .set_one_reg( arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &state.regs.regs[i].to_le_bytes(), @@ -1341,6 +1366,8 @@ impl cpu::Vcpu for KvmVcpu { let off = offset_of!(user_pt_regs, 
sp); self.fd + .lock() + .unwrap() .set_one_reg( arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &state.regs.sp.to_le_bytes(), @@ -1349,6 +1376,8 @@ impl cpu::Vcpu for KvmVcpu { let off = offset_of!(user_pt_regs, pc); self.fd + .lock() + .unwrap() .set_one_reg( arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &state.regs.pc.to_le_bytes(), @@ -1357,6 +1386,8 @@ impl cpu::Vcpu for KvmVcpu { let off = offset_of!(user_pt_regs, pstate); self.fd + .lock() + .unwrap() .set_one_reg( arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &state.regs.pstate.to_le_bytes(), @@ -1365,6 +1396,8 @@ impl cpu::Vcpu for KvmVcpu { let off = offset_of!(kvm_regs, sp_el1); self.fd + .lock() + .unwrap() .set_one_reg( arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &state.sp_el1.to_le_bytes(), @@ -1373,6 +1406,8 @@ impl cpu::Vcpu for KvmVcpu { let off = offset_of!(kvm_regs, elr_el1); self.fd + .lock() + .unwrap() .set_one_reg( arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &state.elr_el1.to_le_bytes(), @@ -1382,6 +1417,8 @@ impl cpu::Vcpu for KvmVcpu { let mut off = offset_of!(kvm_regs, spsr); for i in 0..KVM_NR_SPSR as usize { self.fd + .lock() + .unwrap() .set_one_reg( arm64_core_reg_id!(KVM_REG_SIZE_U64, off), &state.spsr[i].to_le_bytes(), @@ -1393,6 +1430,8 @@ impl cpu::Vcpu for KvmVcpu { let mut off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, vregs); for i in 0..32 { self.fd + .lock() + .unwrap() .set_one_reg( arm64_core_reg_id!(KVM_REG_SIZE_U128, off), &state.fp_regs.vregs[i].to_le_bytes(), @@ -1403,6 +1442,8 @@ impl cpu::Vcpu for KvmVcpu { let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpsr); self.fd + .lock() + .unwrap() .set_one_reg( arm64_core_reg_id!(KVM_REG_SIZE_U32, off), &state.fp_regs.fpsr.to_le_bytes(), @@ -1411,6 +1452,8 @@ impl cpu::Vcpu for KvmVcpu { let off = offset_of!(kvm_regs, fp_regs) + offset_of!(user_fpsimd_state, fpcr); self.fd + .lock() + .unwrap() .set_one_reg( arm64_core_reg_id!(KVM_REG_SIZE_U32, off), &state.fp_regs.fpcr.to_le_bytes(), @@ 
-1426,6 +1469,8 @@ impl cpu::Vcpu for KvmVcpu { fn get_sregs(&self) -> cpu::Result { Ok(self .fd + .lock() + .unwrap() .get_sregs() .map_err(|e| cpu::HypervisorCpuError::GetSpecialRegs(e.into()))? .into()) @@ -1438,6 +1483,8 @@ impl cpu::Vcpu for KvmVcpu { fn set_sregs(&self, sregs: &SpecialRegisters) -> cpu::Result<()> { let sregs = (*sregs).into(); self.fd + .lock() + .unwrap() .set_sregs(&sregs) .map_err(|e| cpu::HypervisorCpuError::SetSpecialRegs(e.into())) } @@ -1449,6 +1496,8 @@ impl cpu::Vcpu for KvmVcpu { fn get_fpu(&self) -> cpu::Result { Ok(self .fd + .lock() + .unwrap() .get_fpu() .map_err(|e| cpu::HypervisorCpuError::GetFloatingPointRegs(e.into()))? .into()) @@ -1461,6 +1510,8 @@ impl cpu::Vcpu for KvmVcpu { fn set_fpu(&self, fpu: &FpuState) -> cpu::Result<()> { let fpu: kvm_bindings::kvm_fpu = (*fpu).clone().into(); self.fd + .lock() + .unwrap() .set_fpu(&fpu) .map_err(|e| cpu::HypervisorCpuError::SetFloatingPointRegs(e.into())) } @@ -1476,6 +1527,8 @@ impl cpu::Vcpu for KvmVcpu { .map_err(|_| cpu::HypervisorCpuError::SetCpuid(anyhow!("failed to create CpuId")))?; self.fd + .lock() + .unwrap() .set_cpuid2(&kvm_cpuid) .map_err(|e| cpu::HypervisorCpuError::SetCpuid(e.into())) } @@ -1494,6 +1547,8 @@ impl cpu::Vcpu for KvmVcpu { ..Default::default() }; self.fd + .lock() + .unwrap() .enable_cap(&cap) .map_err(|e| cpu::HypervisorCpuError::EnableHyperVSyncIc(e.into())) } @@ -1505,6 +1560,8 @@ impl cpu::Vcpu for KvmVcpu { fn get_cpuid2(&self, num_entries: usize) -> cpu::Result> { let kvm_cpuid = self .fd + .lock() + .unwrap() .get_cpuid2(num_entries) .map_err(|e| cpu::HypervisorCpuError::GetCpuid(e.into()))?; @@ -1520,6 +1577,8 @@ impl cpu::Vcpu for KvmVcpu { fn get_lapic(&self) -> cpu::Result { Ok(self .fd + .lock() + .unwrap() .get_lapic() .map_err(|e| cpu::HypervisorCpuError::GetlapicState(e.into()))? 
.into()) @@ -1532,6 +1591,8 @@ impl cpu::Vcpu for KvmVcpu { fn set_lapic(&self, klapic: &LapicState) -> cpu::Result<()> { let klapic: kvm_bindings::kvm_lapic_state = (*klapic).clone().into(); self.fd + .lock() + .unwrap() .set_lapic(&klapic) .map_err(|e| cpu::HypervisorCpuError::SetLapicState(e.into())) } @@ -1545,6 +1606,8 @@ impl cpu::Vcpu for KvmVcpu { let mut kvm_msrs = MsrEntries::from_entries(&kvm_msrs).unwrap(); let succ = self .fd + .lock() + .unwrap() .get_msrs(&mut kvm_msrs) .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))?; @@ -1567,6 +1630,8 @@ impl cpu::Vcpu for KvmVcpu { let kvm_msrs: Vec = msrs.iter().map(|e| (*e).into()).collect(); let kvm_msrs = MsrEntries::from_entries(&kvm_msrs).unwrap(); self.fd + .lock() + .unwrap() .set_msrs(&kvm_msrs) .map_err(|e| cpu::HypervisorCpuError::SetMsrEntries(e.into())) } @@ -1577,6 +1642,8 @@ impl cpu::Vcpu for KvmVcpu { fn get_mp_state(&self) -> cpu::Result { Ok(self .fd + .lock() + .unwrap() .get_mp_state() .map_err(|e| cpu::HypervisorCpuError::GetMpState(e.into()))? .into()) @@ -1587,6 +1654,8 @@ impl cpu::Vcpu for KvmVcpu { /// fn set_mp_state(&self, mp_state: MpState) -> cpu::Result<()> { self.fd + .lock() + .unwrap() .set_mp_state(mp_state.into()) .map_err(|e| cpu::HypervisorCpuError::SetMpState(e.into())) } @@ -1598,6 +1667,8 @@ impl cpu::Vcpu for KvmVcpu { fn translate_gva(&self, gva: u64, _flags: u64) -> cpu::Result<(u64, u32)> { let tr = self .fd + .lock() + .unwrap() .translate_gva(gva) .map_err(|e| cpu::HypervisorCpuError::TranslateVirtualAddress(e.into()))?; // tr.valid is set if the GVA is mapped to valid GPA. @@ -1614,7 +1685,7 @@ impl cpu::Vcpu for KvmVcpu { /// Triggers the running of the current virtual CPU returning an exit reason. 
/// fn run(&self) -> std::result::Result { - match self.fd.run() { + match self.fd.lock().unwrap().run() { Ok(run) => match run { #[cfg(target_arch = "x86_64")] VcpuExit::IoIn(addr, data) => { @@ -1708,7 +1779,7 @@ impl cpu::Vcpu for KvmVcpu { /// potential soft lockups when being resumed. /// fn notify_guest_clock_paused(&self) -> cpu::Result<()> { - if let Err(e) = self.fd.kvmclock_ctrl() { + if let Err(e) = self.fd.lock().unwrap().kvmclock_ctrl() { // Linux kernel returns -EINVAL if the PV clock isn't yet initialised // which could be because we're still in firmware or the guest doesn't // use KVM clock. @@ -1770,6 +1841,8 @@ impl cpu::Vcpu for KvmVcpu { } } self.fd + .lock() + .unwrap() .set_guest_debug(&dbg) .map_err(|e| cpu::HypervisorCpuError::SetDebugRegs(e.into())) } @@ -1777,6 +1850,8 @@ impl cpu::Vcpu for KvmVcpu { #[cfg(target_arch = "aarch64")] fn vcpu_init(&self, kvi: &VcpuInit) -> cpu::Result<()> { self.fd + .lock() + .unwrap() .vcpu_init(kvi) .map_err(|e| cpu::HypervisorCpuError::VcpuInit(e.into())) } @@ -1788,6 +1863,8 @@ impl cpu::Vcpu for KvmVcpu { #[cfg(target_arch = "aarch64")] fn get_reg_list(&self, reg_list: &mut RegList) -> cpu::Result<()> { self.fd + .lock() + .unwrap() .get_reg_list(reg_list) .map_err(|e| cpu::HypervisorCpuError::GetRegList(e.into())) } @@ -1820,6 +1897,8 @@ impl cpu::Vcpu for KvmVcpu { | KVM_REG_ARM64_SYSREG_OP2_MASK)) as u64); let mut bytes = [0_u8; 8]; self.fd + .lock() + .unwrap() .get_one_reg(id, &mut bytes) .map_err(|e| cpu::HypervisorCpuError::GetSysRegister(e.into()))?; Ok(u64::from_le_bytes(bytes)) @@ -1847,6 +1926,8 @@ impl cpu::Vcpu for KvmVcpu { // Get the register index of the PSTATE (Processor State) register. 
let pstate = offset_of!(user_pt_regs, pstate) + kreg_off; self.fd + .lock() + .unwrap() .set_one_reg( arm64_core_reg_id!(KVM_REG_SIZE_U64, pstate), &PSTATE_FAULT_BITS_64.to_le_bytes(), @@ -1858,6 +1939,8 @@ impl cpu::Vcpu for KvmVcpu { // Setting the PC (Processor Counter) to the current program address (kernel address). let pc = offset_of!(user_pt_regs, pc) + kreg_off; self.fd + .lock() + .unwrap() .set_one_reg( arm64_core_reg_id!(KVM_REG_SIZE_U64, pc), &boot_ip.to_le_bytes(), @@ -1870,6 +1953,8 @@ impl cpu::Vcpu for KvmVcpu { // We are choosing to place it the end of DRAM. See `get_fdt_addr`. let regs0 = offset_of!(user_pt_regs, regs) + kreg_off; self.fd + .lock() + .unwrap() .set_one_reg( arm64_core_reg_id!(KVM_REG_SIZE_U64, regs0), &fdt_start.to_le_bytes(), @@ -2021,6 +2106,8 @@ impl cpu::Vcpu for KvmVcpu { let mut sys_regs: Vec = Vec::new(); let mut reg_list = RegList::new(500).unwrap(); self.fd + .lock() + .unwrap() .get_reg_list(&mut reg_list) .map_err(|e| cpu::HypervisorCpuError::GetRegList(e.into()))?; @@ -2039,6 +2126,8 @@ impl cpu::Vcpu for KvmVcpu { for index in indices.iter() { let mut bytes = [0_u8; 8]; self.fd + .lock() + .unwrap() .get_one_reg(*index, &mut bytes) .map_err(|e| cpu::HypervisorCpuError::GetSysRegister(e.into()))?; sys_regs.push(kvm_bindings::kvm_one_reg { @@ -2153,6 +2242,8 @@ impl cpu::Vcpu for KvmVcpu { // Set system registers for reg in &state.sys_regs { self.fd + .lock() + .unwrap() .set_one_reg(reg.id, ®.addr.to_le_bytes()) .map_err(|e| cpu::HypervisorCpuError::SetSysRegister(e.into()))?; } @@ -2167,15 +2258,20 @@ impl cpu::Vcpu for KvmVcpu { /// #[cfg(feature = "tdx")] fn tdx_init(&self, hob_address: u64) -> cpu::Result<()> { - tdx_command(&self.fd.as_raw_fd(), TdxCommand::InitVcpu, 0, hob_address) - .map_err(cpu::HypervisorCpuError::InitializeTdx) + tdx_command( + &self.fd.lock().unwrap().as_raw_fd(), + TdxCommand::InitVcpu, + 0, + hob_address, + ) + .map_err(cpu::HypervisorCpuError::InitializeTdx) } /// /// Set the 
"immediate_exit" state /// fn set_immediate_exit(&self, exit: bool) { - self.fd.set_kvm_immediate_exit(exit.into()); + self.fd.lock().unwrap().set_kvm_immediate_exit(exit.into()); } /// @@ -2183,7 +2279,8 @@ impl cpu::Vcpu for KvmVcpu { /// #[cfg(feature = "tdx")] fn get_tdx_exit_details(&mut self) -> cpu::Result { - let kvm_run = self.fd.get_kvm_run(); + let mut fd = self.fd.as_ref().lock().unwrap(); + let kvm_run = fd.get_kvm_run(); // SAFETY: accessing a union field in a valid structure let tdx_vmcall = unsafe { &mut (*((&mut kvm_run.__bindgen_anon_1) as *mut kvm_run__bindgen_ty_1 @@ -2212,7 +2309,8 @@ impl cpu::Vcpu for KvmVcpu { /// #[cfg(feature = "tdx")] fn set_tdx_status(&mut self, status: TdxExitStatus) { - let kvm_run = self.fd.get_kvm_run(); + let mut fd = self.fd.as_ref().lock().unwrap(); + let kvm_run = fd.get_kvm_run(); // SAFETY: accessing a union field in a valid structure let tdx_vmcall = unsafe { &mut (*((&mut kvm_run.__bindgen_anon_1) as *mut kvm_run__bindgen_ty_1 @@ -2261,7 +2359,7 @@ impl cpu::Vcpu for KvmVcpu { addr: 0x0, flags: 0, }; - self.fd.has_device_attr(&cpu_attr).is_ok() + self.fd.lock().unwrap().has_device_attr(&cpu_attr).is_ok() } #[cfg(target_arch = "aarch64")] @@ -2279,9 +2377,13 @@ impl cpu::Vcpu for KvmVcpu { flags: 0, }; self.fd + .lock() + .unwrap() .set_device_attr(&cpu_attr_irq) .map_err(|_| cpu::HypervisorCpuError::InitializePmu)?; self.fd + .lock() + .unwrap() .set_device_attr(&cpu_attr) .map_err(|_| cpu::HypervisorCpuError::InitializePmu) } @@ -2291,7 +2393,7 @@ impl cpu::Vcpu for KvmVcpu { /// Get the frequency of the TSC if available /// fn tsc_khz(&self) -> cpu::Result> { - match self.fd.get_tsc_khz() { + match self.fd.lock().unwrap().get_tsc_khz() { Err(e) => { if e.errno() == libc::EIO { Ok(None) @@ -2308,7 +2410,7 @@ impl cpu::Vcpu for KvmVcpu { /// Set the frequency of the TSC if available /// fn set_tsc_khz(&self, freq: u32) -> cpu::Result<()> { - match self.fd.set_tsc_khz(freq) { + match 
self.fd.lock().unwrap().set_tsc_khz(freq) { Err(e) => { if e.errno() == libc::EIO { Ok(()) @@ -2325,7 +2427,7 @@ impl cpu::Vcpu for KvmVcpu { /// Trigger NMI interrupt /// fn nmi(&self) -> cpu::Result<()> { - match self.fd.nmi() { + match self.fd.lock().unwrap().nmi() { Err(e) => { if e.errno() == libc::EIO { Ok(()) @@ -2346,6 +2448,8 @@ impl KvmVcpu { fn get_xsave(&self) -> cpu::Result { Ok(self .fd + .lock() + .unwrap() .get_xsave() .map_err(|e| cpu::HypervisorCpuError::GetXsaveState(e.into()))? .into()) @@ -2358,6 +2462,8 @@ impl KvmVcpu { fn set_xsave(&self, xsave: &XsaveState) -> cpu::Result<()> { let xsave: kvm_bindings::kvm_xsave = (*xsave).clone().into(); self.fd + .lock() + .unwrap() .set_xsave(&xsave) .map_err(|e| cpu::HypervisorCpuError::SetXsaveState(e.into())) } @@ -2368,6 +2474,8 @@ impl KvmVcpu { /// fn get_xcrs(&self) -> cpu::Result { self.fd + .lock() + .unwrap() .get_xcrs() .map_err(|e| cpu::HypervisorCpuError::GetXcsr(e.into())) } @@ -2378,6 +2486,8 @@ impl KvmVcpu { /// fn set_xcrs(&self, xcrs: &ExtendedControlRegisters) -> cpu::Result<()> { self.fd + .lock() + .unwrap() .set_xcrs(xcrs) .map_err(|e| cpu::HypervisorCpuError::SetXcsr(e.into())) } @@ -2389,6 +2499,8 @@ impl KvmVcpu { /// fn get_vcpu_events(&self) -> cpu::Result { self.fd + .lock() + .unwrap() .get_vcpu_events() .map_err(|e| cpu::HypervisorCpuError::GetVcpuEvents(e.into())) } @@ -2400,6 +2512,8 @@ impl KvmVcpu { /// fn set_vcpu_events(&self, events: &VcpuEvents) -> cpu::Result<()> { self.fd + .lock() + .unwrap() .set_vcpu_events(events) .map_err(|e| cpu::HypervisorCpuError::SetVcpuEvents(e.into())) }