misc: Mark memory region APIs as unsafe
To ensure that struct sizes are the same on 32-bit and 64-bit, various kernel APIs use __u64 (Rust u64) to represent userspace pointers. Userspace is expected to cast pointers to __u64 before passing them to the kernel, and cast kernel-provided __u64 to a pointer before using them. However, various safe APIs in Cloud Hypervisor took caller-provided u64 values and passed them to syscalls that treat them as userspace addresses. Therefore, passing bad u64 values would cause memory disclosure or corruption. The memory region APIs are one example of this, so mark them as unsafe. Signed-off-by: Demi Marie Obenour <demiobenour@gmail.com>
This commit is contained in:
parent
00f0b9e42c
commit
fdc19ad85e
12 changed files with 489 additions and 407 deletions
|
|
@ -69,11 +69,7 @@ use crate::ClockData;
|
|||
use crate::arch::x86::{
|
||||
CpuIdEntry, FpuState, LapicState, MsrEntry, NUM_IOAPIC_PINS, SpecialRegisters, XsaveState,
|
||||
};
|
||||
use crate::{
|
||||
CpuState, IoEventAddress, IrqRoutingEntry, MpState, StandardRegisters,
|
||||
USER_MEMORY_REGION_LOG_DIRTY, USER_MEMORY_REGION_READ, USER_MEMORY_REGION_WRITE,
|
||||
UserMemoryRegion,
|
||||
};
|
||||
use crate::{CpuState, IoEventAddress, IrqRoutingEntry, MpState, StandardRegisters};
|
||||
// aarch64 dependencies
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
pub mod aarch64;
|
||||
|
|
@ -234,51 +230,6 @@ pub struct KvmTdxExitVmcall {
|
|||
pub out_rdx: u64,
|
||||
}
|
||||
|
||||
impl From<kvm_userspace_memory_region> for UserMemoryRegion {
|
||||
fn from(region: kvm_userspace_memory_region) -> Self {
|
||||
let mut flags = USER_MEMORY_REGION_READ;
|
||||
if region.flags & KVM_MEM_READONLY == 0 {
|
||||
flags |= USER_MEMORY_REGION_WRITE;
|
||||
}
|
||||
if region.flags & KVM_MEM_LOG_DIRTY_PAGES != 0 {
|
||||
flags |= USER_MEMORY_REGION_LOG_DIRTY;
|
||||
}
|
||||
|
||||
UserMemoryRegion {
|
||||
slot: region.slot,
|
||||
guest_phys_addr: region.guest_phys_addr,
|
||||
memory_size: region.memory_size,
|
||||
userspace_addr: region.userspace_addr,
|
||||
flags,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<UserMemoryRegion> for kvm_userspace_memory_region {
|
||||
fn from(region: UserMemoryRegion) -> Self {
|
||||
assert!(
|
||||
region.flags & USER_MEMORY_REGION_READ != 0,
|
||||
"KVM mapped memory is always readable"
|
||||
);
|
||||
|
||||
let mut flags = 0;
|
||||
if region.flags & USER_MEMORY_REGION_WRITE == 0 {
|
||||
flags |= KVM_MEM_READONLY;
|
||||
}
|
||||
if region.flags & USER_MEMORY_REGION_LOG_DIRTY != 0 {
|
||||
flags |= KVM_MEM_LOG_DIRTY_PAGES;
|
||||
}
|
||||
|
||||
kvm_userspace_memory_region {
|
||||
slot: region.slot,
|
||||
guest_phys_addr: region.guest_phys_addr,
|
||||
memory_size: region.memory_size,
|
||||
userspace_addr: region.userspace_addr,
|
||||
flags,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<kvm_mp_state> for MpState {
|
||||
fn from(s: kvm_mp_state) -> Self {
|
||||
MpState::Kvm(s)
|
||||
|
|
@ -758,10 +709,17 @@ impl vm::Vm for KvmVm {
|
|||
.map_err(|e| vm::HypervisorVmError::SetGsiRouting(e.into()))
|
||||
}
|
||||
|
||||
/// Creates a guest physical memory region.
|
||||
///
|
||||
/// Creates a memory region structure that can be used with {create/remove}_user_memory_region
|
||||
/// # Safety
|
||||
///
|
||||
fn make_user_memory_region(
|
||||
/// `userspace_addr` must point to `memory_size` bytes of memory
|
||||
/// that will stay mapped until a successful call to
|
||||
/// `remove_user_memory_region().` Freeing them with `munmap()`
|
||||
/// before then will cause undefined guest behavior but at least
|
||||
/// should not cause undefined behavior in the host. In theory,
|
||||
/// at least.
|
||||
unsafe fn create_user_memory_region(
|
||||
&self,
|
||||
slot: u32,
|
||||
guest_phys_addr: u64,
|
||||
|
|
@ -769,27 +727,24 @@ impl vm::Vm for KvmVm {
|
|||
userspace_addr: u64,
|
||||
readonly: bool,
|
||||
log_dirty_pages: bool,
|
||||
) -> UserMemoryRegion {
|
||||
kvm_userspace_memory_region {
|
||||
) -> vm::Result<()> {
|
||||
let mut flags = 0;
|
||||
if readonly {
|
||||
flags |= KVM_MEM_READONLY;
|
||||
}
|
||||
if log_dirty_pages {
|
||||
flags |= KVM_MEM_LOG_DIRTY_PAGES;
|
||||
}
|
||||
|
||||
const _: () = assert!(core::mem::size_of::<usize>() <= core::mem::size_of::<u64>());
|
||||
|
||||
let mut region = kvm_userspace_memory_region {
|
||||
slot,
|
||||
guest_phys_addr,
|
||||
memory_size,
|
||||
userspace_addr,
|
||||
flags: if readonly { KVM_MEM_READONLY } else { 0 }
|
||||
| if log_dirty_pages {
|
||||
KVM_MEM_LOG_DIRTY_PAGES
|
||||
} else {
|
||||
0
|
||||
},
|
||||
}
|
||||
.into()
|
||||
}
|
||||
|
||||
///
|
||||
/// Creates a guest physical memory region.
|
||||
///
|
||||
fn create_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
|
||||
let mut region: kvm_userspace_memory_region = user_memory_region.into();
|
||||
flags,
|
||||
};
|
||||
|
||||
if (region.flags & KVM_MEM_LOG_DIRTY_PAGES) != 0 {
|
||||
if (region.flags & KVM_MEM_READONLY) != 0 {
|
||||
|
|
@ -814,7 +769,7 @@ impl vm::Vm for KvmVm {
|
|||
region.flags = 0;
|
||||
}
|
||||
|
||||
// SAFETY: Safe because guest regions are guaranteed not to overlap.
|
||||
// SAFETY: Safe because caller promised this is safe.
|
||||
unsafe {
|
||||
self.fd
|
||||
.set_user_memory_region(region)
|
||||
|
|
@ -822,18 +777,45 @@ impl vm::Vm for KvmVm {
|
|||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Removes a guest physical memory region.
|
||||
///
|
||||
fn remove_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
|
||||
let mut region: kvm_userspace_memory_region = user_memory_region.into();
|
||||
/// # Safety
|
||||
///
|
||||
/// `userspace_addr` must point to `memory_size` bytes of memory,
|
||||
/// and `add_user_memory_region()` must have been successfully called.
|
||||
unsafe fn remove_user_memory_region(
|
||||
&self,
|
||||
slot: u32,
|
||||
guest_phys_addr: u64,
|
||||
memory_size: u64,
|
||||
userspace_addr: u64,
|
||||
readonly: bool,
|
||||
log_dirty_pages: bool,
|
||||
) -> vm::Result<()> {
|
||||
let mut flags = 0;
|
||||
if readonly {
|
||||
flags |= KVM_MEM_READONLY;
|
||||
}
|
||||
if log_dirty_pages {
|
||||
flags |= KVM_MEM_LOG_DIRTY_PAGES;
|
||||
}
|
||||
|
||||
const _: () = assert!(core::mem::size_of::<usize>() <= core::mem::size_of::<u64>());
|
||||
|
||||
let mut region = kvm_userspace_memory_region {
|
||||
slot,
|
||||
guest_phys_addr,
|
||||
memory_size,
|
||||
userspace_addr,
|
||||
flags,
|
||||
};
|
||||
|
||||
// Remove the corresponding entry from "self.dirty_log_slots" if needed
|
||||
self.dirty_log_slots.write().unwrap().remove(®ion.slot);
|
||||
|
||||
// Setting the size to 0 means "remove"
|
||||
region.memory_size = 0;
|
||||
// SAFETY: Safe because guest regions are guaranteed not to overlap.
|
||||
// SAFETY: Safe because caller promised this is safe.
|
||||
unsafe {
|
||||
self.fd
|
||||
.set_user_memory_region(region)
|
||||
|
|
@ -1211,7 +1193,7 @@ impl hypervisor::Hypervisor for KvmHypervisor {
|
|||
vm_type = KVM_X86_SW_PROTECTED_VM.into();
|
||||
} else {
|
||||
vm_type = KVM_X86_DEFAULT_VM.into();
|
||||
};
|
||||
}
|
||||
|
||||
loop {
|
||||
match self.kvm.create_vm_with_type(vm_type) {
|
||||
|
|
|
|||
|
|
@ -123,27 +123,6 @@ pub fn vec_with_array_field<T: Default, F>(count: usize) -> Vec<T> {
|
|||
vec_with_size_in_bytes(vec_size_bytes)
|
||||
}
|
||||
|
||||
///
/// User memory region structure
///
/// Hypervisor-agnostic description of a guest memory slot: where it sits in
/// guest physical address space, how large it is, which host virtual address
/// backs it, and its access/logging flags (`USER_MEMORY_REGION_*` bits).
///
#[derive(Debug, Default, Eq, PartialEq)]
pub struct UserMemoryRegion {
    /// Hypervisor slot index identifying this region.
    pub slot: u32,
    /// Start of the region in guest physical address space, in bytes.
    pub guest_phys_addr: u64,
    /// Length of the region in bytes.
    pub memory_size: u64,
    /// Host (VMM) virtual address backing the region, carried as `u64`.
    /// The hypervisor treats this as a raw userspace pointer, so it must
    /// reference valid mapped memory.
    pub userspace_addr: u64,
    /// Bitmask of `USER_MEMORY_REGION_*` flags.
    pub flags: u32,
}
|
||||
|
||||
///
/// Flags for user memory region
///
/// The region is readable by the guest.
pub const USER_MEMORY_REGION_READ: u32 = 1;
/// The region is writable by the guest.
pub const USER_MEMORY_REGION_WRITE: u32 = 1 << 1;
/// The region is executable by the guest.
pub const USER_MEMORY_REGION_EXECUTE: u32 = 1 << 2;
/// Dirty-page logging is enabled for the region.
pub const USER_MEMORY_REGION_LOG_DIRTY: u32 = 1 << 3;
/// The region may be adjusted after creation — exact semantics are
/// hypervisor-specific (NOTE(review): set unconditionally for MSHV
/// regions; confirm intended meaning against callers).
pub const USER_MEMORY_REGION_ADJUSTABLE: u32 = 1 << 4;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum MpState {
|
||||
#[cfg(feature = "kvm")]
|
||||
|
|
|
|||
|
|
@ -74,35 +74,10 @@ use crate::arch::aarch64::gic::{Vgic, VgicConfig};
|
|||
use crate::arch::aarch64::regs;
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
use crate::arch::x86::{CpuIdEntry, FpuState, MsrEntry};
|
||||
use crate::{
|
||||
CpuState, IoEventAddress, IrqRoutingEntry, MpState, USER_MEMORY_REGION_ADJUSTABLE,
|
||||
USER_MEMORY_REGION_EXECUTE, USER_MEMORY_REGION_READ, USER_MEMORY_REGION_WRITE,
|
||||
UserMemoryRegion,
|
||||
};
|
||||
use crate::{CpuState, IoEventAddress, IrqRoutingEntry, MpState};
|
||||
|
||||
pub const PAGE_SHIFT: usize = 12;
|
||||
|
||||
impl From<mshv_user_mem_region> for UserMemoryRegion {
|
||||
fn from(region: mshv_user_mem_region) -> Self {
|
||||
let mut flags: u32 = USER_MEMORY_REGION_READ | USER_MEMORY_REGION_ADJUSTABLE;
|
||||
if region.flags & (1 << MSHV_SET_MEM_BIT_WRITABLE) != 0 {
|
||||
flags |= USER_MEMORY_REGION_WRITE;
|
||||
}
|
||||
if region.flags & (1 << MSHV_SET_MEM_BIT_EXECUTABLE) != 0 {
|
||||
flags |= USER_MEMORY_REGION_EXECUTE;
|
||||
}
|
||||
|
||||
UserMemoryRegion {
|
||||
guest_phys_addr: (region.guest_pfn << PAGE_SHIFT as u64)
|
||||
+ (region.userspace_addr & ((1 << PAGE_SHIFT) - 1)),
|
||||
memory_size: region.size,
|
||||
userspace_addr: region.userspace_addr,
|
||||
flags,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
impl From<MshvClockData> for ClockData {
|
||||
fn from(d: MshvClockData) -> Self {
|
||||
|
|
@ -122,26 +97,6 @@ impl From<ClockData> for MshvClockData {
|
|||
}
|
||||
}
|
||||
|
||||
impl From<UserMemoryRegion> for mshv_user_mem_region {
|
||||
fn from(region: UserMemoryRegion) -> Self {
|
||||
let mut flags: u8 = 0;
|
||||
if region.flags & USER_MEMORY_REGION_WRITE != 0 {
|
||||
flags |= 1 << MSHV_SET_MEM_BIT_WRITABLE;
|
||||
}
|
||||
if region.flags & USER_MEMORY_REGION_EXECUTE != 0 {
|
||||
flags |= 1 << MSHV_SET_MEM_BIT_EXECUTABLE;
|
||||
}
|
||||
|
||||
mshv_user_mem_region {
|
||||
guest_pfn: region.guest_phys_addr >> PAGE_SHIFT,
|
||||
size: region.memory_size,
|
||||
userspace_addr: region.userspace_addr,
|
||||
flags,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<mshv_ioctls::IoEventAddress> for IoEventAddress {
|
||||
fn from(a: mshv_ioctls::IoEventAddress) -> Self {
|
||||
match a {
|
||||
|
|
@ -1918,8 +1873,36 @@ impl vm::Vm for MshvVm {
|
|||
}
|
||||
|
||||
/// Creates a guest physical memory region.
|
||||
fn create_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
|
||||
let user_memory_region: mshv_user_mem_region = user_memory_region.into();
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// `userspace_addr` must point to `memory_size` bytes of memory
|
||||
/// that will stay mapped until a successful call to
|
||||
/// `remove_user_memory_region().` Freeing them with `munmap()`
|
||||
/// before then will cause undefined guest behavior but at least
|
||||
/// should not cause undefined behavior in the host. In theory,
|
||||
/// at least.
|
||||
unsafe fn create_user_memory_region(
|
||||
&self,
|
||||
_slot: u32,
|
||||
guest_phys_addr: u64,
|
||||
memory_size: u64,
|
||||
userspace_addr: u64,
|
||||
readonly: bool,
|
||||
_log_dirty_pages: bool,
|
||||
) -> vm::Result<()> {
|
||||
let mut flags = 1 << MSHV_SET_MEM_BIT_EXECUTABLE;
|
||||
if !readonly {
|
||||
flags |= 1 << MSHV_SET_MEM_BIT_WRITABLE;
|
||||
}
|
||||
|
||||
let user_memory_region = mshv_user_mem_region {
|
||||
flags,
|
||||
guest_pfn: guest_phys_addr >> PAGE_SHIFT,
|
||||
size: memory_size,
|
||||
userspace_addr,
|
||||
..Default::default()
|
||||
};
|
||||
// No matter read only or not we keep track the slots.
|
||||
// For readonly hypervisor can enable the dirty bits,
|
||||
// but a VM exit happens before setting the dirty bits
|
||||
|
|
@ -1938,8 +1921,32 @@ impl vm::Vm for MshvVm {
|
|||
}
|
||||
|
||||
/// Removes a guest physical memory region.
|
||||
fn remove_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
|
||||
let user_memory_region: mshv_user_mem_region = user_memory_region.into();
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// `userspace_addr` must point to `memory_size` bytes of memory,
|
||||
/// and `add_user_memory_region()` must have been successfully called.
|
||||
unsafe fn remove_user_memory_region(
|
||||
&self,
|
||||
_slot: u32,
|
||||
guest_phys_addr: u64,
|
||||
memory_size: u64,
|
||||
userspace_addr: u64,
|
||||
readonly: bool,
|
||||
_log_dirty_pages: bool,
|
||||
) -> vm::Result<()> {
|
||||
let mut flags = 1 << MSHV_SET_MEM_BIT_EXECUTABLE;
|
||||
if !readonly {
|
||||
flags |= 1 << MSHV_SET_MEM_BIT_WRITABLE;
|
||||
}
|
||||
|
||||
let user_memory_region = mshv_user_mem_region {
|
||||
flags,
|
||||
guest_pfn: guest_phys_addr >> PAGE_SHIFT,
|
||||
size: memory_size,
|
||||
userspace_addr,
|
||||
..Default::default()
|
||||
};
|
||||
// Remove the corresponding entry from "self.dirty_log_slots" if needed
|
||||
self.dirty_log_slots
|
||||
.write()
|
||||
|
|
@ -1952,30 +1959,6 @@ impl vm::Vm for MshvVm {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
fn make_user_memory_region(
|
||||
&self,
|
||||
_slot: u32,
|
||||
guest_phys_addr: u64,
|
||||
memory_size: u64,
|
||||
userspace_addr: u64,
|
||||
readonly: bool,
|
||||
_log_dirty_pages: bool,
|
||||
) -> UserMemoryRegion {
|
||||
let mut flags = 1 << MSHV_SET_MEM_BIT_EXECUTABLE;
|
||||
if !readonly {
|
||||
flags |= 1 << MSHV_SET_MEM_BIT_WRITABLE;
|
||||
}
|
||||
|
||||
mshv_user_mem_region {
|
||||
flags,
|
||||
guest_pfn: guest_phys_addr >> PAGE_SHIFT,
|
||||
size: memory_size,
|
||||
userspace_addr,
|
||||
..Default::default()
|
||||
}
|
||||
.into()
|
||||
}
|
||||
|
||||
fn create_passthrough_device(&self) -> vm::Result<VfioDeviceFd> {
|
||||
let mut vfio_dev = mshv_create_device {
|
||||
type_: MSHV_DEV_TYPE_VFIO,
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ use crate::arch::riscv64::aia::{Vaia, VaiaConfig};
|
|||
#[cfg(feature = "tdx")]
|
||||
use crate::arch::x86::CpuIdEntry;
|
||||
use crate::cpu::Vcpu;
|
||||
use crate::{IoEventAddress, IrqRoutingEntry, UserMemoryRegion};
|
||||
use crate::{IoEventAddress, IrqRoutingEntry};
|
||||
|
||||
///
|
||||
/// I/O events data matches (32 or 64 bits).
|
||||
|
|
@ -335,8 +335,13 @@ pub trait Vm: Send + Sync + Any {
|
|||
fn make_routing_entry(&self, gsi: u32, config: &InterruptSourceConfig) -> IrqRoutingEntry;
|
||||
/// Sets the GSI routing table entries, overwriting any previously set
|
||||
fn set_gsi_routing(&self, entries: &[IrqRoutingEntry]) -> Result<()>;
|
||||
/// Creates a memory region structure that can be used with {create/remove}_user_memory_region
|
||||
fn make_user_memory_region(
|
||||
/// Creates a guest physical memory slot.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// `[userspace_addr, userspace_addr + memory_size)` must be valid memory,
|
||||
/// and that address range must remain valid until [`Vm::remove_user_memory_region`] is called.
|
||||
unsafe fn create_user_memory_region(
|
||||
&self,
|
||||
slot: u32,
|
||||
guest_phys_addr: u64,
|
||||
|
|
@ -344,11 +349,21 @@ pub trait Vm: Send + Sync + Any {
|
|||
userspace_addr: u64,
|
||||
readonly: bool,
|
||||
log_dirty_pages: bool,
|
||||
) -> UserMemoryRegion;
|
||||
/// Creates a guest physical memory slot.
|
||||
fn create_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> Result<()>;
|
||||
) -> Result<()>;
|
||||
/// Removes a guest physical memory slot.
|
||||
fn remove_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> Result<()>;
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// `[userspace_addr, userspace_addr + memory_size)` must be valid memory,
|
||||
unsafe fn remove_user_memory_region(
|
||||
&self,
|
||||
slot: u32,
|
||||
guest_phys_addr: u64,
|
||||
memory_size: u64,
|
||||
userspace_addr: u64,
|
||||
readonly: bool,
|
||||
log_dirty_pages: bool,
|
||||
) -> Result<()>;
|
||||
/// Returns the preferred CPU target type which can be emulated by KVM on underlying host.
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
fn get_preferred_target(&self, kvi: &mut crate::VcpuInit) -> Result<()>;
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue