misc: Remove SGX support from Cloud Hypervisor

This commit removes SGX support from Cloud Hypervisor. SGX support
was deprecated in May as part of #7090.

Signed-off-by: Shubham Chakrawar <schakrawar@crusoe.ai>
This commit is contained in:
Shubham Chakrawar 2025-08-15 13:18:14 -07:00 committed by Bo Chen
parent 7281459bf9
commit 2d9e243163
23 changed files with 11 additions and 805 deletions

View file

@ -2,8 +2,6 @@ verbose = "info"
exclude = [ exclude = [
# Availability of links below should be manually verified. # Availability of links below should be manually verified.
# Page for intel SGX support, returns 403 while querying.
'^https://www.intel.com/content/www/us/en/developer/tools/software-guard-extensions/linux-overview.html',
# Page for intel TDX support, returns 403 while querying. # Page for intel TDX support, returns 403 while querying.
'^https://www.intel.com/content/www/us/en/developer/tools/trust-domain-extensions/overview.html', '^https://www.intel.com/content/www/us/en/developer/tools/trust-domain-extensions/overview.html',
# Page for TPM, returns 403 while querying. # Page for TPM, returns 403 while querying.

View file

@ -18,9 +18,6 @@ use std::{fmt, result};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use thiserror::Error; use thiserror::Error;
#[cfg(target_arch = "x86_64")]
use crate::x86_64::SgxEpcSection;
type GuestMemoryMmap = vm_memory::GuestMemoryMmap<vm_memory::bitmap::AtomicBitmap>; type GuestMemoryMmap = vm_memory::GuestMemoryMmap<vm_memory::bitmap::AtomicBitmap>;
type GuestRegionMmap = vm_memory::GuestRegionMmap<vm_memory::bitmap::AtomicBitmap>; type GuestRegionMmap = vm_memory::GuestRegionMmap<vm_memory::bitmap::AtomicBitmap>;
@ -127,8 +124,6 @@ pub struct NumaNode {
pub pci_segments: Vec<u16>, pub pci_segments: Vec<u16>,
pub distances: BTreeMap<u32, u8>, pub distances: BTreeMap<u32, u8>,
pub memory_zones: Vec<String>, pub memory_zones: Vec<String>,
#[cfg(target_arch = "x86_64")]
pub sgx_epc_sections: Vec<SgxEpcSection>,
} }
pub type NumaNodes = BTreeMap<u32, NumaNode>; pub type NumaNodes = BTreeMap<u32, NumaNode>;

View file

@ -12,7 +12,6 @@ pub mod layout;
mod mpspec; mod mpspec;
mod mptable; mod mptable;
pub mod regs; pub mod regs;
use std::collections::BTreeMap;
use std::mem; use std::mem;
use hypervisor::arch::x86::{CpuIdEntry, CPUID_FLAG_VALID_INDEX}; use hypervisor::arch::x86::{CpuIdEntry, CPUID_FLAG_VALID_INDEX};
@ -24,7 +23,7 @@ use linux_loader::loader::elf::start_info::{
use thiserror::Error; use thiserror::Error;
use vm_memory::{ use vm_memory::{
Address, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, Address, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic,
GuestMemoryRegion, GuestUsize, GuestMemoryRegion,
}; };
use crate::{GuestMemoryMmap, InitramfsConfig, RegionType}; use crate::{GuestMemoryMmap, InitramfsConfig, RegionType};
@ -79,55 +78,7 @@ pub struct EntryPoint {
const E820_RAM: u32 = 1; const E820_RAM: u32 = 1;
const E820_RESERVED: u32 = 2; const E820_RESERVED: u32 = 2;
/// A single SGX Enclave Page Cache (EPC) section exposed to the guest.
#[derive(Clone)]
pub struct SgxEpcSection {
    start: GuestAddress,
    size: GuestUsize,
}

impl SgxEpcSection {
    /// Creates a section covering `size` bytes starting at `start`.
    pub fn new(start: GuestAddress, size: GuestUsize) -> Self {
        Self { start, size }
    }

    /// Guest-physical address where the section begins.
    pub fn start(&self) -> GuestAddress {
        self.start
    }

    /// Section length in bytes.
    pub fn size(&self) -> GuestUsize {
        self.size
    }
}
/// The contiguous guest address range holding every EPC section,
/// with sections looked up by their user-supplied identifier.
#[derive(Clone)]
pub struct SgxEpcRegion {
    start: GuestAddress,
    size: GuestUsize,
    epc_sections: BTreeMap<String, SgxEpcSection>,
}

impl SgxEpcRegion {
    /// Creates an empty region of `size` bytes starting at `start`.
    pub fn new(start: GuestAddress, size: GuestUsize) -> Self {
        Self {
            start,
            size,
            epc_sections: BTreeMap::new(),
        }
    }

    /// Guest-physical address where the region begins.
    pub fn start(&self) -> GuestAddress {
        self.start
    }

    /// Region length in bytes.
    pub fn size(&self) -> GuestUsize {
        self.size
    }

    /// All sections registered so far, keyed by identifier.
    pub fn epc_sections(&self) -> &BTreeMap<String, SgxEpcSection> {
        &self.epc_sections
    }

    /// Registers `epc_section` under `id`, replacing any previous entry
    /// with the same identifier.
    pub fn insert(&mut self, id: String, epc_section: SgxEpcSection) {
        self.epc_sections.insert(id, epc_section);
    }
}
pub struct CpuidConfig { pub struct CpuidConfig {
pub sgx_epc_sections: Option<Vec<SgxEpcSection>>,
pub phys_bits: u8, pub phys_bits: u8,
pub kvm_hyperv: bool, pub kvm_hyperv: bool,
#[cfg(feature = "tdx")] #[cfg(feature = "tdx")]
@ -169,18 +120,6 @@ pub enum Error {
#[error("Error setting up SMBIOS table")] #[error("Error setting up SMBIOS table")]
SmbiosSetup(#[source] smbios::Error), SmbiosSetup(#[source] smbios::Error),
/// Could not find any SGX EPC section
#[error("Could not find any SGX EPC section")]
NoSgxEpcSection,
/// Missing SGX CPU feature
#[error("Missing SGX CPU feature")]
MissingSgxFeature,
/// Missing SGX_LC CPU feature
#[error("Missing SGX_LC CPU feature")]
MissingSgxLaunchControlFeature,
/// Error getting supported CPUID through the hypervisor (kvm/mshv) API /// Error getting supported CPUID through the hypervisor (kvm/mshv) API
#[error("Error getting supported CPUID through the hypervisor API")] #[error("Error getting supported CPUID through the hypervisor API")]
CpuidGetSupported(#[source] HypervisorError), CpuidGetSupported(#[source] HypervisorError),
@ -467,7 +406,7 @@ impl CpuidFeatureEntry {
feature_reg: CpuidReg::EDX, feature_reg: CpuidReg::EDX,
compatible_check: CpuidCompatibleCheck::BitwiseSubset, compatible_check: CpuidCompatibleCheck::BitwiseSubset,
}, },
// KVM CPUID bits: https://www.kernel.org/doc/html/latest/virt/kvm/cpuid.html // KVM CPUID bits: https://www.kernel.org/doc/html/latest/virt/kvm/x86/cpuid.html
// Leaf 0x4000_0000, EAX/EBX/ECX/EDX, KVM CPUID SIGNATURE // Leaf 0x4000_0000, EAX/EBX/ECX/EDX, KVM CPUID SIGNATURE
CpuidFeatureEntry { CpuidFeatureEntry {
function: 0x4000_0000, function: 0x4000_0000,
@ -675,10 +614,6 @@ pub fn generate_common_cpuid(
CpuidPatch::patch_cpuid(&mut cpuid, cpuid_patches); CpuidPatch::patch_cpuid(&mut cpuid, cpuid_patches);
if let Some(sgx_epc_sections) = &config.sgx_epc_sections {
update_cpuid_sgx(&mut cpuid, sgx_epc_sections)?;
}
#[cfg(feature = "tdx")] #[cfg(feature = "tdx")]
let tdx_capabilities = if config.tdx { let tdx_capabilities = if config.tdx {
let caps = hypervisor let caps = hypervisor
@ -974,7 +909,6 @@ pub fn configure_system(
_num_cpus: u32, _num_cpus: u32,
setup_header: Option<setup_header>, setup_header: Option<setup_header>,
rsdp_addr: Option<GuestAddress>, rsdp_addr: Option<GuestAddress>,
sgx_epc_region: Option<SgxEpcRegion>,
serial_number: Option<&str>, serial_number: Option<&str>,
uuid: Option<&str>, uuid: Option<&str>,
oem_strings: Option<&[&str]>, oem_strings: Option<&[&str]>,
@ -1008,15 +942,8 @@ pub fn configure_system(
initramfs, initramfs,
hdr, hdr,
rsdp_addr, rsdp_addr,
sgx_epc_region,
),
None => configure_pvh(
guest_mem,
cmdline_addr,
initramfs,
rsdp_addr,
sgx_epc_region,
), ),
None => configure_pvh(guest_mem, cmdline_addr, initramfs, rsdp_addr),
} }
} }
@ -1108,7 +1035,6 @@ fn configure_pvh(
cmdline_addr: GuestAddress, cmdline_addr: GuestAddress,
initramfs: &Option<InitramfsConfig>, initramfs: &Option<InitramfsConfig>,
rsdp_addr: Option<GuestAddress>, rsdp_addr: Option<GuestAddress>,
sgx_epc_region: Option<SgxEpcRegion>,
) -> super::Result<()> { ) -> super::Result<()> {
const XEN_HVM_START_MAGIC_VALUE: u32 = 0x336ec578; const XEN_HVM_START_MAGIC_VALUE: u32 = 0x336ec578;
@ -1174,15 +1100,6 @@ fn configure_pvh(
E820_RESERVED, E820_RESERVED,
); );
if let Some(sgx_epc_region) = sgx_epc_region {
add_memmap_entry(
&mut memmap,
sgx_epc_region.start().raw_value(),
sgx_epc_region.size(),
E820_RESERVED,
);
}
start_info.memmap_entries = memmap.len() as u32; start_info.memmap_entries = memmap.len() as u32;
// Copy the vector with the memmap table to the MEMMAP_START address // Copy the vector with the memmap table to the MEMMAP_START address
@ -1229,7 +1146,6 @@ fn configure_32bit_entry(
initramfs: &Option<InitramfsConfig>, initramfs: &Option<InitramfsConfig>,
setup_hdr: setup_header, setup_hdr: setup_header,
rsdp_addr: Option<GuestAddress>, rsdp_addr: Option<GuestAddress>,
sgx_epc_region: Option<SgxEpcRegion>,
) -> super::Result<()> { ) -> super::Result<()> {
const KERNEL_LOADER_OTHER: u8 = 0xff; const KERNEL_LOADER_OTHER: u8 = 0xff;
@ -1285,15 +1201,6 @@ fn configure_32bit_entry(
E820_RESERVED, E820_RESERVED,
)?; )?;
if let Some(sgx_epc_region) = sgx_epc_region {
add_e820_entry(
&mut params,
sgx_epc_region.start().raw_value(),
sgx_epc_region.size(),
E820_RESERVED,
)?;
}
if let Some(rsdp_addr) = rsdp_addr { if let Some(rsdp_addr) = rsdp_addr {
params.acpi_rsdp_addr = rsdp_addr.0; params.acpi_rsdp_addr = rsdp_addr.0;
} }
@ -1527,57 +1434,6 @@ fn update_cpuid_topology(
} }
} }
} }
// The goal is to update the CPUID sub-leaves to reflect the number of EPC
// sections exposed to the guest. Returns an error if no section was
// provided or if the hypervisor-reported CPUID lacks SGX/SGX_LC support.
fn update_cpuid_sgx(
    cpuid: &mut Vec<CpuIdEntry>,
    epc_sections: &[SgxEpcSection],
) -> Result<(), Error> {
    // Something's wrong if there's no EPC section.
    if epc_sections.is_empty() {
        return Err(Error::NoSgxEpcSection);
    }
    // We can't go further if the hypervisor does not support SGX feature.
    // (Leaf 0x7, sub-leaf 0, EBX bit 2.)
    if !CpuidPatch::is_feature_enabled(cpuid, 0x7, 0, CpuidReg::EBX, 2) {
        return Err(Error::MissingSgxFeature);
    }
    // We can't go further if the hypervisor does not support SGX_LC feature.
    // (Leaf 0x7, sub-leaf 0, ECX bit 30.)
    if !CpuidPatch::is_feature_enabled(cpuid, 0x7, 0, CpuidReg::ECX, 30) {
        return Err(Error::MissingSgxLaunchControlFeature);
    }

    // Get host CPUID for leaf 0x12, subleaf 0x2. This is to retrieve EPC
    // properties such as confidentiality and integrity, copied into the
    // low nibble of ECX for every guest section below.
    // SAFETY: call cpuid with valid leaves
    let leaf = unsafe { std::arch::x86_64::__cpuid_count(0x12, 0x2) };

    // Sections are enumerated starting at sub-leaf 2; sub-leaves 0 and 1
    // carry SGX capability data rather than section descriptions.
    for (i, epc_section) in epc_sections.iter().enumerate() {
        let subleaf_idx = i + 2;
        let start = epc_section.start().raw_value();
        let size = epc_section.size();
        // Per-section encoding:
        //   EAX[31:12] = base address bits 31:12, EAX[0] = 1 (valid entry)
        //   EBX        = base address bits 63:32
        //   ECX[31:12] = size bits 31:12, ECX[3:0] = host EPC properties
        //   EDX        = size bits 63:32
        let eax = (start & 0xffff_f000) as u32 | 0x1;
        let ebx = (start >> 32) as u32;
        let ecx = (size & 0xffff_f000) as u32 | (leaf.ecx & 0xf);
        let edx = (size >> 32) as u32;
        // SGX EPC enumeration leaf 0x12
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EAX, eax);
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EBX, ebx);
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::ECX, ecx);
        CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EDX, edx);
    }

    // Add one NULL entry to terminate the dynamic list
    let subleaf_idx = epc_sections.len() + 2;
    // SGX EPC enumeration leaf 0x12
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EAX, 0);
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EBX, 0);
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::ECX, 0);
    CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EDX, 0);

    Ok(())
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use linux_loader::loader::bootparam::boot_e820_entry; use linux_loader::loader::bootparam::boot_e820_entry;
@ -1608,7 +1464,6 @@ mod tests {
None, None,
None, None,
None, None,
None,
); );
config_err.unwrap_err(); config_err.unwrap_err();
@ -1633,7 +1488,6 @@ mod tests {
None, None,
None, None,
None, None,
None,
) )
.unwrap(); .unwrap();
@ -1663,7 +1517,6 @@ mod tests {
None, None,
None, None,
None, None,
None,
) )
.unwrap(); .unwrap();
@ -1679,7 +1532,6 @@ mod tests {
None, None,
None, None,
None, None,
None,
) )
.unwrap(); .unwrap();
} }

View file

@ -1,54 +0,0 @@
# Intel SGX
Intel® Software Guard Extensions (Intel® SGX) is an Intel technology designed
to increase the security of application code and data. Cloud Hypervisor supports
SGX virtualization through KVM. Because SGX is built on hardware features that
cannot be emulated in software, virtualizing SGX requires support in KVM and in
the host kernel. The required Linux and KVM changes can be found in Linux 5.13+.
Utilizing SGX in the guest requires a kernel/OS with SGX support, e.g. a kernel
since release 5.11, see
[here](https://www.intel.com/content/www/us/en/developer/tools/software-guard-extensions/linux-overview.html).
Running Linux 5.13+ as the guest kernel allows nested virtualization of SGX.
For more information about SGX, please refer to the [SGX Homepage](https://www.intel.com/content/www/us/en/developer/tools/software-guard-extensions/linux-overview.html).
For more information about SGX SDK and how to test SGX, please refer to the
following [instructions](https://github.com/intel/linux-sgx).
## Cloud Hypervisor support
Assuming the host exposes `/dev/sgx_vepc`, we can pass SGX enclaves through
the guest.
In order to use SGX enclaves within a Cloud Hypervisor VM, we must define one
or several Enclave Page Cache (EPC) sections. Here is an example of a VM being
created with 2 EPC sections, the first one being 64MiB with pre-allocated
memory, the second one being 32MiB with no pre-allocated memory.
```bash
./cloud-hypervisor \
--cpus boot=1 \
--memory size=1G \
--disk path=focal-server-cloudimg-amd64.raw \
--kernel vmlinux \
--cmdline "console=ttyS0 console=hvc0 root=/dev/vda1 rw" \
--sgx-epc id=epc0,size=64M,prefault=on id=epc1,size=32M,prefault=off
```
Once booted, and assuming your guest kernel contains the patches from the
[KVM SGX Tree](https://github.com/intel/kvm-sgx), you can validate SGX devices
have been correctly created under `/dev/sgx`:
```bash
ls /dev/sgx*
/dev/sgx_enclave /dev/sgx_provision /dev/sgx_vepc
```
From this point, it is possible to run any SGX application from the guest, as
it will access `/dev/sgx_enclave` device to create dedicated SGX enclaves.
Note: There is only one contiguous SGX EPC region, which contains all SGX EPC
sections. This region is exposed through ACPI and marked as reserved through
the e820 table. It is treated as yet another device, which means it should
appear at the end of the guest address space.

View file

@ -437,12 +437,11 @@ struct NumaConfig {
cpus: Option<Vec<u8>>, cpus: Option<Vec<u8>>,
distances: Option<Vec<NumaDistance>>, distances: Option<Vec<NumaDistance>>,
memory_zones: Option<Vec<String>>, memory_zones: Option<Vec<String>>,
sgx_epc_sections: Option<Vec<String>>,
} }
``` ```
``` ```
--numa <numa> Settings related to a given NUMA node "guest_numa_id=<node_id>,cpus=<cpus_id>,distances=<list_of_distances_to_destination_nodes>,memory_zones=<list_of_memory_zones>,sgx_epc_sections=<list_of_sgx_epc_sections>" --numa <numa> Settings related to a given NUMA node "guest_numa_id=<node_id>,cpus=<cpus_id>,distances=<list_of_distances_to_destination_nodes>,memory_zones=<list_of_memory_zones>"
``` ```
### `guest_numa_id` ### `guest_numa_id`
@ -550,26 +549,6 @@ _Example_
--numa guest_numa_id=0,memory_zones=[mem0,mem2] guest_numa_id=1,memory_zones=mem1 --numa guest_numa_id=0,memory_zones=[mem0,mem2] guest_numa_id=1,memory_zones=mem1
``` ```
### `sgx_epc_sections`
List of SGX EPC sections attached to the guest NUMA node identified by the
`guest_numa_id` option. This allows for describing a list of SGX EPC sections
which must be seen by the guest as belonging to the NUMA node `guest_numa_id`.
Multiple values can be provided to define the list. Each value is a string
referring to an existing SGX EPC section identifier. Values are separated from
each other with the `,` separator.
As soon as one tries to describe a list of values, `[` and `]` must be used to
demarcate the list.
_Example_
```
--sgx-epc id=epc0,size=32M id=epc1,size=64M id=epc2,size=32M
--numa guest_numa_id=0,sgx_epc_sections=epc1 guest_numa_id=1,sgx_epc_sections=[epc0,epc2]
```
### PCI bus ### PCI bus
Cloud Hypervisor supports guests with one or more PCI segments. The default PCI segment always Cloud Hypervisor supports guests with one or more PCI segments. The default PCI segment always

View file

@ -110,4 +110,4 @@ from the restored VM.
## Limitations ## Limitations
VFIO devices and Intel SGX are out of scope. VFIO devices are out of scope.

View file

@ -186,8 +186,6 @@ impl RequestHandler for StubApiRequestHandler {
#[cfg(feature = "pvmemcontrol")] #[cfg(feature = "pvmemcontrol")]
pvmemcontrol: None, pvmemcontrol: None,
iommu: false, iommu: false,
#[cfg(target_arch = "x86_64")]
sgx_epc: None,
numa: None, numa: None,
watchdog: false, watchdog: false,
gdb: false, gdb: false,

View file

@ -12,11 +12,9 @@
use std::any::Any; use std::any::Any;
use std::collections::HashMap; use std::collections::HashMap;
#[cfg(target_arch = "x86_64")]
use std::fs::File;
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))] #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
use std::mem::offset_of; use std::mem::offset_of;
#[cfg(target_arch = "x86_64")] #[cfg(feature = "tdx")]
use std::os::unix::io::AsRawFd; use std::os::unix::io::AsRawFd;
#[cfg(feature = "tdx")] #[cfg(feature = "tdx")]
use std::os::unix::io::RawFd; use std::os::unix::io::RawFd;
@ -108,6 +106,8 @@ use kvm_bindings::{kvm_run__bindgen_ty_1, KVMIO};
pub use kvm_ioctls::{Cap, Kvm, VcpuExit}; pub use kvm_ioctls::{Cap, Kvm, VcpuExit};
use thiserror::Error; use thiserror::Error;
use vfio_ioctls::VfioDeviceFd; use vfio_ioctls::VfioDeviceFd;
#[cfg(target_arch = "x86_64")]
use vmm_sys_util::ioctl_io_nr;
#[cfg(feature = "tdx")] #[cfg(feature = "tdx")]
use vmm_sys_util::{ioctl::ioctl_with_val, ioctl_iowr_nr}; use vmm_sys_util::{ioctl::ioctl_with_val, ioctl_iowr_nr};
pub use {kvm_bindings, kvm_ioctls}; pub use {kvm_bindings, kvm_ioctls};
@ -116,13 +116,6 @@ pub use {kvm_bindings, kvm_ioctls};
use crate::arch::aarch64::regs; use crate::arch::aarch64::regs;
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))] #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
use crate::RegList; use crate::RegList;
#[cfg(target_arch = "x86_64")]
const KVM_CAP_SGX_ATTRIBUTE: u32 = 196;
#[cfg(target_arch = "x86_64")]
use vmm_sys_util::ioctl_io_nr;
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
ioctl_io_nr!(KVM_NMI, kvm_bindings::KVMIO, 0x9a); ioctl_io_nr!(KVM_NMI, kvm_bindings::KVMIO, 0x9a);
@ -893,19 +886,6 @@ impl vm::Vm for KvmVm {
Ok(()) Ok(())
} }
#[cfg(target_arch = "x86_64")]
fn enable_sgx_attribute(&self, file: File) -> vm::Result<()> {
let mut cap = kvm_enable_cap {
cap: KVM_CAP_SGX_ATTRIBUTE,
..Default::default()
};
cap.args[0] = file.as_raw_fd() as u64;
self.fd
.enable_cap(&cap)
.map_err(|e| vm::HypervisorVmError::EnableSgxAttribute(e.into()))?;
Ok(())
}
/// Retrieve guest clock. /// Retrieve guest clock.
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
fn get_clock(&self) -> vm::Result<ClockData> { fn get_clock(&self) -> vm::Result<ClockData> {

View file

@ -41,8 +41,6 @@ pub mod x86_64;
// aarch64 dependencies // aarch64 dependencies
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
pub mod aarch64; pub mod aarch64;
#[cfg(target_arch = "x86_64")]
use std::fs::File;
use std::os::unix::io::AsRawFd; use std::os::unix::io::AsRawFd;
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
use std::sync::Mutex; use std::sync::Mutex;
@ -1892,11 +1890,6 @@ impl vm::Vm for MshvVm {
Ok(()) Ok(())
} }
#[cfg(target_arch = "x86_64")]
fn enable_sgx_attribute(&self, _file: File) -> vm::Result<()> {
Ok(())
}
fn register_ioevent( fn register_ioevent(
&self, &self,
fd: &EventFd, fd: &EventFd,

View file

@ -11,8 +11,6 @@
// //
use std::any::Any; use std::any::Any;
#[cfg(target_arch = "x86_64")]
use std::fs::File;
use std::sync::Arc; use std::sync::Arc;
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))] #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
use std::sync::Mutex; use std::sync::Mutex;
@ -130,12 +128,6 @@ pub enum HypervisorVmError {
/// ///
#[error("Failed to enable x2apic API")] #[error("Failed to enable x2apic API")]
EnableX2ApicApi(#[source] anyhow::Error), EnableX2ApicApi(#[source] anyhow::Error),
///
/// Enable SGX attribute error
///
#[error("Failed to enable SGX attribute")]
EnableSgxAttribute(#[source] anyhow::Error),
///
/// Get clock error /// Get clock error
/// ///
#[error("Failed to get clock")] #[error("Failed to get clock")]
@ -363,8 +355,6 @@ pub trait Vm: Send + Sync + Any {
/// Enable split Irq capability /// Enable split Irq capability
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
fn enable_split_irq(&self) -> Result<()>; fn enable_split_irq(&self) -> Result<()>;
#[cfg(target_arch = "x86_64")]
fn enable_sgx_attribute(&self, file: File) -> Result<()>;
/// Retrieve guest clock. /// Retrieve guest clock.
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
fn get_clock(&self) -> Result<ClockData>; fn get_clock(&self) -> Result<ClockData>;

View file

@ -193,7 +193,6 @@ cmd_help() {
echo " Run the Cloud Hypervisor tests." echo " Run the Cloud Hypervisor tests."
echo " --unit Run the unit tests." echo " --unit Run the unit tests."
echo " --integration Run the integration tests." echo " --integration Run the integration tests."
echo " --integration-sgx Run the SGX integration tests."
echo " --integration-vfio Run the VFIO integration tests." echo " --integration-vfio Run the VFIO integration tests."
echo " --integration-windows Run the Windows guest integration tests." echo " --integration-windows Run the Windows guest integration tests."
echo " --integration-live-migration Run the live-migration integration tests." echo " --integration-live-migration Run the live-migration integration tests."
@ -327,7 +326,6 @@ cmd_clean() {
cmd_tests() { cmd_tests() {
unit=false unit=false
integration=false integration=false
integration_sgx=false
integration_vfio=false integration_vfio=false
integration_windows=false integration_windows=false
integration_live_migration=false integration_live_migration=false
@ -346,7 +344,6 @@ cmd_tests() {
} ;; } ;;
"--unit") { unit=true; } ;; "--unit") { unit=true; } ;;
"--integration") { integration=true; } ;; "--integration") { integration=true; } ;;
"--integration-sgx") { integration_sgx=true; } ;;
"--integration-vfio") { integration_vfio=true; } ;; "--integration-vfio") { integration_vfio=true; } ;;
"--integration-windows") { integration_windows=true; } ;; "--integration-windows") { integration_windows=true; } ;;
"--integration-live-migration") { integration_live_migration=true; } ;; "--integration-live-migration") { integration_live_migration=true; } ;;
@ -449,29 +446,6 @@ cmd_tests() {
dbus-run-session ./scripts/run_integration_tests_"$(uname -m)".sh "$@" || fix_dir_perms $? || exit $? dbus-run-session ./scripts/run_integration_tests_"$(uname -m)".sh "$@" || fix_dir_perms $? || exit $?
fi fi
if [ "$integration_sgx" = true ]; then
say "Running SGX integration tests for $target..."
$DOCKER_RUNTIME run \
--workdir "$CTR_CLH_ROOT_DIR" \
--rm \
--privileged \
--security-opt seccomp=unconfined \
--ipc=host \
--net="$CTR_CLH_NET" \
--mount type=tmpfs,destination=/tmp \
--volume /dev:/dev \
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" \
${exported_volumes:+"$exported_volumes"} \
--volume "$CLH_INTEGRATION_WORKLOADS:$CTR_CLH_INTEGRATION_WORKLOADS" \
--env USER="root" \
--env BUILD_TARGET="$target" \
--env RUSTFLAGS="$rustflags" \
--env TARGET_CC="$target_cc" \
--env AUTH_DOWNLOAD_TOKEN="$AUTH_DOWNLOAD_TOKEN" \
"$CTR_IMAGE" \
./scripts/run_integration_tests_sgx.sh "$@" || fix_dir_perms $? || exit $?
fi
if [ "$integration_vfio" = true ]; then if [ "$integration_vfio" = true ]; then
say "Running VFIO integration tests for $target..." say "Running VFIO integration tests for $target..."
$DOCKER_RUNTIME run \ $DOCKER_RUNTIME run \

View file

@ -1,51 +0,0 @@
#!/usr/bin/env bash
# Runs the SGX integration tests (the `sgx::` test filter) inside the
# Cloud Hypervisor test environment. Expects AUTH_DOWNLOAD_TOKEN and
# BUILD_TARGET to be provided by the caller (see dev_cli.sh).
# shellcheck disable=SC2048,SC2086
set -x
# shellcheck source=/dev/null
source "$HOME"/.cargo/env
source "$(dirname "$0")"/test-util.sh
# Parses shared CLI flags; sets $hypervisor, $test_filter, $test_binary_args.
process_common_args "$@"
# SGX virtualization is only exercised through KVM here.
if [[ "$hypervisor" = "mshv" ]]; then
echo "Unsupported SGX test for MSHV"
exit 1
fi
WORKLOADS_DIR="$HOME/workloads"
mkdir -p "$WORKLOADS_DIR"
download_hypervisor_fw
# Fetch the guest cloud image once and cache it in the workloads dir.
JAMMY_OS_IMAGE_NAME="jammy-server-cloudimg-amd64-custom-20241017-0.qcow2"
JAMMY_OS_IMAGE_URL="https://ch-images.azureedge.net/$JAMMY_OS_IMAGE_NAME"
JAMMY_OS_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_IMAGE_NAME"
if [ ! -f "$JAMMY_OS_IMAGE" ]; then
pushd "$WORKLOADS_DIR" || exit
time wget --quiet $JAMMY_OS_IMAGE_URL || exit 1
popd || exit
fi
# Convert the qcow2 image to raw, which the tests boot from.
JAMMY_OS_RAW_IMAGE_NAME="jammy-server-cloudimg-amd64-custom-20241017-0.raw"
JAMMY_OS_RAW_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_RAW_IMAGE_NAME"
if [ ! -f "$JAMMY_OS_RAW_IMAGE" ]; then
pushd "$WORKLOADS_DIR" || exit
time qemu-img convert -p -f qcow2 -O raw $JAMMY_OS_IMAGE_NAME $JAMMY_OS_RAW_IMAGE_NAME || exit 1
popd || exit
fi
# musl builds need extra include paths for the C toolchain.
CFLAGS=""
if [[ "${BUILD_TARGET}" == "x86_64-unknown-linux-musl" ]]; then
# shellcheck disable=SC2034
CFLAGS="-I /usr/include/x86_64-linux-musl/ -idirafter /usr/include/"
fi
cargo build --features mshv --all --release --target "$BUILD_TARGET"
export RUST_BACKTRACE=1
# Run only the tests under the `sgx` module, forwarding extra args.
time cargo test "sgx::$test_filter" -- ${test_binary_args[*]}
RES=$?
exit $RES

View file

@ -31,8 +31,6 @@ use vmm::vm_config;
use vmm::vm_config::FwCfgConfig; use vmm::vm_config::FwCfgConfig;
#[cfg(feature = "ivshmem")] #[cfg(feature = "ivshmem")]
use vmm::vm_config::IvshmemConfig; use vmm::vm_config::IvshmemConfig;
#[cfg(target_arch = "x86_64")]
use vmm::vm_config::SgxEpcConfig;
use vmm::vm_config::{ use vmm::vm_config::{
BalloonConfig, DeviceConfig, DiskConfig, FsConfig, LandlockConfig, NetConfig, NumaConfig, BalloonConfig, DeviceConfig, DiskConfig, FsConfig, LandlockConfig, NetConfig, NumaConfig,
PciSegmentConfig, PmemConfig, RateLimiterGroupConfig, TpmConfig, UserDeviceConfig, VdpaConfig, PciSegmentConfig, PmemConfig, RateLimiterGroupConfig, TpmConfig, UserDeviceConfig, VdpaConfig,
@ -429,12 +427,6 @@ fn get_cli_options_sorted(
.help("Control serial port: off|null|pty|tty|file=</path/to/a/file>|socket=</path/to/a/file>") .help("Control serial port: off|null|pty|tty|file=</path/to/a/file>|socket=</path/to/a/file>")
.default_value("null") .default_value("null")
.group("vm-config"), .group("vm-config"),
#[cfg(target_arch = "x86_64")]
Arg::new("sgx-epc")
.long("sgx-epc")
.help(SgxEpcConfig::SYNTAX)
.num_args(1..)
.group("vm-config"),
Arg::new("tpm") Arg::new("tpm")
.long("tpm") .long("tpm")
.num_args(1) .num_args(1)
@ -1030,8 +1022,6 @@ mod unit_tests {
#[cfg(feature = "pvmemcontrol")] #[cfg(feature = "pvmemcontrol")]
pvmemcontrol: None, pvmemcontrol: None,
iommu: false, iommu: false,
#[cfg(target_arch = "x86_64")]
sgx_epc: None,
numa: None, numa: None,
watchdog: false, watchdog: false,
#[cfg(feature = "guest_debug")] #[cfg(feature = "guest_debug")]

View file

@ -1061,24 +1061,6 @@ impl Guest {
} }
} }
#[cfg(target_arch = "x86_64")]
/// Verifies over SSH that the guest kernel reports SGX, SGX_LC and SGX1
/// in its CPUID output; errors out on the first missing capability.
pub fn check_sgx_support(&self) -> Result<(), Error> {
    // Each probe greps the guest's `cpuid` output; `grep -q` makes the
    // remote command fail (and `?` propagate) when the flag is absent.
    let probes = [
        "cpuid -l 0x7 -s 0 | tr -s [:space:] | grep -q 'SGX: Software Guard Extensions supported = true'",
        "cpuid -l 0x7 -s 0 | tr -s [:space:] | grep -q 'SGX_LC: SGX launch config supported = true'",
        "cpuid -l 0x12 -s 0 | tr -s [:space:] | grep -q 'SGX1 supported = true'",
    ];
    for probe in probes {
        self.ssh_command(probe)?;
    }

    Ok(())
}
pub fn get_pci_bridge_class(&self) -> Result<String, Error> { pub fn get_pci_bridge_class(&self) -> Result<String, Error> {
Ok(self Ok(self
.ssh_command("cat /sys/bus/pci/devices/0000:00:00.0/class")? .ssh_command("cat /sys/bus/pci/devices/0000:00:00.0/class")?

View file

@ -9530,50 +9530,6 @@ mod windows {
} }
} }
#[cfg(target_arch = "x86_64")]
mod sgx {
    use crate::*;

    // Boots a guest with a single 64MiB EPC section and verifies the guest
    // both detects SGX and sees the expected section size via CPUID.
    #[test]
    fn test_sgx() {
        let jammy_image = JAMMY_IMAGE_NAME.to_string();
        let jammy = UbuntuDiskConfig::new(jammy_image);
        let guest = Guest::new(Box::new(jammy));

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", "boot=1"])
            .args(["--memory", "size=512M"])
            .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
            .default_disks()
            .default_net()
            .args(["--sgx-epc", "id=epc0,size=64M"])
            .capture_output()
            .spawn()
            .unwrap();

        // Run the assertions inside catch_unwind so the VM is always torn
        // down and its output reported, even when an assertion panics.
        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            // Check if SGX is correctly detected in the guest.
            guest.check_sgx_support().unwrap();

            // Validate the SGX EPC section is 64MiB.
            assert_eq!(
                guest
                    .ssh_command("cpuid -l 0x12 -s 2 | grep 'section size' | cut -d '=' -f 2")
                    .unwrap()
                    .trim(),
                "0x0000000004000000"
            );
        });

        // Always kill the VM before collecting and reporting its output.
        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);
    }
}
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
mod vfio { mod vfio {
use crate::*; use crate::*;

View file

@ -314,16 +314,6 @@ fn create_srat_table(
)) ))
} }
#[cfg(target_arch = "x86_64")]
for section in &node.sgx_epc_sections {
srat.append(MemoryAffinity::from_range(
section.start().raw_value(),
section.size(),
proximity_domain,
MemAffinityFlags::ENABLE,
))
}
for cpu in &node.cpus { for cpu in &node.cpus {
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
let x2apic_id = arch::x86_64::get_x2apic_id(*cpu, topology); let x2apic_id = arch::x86_64::get_x2apic_id(*cpu, topology);

View file

@ -607,10 +607,6 @@ components:
$ref: "#/components/schemas/VdpaConfig" $ref: "#/components/schemas/VdpaConfig"
vsock: vsock:
$ref: "#/components/schemas/VsockConfig" $ref: "#/components/schemas/VsockConfig"
sgx_epc:
type: array
items:
$ref: "#/components/schemas/SgxEpcConfig"
numa: numa:
type: array type: array
items: items:
@ -1143,21 +1139,6 @@ components:
id: id:
type: string type: string
SgxEpcConfig:
required:
- id
- size
type: object
properties:
id:
type: string
size:
type: integer
format: int64
prefault:
type: boolean
default: false
NumaDistance: NumaDistance:
required: required:
- destination - destination
@ -1192,10 +1173,6 @@ components:
type: array type: array
items: items:
type: string type: string
sgx_epc_sections:
type: array
items:
type: string
pci_segments: pci_segments:
type: array type: array
items: items:

View file

@ -109,14 +109,6 @@ pub enum Error {
/// Failed parsing restore parameters /// Failed parsing restore parameters
#[error("Error parsing --restore")] #[error("Error parsing --restore")]
ParseRestore(#[source] OptionParserError), ParseRestore(#[source] OptionParserError),
/// Failed parsing SGX EPC parameters
#[cfg(target_arch = "x86_64")]
#[error("Error parsing --sgx-epc")]
ParseSgxEpc(#[source] OptionParserError),
/// Missing 'id' from SGX EPC section
#[cfg(target_arch = "x86_64")]
#[error("Error parsing --sgx-epc: id missing")]
ParseSgxEpcIdMissing,
/// Failed parsing NUMA parameters /// Failed parsing NUMA parameters
#[error("Error parsing --numa")] #[error("Error parsing --numa")]
ParseNuma(#[source] OptionParserError), ParseNuma(#[source] OptionParserError),
@ -395,8 +387,6 @@ pub struct VmParams<'a> {
#[cfg(feature = "pvmemcontrol")] #[cfg(feature = "pvmemcontrol")]
pub pvmemcontrol: bool, pub pvmemcontrol: bool,
pub pvpanic: bool, pub pvpanic: bool,
#[cfg(target_arch = "x86_64")]
pub sgx_epc: Option<Vec<&'a str>>,
pub numa: Option<Vec<&'a str>>, pub numa: Option<Vec<&'a str>>,
pub watchdog: bool, pub watchdog: bool,
#[cfg(feature = "guest_debug")] #[cfg(feature = "guest_debug")]
@ -462,10 +452,6 @@ impl<'a> VmParams<'a> {
#[cfg(feature = "pvmemcontrol")] #[cfg(feature = "pvmemcontrol")]
let pvmemcontrol = args.get_flag("pvmemcontrol"); let pvmemcontrol = args.get_flag("pvmemcontrol");
let pvpanic = args.get_flag("pvpanic"); let pvpanic = args.get_flag("pvpanic");
#[cfg(target_arch = "x86_64")]
let sgx_epc: Option<Vec<&str>> = args
.get_many::<String>("sgx-epc")
.map(|x| x.map(|y| y as &str).collect());
let numa: Option<Vec<&str>> = args let numa: Option<Vec<&str>> = args
.get_many::<String>("numa") .get_many::<String>("numa")
.map(|x| x.map(|y| y as &str).collect()); .map(|x| x.map(|y| y as &str).collect());
@ -516,8 +502,6 @@ impl<'a> VmParams<'a> {
#[cfg(feature = "pvmemcontrol")] #[cfg(feature = "pvmemcontrol")]
pvmemcontrol, pvmemcontrol,
pvpanic, pvpanic,
#[cfg(target_arch = "x86_64")]
sgx_epc,
numa, numa,
watchdog, watchdog,
#[cfg(feature = "guest_debug")] #[cfg(feature = "guest_debug")]
@ -2139,36 +2123,10 @@ impl VsockConfig {
} }
} }
#[cfg(target_arch = "x86_64")]
impl SgxEpcConfig {
pub const SYNTAX: &'static str = "SGX EPC parameters \
\"id=<epc_section_identifier>,size=<epc_section_size>,prefault=on|off\"";
pub fn parse(sgx_epc: &str) -> Result<Self> {
let mut parser = OptionParser::new();
parser.add("id").add("size").add("prefault");
parser.parse(sgx_epc).map_err(Error::ParseSgxEpc)?;
let id = parser.get("id").ok_or(Error::ParseSgxEpcIdMissing)?;
let size = parser
.convert::<ByteSized>("size")
.map_err(Error::ParseSgxEpc)?
.unwrap_or(ByteSized(0))
.0;
let prefault = parser
.convert::<Toggle>("prefault")
.map_err(Error::ParseSgxEpc)?
.unwrap_or(Toggle(false))
.0;
Ok(SgxEpcConfig { id, size, prefault })
}
}
impl NumaConfig { impl NumaConfig {
pub const SYNTAX: &'static str = "Settings related to a given NUMA node \ pub const SYNTAX: &'static str = "Settings related to a given NUMA node \
\"guest_numa_id=<node_id>,cpus=<cpus_id>,distances=<list_of_distances_to_destination_nodes>,\ \"guest_numa_id=<node_id>,cpus=<cpus_id>,distances=<list_of_distances_to_destination_nodes>,\
memory_zones=<list_of_memory_zones>,sgx_epc_sections=<list_of_sgx_epc_sections>,\ memory_zones=<list_of_memory_zones>,\
pci_segments=<list_of_pci_segments>\""; pci_segments=<list_of_pci_segments>\"";
pub fn parse(numa: &str) -> Result<Self> { pub fn parse(numa: &str) -> Result<Self> {
@ -2178,7 +2136,6 @@ impl NumaConfig {
.add("cpus") .add("cpus")
.add("distances") .add("distances")
.add("memory_zones") .add("memory_zones")
.add("sgx_epc_sections")
.add("pci_segments"); .add("pci_segments");
parser.parse(numa).map_err(Error::ParseNuma)?; parser.parse(numa).map_err(Error::ParseNuma)?;
@ -2206,11 +2163,6 @@ impl NumaConfig {
.convert::<StringList>("memory_zones") .convert::<StringList>("memory_zones")
.map_err(Error::ParseNuma)? .map_err(Error::ParseNuma)?
.map(|v| v.0); .map(|v| v.0);
#[cfg(target_arch = "x86_64")]
let sgx_epc_sections = parser
.convert::<StringList>("sgx_epc_sections")
.map_err(Error::ParseNuma)?
.map(|v| v.0);
let pci_segments = parser let pci_segments = parser
.convert::<IntegerList>("pci_segments") .convert::<IntegerList>("pci_segments")
.map_err(Error::ParseNuma)? .map_err(Error::ParseNuma)?
@ -2220,8 +2172,6 @@ impl NumaConfig {
cpus, cpus,
distances, distances,
memory_zones, memory_zones,
#[cfg(target_arch = "x86_64")]
sgx_epc_sections,
pci_segments, pci_segments,
}) })
} }
@ -2800,14 +2750,6 @@ impl VmConfig {
} }
} }
#[cfg(target_arch = "x86_64")]
if let Some(sgx_epcs) = &self.sgx_epc {
for sgx_epc in sgx_epcs.iter() {
let id = sgx_epc.id.clone();
Self::validate_identifier(&mut id_list, &Some(id))?;
}
}
if let Some(pci_segments) = &self.pci_segments { if let Some(pci_segments) = &self.pci_segments {
for pci_segment in pci_segments { for pci_segment in pci_segments {
pci_segment.validate(self)?; pci_segment.validate(self)?;
@ -2957,21 +2899,6 @@ impl VmConfig {
let platform = vm_params.platform.map(PlatformConfig::parse).transpose()?; let platform = vm_params.platform.map(PlatformConfig::parse).transpose()?;
#[cfg(target_arch = "x86_64")]
let mut sgx_epc: Option<Vec<SgxEpcConfig>> = None;
#[cfg(target_arch = "x86_64")]
{
if let Some(sgx_epc_list) = &vm_params.sgx_epc {
warn!("SGX support is deprecated and will be removed in a future release.");
let mut sgx_epc_config_list = Vec::new();
for item in sgx_epc_list.iter() {
let sgx_epc_config = SgxEpcConfig::parse(item)?;
sgx_epc_config_list.push(sgx_epc_config);
}
sgx_epc = Some(sgx_epc_config_list);
}
}
let mut numa: Option<Vec<NumaConfig>> = None; let mut numa: Option<Vec<NumaConfig>> = None;
if let Some(numa_list) = &vm_params.numa { if let Some(numa_list) = &vm_params.numa {
let mut numa_config_list = Vec::new(); let mut numa_config_list = Vec::new();
@ -3058,8 +2985,6 @@ impl VmConfig {
pvmemcontrol, pvmemcontrol,
pvpanic: vm_params.pvpanic, pvpanic: vm_params.pvpanic,
iommu: false, // updated in VmConfig::validate() iommu: false, // updated in VmConfig::validate()
#[cfg(target_arch = "x86_64")]
sgx_epc,
numa, numa,
watchdog: vm_params.watchdog, watchdog: vm_params.watchdog,
#[cfg(feature = "guest_debug")] #[cfg(feature = "guest_debug")]
@ -3189,8 +3114,6 @@ impl Clone for VmConfig {
user_devices: self.user_devices.clone(), user_devices: self.user_devices.clone(),
vdpa: self.vdpa.clone(), vdpa: self.vdpa.clone(),
vsock: self.vsock.clone(), vsock: self.vsock.clone(),
#[cfg(target_arch = "x86_64")]
sgx_epc: self.sgx_epc.clone(),
numa: self.numa.clone(), numa: self.numa.clone(),
pci_segments: self.pci_segments.clone(), pci_segments: self.pci_segments.clone(),
platform: self.platform.clone(), platform: self.platform.clone(),
@ -3976,8 +3899,6 @@ mod tests {
pvmemcontrol: None, pvmemcontrol: None,
pvpanic: false, pvpanic: false,
iommu: false, iommu: false,
#[cfg(target_arch = "x86_64")]
sgx_epc: None,
numa: None, numa: None,
watchdog: false, watchdog: false,
#[cfg(feature = "guest_debug")] #[cfg(feature = "guest_debug")]
@ -4119,8 +4040,6 @@ mod tests {
cpus: None, cpus: None,
distances: None, distances: None,
memory_zones: None, memory_zones: None,
#[cfg(target_arch = "x86_64")]
sgx_epc_sections: None,
pci_segments: None, pci_segments: None,
} }
} }
@ -4192,8 +4111,6 @@ mod tests {
pvmemcontrol: None, pvmemcontrol: None,
pvpanic: false, pvpanic: false,
iommu: false, iommu: false,
#[cfg(target_arch = "x86_64")]
sgx_epc: None,
numa: None, numa: None,
watchdog: false, watchdog: false,
#[cfg(feature = "guest_debug")] #[cfg(feature = "guest_debug")]

View file

@ -82,8 +82,6 @@ use crate::coredump::{
}; };
#[cfg(feature = "guest_debug")] #[cfg(feature = "guest_debug")]
use crate::gdb::{get_raw_tid, Debuggable, DebuggableError}; use crate::gdb::{get_raw_tid, Debuggable, DebuggableError};
#[cfg(target_arch = "x86_64")]
use crate::memory_manager::MemoryManager;
use crate::seccomp_filters::{get_seccomp_filter, Thread}; use crate::seccomp_filters::{get_seccomp_filter, Thread};
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
use crate::vm::physical_bits; use crate::vm::physical_bits;
@ -799,23 +797,14 @@ impl CpuManager {
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
pub fn populate_cpuid( pub fn populate_cpuid(
&mut self, &mut self,
memory_manager: &Arc<Mutex<MemoryManager>>,
hypervisor: &Arc<dyn hypervisor::Hypervisor>, hypervisor: &Arc<dyn hypervisor::Hypervisor>,
#[cfg(feature = "tdx")] tdx: bool, #[cfg(feature = "tdx")] tdx: bool,
) -> Result<()> { ) -> Result<()> {
let sgx_epc_sections = memory_manager
.lock()
.unwrap()
.sgx_epc_region()
.as_ref()
.map(|sgx_epc_region| sgx_epc_region.epc_sections().values().cloned().collect());
self.cpuid = { self.cpuid = {
let phys_bits = physical_bits(hypervisor, self.config.max_phys_bits); let phys_bits = physical_bits(hypervisor, self.config.max_phys_bits);
arch::generate_common_cpuid( arch::generate_common_cpuid(
hypervisor, hypervisor,
&arch::CpuidConfig { &arch::CpuidConfig {
sgx_epc_sections,
phys_bits, phys_bits,
kvm_hyperv: self.config.kvm_hyperv, kvm_hyperv: self.config.kvm_hyperv,
#[cfg(feature = "tdx")] #[cfg(feature = "tdx")]

View file

@ -900,8 +900,6 @@ impl Vmm {
false, false,
Some(&vm_migration_config.memory_manager_data), Some(&vm_migration_config.memory_manager_data),
existing_memory_files, existing_memory_files,
#[cfg(target_arch = "x86_64")]
None,
) )
.map_err(|e| { .map_err(|e| {
MigratableError::MigrateReceive(anyhow!( MigratableError::MigrateReceive(anyhow!(
@ -1135,7 +1133,6 @@ impl Vmm {
arch::generate_common_cpuid( arch::generate_common_cpuid(
&hypervisor, &hypervisor,
&arch::CpuidConfig { &arch::CpuidConfig {
sgx_epc_sections: None,
phys_bits, phys_bits,
kvm_hyperv: vm_config.lock().unwrap().cpus.kvm_hyperv, kvm_hyperv: vm_config.lock().unwrap().cpus.kvm_hyperv,
#[cfg(feature = "tdx")] #[cfg(feature = "tdx")]
@ -1266,7 +1263,7 @@ impl Vmm {
}; };
// We check the `CPUID` compatibility of between the source vm and destination, which is // We check the `CPUID` compatibility of between the source vm and destination, which is
// mostly about feature compatibility and "topology/sgx" leaves are not relevant. // mostly about feature compatibility.
let dest_cpuid = &{ let dest_cpuid = &{
let vm_config = &src_vm_config.lock().unwrap(); let vm_config = &src_vm_config.lock().unwrap();
@ -1274,7 +1271,6 @@ impl Vmm {
arch::generate_common_cpuid( arch::generate_common_cpuid(
&self.hypervisor.clone(), &self.hypervisor.clone(),
&arch::CpuidConfig { &arch::CpuidConfig {
sgx_epc_sections: None,
phys_bits, phys_bits,
kvm_hyperv: vm_config.cpus.kvm_hyperv, kvm_hyperv: vm_config.cpus.kvm_hyperv,
#[cfg(feature = "tdx")] #[cfg(feature = "tdx")]
@ -2428,8 +2424,6 @@ mod unit_tests {
pvmemcontrol: None, pvmemcontrol: None,
pvpanic: false, pvpanic: false,
iommu: false, iommu: false,
#[cfg(target_arch = "x86_64")]
sgx_epc: None,
numa: None, numa: None,
watchdog: false, watchdog: false,
#[cfg(feature = "guest_debug")] #[cfg(feature = "guest_debug")]

View file

@ -19,16 +19,12 @@ use std::{ffi, result, thread};
use acpi_tables::{aml, Aml}; use acpi_tables::{aml, Aml};
use anyhow::anyhow; use anyhow::anyhow;
#[cfg(target_arch = "x86_64")]
use arch::x86_64::{SgxEpcRegion, SgxEpcSection};
use arch::RegionType; use arch::RegionType;
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
use devices::ioapic; use devices::ioapic;
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
use hypervisor::HypervisorVmError; use hypervisor::HypervisorVmError;
use libc::_SC_NPROCESSORS_ONLN; use libc::_SC_NPROCESSORS_ONLN;
#[cfg(target_arch = "x86_64")]
use libc::{MAP_NORESERVE, MAP_POPULATE, MAP_SHARED, PROT_READ, PROT_WRITE};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use thiserror::Error; use thiserror::Error;
use tracer::trace_scoped; use tracer::trace_scoped;
@ -54,8 +50,6 @@ use crate::coredump::{
CoredumpMemoryRegion, CoredumpMemoryRegions, DumpState, GuestDebuggableError, CoredumpMemoryRegion, CoredumpMemoryRegions, DumpState, GuestDebuggableError,
}; };
use crate::migration::url_to_path; use crate::migration::url_to_path;
#[cfg(target_arch = "x86_64")]
use crate::vm_config::SgxEpcConfig;
use crate::vm_config::{HotplugMethod, MemoryConfig, MemoryZoneConfig}; use crate::vm_config::{HotplugMethod, MemoryConfig, MemoryZoneConfig};
use crate::{GuestMemoryMmap, GuestRegionMmap, MEMORY_MANAGER_SNAPSHOT_ID}; use crate::{GuestMemoryMmap, GuestRegionMmap, MEMORY_MANAGER_SNAPSHOT_ID};
@ -68,9 +62,6 @@ const SNAPSHOT_FILENAME: &str = "memory-ranges";
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
const X86_64_IRQ_BASE: u32 = 5; const X86_64_IRQ_BASE: u32 = 5;
#[cfg(target_arch = "x86_64")]
const SGX_PAGE_SIZE: u64 = 1 << 12;
const HOTPLUG_COUNT: usize = 8; const HOTPLUG_COUNT: usize = 8;
// Memory policy constants // Memory policy constants
@ -183,8 +174,6 @@ pub struct MemoryManager {
hugepage_size: Option<u64>, hugepage_size: Option<u64>,
prefault: bool, prefault: bool,
thp: bool, thp: bool,
#[cfg(target_arch = "x86_64")]
sgx_epc_region: Option<SgxEpcRegion>,
user_provided_zones: bool, user_provided_zones: bool,
snapshot_memory_ranges: MemoryRangeTable, snapshot_memory_ranges: MemoryRangeTable,
memory_zones: MemoryZones, memory_zones: MemoryZones,
@ -269,36 +258,6 @@ pub enum Error {
#[error("Cannot create the system allocator")] #[error("Cannot create the system allocator")]
CreateSystemAllocator, CreateSystemAllocator,
/// Invalid SGX EPC section size
#[cfg(target_arch = "x86_64")]
#[error("Invalid SGX EPC section size")]
EpcSectionSizeInvalid,
/// Failed allocating SGX EPC region
#[cfg(target_arch = "x86_64")]
#[error("Failed allocating SGX EPC region")]
SgxEpcRangeAllocation,
/// Failed opening SGX virtual EPC device
#[cfg(target_arch = "x86_64")]
#[error("Failed opening SGX virtual EPC device")]
SgxVirtEpcOpen(#[source] io::Error),
/// Failed setting the SGX virtual EPC section size
#[cfg(target_arch = "x86_64")]
#[error("Failed setting the SGX virtual EPC section size")]
SgxVirtEpcFileSetLen(#[source] io::Error),
/// Failed opening SGX provisioning device
#[cfg(target_arch = "x86_64")]
#[error("Failed opening SGX provisioning device")]
SgxProvisionOpen(#[source] io::Error),
/// Failed enabling SGX provisioning
#[cfg(target_arch = "x86_64")]
#[error("Failed enabling SGX provisioning")]
SgxEnableProvisioning(#[source] hypervisor::HypervisorVmError),
/// Failed creating a new MmapRegion instance. /// Failed creating a new MmapRegion instance.
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
#[error("Failed creating a new MmapRegion instance")] #[error("Failed creating a new MmapRegion instance")]
@ -1034,7 +993,6 @@ impl MemoryManager {
#[cfg(feature = "tdx")] tdx_enabled: bool, #[cfg(feature = "tdx")] tdx_enabled: bool,
restore_data: Option<&MemoryManagerSnapshotData>, restore_data: Option<&MemoryManagerSnapshotData>,
existing_memory_files: Option<HashMap<u32, File>>, existing_memory_files: Option<HashMap<u32, File>>,
#[cfg(target_arch = "x86_64")] sgx_epc_config: Option<Vec<SgxEpcConfig>>,
) -> Result<Arc<Mutex<MemoryManager>>, Error> { ) -> Result<Arc<Mutex<MemoryManager>>, Error> {
trace_scoped!("MemoryManager::new"); trace_scoped!("MemoryManager::new");
@ -1236,8 +1194,7 @@ impl MemoryManager {
None None
}; };
// If running on SGX the start of device area and RAM area may diverge but // The start of device area and RAM area are placed next to each other.
// at this point they are next to each other.
let end_of_ram_area = start_of_device_area.unchecked_sub(1); let end_of_ram_area = start_of_device_area.unchecked_sub(1);
let ram_allocator = AddressAllocator::new(GuestAddress(0), start_of_device_area.0).unwrap(); let ram_allocator = AddressAllocator::new(GuestAddress(0), start_of_device_area.0).unwrap();
@ -1263,8 +1220,6 @@ impl MemoryManager {
hugepages: config.hugepages, hugepages: config.hugepages,
hugepage_size: config.hugepage_size, hugepage_size: config.hugepage_size,
prefault: config.prefault, prefault: config.prefault,
#[cfg(target_arch = "x86_64")]
sgx_epc_region: None,
user_provided_zones, user_provided_zones,
snapshot_memory_ranges: MemoryRangeTable::default(), snapshot_memory_ranges: MemoryRangeTable::default(),
memory_zones, memory_zones,
@ -1279,11 +1234,6 @@ impl MemoryManager {
thp: config.thp, thp: config.thp,
}; };
#[cfg(target_arch = "x86_64")]
if let Some(sgx_epc_config) = sgx_epc_config {
memory_manager.setup_sgx(sgx_epc_config)?;
}
Ok(Arc::new(Mutex::new(memory_manager))) Ok(Arc::new(Mutex::new(memory_manager)))
} }
@ -1311,8 +1261,6 @@ impl MemoryManager {
false, false,
Some(&mem_snapshot), Some(&mem_snapshot),
None, None,
#[cfg(target_arch = "x86_64")]
None,
)?; )?;
mm.lock() mm.lock()
@ -1976,121 +1924,6 @@ impl MemoryManager {
self.virtio_mem_resize(id, virtio_mem_size) self.virtio_mem_resize(id, virtio_mem_size)
} }
#[cfg(target_arch = "x86_64")]
pub fn setup_sgx(&mut self, sgx_epc_config: Vec<SgxEpcConfig>) -> Result<(), Error> {
let file = OpenOptions::new()
.read(true)
.open("/dev/sgx_provision")
.map_err(Error::SgxProvisionOpen)?;
self.vm
.enable_sgx_attribute(file)
.map_err(Error::SgxEnableProvisioning)?;
// Go over each EPC section and verify its size is a 4k multiple. At
// the same time, calculate the total size needed for the contiguous
// EPC region.
let mut epc_region_size = 0;
for epc_section in sgx_epc_config.iter() {
if epc_section.size == 0 {
return Err(Error::EpcSectionSizeInvalid);
}
if epc_section.size & (SGX_PAGE_SIZE - 1) != 0 {
return Err(Error::EpcSectionSizeInvalid);
}
epc_region_size += epc_section.size;
}
// Place the SGX EPC region on a 4k boundary between the RAM and the device area
let epc_region_start =
GuestAddress(self.start_of_device_area.0.div_ceil(SGX_PAGE_SIZE) * SGX_PAGE_SIZE);
self.start_of_device_area = epc_region_start
.checked_add(epc_region_size)
.ok_or(Error::GuestAddressOverFlow)?;
let mut sgx_epc_region = SgxEpcRegion::new(epc_region_start, epc_region_size as GuestUsize);
info!(
"SGX EPC region: 0x{:x} (0x{:x})",
epc_region_start.0, epc_region_size
);
// Each section can be memory mapped into the allocated region.
let mut epc_section_start = epc_region_start.raw_value();
for epc_section in sgx_epc_config.iter() {
let file = OpenOptions::new()
.read(true)
.write(true)
.open("/dev/sgx_vepc")
.map_err(Error::SgxVirtEpcOpen)?;
let prot = PROT_READ | PROT_WRITE;
let mut flags = MAP_NORESERVE | MAP_SHARED;
if epc_section.prefault {
flags |= MAP_POPULATE;
}
// We can't use the vm-memory crate to perform the memory mapping
// here as it would try to ensure the size of the backing file is
// matching the size of the expected mapping. The /dev/sgx_vepc
// device does not work that way, it provides a file descriptor
// which is not matching the mapping size, as it's a just a way to
// let KVM know that an EPC section is being created for the guest.
// SAFETY: FFI call with correct arguments
let host_addr = unsafe {
libc::mmap(
std::ptr::null_mut(),
epc_section.size as usize,
prot,
flags,
file.as_raw_fd(),
0,
)
};
if host_addr == libc::MAP_FAILED {
error!(
"Could not add SGX EPC section (size 0x{:x})",
epc_section.size
);
return Err(Error::SgxEpcRangeAllocation);
}
info!(
"Adding SGX EPC section: 0x{:x} (0x{:x})",
epc_section_start, epc_section.size
);
let _mem_slot = self.create_userspace_mapping(
epc_section_start,
epc_section.size,
host_addr as u64,
false,
false,
false,
)?;
sgx_epc_region.insert(
epc_section.id.clone(),
SgxEpcSection::new(
GuestAddress(epc_section_start),
epc_section.size as GuestUsize,
),
);
epc_section_start += epc_section.size;
}
self.sgx_epc_region = Some(sgx_epc_region);
Ok(())
}
#[cfg(target_arch = "x86_64")]
pub fn sgx_epc_region(&self) -> &Option<SgxEpcRegion> {
&self.sgx_epc_region
}
pub fn is_hardlink(f: &File) -> bool { pub fn is_hardlink(f: &File) -> bool {
let mut stat = std::mem::MaybeUninit::<libc::stat>::uninit(); let mut stat = std::mem::MaybeUninit::<libc::stat>::uninit();
// SAFETY: FFI call with correct arguments // SAFETY: FFI call with correct arguments
@ -2642,34 +2475,6 @@ impl Aml for MemoryManager {
) )
.to_aml_bytes(sink); .to_aml_bytes(sink);
} }
#[cfg(target_arch = "x86_64")]
{
if let Some(sgx_epc_region) = &self.sgx_epc_region {
let min = sgx_epc_region.start().raw_value();
let max = min + sgx_epc_region.size() - 1;
// SGX EPC region
aml::Device::new(
"_SB_.EPC_".into(),
vec![
&aml::Name::new("_HID".into(), &aml::EISAName::new("INT0E0C")),
// QWORD describing the EPC region start and size
&aml::Name::new(
"_CRS".into(),
&aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory(
aml::AddressSpaceCacheable::NotCacheable,
true,
min,
max,
None,
)]),
),
&aml::Method::new("_STA".into(), 0, false, vec![&aml::Return::new(&0xfu8)]),
],
)
.to_aml_bytes(sink);
}
}
} }
} }

View file

@ -606,7 +606,6 @@ impl Vm {
.lock() .lock()
.unwrap() .unwrap()
.populate_cpuid( .populate_cpuid(
&memory_manager,
&hypervisor, &hypervisor,
#[cfg(feature = "tdx")] #[cfg(feature = "tdx")]
tdx_enabled, tdx_enabled,
@ -971,24 +970,6 @@ impl Vm {
} }
} }
#[cfg(target_arch = "x86_64")]
if let Some(sgx_epc_sections) = &config.sgx_epc_sections {
if let Some(sgx_epc_region) = mm.sgx_epc_region() {
let mm_sections = sgx_epc_region.epc_sections();
for sgx_epc_section in sgx_epc_sections.iter() {
if let Some(mm_section) = mm_sections.get(sgx_epc_section) {
node.sgx_epc_sections.push(mm_section.clone());
} else {
error!("Unknown SGX EPC section '{}'", sgx_epc_section);
return Err(Error::InvalidNumaConfig);
}
}
} else {
error!("Missing SGX EPC region");
return Err(Error::InvalidNumaConfig);
}
}
numa_nodes.insert(config.guest_numa_id, node); numa_nodes.insert(config.guest_numa_id, node);
} }
} }
@ -1056,9 +1037,6 @@ impl Vm {
) )
.map_err(Error::MemoryManager)? .map_err(Error::MemoryManager)?
} else { } else {
#[cfg(target_arch = "x86_64")]
let sgx_epc_config = vm_config.lock().unwrap().sgx_epc.clone();
MemoryManager::new( MemoryManager::new(
vm.clone(), vm.clone(),
&vm_config.lock().unwrap().memory.clone(), &vm_config.lock().unwrap().memory.clone(),
@ -1068,8 +1046,6 @@ impl Vm {
tdx_enabled, tdx_enabled,
None, None,
None, None,
#[cfg(target_arch = "x86_64")]
sgx_epc_config,
) )
.map_err(Error::MemoryManager)? .map_err(Error::MemoryManager)?
}; };
@ -1420,13 +1396,6 @@ impl Vm {
let boot_vcpus = self.cpu_manager.lock().unwrap().boot_vcpus(); let boot_vcpus = self.cpu_manager.lock().unwrap().boot_vcpus();
let rsdp_addr = Some(rsdp_addr); let rsdp_addr = Some(rsdp_addr);
let sgx_epc_region = self
.memory_manager
.lock()
.unwrap()
.sgx_epc_region()
.as_ref()
.cloned();
let serial_number = self let serial_number = self
.config .config
@ -1466,7 +1435,6 @@ impl Vm {
boot_vcpus, boot_vcpus,
entry_addr.setup_header, entry_addr.setup_header,
rsdp_addr, rsdp_addr,
sgx_epc_region,
serial_number.as_deref(), serial_number.as_deref(),
uuid.as_deref(), uuid.as_deref(),
oem_strings.as_deref(), oem_strings.as_deref(),
@ -2917,7 +2885,6 @@ impl Snapshottable for Vm {
arch::generate_common_cpuid( arch::generate_common_cpuid(
&self.hypervisor, &self.hypervisor,
&arch::CpuidConfig { &arch::CpuidConfig {
sgx_epc_sections: None,
phys_bits, phys_bits,
kvm_hyperv: self.config.lock().unwrap().cpus.kvm_hyperv, kvm_hyperv: self.config.lock().unwrap().cpus.kvm_hyperv,
#[cfg(feature = "tdx")] #[cfg(feature = "tdx")]

View file

@ -671,16 +671,6 @@ impl Default for IvshmemConfig {
} }
} }
#[cfg(target_arch = "x86_64")]
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct SgxEpcConfig {
pub id: String,
#[serde(default)]
pub size: u64,
#[serde(default)]
pub prefault: bool,
}
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct NumaDistance { pub struct NumaDistance {
#[serde(default)] #[serde(default)]
@ -699,9 +689,6 @@ pub struct NumaConfig {
pub distances: Option<Vec<NumaDistance>>, pub distances: Option<Vec<NumaDistance>>,
#[serde(default)] #[serde(default)]
pub memory_zones: Option<Vec<String>>, pub memory_zones: Option<Vec<String>>,
#[cfg(target_arch = "x86_64")]
#[serde(default)]
pub sgx_epc_sections: Option<Vec<String>>,
#[serde(default)] #[serde(default)]
pub pci_segments: Option<Vec<u16>>, pub pci_segments: Option<Vec<u16>>,
} }
@ -941,8 +928,6 @@ pub struct VmConfig {
pub pvpanic: bool, pub pvpanic: bool,
#[serde(default)] #[serde(default)]
pub iommu: bool, pub iommu: bool,
#[cfg(target_arch = "x86_64")]
pub sgx_epc: Option<Vec<SgxEpcConfig>>,
pub numa: Option<Vec<NumaConfig>>, pub numa: Option<Vec<NumaConfig>>,
#[serde(default)] #[serde(default)]
pub watchdog: bool, pub watchdog: bool,