From 2d9e2431638dfdbc341625a1ab67f3814dbd77cf Mon Sep 17 00:00:00 2001 From: Shubham Chakrawar Date: Fri, 15 Aug 2025 13:18:14 -0700 Subject: [PATCH] misc: Remove SGX support from Cloud Hypervisor This commit removes the SGX support from cloud hypervisor. SGX support was deprecated in May as part of #7090. Signed-off-by: Shubham Chakrawar --- .lychee.toml | 2 - arch/src/lib.rs | 5 - arch/src/x86_64/mod.rs | 154 +---------------- docs/intel_sgx.md | 54 ------ docs/memory.md | 23 +-- docs/snapshot_restore.md | 2 +- fuzz/fuzz_targets/http_api.rs | 2 - hypervisor/src/kvm/mod.rs | 26 +-- hypervisor/src/mshv/mod.rs | 7 - hypervisor/src/vm.rs | 10 -- scripts/dev_cli.sh | 26 --- scripts/run_integration_tests_sgx.sh | 51 ------ src/main.rs | 10 -- test_infra/src/lib.rs | 18 -- tests/integration.rs | 44 ----- vmm/src/acpi.rs | 10 -- vmm/src/api/openapi/cloud-hypervisor.yaml | 23 --- vmm/src/config.rs | 85 +--------- vmm/src/cpu.rs | 11 -- vmm/src/lib.rs | 8 +- vmm/src/memory_manager.rs | 197 +--------------------- vmm/src/vm.rs | 33 ---- vmm/src/vm_config.rs | 15 -- 23 files changed, 11 insertions(+), 805 deletions(-) delete mode 100644 docs/intel_sgx.md delete mode 100755 scripts/run_integration_tests_sgx.sh diff --git a/.lychee.toml b/.lychee.toml index 875a86182..44517a781 100644 --- a/.lychee.toml +++ b/.lychee.toml @@ -2,8 +2,6 @@ verbose = "info" exclude = [ # Availability of links below should be manually verified. - # Page for intel SGX support, returns 403 while querying. - '^https://www.intel.com/content/www/us/en/developer/tools/software-guard-extensions/linux-overview.html', # Page for intel TDX support, returns 403 while querying. '^https://www.intel.com/content/www/us/en/developer/tools/trust-domain-extensions/overview.html', # Page for TPM, returns 403 while querying. 
diff --git a/arch/src/lib.rs b/arch/src/lib.rs index aff58ffe3..2413fe223 100644 --- a/arch/src/lib.rs +++ b/arch/src/lib.rs @@ -18,9 +18,6 @@ use std::{fmt, result}; use serde::{Deserialize, Serialize}; use thiserror::Error; -#[cfg(target_arch = "x86_64")] -use crate::x86_64::SgxEpcSection; - type GuestMemoryMmap = vm_memory::GuestMemoryMmap; type GuestRegionMmap = vm_memory::GuestRegionMmap; @@ -127,8 +124,6 @@ pub struct NumaNode { pub pci_segments: Vec, pub distances: BTreeMap, pub memory_zones: Vec, - #[cfg(target_arch = "x86_64")] - pub sgx_epc_sections: Vec, } pub type NumaNodes = BTreeMap; diff --git a/arch/src/x86_64/mod.rs b/arch/src/x86_64/mod.rs index 27e1375d8..22d1a1cfe 100644 --- a/arch/src/x86_64/mod.rs +++ b/arch/src/x86_64/mod.rs @@ -12,7 +12,6 @@ pub mod layout; mod mpspec; mod mptable; pub mod regs; -use std::collections::BTreeMap; use std::mem; use hypervisor::arch::x86::{CpuIdEntry, CPUID_FLAG_VALID_INDEX}; @@ -24,7 +23,7 @@ use linux_loader::loader::elf::start_info::{ use thiserror::Error; use vm_memory::{ Address, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, - GuestMemoryRegion, GuestUsize, + GuestMemoryRegion, }; use crate::{GuestMemoryMmap, InitramfsConfig, RegionType}; @@ -79,55 +78,7 @@ pub struct EntryPoint { const E820_RAM: u32 = 1; const E820_RESERVED: u32 = 2; -#[derive(Clone)] -pub struct SgxEpcSection { - start: GuestAddress, - size: GuestUsize, -} - -impl SgxEpcSection { - pub fn new(start: GuestAddress, size: GuestUsize) -> Self { - SgxEpcSection { start, size } - } - pub fn start(&self) -> GuestAddress { - self.start - } - pub fn size(&self) -> GuestUsize { - self.size - } -} - -#[derive(Clone)] -pub struct SgxEpcRegion { - start: GuestAddress, - size: GuestUsize, - epc_sections: BTreeMap, -} - -impl SgxEpcRegion { - pub fn new(start: GuestAddress, size: GuestUsize) -> Self { - SgxEpcRegion { - start, - size, - epc_sections: BTreeMap::new(), - } - } - pub fn start(&self) -> GuestAddress { - self.start 
- } - pub fn size(&self) -> GuestUsize { - self.size - } - pub fn epc_sections(&self) -> &BTreeMap { - &self.epc_sections - } - pub fn insert(&mut self, id: String, epc_section: SgxEpcSection) { - self.epc_sections.insert(id, epc_section); - } -} - pub struct CpuidConfig { - pub sgx_epc_sections: Option>, pub phys_bits: u8, pub kvm_hyperv: bool, #[cfg(feature = "tdx")] @@ -169,18 +120,6 @@ pub enum Error { #[error("Error setting up SMBIOS table")] SmbiosSetup(#[source] smbios::Error), - /// Could not find any SGX EPC section - #[error("Could not find any SGX EPC section")] - NoSgxEpcSection, - - /// Missing SGX CPU feature - #[error("Missing SGX CPU feature")] - MissingSgxFeature, - - /// Missing SGX_LC CPU feature - #[error("Missing SGX_LC CPU feature")] - MissingSgxLaunchControlFeature, - /// Error getting supported CPUID through the hypervisor (kvm/mshv) API #[error("Error getting supported CPUID through the hypervisor API")] CpuidGetSupported(#[source] HypervisorError), @@ -467,7 +406,7 @@ impl CpuidFeatureEntry { feature_reg: CpuidReg::EDX, compatible_check: CpuidCompatibleCheck::BitwiseSubset, }, - // KVM CPUID bits: https://www.kernel.org/doc/html/latest/virt/kvm/cpuid.html + // KVM CPUID bits: https://www.kernel.org/doc/html/latest/virt/kvm/x86/cpuid.html // Leaf 0x4000_0000, EAX/EBX/ECX/EDX, KVM CPUID SIGNATURE CpuidFeatureEntry { function: 0x4000_0000, @@ -675,10 +614,6 @@ pub fn generate_common_cpuid( CpuidPatch::patch_cpuid(&mut cpuid, cpuid_patches); - if let Some(sgx_epc_sections) = &config.sgx_epc_sections { - update_cpuid_sgx(&mut cpuid, sgx_epc_sections)?; - } - #[cfg(feature = "tdx")] let tdx_capabilities = if config.tdx { let caps = hypervisor @@ -974,7 +909,6 @@ pub fn configure_system( _num_cpus: u32, setup_header: Option, rsdp_addr: Option, - sgx_epc_region: Option, serial_number: Option<&str>, uuid: Option<&str>, oem_strings: Option<&[&str]>, @@ -1008,15 +942,8 @@ pub fn configure_system( initramfs, hdr, rsdp_addr, - sgx_epc_region, - ), - 
None => configure_pvh( - guest_mem, - cmdline_addr, - initramfs, - rsdp_addr, - sgx_epc_region, ), + None => configure_pvh(guest_mem, cmdline_addr, initramfs, rsdp_addr), } } @@ -1108,7 +1035,6 @@ fn configure_pvh( cmdline_addr: GuestAddress, initramfs: &Option, rsdp_addr: Option, - sgx_epc_region: Option, ) -> super::Result<()> { const XEN_HVM_START_MAGIC_VALUE: u32 = 0x336ec578; @@ -1174,15 +1100,6 @@ fn configure_pvh( E820_RESERVED, ); - if let Some(sgx_epc_region) = sgx_epc_region { - add_memmap_entry( - &mut memmap, - sgx_epc_region.start().raw_value(), - sgx_epc_region.size(), - E820_RESERVED, - ); - } - start_info.memmap_entries = memmap.len() as u32; // Copy the vector with the memmap table to the MEMMAP_START address @@ -1229,7 +1146,6 @@ fn configure_32bit_entry( initramfs: &Option, setup_hdr: setup_header, rsdp_addr: Option, - sgx_epc_region: Option, ) -> super::Result<()> { const KERNEL_LOADER_OTHER: u8 = 0xff; @@ -1285,15 +1201,6 @@ fn configure_32bit_entry( E820_RESERVED, )?; - if let Some(sgx_epc_region) = sgx_epc_region { - add_e820_entry( - &mut params, - sgx_epc_region.start().raw_value(), - sgx_epc_region.size(), - E820_RESERVED, - )?; - } - if let Some(rsdp_addr) = rsdp_addr { params.acpi_rsdp_addr = rsdp_addr.0; } @@ -1527,57 +1434,6 @@ fn update_cpuid_topology( } } } - -// The goal is to update the CPUID sub-leaves to reflect the number of EPC -// sections exposed to the guest. -fn update_cpuid_sgx( - cpuid: &mut Vec, - epc_sections: &[SgxEpcSection], -) -> Result<(), Error> { - // Something's wrong if there's no EPC section. - if epc_sections.is_empty() { - return Err(Error::NoSgxEpcSection); - } - // We can't go further if the hypervisor does not support SGX feature. - if !CpuidPatch::is_feature_enabled(cpuid, 0x7, 0, CpuidReg::EBX, 2) { - return Err(Error::MissingSgxFeature); - } - // We can't go further if the hypervisor does not support SGX_LC feature. 
- if !CpuidPatch::is_feature_enabled(cpuid, 0x7, 0, CpuidReg::ECX, 30) { - return Err(Error::MissingSgxLaunchControlFeature); - } - - // Get host CPUID for leaf 0x12, subleaf 0x2. This is to retrieve EPC - // properties such as confidentiality and integrity. - // SAFETY: call cpuid with valid leaves - let leaf = unsafe { std::arch::x86_64::__cpuid_count(0x12, 0x2) }; - - for (i, epc_section) in epc_sections.iter().enumerate() { - let subleaf_idx = i + 2; - let start = epc_section.start().raw_value(); - let size = epc_section.size(); - let eax = (start & 0xffff_f000) as u32 | 0x1; - let ebx = (start >> 32) as u32; - let ecx = (size & 0xffff_f000) as u32 | (leaf.ecx & 0xf); - let edx = (size >> 32) as u32; - // CPU Topology leaf 0x12 - CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EAX, eax); - CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EBX, ebx); - CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::ECX, ecx); - CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EDX, edx); - } - - // Add one NULL entry to terminate the dynamic list - let subleaf_idx = epc_sections.len() + 2; - // CPU Topology leaf 0x12 - CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EAX, 0); - CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EBX, 0); - CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::ECX, 0); - CpuidPatch::set_cpuid_reg(cpuid, 0x12, Some(subleaf_idx as u32), CpuidReg::EDX, 0); - - Ok(()) -} - #[cfg(test)] mod tests { use linux_loader::loader::bootparam::boot_e820_entry; @@ -1608,7 +1464,6 @@ mod tests { None, None, None, - None, ); config_err.unwrap_err(); @@ -1633,7 +1488,6 @@ mod tests { None, None, None, - None, ) .unwrap(); @@ -1663,7 +1517,6 @@ mod tests { None, None, None, - None, ) .unwrap(); @@ -1679,7 +1532,6 @@ mod tests { None, None, None, - None, ) .unwrap(); } diff --git 
a/docs/intel_sgx.md b/docs/intel_sgx.md deleted file mode 100644 index 9f2ca76bd..000000000 --- a/docs/intel_sgx.md +++ /dev/null @@ -1,54 +0,0 @@ -# Intel SGX - -Intel® Software Guard Extensions (Intel® SGX) is an Intel technology designed -to increase the security of application code and data. Cloud Hypervisor supports -SGX virtualization through KVM. Because SGX is built on hardware features that -cannot be emulated in software, virtualizing SGX requires support in KVM and in -the host kernel. The required Linux and KVM changes can be found in Linux 5.13+. - -Utilizing SGX in the guest requires a kernel/OS with SGX support, e.g. a kernel -since release 5.11, see -[here](https://www.intel.com/content/www/us/en/developer/tools/software-guard-extensions/linux-overview.html). -Running Linux 5.13+ as the guest kernel allows nested virtualization of SGX. - -For more information about SGX, please refer to the [SGX Homepage](https://www.intel.com/content/www/us/en/developer/tools/software-guard-extensions/linux-overview.html). - -For more information about SGX SDK and how to test SGX, please refer to the -following [instructions](https://github.com/intel/linux-sgx). - -## Cloud Hypervisor support - -Assuming the host exposes `/dev/sgx_vepc`, we can pass SGX enclaves through -the guest. - -In order to use SGX enclaves within a Cloud Hypervisor VM, we must define one -or several Enclave Page Cache (EPC) sections. Here is an example of a VM being -created with 2 EPC sections, the first one being 64MiB with pre-allocated -memory, the second one being 32MiB with no pre-allocated memory.
- -```bash -./cloud-hypervisor \ - --cpus boot=1 \ - --memory size=1G \ - --disk path=focal-server-cloudimg-amd64.raw \ - --kernel vmlinux \ - --cmdline "console=ttyS0 console=hvc0 root=/dev/vda1 rw" \ - --sgx-epc id=epc0,size=64M,prefault=on id=epc1,size=32M,prefault=off -``` - -Once booted, and assuming your guest kernel contains the patches from the -[KVM SGX Tree](https://github.com/intel/kvm-sgx), you can validate SGX devices -have been correctly created under `/dev/sgx`: - -```bash -ls /dev/sgx* -/dev/sgx_enclave /dev/sgx_provision /dev/sgx_vepc -``` - -From this point, it is possible to run any SGX application from the guest, as -it will access `/dev/sgx_enclave` device to create dedicated SGX enclaves. - -Note: There is only one contiguous SGX EPC region, which contains all SGX EPC -sections. This region is exposed through ACPI and marked as reserved through -the e820 table. It is treated as yet another device, which means it should -appear at the end of the guest address space. diff --git a/docs/memory.md b/docs/memory.md index 46569449c..a429ff1b7 100644 --- a/docs/memory.md +++ b/docs/memory.md @@ -437,12 +437,11 @@ struct NumaConfig { cpus: Option>, distances: Option>, memory_zones: Option>, - sgx_epc_sections: Option>, } ``` ``` ---numa Settings related to a given NUMA node "guest_numa_id=,cpus=,distances=,memory_zones=,sgx_epc_sections=" +--numa Settings related to a given NUMA node "guest_numa_id=,cpus=,distances=,memory_zones=" ``` ### `guest_numa_id` @@ -550,26 +549,6 @@ _Example_ --numa guest_numa_id=0,memory_zones=[mem0,mem2] guest_numa_id=1,memory_zones=mem1 ``` -### `sgx_epc_sections` - -List of SGX EPC sections attached to the guest NUMA node identified by the -`guest_numa_id` option. This allows for describing a list of SGX EPC sections -which must be seen by the guest as belonging to the NUMA node `guest_numa_id`. - -Multiple values can be provided to define the list.
Each value is a string -referring to an existing SGX EPC section identifier. Values are separated from -each other with the `,` separator. - -As soon as one tries to describe a list of values, `[` and `]` must be used to -demarcate the list. - -_Example_ - -``` ---sgx-epc id=epc0,size=32M id=epc1,size=64M id=epc2,size=32M ---numa guest_numa_id=0,sgx_epc_sections=epc1 guest_numa_id=1,sgx_epc_sections=[epc0,epc2] -``` - ### PCI bus Cloud Hypervisor supports guests with one or more PCI segments. The default PCI segment always diff --git a/docs/snapshot_restore.md b/docs/snapshot_restore.md index 67f29ce6d..df7248805 100644 --- a/docs/snapshot_restore.md +++ b/docs/snapshot_restore.md @@ -110,4 +110,4 @@ from the restored VM. ## Limitations -VFIO devices and Intel SGX are out of scope. +VFIO devices are out of scope. diff --git a/fuzz/fuzz_targets/http_api.rs b/fuzz/fuzz_targets/http_api.rs index ee8fa5237..e9965cedd 100644 --- a/fuzz/fuzz_targets/http_api.rs +++ b/fuzz/fuzz_targets/http_api.rs @@ -186,8 +186,6 @@ impl RequestHandler for StubApiRequestHandler { #[cfg(feature = "pvmemcontrol")] pvmemcontrol: None, iommu: false, - #[cfg(target_arch = "x86_64")] - sgx_epc: None, numa: None, watchdog: false, gdb: false, diff --git a/hypervisor/src/kvm/mod.rs b/hypervisor/src/kvm/mod.rs index 4204d4f83..9aaafd5b0 100644 --- a/hypervisor/src/kvm/mod.rs +++ b/hypervisor/src/kvm/mod.rs @@ -12,11 +12,9 @@ use std::any::Any; use std::collections::HashMap; -#[cfg(target_arch = "x86_64")] -use std::fs::File; #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))] use std::mem::offset_of; -#[cfg(target_arch = "x86_64")] +#[cfg(feature = "tdx")] use std::os::unix::io::AsRawFd; #[cfg(feature = "tdx")] use std::os::unix::io::RawFd; @@ -108,6 +106,8 @@ use kvm_bindings::{kvm_run__bindgen_ty_1, KVMIO}; pub use kvm_ioctls::{Cap, Kvm, VcpuExit}; use thiserror::Error; use vfio_ioctls::VfioDeviceFd; +#[cfg(target_arch = "x86_64")] +use vmm_sys_util::ioctl_io_nr; #[cfg(feature = "tdx")] 
use vmm_sys_util::{ioctl::ioctl_with_val, ioctl_iowr_nr}; pub use {kvm_bindings, kvm_ioctls}; @@ -116,13 +116,6 @@ pub use {kvm_bindings, kvm_ioctls}; use crate::arch::aarch64::regs; #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))] use crate::RegList; - -#[cfg(target_arch = "x86_64")] -const KVM_CAP_SGX_ATTRIBUTE: u32 = 196; - -#[cfg(target_arch = "x86_64")] -use vmm_sys_util::ioctl_io_nr; - #[cfg(target_arch = "x86_64")] ioctl_io_nr!(KVM_NMI, kvm_bindings::KVMIO, 0x9a); @@ -893,19 +886,6 @@ impl vm::Vm for KvmVm { Ok(()) } - #[cfg(target_arch = "x86_64")] - fn enable_sgx_attribute(&self, file: File) -> vm::Result<()> { - let mut cap = kvm_enable_cap { - cap: KVM_CAP_SGX_ATTRIBUTE, - ..Default::default() - }; - cap.args[0] = file.as_raw_fd() as u64; - self.fd - .enable_cap(&cap) - .map_err(|e| vm::HypervisorVmError::EnableSgxAttribute(e.into()))?; - Ok(()) - } - /// Retrieve guest clock. #[cfg(target_arch = "x86_64")] fn get_clock(&self) -> vm::Result { diff --git a/hypervisor/src/mshv/mod.rs b/hypervisor/src/mshv/mod.rs index 8b331cf0a..93634ada0 100644 --- a/hypervisor/src/mshv/mod.rs +++ b/hypervisor/src/mshv/mod.rs @@ -41,8 +41,6 @@ pub mod x86_64; // aarch64 dependencies #[cfg(target_arch = "aarch64")] pub mod aarch64; -#[cfg(target_arch = "x86_64")] -use std::fs::File; use std::os::unix::io::AsRawFd; #[cfg(target_arch = "aarch64")] use std::sync::Mutex; @@ -1892,11 +1890,6 @@ impl vm::Vm for MshvVm { Ok(()) } - #[cfg(target_arch = "x86_64")] - fn enable_sgx_attribute(&self, _file: File) -> vm::Result<()> { - Ok(()) - } - fn register_ioevent( &self, fd: &EventFd, diff --git a/hypervisor/src/vm.rs b/hypervisor/src/vm.rs index 113c2001f..bd9c0e674 100644 --- a/hypervisor/src/vm.rs +++ b/hypervisor/src/vm.rs @@ -11,8 +11,6 @@ // use std::any::Any; -#[cfg(target_arch = "x86_64")] -use std::fs::File; use std::sync::Arc; #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))] use std::sync::Mutex; @@ -130,12 +128,6 @@ pub enum HypervisorVmError { 
/// #[error("Failed to enable x2apic API")] EnableX2ApicApi(#[source] anyhow::Error), - /// - /// Enable SGX attribute error - /// - #[error("Failed to enable SGX attribute")] - EnableSgxAttribute(#[source] anyhow::Error), - /// /// Get clock error /// #[error("Failed to get clock")] @@ -363,8 +355,6 @@ pub trait Vm: Send + Sync + Any { /// Enable split Irq capability #[cfg(target_arch = "x86_64")] fn enable_split_irq(&self) -> Result<()>; - #[cfg(target_arch = "x86_64")] - fn enable_sgx_attribute(&self, file: File) -> Result<()>; /// Retrieve guest clock. #[cfg(target_arch = "x86_64")] fn get_clock(&self) -> Result; diff --git a/scripts/dev_cli.sh b/scripts/dev_cli.sh index 6dea0d7d2..519517dcd 100755 --- a/scripts/dev_cli.sh +++ b/scripts/dev_cli.sh @@ -193,7 +193,6 @@ cmd_help() { echo " Run the Cloud Hypervisor tests." echo " --unit Run the unit tests." echo " --integration Run the integration tests." - echo " --integration-sgx Run the SGX integration tests." echo " --integration-vfio Run the VFIO integration tests." echo " --integration-windows Run the Windows guest integration tests." echo " --integration-live-migration Run the live-migration integration tests." @@ -327,7 +326,6 @@ cmd_clean() { cmd_tests() { unit=false integration=false - integration_sgx=false integration_vfio=false integration_windows=false integration_live_migration=false @@ -346,7 +344,6 @@ cmd_tests() { } ;; "--unit") { unit=true; } ;; "--integration") { integration=true; } ;; - "--integration-sgx") { integration_sgx=true; } ;; "--integration-vfio") { integration_vfio=true; } ;; "--integration-windows") { integration_windows=true; } ;; "--integration-live-migration") { integration_live_migration=true; } ;; @@ -449,29 +446,6 @@ cmd_tests() { dbus-run-session ./scripts/run_integration_tests_"$(uname -m)".sh "$@" || fix_dir_perms $? || exit $? fi - if [ "$integration_sgx" = true ]; then - say "Running SGX integration tests for $target..." 
- $DOCKER_RUNTIME run \ - --workdir "$CTR_CLH_ROOT_DIR" \ - --rm \ - --privileged \ - --security-opt seccomp=unconfined \ - --ipc=host \ - --net="$CTR_CLH_NET" \ - --mount type=tmpfs,destination=/tmp \ - --volume /dev:/dev \ - --volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" \ - ${exported_volumes:+"$exported_volumes"} \ - --volume "$CLH_INTEGRATION_WORKLOADS:$CTR_CLH_INTEGRATION_WORKLOADS" \ - --env USER="root" \ - --env BUILD_TARGET="$target" \ - --env RUSTFLAGS="$rustflags" \ - --env TARGET_CC="$target_cc" \ - --env AUTH_DOWNLOAD_TOKEN="$AUTH_DOWNLOAD_TOKEN" \ - "$CTR_IMAGE" \ - ./scripts/run_integration_tests_sgx.sh "$@" || fix_dir_perms $? || exit $? - fi - if [ "$integration_vfio" = true ]; then say "Running VFIO integration tests for $target..." $DOCKER_RUNTIME run \ diff --git a/scripts/run_integration_tests_sgx.sh b/scripts/run_integration_tests_sgx.sh deleted file mode 100755 index b6549b628..000000000 --- a/scripts/run_integration_tests_sgx.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash -# shellcheck disable=SC2048,SC2086 -set -x - -# shellcheck source=/dev/null -source "$HOME"/.cargo/env -source "$(dirname "$0")"/test-util.sh - -process_common_args "$@" - -if [[ "$hypervisor" = "mshv" ]]; then - echo "Unsupported SGX test for MSHV" - exit 1 -fi - -WORKLOADS_DIR="$HOME/workloads" -mkdir -p "$WORKLOADS_DIR" - -download_hypervisor_fw - -JAMMY_OS_IMAGE_NAME="jammy-server-cloudimg-amd64-custom-20241017-0.qcow2" -JAMMY_OS_IMAGE_URL="https://ch-images.azureedge.net/$JAMMY_OS_IMAGE_NAME" -JAMMY_OS_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_IMAGE_NAME" -if [ ! -f "$JAMMY_OS_IMAGE" ]; then - pushd "$WORKLOADS_DIR" || exit - time wget --quiet $JAMMY_OS_IMAGE_URL || exit 1 - popd || exit -fi - -JAMMY_OS_RAW_IMAGE_NAME="jammy-server-cloudimg-amd64-custom-20241017-0.raw" -JAMMY_OS_RAW_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_RAW_IMAGE_NAME" -if [ ! 
-f "$JAMMY_OS_RAW_IMAGE" ]; then - pushd "$WORKLOADS_DIR" || exit - time qemu-img convert -p -f qcow2 -O raw $JAMMY_OS_IMAGE_NAME $JAMMY_OS_RAW_IMAGE_NAME || exit 1 - popd || exit -fi - -CFLAGS="" -if [[ "${BUILD_TARGET}" == "x86_64-unknown-linux-musl" ]]; then - # shellcheck disable=SC2034 - CFLAGS="-I /usr/include/x86_64-linux-musl/ -idirafter /usr/include/" -fi - -cargo build --features mshv --all --release --target "$BUILD_TARGET" - -export RUST_BACKTRACE=1 - -time cargo test "sgx::$test_filter" -- ${test_binary_args[*]} -RES=$? - -exit $RES diff --git a/src/main.rs b/src/main.rs index 6daac338f..8329100b6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -31,8 +31,6 @@ use vmm::vm_config; use vmm::vm_config::FwCfgConfig; #[cfg(feature = "ivshmem")] use vmm::vm_config::IvshmemConfig; -#[cfg(target_arch = "x86_64")] -use vmm::vm_config::SgxEpcConfig; use vmm::vm_config::{ BalloonConfig, DeviceConfig, DiskConfig, FsConfig, LandlockConfig, NetConfig, NumaConfig, PciSegmentConfig, PmemConfig, RateLimiterGroupConfig, TpmConfig, UserDeviceConfig, VdpaConfig, @@ -429,12 +427,6 @@ fn get_cli_options_sorted( .help("Control serial port: off|null|pty|tty|file=|socket=") .default_value("null") .group("vm-config"), - #[cfg(target_arch = "x86_64")] - Arg::new("sgx-epc") - .long("sgx-epc") - .help(SgxEpcConfig::SYNTAX) - .num_args(1..) 
- .group("vm-config"), Arg::new("tpm") .long("tpm") .num_args(1) @@ -1030,8 +1022,6 @@ mod unit_tests { #[cfg(feature = "pvmemcontrol")] pvmemcontrol: None, iommu: false, - #[cfg(target_arch = "x86_64")] - sgx_epc: None, numa: None, watchdog: false, #[cfg(feature = "guest_debug")] diff --git a/test_infra/src/lib.rs b/test_infra/src/lib.rs index 6875aa5b2..812a3a933 100644 --- a/test_infra/src/lib.rs +++ b/test_infra/src/lib.rs @@ -1061,24 +1061,6 @@ impl Guest { } } - #[cfg(target_arch = "x86_64")] - pub fn check_sgx_support(&self) -> Result<(), Error> { - self.ssh_command( - "cpuid -l 0x7 -s 0 | tr -s [:space:] | grep -q 'SGX: \ - Software Guard Extensions supported = true'", - )?; - self.ssh_command( - "cpuid -l 0x7 -s 0 | tr -s [:space:] | grep -q 'SGX_LC: \ - SGX launch config supported = true'", - )?; - self.ssh_command( - "cpuid -l 0x12 -s 0 | tr -s [:space:] | grep -q 'SGX1 \ - supported = true'", - )?; - - Ok(()) - } - pub fn get_pci_bridge_class(&self) -> Result { Ok(self .ssh_command("cat /sys/bus/pci/devices/0000:00:00.0/class")? diff --git a/tests/integration.rs b/tests/integration.rs index 4e74cf0fc..94f79dac6 100644 --- a/tests/integration.rs +++ b/tests/integration.rs @@ -9530,50 +9530,6 @@ mod windows { } } -#[cfg(target_arch = "x86_64")] -mod sgx { - use crate::*; - - #[test] - fn test_sgx() { - let jammy_image = JAMMY_IMAGE_NAME.to_string(); - let jammy = UbuntuDiskConfig::new(jammy_image); - let guest = Guest::new(Box::new(jammy)); - - let mut child = GuestCommand::new(&guest) - .args(["--cpus", "boot=1"]) - .args(["--memory", "size=512M"]) - .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) - .default_disks() - .default_net() - .args(["--sgx-epc", "id=epc0,size=64M"]) - .capture_output() - .spawn() - .unwrap(); - - let r = std::panic::catch_unwind(|| { - guest.wait_vm_boot(None).unwrap(); - - // Check if SGX is correctly detected in the guest. - guest.check_sgx_support().unwrap(); - - // Validate the SGX EPC section is 64MiB. 
- assert_eq!( - guest - .ssh_command("cpuid -l 0x12 -s 2 | grep 'section size' | cut -d '=' -f 2") - .unwrap() - .trim(), - "0x0000000004000000" - ); - }); - - let _ = child.kill(); - let output = child.wait_with_output().unwrap(); - - handle_child_output(r, &output); - } -} - #[cfg(target_arch = "x86_64")] mod vfio { use crate::*; diff --git a/vmm/src/acpi.rs b/vmm/src/acpi.rs index a2299acd8..215deac91 100644 --- a/vmm/src/acpi.rs +++ b/vmm/src/acpi.rs @@ -314,16 +314,6 @@ fn create_srat_table( )) } - #[cfg(target_arch = "x86_64")] - for section in &node.sgx_epc_sections { - srat.append(MemoryAffinity::from_range( - section.start().raw_value(), - section.size(), - proximity_domain, - MemAffinityFlags::ENABLE, - )) - } - for cpu in &node.cpus { #[cfg(target_arch = "x86_64")] let x2apic_id = arch::x86_64::get_x2apic_id(*cpu, topology); diff --git a/vmm/src/api/openapi/cloud-hypervisor.yaml b/vmm/src/api/openapi/cloud-hypervisor.yaml index 80a4fa257..e4a76f6b7 100644 --- a/vmm/src/api/openapi/cloud-hypervisor.yaml +++ b/vmm/src/api/openapi/cloud-hypervisor.yaml @@ -607,10 +607,6 @@ components: $ref: "#/components/schemas/VdpaConfig" vsock: $ref: "#/components/schemas/VsockConfig" - sgx_epc: - type: array - items: - $ref: "#/components/schemas/SgxEpcConfig" numa: type: array items: @@ -1143,21 +1139,6 @@ components: id: type: string - SgxEpcConfig: - required: - - id - - size - type: object - properties: - id: - type: string - size: - type: integer - format: int64 - prefault: - type: boolean - default: false - NumaDistance: required: - destination @@ -1192,10 +1173,6 @@ components: type: array items: type: string - sgx_epc_sections: - type: array - items: - type: string pci_segments: type: array items: diff --git a/vmm/src/config.rs b/vmm/src/config.rs index 06523761a..29a2644b7 100644 --- a/vmm/src/config.rs +++ b/vmm/src/config.rs @@ -109,14 +109,6 @@ pub enum Error { /// Failed parsing restore parameters #[error("Error parsing --restore")] ParseRestore(#[source] 
OptionParserError), - /// Failed parsing SGX EPC parameters - #[cfg(target_arch = "x86_64")] - #[error("Error parsing --sgx-epc")] - ParseSgxEpc(#[source] OptionParserError), - /// Missing 'id' from SGX EPC section - #[cfg(target_arch = "x86_64")] - #[error("Error parsing --sgx-epc: id missing")] - ParseSgxEpcIdMissing, /// Failed parsing NUMA parameters #[error("Error parsing --numa")] ParseNuma(#[source] OptionParserError), @@ -395,8 +387,6 @@ pub struct VmParams<'a> { #[cfg(feature = "pvmemcontrol")] pub pvmemcontrol: bool, pub pvpanic: bool, - #[cfg(target_arch = "x86_64")] - pub sgx_epc: Option>, pub numa: Option>, pub watchdog: bool, #[cfg(feature = "guest_debug")] @@ -462,10 +452,6 @@ impl<'a> VmParams<'a> { #[cfg(feature = "pvmemcontrol")] let pvmemcontrol = args.get_flag("pvmemcontrol"); let pvpanic = args.get_flag("pvpanic"); - #[cfg(target_arch = "x86_64")] - let sgx_epc: Option> = args - .get_many::("sgx-epc") - .map(|x| x.map(|y| y as &str).collect()); let numa: Option> = args .get_many::("numa") .map(|x| x.map(|y| y as &str).collect()); @@ -516,8 +502,6 @@ impl<'a> VmParams<'a> { #[cfg(feature = "pvmemcontrol")] pvmemcontrol, pvpanic, - #[cfg(target_arch = "x86_64")] - sgx_epc, numa, watchdog, #[cfg(feature = "guest_debug")] @@ -2139,36 +2123,10 @@ impl VsockConfig { } } -#[cfg(target_arch = "x86_64")] -impl SgxEpcConfig { - pub const SYNTAX: &'static str = "SGX EPC parameters \ - \"id=,size=,prefault=on|off\""; - - pub fn parse(sgx_epc: &str) -> Result { - let mut parser = OptionParser::new(); - parser.add("id").add("size").add("prefault"); - parser.parse(sgx_epc).map_err(Error::ParseSgxEpc)?; - - let id = parser.get("id").ok_or(Error::ParseSgxEpcIdMissing)?; - let size = parser - .convert::("size") - .map_err(Error::ParseSgxEpc)? - .unwrap_or(ByteSized(0)) - .0; - let prefault = parser - .convert::("prefault") - .map_err(Error::ParseSgxEpc)? 
- .unwrap_or(Toggle(false)) - .0; - - Ok(SgxEpcConfig { id, size, prefault }) - } -} - impl NumaConfig { pub const SYNTAX: &'static str = "Settings related to a given NUMA node \ \"guest_numa_id=,cpus=,distances=,\ - memory_zones=,sgx_epc_sections=,\ + memory_zones=,\ pci_segments=\""; pub fn parse(numa: &str) -> Result { @@ -2178,7 +2136,6 @@ impl NumaConfig { .add("cpus") .add("distances") .add("memory_zones") - .add("sgx_epc_sections") .add("pci_segments"); parser.parse(numa).map_err(Error::ParseNuma)?; @@ -2206,11 +2163,6 @@ impl NumaConfig { .convert::("memory_zones") .map_err(Error::ParseNuma)? .map(|v| v.0); - #[cfg(target_arch = "x86_64")] - let sgx_epc_sections = parser - .convert::("sgx_epc_sections") - .map_err(Error::ParseNuma)? - .map(|v| v.0); let pci_segments = parser .convert::("pci_segments") .map_err(Error::ParseNuma)? @@ -2220,8 +2172,6 @@ impl NumaConfig { cpus, distances, memory_zones, - #[cfg(target_arch = "x86_64")] - sgx_epc_sections, pci_segments, }) } @@ -2800,14 +2750,6 @@ impl VmConfig { } } - #[cfg(target_arch = "x86_64")] - if let Some(sgx_epcs) = &self.sgx_epc { - for sgx_epc in sgx_epcs.iter() { - let id = sgx_epc.id.clone(); - Self::validate_identifier(&mut id_list, &Some(id))?; - } - } - if let Some(pci_segments) = &self.pci_segments { for pci_segment in pci_segments { pci_segment.validate(self)?; @@ -2957,21 +2899,6 @@ impl VmConfig { let platform = vm_params.platform.map(PlatformConfig::parse).transpose()?; - #[cfg(target_arch = "x86_64")] - let mut sgx_epc: Option> = None; - #[cfg(target_arch = "x86_64")] - { - if let Some(sgx_epc_list) = &vm_params.sgx_epc { - warn!("SGX support is deprecated and will be removed in a future release."); - let mut sgx_epc_config_list = Vec::new(); - for item in sgx_epc_list.iter() { - let sgx_epc_config = SgxEpcConfig::parse(item)?; - sgx_epc_config_list.push(sgx_epc_config); - } - sgx_epc = Some(sgx_epc_config_list); - } - } - let mut numa: Option> = None; if let Some(numa_list) = 
&vm_params.numa { let mut numa_config_list = Vec::new(); @@ -3058,8 +2985,6 @@ impl VmConfig { pvmemcontrol, pvpanic: vm_params.pvpanic, iommu: false, // updated in VmConfig::validate() - #[cfg(target_arch = "x86_64")] - sgx_epc, numa, watchdog: vm_params.watchdog, #[cfg(feature = "guest_debug")] @@ -3189,8 +3114,6 @@ impl Clone for VmConfig { user_devices: self.user_devices.clone(), vdpa: self.vdpa.clone(), vsock: self.vsock.clone(), - #[cfg(target_arch = "x86_64")] - sgx_epc: self.sgx_epc.clone(), numa: self.numa.clone(), pci_segments: self.pci_segments.clone(), platform: self.platform.clone(), @@ -3976,8 +3899,6 @@ mod tests { pvmemcontrol: None, pvpanic: false, iommu: false, - #[cfg(target_arch = "x86_64")] - sgx_epc: None, numa: None, watchdog: false, #[cfg(feature = "guest_debug")] @@ -4119,8 +4040,6 @@ mod tests { cpus: None, distances: None, memory_zones: None, - #[cfg(target_arch = "x86_64")] - sgx_epc_sections: None, pci_segments: None, } } @@ -4192,8 +4111,6 @@ mod tests { pvmemcontrol: None, pvpanic: false, iommu: false, - #[cfg(target_arch = "x86_64")] - sgx_epc: None, numa: None, watchdog: false, #[cfg(feature = "guest_debug")] diff --git a/vmm/src/cpu.rs b/vmm/src/cpu.rs index 0f4ac18cc..7f542015e 100644 --- a/vmm/src/cpu.rs +++ b/vmm/src/cpu.rs @@ -82,8 +82,6 @@ use crate::coredump::{ }; #[cfg(feature = "guest_debug")] use crate::gdb::{get_raw_tid, Debuggable, DebuggableError}; -#[cfg(target_arch = "x86_64")] -use crate::memory_manager::MemoryManager; use crate::seccomp_filters::{get_seccomp_filter, Thread}; #[cfg(target_arch = "x86_64")] use crate::vm::physical_bits; @@ -799,23 +797,14 @@ impl CpuManager { #[cfg(target_arch = "x86_64")] pub fn populate_cpuid( &mut self, - memory_manager: &Arc>, hypervisor: &Arc, #[cfg(feature = "tdx")] tdx: bool, ) -> Result<()> { - let sgx_epc_sections = memory_manager - .lock() - .unwrap() - .sgx_epc_region() - .as_ref() - .map(|sgx_epc_region| sgx_epc_region.epc_sections().values().cloned().collect()); - 
self.cpuid = { let phys_bits = physical_bits(hypervisor, self.config.max_phys_bits); arch::generate_common_cpuid( hypervisor, &arch::CpuidConfig { - sgx_epc_sections, phys_bits, kvm_hyperv: self.config.kvm_hyperv, #[cfg(feature = "tdx")] diff --git a/vmm/src/lib.rs b/vmm/src/lib.rs index dddfe9bd3..0378b7b18 100644 --- a/vmm/src/lib.rs +++ b/vmm/src/lib.rs @@ -900,8 +900,6 @@ impl Vmm { false, Some(&vm_migration_config.memory_manager_data), existing_memory_files, - #[cfg(target_arch = "x86_64")] - None, ) .map_err(|e| { MigratableError::MigrateReceive(anyhow!( @@ -1135,7 +1133,6 @@ impl Vmm { arch::generate_common_cpuid( &hypervisor, &arch::CpuidConfig { - sgx_epc_sections: None, phys_bits, kvm_hyperv: vm_config.lock().unwrap().cpus.kvm_hyperv, #[cfg(feature = "tdx")] @@ -1266,7 +1263,7 @@ impl Vmm { }; // We check the `CPUID` compatibility of between the source vm and destination, which is - // mostly about feature compatibility and "topology/sgx" leaves are not relevant. + // mostly about feature compatibility. 
let dest_cpuid = &{ let vm_config = &src_vm_config.lock().unwrap(); @@ -1274,7 +1271,6 @@ impl Vmm { arch::generate_common_cpuid( &self.hypervisor.clone(), &arch::CpuidConfig { - sgx_epc_sections: None, phys_bits, kvm_hyperv: vm_config.cpus.kvm_hyperv, #[cfg(feature = "tdx")] @@ -2428,8 +2424,6 @@ mod unit_tests { pvmemcontrol: None, pvpanic: false, iommu: false, - #[cfg(target_arch = "x86_64")] - sgx_epc: None, numa: None, watchdog: false, #[cfg(feature = "guest_debug")] diff --git a/vmm/src/memory_manager.rs b/vmm/src/memory_manager.rs index cdc581044..461eb24b9 100644 --- a/vmm/src/memory_manager.rs +++ b/vmm/src/memory_manager.rs @@ -19,16 +19,12 @@ use std::{ffi, result, thread}; use acpi_tables::{aml, Aml}; use anyhow::anyhow; -#[cfg(target_arch = "x86_64")] -use arch::x86_64::{SgxEpcRegion, SgxEpcSection}; use arch::RegionType; #[cfg(target_arch = "x86_64")] use devices::ioapic; #[cfg(target_arch = "aarch64")] use hypervisor::HypervisorVmError; use libc::_SC_NPROCESSORS_ONLN; -#[cfg(target_arch = "x86_64")] -use libc::{MAP_NORESERVE, MAP_POPULATE, MAP_SHARED, PROT_READ, PROT_WRITE}; use serde::{Deserialize, Serialize}; use thiserror::Error; use tracer::trace_scoped; @@ -54,8 +50,6 @@ use crate::coredump::{ CoredumpMemoryRegion, CoredumpMemoryRegions, DumpState, GuestDebuggableError, }; use crate::migration::url_to_path; -#[cfg(target_arch = "x86_64")] -use crate::vm_config::SgxEpcConfig; use crate::vm_config::{HotplugMethod, MemoryConfig, MemoryZoneConfig}; use crate::{GuestMemoryMmap, GuestRegionMmap, MEMORY_MANAGER_SNAPSHOT_ID}; @@ -68,9 +62,6 @@ const SNAPSHOT_FILENAME: &str = "memory-ranges"; #[cfg(target_arch = "x86_64")] const X86_64_IRQ_BASE: u32 = 5; -#[cfg(target_arch = "x86_64")] -const SGX_PAGE_SIZE: u64 = 1 << 12; - const HOTPLUG_COUNT: usize = 8; // Memory policy constants @@ -183,8 +174,6 @@ pub struct MemoryManager { hugepage_size: Option, prefault: bool, thp: bool, - #[cfg(target_arch = "x86_64")] - sgx_epc_region: Option, 
user_provided_zones: bool, snapshot_memory_ranges: MemoryRangeTable, memory_zones: MemoryZones, @@ -269,36 +258,6 @@ pub enum Error { #[error("Cannot create the system allocator")] CreateSystemAllocator, - /// Invalid SGX EPC section size - #[cfg(target_arch = "x86_64")] - #[error("Invalid SGX EPC section size")] - EpcSectionSizeInvalid, - - /// Failed allocating SGX EPC region - #[cfg(target_arch = "x86_64")] - #[error("Failed allocating SGX EPC region")] - SgxEpcRangeAllocation, - - /// Failed opening SGX virtual EPC device - #[cfg(target_arch = "x86_64")] - #[error("Failed opening SGX virtual EPC device")] - SgxVirtEpcOpen(#[source] io::Error), - - /// Failed setting the SGX virtual EPC section size - #[cfg(target_arch = "x86_64")] - #[error("Failed setting the SGX virtual EPC section size")] - SgxVirtEpcFileSetLen(#[source] io::Error), - - /// Failed opening SGX provisioning device - #[cfg(target_arch = "x86_64")] - #[error("Failed opening SGX provisioning device")] - SgxProvisionOpen(#[source] io::Error), - - /// Failed enabling SGX provisioning - #[cfg(target_arch = "x86_64")] - #[error("Failed enabling SGX provisioning")] - SgxEnableProvisioning(#[source] hypervisor::HypervisorVmError), - /// Failed creating a new MmapRegion instance. #[cfg(target_arch = "x86_64")] #[error("Failed creating a new MmapRegion instance")] @@ -1034,7 +993,6 @@ impl MemoryManager { #[cfg(feature = "tdx")] tdx_enabled: bool, restore_data: Option<&MemoryManagerSnapshotData>, existing_memory_files: Option>, - #[cfg(target_arch = "x86_64")] sgx_epc_config: Option>, ) -> Result>, Error> { trace_scoped!("MemoryManager::new"); @@ -1236,8 +1194,7 @@ impl MemoryManager { None }; - // If running on SGX the start of device area and RAM area may diverge but - // at this point they are next to each other. + // The start of device area and RAM area are placed next to each other. 
let end_of_ram_area = start_of_device_area.unchecked_sub(1); let ram_allocator = AddressAllocator::new(GuestAddress(0), start_of_device_area.0).unwrap(); @@ -1263,8 +1220,6 @@ impl MemoryManager { hugepages: config.hugepages, hugepage_size: config.hugepage_size, prefault: config.prefault, - #[cfg(target_arch = "x86_64")] - sgx_epc_region: None, user_provided_zones, snapshot_memory_ranges: MemoryRangeTable::default(), memory_zones, @@ -1279,11 +1234,6 @@ impl MemoryManager { thp: config.thp, }; - #[cfg(target_arch = "x86_64")] - if let Some(sgx_epc_config) = sgx_epc_config { - memory_manager.setup_sgx(sgx_epc_config)?; - } - Ok(Arc::new(Mutex::new(memory_manager))) } @@ -1311,8 +1261,6 @@ impl MemoryManager { false, Some(&mem_snapshot), None, - #[cfg(target_arch = "x86_64")] - None, )?; mm.lock() @@ -1976,121 +1924,6 @@ impl MemoryManager { self.virtio_mem_resize(id, virtio_mem_size) } - #[cfg(target_arch = "x86_64")] - pub fn setup_sgx(&mut self, sgx_epc_config: Vec) -> Result<(), Error> { - let file = OpenOptions::new() - .read(true) - .open("/dev/sgx_provision") - .map_err(Error::SgxProvisionOpen)?; - self.vm - .enable_sgx_attribute(file) - .map_err(Error::SgxEnableProvisioning)?; - - // Go over each EPC section and verify its size is a 4k multiple. At - // the same time, calculate the total size needed for the contiguous - // EPC region. 
- let mut epc_region_size = 0; - for epc_section in sgx_epc_config.iter() { - if epc_section.size == 0 { - return Err(Error::EpcSectionSizeInvalid); - } - if epc_section.size & (SGX_PAGE_SIZE - 1) != 0 { - return Err(Error::EpcSectionSizeInvalid); - } - - epc_region_size += epc_section.size; - } - - // Place the SGX EPC region on a 4k boundary between the RAM and the device area - let epc_region_start = - GuestAddress(self.start_of_device_area.0.div_ceil(SGX_PAGE_SIZE) * SGX_PAGE_SIZE); - - self.start_of_device_area = epc_region_start - .checked_add(epc_region_size) - .ok_or(Error::GuestAddressOverFlow)?; - - let mut sgx_epc_region = SgxEpcRegion::new(epc_region_start, epc_region_size as GuestUsize); - info!( - "SGX EPC region: 0x{:x} (0x{:x})", - epc_region_start.0, epc_region_size - ); - - // Each section can be memory mapped into the allocated region. - let mut epc_section_start = epc_region_start.raw_value(); - for epc_section in sgx_epc_config.iter() { - let file = OpenOptions::new() - .read(true) - .write(true) - .open("/dev/sgx_vepc") - .map_err(Error::SgxVirtEpcOpen)?; - - let prot = PROT_READ | PROT_WRITE; - let mut flags = MAP_NORESERVE | MAP_SHARED; - if epc_section.prefault { - flags |= MAP_POPULATE; - } - - // We can't use the vm-memory crate to perform the memory mapping - // here as it would try to ensure the size of the backing file is - // matching the size of the expected mapping. The /dev/sgx_vepc - // device does not work that way, it provides a file descriptor - // which is not matching the mapping size, as it's a just a way to - // let KVM know that an EPC section is being created for the guest. 
- // SAFETY: FFI call with correct arguments - let host_addr = unsafe { - libc::mmap( - std::ptr::null_mut(), - epc_section.size as usize, - prot, - flags, - file.as_raw_fd(), - 0, - ) - }; - - if host_addr == libc::MAP_FAILED { - error!( - "Could not add SGX EPC section (size 0x{:x})", - epc_section.size - ); - return Err(Error::SgxEpcRangeAllocation); - } - - info!( - "Adding SGX EPC section: 0x{:x} (0x{:x})", - epc_section_start, epc_section.size - ); - - let _mem_slot = self.create_userspace_mapping( - epc_section_start, - epc_section.size, - host_addr as u64, - false, - false, - false, - )?; - - sgx_epc_region.insert( - epc_section.id.clone(), - SgxEpcSection::new( - GuestAddress(epc_section_start), - epc_section.size as GuestUsize, - ), - ); - - epc_section_start += epc_section.size; - } - - self.sgx_epc_region = Some(sgx_epc_region); - - Ok(()) - } - - #[cfg(target_arch = "x86_64")] - pub fn sgx_epc_region(&self) -> &Option { - &self.sgx_epc_region - } - pub fn is_hardlink(f: &File) -> bool { let mut stat = std::mem::MaybeUninit::::uninit(); // SAFETY: FFI call with correct arguments @@ -2642,34 +2475,6 @@ impl Aml for MemoryManager { ) .to_aml_bytes(sink); } - - #[cfg(target_arch = "x86_64")] - { - if let Some(sgx_epc_region) = &self.sgx_epc_region { - let min = sgx_epc_region.start().raw_value(); - let max = min + sgx_epc_region.size() - 1; - // SGX EPC region - aml::Device::new( - "_SB_.EPC_".into(), - vec![ - &aml::Name::new("_HID".into(), &aml::EISAName::new("INT0E0C")), - // QWORD describing the EPC region start and size - &aml::Name::new( - "_CRS".into(), - &aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory( - aml::AddressSpaceCacheable::NotCacheable, - true, - min, - max, - None, - )]), - ), - &aml::Method::new("_STA".into(), 0, false, vec![&aml::Return::new(&0xfu8)]), - ], - ) - .to_aml_bytes(sink); - } - } } } diff --git a/vmm/src/vm.rs b/vmm/src/vm.rs index af1ddaaa4..3092c3236 100644 --- a/vmm/src/vm.rs +++ b/vmm/src/vm.rs @@ -606,7 
+606,6 @@ impl Vm { .lock() .unwrap() .populate_cpuid( - &memory_manager, &hypervisor, #[cfg(feature = "tdx")] tdx_enabled, @@ -971,24 +970,6 @@ impl Vm { } } - #[cfg(target_arch = "x86_64")] - if let Some(sgx_epc_sections) = &config.sgx_epc_sections { - if let Some(sgx_epc_region) = mm.sgx_epc_region() { - let mm_sections = sgx_epc_region.epc_sections(); - for sgx_epc_section in sgx_epc_sections.iter() { - if let Some(mm_section) = mm_sections.get(sgx_epc_section) { - node.sgx_epc_sections.push(mm_section.clone()); - } else { - error!("Unknown SGX EPC section '{}'", sgx_epc_section); - return Err(Error::InvalidNumaConfig); - } - } - } else { - error!("Missing SGX EPC region"); - return Err(Error::InvalidNumaConfig); - } - } - numa_nodes.insert(config.guest_numa_id, node); } } @@ -1056,9 +1037,6 @@ impl Vm { ) .map_err(Error::MemoryManager)? } else { - #[cfg(target_arch = "x86_64")] - let sgx_epc_config = vm_config.lock().unwrap().sgx_epc.clone(); - MemoryManager::new( vm.clone(), &vm_config.lock().unwrap().memory.clone(), @@ -1068,8 +1046,6 @@ impl Vm { tdx_enabled, None, None, - #[cfg(target_arch = "x86_64")] - sgx_epc_config, ) .map_err(Error::MemoryManager)? 
}; @@ -1420,13 +1396,6 @@ impl Vm { let boot_vcpus = self.cpu_manager.lock().unwrap().boot_vcpus(); let rsdp_addr = Some(rsdp_addr); - let sgx_epc_region = self - .memory_manager - .lock() - .unwrap() - .sgx_epc_region() - .as_ref() - .cloned(); let serial_number = self .config @@ -1466,7 +1435,6 @@ impl Vm { boot_vcpus, entry_addr.setup_header, rsdp_addr, - sgx_epc_region, serial_number.as_deref(), uuid.as_deref(), oem_strings.as_deref(), @@ -2917,7 +2885,6 @@ impl Snapshottable for Vm { arch::generate_common_cpuid( &self.hypervisor, &arch::CpuidConfig { - sgx_epc_sections: None, phys_bits, kvm_hyperv: self.config.lock().unwrap().cpus.kvm_hyperv, #[cfg(feature = "tdx")] diff --git a/vmm/src/vm_config.rs b/vmm/src/vm_config.rs index cf1f61e05..9c149d05f 100644 --- a/vmm/src/vm_config.rs +++ b/vmm/src/vm_config.rs @@ -671,16 +671,6 @@ impl Default for IvshmemConfig { } } -#[cfg(target_arch = "x86_64")] -#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct SgxEpcConfig { - pub id: String, - #[serde(default)] - pub size: u64, - #[serde(default)] - pub prefault: bool, -} - #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct NumaDistance { #[serde(default)] @@ -699,9 +689,6 @@ pub struct NumaConfig { pub distances: Option>, #[serde(default)] pub memory_zones: Option>, - #[cfg(target_arch = "x86_64")] - #[serde(default)] - pub sgx_epc_sections: Option>, #[serde(default)] pub pci_segments: Option>, } @@ -941,8 +928,6 @@ pub struct VmConfig { pub pvpanic: bool, #[serde(default)] pub iommu: bool, - #[cfg(target_arch = "x86_64")] - pub sgx_epc: Option>, pub numa: Option>, #[serde(default)] pub watchdog: bool,