vmm: Unify address space allocation
It seems like address allocation has been spread across different files and different locations for x86 vs ARM. This makes it hard to follow the code. Thus, unify it in a single location which satisfies all the requirements. Signed-off-by: Jinank Jain <jinankjain@microsoft.com>
This commit is contained in:
parent
4e48f429eb
commit
034aa514d7
2 changed files with 15 additions and 42 deletions
|
|
@ -952,7 +952,7 @@ impl MemoryManager {
|
|||
}
|
||||
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
fn add_uefi_flash(&mut self) -> Result<(), Error> {
|
||||
pub fn add_uefi_flash(&mut self) -> Result<(), Error> {
|
||||
// On AArch64, the UEFI binary requires a flash device at address 0.
|
||||
// 4 MiB memory is mapped to simulate the flash.
|
||||
let uefi_mem_slot = self.allocate_memory_slot();
|
||||
|
|
@ -1197,6 +1197,7 @@ impl MemoryManager {
|
|||
let end_of_ram_area = start_of_device_area.unchecked_sub(1);
|
||||
let ram_allocator = AddressAllocator::new(GuestAddress(0), start_of_device_area.0).unwrap();
|
||||
|
||||
#[allow(unused_mut)]
|
||||
let mut memory_manager = MemoryManager {
|
||||
boot_guest_memory,
|
||||
guest_memory,
|
||||
|
|
@ -1234,23 +1235,6 @@ impl MemoryManager {
|
|||
thp: config.thp,
|
||||
};
|
||||
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
{
|
||||
// For Aarch64 we cannot lazily allocate the address space like we
|
||||
// do for x86, because while restoring a VM from snapshot we would
|
||||
// need the address space to be allocated to properly restore VGIC.
|
||||
// And the restore of VGIC happens before we attempt to run the vCPUs
|
||||
// for the first time, thus we need to allocate the address space
|
||||
// beforehand.
|
||||
memory_manager.allocate_address_space()?;
|
||||
memory_manager.add_uefi_flash()?;
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "riscv64")]
|
||||
{
|
||||
memory_manager.allocate_address_space()?;
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
if let Some(sgx_epc_config) = sgx_epc_config {
|
||||
memory_manager.setup_sgx(sgx_epc_config)?;
|
||||
|
|
|
|||
|
|
@ -646,6 +646,19 @@ impl Vm {
|
|||
}
|
||||
}
|
||||
|
||||
memory_manager
|
||||
.lock()
|
||||
.unwrap()
|
||||
.allocate_address_space()
|
||||
.map_err(Error::MemoryManager)?;
|
||||
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
memory_manager
|
||||
.lock()
|
||||
.unwrap()
|
||||
.add_uefi_flash()
|
||||
.map_err(Error::MemoryManager)?;
|
||||
|
||||
// Loading the igvm file is pushed down here because
|
||||
// igvm parser needs cpu_manager to retrieve cpuid leaf.
|
||||
// Currently, Microsoft Hypervisor does not provide any
|
||||
|
|
@ -2320,18 +2333,6 @@ impl Vm {
|
|||
#[cfg(target_arch = "riscv64")]
|
||||
self.configure_system().unwrap();
|
||||
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
// Note: For x86, always call this function before invoking start boot vcpus.
|
||||
// Otherwise guest would fail to boot because we haven't created the
|
||||
// userspace mappings to update the hypervisor about the memory mappings.
|
||||
// These mappings must be created before we start the vCPU threads for
|
||||
// the very first time.
|
||||
self.memory_manager
|
||||
.lock()
|
||||
.unwrap()
|
||||
.allocate_address_space()
|
||||
.map_err(Error::MemoryManager)?;
|
||||
|
||||
#[cfg(feature = "tdx")]
|
||||
if let Some(hob_address) = hob_address {
|
||||
// With the HOB address extracted the vCPUs can have
|
||||
|
|
@ -2368,18 +2369,6 @@ impl Vm {
|
|||
pub fn restore(&mut self) -> Result<()> {
|
||||
event!("vm", "restoring");
|
||||
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
// Note: For x86, always call this function before invoking start boot vcpus.
|
||||
// Otherwise guest would fail to boot because we haven't created the
|
||||
// userspace mappings to update the hypervisor about the memory mappings.
|
||||
// These mappings must be created before we start the vCPU threads for
|
||||
// the very first time for the restored VM.
|
||||
self.memory_manager
|
||||
.lock()
|
||||
.unwrap()
|
||||
.allocate_address_space()
|
||||
.map_err(Error::MemoryManager)?;
|
||||
|
||||
// Now we can start all vCPUs from here.
|
||||
self.cpu_manager
|
||||
.lock()
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue