panvk: Implement VK_EXT_map_memory_placed

Allow applications to control the virtual address where device memory
is mapped by passing MAP_FIXED to mmap via pan_kmod_bo_mmap().

Support VK_MEMORY_UNMAP_RESERVE_BIT_EXT by replacing the mapping with
a PROT_NONE anonymous mapping to keep the address range reserved.

Only memoryMapPlaced and memoryUnmapReserve are advertised.
memoryMapRangePlaced is not supported because the DRM GEM mmap offset
mechanism requires mapping from offset 0.

Signed-off-by: Christian Gmeiner <cgmeiner@igalia.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Erik Faye-Lund <erik.faye-lund@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/40315>
This commit is contained in:
Christian Gmeiner 2026-03-17 11:11:46 +01:00 committed by Marge Bot
parent dcd2450a0b
commit c81dc438e5
4 changed files with 50 additions and 35 deletions

View file

@ -726,7 +726,7 @@ Khronos extensions that are not part of any Vulkan version:
VK_EXT_swapchain_colorspace DONE (anv, hk, lvp, nvk, radv, tu, v3dv, vn)
VK_EXT_depth_clamp_zero_one DONE (anv, nvk, panvk, pvr, radv, tu, v3dv/vc7+, vn)
VK_INTEL_shader_integer_functions2 DONE (anv, hasvk, radv)
VK_EXT_map_memory_placed DONE (anv, hk, nvk, pvr, radv, tu, vn)
VK_EXT_map_memory_placed DONE (anv, hk, nvk, panvk, pvr, radv, tu, vn)
VK_MESA_image_alignment_control DONE (anv, nvk, radv)
VK_EXT_legacy_dithering DONE (anv, panvk, tu, vn)
VK_QCOM_fragment_density_map_offset DONE (tu)

View file

@ -32,3 +32,4 @@ cl_khr_subgroup_rotate on asahi, llvmpipe and zink
VK_EXT_nested_command_buffer on panvk
VK_VALVE_mutable_descriptor_type on panvk
VK_EXT_shader_stencil_export on panvk
VK_EXT_map_memory_placed on panvk

View file

@ -52,33 +52,6 @@ panvk_memory_emit_report(struct panvk_device *device,
(uintptr_t)(mem), mem->vk.memory_type_index);
}
/* Return a CPU mapping for the memory object's BO, creating and caching it
 * on first use. Returns NULL if the kernel mmap fails; subsequent calls
 * reuse the cached address stored in mem->addr.host. */
static void *
panvk_memory_mmap(struct panvk_device_memory *mem)
{
   /* Fast path: mapping already established. */
   if (mem->addr.host)
      return mem->addr.host;

   void *addr = pan_kmod_bo_mmap(mem->bo, 0, pan_kmod_bo_size(mem->bo),
                                 PROT_READ | PROT_WRITE, MAP_SHARED, NULL);
   if (addr == MAP_FAILED)
      return NULL;

   mem->addr.host = addr;
   return addr;
}
/* Tear down the cached CPU mapping of the memory object, if one exists.
 * os_munmap() is asserted to succeed; mem->addr.host is cleared so a later
 * panvk_memory_mmap() re-establishes the mapping. */
static void
panvk_memory_munmap(struct panvk_device_memory *mem)
{
   if (!mem->addr.host)
      return;

   ASSERTED int ret =
      os_munmap((void *)mem->addr.host, pan_kmod_bo_size(mem->bo));
   assert(!ret);
   mem->addr.host = NULL;
}
VKAPI_ATTR VkResult VKAPI_CALL
panvk_AllocateMemory(VkDevice _device,
const VkMemoryAllocateInfo *pAllocateInfo,
@ -220,8 +193,8 @@ panvk_AllocateMemory(VkDevice _device,
if (device->debug.decode_ctx) {
if (PANVK_DEBUG(DUMP) || PANVK_DEBUG(TRACE)) {
void *cpu = pan_kmod_bo_mmap(mem->bo, 0, pan_kmod_bo_size(mem->bo),
PROT_READ | PROT_WRITE, MAP_SHARED, NULL);
void *cpu =
pan_kmod_bo_mmap(mem->bo, PROT_READ | PROT_WRITE, MAP_SHARED, NULL);
if (cpu != MAP_FAILED)
mem->debug.host_mapping = cpu;
else
@ -282,7 +255,10 @@ panvk_FreeMemory(VkDevice _device, VkDeviceMemory _mem,
os_munmap(mem->debug.host_mapping, pan_kmod_bo_size(mem->bo));
}
panvk_memory_munmap(mem);
if (mem->addr.host) {
ASSERTED int ret = os_munmap(mem->addr.host, pan_kmod_bo_size(mem->bo));
assert(!ret);
}
struct pan_kmod_vm_op op = {
.type = PAN_KMOD_VM_OP_TYPE_UNMAP,
@ -351,12 +327,28 @@ panvk_MapMemory2KHR(VkDevice _device, const VkMemoryMapInfoKHR *pMemoryMapInfo,
return panvk_errorf(device, VK_ERROR_MEMORY_MAP_FAILED,
"Memory object already mapped.");
void *addr = panvk_memory_mmap(mem);
if (!addr)
void *host_addr = NULL;
int map_flags = MAP_SHARED;
if (pMemoryMapInfo->flags & VK_MEMORY_MAP_PLACED_BIT_EXT) {
const VkMemoryMapPlacedInfoEXT *placed_info = vk_find_struct_const(
pMemoryMapInfo->pNext, MEMORY_MAP_PLACED_INFO_EXT);
assert(placed_info != NULL);
assert(offset == 0 && size == mem->bo->size);
host_addr = placed_info->pPlacedAddress;
map_flags |= MAP_FIXED;
}
void *addr =
pan_kmod_bo_mmap(mem->bo, PROT_READ | PROT_WRITE, map_flags, host_addr);
if (addr == MAP_FAILED)
return panvk_errorf(device, VK_ERROR_MEMORY_MAP_FAILED,
"Memory object couldn't be mapped.");
*ppData = addr + offset;
assert(!host_addr || addr == host_addr);
mem->addr.host = addr;
*ppData = (char *)addr + offset;
return VK_SUCCESS;
}
@ -364,9 +356,22 @@ VKAPI_ATTR VkResult VKAPI_CALL
panvk_UnmapMemory2KHR(VkDevice _device,
const VkMemoryUnmapInfoKHR *pMemoryUnmapInfo)
{
VK_FROM_HANDLE(panvk_device, device, _device);
VK_FROM_HANDLE(panvk_device_memory, mem, pMemoryUnmapInfo->memory);
panvk_memory_munmap(mem);
if (pMemoryUnmapInfo->flags & VK_MEMORY_UNMAP_RESERVE_BIT_EXT) {
void *reserved =
os_mmap(mem->addr.host, pan_kmod_bo_size(mem->bo), PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
if (reserved == MAP_FAILED)
return panvk_errorf(device, VK_ERROR_MEMORY_MAP_FAILED,
"Failed to reserve VA on unmap.");
} else {
ASSERTED int ret = os_munmap(mem->addr.host, pan_kmod_bo_size(mem->bo));
assert(!ret);
}
mem->addr.host = NULL;
return VK_SUCCESS;
}

View file

@ -164,6 +164,7 @@ panvk_per_arch(get_physical_device_extensions)(
.EXT_legacy_dithering = true,
.EXT_line_rasterization = true,
.EXT_load_store_op_none = true,
.EXT_map_memory_placed = true,
.EXT_nested_command_buffer = PAN_ARCH >= 10,
.EXT_memory_budget = true,
.EXT_non_seamless_cube_map = true,
@ -568,6 +569,11 @@ panvk_per_arch(get_physical_device_features)(
.nestedCommandBufferRendering = PAN_ARCH >= 10,
.nestedCommandBufferSimultaneousUse = PAN_ARCH >= 10,
/* VK_EXT_map_memory_placed */
.memoryMapPlaced = true,
.memoryMapRangePlaced = false,
.memoryUnmapReserve = true,
/* VK_EXT_non_seamless_cube_map */
.nonSeamlessCubeMap = true,
@ -1119,6 +1125,9 @@ panvk_per_arch(get_physical_device_properties)(
/* VK_EXT_nested_command_buffer */
.maxCommandBufferNestingLevel = 5,
/* VK_EXT_map_memory_placed */
.minPlacedMemoryMapAlignment = os_page_size,
/* VK_EXT_provoking_vertex */
.provokingVertexModePerPipeline = false,
.transformFeedbackPreservesTriangleFanProvokingVertex = false,