turnip: virtio: fix iova leak upon found already imported dmabuf
There is a success path when an already-imported dmabuf is found, in which
the previously allocated iova is never cleaned up. This change defers the
iova allocation until the lookup misses, and also prepares for a later fix
of the racy dmabuf re-import.
Also document a potential leak on the error path, since without refcounting
it is impossible to tell whether a gem handle should be closed or not.
Fixes: f17c5297d7 ("tu: Add virtgpu support")
Signed-off-by: Yiwei Zhang <zzyiwei@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/29093>
This commit is contained in:
parent
585a87ae53
commit
6ca192f586
1 changed file with 15 additions and 13 deletions
|
|
@ -675,11 +675,6 @@ virtio_bo_init_dmabuf(struct tu_device *dev,
|
||||||
/* iova allocation needs to consider the object's *real* size: */
|
/* iova allocation needs to consider the object's *real* size: */
|
||||||
size = real_size;
|
size = real_size;
|
||||||
|
|
||||||
uint64_t iova;
|
|
||||||
result = virtio_allocate_userspace_iova(dev, size, 0, TU_BO_ALLOC_NO_FLAGS, &iova);
|
|
||||||
if (result != VK_SUCCESS)
|
|
||||||
return result;
|
|
||||||
|
|
||||||
/* Importing the same dmabuf several times would yield the same
|
/* Importing the same dmabuf several times would yield the same
|
||||||
* gem_handle. Thus there could be a race when destroying
|
* gem_handle. Thus there could be a race when destroying
|
||||||
* BO and importing the same dmabuf from different threads.
|
* BO and importing the same dmabuf from different threads.
|
||||||
|
|
@ -689,6 +684,7 @@ virtio_bo_init_dmabuf(struct tu_device *dev,
|
||||||
u_rwlock_wrlock(&dev->dma_bo_lock);
|
u_rwlock_wrlock(&dev->dma_bo_lock);
|
||||||
|
|
||||||
uint32_t handle, res_id;
|
uint32_t handle, res_id;
|
||||||
|
uint64_t iova;
|
||||||
|
|
||||||
handle = vdrm_dmabuf_to_handle(vdrm, prime_fd);
|
handle = vdrm_dmabuf_to_handle(vdrm, prime_fd);
|
||||||
if (!handle) {
|
if (!handle) {
|
||||||
|
|
@ -698,6 +694,7 @@ virtio_bo_init_dmabuf(struct tu_device *dev,
|
||||||
|
|
||||||
res_id = vdrm_handle_to_res_id(vdrm, handle);
|
res_id = vdrm_handle_to_res_id(vdrm, handle);
|
||||||
if (!res_id) {
|
if (!res_id) {
|
||||||
|
/* XXX gem_handle potentially leaked here since no refcnt */
|
||||||
result = vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
|
result = vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
|
|
@ -714,21 +711,26 @@ virtio_bo_init_dmabuf(struct tu_device *dev,
|
||||||
|
|
||||||
bo->res_id = res_id;
|
bo->res_id = res_id;
|
||||||
|
|
||||||
result = tu_bo_init(dev, bo, handle, size, iova,
|
result = virtio_allocate_userspace_iova(dev, size, 0, TU_BO_ALLOC_NO_FLAGS,
|
||||||
TU_BO_ALLOC_NO_FLAGS, "dmabuf");
|
&iova);
|
||||||
if (result != VK_SUCCESS)
|
if (result != VK_SUCCESS) {
|
||||||
memset(bo, 0, sizeof(*bo));
|
vdrm_bo_close(dev->vdev->vdrm, handle);
|
||||||
else
|
goto out_unlock;
|
||||||
*out_bo = bo;
|
}
|
||||||
|
|
||||||
out_unlock:
|
result =
|
||||||
u_rwlock_wrunlock(&dev->dma_bo_lock);
|
tu_bo_init(dev, bo, handle, size, iova, TU_BO_ALLOC_NO_FLAGS, "dmabuf");
|
||||||
if (result != VK_SUCCESS) {
|
if (result != VK_SUCCESS) {
|
||||||
mtx_lock(&dev->vma_mutex);
|
mtx_lock(&dev->vma_mutex);
|
||||||
util_vma_heap_free(&dev->vma, iova, size);
|
util_vma_heap_free(&dev->vma, iova, size);
|
||||||
mtx_unlock(&dev->vma_mutex);
|
mtx_unlock(&dev->vma_mutex);
|
||||||
|
memset(bo, 0, sizeof(*bo));
|
||||||
|
} else {
|
||||||
|
*out_bo = bo;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
out_unlock:
|
||||||
|
u_rwlock_wrunlock(&dev->dma_bo_lock);
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue