intel: Add virtio-gpu native context

Add base preparatory code for the virtio-intel native DRM context.
Virtio-intel works by passing ioctls from the guest to the host for
execution, utilizing the available VirtIO-GPU infrastructure.

This patch adds initial experimental native context support using the
i915 KMD UAPI.

Compile Mesa with -Dintel-virtio-experimental=true to enable virtio-intel
native context support.
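For example (a minimal sketch; the build directory name and any other
configure options are up to you), the backend can be enabled with:

    meson setup build -Dintel-virtio-experimental=true
    ninja -C build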

Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@intel.com>
Acked-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/29870>

@@ -338,11 +338,17 @@ if host_machine.cpu_family() == 'x86' and with_glx_read_only_text
pre_args += ['-DGLX_X86_READONLY_TEXT']
endif
with_intel_virtio = get_option('intel-virtio-experimental')
if with_intel_virtio
pre_args += '-DHAVE_INTEL_VIRTIO'
endif
with_vdrm = [
with_amdgpu_virtio,
freedreno_kmds.contains('virtio'),
with_gallium_asahi,
with_asahi_vk,
with_intel_virtio,
].contains(true)
with_dri = false


@@ -872,3 +872,10 @@ option(
value : false,
description : 'Manual override switch to enable LTO, which is unsupported due to being broken. WARNING: This option may break your driver randomly!'
)
option(
'intel-virtio-experimental',
type : 'boolean',
value : false,
description : 'use experimental virtio backend for intel driver',
)


@@ -71,6 +71,10 @@ intel_48b_address(uint64_t v)
return (uint64_t)(v << shift) >> shift;
}
#ifdef HAVE_INTEL_VIRTIO
extern int intel_virtio_ioctl(int fd, unsigned long request, void *arg);
#endif
/**
* Call ioctl, restarting if it is interrupted
*/
@@ -80,7 +84,11 @@ intel_ioctl(int fd, unsigned long request, void *arg)
int ret;
do {
#ifdef HAVE_INTEL_VIRTIO
ret = intel_virtio_ioctl(fd, request, arg);
#else
ret = ioctl(fd, request, arg);
#endif
} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
return ret;
}


@@ -35,6 +35,7 @@
#include "intel_wa.h"
#include "i915/intel_device_info.h"
#include "xe/intel_device_info.h"
#include "virtio/intel_virtio.h"
#include "common/intel_gem.h"
#include "util/u_debug.h"
@@ -1866,29 +1867,39 @@ intel_get_device_info_from_fd(int fd, struct intel_device_info *devinfo, int min
* rely on an ioctl to get PCI device id for the next step when skipping
* this drm query.
*/
if (is_intel_virtio_fd(fd)) {
if (!intel_virtio_get_pci_device_info(fd, devinfo))
return false;
if (!intel_device_info_init_common(devinfo->pci_device_id,
false, devinfo))
return false;
devinfo->is_virtio = true;
} else {
drmDevicePtr drmdev = NULL;
if (drmGetDevice2(fd, DRM_DEVICE_GET_PCI_REVISION, &drmdev)) {
mesa_loge("Failed to query drm device.");
return false;
}
if (!intel_device_info_init_common(drmdev->deviceinfo.pci->device_id,
false, devinfo)) {
drmFreeDevice(&drmdev);
return false;
}
devinfo->pci_domain = drmdev->businfo.pci->domain;
devinfo->pci_bus = drmdev->businfo.pci->bus;
devinfo->pci_dev = drmdev->businfo.pci->dev;
devinfo->pci_func = drmdev->businfo.pci->func;
devinfo->pci_device_id = drmdev->deviceinfo.pci->device_id;
devinfo->pci_revision_id = drmdev->deviceinfo.pci->revision_id;
drmFreeDevice(&drmdev);
}
if ((min_ver > 0 && devinfo->ver < min_ver) || (max_ver > 0 && devinfo->ver > max_ver))
return false;
devinfo->no_hw = debug_get_bool_option("INTEL_NO_HW", false);
devinfo->kmd_type = intel_get_kmd_type(fd);


@@ -483,5 +483,7 @@ Struct("intel_device_info",
Member("intel_device_info_mem_desc", "mem"),
Member("intel_device_info_pat_desc", "pat"),
Member("intel_cooperative_matrix_configuration",
"cooperative_matrix_configurations", array=16)]
"cooperative_matrix_configurations", array=16),
Member("bool", "is_virtio")]
)


@@ -24,6 +24,8 @@
#include <string.h>
#include "util/libdrm.h"
#include "virtio/intel_virtio.h"
#include "intel_kmd.h"
enum intel_kmd_type
@@ -35,11 +37,16 @@ intel_get_kmd_type(int fd)
if (!version)
return type;
/*
* For virtio, version->name will be either "virtio_gpu" or "i915",
* depending on whether vtest is used.
*/
if (strcmp(version->name, "i915") == 0 || is_intel_virtio_fd(fd))
type = INTEL_KMD_TYPE_I915;
else if (strcmp(version->name, "xe") == 0)
type = INTEL_KMD_TYPE_XE;
drmFreeVersion(version);
return type;
}


@@ -18,6 +18,25 @@ files_libintel_dev = files(
'intel_kmd.h',
)
inc_intel_dev_virtio = []
link_intel_dev_virtio = []
if with_intel_virtio
files_libintel_dev += files(
'virtio/intel_virtio_bo.c',
'virtio/intel_virtio_device.c',
'virtio/i915_virtio_ccmd.c',
'virtio/i915_virtio_ccmd_execbuf.c',
)
inc_intel_dev_virtio += [
inc_virtio_gpu,
inc_virtio_vdrm,
]
link_intel_dev_virtio += [ libvdrm ]
endif
intel_dev_wa_src = custom_target('intel_wa.[ch]',
input : ['gen_wa_helpers.py', 'mesa_defs.json'],
output : ['intel_wa.h', 'intel_wa.c'],
@@ -43,11 +62,12 @@ idep_intel_dev_info_gen = declare_dependency(sources : [intel_dev_info_gen_src[0
libintel_dev = static_library(
'intel_dev',
[files_libintel_dev, sha1_h, [intel_dev_wa_src]],
include_directories : [inc_include, inc_src, inc_intel, inc_intel_dev_virtio],
dependencies : [dep_libdrm, idep_mesautil, idep_intel_dev_wa,
idep_intel_dev_info_gen],
c_args : [no_override_init_args],
gnu_symbol_visibility : 'hidden',
link_with : [link_intel_dev_virtio],
)
idep_intel_dev = declare_dependency(


@@ -0,0 +1,220 @@
/*
* Copyright 2025 Collabora, Ltd.
* SPDX-License-Identifier: MIT
*/
#ifndef I915_VIRTIO_PROTO_H_
#define I915_VIRTIO_PROTO_H_
#ifdef __GNUC__
# pragma GCC diagnostic push
# pragma GCC diagnostic error "-Wpadded"
#endif
#define I915_STATIC_ASSERT_SIZE(t) \
static_assert(sizeof(struct t) % 8 == 0, "sizeof(struct " #t ") not multiple of 8"); \
static_assert(alignof(struct t) <= 8, "alignof(struct " #t ") too large");
#define I915_DEFINE_CAST(parent, child) \
I915_STATIC_ASSERT_SIZE(child) \
DEFINE_CAST(parent, child)
enum i915_ccmd {
I915_CCMD_IOCTL_SIMPLE = 1,
I915_CCMD_GETPARAM,
I915_CCMD_QUERYPARAM,
I915_CCMD_GEM_CREATE,
I915_CCMD_GEM_CREATE_EXT,
I915_CCMD_GEM_CONTEXT_CREATE,
I915_CCMD_GEM_EXECBUFFER2,
I915_CCMD_GEM_SET_MMAP_MODE,
I915_CCMD_GEM_BUSY,
};
#define I915_CCMD(_cmd, _len) (struct vdrm_ccmd_req){ \
.cmd = I915_CCMD_##_cmd, \
.len = (_len), \
}
/**
* Defines the layout of the shmem buffer used for host->guest communication.
*/
struct i915_shmem {
struct vdrm_shmem base;
/**
* Bitmask of banned GEM context IDs.
*/
uint64_t banned_ctx_mask;
};
DEFINE_CAST(vdrm_shmem, i915_shmem)
/*
* I915_CCMD_IOCTL_SIMPLE
*/
struct i915_ccmd_ioctl_simple_req {
struct vdrm_ccmd_req hdr;
uint32_t cmd;
uint32_t pad;
uint8_t payload[];
};
I915_DEFINE_CAST(vdrm_ccmd_req, i915_ccmd_ioctl_simple_req)
struct i915_ccmd_ioctl_simple_rsp {
struct vdrm_ccmd_rsp hdr;
int32_t ret;
uint8_t payload[];
};
I915_STATIC_ASSERT_SIZE(i915_ccmd_ioctl_simple_rsp)
/*
* I915_CCMD_GETPARAM
*/
struct i915_ccmd_getparam_req {
struct vdrm_ccmd_req hdr;
uint32_t param;
uint32_t value;
};
I915_DEFINE_CAST(vdrm_ccmd_req, i915_ccmd_getparam_req)
struct i915_ccmd_getparam_rsp {
struct vdrm_ccmd_rsp hdr;
uint32_t pad;
int32_t ret;
uint32_t value;
};
I915_STATIC_ASSERT_SIZE(i915_ccmd_getparam_rsp)
/*
* I915_CCMD_QUERYPARAM
*/
struct i915_ccmd_queryparam_req {
struct vdrm_ccmd_req hdr;
uint32_t query_id;
uint32_t length;
uint32_t flags;
uint32_t pad;
};
I915_DEFINE_CAST(vdrm_ccmd_req, i915_ccmd_queryparam_req)
struct i915_ccmd_queryparam_rsp {
struct vdrm_ccmd_rsp hdr;
uint32_t pad;
int32_t ret;
int32_t length;
uint8_t payload[];
};
I915_STATIC_ASSERT_SIZE(i915_ccmd_queryparam_rsp)
/*
* I915_CCMD_GEM_CONTEXT_CREATE
*/
struct i915_ccmd_gem_context_create_req {
struct vdrm_ccmd_req hdr;
uint32_t flags;
uint32_t params_size;
uint8_t payload[];
};
I915_DEFINE_CAST(vdrm_ccmd_req, i915_ccmd_gem_context_create_req)
struct i915_ccmd_gem_context_create_rsp {
struct vdrm_ccmd_rsp hdr;
uint32_t pad;
int32_t ret;
uint32_t ctx_id;
};
I915_STATIC_ASSERT_SIZE(i915_ccmd_gem_context_create_rsp)
/*
* I915_CCMD_GEM_CREATE
*/
struct i915_ccmd_gem_create_req {
struct vdrm_ccmd_req hdr;
uint64_t size;
uint32_t blob_id;
uint32_t pad;
};
I915_DEFINE_CAST(vdrm_ccmd_req, i915_ccmd_gem_create_req)
/*
* I915_CCMD_GEM_CREATE_EXT
*/
struct i915_ccmd_gem_create_ext_req {
struct vdrm_ccmd_req hdr;
uint64_t size;
uint32_t blob_id;
uint32_t gem_flags;
uint32_t ext_size;
uint32_t pad;
uint8_t payload[];
};
I915_DEFINE_CAST(vdrm_ccmd_req, i915_ccmd_gem_create_ext_req)
/*
* I915_CCMD_GEM_EXECBUFFER2
*/
struct i915_ccmd_gem_execbuffer2_req {
struct vdrm_ccmd_req hdr;
uint64_t flags;
uint64_t context_id;
uint32_t buffer_count;
uint32_t batch_start_offset;
uint32_t batch_len;
uint32_t relocs_count;
uint8_t payload[];
};
I915_DEFINE_CAST(vdrm_ccmd_req, i915_ccmd_gem_execbuffer2_req)
struct i915_ccmd_gem_execbuffer2_rsp {
struct vdrm_ccmd_rsp hdr;
int32_t ret;
};
I915_STATIC_ASSERT_SIZE(i915_ccmd_gem_execbuffer2_rsp)
/*
* I915_CCMD_GEM_SET_MMAP_MODE
*/
struct i915_ccmd_gem_set_mmap_mode_req {
struct vdrm_ccmd_req hdr;
uint32_t res_id;
uint32_t flags;
};
I915_DEFINE_CAST(vdrm_ccmd_req, i915_ccmd_gem_set_mmap_mode_req)
/*
* I915_CCMD_GEM_BUSY
*/
struct i915_ccmd_gem_busy_req {
struct vdrm_ccmd_req hdr;
uint32_t res_id;
uint32_t pad;
};
I915_DEFINE_CAST(vdrm_ccmd_req, i915_ccmd_gem_busy_req)
struct i915_ccmd_gem_busy_rsp {
struct vdrm_ccmd_rsp hdr;
uint32_t pad;
int32_t ret;
int32_t busy;
};
I915_STATIC_ASSERT_SIZE(i915_ccmd_gem_busy_rsp)
#ifdef __GNUC__
# pragma GCC diagnostic pop
#endif
#endif /* I915_VIRTIO_PROTO_H_ */


@@ -0,0 +1,866 @@
/*
* Copyright 2024 Collabora, Ltd.
* SPDX-License-Identifier: MIT
*/
#include "intel_virtio_priv.h"
#include "i915_proto.h"
static int
i915_virtio_simple_ioctl(struct intel_virtio_device *dev,
unsigned cmd, void *_req)
{
unsigned req_len = sizeof(struct i915_ccmd_ioctl_simple_req);
unsigned rsp_len = sizeof(struct i915_ccmd_ioctl_simple_rsp);
bool sync = !!(cmd & IOC_OUT);
int err;
req_len += _IOC_SIZE(cmd);
if (cmd & IOC_OUT)
rsp_len += _IOC_SIZE(cmd);
uint8_t buf[req_len];
struct i915_ccmd_ioctl_simple_req *req = (void *)(uintptr_t)buf;
struct i915_ccmd_ioctl_simple_rsp *rsp;
req->hdr = I915_CCMD(IOCTL_SIMPLE, req_len);
req->cmd = cmd;
memcpy(req->payload, _req, _IOC_SIZE(cmd));
rsp = vdrm_alloc_rsp(dev->vdrm, &req->hdr, rsp_len);
err = vdrm_send_req(dev->vdrm, &req->hdr, sync);
if (err)
return errno;
if (cmd & IOC_OUT) {
memcpy(_req, rsp->payload, _IOC_SIZE(cmd));
return rsp->ret;
}
return 0;
}
static int
i915_virtio_queryparam(struct intel_virtio_device *dev,
struct drm_i915_query *query)
{
struct drm_i915_query_item *item = (void *)(uintptr_t)query->items_ptr;
struct i915_ccmd_queryparam_rsp *rsp;
struct i915_ccmd_queryparam_req req;
int err;
if (query->num_items != 1) {
mesa_loge("unsupported number of query items");
return EINVAL;
}
req.hdr = I915_CCMD(QUERYPARAM, sizeof(req));
req.query_id = item->query_id;
req.length = item->length;
req.flags = item->flags;
rsp = vdrm_alloc_rsp(dev->vdrm, &req.hdr, sizeof(*rsp) + item->length);
err = vdrm_send_req(dev->vdrm, &req.hdr, true);
if (err)
return errno;
if (item->data_ptr && rsp->length > 0)
memcpy((void *)(uintptr_t)item->data_ptr, rsp->payload, rsp->length);
item->length = rsp->length;
return rsp->ret;
}
static int
i915_virtio_getparam(struct intel_virtio_device *dev,
struct drm_i915_getparam *gp)
{
struct i915_ccmd_getparam_rsp *rsp;
struct i915_ccmd_getparam_req req;
int err;
req.hdr = I915_CCMD(GETPARAM, sizeof(req));
req.param = gp->param;
rsp = vdrm_alloc_rsp(dev->vdrm, &req.hdr, sizeof(*rsp));
err = vdrm_send_req(dev->vdrm, &req.hdr, true);
if (err)
return errno;
*gp->value = rsp->value;
return rsp->ret;
}
static int
i915_virtio_gem_create(struct intel_virtio_device *dev,
struct drm_i915_gem_create *create)
{
uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE |
VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
if (dev->vdrm->supports_cross_device)
blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;
struct i915_ccmd_gem_create_req req = {
.hdr = I915_CCMD(GEM_CREATE, sizeof(req)),
.size = create->size,
};
/* Tunneled cmds are processed separately on the host side,
* before the renderer->get_blob() callback. The blob_id
* is used to link the created bo to the get_blob() call.
*/
req.blob_id = p_atomic_inc_return(&dev->next_blob_id);
int ret = vdrm_bo_create(dev->vdrm, create->size, blob_flags,
req.blob_id, &req.hdr);
if (!ret)
return EINVAL;
create->handle = ret;
return 0;
}
static int
i915_virtio_gem_create_ext(struct intel_virtio_device *dev,
struct drm_i915_gem_create_ext *create)
{
struct i915_user_extension *extension = (void *)(uintptr_t)create->extensions;
unsigned ext_size = 0;
void *payload_ptr;
while (extension) {
switch (extension->name) {
case I915_GEM_CREATE_EXT_MEMORY_REGIONS:
{
struct drm_i915_gem_create_ext_memory_regions *mem_regions;
mem_regions = (void*)(uintptr_t)extension;
ext_size += sizeof(*mem_regions);
ext_size += sizeof(struct drm_i915_gem_memory_class_instance) * mem_regions->num_regions;
break;
}
case I915_GEM_CREATE_EXT_PROTECTED_CONTENT:
ext_size += sizeof(struct drm_i915_gem_create_ext_protected_content);
break;
case I915_GEM_CREATE_EXT_SET_PAT:
ext_size += sizeof(struct drm_i915_gem_create_ext_set_pat);
break;
default:
mesa_loge("unsupported extension %d", extension->name);
return EINVAL;
}
extension = (void *)(uintptr_t)extension->next_extension;
}
unsigned req_len = sizeof(struct i915_ccmd_gem_create_ext_req);
req_len += ext_size;
uint8_t buf[req_len];
struct i915_ccmd_gem_create_ext_req *req = (void *)(uintptr_t)buf;
extension = (void *)(uintptr_t)create->extensions;
payload_ptr = req->payload;
while (extension) {
switch (extension->name) {
case I915_GEM_CREATE_EXT_MEMORY_REGIONS:
{
struct drm_i915_gem_create_ext_memory_regions *mem_regions;
struct drm_i915_gem_memory_class_instance *instances;
mem_regions = (void*)(uintptr_t)extension;
instances = (void*)(uintptr_t)mem_regions->regions;
memcpy(payload_ptr, mem_regions, sizeof(*mem_regions));
payload_ptr += sizeof(*mem_regions);
memcpy(payload_ptr, instances, sizeof(*instances) * mem_regions->num_regions);
payload_ptr += sizeof(*instances) * mem_regions->num_regions;
break;
}
case I915_GEM_CREATE_EXT_PROTECTED_CONTENT:
memcpy(payload_ptr, extension, sizeof(struct drm_i915_gem_create_ext_protected_content));
payload_ptr += sizeof(struct drm_i915_gem_create_ext_protected_content);
break;
case I915_GEM_CREATE_EXT_SET_PAT:
memcpy(payload_ptr, extension, sizeof(struct drm_i915_gem_create_ext_set_pat));
payload_ptr += sizeof(struct drm_i915_gem_create_ext_set_pat);
break;
default:
mesa_loge("unsupported extension");
return EINVAL;
}
extension = (void *)(uintptr_t)extension->next_extension;
}
req->hdr = I915_CCMD(GEM_CREATE_EXT, req_len);
req->gem_flags = create->flags;
req->ext_size = ext_size;
req->size = create->size;
uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE |
VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
if (dev->vdrm->supports_cross_device)
blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;
/* Tunneled cmds are processed separately on the host side,
* before the renderer->get_blob() callback. The blob_id
* is used to link the created bo to the get_blob() call.
*/
req->blob_id = p_atomic_inc_return(&dev->next_blob_id);
int ret = vdrm_bo_create(dev->vdrm, create->size, blob_flags,
req->blob_id, &req->hdr);
if (!ret)
return EINVAL;
create->handle = ret;
return 0;
}
static int
i915_virtio_gem_close(struct intel_virtio_device *dev,
struct drm_gem_close *close)
{
vdrm_bo_close(dev->vdrm, close->handle);
return 0;
}
static int
i915_virtio_gem_context_create_ext(struct intel_virtio_device *dev,
struct drm_i915_gem_context_create_ext *create)
{
struct drm_i915_gem_context_create_ext_setparam *setparam;
struct i915_ccmd_gem_context_create_rsp *rsp;
unsigned params_size = 0;
void *payload_ptr;
int err;
if (!(create->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS))
return i915_virtio_simple_ioctl(dev, DRM_IOCTL_I915_GEM_CONTEXT_CREATE,
create);
setparam = (void *)(uintptr_t)create->extensions;
while (setparam) {
switch (setparam->param.param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
case I915_CONTEXT_PARAM_NO_ZEROMAP:
case I915_CONTEXT_PARAM_GTT_SIZE:
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
case I915_CONTEXT_PARAM_BANNABLE:
case I915_CONTEXT_PARAM_PRIORITY:
case I915_CONTEXT_PARAM_SSEU:
case I915_CONTEXT_PARAM_RECOVERABLE:
case I915_CONTEXT_PARAM_VM:
case I915_CONTEXT_PARAM_ENGINES:
case I915_CONTEXT_PARAM_PERSISTENCE:
case I915_CONTEXT_PARAM_RINGSIZE:
case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
break;
default:
mesa_loge("unsupported context param");
return EINVAL;
}
params_size += sizeof(*setparam) + setparam->param.size;
setparam = (void *)(uintptr_t)setparam->base.next_extension;
}
unsigned req_len = sizeof(struct i915_ccmd_gem_context_create_req);
req_len += params_size;
uint8_t buf[req_len];
struct i915_ccmd_gem_context_create_req *req = (void *)(uintptr_t)buf;
setparam = (void *)(uintptr_t)create->extensions;
payload_ptr = req->payload;
while (setparam) {
memcpy(payload_ptr, setparam, sizeof(*setparam));
payload_ptr += sizeof(*setparam);
if (setparam->param.size) {
memcpy(payload_ptr, (void*)(uintptr_t)setparam->param.value,
setparam->param.size);
payload_ptr += setparam->param.size;
}
setparam = (void *)(uintptr_t)setparam->base.next_extension;
}
req->hdr = I915_CCMD(GEM_CONTEXT_CREATE, req_len);
req->params_size = params_size;
req->flags = create->flags;
rsp = vdrm_alloc_rsp(dev->vdrm, &req->hdr, sizeof(*rsp));
err = vdrm_send_req(dev->vdrm, &req->hdr, true);
if (err)
return errno;
create->ctx_id = rsp->ctx_id;
return rsp->ret;
}
static int
i915_virtio_gem_context_param(struct intel_virtio_device *dev,
unsigned long cmd,
struct drm_i915_gem_context_param *param)
{
switch (param->param) {
case I915_CONTEXT_PARAM_RECOVERABLE:
case I915_CONTEXT_PARAM_PRIORITY:
case I915_CONTEXT_PARAM_GTT_SIZE:
case I915_CONTEXT_PARAM_VM:
return i915_virtio_simple_ioctl(dev, cmd, param);
default:
mesa_loge("unsupported context param");
return EINVAL;
}
}
static int
intel_virtio_ioctl_errno(int fd, unsigned long cmd, void *req)
{
int err = ioctl(fd, cmd, req);
if (!err)
errno = 0;
return errno;
}
static int
i915_virtio_gem_busy(struct intel_virtio_device *dev,
struct drm_i915_gem_busy *busy)
{
struct drm_virtgpu_3d_wait virt_wait = {
.handle = busy->handle,
.flags = VIRTGPU_WAIT_NOWAIT,
};
intel_virtio_ioctl_errno(dev->fd, DRM_IOCTL_VIRTGPU_WAIT, &virt_wait);
if (errno == EBUSY) {
errno = 0;
busy->busy = 1;
} else if (!errno) {
busy->busy = 0;
}
return errno;
}
static int
i915_virtio_gem_wait(struct intel_virtio_device *dev,
struct drm_i915_gem_wait *wait)
{
struct drm_virtgpu_3d_wait virt_wait = { .handle = wait->bo_handle };
if (!wait->timeout_ns)
virt_wait.flags = VIRTGPU_WAIT_NOWAIT;
intel_virtio_ioctl_errno(dev->fd, DRM_IOCTL_VIRTGPU_WAIT, &virt_wait);
if (errno == EBUSY)
errno = ETIME;
return errno;
}
static int
i915_virtio_gem_busy_vpipe(struct intel_virtio_device *dev,
struct drm_i915_gem_busy *busy)
{
struct i915_ccmd_gem_busy_rsp *rsp;
struct i915_ccmd_gem_busy_req req;
int err;
req.hdr = I915_CCMD(GEM_BUSY, sizeof(req));
req.res_id = vdrm_handle_to_res_id(dev->vdrm, busy->handle);
rsp = vdrm_alloc_rsp(dev->vdrm, &req.hdr, sizeof(*rsp));
err = vdrm_send_req(dev->vdrm, &req.hdr, true);
if (err)
return errno;
busy->busy = (rsp->busy != 0);
return rsp->ret;
}
static int64_t time_ns(void)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}
static int
i915_virtio_gem_wait_vpipe(struct intel_virtio_device *dev,
struct drm_i915_gem_wait *wait)
{
do {
struct drm_i915_gem_busy busy = { .handle = wait->bo_handle };
int64_t start_time = time_ns();
int err = i915_virtio_gem_busy_vpipe(dev, &busy);
if (err)
return err;
wait->timeout_ns -= time_ns() - start_time;
if (wait->timeout_ns < 0)
wait->timeout_ns = 0;
if (!busy.busy)
return 0;
if (wait->timeout_ns)
sched_yield();
} while (wait->timeout_ns);
return ETIME;
}
static int
i915_virtio_simple_ioctl_gem_patched(struct intel_virtio_device *dev,
unsigned long cmd, void *req)
{
uint32_t *handle = req;
uint32_t tmp_handle = *handle;
*handle = vdrm_handle_to_res_id(dev->vdrm, *handle);
errno = i915_virtio_simple_ioctl(dev, cmd, req);
*handle = tmp_handle;
return errno;
}
static int
i915_virtio_gem_vm_control(struct intel_virtio_device *dev,
unsigned long cmd,
struct drm_i915_gem_vm_control *vm)
{
if (vm->extensions) {
mesa_loge("unsupported vm extension");
return EINVAL;
}
if (vm->flags) {
mesa_loge("unsupported vm flags");
return EINVAL;
}
return i915_virtio_simple_ioctl(dev, cmd, vm);
}
static int
i915_virtio_get_reset_stats(struct intel_virtio_device *dev,
unsigned long cmd,
struct drm_i915_reset_stats *stats)
{
struct i915_shmem *shmem = to_i915_shmem(dev->vdrm->shmem);
if (stats->ctx_id >= 64 ||
!(p_atomic_read(&shmem->banned_ctx_mask) & (1ULL << stats->ctx_id)))
return 0;
errno = i915_virtio_simple_ioctl(dev, cmd, stats);
return errno;
}
static int
i915_virtio_gem_mmap_offset(struct intel_virtio_device *dev,
unsigned long cmd,
struct drm_i915_gem_mmap_offset *mmap_offset)
{
struct i915_ccmd_gem_set_mmap_mode_req req;
int err;
req.hdr = I915_CCMD(GEM_SET_MMAP_MODE, sizeof(req));
req.res_id = vdrm_handle_to_res_id(dev->vdrm, mmap_offset->handle);
req.flags = mmap_offset->flags;
err = vdrm_send_req(dev->vdrm, &req.hdr, false);
if (err)
return errno;
return 0;
}
static int
intel_virtio_sync_syncobj_wait(struct intel_virtio_device *dev,
struct drm_syncobj_wait *args)
{
int ret = dev->sync->wait(dev->sync,
(uint32_t*)(uintptr_t)args->handles,
args->count_handles,
args->timeout_nsec, args->flags,
&args->first_signaled);
if (ret < 0)
return errno;
return 0;
}
static int
intel_virtio_sync_syncobj_create(struct intel_virtio_device *dev,
struct drm_syncobj_create *args)
{
int ret = dev->sync->create(dev->sync, args->flags, &args->handle);
if (ret < 0)
return errno;
return 0;
}
static int
intel_virtio_sync_syncobj_destroy(struct intel_virtio_device *dev,
struct drm_syncobj_destroy *args)
{
int ret = dev->sync->destroy(dev->sync, args->handle);
if (ret < 0)
return errno;
return 0;
}
static int
intel_virtio_sync_syncobj_reset(struct intel_virtio_device *dev,
struct drm_syncobj_array *args)
{
int ret = dev->sync->reset(dev->sync, (uint32_t*)(uintptr_t)args->handles,
args->count_handles);
if (ret < 0)
return errno;
return 0;
}
static int
intel_virtio_sync_syncobj_signal(struct intel_virtio_device *dev,
struct drm_syncobj_array *args)
{
int ret = dev->sync->signal(dev->sync, (uint32_t*)(uintptr_t)args->handles,
args->count_handles);
if (ret < 0)
return errno;
return 0;
}
static int
intel_virtio_sync_syncobj_timeline_signal(struct intel_virtio_device *dev,
struct drm_syncobj_timeline_array *args)
{
int ret = dev->sync->timeline_signal(dev->sync,
(uint32_t*)(uintptr_t)args->handles,
(uint64_t*)(uintptr_t)args->points,
args->count_handles);
if (ret < 0)
return errno;
return 0;
}
static int
intel_virtio_sync_syncobj_timeline_wait(struct intel_virtio_device *dev,
struct drm_syncobj_timeline_wait *args)
{
int ret = dev->sync->timeline_wait(dev->sync,
(uint32_t*)(uintptr_t)args->handles,
(uint64_t*)(uintptr_t)args->points,
args->count_handles,
args->timeout_nsec, args->flags,
&args->first_signaled);
if (ret < 0)
return -ret;
return 0;
}
static int
intel_virtio_sync_syncobj_transfer(struct intel_virtio_device *dev,
struct drm_syncobj_transfer *args)
{
int ret = dev->sync->transfer(dev->sync, args->dst_handle, args->dst_point,
args->src_handle, args->src_point, args->flags);
if (ret < 0)
return errno;
return 0;
}
static int
intel_virtio_sync_syncobj_query(struct intel_virtio_device *dev,
struct drm_syncobj_timeline_array *args)
{
int ret = dev->sync->query(dev->sync,
(uint32_t*)(uintptr_t)args->handles,
(uint64_t*)(uintptr_t)args->points,
args->count_handles,
args->flags);
if (ret < 0)
return errno;
return 0;
}
static int
intel_virtio_sync_syncobj_fd_to_handle(struct intel_virtio_device *dev,
unsigned long cmd,
struct drm_syncobj_handle *args)
{
if (args->flags)
assert(!dev->vpipe);
if (!dev->vpipe)
return intel_virtio_ioctl_errno(dev->fd, cmd, args);
int ret = dev->sync->fd_to_handle(dev->sync, args->fd, &args->handle);
if (ret < 0)
return errno;
return 0;
}
static int
intel_virtio_sync_syncobj_handle_to_fd(struct intel_virtio_device *dev,
unsigned long cmd,
struct drm_syncobj_handle *args)
{
if (args->flags)
assert(!dev->vpipe);
if (!dev->vpipe)
return intel_virtio_ioctl_errno(dev->fd, cmd, args);
int ret = dev->sync->handle_to_fd(dev->sync, args->handle, &args->fd);
if (ret < 0)
return errno;
return 0;
}
static int
intel_virtio_prime_fd_to_handle(struct intel_virtio_device *dev,
struct drm_prime_handle *args)
{
args->handle = vdrm_dmabuf_to_handle(dev->vdrm, args->fd);
if (!args->handle) {
errno = EINVAL;
return errno;
}
return 0;
}
static int
intel_virtio_prime_handle_to_fd(struct intel_virtio_device *dev,
struct drm_prime_handle *args)
{
args->fd = vdrm_bo_export_dmabuf(dev->vdrm, args->handle);
if (args->fd < 0) {
errno = EINVAL;
return errno;
}
return 0;
}
int
intel_virtio_ioctl(int fd, unsigned long cmd, void *req)
{
struct intel_virtio_device *dev = fd_to_intel_virtio_device(fd);
if (!dev) {
/* this is a real physical device, since the fd is not bound to virtio */
return intel_virtio_ioctl_errno(fd, cmd, req);
}
int orig_errno = errno;
/*
* Special case for legacy ioctls that have the same NR as the extended
* ioctls and need to be handled differently.
*/
switch (cmd) {
case DRM_IOCTL_I915_GEM_CREATE:
errno = i915_virtio_gem_create(dev, req);
goto out;
case DRM_IOCTL_I915_GEM_CONTEXT_CREATE:
errno = i915_virtio_simple_ioctl(dev, cmd, req);
goto out;
default:
break;
}
#define IOC_MASKED(IOC) ((IOC) & ~IOCSIZE_MASK)
/* DRM ioctls vary in size depending on the UAPI header version used */
switch (IOC_MASKED(cmd)) {
case IOC_MASKED(DRM_IOCTL_SYNCOBJ_WAIT):
errno = intel_virtio_sync_syncobj_wait(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_SYNCOBJ_CREATE):
errno = intel_virtio_sync_syncobj_create(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_SYNCOBJ_DESTROY):
errno = intel_virtio_sync_syncobj_destroy(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_SYNCOBJ_RESET):
errno = intel_virtio_sync_syncobj_reset(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_SYNCOBJ_SIGNAL):
errno = intel_virtio_sync_syncobj_signal(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL):
errno = intel_virtio_sync_syncobj_timeline_signal(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT):
errno = intel_virtio_sync_syncobj_timeline_wait(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_SYNCOBJ_TRANSFER):
errno = intel_virtio_sync_syncobj_transfer(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_SYNCOBJ_QUERY):
errno = intel_virtio_sync_syncobj_query(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE):
errno = intel_virtio_sync_syncobj_fd_to_handle(dev, cmd, req);
break;
case IOC_MASKED(DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD):
errno = intel_virtio_sync_syncobj_handle_to_fd(dev, cmd, req);
break;
case IOC_MASKED(DRM_IOCTL_PRIME_HANDLE_TO_FD):
errno = intel_virtio_prime_handle_to_fd(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_PRIME_FD_TO_HANDLE):
errno = intel_virtio_prime_fd_to_handle(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_I915_GEM_CREATE_EXT):
errno = i915_virtio_gem_create_ext(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_I915_GETPARAM):
errno = i915_virtio_getparam(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_I915_QUERY):
errno = i915_virtio_queryparam(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_GEM_CLOSE):
errno = i915_virtio_gem_close(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT):
errno = i915_virtio_gem_context_create_ext(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM):
case IOC_MASKED(DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM):
errno = i915_virtio_gem_context_param(dev, cmd, req);
break;
case IOC_MASKED(DRM_IOCTL_I915_GEM_EXECBUFFER2):
errno = i915_virtio_gem_execbuffer2(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_I915_GEM_MADVISE):
errno = 0;
break;
case IOC_MASKED(DRM_IOCTL_I915_GET_RESET_STATS):
errno = i915_virtio_get_reset_stats(dev, cmd, req);
break;
case IOC_MASKED(DRM_IOCTL_I915_REG_READ):
case IOC_MASKED(DRM_IOCTL_I915_GEM_CONTEXT_DESTROY):
case IOC_MASKED(DRM_IOCTL_I915_GEM_GET_APERTURE):
errno = i915_virtio_simple_ioctl(dev, cmd, req);
break;
case IOC_MASKED(DRM_IOCTL_I915_GEM_GET_TILING):
case IOC_MASKED(DRM_IOCTL_I915_GEM_SET_TILING):
case IOC_MASKED(DRM_IOCTL_I915_GEM_SET_DOMAIN):
errno = i915_virtio_simple_ioctl_gem_patched(dev, cmd, req);
break;
case IOC_MASKED(DRM_IOCTL_I915_GEM_BUSY):
/*
* vpipe doesn't support tracking busy GEMs; use the GEM-busy CCMD
* that was added specifically for vpipe.
*/
if (dev->vpipe)
errno = i915_virtio_gem_busy_vpipe(dev, req);
else
errno = i915_virtio_gem_busy(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_I915_GEM_WAIT):
if (dev->vpipe)
errno = i915_virtio_gem_wait_vpipe(dev, req);
else
errno = i915_virtio_gem_wait(dev, req);
break;
case IOC_MASKED(DRM_IOCTL_I915_GEM_VM_CREATE):
case IOC_MASKED(DRM_IOCTL_I915_GEM_VM_DESTROY):
errno = i915_virtio_gem_vm_control(dev, cmd, req);
break;
case IOC_MASKED(DRM_IOCTL_I915_GEM_MMAP_OFFSET):
errno = i915_virtio_gem_mmap_offset(dev, cmd, req);
break;
case IOC_MASKED(DRM_IOCTL_I915_GEM_USERPTR):
errno = ENODEV;
break;
default:
mesa_loge("unsupported ioctl 0x%lx\n", _IOC_NR(cmd));
errno = ENOTTY;
break;
}
#undef IOC_MASKED
out:
if (errno) {
mesa_logd("ioctl 0x%lx failed errno=%d\n", _IOC_NR(cmd), errno);
return -1;
}
errno = orig_errno;
return 0;
}


@@ -0,0 +1,199 @@
/*
* Copyright 2024 Collabora, Ltd.
* SPDX-License-Identifier: MIT
*/
#include "intel_virtio_priv.h"
#include "i915_proto.h"
struct virtio_gem_execbuffer_params {
struct intel_virtio_device *dev;
struct drm_i915_gem_execbuffer2 *exec;
struct drm_virtgpu_execbuffer_syncobj *in_syncobjs;
unsigned num_in_syncobjs;
struct drm_virtgpu_execbuffer_syncobj *out_syncobjs;
unsigned num_out_syncobjs;
uint32_t *bo_handles;
unsigned num_bo_handles;
};
static int
i915_virtio_gem_execbuffer2_submit(struct virtio_gem_execbuffer_params *params)
{
struct intel_virtio_device *dev = params->dev;
struct i915_shmem *shmem = to_i915_shmem(dev->vdrm->shmem);
struct drm_i915_gem_execbuffer2 *exec = params->exec;
struct drm_i915_gem_exec_object2 *buffers = (void *)(uintptr_t)exec->buffers_ptr;
struct drm_i915_gem_relocation_entry *relocs;
struct i915_ccmd_gem_execbuffer2_rsp *rsp;
uint64_t allowed_flags = 0, flags = exec->flags;
if (exec->rsvd1 < 64 &&
(p_atomic_read(&shmem->banned_ctx_mask) & (1ULL << exec->rsvd1)))
return EIO;
unsigned relocs_count = 0;
for (int i = 0; i < exec->buffer_count; i++)
relocs_count += buffers[i].relocation_count;
size_t buffers_size = sizeof(*buffers) * exec->buffer_count;
size_t relocations_size = sizeof(struct drm_i915_gem_relocation_entry) * relocs_count;
unsigned req_len = sizeof(struct i915_ccmd_gem_execbuffer2_req);
req_len += buffers_size + relocations_size;
uint8_t buf[req_len];
struct i915_ccmd_gem_execbuffer2_req *req = (void *)buf;
memcpy(req->payload, buffers, buffers_size);
uint32_t bo_handles[exec->buffer_count + 1];
buffers = (void *)req->payload;
relocs = (void *)(req->payload + buffers_size);
for (int i = 0; i < exec->buffer_count; i++) {
memcpy(relocs, (void *)(uintptr_t)buffers[i].relocs_ptr,
sizeof(*relocs) * buffers[i].relocation_count);
relocs += buffers[i].relocation_count;
bo_handles[i] = buffers[i].handle;
buffers[i].handle = vdrm_handle_to_res_id(dev->vdrm, buffers[i].handle);
}
params->bo_handles = bo_handles;
params->num_bo_handles = exec->buffer_count;
allowed_flags |= I915_EXEC_RING_MASK;
allowed_flags |= I915_EXEC_CONSTANTS_MASK;
allowed_flags |= I915_EXEC_GEN7_SOL_RESET;
allowed_flags |= I915_EXEC_NO_RELOC;
allowed_flags |= I915_EXEC_HANDLE_LUT;
allowed_flags |= I915_EXEC_BSD_MASK << I915_EXEC_BSD_SHIFT;
allowed_flags |= I915_EXEC_BATCH_FIRST;
/* XXX: sanity-check flags, might be removed in a release version */
if (flags & ~(allowed_flags | I915_EXEC_FENCE_ARRAY | I915_EXEC_USE_EXTENSIONS)) {
mesa_loge("unsupported flags");
return EINVAL;
}
req->hdr = I915_CCMD(GEM_EXECBUFFER2, req_len);
req->relocs_count = relocs_count;
req->buffer_count = exec->buffer_count;
req->batch_start_offset = exec->batch_start_offset;
req->batch_len = exec->batch_len;
req->context_id = exec->rsvd1;
req->flags = flags & allowed_flags;
rsp = vdrm_alloc_rsp(dev->vdrm, &req->hdr, sizeof(*rsp));
struct vdrm_execbuf_params p = {
.req = &req->hdr,
.ring_idx = 1 + (flags & I915_EXEC_RING_MASK),
.in_syncobjs = params->in_syncobjs,
.num_in_syncobjs = params->num_in_syncobjs,
.out_syncobjs = params->out_syncobjs,
.num_out_syncobjs = params->num_out_syncobjs,
.handles = params->bo_handles,
.num_handles = params->num_bo_handles,
};
return vdrm_execbuf(dev->vdrm, &p);
}
int
i915_virtio_gem_execbuffer2(struct intel_virtio_device *dev,
struct drm_i915_gem_execbuffer2 *exec)
{
unsigned num_waits = 0, num_signals = 0, num_fences = 0, i, w, s;
struct drm_i915_gem_execbuffer_ext_timeline_fences *ext;
struct virtio_gem_execbuffer_params params;
struct drm_i915_gem_exec_fence *fences;
uint64_t *syncobj_values = NULL;
int ret;
memset(&params, 0, sizeof(params));
if (exec->flags & I915_EXEC_USE_EXTENSIONS) {
ext = (void *)(uintptr_t)exec->cliprects_ptr;
if (ext->base.name != DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES) {
mesa_loge("unsupported extension");
return EINVAL;
}
if (ext->base.next_extension) {
mesa_loge("unsupported extension");
return EINVAL;
}
num_fences = ext->fence_count;
fences = (void *)(uintptr_t)ext->handles_ptr;
syncobj_values = (void *)(uintptr_t)ext->values_ptr;
} else if (exec->flags & I915_EXEC_FENCE_ARRAY) {
fences = (void *)(uintptr_t)exec->cliprects_ptr;
num_fences = exec->num_cliprects;
}
for (i = 0; i < num_fences; i++) {
if (fences[i].flags & I915_EXEC_FENCE_WAIT)
num_waits++;
if (fences[i].flags & I915_EXEC_FENCE_SIGNAL)
num_signals++;
}
if (num_waits) {
params.in_syncobjs = calloc(num_waits, sizeof(*params.in_syncobjs));
if (!params.in_syncobjs) {
ret = ENOMEM;
goto out;
}
}
if (num_signals) {
params.out_syncobjs = calloc(num_signals, sizeof(*params.out_syncobjs));
if (!params.out_syncobjs) {
ret = ENOMEM;
goto out;
}
}
for (i = 0, w = 0, s = 0; i < num_fences; i++) {
if (fences[i].flags & I915_EXEC_FENCE_WAIT) {
params.in_syncobjs[w].handle = fences[i].handle;
if (syncobj_values)
params.in_syncobjs[w].point = syncobj_values[i];
w++;
}
if (fences[i].flags & I915_EXEC_FENCE_SIGNAL) {
params.out_syncobjs[s].handle = fences[i].handle;
if (syncobj_values)
params.out_syncobjs[s].point = syncobj_values[i];
s++;
}
if (!(fences[i].flags & (I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_SIGNAL))) {
mesa_loge("invalid fence flags");
ret = EINVAL;
goto out;
}
}
params.dev = dev;
params.exec = exec;
params.num_in_syncobjs = num_waits;
params.num_out_syncobjs = num_signals;
ret = i915_virtio_gem_execbuffer2_submit(&params);
out:
free(params.out_syncobjs);
free(params.in_syncobjs);
return ret;
}


@@ -0,0 +1,59 @@
/*
* Copyright 2024 Collabora, Ltd.
* SPDX-License-Identifier: MIT
*/
#ifndef INTEL_VIRTIO_H_
#define INTEL_VIRTIO_H_
#include <stdbool.h>
#include <sys/mman.h>
#include "util/macros.h"
struct intel_device_info;
#ifdef HAVE_INTEL_VIRTIO
int intel_virtio_init_fd(int fd);
void intel_virtio_ref_fd(int fd);
void intel_virtio_unref_fd(int fd);
bool intel_virtio_get_pci_device_info(int fd,
struct intel_device_info *devinfo);
bool is_intel_virtio_fd(int fd);
void *intel_virtio_bo_mmap(int fd, uint32_t handle, size_t size, void *placed_addr);
struct util_sync_provider *intel_virtio_sync_provider(int fd);
#else
static inline int intel_virtio_init_fd(int fd)
{
return 0;
}
static inline void intel_virtio_ref_fd(int fd) {}
static inline void intel_virtio_unref_fd(int fd) {}
static inline bool
intel_virtio_get_pci_device_info(int fd, struct intel_device_info *devinfo)
{
return false;
}
static inline bool is_intel_virtio_fd(int fd)
{
return false;
}
static inline void *
intel_virtio_bo_mmap(int fd, uint32_t handle, size_t size, void *placed_addr)
{
return MAP_FAILED;
}
static inline struct util_sync_provider *
intel_virtio_sync_provider(int fd)
{
return NULL;
}
#endif /* HAVE_INTEL_VIRTIO */
#endif /* INTEL_VIRTIO_H_ */


@@ -0,0 +1,23 @@
/*
* Copyright 2024 Collabora, Ltd.
* SPDX-License-Identifier: MIT
*/
#include "intel_virtio_priv.h"
void *intel_virtio_bo_mmap(int fd, uint32_t handle, size_t size,
void *placed_addr)
{
struct intel_virtio_device *dev = fd_to_intel_virtio_device(fd);
if (!dev)
return MAP_FAILED;
void *map = vdrm_bo_map(dev->vdrm, handle, size, placed_addr);
if (!map) {
mesa_loge("failed to map bo");
return MAP_FAILED;
}
return map;
}


@@ -0,0 +1,168 @@
/*
* Copyright 2024 Collabora, Ltd.
* SPDX-License-Identifier: MIT
*/
#include "intel_device_info.h"
#include "intel_virtio_priv.h"
static simple_mtx_t dev_list_lock = SIMPLE_MTX_INITIALIZER;
static struct list_head dev_list = {
.next = &dev_list,
.prev = &dev_list,
};
/*
* Returns NULL if the given FD isn't backed by a virtio-intel device.
* Note that this function is only used internally by the virtio-intel
* code; we don't expose struct intel_virtio_device globally.
*/
struct intel_virtio_device *
fd_to_intel_virtio_device(int fd)
{
struct intel_virtio_device *dev = NULL;
simple_mtx_lock(&dev_list_lock);
list_for_each_entry(struct intel_virtio_device, itr,
&dev_list, list_item) {
int err = os_same_file_description(itr->fd, fd);
if (!err) {
dev = itr;
break;
}
}
simple_mtx_unlock(&dev_list_lock);
return dev;
}
bool is_intel_virtio_fd(int fd)
{
return fd_to_intel_virtio_device(fd) != NULL;
}
bool
intel_virtio_get_pci_device_info(int fd, struct intel_device_info *devinfo)
{
struct intel_virtio_device *dev = fd_to_intel_virtio_device(fd);
struct virgl_renderer_capset_drm caps;
if (!dev)
return false;
caps = dev->vdrm->caps;
devinfo->pci_bus = caps.u.intel.pci_bus;
devinfo->pci_dev = caps.u.intel.pci_dev;
devinfo->pci_func = caps.u.intel.pci_func;
devinfo->pci_domain = caps.u.intel.pci_domain;
devinfo->pci_device_id = caps.u.intel.pci_device_id;
devinfo->pci_revision_id = caps.u.intel.pci_revision_id;
return true;
}
static bool is_virtio_fd(int fd)
{
drmVersionPtr version = drmGetVersion(fd);
bool is_virtio = version && !strcmp(version->name, "virtio_gpu");
drmFreeVersion(version);
if (debug_get_bool_option("INTEL_VIRTIO_FORCE_VTEST", false))
is_virtio = true;
return is_virtio;
}
static uint64_t
virtgpu_ioctl_getparam(int fd, uint64_t param)
{
/* val must be zeroed because kernel only writes the lower 32 bits */
uint64_t val = 0;
struct drm_virtgpu_getparam args = {
.param = param,
.value = (uintptr_t)&val,
};
const int ret = drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &args);
return ret ? 0 : val;
}
int intel_virtio_init_fd(int fd)
{
if (!is_virtio_fd(fd))
return 0;
struct intel_virtio_device *dev = calloc(1, sizeof(*dev));
if (!dev)
return -ENOMEM;
if (debug_get_bool_option("INTEL_VIRTIO_FORCE_VTEST", false)) {
dev->vdrm = vdrm_device_connect(-1, VIRTGPU_DRM_CONTEXT_I915);
dev->vpipe = true;
} else {
dev->vdrm = vdrm_device_connect(fd, VIRTGPU_DRM_CONTEXT_I915);
}
if (!dev->vdrm) {
free(dev);
return -EINVAL;
}
if (dev->vpipe)
dev->sync = vdrm_vpipe_get_sync(dev->vdrm);
else
dev->sync = util_sync_provider_drm(fd);
if (!dev->sync) {
vdrm_device_close(dev->vdrm);
free(dev);
return -EINVAL;
}
dev->fd = os_dupfd_cloexec(fd);
p_atomic_set(&dev->refcnt, 1);
simple_mtx_lock(&dev_list_lock);
list_add(&dev->list_item, &dev_list);
simple_mtx_unlock(&dev_list_lock);
return 1;
}
void intel_virtio_ref_fd(int fd)
{
struct intel_virtio_device *dev = fd_to_intel_virtio_device(fd);
if (dev)
p_atomic_inc(&dev->refcnt);
}
void intel_virtio_unref_fd(int fd)
{
struct intel_virtio_device *dev = fd_to_intel_virtio_device(fd);
if (dev && !p_atomic_dec_return(&dev->refcnt)) {
simple_mtx_lock(&dev_list_lock);
list_del(&dev->list_item);
simple_mtx_unlock(&dev_list_lock);
dev->sync->finalize(dev->sync);
vdrm_device_close(dev->vdrm);
close(dev->fd);
free(dev);
}
}
struct util_sync_provider *intel_virtio_sync_provider(int fd)
{
struct intel_virtio_device *dev = fd_to_intel_virtio_device(fd);
if (dev)
return dev->sync;
return NULL;
}


@@ -0,0 +1,67 @@
/*
* Copyright 2024 Collabora, Ltd.
* SPDX-License-Identifier: MIT
*/
#ifndef INTEL_VIRTIO_PRIV_H_
#define INTEL_VIRTIO_PRIV_H_
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "drm-uapi/drm.h"
#include "drm-uapi/i915_drm.h"
#include "drm-uapi/virtgpu_drm.h"
#include <xf86drm.h>
#include "util/libsync.h"
#include "util/list.h"
#include "util/log.h"
#include "util/macros.h"
#include "util/os_file.h"
#include "util/simple_mtx.h"
#include "util/u_atomic.h"
#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/perf/cpu_trace.h"
#include "util/u_sync_provider.h"
#include "common/intel_gem.h"
#include "virtio/virtio-gpu/drm_hw.h"
#include "intel_virtio.h"
#include "vdrm.h"
#define virtio_ioctl(fd, name, args) ({ \
MESA_TRACE_SCOPE(#name); \
int ret = drmIoctl((fd), DRM_IOCTL_ ## name, (args)); \
ret; \
})
struct intel_virtio_device {
struct list_head list_item;
struct vdrm_device *vdrm;
struct util_sync_provider *sync;
int fd;
uint32_t next_blob_id;
uint32_t refcnt;
bool vpipe;
};
struct intel_virtio_device *fd_to_intel_virtio_device(int fd);
int i915_virtio_gem_execbuffer2(struct intel_virtio_device *dev,
struct drm_i915_gem_execbuffer2 *exec);
#endif /* INTEL_VIRTIO_PRIV_H_ */


@@ -45,6 +45,7 @@
#include "dev/intel_debug.h"
#include "dev/intel_device_info.h"
#include "dev/virtio/intel_virtio.h"
#include "perf/i915/intel_perf.h"
#include "perf/xe/intel_perf.h"
@@ -691,6 +692,10 @@ oa_metrics_available(struct intel_perf_config *perf, int fd,
perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
bool oa_metrics_available = false;
/* TODO: Support performance metrics */
if (devinfo->is_virtio)
return false;
perf->devinfo = devinfo;
/* Consider an invalid as supported. */


@@ -68,9 +68,6 @@ endif
if with_any_vk or with_gallium_zink
subdir('vulkan')
endif
if with_any_intel
subdir('intel')
endif
if with_virtio_vk
subdir('virtio/vulkan')
endif
@@ -92,6 +89,9 @@ endif
if with_imagination_vk or with_tools.contains('imagination')
subdir('imagination')
endif
if with_any_intel
subdir('intel')
endif
if with_gallium_panfrost or with_gallium_lima or with_panfrost_vk or with_tools.contains('panfrost')
subdir('panfrost')
endif


@@ -30,6 +30,7 @@ struct virgl_renderer_capset_drm {
uint32_t version_patchlevel;
#define VIRTGPU_DRM_CONTEXT_MSM 1
#define VIRTGPU_DRM_CONTEXT_AMDGPU 2
#define VIRTGPU_DRM_CONTEXT_I915 3
uint32_t context_type;
uint32_t pad;
union {
@@ -60,6 +61,14 @@ struct virgl_renderer_capset_drm {
#endif
char marketing_name[128];
} amdgpu; /* context_type == VIRTGPU_DRM_CONTEXT_AMDGPU */
struct {
uint32_t pci_bus;
uint32_t pci_dev;
uint32_t pci_func;
uint32_t pci_revision_id;
uint32_t pci_domain;
uint32_t pci_device_id;
} intel; /* context_type == VIRTGPU_DRM_CONTEXT_I915 */
} u;
};