gpu: Request backend channel for frontend communication

Add Backend field to GPU adapters and request BACKEND_REQ and
BACKEND_SEND_FD protocol features. This allows the backend to
communicate with the frontend for operations like shared memory mapping.

Signed-off-by: Matej Hrica <mhrica@redhat.com>
(cherry picked from commit 513534be326f67411d32a3b22b681df748adb7e1)
This commit is contained in:
Matej Hrica 2025-12-09 18:53:58 +01:00 committed by Davíð Steinn Geirsson
parent fbc640a7f3
commit 6b8660d82a
4 changed files with 408 additions and 565 deletions

Cargo.lock (generated) — 891 changed lines

File diff suppressed because it is too large Load diff

View file

@@ -21,7 +21,7 @@ use vhost::vhost_user::{
gpu_message::{
VhostUserGpuCursorPos, VhostUserGpuEdidRequest, VhostUserGpuScanout, VhostUserGpuUpdate,
},
GpuBackend,
Backend, GpuBackend,
};
use vhost_user_backend::{VringRwLock, VringT};
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, VolatileSlice};
@@ -96,6 +96,8 @@ thread_local! {
}
pub struct GfxstreamAdapter {
#[allow(dead_code)]
backend: Backend,
gpu_backend: Option<GpuBackend>,
resources: BTreeMap<u32, GfxstreamResource>,
fence_state: Arc<Mutex<FenceState>>,
@@ -105,6 +107,7 @@ pub struct GfxstreamAdapter {
impl GfxstreamAdapter {
pub fn new(
queue_ctl: &VringRwLock,
backend: Backend,
gpu_config: &GpuConfig,
gpu_backend: Option<GpuBackend>,
) -> Self {
@@ -121,6 +124,7 @@ impl GfxstreamAdapter {
});
Self {
backend,
gpu_backend,
fence_state,
resources: BTreeMap::new(),
@@ -769,6 +773,11 @@ mod gfx_fence_tests {
GpuBackend::from_stream(backend)
}
fn dummy_backend() -> Backend {
let (_, backend) = UnixStream::pair().unwrap();
Backend::from_stream(backend)
}
/// Attempts to create a GPU adapter for testing.
/// Returns None if gfxstream initialization fails (e.g., in CI without GPU
/// drivers).
@@ -813,6 +822,7 @@ mod gfx_fence_tests {
});
Some(GfxstreamAdapter {
backend: dummy_backend(),
gpu_backend: Some(dummy_gpu_backend()),
resources: BTreeMap::default(),
fence_state,

View file

@@ -20,7 +20,7 @@ use vhost::vhost_user::{
VhostUserGpuCursorPos, VhostUserGpuDMABUFScanout, VhostUserGpuDMABUFScanout2,
VhostUserGpuEdidRequest, VhostUserGpuUpdate,
},
GpuBackend,
Backend, GpuBackend,
};
use vhost_user_backend::{VringRwLock, VringT};
use virglrenderer::{
@@ -150,6 +150,8 @@ impl FenceHandler for VirglFenceHandler {
pub struct VirglRendererAdapter {
renderer: VirglRenderer,
capsets: GpuCapset,
#[allow(dead_code)]
backend: Backend,
gpu_backend: Option<GpuBackend>,
fence_state: Arc<Mutex<FenceState>>,
resources: BTreeMap<u32, GpuResource>,
@@ -159,6 +161,7 @@ pub struct VirglRendererAdapter {
impl VirglRendererAdapter {
pub fn new(
queue_ctl: &VringRwLock,
backend: Backend,
config: &GpuConfig,
gpu_backend: Option<GpuBackend>,
) -> io::Result<Self> {
@@ -193,6 +196,7 @@ impl VirglRendererAdapter {
Ok(Self {
capsets,
renderer,
backend,
gpu_backend,
fence_state,
resources: BTreeMap::new(),
@@ -727,6 +731,11 @@ mod virgl_cov_tests {
GpuBackend::from_stream(backend)
}
fn dummy_backend() -> Backend {
let (_, backend) = UnixStream::pair().unwrap();
Backend::from_stream(backend)
}
#[test]
fn sglist_to_iovecs_err_on_invalid_slice() {
// Single region: 0x1000..0x2000 (4 KiB)
@@ -813,8 +822,9 @@ mod virgl_cov_tests {
let (vring, _outs, _call_evt) =
create_vring(&mem, &[] as &[TestingDescChainArgs], GuestAddress(0x2000), GuestAddress(0x4000), 64);
let backend = dummy_gpu_backend();
let mut gpu = VirglRendererAdapter::new(&vring, &cfg, Some(backend)).unwrap();
let backend = dummy_backend();
let gpu_backend = dummy_gpu_backend();
let mut gpu = VirglRendererAdapter::new(&vring, backend, &cfg, Some(gpu_backend)).unwrap();
gpu.event_poll();
let edid_req = VhostUserGpuEdidRequest {

View file

@@ -22,9 +22,10 @@ macro_rules! handle_adapter {
Some(renderer) => renderer,
None => {
// Pass $vrings to the call
let (control_vring, gpu_backend) = $self.extract_backend_and_vring($vrings)?;
let (control_vring, backend, gpu_backend) =
$self.extract_backend_and_vring($vrings)?;
let renderer = $new_adapter(control_vring, gpu_backend)?;
let renderer = $new_adapter(control_vring, backend, gpu_backend)?;
event_poll_fd = renderer.get_event_poll_fd();
maybe_renderer.insert(renderer)
@@ -52,7 +53,7 @@ use thiserror::Error as ThisError;
use vhost::vhost_user::{
gpu_message::{VhostUserGpuCursorPos, VhostUserGpuEdidRequest},
message::{VhostUserProtocolFeatures, VhostUserVirtioFeatures},
GpuBackend,
Backend, GpuBackend,
};
use vhost_user_backend::{VhostUserBackend, VringEpollHandler, VringRwLock, VringT};
use virtio_bindings::{
@@ -140,6 +141,7 @@ impl From<Error> for io::Error {
struct VhostUserGpuBackendInner {
virtio_cfg: VirtioGpuConfig,
event_idx_enabled: bool,
backend: Option<Backend>,
gpu_backend: Option<GpuBackend>,
exit_consumer: EventConsumer,
exit_notifier: EventNotifier,
@@ -173,6 +175,7 @@ impl VhostUserGpuBackend {
num_capsets: Le32::from(gpu_config.capsets().num_capsets()),
},
event_idx_enabled: false,
backend: None,
gpu_backend: None,
exit_consumer,
exit_notifier,
@@ -592,17 +595,21 @@ impl VhostUserGpuBackendInner {
fn extract_backend_and_vring<'a>(
&mut self,
vrings: &'a [VringRwLock],
) -> IoResult<(&'a VringRwLock, Option<GpuBackend>)> {
) -> IoResult<(&'a VringRwLock, Backend, Option<GpuBackend>)> {
let control_vring = &vrings[CONTROL_QUEUE as usize];
let backend = self.gpu_backend.take();
let backend = self
.backend
.take()
.ok_or_else(|| io::Error::other("set_backend_req_fd() not called, Backend missing"))?;
let gpu_backend = self.gpu_backend.take();
if !self.gpu_config.flags().headless && backend.is_none() {
if !self.gpu_config.flags().headless && gpu_backend.is_none() {
return Err(io::Error::other(
"set_gpu_socket() not called, GpuBackend missing",
));
}
Ok((control_vring, backend))
Ok((control_vring, backend, gpu_backend))
}
fn lazy_init_and_handle_event(
@@ -622,9 +629,10 @@ impl VhostUserGpuBackendInner {
GpuMode::Gfxstream => handle_adapter!(
GfxstreamAdapter,
TLS_GFXSTREAM,
|control_vring, gpu_backend| -> io::Result<GfxstreamAdapter> {
|control_vring, backend, gpu_backend| -> io::Result<GfxstreamAdapter> {
Ok(GfxstreamAdapter::new(
control_vring,
backend,
&self.gpu_config,
gpu_backend,
))
@@ -638,8 +646,8 @@ impl VhostUserGpuBackendInner {
GpuMode::VirglRenderer => handle_adapter!(
VirglRendererAdapter,
TLS_VIRGL,
|control_vring, gpu_backend| {
VirglRendererAdapter::new(control_vring, &self.gpu_config, gpu_backend)
|control_vring, backend, gpu_backend| {
VirglRendererAdapter::new(control_vring, backend, &self.gpu_config, gpu_backend)
},
self,
device_event,
@@ -649,7 +657,7 @@ impl VhostUserGpuBackendInner {
GpuMode::Null => handle_adapter!(
NullAdapter,
TLS_NULL,
|control_vring, gpu_backend| -> io::Result<NullAdapter> {
|control_vring, _backend, gpu_backend| -> io::Result<NullAdapter> {
Ok(NullAdapter::new(
control_vring,
&self.gpu_config,
@@ -713,7 +721,10 @@ impl VhostUserBackend for VhostUserGpuBackend {
fn protocol_features(&self) -> VhostUserProtocolFeatures {
debug!("Protocol features called");
VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ
VhostUserProtocolFeatures::CONFIG
| VhostUserProtocolFeatures::MQ
| VhostUserProtocolFeatures::BACKEND_REQ
| VhostUserProtocolFeatures::BACKEND_SEND_FD
}
fn set_event_idx(&self, enabled: bool) {
@@ -727,6 +738,11 @@ impl VhostUserBackend for VhostUserGpuBackend {
Ok(())
}
fn set_backend_req_fd(&self, backend: Backend) {
trace!("Got set_backend_req_fd");
self.inner.lock().unwrap().backend = Some(backend);
}
fn set_gpu_socket(&self, backend: GpuBackend) -> IoResult<()> {
self.inner.lock().unwrap().gpu_backend = Some(backend);
Ok(())
@@ -935,6 +951,11 @@ mod tests {
(frontend, backend)
}
fn dummy_backend_request_socket() -> Backend {
let (_frontend, backend) = UnixStream::pair().unwrap();
Backend::from_stream(backend)
}
#[test]
fn test_process_gpu_command() {
let (_, mem) = init();
@@ -1436,7 +1457,10 @@ mod tests {
assert_eq!(backend.features(), 0x0101_7100_001B);
assert_eq!(
backend.protocol_features(),
VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ
VhostUserProtocolFeatures::CONFIG
| VhostUserProtocolFeatures::MQ
| VhostUserProtocolFeatures::BACKEND_REQ
| VhostUserProtocolFeatures::BACKEND_SEND_FD
);
assert_eq!(backend.queues_per_thread(), vec![0xffff_ffff]);
assert_eq!(backend.get_config(0, 0), Vec::<u8>::new());
@@ -1458,7 +1482,7 @@ mod tests {
let vring = VringRwLock::new(mem, 0x1000).unwrap();
vring.set_queue_info(0x100, 0x200, 0x300).unwrap();
vring.set_queue_ready(true);
backend.set_backend_req_fd(dummy_backend_request_socket());
assert_eq!(
backend
.handle_event(0, EventSet::OUT, &[vring.clone()], 0)
@@ -1477,6 +1501,7 @@ mod tests {
// Hit the loop part
backend.set_event_idx(true);
backend.set_backend_req_fd(dummy_backend_request_socket());
backend
.handle_event(0, EventSet::IN, &[vring.clone()], 0)
.unwrap();
@@ -1579,6 +1604,7 @@ mod tests {
.unwrap();
backend.set_gpu_socket(gpu_backend).unwrap();
backend.set_backend_req_fd(dummy_backend_request_socket());
// Unfortunately, there is no way to create a VringEpollHandler directly (the ::new is not public)
// So we create a daemon to create the epoll handler for us here