@@ -169,6 +169,9 @@ static void virtio_gpu_gl_device_unrealize(DeviceState *qdev)
if (gl->renderer_state >= RS_INITED) {
#if VIRGL_VERSION_MAJOR >= 1
qemu_bh_delete(gl->cmdq_resume_bh);
+
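+ /* free fences that the deleted BH would otherwise have completed */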
+ virtio_gpu_virgl_reset_async_fences(g);
+ qemu_bh_delete(gl->async_fence_bh);
#endif
if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
timer_free(gl->print_stats);
@@ -871,6 +871,7 @@ static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
+ VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
bool cmd_suspended = false;
int ret;
@@ -972,15 +973,29 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
- /*
- * Unlike other virglrenderer functions, this one returns a positive
- * error code.
- */
- ret = virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, 0);
- if (ret) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: virgl_renderer_create_fence error: %s",
- __func__, strerror(ret));
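+ /*
+ * Use per-context fencing if it is enabled and the guest
+ * provided ring information with the fence request.
+ */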
+ if (gl->context_fence_enabled &&
+ (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX)) {
+ uint32_t flags = 0;
+
+ ret = virgl_renderer_context_create_fence(cmd->cmd_hdr.ctx_id, flags,
+ cmd->cmd_hdr.ring_idx,
+ cmd->cmd_hdr.fence_id);
+ if (ret) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: virgl_renderer_context_create_fence error: %s",
+ __func__, strerror(-ret));
+ }
+ } else {
+ /*
+ * Unlike other virglrenderer functions, this one returns a positive
+ * error code.
+ */
+ ret = virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, 0);
+ if (ret) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: virgl_renderer_create_fence error: %s",
+ __func__, strerror(ret));
+ }
}
}
@@ -1008,6 +1023,102 @@ static void virgl_write_fence(void *opaque, uint32_t fence)
}
}
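+/*
+ * Drop all fence completions that are still queued for processing by the
+ * main loop, e.g. when the device is reset.
+ */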
+void virtio_gpu_virgl_reset_async_fences(VirtIOGPU *g)
+{
+ struct virtio_gpu_virgl_context_fence *f;
+ VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
+
+ while (!QSLIST_EMPTY(&gl->async_fenceq)) {
+ f = QSLIST_FIRST(&gl->async_fenceq);
+
+ QSLIST_REMOVE_HEAD(&gl->async_fenceq, next);
+
+ g_free(f);
+ }
+}
+
+#if VIRGL_VERSION_MAJOR >= 1
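+/*
+ * Bottom half running in the main loop: it completes the fenced commands
+ * for fences that virglrenderer has signalled asynchronously via the
+ * write-fence callbacks below.
+ */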
+static void virtio_gpu_virgl_async_fence_bh(void *opaque)
+{
+ QSLIST_HEAD(, virtio_gpu_virgl_context_fence) async_fenceq;
+ struct virtio_gpu_ctrl_command *cmd, *tmp;
+ struct virtio_gpu_virgl_context_fence *f;
+ VirtIOGPU *g = opaque;
+ VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
+
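+ /* atomically take the queued fences; callbacks may push more meanwhile */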
+ QSLIST_MOVE_ATOMIC(&async_fenceq, &gl->async_fenceq);
+
+ while (!QSLIST_EMPTY(&async_fenceq)) {
+ f = QSLIST_FIRST(&async_fenceq);
+
+ QSLIST_REMOVE_HEAD(&async_fenceq, next);
+
+ QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
+ /*
+ * The guest can end up emitting fences out of order, so we
+ * should check all fenced cmds, not just the first one.
+ */
+ if (cmd->cmd_hdr.fence_id > f->fence_id) {
+ continue;
+ }
+ if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX) {
+ if (cmd->cmd_hdr.ring_idx != f->ring_idx) {
+ continue;
+ }
+ if (cmd->cmd_hdr.ctx_id != f->ctx_id) {
+ continue;
+ }
+ } else if (f->ring_idx >= 0) {
+ /*
+ * ctx0 GL-query fences don't have ring info and thus
+ * can't be completed by a per-ring context fence.
+ */
+ continue;
+ }
+ virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
+ QTAILQ_REMOVE(&g->fenceq, cmd, next);
+ g_free(cmd);
+ }
+
+ trace_virtio_gpu_fence_resp(f->fence_id);
+ g_free(f);
+ g->inflight--;
+ if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
+ trace_virtio_gpu_dec_inflight_fences(g->inflight);
+ }
+ }
+}
+
+static void
+virtio_gpu_virgl_push_async_fence(VirtIOGPU *g, uint32_t ctx_id,
+ int64_t ring_idx, uint64_t fence_id)
+{
+ struct virtio_gpu_virgl_context_fence *f;
+ VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
+
+ f = g_new(struct virtio_gpu_virgl_context_fence, 1);
+ f->ctx_id = ctx_id;
+ f->ring_idx = ring_idx;
+ f->fence_id = fence_id;
+
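+ /*
+ * The fence callbacks may run outside the main loop when
+ * VIRGL_RENDERER_ASYNC_FENCE_CB is enabled, hence the atomic
+ * insertion; the BH then completes the fence on the main loop.
+ */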
+ QSLIST_INSERT_HEAD_ATOMIC(&gl->async_fenceq, f, next);
+
+ qemu_bh_schedule(gl->async_fence_bh);
+}
+
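+/* fence callback for ctx0 fences, which carry no ring information */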
+static void virgl_write_async_fence(void *opaque, uint32_t fence)
+{
+ VirtIOGPU *g = opaque;
+
+ virtio_gpu_virgl_push_async_fence(g, 0, -1, fence);
+}
+
+static void virgl_write_async_context_fence(void *opaque, uint32_t ctx_id,
+ uint32_t ring_idx, uint64_t fence)
+{
+ VirtIOGPU *g = opaque;
+
+ virtio_gpu_virgl_push_async_fence(g, ctx_id, ring_idx, fence);
+}
+#endif
+
static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
struct virgl_renderer_gl_ctx_param *params)
@@ -1095,6 +1206,8 @@ void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g)
dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
}
+
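+ /* pending fence completions are stale once the device is reset */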
+ virtio_gpu_virgl_reset_async_fences(g);
}
void virtio_gpu_virgl_reset(VirtIOGPU *g)
@@ -1112,6 +1225,13 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
if (qemu_egl_display) {
virtio_gpu_3d_cbs.version = 4;
virtio_gpu_3d_cbs.get_egl_display = virgl_get_egl_display;
+#if VIRGL_VERSION_MAJOR >= 1
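+ /*
+ * Enable asynchronous fencing: virglrenderer signals fence
+ * completion from its thread and the async_fence_bh finishes
+ * the fenced commands on the main loop.
+ */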
+ virtio_gpu_3d_cbs.write_fence = virgl_write_async_fence;
+ virtio_gpu_3d_cbs.write_context_fence = virgl_write_async_context_fence;
+ flags |= VIRGL_RENDERER_ASYNC_FENCE_CB;
+ flags |= VIRGL_RENDERER_THREAD_SYNC;
+ gl->context_fence_enabled = true;
+#endif
}
#endif
#ifdef VIRGL_RENDERER_D3D11_SHARE_TEXTURE
@@ -1145,6 +1265,10 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
gl->cmdq_resume_bh = aio_bh_new(qemu_get_aio_context(),
virtio_gpu_virgl_resume_cmdq_bh,
g);
+
+ gl->async_fence_bh = aio_bh_new(qemu_get_aio_context(),
+ virtio_gpu_virgl_async_fence_bh,
+ g);
#endif
return 0;
@@ -230,6 +230,13 @@ struct VirtIOGPUClass {
Error **errp);
};
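+/* a fence signalled by virglrenderer, queued for completion on the main loop */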
+struct virtio_gpu_virgl_context_fence {
+ uint32_t ctx_id;
+ int64_t ring_idx;
+ uint64_t fence_id;
+ QSLIST_ENTRY(virtio_gpu_virgl_context_fence) next;
+};
+
/* VirtIOGPUGL renderer states */
typedef enum {
RS_START, /* starting state */
@@ -247,6 +254,11 @@ struct VirtIOGPUGL {
QEMUTimer *print_stats;
QEMUBH *cmdq_resume_bh;
+
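+ /* async fence completion, see virtio_gpu_virgl_async_fence_bh() */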
+ QEMUBH *async_fence_bh;
+ QSLIST_HEAD(, virtio_gpu_virgl_context_fence) async_fenceq;
+
+ bool context_fence_enabled;
};
struct VhostUserGPU {
@@ -376,5 +388,6 @@ void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g);
void virtio_gpu_virgl_reset(VirtIOGPU *g);
int virtio_gpu_virgl_init(VirtIOGPU *g);
GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g);
+void virtio_gpu_virgl_reset_async_fences(VirtIOGPU *g);
#endif