diff mbox

[RFC,14/14] Add virtio-gpu vhost-user backend

Message ID 1465076003-26291-15-git-send-email-marcandre.lureau@redhat.com (mailing list archive)
State New, archived
Headers show

Commit Message

Marc-André Lureau June 4, 2016, 9:33 p.m. UTC
From: Marc-André Lureau <marcandre.lureau@redhat.com>

Add to virtio-gpu devices a "vhost-user" property. When set, the
associated vhost-user backend is used to handle the virtio rings.

For now, a socketpair is created for the backend to share the rendering
results with qemu via a simple VHOST_GPU protocol.

Example usage:
-object vhost-user-backend,id=vug,cmd="./vhost-user-gpu"
-device virtio-vga,virgl=true,vhost-user=vug

Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
---
 hw/display/Makefile.objs       |   2 +-
 hw/display/vhost-gpu.c         | 264 +++++++++++++++++++++++++++++++++++++++++
 hw/display/virtio-gpu-pci.c    |   6 +
 hw/display/virtio-gpu.c        |  75 +++++++++++-
 hw/display/virtio-vga.c        |   5 +
 include/hw/virtio/virtio-gpu.h |   7 ++
 6 files changed, 356 insertions(+), 3 deletions(-)
 create mode 100644 hw/display/vhost-gpu.c

Comments

Gerd Hoffmann June 6, 2016, 6:54 a.m. UTC | #1
On Sa, 2016-06-04 at 23:33 +0200, marcandre.lureau@redhat.com wrote:
> From: Marc-André Lureau <marcandre.lureau@redhat.com>
> 
> Add to virtio-gpu devices a "vhost-user" property. When set, the
> associated vhost-user backend is used to handle the virtio rings.
> 
> For now, a socketpair is created for the backend to share the rendering
> results with qemu via a simple VHOST_GPU protocol.

Can you give a design overview?

On a first look this seems to not share much code with virtio-gpu
either, so I guess it makes sense to put this into a separate
virtio-gpu-vhost device too.

cheers,
  Gerd
diff mbox

Patch

diff --git a/hw/display/Makefile.objs b/hw/display/Makefile.objs
index d99780e..f889730 100644
--- a/hw/display/Makefile.objs
+++ b/hw/display/Makefile.objs
@@ -36,7 +36,7 @@  obj-$(CONFIG_VGA) += vga.o
 
 common-obj-$(CONFIG_QXL) += qxl.o qxl-logger.o qxl-render.o
 
-obj-$(CONFIG_VIRTIO) += virtio-gpu.o virtio-gpu-3d.o
+obj-$(CONFIG_VIRTIO) += virtio-gpu.o virtio-gpu-3d.o vhost-gpu.o
 obj-$(CONFIG_VIRTIO_PCI) += virtio-gpu-pci.o
 obj-$(CONFIG_VIRTIO_VGA) += virtio-vga.o
 virtio-gpu.o-cflags := $(VIRGL_CFLAGS)
diff --git a/hw/display/vhost-gpu.c b/hw/display/vhost-gpu.c
new file mode 100644
index 0000000..9dc8b13
--- /dev/null
+++ b/hw/display/vhost-gpu.c
@@ -0,0 +1,264 @@ 
+/*
+ * Virtio vhost GPU Device
+ *
+ * Copyright Red Hat, Inc. 2016
+ *
+ * Authors:
+ *     Marc-André Lureau <marcandre.lureau@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/virtio/virtio-gpu.h"
+#include "sysemu/char.h"
+
+/*
+ * VHOST_GPU wire protocol: messages sent by the vhost-user backend to
+ * qemu over the private socketpair created in vhost_gpu_init(), so the
+ * backend can report rendering results (scanouts, updates, cursor).
+ */
+typedef enum VhostGpuRequest {
+    VHOST_GPU_NONE = 0,
+    VHOST_GPU_CURSOR_POS,
+    VHOST_GPU_CURSOR_POS_HIDE,
+    VHOST_GPU_CURSOR_UPDATE,
+    VHOST_GPU_SCANOUT,
+    VHOST_GPU_UPDATE,
+    VHOST_GPU_GL_SCANOUT,
+    VHOST_GPU_GL_UPDATE,
+} VhostGpuRequest;
+
+/* Cursor position on a given scanout; also the leading member of
+ * VhostGpuCursorUpdate, which vhost_gpu_handle_cursor() relies on. */
+typedef struct VhostGpuCursorPos {
+    uint32_t scanout_id;
+    uint32_t x;
+    uint32_t y;
+} VhostGpuCursorPos;
+
+/* Full cursor redefinition: position, hotspot and a fixed 64x64
+ * 32-bit-per-pixel image. */
+typedef struct VhostGpuCursorUpdate {
+    VhostGpuCursorPos pos;
+    uint32_t hot_x;
+    uint32_t hot_y;
+    uint32_t data[64 * 64];
+} VhostGpuCursorUpdate;
+
+/* Non-GL scanout (re)configuration: new surface dimensions. */
+typedef struct VhostGpuScanout {
+    uint32_t scanout_id;
+    uint32_t width;
+    uint32_t height;
+} VhostGpuScanout;
+
+/* GL scanout configuration; the dma-buf fd itself travels out-of-band
+ * as an SCM_RIGHTS ancillary fd (see qemu_chr_fe_get_msgfd caller). */
+typedef struct VhostGpuGlScanout {
+    uint32_t scanout_id;
+    uint32_t x;
+    uint32_t y;
+    uint32_t width;
+    uint32_t height;
+    uint32_t fd_width;
+    uint32_t fd_height;
+    uint32_t fd_stride;
+    uint32_t fd_flags;
+    int fd_drm_fourcc;
+} VhostGpuGlScanout;
+
+/* Damage rectangle; for VHOST_GPU_UPDATE the raw pixel data follows
+ * in the flexible array member. */
+typedef struct VhostGpuUpdate {
+    uint32_t scanout_id;
+    uint32_t x;
+    uint32_t y;
+    uint32_t width;
+    uint32_t height;
+    uint8_t data[];
+} VhostGpuUpdate;
+
+/* On-the-wire message: request + payload size header, then payload. */
+typedef struct VhostGpuMsg {
+    VhostGpuRequest request;
+    uint32_t size; /* the following payload size */
+    union {
+        VhostGpuCursorPos cursor_pos;
+        VhostGpuCursorUpdate cursor_update;
+        VhostGpuScanout scanout;
+        VhostGpuUpdate update;
+        VhostGpuGlScanout gl_scanout;
+    } payload;
+} QEMU_PACKED VhostGpuMsg;
+
+/* Dummy instance used only to size the fixed header (request + size). */
+static VhostGpuMsg m __attribute__ ((unused));
+#define VHOST_GPU_HDR_SIZE (sizeof(m.request) + sizeof(m.size))
+
+/*
+ * Handle a cursor message (VHOST_GPU_CURSOR_POS, _POS_HIDE or
+ * _CURSOR_UPDATE) from the backend.  Messages naming a scanout id
+ * outside [0, max_outputs) are silently dropped.
+ *
+ * Reading payload.cursor_pos for all three requests is valid because
+ * VhostGpuCursorPos is the first member of VhostGpuCursorUpdate.
+ */
+static void vhost_gpu_handle_cursor(VirtIOGPU *g, VhostGpuMsg *msg)
+{
+    VhostGpuCursorPos *pos = &msg->payload.cursor_pos;
+    struct virtio_gpu_scanout *s;
+
+    if (pos->scanout_id >= g->conf.max_outputs) {
+        return;
+    }
+    s = &g->scanout[pos->scanout_id];
+
+    if (msg->request == VHOST_GPU_CURSOR_UPDATE) {
+        VhostGpuCursorUpdate *up = &msg->payload.cursor_update;
+        if (!s->current_cursor) {
+            /* protocol always carries a fixed 64x64 cursor image */
+            s->current_cursor = cursor_alloc(64, 64);
+        }
+
+        s->current_cursor->hot_x = up->hot_x;
+        s->current_cursor->hot_y = up->hot_y;
+
+        memcpy(s->current_cursor->data, up->data,
+               64 * 64 * sizeof(uint32_t));
+
+        dpy_cursor_define(s->con, s->current_cursor);
+    }
+
+    /* every request except POS_HIDE leaves the cursor visible */
+    dpy_mouse_set(s->con, pos->x, pos->y,
+                  msg->request != VHOST_GPU_CURSOR_POS_HIDE);
+}
+
+/*
+ * Handle a display message from the backend: scanout configuration
+ * ((GL_)SCANOUT) or damage reporting ((GL_)UPDATE).
+ *
+ * NOTE(review): width/height/x/y come from the backend process and are
+ * passed to the display layer without range validation — confirm the
+ * backend is trusted, or validate here.
+ */
+static void vhost_gpu_handle_display(VirtIOGPU *g, VhostGpuMsg *msg)
+{
+    struct virtio_gpu_scanout *s;
+
+    switch (msg->request) {
+    case VHOST_GPU_SCANOUT: {
+        VhostGpuScanout *m = &msg->payload.scanout;
+
+        if (m->scanout_id >= g->conf.max_outputs) {
+            return;
+        }
+        s = &g->scanout[m->scanout_id];
+
+        /* allocate a fresh surface; the display layer takes ownership
+         * of the previous one via dpy_gfx_replace_surface */
+        s->ds = qemu_create_displaysurface(m->width, m->height);
+        if (!s->ds) {
+            return;
+        }
+
+        dpy_gfx_replace_surface(s->con, s->ds);
+        break;
+    }
+    case VHOST_GPU_GL_SCANOUT: {
+        VhostGpuGlScanout *m = &msg->payload.gl_scanout;
+        /* dma-buf fd arrives as SCM_RIGHTS ancillary data; NOTE(review):
+         * a missing fd (-1) is not checked before use/close */
+        int fd = qemu_chr_fe_get_msgfd(g->vhost_chr);
+
+        if (m->scanout_id >= g->conf.max_outputs) {
+            close(fd);
+            break;
+        }
+
+        g->enable = 1;
+        dpy_gl_scanout2(g->scanout[m->scanout_id].con, fd,
+                        m->fd_flags & 1 /* FIXME: Y_0_TOP */,
+                        m->x, m->y, m->width, m->height,
+                        m->fd_width, m->fd_height, m->fd_stride,
+                        m->fd_drm_fourcc);
+        break;
+    }
+    case VHOST_GPU_GL_UPDATE: {
+        VhostGpuUpdate *m = &msg->payload.update;
+
+        if (m->scanout_id >= g->conf.max_outputs ||
+            !g->scanout[m->scanout_id].con) {
+            break;
+        }
+
+        /* GL path: only flush the damaged rectangle, no pixel data */
+        dpy_gl_update(g->scanout[m->scanout_id].con,
+                      m->x, m->y, m->width, m->height);
+        break;
+    }
+    case VHOST_GPU_UPDATE: {
+        VhostGpuUpdate *m = &msg->payload.update;
+
+        if (m->scanout_id >= g->conf.max_outputs) {
+            break;
+        }
+        s = &g->scanout[m->scanout_id];
+
+        /* wrap the inline pixel data (x8r8g8b8, stride = width*4);
+         * NOTE(review): msg->size is not checked to cover
+         * width*height*4 bytes — verify against the read path */
+        pixman_image_t *image =
+            pixman_image_create_bits(PIXMAN_x8r8g8b8,
+                                     m->width,
+                                     m->height,
+                                     (uint32_t *)m->data,
+                                     m->width * 4);
+
+        /* blit the damaged region into the scanout surface */
+        pixman_image_composite(PIXMAN_OP_SRC,
+                               image, NULL, s->ds->image,
+                               0, 0, 0, 0, m->x, m->y, m->width, m->height);
+
+        pixman_image_unref(image);
+        dpy_gfx_update(s->con, m->x, m->y, m->width, m->height);
+        break;
+    }
+    default:
+        g_warn_if_reached();
+    }
+}
+
+/*
+ * fd handler for the backend socketpair: read one VhostGpuMsg
+ * (request, payload size, payload) and dispatch it to the cursor or
+ * display handler.
+ *
+ * NOTE(review): 'size' comes from the peer and is used unvalidated in
+ * g_malloc() — an unbounded allocation driven by the backend; consider
+ * capping it at sizeof(VhostGpuMsg.payload) or similar.
+ */
+static void vhost_gpu_chr_read(void *opaque)
+{
+    VirtIOGPU *g = opaque;
+    VhostGpuMsg *msg = NULL;
+    VhostGpuRequest request;
+    uint32_t size;
+    int r;
+
+    r = qemu_chr_fe_read_all(g->vhost_chr,
+                             (uint8_t *)&request, sizeof(uint32_t));
+    if (r != sizeof(uint32_t)) {
+        error_report("failed to read msg header");
+        goto end;
+    }
+
+    r = qemu_chr_fe_read_all(g->vhost_chr,
+                             (uint8_t *)&size, sizeof(uint32_t));
+    if (r != sizeof(uint32_t)) {
+        error_report("failed to read msg size");
+        goto end;
+    }
+
+    msg = g_malloc(VHOST_GPU_HDR_SIZE + size);
+    /* NOTE(review): dead check — g_malloc() aborts on failure and
+     * never returns NULL */
+    g_return_if_fail(msg != NULL);
+
+    /* NOTE(review): signed/unsigned comparison (int r vs uint32_t size) */
+    r = qemu_chr_fe_read_all(g->vhost_chr,
+                             (uint8_t *)&msg->payload, size);
+    if (r != size) {
+        error_report("failed to read msg payload %d != %d", r, size);
+        goto end;
+    }
+
+    msg->request = request;
+    msg->size = size;
+
+    /* cursor messages go to the cursor handler, everything else to
+     * the display handler */
+    if (request == VHOST_GPU_CURSOR_UPDATE ||
+        request == VHOST_GPU_CURSOR_POS ||
+        request == VHOST_GPU_CURSOR_POS_HIDE) {
+        vhost_gpu_handle_cursor(g, msg);
+    } else {
+        vhost_gpu_handle_display(g, msg);
+    }
+
+end:
+    g_free(msg);
+}
+
+/*
+ * Initialize the vhost-user GPU backend: set up the vhost device with
+ * 2 virtqueues, then create a socketpair over which the backend sends
+ * VHOST_GPU messages back to qemu (read by vhost_gpu_chr_read).
+ *
+ * Returns 0 on success, -1 on error (errp set).
+ *
+ * NOTE(review): sv[0]/sv[1] are leaked if qemu_chr_open_socket()
+ * fails — consider closing them on that error path.
+ */
+int vhost_gpu_init(VirtIOGPU *g, Error **errp)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(g);
+    int sv[2];
+
+    if (vhost_user_backend_dev_init(g->vhost, vdev, 2, errp) < 0) {
+        return -1;
+    }
+
+    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
+        error_setg_errno(errp, errno, "socketpair() failed");
+        return -1;
+    }
+
+    /* qemu keeps sv[0]; wrap it in a chardev for reads/writes */
+    g->vhost_chr = qemu_chr_open_socket(sv[0], errp);
+    if (!g->vhost_chr) {
+        return -1;
+    }
+
+    qemu_set_fd_handler(sv[0], vhost_gpu_chr_read, NULL, g);
+
+    /* hand sv[1] to the backend over the vhost-user channel
+     * (presumably dup'ed via SCM_RIGHTS — safe to close locally) */
+    vhost_user_gpu_set_socket(&g->vhost->dev, sv[1]);
+
+    close(sv[1]);
+
+    return 0;
+}
diff --git a/hw/display/virtio-gpu-pci.c b/hw/display/virtio-gpu-pci.c
index a71b230..2331d87 100644
--- a/hw/display/virtio-gpu-pci.c
+++ b/hw/display/virtio-gpu-pci.c
@@ -16,6 +16,7 @@ 
 #include "hw/virtio/virtio-bus.h"
 #include "hw/virtio/virtio-pci.h"
 #include "hw/virtio/virtio-gpu.h"
+#include "qapi/error.h"
 
 static Property virtio_gpu_pci_properties[] = {
     DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy),
@@ -60,6 +61,11 @@  static void virtio_gpu_initfn(Object *obj)
 
     virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                 TYPE_VIRTIO_GPU);
+
+    /* could eventually be included in qdev_alias_all_properties? */
+    object_property_add_alias(obj, "vhost-user",
+                              OBJECT(&dev->vdev), "vhost-user",
+                              &error_abort);
 }
 
 static const TypeInfo virtio_gpu_pci_info = {
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index f3b0f14..b92f493 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -21,6 +21,8 @@ 
 #include "hw/virtio/virtio-bus.h"
 #include "qemu/log.h"
 #include "qapi/error.h"
+#include "sysemu/char.h"
+#include "qemu/error-report.h"
 
 static struct virtio_gpu_simple_resource*
 virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
@@ -905,7 +907,12 @@  static void virtio_gpu_gl_block(void *opaque, bool block)
 
     g->renderer_blocked = block;
     if (!block) {
-        virtio_gpu_process_cmdq(g);
+        if (g->vhost_chr) {
+            uint32_t ok;
+            qemu_chr_fe_write(g->vhost_chr, (uint8_t *)&ok, sizeof(ok));
+        } else {
+            virtio_gpu_process_cmdq(g);
+        }
     }
 }
 
@@ -962,6 +969,10 @@  static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
         g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
     }
 
+    if (g->vhost && vhost_gpu_init(g, errp) < 0) {
+        return;
+    }
+
     g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
     g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
     QTAILQ_INIT(&g->reslist);
@@ -982,8 +993,27 @@  static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
     vmstate_register(qdev, -1, &vmstate_virtio_gpu_unmigratable, g);
 }
 
+/*
+ * Link-property check callback for "vhost-user": reject setting the
+ * link when a backend is already attached, otherwise defer to the
+ * standard before-realize check.
+ */
+static void virtio_gpu_host_user_is_busy(Object *obj, const char *name,
+                                         Object *val, Error **errp)
+{
+    VirtIOGPU *g = VIRTIO_GPU(obj);
+
+    if (g->vhost) {
+        error_setg(errp, "can't use already busy vhost-user");
+    } else {
+        qdev_prop_allow_set_link_before_realize(obj, name, val, errp);
+    }
+}
+
 static void virtio_gpu_instance_init(Object *obj)
 {
+    VirtIOGPU *g = VIRTIO_GPU(obj);
+
+    /* expose the optional "vhost-user" link property; the check
+     * callback forbids replacing an already-set backend */
+    object_property_add_link(obj, "vhost-user", TYPE_VHOST_USER_BACKEND,
+                             (Object **)&g->vhost,
+                             virtio_gpu_host_user_is_busy,
+                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
+                             &error_abort);
 }
 
 static void virtio_gpu_reset(VirtIODevice *vdev)
@@ -993,7 +1023,9 @@  static void virtio_gpu_reset(VirtIODevice *vdev)
     int i;
 
     g->enable = 0;
-
+    if (g->vhost) {
+        vhost_user_backend_stop(g->vhost);
+    }
     QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
         virtio_gpu_resource_destroy(g, res);
     }
@@ -1026,6 +1058,42 @@  static void virtio_gpu_reset(VirtIODevice *vdev)
 #endif
 }
 
+/*
+ * Start/stop the vhost-user backend as the guest driver toggles
+ * DRIVER_OK in the virtio device status; no-op without a backend.
+ */
+static void virtio_gpu_set_status(VirtIODevice *vdev, uint8_t val)
+{
+    VirtIOGPU *g = VIRTIO_GPU(vdev);
+
+    if (g->vhost) {
+        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
+            vhost_user_backend_start(g->vhost);
+        } else {
+            vhost_user_backend_stop(g->vhost);
+        }
+    }
+}
+
+/*
+ * Forward guest-notifier pending queries to the vhost device; without
+ * a vhost backend there is never a pending vhost notification.
+ */
+static bool virtio_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
+{
+    VirtIOGPU *g = VIRTIO_GPU(vdev);
+
+    if (!g->vhost) {
+        return false;
+    }
+
+    return vhost_virtqueue_pending(&g->vhost->dev, idx);
+}
+
+/*
+ * Forward guest-notifier masking to the vhost device; no-op without
+ * a vhost backend.
+ */
+static void virtio_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx,
+                                           bool mask)
+{
+    VirtIOGPU *g = VIRTIO_GPU(vdev);
+
+    if (!g->vhost) {
+        return;
+    }
+
+    vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
+}
+
 static Property virtio_gpu_properties[] = {
     DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
 #ifdef CONFIG_VIRGL
@@ -1047,6 +1115,9 @@  static void virtio_gpu_class_init(ObjectClass *klass, void *data)
     vdc->set_config = virtio_gpu_set_config;
     vdc->get_features = virtio_gpu_get_features;
     vdc->set_features = virtio_gpu_set_features;
+    vdc->set_status   = virtio_gpu_set_status;
+    vdc->guest_notifier_mask = virtio_gpu_guest_notifier_mask;
+    vdc->guest_notifier_pending = virtio_gpu_guest_notifier_pending;
 
     vdc->reset = virtio_gpu_reset;
 
diff --git a/hw/display/virtio-vga.c b/hw/display/virtio-vga.c
index f49f8de..6b233bb 100644
--- a/hw/display/virtio-vga.c
+++ b/hw/display/virtio-vga.c
@@ -181,6 +181,11 @@  static void virtio_vga_inst_initfn(Object *obj)
 
     virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                 TYPE_VIRTIO_GPU);
+
+    /* could eventually be included in qdev_alias_all_properties? */
+    object_property_add_alias(obj, "vhost-user",
+                              OBJECT(&dev->vdev), "vhost-user",
+                              &error_abort);
 }
 
 static TypeInfo virtio_vga_info = {
diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index 0cc8e67..a1e9fe5 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -19,6 +19,7 @@ 
 #include "ui/console.h"
 #include "hw/virtio/virtio.h"
 #include "hw/pci/pci.h"
+#include "sysemu/vhost-user-backend.h"
 
 #include "standard-headers/linux/virtio_gpu.h"
 #define TYPE_VIRTIO_GPU "virtio-gpu-device"
@@ -82,6 +83,9 @@  struct virtio_gpu_ctrl_command {
 typedef struct VirtIOGPU {
     VirtIODevice parent_obj;
 
+    VhostUserBackend *vhost;
+    CharDriverState *vhost_chr;
+
     QEMUBH *ctrl_bh;
     QEMUBH *cursor_bh;
     VirtQueue *ctrl_vq;
@@ -161,4 +165,7 @@  void virtio_gpu_virgl_fence_poll(VirtIOGPU *g);
 void virtio_gpu_virgl_reset(VirtIOGPU *g);
 int virtio_gpu_virgl_init(VirtIOGPU *g);
 
+/* vhost-gpu.c */
+int vhost_gpu_init(VirtIOGPU *g, Error **errp);
+
 #endif