@@ -42,6 +42,7 @@ virtio_gpu_cmd_get_edid(uint32_t scanout) "scanout %d"
virtio_gpu_cmd_set_scanout(uint32_t id, uint32_t res, uint32_t w, uint32_t h, uint32_t x, uint32_t y) "id %d, res 0x%x, w %d, h %d, x %d, y %d"
virtio_gpu_cmd_res_create_2d(uint32_t res, uint32_t fmt, uint32_t w, uint32_t h) "res 0x%x, fmt 0x%x, w %d, h %d"
virtio_gpu_cmd_res_create_3d(uint32_t res, uint32_t fmt, uint32_t w, uint32_t h, uint32_t d) "res 0x%x, fmt 0x%x, w %d, h %d, d %d"
+virtio_gpu_cmd_res_create_blob(uint32_t res, uint64_t size) "res 0x%x, size %" PRId64
virtio_gpu_cmd_res_unref(uint32_t res) "res 0x%x"
virtio_gpu_cmd_res_back_attach(uint32_t res) "res 0x%x"
virtio_gpu_cmd_res_back_detach(uint32_t res) "res 0x%x"
@@ -72,7 +72,10 @@ static void virtio_gpu_create_udmabuf(struct virtio_gpu_simple_resource *res)
static void virtio_gpu_remap_udmabuf(struct virtio_gpu_simple_resource *res)
{
-    res->remapsz = res->width * res->height * 4;
+    if (res->blob_size) {
+        res->remapsz = res->blob_size;
+    } else {
+        /* Non-blob resources keep the previous 32bpp estimate. */
+        res->remapsz = res->width * res->height * 4;
+    }
+
res->remapsz = QEMU_ALIGN_UP(res->remapsz, getpagesize());
res->remapped = mmap(NULL, res->remapsz, PROT_READ,
@@ -151,7 +154,9 @@ void virtio_gpu_init_udmabuf(struct virtio_gpu_simple_resource *res)
pdata = res->remapped;
}
- (void) pdata;
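+    /* Publish the mapped blob as the resource's backing storage. */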
+ if (pdata) {
+ res->blob = pdata;
+ }
}
void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res)
@@ -182,7 +182,7 @@ virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
}
if (require_backing) {
- if (!res->iov || !res->image) {
+ if (!res->iov || (!res->image && !res->blob)) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
caller, resource_id);
if (error) {
@@ -358,6 +358,63 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
g->hostmem += res->hostmem;
}
+static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_simple_resource *res;
+ struct virtio_gpu_resource_create_blob cblob;
+ int ret;
+
+ VIRTIO_GPU_FILL_CMD(cblob);
+ virtio_gpu_create_blob_bswap(&cblob);
+ trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);
+
+ if (cblob.resource_id == 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
+ __func__);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = virtio_gpu_find_resource(g, cblob.resource_id);
+ if (res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
+ __func__, cblob.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = g_new0(struct virtio_gpu_simple_resource, 1);
+ res->resource_id = cblob.resource_id;
+ res->blob_size = cblob.size;
+
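+    /* Only guest-backed or shareable blobs are accepted here. */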
+ if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
+ cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
+ __func__);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ g_free(res);
+ return;
+ }
+
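+    /* Map the guest pages described by the trailing mem entries. */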
+ ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
+ cmd, &res->addrs, &res->iov);
+    if (ret != 0) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        g_free(res);
+        return;
+    }
+
+ res->iov_cnt = cblob.nr_entries;
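+    /* udmabuf gives the scattered guest pages one linear host mapping. */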
+ virtio_gpu_init_udmabuf(res);
+
+ QTAILQ_INSERT_HEAD(&g->reslist, res, next);
+}
+
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
@@ -435,7 +492,7 @@ static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
__func__, &cmd->error);
- if (!res) {
+ if (!res || res->blob) {
return;
}
@@ -491,7 +548,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
__func__, &cmd->error);
- if (!res) {
+ if (!res || res->blob) {
return;
}
@@ -743,6 +800,10 @@ static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
res->iov_cnt = 0;
g_free(res->addrs);
res->addrs = NULL;
+
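+    /* Blob resources also own a udmabuf mapping that must be torn down. */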
+ if (res->blob) {
+ virtio_gpu_fini_udmabuf(res);
+ }
}
static void
@@ -815,6 +876,13 @@ static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
virtio_gpu_resource_create_2d(g, cmd);
break;
+ case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
+ if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ break;
+ }
+ virtio_gpu_resource_create_blob(g, cmd);
+ break;
case VIRTIO_GPU_CMD_RESOURCE_UNREF:
virtio_gpu_resource_unref(g, cmd);
break;
@@ -59,4 +59,13 @@ virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
le32_to_cpus(&t2d->padding);
}
+static inline void
+virtio_gpu_create_blob_bswap(struct virtio_gpu_resource_create_blob *cblob)
+{
+ virtio_gpu_ctrl_hdr_bswap(&cblob->hdr);
+    le32_to_cpus(&cblob->resource_id);
+    le32_to_cpus(&cblob->blob_mem);
+    le32_to_cpus(&cblob->blob_flags);
+    le32_to_cpus(&cblob->nr_entries);
+    le64_to_cpus(&cblob->size);
+}
+
#endif
@@ -48,6 +48,8 @@ struct virtio_gpu_simple_resource {
pixman_image_t *image;
uint64_t hostmem;
+    uint64_t blob_size;  /* size of the blob backing storage, in bytes */
+    void *blob;          /* host mapping of that backing storage */
int dmabuf_fd;
size_t remapsz;
uint8_t *remapped;
This API allows QEMU to register the blob allocated by the Guest as
a new resource and map its backing storage.

Based-on-patch-by: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
---
 hw/display/trace-events              |  1 +
 hw/display/virtio-gpu-udmabuf.c      |  9 +++-
 hw/display/virtio-gpu.c              | 74 ++++++++++++++++++++++++++--
 include/hw/virtio/virtio-gpu-bswap.h |  9 ++++
 include/hw/virtio/virtio-gpu.h       |  2 +
 5 files changed, 90 insertions(+), 5 deletions(-)
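
For context, the sketch below shows how a guest driver might assemble the
VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB request that this handler parses. It is
illustrative only: structure layouts and constants are assumed to come from a
recent <linux/virtio_gpu.h>, create_guest_blob() and submit_ctrl_cmd() are
hypothetical helpers, and cpu_to_le*() conversion of the little-endian fields
is omitted for brevity.

/*
 * Illustrative guest-side sketch (not part of this patch): assembling a
 * RESOURCE_CREATE_BLOB request for a guest-allocated, shareable blob.
 */
#include <stdint.h>
#include <string.h>
#include <linux/virtio_gpu.h>

/* Hypothetical: queue the request followed by its mem-entry payload. */
void submit_ctrl_cmd(const void *req, size_t req_len,
                     const void *payload, size_t payload_len);

static void create_guest_blob(uint32_t resource_id,
                              const struct virtio_gpu_mem_entry *ents,
                              uint32_t nr_entries, uint64_t size)
{
    struct virtio_gpu_resource_create_blob cblob;

    memset(&cblob, 0, sizeof(cblob));
    cblob.hdr.type    = VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB;
    cblob.resource_id = resource_id;                /* must be non-zero */
    cblob.blob_mem    = VIRTIO_GPU_BLOB_MEM_GUEST;  /* guest-backed pages */
    cblob.blob_flags  = VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE;
    cblob.nr_entries  = nr_entries;                 /* scatter-gather list */
    cblob.size        = size;                       /* total blob size */

    /*
     * The mem entries follow the request in the same descriptor chain;
     * on the host they are parsed by virtio_gpu_create_mapping_iov().
     */
    submit_ctrl_cmd(&cblob, sizeof(cblob),
                    ents, nr_entries * sizeof(*ents));
}

On the device side, the new handler rejects resource id 0, duplicate ids and
memory types other than guest-backed/shareable before pinning the pages and
wrapping them in a udmabuf.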