
[v6,05/11] virtio-gpu: Introduce virgl_gpu_resource structure

Message ID 20231219075320.165227-6-ray.huang@amd.com (mailing list archive)
State New, archived
Series Support blob memory and venus on qemu

Commit Message

Huang Rui Dec. 19, 2023, 7:53 a.m. UTC
Introduce a new virgl_gpu_resource data structure and helper functions
for virgl. It will be used to add virgl-specific members in the following
patches for blob memory support.

Signed-off-by: Huang Rui <ray.huang@amd.com>
---

New patch:
- Introduce a new struct virgl_gpu_resource to store virgl-specific members.
- Move resource initialization from the patch "virtio-gpu: Resource UUID" here.
- Remove error handling for g_new0(), because glib will abort() on OOM.
- Set iov and iov_cnt in struct virtio_gpu_simple_resource for all types
  of resources.

 hw/display/virtio-gpu-virgl.c | 84 ++++++++++++++++++++++++++---------
 1 file changed, 64 insertions(+), 20 deletions(-)

Comments

Akihiko Odaki Dec. 19, 2023, 12:35 p.m. UTC | #1
On 2023/12/19 16:53, Huang Rui wrote:
> Introduce a new virgl_gpu_resource data structure and helper functions
> for virgl. It will be used to add virgl-specific members in the following
> patches for blob memory support.

The name is ambiguous. It should indicate that it is specific to virgl.
Huang Rui Dec. 19, 2023, 1:27 p.m. UTC | #2
On Tue, Dec 19, 2023 at 08:35:27PM +0800, Akihiko Odaki wrote:
> On 2023/12/19 16:53, Huang Rui wrote:
> > Introduce a new virgl_gpu_resource data structure and helper functions
> > for virgl. It will be used to add virgl-specific members in the following
> > patches for blob memory support.
> 
> The name is ambiguous. It should indicate that it is specific to virgl.

How about "virgl_resource" which inherits virtio_gpu_simple_resource? But
this name is exactly same with the name in virglrenderer.

Thanks,
Ray
Akihiko Odaki Dec. 21, 2023, 5:43 a.m. UTC | #3
On 2023/12/19 22:27, Huang Rui wrote:
> On Tue, Dec 19, 2023 at 08:35:27PM +0800, Akihiko Odaki wrote:
>> On 2023/12/19 16:53, Huang Rui wrote:
>>> Introduce a new virgl_gpu_resource data structure and helper functions
>>> for virgl. It will be used to add virgl-specific members in the following
>>> patches for blob memory support.
>>
>> The name is ambiguous. It should indicate that it is specific to virgl.
> 
> How about "virgl_resource" which inherits virtio_gpu_simple_resource? But
> this name is exactly same with the name in virglrenderer.

You can prefix it with virtio_gpu_virgl as virtio_gpu_virgl_init() and 
other functions do.
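
For illustration, a minimal sketch of the struct and lookup helper from this
patch under that naming convention (the virtio_gpu_virgl_* identifiers below
are only the suggested spelling, not code from the series):

struct virtio_gpu_virgl_resource {
    struct virtio_gpu_simple_resource res;
};

static struct virtio_gpu_virgl_resource *
virtio_gpu_virgl_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    /* Reuse the generic lookup, then recover the wrapping struct. */
    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return NULL;
    }

    return container_of(res, struct virtio_gpu_virgl_resource, res);
}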

Regards,
Akihiko Odaki
Marc-André Lureau Jan. 2, 2024, 11:52 a.m. UTC | #4
Hi

On Tue, Dec 19, 2023 at 11:55 AM Huang Rui <ray.huang@amd.com> wrote:
>
> Introduce a new virgl_gpu_resource data structure and helper functions
> for virgl. It will be used to add virgl-specific members in the following
> patches for blob memory support.
>
> Signed-off-by: Huang Rui <ray.huang@amd.com>
> ---
>
> New patch:
> - Introduce a new struct virgl_gpu_resource to store virgl-specific members.
> - Move resource initialization from the patch "virtio-gpu: Resource UUID" here.
> - Remove error handling for g_new0(), because glib will abort() on OOM.
> - Set iov and iov_cnt in struct virtio_gpu_simple_resource for all types
>   of resources.
>
>  hw/display/virtio-gpu-virgl.c | 84 ++++++++++++++++++++++++++---------
>  1 file changed, 64 insertions(+), 20 deletions(-)
>
> diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
> index 5bbc8071b2..faab374336 100644
> --- a/hw/display/virtio-gpu-virgl.c
> +++ b/hw/display/virtio-gpu-virgl.c
> @@ -22,6 +22,23 @@
>
>  #include <virglrenderer.h>
>
> +struct virgl_gpu_resource {
> +    struct virtio_gpu_simple_resource res;
> +};
> +
> +static struct virgl_gpu_resource *
> +virgl_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
> +{
> +    struct virtio_gpu_simple_resource *res;
> +
> +    res = virtio_gpu_find_resource(g, resource_id);
> +    if (!res) {
> +        return NULL;
> +    }
> +
> +    return container_of(res, struct virgl_gpu_resource, res);
> +}
> +
>  #if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
>  static void *
>  virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
> @@ -35,11 +52,19 @@ static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
>  {
>      struct virtio_gpu_resource_create_2d c2d;
>      struct virgl_renderer_resource_create_args args;
> +    struct virgl_gpu_resource *vres;
>
>      VIRTIO_GPU_FILL_CMD(c2d);
>      trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
>                                         c2d.width, c2d.height);
>

It should check that the resource doesn't already exist (similar to the 2D code).
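
For reference, the existing 2D path in hw/display/virtio-gpu.c rejects an
already-used resource ID roughly like this (a sketch; the exact log message
in the tree may differ):

    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

In the virgl variant this check would naturally use the new
virgl_gpu_find_resource() and run before the g_new0() allocation below.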

> +    vres = g_new0(struct virgl_gpu_resource, 1);
> +    vres->res.width = c2d.width;
> +    vres->res.height = c2d.height;
> +    vres->res.format = c2d.format;
> +    vres->res.resource_id = c2d.resource_id;
> +    QTAILQ_INSERT_HEAD(&g->reslist, &vres->res, next);
> +
>      args.handle = c2d.resource_id;
>      args.target = 2;
>      args.format = c2d.format;
> @@ -59,11 +84,19 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
>  {
>      struct virtio_gpu_resource_create_3d c3d;
>      struct virgl_renderer_resource_create_args args;
> +    struct virgl_gpu_resource *vres;
>
>      VIRTIO_GPU_FILL_CMD(c3d);
>      trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
>                                         c3d.width, c3d.height, c3d.depth);
>

same

> +    vres = g_new0(struct virgl_gpu_resource, 1);
> +    vres->res.width = c3d.width;
> +    vres->res.height = c3d.height;
> +    vres->res.format = c3d.format;
> +    vres->res.resource_id = c3d.resource_id;
> +    QTAILQ_INSERT_HEAD(&g->reslist, &vres->res, next);
> +
>      args.handle = c3d.resource_id;
>      args.target = c3d.target;
>      args.format = c3d.format;
> @@ -82,19 +115,23 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
>                                       struct virtio_gpu_ctrl_command *cmd)
>  {
>      struct virtio_gpu_resource_unref unref;
> -    struct iovec *res_iovs = NULL;
> -    int num_iovs = 0;
> +    struct virgl_gpu_resource *vres;
>
>      VIRTIO_GPU_FILL_CMD(unref);
>      trace_virtio_gpu_cmd_res_unref(unref.resource_id);
>
> -    virgl_renderer_resource_detach_iov(unref.resource_id,
> -                                       &res_iovs,
> -                                       &num_iovs);
> -    if (res_iovs != NULL && num_iovs != 0) {
> -        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
> +    vres = virgl_gpu_find_resource(g, unref.resource_id);
> +    if (!vres) {
> +        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
> +        return;
>      }
> +
> +    virgl_renderer_resource_detach_iov(unref.resource_id, NULL, NULL);
>      virgl_renderer_resource_unref(unref.resource_id);
> +
> +    QTAILQ_REMOVE(&g->reslist, &vres->res, next);
> +    virtio_gpu_cleanup_mapping(g, &vres->res);
> +    g_free(vres);
>  }
>
>  static void virgl_cmd_context_create(VirtIOGPU *g,
> @@ -310,44 +347,51 @@ static void virgl_resource_attach_backing(VirtIOGPU *g,
>                                            struct virtio_gpu_ctrl_command *cmd)
>  {
>      struct virtio_gpu_resource_attach_backing att_rb;
> -    struct iovec *res_iovs;
> -    uint32_t res_niov;
> +    struct virgl_gpu_resource *vres;
>      int ret;
>
>      VIRTIO_GPU_FILL_CMD(att_rb);
>      trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);
>
> +    vres = virgl_gpu_find_resource(g, att_rb.resource_id);
> +    if (!vres) {
> +        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
> +        return;
> +    }
> +
>      ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
> -                                        cmd, NULL, &res_iovs, &res_niov);
> +                                        cmd, NULL, &vres->res.iov,
> +                                        &vres->res.iov_cnt);
>      if (ret != 0) {
>          cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
>          return;
>      }
>
>      ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
> -                                             res_iovs, res_niov);
> +                                             vres->res.iov, vres->res.iov_cnt);
>
> -    if (ret != 0)
> -        virtio_gpu_cleanup_mapping_iov(g, res_iovs, res_niov);
> +    if (ret != 0) {
> +        virtio_gpu_cleanup_mapping(g, &vres->res);
> +    }
>  }
>
>  static void virgl_resource_detach_backing(VirtIOGPU *g,
>                                            struct virtio_gpu_ctrl_command *cmd)
>  {
>      struct virtio_gpu_resource_detach_backing detach_rb;
> -    struct iovec *res_iovs = NULL;
> -    int num_iovs = 0;
> +    struct virgl_gpu_resource *vres;
>
>      VIRTIO_GPU_FILL_CMD(detach_rb);
>      trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);
>
> -    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
> -                                       &res_iovs,
> -                                       &num_iovs);
> -    if (res_iovs == NULL || num_iovs == 0) {
> +    vres = virgl_gpu_find_resource(g, detach_rb.resource_id);
> +    if (!vres) {
> +        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
>          return;
>      }
> -    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
> +
> +    virgl_renderer_resource_detach_iov(detach_rb.resource_id, NULL, NULL);
> +    virtio_gpu_cleanup_mapping(g, &vres->res);
>  }
>
>
> --
> 2.25.1
>
>
Huang Rui Jan. 3, 2024, 8:48 a.m. UTC | #5
On Thu, Dec 21, 2023 at 01:43:37PM +0800, Akihiko Odaki wrote:
> On 2023/12/19 22:27, Huang Rui wrote:
> > On Tue, Dec 19, 2023 at 08:35:27PM +0800, Akihiko Odaki wrote:
> >> On 2023/12/19 16:53, Huang Rui wrote:
> >>> Introduce a new virgl_gpu_resource data structure and helper functions
> >>> for virgl. It will be used to add virgl-specific members in the following
> >>> patches for blob memory support.
> >>
> >> The name is ambiguous. It should indicate that it is specific to virgl.
> > 
> > How about "virgl_resource", which inherits from virtio_gpu_simple_resource? But
> > this name is exactly the same as the name in virglrenderer.
> 
> You can prefix it with virtio_gpu_virgl as virtio_gpu_virgl_init() and 
> other functions do.
> 

Thanks, will update it in V7.

Ray
Huang Rui Jan. 4, 2024, 7:27 a.m. UTC | #6
On Tue, Jan 02, 2024 at 07:52:04PM +0800, Marc-André Lureau wrote:
> Hi
> 
> On Tue, Dec 19, 2023 at 11:55 AM Huang Rui <ray.huang@amd.com> wrote:
> >
> > Introduce a new virgl_gpu_resource data structure and helper functions
> > for virgl. It will be used to add virgl-specific members in the following
> > patches for blob memory support.
> >
> > Signed-off-by: Huang Rui <ray.huang@amd.com>
> > ---
> >
> > New patch:
> > - Introduce a new struct virgl_gpu_resource to store virgl-specific members.
> > - Move resource initialization from the patch "virtio-gpu: Resource UUID" here.
> > - Remove error handling for g_new0(), because glib will abort() on OOM.
> > - Set iov and iov_cnt in struct virtio_gpu_simple_resource for all types
> >   of resources.
> >
> >  hw/display/virtio-gpu-virgl.c | 84 ++++++++++++++++++++++++++---------
> >  1 file changed, 64 insertions(+), 20 deletions(-)
> >
> > diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
> > index 5bbc8071b2..faab374336 100644
> > --- a/hw/display/virtio-gpu-virgl.c
> > +++ b/hw/display/virtio-gpu-virgl.c
> > @@ -22,6 +22,23 @@
> >
> >  #include <virglrenderer.h>
> >
> > +struct virgl_gpu_resource {
> > +    struct virtio_gpu_simple_resource res;
> > +};
> > +
> > +static struct virgl_gpu_resource *
> > +virgl_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
> > +{
> > +    struct virtio_gpu_simple_resource *res;
> > +
> > +    res = virtio_gpu_find_resource(g, resource_id);
> > +    if (!res) {
> > +        return NULL;
> > +    }
> > +
> > +    return container_of(res, struct virgl_gpu_resource, res);
> > +}
> > +
> >  #if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
> >  static void *
> >  virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
> > @@ -35,11 +52,19 @@ static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
> >  {
> >      struct virtio_gpu_resource_create_2d c2d;
> >      struct virgl_renderer_resource_create_args args;
> > +    struct virgl_gpu_resource *vres;
> >
> >      VIRTIO_GPU_FILL_CMD(c2d);
> >      trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
> >                                         c2d.width, c2d.height);
> >
> 
> It should check that the resource doesn't already exist (similar to the 2D code).
> 

Will add the resource check here in V7.

Thanks,
Ray

> > +    vres = g_new0(struct virgl_gpu_resource, 1);
> > +    vres->res.width = c2d.width;
> > +    vres->res.height = c2d.height;
> > +    vres->res.format = c2d.format;
> > +    vres->res.resource_id = c2d.resource_id;
> > +    QTAILQ_INSERT_HEAD(&g->reslist, &vres->res, next);
> > +
> >      args.handle = c2d.resource_id;
> >      args.target = 2;
> >      args.format = c2d.format;
> > @@ -59,11 +84,19 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
> >  {
> >      struct virtio_gpu_resource_create_3d c3d;
> >      struct virgl_renderer_resource_create_args args;
> > +    struct virgl_gpu_resource *vres;
> >
> >      VIRTIO_GPU_FILL_CMD(c3d);
> >      trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
> >                                         c3d.width, c3d.height, c3d.depth);
> >
> 
> same
> 
> > +    vres = g_new0(struct virgl_gpu_resource, 1);
> > +    vres->res.width = c3d.width;
> > +    vres->res.height = c3d.height;
> > +    vres->res.format = c3d.format;
> > +    vres->res.resource_id = c3d.resource_id;
> > +    QTAILQ_INSERT_HEAD(&g->reslist, &vres->res, next);
> > +
> >      args.handle = c3d.resource_id;
> >      args.target = c3d.target;
> >      args.format = c3d.format;
> > @@ -82,19 +115,23 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
> >                                       struct virtio_gpu_ctrl_command *cmd)
> >  {
> >      struct virtio_gpu_resource_unref unref;
> > -    struct iovec *res_iovs = NULL;
> > -    int num_iovs = 0;
> > +    struct virgl_gpu_resource *vres;
> >
> >      VIRTIO_GPU_FILL_CMD(unref);
> >      trace_virtio_gpu_cmd_res_unref(unref.resource_id);
> >
> > -    virgl_renderer_resource_detach_iov(unref.resource_id,
> > -                                       &res_iovs,
> > -                                       &num_iovs);
> > -    if (res_iovs != NULL && num_iovs != 0) {
> > -        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
> > +    vres = virgl_gpu_find_resource(g, unref.resource_id);
> > +    if (!vres) {
> > +        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
> > +        return;
> >      }
> > +
> > +    virgl_renderer_resource_detach_iov(unref.resource_id, NULL, NULL);
> >      virgl_renderer_resource_unref(unref.resource_id);
> > +
> > +    QTAILQ_REMOVE(&g->reslist, &vres->res, next);
> > +    virtio_gpu_cleanup_mapping(g, &vres->res);
> > +    g_free(vres);
> >  }
> >
> >  static void virgl_cmd_context_create(VirtIOGPU *g,
> > @@ -310,44 +347,51 @@ static void virgl_resource_attach_backing(VirtIOGPU *g,
> >                                            struct virtio_gpu_ctrl_command *cmd)
> >  {
> >      struct virtio_gpu_resource_attach_backing att_rb;
> > -    struct iovec *res_iovs;
> > -    uint32_t res_niov;
> > +    struct virgl_gpu_resource *vres;
> >      int ret;
> >
> >      VIRTIO_GPU_FILL_CMD(att_rb);
> >      trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);
> >
> > +    vres = virgl_gpu_find_resource(g, att_rb.resource_id);
> > +    if (!vres) {
> > +        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
> > +        return;
> > +    }
> > +
> >      ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
> > -                                        cmd, NULL, &res_iovs, &res_niov);
> > +                                        cmd, NULL, &vres->res.iov,
> > +                                        &vres->res.iov_cnt);
> >      if (ret != 0) {
> >          cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
> >          return;
> >      }
> >
> >      ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
> > -                                             res_iovs, res_niov);
> > +                                             vres->res.iov, vres->res.iov_cnt);
> >
> > -    if (ret != 0)
> > -        virtio_gpu_cleanup_mapping_iov(g, res_iovs, res_niov);
> > +    if (ret != 0) {
> > +        virtio_gpu_cleanup_mapping(g, &vres->res);
> > +    }
> >  }
> >
> >  static void virgl_resource_detach_backing(VirtIOGPU *g,
> >                                            struct virtio_gpu_ctrl_command *cmd)
> >  {
> >      struct virtio_gpu_resource_detach_backing detach_rb;
> > -    struct iovec *res_iovs = NULL;
> > -    int num_iovs = 0;
> > +    struct virgl_gpu_resource *vres;
> >
> >      VIRTIO_GPU_FILL_CMD(detach_rb);
> >      trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);
> >
> > -    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
> > -                                       &res_iovs,
> > -                                       &num_iovs);
> > -    if (res_iovs == NULL || num_iovs == 0) {
> > +    vres = virgl_gpu_find_resource(g, detach_rb.resource_id);
> > +    if (!vres) {
> > +        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
> >          return;
> >      }
> > -    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
> > +
> > +    virgl_renderer_resource_detach_iov(detach_rb.resource_id, NULL, NULL);
> > +    virtio_gpu_cleanup_mapping(g, &vres->res);
> >  }
> >
> >
> > --
> > 2.25.1
> >
> >
> 
> 
> -- 
> Marc-André Lureau

Patch

diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
index 5bbc8071b2..faab374336 100644
--- a/hw/display/virtio-gpu-virgl.c
+++ b/hw/display/virtio-gpu-virgl.c
@@ -22,6 +22,23 @@ 
 
 #include <virglrenderer.h>
 
+struct virgl_gpu_resource {
+    struct virtio_gpu_simple_resource res;
+};
+
+static struct virgl_gpu_resource *
+virgl_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
+{
+    struct virtio_gpu_simple_resource *res;
+
+    res = virtio_gpu_find_resource(g, resource_id);
+    if (!res) {
+        return NULL;
+    }
+
+    return container_of(res, struct virgl_gpu_resource, res);
+}
+
 #if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
 static void *
 virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
@@ -35,11 +52,19 @@  static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
 {
     struct virtio_gpu_resource_create_2d c2d;
     struct virgl_renderer_resource_create_args args;
+    struct virgl_gpu_resource *vres;
 
     VIRTIO_GPU_FILL_CMD(c2d);
     trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                        c2d.width, c2d.height);
 
+    vres = g_new0(struct virgl_gpu_resource, 1);
+    vres->res.width = c2d.width;
+    vres->res.height = c2d.height;
+    vres->res.format = c2d.format;
+    vres->res.resource_id = c2d.resource_id;
+    QTAILQ_INSERT_HEAD(&g->reslist, &vres->res, next);
+
     args.handle = c2d.resource_id;
     args.target = 2;
     args.format = c2d.format;
@@ -59,11 +84,19 @@  static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
 {
     struct virtio_gpu_resource_create_3d c3d;
     struct virgl_renderer_resource_create_args args;
+    struct virgl_gpu_resource *vres;
 
     VIRTIO_GPU_FILL_CMD(c3d);
     trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                        c3d.width, c3d.height, c3d.depth);
 
+    vres = g_new0(struct virgl_gpu_resource, 1);
+    vres->res.width = c3d.width;
+    vres->res.height = c3d.height;
+    vres->res.format = c3d.format;
+    vres->res.resource_id = c3d.resource_id;
+    QTAILQ_INSERT_HEAD(&g->reslist, &vres->res, next);
+
     args.handle = c3d.resource_id;
     args.target = c3d.target;
     args.format = c3d.format;
@@ -82,19 +115,23 @@  static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
 {
     struct virtio_gpu_resource_unref unref;
-    struct iovec *res_iovs = NULL;
-    int num_iovs = 0;
+    struct virgl_gpu_resource *vres;
 
     VIRTIO_GPU_FILL_CMD(unref);
     trace_virtio_gpu_cmd_res_unref(unref.resource_id);
 
-    virgl_renderer_resource_detach_iov(unref.resource_id,
-                                       &res_iovs,
-                                       &num_iovs);
-    if (res_iovs != NULL && num_iovs != 0) {
-        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
+    vres = virgl_gpu_find_resource(g, unref.resource_id);
+    if (!vres) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
     }
+
+    virgl_renderer_resource_detach_iov(unref.resource_id, NULL, NULL);
     virgl_renderer_resource_unref(unref.resource_id);
+
+    QTAILQ_REMOVE(&g->reslist, &vres->res, next);
+    virtio_gpu_cleanup_mapping(g, &vres->res);
+    g_free(vres);
 }
 
 static void virgl_cmd_context_create(VirtIOGPU *g,
@@ -310,44 +347,51 @@  static void virgl_resource_attach_backing(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
 {
     struct virtio_gpu_resource_attach_backing att_rb;
-    struct iovec *res_iovs;
-    uint32_t res_niov;
+    struct virgl_gpu_resource *vres;
     int ret;
 
     VIRTIO_GPU_FILL_CMD(att_rb);
     trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);
 
+    vres = virgl_gpu_find_resource(g, att_rb.resource_id);
+    if (!vres) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
     ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
-                                        cmd, NULL, &res_iovs, &res_niov);
+                                        cmd, NULL, &vres->res.iov,
+                                        &vres->res.iov_cnt);
     if (ret != 0) {
         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
         return;
     }
 
     ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
-                                             res_iovs, res_niov);
+                                             vres->res.iov, vres->res.iov_cnt);
 
-    if (ret != 0)
-        virtio_gpu_cleanup_mapping_iov(g, res_iovs, res_niov);
+    if (ret != 0) {
+        virtio_gpu_cleanup_mapping(g, &vres->res);
+    }
 }
 
 static void virgl_resource_detach_backing(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
 {
     struct virtio_gpu_resource_detach_backing detach_rb;
-    struct iovec *res_iovs = NULL;
-    int num_iovs = 0;
+    struct virgl_gpu_resource *vres;
 
     VIRTIO_GPU_FILL_CMD(detach_rb);
     trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);
 
-    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
-                                       &res_iovs,
-                                       &num_iovs);
-    if (res_iovs == NULL || num_iovs == 0) {
+    vres = virgl_gpu_find_resource(g, detach_rb.resource_id);
+    if (!vres) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
         return;
     }
-    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
+
+    virgl_renderer_resource_detach_iov(detach_rb.resource_id, NULL, NULL);
+    virtio_gpu_cleanup_mapping(g, &vres->res);
 }