
[4/4] drm/vmwgfx: Add basic support for external buffers

Message ID: 20240627053452.2908605-5-zack.rusin@broadcom.com (mailing list archive)
State: New
Series: Fix various buffer mapping/import issues

Commit Message

Zack Rusin June 27, 2024, 5:34 a.m. UTC
Make vmwgfx go through the dma-buf interface to map/unmap imported
buffers. The driver used to manipulate external buffers directly,
assuming that everything coming to it had to live in CPU-accessible
memory. While that is technically true, because what ends up in the
VMs is controlled by us, it's semantically completely broken.

Fix importing of external buffers by forwarding all memory access
requests through the dma-buf import interface.

Tested with the vmw_prime basic_vgem test.

Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 62 +++++++++++++++++++++++++++--
 1 file changed, 58 insertions(+), 4 deletions(-)
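
For context, below is a minimal userspace sketch of the import path that the basic_vgem test mentioned above exercises. It is not the IGT test itself; the device paths (and the assumption that card0 is vgem and card1 is vmwgfx) are illustrative only, and it only performs the allocate/export/import steps that lead into the new vmap/mmap forwarding.

/*
 * Illustrative only: roughly the flow the vmw_prime basic_vgem test
 * goes through.  Device paths are assumptions (card0 = vgem,
 * card1 = vmwgfx); build against libdrm.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>

int main(void)
{
	int vgem = open("/dev/dri/card0", O_RDWR); /* assumed: vgem */
	int vmw  = open("/dev/dri/card1", O_RDWR); /* assumed: vmwgfx */
	struct drm_mode_create_dumb create = {
		.width = 64, .height = 64, .bpp = 32,
	};
	uint32_t vmw_handle;
	int dmabuf_fd;

	if (vgem < 0 || vmw < 0)
		return 1;

	/* Allocate a buffer on the exporter (vgem). */
	if (drmIoctl(vgem, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return 1;

	/* Export it as a dma-buf fd... */
	if (drmPrimeHandleToFD(vgem, create.handle, DRM_CLOEXEC | DRM_RDWR,
			       &dmabuf_fd))
		return 1;

	/*
	 * ...and import it into vmwgfx.  With this patch, a later
	 * vmap/mmap of the resulting GEM object is forwarded to the
	 * exporter via dma_buf_vmap()/dma_buf_mmap() instead of being
	 * treated as a TTM-backed, CPU-mappable buffer.
	 */
	if (drmPrimeFDToHandle(vmw, dmabuf_fd, &vmw_handle))
		return 1;

	printf("imported vgem bo as vmwgfx handle %u\n", vmw_handle);

	close(dmabuf_fd);
	close(vmw);
	close(vgem);
	return 0;
}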

Comments

Martin Krastev June 27, 2024, 1:47 p.m. UTC | #1
On Thu, Jun 27, 2024 at 8:34 AM Zack Rusin <zack.rusin@broadcom.com> wrote:
>
> Make vmwgfx go through the dma-buf interface to map/unmap imported
> buffers. The driver used to manipulate external buffers directly,
> assuming that everything coming to it had to live in CPU-accessible
> memory. While that is technically true, because what ends up in the
> VMs is controlled by us, it's semantically completely broken.
>
> Fix importing of external buffers by forwarding all memory access
> requests through the dma-buf import interface.
>
> Tested with the vmw_prime basic_vgem test.
>
> Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
> ---
>  drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 62 +++++++++++++++++++++++++++--
>  1 file changed, 58 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
> index 07185c108218..07567d9519ec 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
> @@ -1,6 +1,7 @@
>  /* SPDX-License-Identifier: GPL-2.0 OR MIT */
>  /*
> - * Copyright 2021-2023 VMware, Inc.
> + * Copyright (c) 2021-2024 Broadcom. All Rights Reserved. The term
> + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
>   *
>   * Permission is hereby granted, free of charge, to any person
>   * obtaining a copy of this software and associated documentation
> @@ -78,6 +79,59 @@ static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
>         return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages);
>  }
>
> +static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
> +{
> +       struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
> +       int ret;
> +
> +       if (obj->import_attach) {
> +               ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
> +               if (!ret) {
> +                       if (drm_WARN_ON(obj->dev, map->is_iomem)) {
> +                               dma_buf_vunmap(obj->import_attach->dmabuf, map);
> +                               return -EIO;
> +                       }
> +               }
> +       } else {
> +               ret = ttm_bo_vmap(bo, map);
> +       }
> +
> +       return ret;
> +}
> +
> +static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
> +{
> +       if (obj->import_attach) {
> +               dma_buf_vunmap(obj->import_attach->dmabuf, map);
> +       } else {
> +               drm_gem_ttm_vunmap(obj, map);
> +       }
> +}
> +
> +static int vmw_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
> +{
> +       int ret;
> +
> +       if (obj->import_attach) {
> +               /* Reset both vm_ops and vm_private_data, so we don't end up with
> +                * vm_ops pointing to our implementation if the dma-buf backend
> +                * doesn't set those fields.
> +                */
> +               vma->vm_private_data = NULL;
> +               vma->vm_ops = NULL;
> +
> +               ret = dma_buf_mmap(obj->dma_buf, vma, 0);
> +
> +               /* Drop the reference drm_gem_mmap_obj() acquired.*/
> +               if (!ret)
> +                       drm_gem_object_put(obj);
> +
> +               return ret;
> +       }
> +
> +       return drm_gem_ttm_mmap(obj, vma);
> +}
> +
>  static const struct vm_operations_struct vmw_vm_ops = {
>         .pfn_mkwrite = vmw_bo_vm_mkwrite,
>         .page_mkwrite = vmw_bo_vm_mkwrite,
> @@ -94,9 +148,9 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
>         .pin = vmw_gem_object_pin,
>         .unpin = vmw_gem_object_unpin,
>         .get_sg_table = vmw_gem_object_get_sg_table,
> -       .vmap = drm_gem_ttm_vmap,
> -       .vunmap = drm_gem_ttm_vunmap,
> -       .mmap = drm_gem_ttm_mmap,
> +       .vmap = vmw_gem_vmap,
> +       .vunmap = vmw_gem_vunmap,
> +       .mmap = vmw_gem_mmap,
>         .vm_ops = &vmw_vm_ops,
>  };
>
> --
> 2.40.1
>

LGTM!

Reviewed-by: Martin Krastev <martin.krastev@broadcom.com>

Regards,
Martin

Patch

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index 07185c108218..07567d9519ec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -1,6 +1,7 @@ 
 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /*
- * Copyright 2021-2023 VMware, Inc.
+ * Copyright (c) 2021-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -78,6 +79,59 @@  static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
 	return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages);
 }
 
+static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+	struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
+	int ret;
+
+	if (obj->import_attach) {
+		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
+		if (!ret) {
+			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
+				dma_buf_vunmap(obj->import_attach->dmabuf, map);
+				return -EIO;
+			}
+		}
+	} else {
+		ret = ttm_bo_vmap(bo, map);
+	}
+
+	return ret;
+}
+
+static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+	if (obj->import_attach) {
+		dma_buf_vunmap(obj->import_attach->dmabuf, map);
+	} else {
+		drm_gem_ttm_vunmap(obj, map);
+	}
+}
+
+static int vmw_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	int ret;
+
+	if (obj->import_attach) {
+		/* Reset both vm_ops and vm_private_data, so we don't end up with
+                * vm_ops pointing to our implementation if the dma-buf backend
+                * doesn't set those fields.
+                */
+		vma->vm_private_data = NULL;
+		vma->vm_ops = NULL;
+
+		ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+
+		/* Drop the reference drm_gem_mmap_obj() acquired.*/
+		if (!ret)
+			drm_gem_object_put(obj);
+
+		return ret;
+	}
+
+	return drm_gem_ttm_mmap(obj, vma);
+}
+
 static const struct vm_operations_struct vmw_vm_ops = {
 	.pfn_mkwrite = vmw_bo_vm_mkwrite,
 	.page_mkwrite = vmw_bo_vm_mkwrite,
@@ -94,9 +148,9 @@  static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
 	.pin = vmw_gem_object_pin,
 	.unpin = vmw_gem_object_unpin,
 	.get_sg_table = vmw_gem_object_get_sg_table,
-	.vmap = drm_gem_ttm_vmap,
-	.vunmap = drm_gem_ttm_vunmap,
-	.mmap = drm_gem_ttm_mmap,
+	.vmap = vmw_gem_vmap,
+	.vunmap = vmw_gem_vunmap,
+	.mmap = vmw_gem_mmap,
 	.vm_ops = &vmw_vm_ops,
 };