
[v8,2/7] drm/shmem-helper: Don't use vmap_use_count for dma-bufs

Message ID 20221105232719.302619-3-dmitry.osipenko@collabora.com (mailing list archive)
State New, archived
Series Add generic memory shrinker to VirtIO-GPU and Panfrost DRM drivers

Commit Message

Dmitry Osipenko Nov. 5, 2022, 11:27 p.m. UTC
DMA-buf core has its own refcounting of vmaps, use it instead of drm-shmem
counting. This change prepares drm-shmem for addition of memory shrinker
support where drm-shmem will use a single dma-buf reservation lock for
all operations performed over dma-bufs.

Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
---
 drivers/gpu/drm/drm_gem_shmem_helper.c | 35 +++++++++++++++-----------
 1 file changed, 20 insertions(+), 15 deletions(-)
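
For context: the DMA-buf core already refcounts its vmaps on its own. As a
simplified sketch of what dma_buf_vmap() does with struct dma_buf's
vmapping_counter and vmap_ptr fields (locking and sanity checks omitted;
this is an illustration, not the exact in-tree code):

static int dma_buf_vmap_sketch(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct iosys_map ptr;
	int ret;

	if (dmabuf->vmapping_counter) {
		/* A mapping already exists: bump the count, hand it out again. */
		dmabuf->vmapping_counter++;
		*map = dmabuf->vmap_ptr;
		return 0;
	}

	/* First user: ask the exporter to create the vmap. */
	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (ret)
		return ret;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;
	*map = dmabuf->vmap_ptr;

	return 0;
}

This is the counting that the patch switches to for imported buffers,
instead of shmem's own vmap_use_count.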

Comments

Thomas Zimmermann Nov. 10, 2022, 8:55 a.m. UTC | #1
Hi,

I have a few comments that are not really about this patch itself. I'll 
post them here anyway to have them on record.

Am 06.11.22 um 00:27 schrieb Dmitry Osipenko:
> DMA-buf core has its own refcounting of vmaps, use it instead of drm-shmem
> counting. This change prepares drm-shmem for addition of memory shrinker
> support where drm-shmem will use a single dma-buf reservation lock for
> all operations performed over dma-bufs.
> 
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
> ---
>   drivers/gpu/drm/drm_gem_shmem_helper.c | 35 +++++++++++++++-----------
>   1 file changed, 20 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 35138f8a375c..801033b48893 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -293,24 +293,22 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
>   	struct drm_gem_object *obj = &shmem->base;
>   	int ret = 0;
>   
> -	if (shmem->vmap_use_count++ > 0) {
> -		iosys_map_set_vaddr(map, shmem->vaddr);
> -		return 0;
> -	}
> -
>   	if (obj->import_attach) {

We have a number of such branches in various memory managers. At some 
point we should think about something like a GEM DMA-BUF object (or at 
least some helpers) that represents an imported buffer. Such a thing 
might be useful to avoid the duplication and branching that's going on 
here and in other functions.
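
Purely as a sketch of that idea (every name below is made up, nothing like
this exists today): a dedicated GEM object type for imported buffers could
carry its own drm_gem_object_funcs that forward straight to the dma-buf, so
helpers like drm-shmem would never have to branch on obj->import_attach:

struct drm_gem_dmabuf_object {
	struct drm_gem_object base;
	struct dma_buf *dmabuf;
};

static int drm_gem_dmabuf_object_vmap(struct drm_gem_object *obj,
				      struct iosys_map *map)
{
	struct drm_gem_dmabuf_object *dobj =
		container_of(obj, struct drm_gem_dmabuf_object, base);

	return dma_buf_vmap(dobj->dmabuf, map);
}

static const struct drm_gem_object_funcs drm_gem_dmabuf_object_funcs = {
	.vmap = drm_gem_dmabuf_object_vmap,
	/* .vunmap, .pin, .unpin, ... would forward to the dma-buf the same way */
};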

>   		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
>   		if (!ret) {
>   			if (WARN_ON(map->is_iomem)) {

I think it's OK to drop this test for is_iomem.  It was useful when we 
did not distinguish between I/O and system memory in most of our 
helpers. But that should have been resolved by now and whatever code 
still depends on it should be updated accordingly. All helpers use 
iosys_map functionality to access the memory.
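
For reference, the existing <linux/iosys-map.h> accessors hide the
system-vs-I/O memory distinction from callers, which is what makes the
is_iomem check redundant once everything goes through them. A trivial
example (illustration only):

/* Works the same whether the map was initialized with
 * iosys_map_set_vaddr() or iosys_map_set_vaddr_iomem().
 */
static void example_write_magic(struct iosys_map *map)
{
	const u32 magic = 0x12345678;

	iosys_map_wr(map, 0, u32, magic);	/* single value at offset 0 */
	iosys_map_memcpy_to(map, sizeof(magic), &magic, sizeof(magic));
}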


>   				dma_buf_vunmap(obj->import_attach->dmabuf, map);
> -				ret = -EIO;
> -				goto err_put_pages;
> +				return -EIO;
>   			}
> -			shmem->vaddr = map->vaddr;

To resolve the is_iomem issue, we'd have to store vaddr as a struct 
iosys_map value.
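
Roughly like this (hypothetical sketch of the shape of such a change, not an
actual patch):

/* Hypothetical: keep the whole iosys_map instead of a bare void *vaddr,
 * so the is_iomem flag travels together with the address.
 */
struct drm_gem_shmem_object_sketch {
	struct drm_gem_object base;
	struct iosys_map vmap;		/* would replace 'void *vaddr' */
	unsigned int vmap_use_count;
};

static void example_return_cached_vmap(struct drm_gem_shmem_object_sketch *shmem,
				       struct iosys_map *map)
{
	/* Instead of iosys_map_set_vaddr(map, shmem->vaddr): */
	*map = shmem->vmap;
}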

>   		}
>   	} else {
>   		pgprot_t prot = PAGE_KERNEL;
>   
> +		if (shmem->vmap_use_count++ > 0) {
> +			iosys_map_set_vaddr(map, shmem->vaddr);
> +			return 0;
> +		}
> +
>   		ret = drm_gem_shmem_get_pages(shmem);
>   		if (ret)
>   			goto err_zero_use;
> @@ -376,15 +374,15 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
>   {
>   	struct drm_gem_object *obj = &shmem->base;
>   
> -	if (WARN_ON_ONCE(!shmem->vmap_use_count))
> -		return;
> -
> -	if (--shmem->vmap_use_count > 0)
> -		return;
> -
>   	if (obj->import_attach) {
>   		dma_buf_vunmap(obj->import_attach->dmabuf, map);
>   	} else {
> +		if (WARN_ON_ONCE(!shmem->vmap_use_count))
> +			return;
> +
> +		if (--shmem->vmap_use_count > 0)
> +			return;
> +
>   		vunmap(shmem->vaddr);
>   		drm_gem_shmem_put_pages(shmem);
>   	}
> @@ -646,7 +644,14 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
>   			      struct drm_printer *p, unsigned int indent)
>   {
>   	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
> -	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
> +
> +	if (shmem->base.import_attach)
> +		drm_printf_indent(p, indent, "vmap_use_count=%u\n",
> +				  shmem->base.dma_buf->vmapping_counter);
> +	else
> +		drm_printf_indent(p, indent, "vmap_use_count=%u\n",
> +				  shmem->vmap_use_count);
> +

Here's another case where a GEM DMA-BUF object might become helpful.

Best regards
Thomas

>   	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
>   }
>   EXPORT_SYMBOL(drm_gem_shmem_print_info);

Patch

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 35138f8a375c..801033b48893 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -293,24 +293,22 @@  static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 	struct drm_gem_object *obj = &shmem->base;
 	int ret = 0;
 
-	if (shmem->vmap_use_count++ > 0) {
-		iosys_map_set_vaddr(map, shmem->vaddr);
-		return 0;
-	}
-
 	if (obj->import_attach) {
 		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
 		if (!ret) {
 			if (WARN_ON(map->is_iomem)) {
 				dma_buf_vunmap(obj->import_attach->dmabuf, map);
-				ret = -EIO;
-				goto err_put_pages;
+				return -EIO;
 			}
-			shmem->vaddr = map->vaddr;
 		}
 	} else {
 		pgprot_t prot = PAGE_KERNEL;
 
+		if (shmem->vmap_use_count++ > 0) {
+			iosys_map_set_vaddr(map, shmem->vaddr);
+			return 0;
+		}
+
 		ret = drm_gem_shmem_get_pages(shmem);
 		if (ret)
 			goto err_zero_use;
@@ -376,15 +374,15 @@  static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	if (WARN_ON_ONCE(!shmem->vmap_use_count))
-		return;
-
-	if (--shmem->vmap_use_count > 0)
-		return;
-
 	if (obj->import_attach) {
 		dma_buf_vunmap(obj->import_attach->dmabuf, map);
 	} else {
+		if (WARN_ON_ONCE(!shmem->vmap_use_count))
+			return;
+
+		if (--shmem->vmap_use_count > 0)
+			return;
+
 		vunmap(shmem->vaddr);
 		drm_gem_shmem_put_pages(shmem);
 	}
@@ -646,7 +644,14 @@  void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
 			      struct drm_printer *p, unsigned int indent)
 {
 	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
-	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+
+	if (shmem->base.import_attach)
+		drm_printf_indent(p, indent, "vmap_use_count=%u\n",
+				  shmem->base.dma_buf->vmapping_counter);
+	else
+		drm_printf_indent(p, indent, "vmap_use_count=%u\n",
+				  shmem->vmap_use_count);
+
 	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
 }
 EXPORT_SYMBOL(drm_gem_shmem_print_info);