
[v10,06/11] drm/shmem-helper: Don't use vmap_use_count for dma-bufs

Message ID 20230108210445.3948344-7-dmitry.osipenko@collabora.com (mailing list archive)
State New, archived
Series Add generic memory shrinker to VirtIO-GPU and Panfrost DRM drivers

Commit Message

Dmitry Osipenko Jan. 8, 2023, 9:04 p.m. UTC
DMA-buf core has its own refcounting of vmaps, use it instead of drm-shmem
counting. This change prepares drm-shmem for addition of memory shrinker
support where drm-shmem will use a single dma-buf reservation lock for
all operations performed over dma-bufs.

Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
---
 drivers/gpu/drm/drm_gem_shmem_helper.c | 35 +++++++++++++++-----------
 1 file changed, 20 insertions(+), 15 deletions(-)
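
For context, the dma-buf core already maintains its own vmap refcount (the
vmapping_counter field that the print_info hunk below peeks at). A rough
sketch of that logic, condensed from dma_buf_vmap() in
drivers/dma-buf/dma-buf.c with locking and error handling omitted, looks
roughly like this:

	/*
	 * Simplified sketch of the dma-buf core's vmap refcounting;
	 * not the verbatim kernel code.
	 */
	int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
	{
		int ret;

		if (dmabuf->vmapping_counter) {
			/* Already mapped: bump the count, reuse the mapping. */
			dmabuf->vmapping_counter++;
			*map = dmabuf->vmap_ptr;
			return 0;
		}

		/* First user: ask the exporter to create the mapping. */
		ret = dmabuf->ops->vmap(dmabuf, map);
		if (ret)
			return ret;

		dmabuf->vmap_ptr = *map;
		dmabuf->vmapping_counter = 1;
		return 0;
	}

Since this counting already happens inside the dma-buf core, keeping a
second vmap_use_count in drm-shmem for imported buffers is redundant.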

Comments

Gerd Hoffmann Jan. 26, 2023, 12:17 p.m. UTC | #1
On Mon, Jan 09, 2023 at 12:04:40AM +0300, Dmitry Osipenko wrote:
>  its own refcounting of vmaps, use it instead of drm-shmem
> counting. This change prepares drm-shmem for addition of memory shrinker
> support where drm-shmem will use a single dma-buf reservation lock for
> all operations performed over dma-bufs.

Likewise truncated?

take care,
  Gerd
Dmitry Osipenko Jan. 26, 2023, 12:24 p.m. UTC | #2
On 1/26/23 15:17, Gerd Hoffmann wrote:
> On Mon, Jan 09, 2023 at 12:04:40AM +0300, Dmitry Osipenko wrote:
>>  its own refcounting of vmaps, use it instead of drm-shmem
>> counting. This change prepares drm-shmem for addition of memory shrinker
>> support where drm-shmem will use a single dma-buf reservation lock for
>> all operations performed over dma-bufs.
> 
> Likewise truncated?

That must be an email problem on your side; please see [1][2], where the
messages are okay.

[1]
https://lore.kernel.org/dri-devel/20230108210445.3948344-7-dmitry.osipenko@collabora.com/
[2] https://patchwork.freedesktop.org/patch/517401/
Gerd Hoffmann Jan. 27, 2023, 8:06 a.m. UTC | #3
On Thu, Jan 26, 2023 at 03:24:30PM +0300, Dmitry Osipenko wrote:
> On 1/26/23 15:17, Gerd Hoffmann wrote:
> > On Mon, Jan 09, 2023 at 12:04:40AM +0300, Dmitry Osipenko wrote:
> >>  its own refcounting of vmaps, use it instead of drm-shmem
> >> counting. This change prepares drm-shmem for addition of memory shrinker
> >> support where drm-shmem will use a single dma-buf reservation lock for
> >> all operations performed over dma-bufs.
> > 
> > Likewise truncated?
> 
> That must be an email problem on your side; please see [1][2], where the
> messages are okay.

Indeed, scratch the comments then.

take care,
  Gerd
Thomas Zimmermann Feb. 17, 2023, 12:41 p.m. UTC | #4
Hi

Am 08.01.23 um 22:04 schrieb Dmitry Osipenko:
> DMA-buf core has its own refcounting of vmaps, use it instead of drm-shmem
> counting. This change prepares drm-shmem for addition of memory shrinker
> support where drm-shmem will use a single dma-buf reservation lock for
> all operations performed over dma-bufs.
> 
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>

Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>

with my comments below considered.

> ---
>   drivers/gpu/drm/drm_gem_shmem_helper.c | 35 +++++++++++++++-----------
>   1 file changed, 20 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 5006f7da7f2d..1392cbd3cc02 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -301,24 +301,22 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
>   	struct drm_gem_object *obj = &shmem->base;
>   	int ret = 0;
>   
> -	if (shmem->vmap_use_count++ > 0) {
> -		iosys_map_set_vaddr(map, shmem->vaddr);
> -		return 0;
> -	}
> -
>   	if (obj->import_attach) {
>   		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
>   		if (!ret) {
>   			if (drm_WARN_ON(obj->dev, map->is_iomem)) {

I'm sure that I added this line at some point. But I'm now wondering why
we're testing this flag. Everything that uses the mapped buffer should
be agnostic to is_iomem. IIRC the only reason for this test is that
we're setting shmem->vaddr to the returned map->vaddr. Now that that code
is gone, we can also remove the whole branch.
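
A hedged sketch of that simplification (assuming callers really are
agnostic to is_iomem, as argued above) could look like:

	/* Hypothetical follow-up: with shmem->vaddr no longer cached for
	 * imports, the is_iomem test and its error path can go away. */
	if (obj->import_attach)
		return dma_buf_vmap(obj->import_attach->dmabuf, map);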

>   				dma_buf_vunmap(obj->import_attach->dmabuf, map);
> -				ret = -EIO;
> -				goto err_put_pages;
> +				return -EIO;
>   			}
> -			shmem->vaddr = map->vaddr;
>   		}
>   	} else {
>   		pgprot_t prot = PAGE_KERNEL;
>   
> +		if (shmem->vmap_use_count++ > 0) {
> +			iosys_map_set_vaddr(map, shmem->vaddr);
> +			return 0;
> +		}
> +
>   		ret = drm_gem_shmem_get_pages(shmem);
>   		if (ret)
>   			goto err_zero_use;
> @@ -384,15 +382,15 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
>   {
>   	struct drm_gem_object *obj = &shmem->base;
>   
> -	if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
> -		return;
> -
> -	if (--shmem->vmap_use_count > 0)
> -		return;
> -
>   	if (obj->import_attach) {
>   		dma_buf_vunmap(obj->import_attach->dmabuf, map);
>   	} else {
> +		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
> +			return;
> +
> +		if (--shmem->vmap_use_count > 0)
> +			return;
> +
>   		vunmap(shmem->vaddr);
>   		drm_gem_shmem_put_pages(shmem);
>   	}
> @@ -660,7 +658,14 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
>   			      struct drm_printer *p, unsigned int indent)
>   {
>   	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
> -	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
> +
> +	if (shmem->base.import_attach)
> +		drm_printf_indent(p, indent, "vmap_use_count=%u\n",
> +				  shmem->base.dma_buf->vmapping_counter);

This is not vmap_use_count, and the best solution would be to add a
print_info callback to dma-bufs. So maybe simply ignore imported buffers
here.
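
A minimal sketch of that alternative, skipping the counter for imports
instead of peeking into struct dma_buf:

	/* Sketch: report vmap_use_count only for native (non-imported)
	 * buffers; imported dma-bufs would rely on a future dma-buf
	 * print_info callback instead. */
	if (!shmem->base.import_attach)
		drm_printf_indent(p, indent, "vmap_use_count=%u\n",
				  shmem->vmap_use_count);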

Best regards
Thomas

> +	else
> +		drm_printf_indent(p, indent, "vmap_use_count=%u\n",
> +				  shmem->vmap_use_count);
> +
>   	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
>   }
>   EXPORT_SYMBOL(drm_gem_shmem_print_info);

Patch

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 5006f7da7f2d..1392cbd3cc02 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -301,24 +301,22 @@  static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 	struct drm_gem_object *obj = &shmem->base;
 	int ret = 0;
 
-	if (shmem->vmap_use_count++ > 0) {
-		iosys_map_set_vaddr(map, shmem->vaddr);
-		return 0;
-	}
-
 	if (obj->import_attach) {
 		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
 		if (!ret) {
 			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
 				dma_buf_vunmap(obj->import_attach->dmabuf, map);
-				ret = -EIO;
-				goto err_put_pages;
+				return -EIO;
 			}
-			shmem->vaddr = map->vaddr;
 		}
 	} else {
 		pgprot_t prot = PAGE_KERNEL;
 
+		if (shmem->vmap_use_count++ > 0) {
+			iosys_map_set_vaddr(map, shmem->vaddr);
+			return 0;
+		}
+
 		ret = drm_gem_shmem_get_pages(shmem);
 		if (ret)
 			goto err_zero_use;
@@ -384,15 +382,15 @@  static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
-		return;
-
-	if (--shmem->vmap_use_count > 0)
-		return;
-
 	if (obj->import_attach) {
 		dma_buf_vunmap(obj->import_attach->dmabuf, map);
 	} else {
+		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
+			return;
+
+		if (--shmem->vmap_use_count > 0)
+			return;
+
 		vunmap(shmem->vaddr);
 		drm_gem_shmem_put_pages(shmem);
 	}
@@ -660,7 +658,14 @@  void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
 			      struct drm_printer *p, unsigned int indent)
 {
 	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
-	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+
+	if (shmem->base.import_attach)
+		drm_printf_indent(p, indent, "vmap_use_count=%u\n",
+				  shmem->base.dma_buf->vmapping_counter);
+	else
+		drm_printf_indent(p, indent, "vmap_use_count=%u\n",
+				  shmem->vmap_use_count);
+
 	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
 }
 EXPORT_SYMBOL(drm_gem_shmem_print_info);
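
For reference, a typical caller pairs these helpers through an iosys_map.
The sketch below is a hypothetical driver snippet: drm_gem_shmem_vmap(),
drm_gem_shmem_vunmap() and iosys_map_memset() are the real APIs, while
example_fill_bo() is made up, and locking requirements may differ
depending on the tree:

	#include <drm/drm_gem_shmem_helper.h>
	#include <linux/iosys-map.h>

	/* Map a shmem GEM object, zero it, then drop the mapping. The same
	 * call sequence works for imported dma-bufs, where the vmap
	 * refcounting now lives in the dma-buf core. */
	static int example_fill_bo(struct drm_gem_shmem_object *shmem)
	{
		struct iosys_map map;
		int ret;

		ret = drm_gem_shmem_vmap(shmem, &map);
		if (ret)
			return ret;

		/* The iosys_map helpers keep the access is_iomem-agnostic. */
		iosys_map_memset(&map, 0, 0, shmem->base.size);

		drm_gem_shmem_vunmap(shmem, &map);
		return 0;
	}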