[v16,14/20] drm/shmem-helper: Use refcount_t for vmap_use_count

Message ID 20230903170736.513347-15-dmitry.osipenko@collabora.com (mailing list archive)
State New, archived
Series Add generic memory shrinker to VirtIO-GPU and Panfrost DRM drivers

Commit Message

Dmitry Osipenko Sept. 3, 2023, 5:07 p.m. UTC
Use the refcount_t helper for vmap_use_count to make the refcounting
consistent with pages_use_count and pages_pin_count, which already use
refcount_t. This will allow optimizing unlocked vmappings by skipping
the reservation locking when refcnt > 1, and it also lets vmapping
benefit from refcount_t's overflow checks.

Suggested-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
---
 drivers/gpu/drm/drm_gem_shmem_helper.c | 28 +++++++++++---------------
 include/drm/drm_gem_shmem_helper.h     |  2 +-
 2 files changed, 13 insertions(+), 17 deletions(-)
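
For quick orientation, the gist of the conversion is sketched below
(hypothetical helper names; pinning, write-combine and error paths are
omitted — the real code is in the diff that follows):

/* Condensed sketch of the refcount_t-based vmap/vunmap flow. */
static int shmem_vmap_sketch(struct drm_gem_shmem_object *shmem,
			     struct iosys_map *map)
{
	if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
		/* Already vmapped: reuse the existing vaddr. */
		iosys_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	shmem->vaddr = vmap(shmem->pages, shmem->base.size >> PAGE_SHIFT,
			    VM_MAP, PAGE_KERNEL);
	if (!shmem->vaddr)
		return -ENOMEM;

	iosys_map_set_vaddr(map, shmem->vaddr);
	refcount_set(&shmem->vmap_use_count, 1);	/* first user */
	return 0;
}

static void shmem_vunmap_sketch(struct drm_gem_shmem_object *shmem)
{
	/* refcount_t WARNs on underflow by itself, so no explicit
	 * drm_WARN_ON_ONCE() check is needed before the decrement. */
	if (refcount_dec_and_test(&shmem->vmap_use_count)) {
		vunmap(shmem->vaddr);
		shmem->vaddr = NULL;
	}
}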

Comments

Boris Brezillon Sept. 5, 2023, 7:05 a.m. UTC | #1
On Sun,  3 Sep 2023 20:07:30 +0300
Dmitry Osipenko <dmitry.osipenko@collabora.com> wrote:

> Use the refcount_t helper for vmap_use_count to make the refcounting
> consistent with pages_use_count and pages_pin_count, which already use
> refcount_t. This will allow optimizing unlocked vmappings by skipping
> the reservation locking when refcnt > 1,

nit: this optimization doesn't exist in practice, because the resv
lock is taken by the core, and ->v[un]map() are called with this lock
held.
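
For context, the core path looks roughly like this (a simplified sketch
of drm_gem_vmap_unlocked()/drm_gem_vmap() from drm_gem.c; error handling
omitted):

int drm_gem_vmap_unlocked(struct drm_gem_object *obj,
			  struct iosys_map *map)
{
	int ret;

	/* The core serializes on the reservation lock here... */
	dma_resv_lock(obj->resv, NULL);
	/* ...so the helper's ->vmap() runs with the lock already held. */
	ret = obj->funcs->vmap(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}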

> and it also lets vmapping
> benefit from refcount_t's overflow checks.
> 
> Suggested-by: Boris Brezillon <boris.brezillon@collabora.com>
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>

I agree with your two other points (consistency with the other
refcounting primitives and the safety provided by refcount_t), so

Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>

but I'd recommend rephrasing/dropping the part mentioning the lock-free
optimization.
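
(For reference, the underflow protection refcount_t adds is roughly the
following — illustrative only, assuming the generic implementation in
lib/refcount.c:)

#include <linux/refcount.h>
#include <linux/printk.h>

static void refcount_underflow_demo(void)
{
	refcount_t r = REFCOUNT_INIT(0);

	/*
	 * An unbalanced put on a plain "unsigned int" counter would
	 * silently wrap 0 -> UINT_MAX. refcount_t instead WARNs once,
	 * saturates the counter and returns false, so the "last user,
	 * free it" path is never taken on a corrupted count.
	 */
	if (refcount_dec_and_test(&r))
		pr_info("never reached\n");
}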

> ---
>  drivers/gpu/drm/drm_gem_shmem_helper.c | 28 +++++++++++---------------
>  include/drm/drm_gem_shmem_helper.h     |  2 +-
>  2 files changed, 13 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 899f655a65bb..4633a418faba 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -144,7 +144,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
>  	} else if (!shmem->imported_sgt) {
>  		dma_resv_lock(shmem->base.resv, NULL);
>  
> -		drm_WARN_ON(obj->dev, shmem->vmap_use_count);
> +		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
>  
>  		if (shmem->sgt) {
>  			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
> @@ -345,23 +345,25 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
>  
>  		dma_resv_assert_held(shmem->base.resv);
>  
> -		if (shmem->vmap_use_count++ > 0) {
> +		if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
>  			iosys_map_set_vaddr(map, shmem->vaddr);
>  			return 0;
>  		}
>  
>  		ret = drm_gem_shmem_pin_locked(shmem);
>  		if (ret)
> -			goto err_zero_use;
> +			return ret;
>  
>  		if (shmem->map_wc)
>  			prot = pgprot_writecombine(prot);
>  		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
>  				    VM_MAP, prot);
> -		if (!shmem->vaddr)
> +		if (!shmem->vaddr) {
>  			ret = -ENOMEM;
> -		else
> +		} else {
>  			iosys_map_set_vaddr(map, shmem->vaddr);
> +			refcount_set(&shmem->vmap_use_count, 1);
> +		}
>  	}
>  
>  	if (ret) {
> @@ -374,8 +376,6 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
>  err_put_pages:
>  	if (!obj->import_attach)
>  		drm_gem_shmem_unpin_locked(shmem);
> -err_zero_use:
> -	shmem->vmap_use_count = 0;
>  
>  	return ret;
>  }
> @@ -403,14 +403,10 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
>  	} else {
>  		dma_resv_assert_held(shmem->base.resv);
>  
> -		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
> -			return;
> -
> -		if (--shmem->vmap_use_count > 0)
> -			return;
> -
> -		vunmap(shmem->vaddr);
> -		drm_gem_shmem_unpin_locked(shmem);
> +		if (refcount_dec_and_test(&shmem->vmap_use_count)) {
> +			vunmap(shmem->vaddr);
> +			drm_gem_shmem_unpin_locked(shmem);
> +		}
>  	}
>  
>  	shmem->vaddr = NULL;
> @@ -656,7 +652,7 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
>  
>  	drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
>  	drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
> -	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
> +	drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
>  	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
> diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
> index 396958a98c34..63e91e8f2d5c 100644
> --- a/include/drm/drm_gem_shmem_helper.h
> +++ b/include/drm/drm_gem_shmem_helper.h
> @@ -81,7 +81,7 @@ struct drm_gem_shmem_object {
>  	 * Reference count on the virtual address.
>  	 * The address are un-mapped when the count reaches zero.
>  	 */
> -	unsigned int vmap_use_count;
> +	refcount_t vmap_use_count;
>  
>  	/**
>  	 * @got_pages_sgt:

Patch

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 899f655a65bb..4633a418faba 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -144,7 +144,7 @@  void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 	} else if (!shmem->imported_sgt) {
 		dma_resv_lock(shmem->base.resv, NULL);
 
-		drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
 
 		if (shmem->sgt) {
 			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
@@ -345,23 +345,25 @@  int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 
 		dma_resv_assert_held(shmem->base.resv);
 
-		if (shmem->vmap_use_count++ > 0) {
+		if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
 			iosys_map_set_vaddr(map, shmem->vaddr);
 			return 0;
 		}
 
 		ret = drm_gem_shmem_pin_locked(shmem);
 		if (ret)
-			goto err_zero_use;
+			return ret;
 
 		if (shmem->map_wc)
 			prot = pgprot_writecombine(prot);
 		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
 				    VM_MAP, prot);
-		if (!shmem->vaddr)
+		if (!shmem->vaddr) {
 			ret = -ENOMEM;
-		else
+		} else {
 			iosys_map_set_vaddr(map, shmem->vaddr);
+			refcount_set(&shmem->vmap_use_count, 1);
+		}
 	}
 
 	if (ret) {
@@ -374,8 +376,6 @@  int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 err_put_pages:
 	if (!obj->import_attach)
 		drm_gem_shmem_unpin_locked(shmem);
-err_zero_use:
-	shmem->vmap_use_count = 0;
 
 	return ret;
 }
@@ -403,14 +403,10 @@  void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
 	} else {
 		dma_resv_assert_held(shmem->base.resv);
 
-		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
-			return;
-
-		if (--shmem->vmap_use_count > 0)
-			return;
-
-		vunmap(shmem->vaddr);
-		drm_gem_shmem_unpin_locked(shmem);
+		if (refcount_dec_and_test(&shmem->vmap_use_count)) {
+			vunmap(shmem->vaddr);
+			drm_gem_shmem_unpin_locked(shmem);
+		}
 	}
 
 	shmem->vaddr = NULL;
@@ -656,7 +652,7 @@  void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
 
 	drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
 	drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
-	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+	drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
 	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 396958a98c34..63e91e8f2d5c 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -81,7 +81,7 @@  struct drm_gem_shmem_object {
 	 * Reference count on the virtual address.
 	 * The address are un-mapped when the count reaches zero.
 	 */
-	unsigned int vmap_use_count;
+	refcount_t vmap_use_count;
 
 	/**
 	 * @got_pages_sgt: