
[v2] drm/vmwgfx: Make sure unpinning handles reservations

Message ID 20210410185956.720612-1-zackr@vmware.com (mailing list archive)
State New, archived
Series [v2] drm/vmwgfx: Make sure unpinning handles reservations

Commit Message

Zack Rusin April 10, 2021, 6:59 p.m. UTC
Quite often it's hard to tell whether a reservation is already held
in code paths that unpin bo's. While our pinning/unpinning code should
be more explicit, that requires a substantial amount of work, so instead
we avoid the issue by trying to reserve before unpinning. Because we
unpin those bo's only on destruction/error paths, that trylock alone
tells us whether we're already reserved and lets us unpin cleanly.
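
In essence the unreference path becomes the following (a simplified
sketch for illustration only; the helper name is made up, and the
actual changes are in the patch below):

static void unpin_possibly_reserved(struct ttm_buffer_object *bo)
{
	/*
	 * If some other path already holds the reservation, the trylock
	 * fails and the unpin runs under that caller's lock; otherwise
	 * we take the lock ourselves just for the duration of the unpin.
	 */
	bool locked = dma_resv_trylock(bo->base.resv);

	if (bo->pin_count > 0)
		ttm_bo_unpin(bo);
	if (locked)
		dma_resv_unlock(bo->base.resv);
}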

Reviewed-by: Martin Krastev <krastevm@vmware.com>
Reviewed-by: Roland Scheidegger <sroland@vmware.com>
Fixes: d1a73c641afd ("drm/vmwgfx: Make sure we unpin no longer needed buffers")
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: Zack Rusin <zackr@vmware.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 11 +++++++++++
 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 18 ++++++++++++++----
 2 files changed, 25 insertions(+), 4 deletions(-)

Comments

Thomas Hellström (Intel) April 13, 2021, 9:52 a.m. UTC | #1
Hi, Zack,

On 4/10/21 8:59 PM, Zack Rusin wrote:
> Quite often it's hard to tell whether a reservation is already held
> in code paths that unpin bo's. While our pinning/unpinning code should
> be more explicit, that requires a substantial amount of work, so instead
> we avoid the issue by trying to reserve before unpinning. Because we
> unpin those bo's only on destruction/error paths, that trylock alone
> tells us whether we're already reserved and lets us unpin cleanly.
>
> Reviewed-by: Martin Krastev <krastevm@vmware.com>
> Reviewed-by: Roland Scheidegger <sroland@vmware.com>
> Fixes: d1a73c641afd ("drm/vmwgfx: Make sure we unpin no longer needed buffers")
> Cc: dri-devel@lists.freedesktop.org
> Signed-off-by: Zack Rusin <zackr@vmware.com>
> ---
>   drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 11 +++++++++++
>   drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 18 ++++++++++++++----
>   2 files changed, 25 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> index 8087a9013455..9a3133cb5cc1 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> @@ -1523,8 +1523,19 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
>   
>   	*buf = NULL;
>   	if (tmp_buf != NULL) {
> +		/*
> +		 * Currently pinning requires a reservation to be held,
> +		 * but unreference can be called with or without the
> +		 * reservation held. To avoid deadlocks we have to try
> +		 * to get the reservation. This is exclusively to satisfy
> +		 * the requirements of the unpin; ttm_bo_put will handle
> +		 * delayed deletion for us in case the bo is really busy.
> +		 */
> +		bool locked = dma_resv_trylock(tmp_buf->base.base.resv);
>   		if (tmp_buf->base.pin_count > 0)
Hmm, similar to the previous version, I figure another thread might be 
holding the lock and drop it here?
>   			ttm_bo_unpin(&tmp_buf->base);
And also, if this is a nested reference of a pinned buffer, you'd be 
incorrectly unpinning it.

/Thomas
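
To make the concerns above concrete, a hypothetical interleaving
(illustration only, not driver code):

/*
 * Thread A                        Thread B
 * --------                        --------
 * dma_resv_lock(bo resv)
 *                                 vmw_bo_unreference(&buf)
 *                                   dma_resv_trylock() fails,
 *                                   locked == false
 *                                   ttm_bo_unpin() runs without
 *                                   the reservation held
 * dma_resv_unlock(bo resv)
 *
 * Separately, if buf is just one of several references to a buffer
 * that is meant to stay pinned, the pin_count > 0 check still
 * unpins it out from under the remaining holders.
 */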

Patch

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 8087a9013455..9a3133cb5cc1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1523,8 +1523,19 @@  static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
 
 	*buf = NULL;
 	if (tmp_buf != NULL) {
+		/*
+		 * Currently pinning requires a reservation to be held,
+		 * but unreference can be called with or without the
+		 * reservation held. To avoid deadlocks we have to try
+		 * to get the reservation. This is exclusively to satisfy
+		 * the requirements of the unpin; ttm_bo_put will handle
+		 * delayed deletion for us in case the bo is really busy.
+		 */
+		bool locked = dma_resv_trylock(tmp_buf->base.base.resv);
 		if (tmp_buf->base.pin_count > 0)
 			ttm_bo_unpin(&tmp_buf->base);
+		if (locked)
+			dma_resv_unlock(tmp_buf->base.base.resv);
 		ttm_bo_put(&tmp_buf->base);
 	}
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index a0b53141dded..f2d625415458 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -94,6 +94,16 @@  static void vmw_mob_pt_setup(struct vmw_mob *mob,
 			     struct vmw_piter data_iter,
 			     unsigned long num_data_pages);
 
+
+static inline void vmw_bo_unpin_unlocked(struct ttm_buffer_object *bo)
+{
+	int ret = ttm_bo_reserve(bo, false, true, NULL);
+	BUG_ON(ret != 0);
+	ttm_bo_unpin(bo);
+	ttm_bo_unreserve(bo);
+}
+
+
 /*
  * vmw_setup_otable_base - Issue an object table base setup command to
  * the device
@@ -277,7 +287,7 @@  static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
 						 &batch->otables[i]);
 	}
 
-	ttm_bo_unpin(batch->otable_bo);
+	vmw_bo_unpin_unlocked(batch->otable_bo);
 	ttm_bo_put(batch->otable_bo);
 	batch->otable_bo = NULL;
 	return ret;
@@ -341,9 +351,9 @@  static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
 	BUG_ON(ret != 0);
 
 	vmw_bo_fence_single(bo, NULL);
+	ttm_bo_unpin(bo);
 	ttm_bo_unreserve(bo);
 
-	ttm_bo_unpin(batch->otable_bo);
 	ttm_bo_put(batch->otable_bo);
 	batch->otable_bo = NULL;
 }
@@ -530,7 +540,7 @@  static void vmw_mob_pt_setup(struct vmw_mob *mob,
 void vmw_mob_destroy(struct vmw_mob *mob)
 {
 	if (mob->pt_bo) {
-		ttm_bo_unpin(mob->pt_bo);
+		vmw_bo_unpin_unlocked(mob->pt_bo);
 		ttm_bo_put(mob->pt_bo);
 		mob->pt_bo = NULL;
 	}
@@ -646,7 +656,7 @@  int vmw_mob_bind(struct vmw_private *dev_priv,
 out_no_cmd_space:
 	vmw_fifo_resource_dec(dev_priv);
 	if (pt_set_up) {
-		ttm_bo_unpin(mob->pt_bo);
+		vmw_bo_unpin_unlocked(mob->pt_bo);
 		ttm_bo_put(mob->pt_bo);
 		mob->pt_bo = NULL;
 	}