
[v2] drm/ttm: Always and only destroy bo->ttm_resv in ttm_bo_release_list

Message ID 20171102172218.7827-1-michel@daenzer.net (mailing list archive)
State New, archived

Commit Message

Michel Dänzer Nov. 2, 2017, 5:22 p.m. UTC
From: Michel Dänzer <michel.daenzer@amd.com>

Fixes a use-after-free due to a race condition in
ttm_bo_cleanup_refs_and_unlock, which allows one task to reserve a BO
and destroy its ttm_resv while another task is waiting for it to signal
in reservation_object_wait_timeout_rcu.

v2:
* Always initialize bo->ttm_resv in ttm_bo_init_reserved
 (Christian König)

Fixes: 0d2bd2ae045d "drm/ttm: fix memory leak while individualizing BOs"
Reviewed-by: Chunming Zhou <david1.zhou@amd.com> # v1
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)
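
For context, the interleaving this patch closes looks roughly like this. It is an illustrative timeline in C comment form, condensed from the description above; the exact locking details and surrounding code are abbreviated, not quoted verbatim from the driver:

/*
 * Pre-patch race, simplified:
 *
 *   Task A                                   Task B
 *   ------                                   ------
 *   ttm_bo_cleanup_refs_and_unlock(bo)
 *     drops its locks, then blocks in
 *     reservation_object_wait_timeout_rcu(
 *             &bo->ttm_resv, ...);
 *                                            ttm_bo_cleanup_refs_and_unlock(bo)
 *                                              reserves the BO and reaches
 *                                              reservation_object_fini(&bo->ttm_resv);
 *     wakes up and still dereferences
 *     &bo->ttm_resv -> use-after-free
 */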

Comments

Christian König Nov. 2, 2017, 5:34 p.m. UTC | #1
On 02.11.2017 at 18:22, Michel Dänzer wrote:
> From: Michel Dänzer <michel.daenzer@amd.com>
>
> Fixes a use-after-free due to a race condition in
> ttm_bo_cleanup_refs_and_unlock, which allows one task to reserve a BO
> and destroy its ttm_resv while another task is waiting for it to signal
> in reservation_object_wait_timeout_rcu.
>
> v2:
> * Always initialize bo->ttm_resv in ttm_bo_init_reserved
>   (Christian König)
>
> Fixes: 0d2bd2ae045d "drm/ttm: fix memory leak while individualizing BOs"
> Reviewed-by: Chunming Zhou <david1.zhou@amd.com> # v1
> Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>

Reviewed-by: Christian König <christian.koenig@amd.com>

> ---
>   drivers/gpu/drm/ttm/ttm_bo.c | 16 ++++------------
>   1 file changed, 4 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 379ec41d2c69..c088703777e2 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -150,8 +150,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
>   	ttm_tt_destroy(bo->ttm);
>   	atomic_dec(&bo->glob->bo_count);
>   	dma_fence_put(bo->moving);
> -	if (bo->resv == &bo->ttm_resv)
> -		reservation_object_fini(&bo->ttm_resv);
> +	reservation_object_fini(&bo->ttm_resv);
>   	mutex_destroy(&bo->wu_mutex);
>   	if (bo->destroy)
>   		bo->destroy(bo);
> @@ -402,14 +401,11 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
>   	if (bo->resv == &bo->ttm_resv)
>   		return 0;
>   
> -	reservation_object_init(&bo->ttm_resv);
>   	BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
>   
>   	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
> -	if (r) {
> +	if (r)
>   		reservation_object_unlock(&bo->ttm_resv);
> -		reservation_object_fini(&bo->ttm_resv);
> -	}
>   
>   	return r;
>   }
> @@ -457,10 +453,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
>   		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
>   			ttm_bo_del_from_lru(bo);
>   			spin_unlock(&glob->lru_lock);
> -			if (bo->resv != &bo->ttm_resv) {
> +			if (bo->resv != &bo->ttm_resv)
>   				reservation_object_unlock(&bo->ttm_resv);
> -				reservation_object_fini(&bo->ttm_resv);
> -			}
>   
>   			ttm_bo_cleanup_memtype_use(bo);
>   			return;
> @@ -560,8 +554,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
>   	}
>   
>   	ttm_bo_del_from_lru(bo);
> -	if (!list_empty(&bo->ddestroy) && (bo->resv != &bo->ttm_resv))
> -		reservation_object_fini(&bo->ttm_resv);
>   	list_del_init(&bo->ddestroy);
>   	kref_put(&bo->list_kref, ttm_bo_ref_bug);
>   
> @@ -1210,8 +1202,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
>   		lockdep_assert_held(&bo->resv->lock.base);
>   	} else {
>   		bo->resv = &bo->ttm_resv;
> -		reservation_object_init(&bo->ttm_resv);
>   	}
> +	reservation_object_init(&bo->ttm_resv);
>   	atomic_inc(&bo->glob->bo_count);
>   	drm_vma_node_reset(&bo->vma_node);
>   	bo->priority = 0;

Patch

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 379ec41d2c69..c088703777e2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -150,8 +150,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	ttm_tt_destroy(bo->ttm);
 	atomic_dec(&bo->glob->bo_count);
 	dma_fence_put(bo->moving);
-	if (bo->resv == &bo->ttm_resv)
-		reservation_object_fini(&bo->ttm_resv);
+	reservation_object_fini(&bo->ttm_resv);
 	mutex_destroy(&bo->wu_mutex);
 	if (bo->destroy)
 		bo->destroy(bo);
@@ -402,14 +401,11 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
 	if (bo->resv == &bo->ttm_resv)
 		return 0;
 
-	reservation_object_init(&bo->ttm_resv);
 	BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
 
 	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
-	if (r) {
+	if (r)
 		reservation_object_unlock(&bo->ttm_resv);
-		reservation_object_fini(&bo->ttm_resv);
-	}
 
 	return r;
 }
@@ -457,10 +453,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
 			ttm_bo_del_from_lru(bo);
 			spin_unlock(&glob->lru_lock);
-			if (bo->resv != &bo->ttm_resv) {
+			if (bo->resv != &bo->ttm_resv)
 				reservation_object_unlock(&bo->ttm_resv);
-				reservation_object_fini(&bo->ttm_resv);
-			}
 
 			ttm_bo_cleanup_memtype_use(bo);
 			return;
@@ -560,8 +554,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 	}
 
 	ttm_bo_del_from_lru(bo);
-	if (!list_empty(&bo->ddestroy) && (bo->resv != &bo->ttm_resv))
-		reservation_object_fini(&bo->ttm_resv);
 	list_del_init(&bo->ddestroy);
 	kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
@@ -1210,8 +1202,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		lockdep_assert_held(&bo->resv->lock.base);
 	} else {
 		bo->resv = &bo->ttm_resv;
-		reservation_object_init(&bo->ttm_resv);
 	}
+	reservation_object_init(&bo->ttm_resv);
 	atomic_inc(&bo->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
 	bo->priority = 0;
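
Putting the hunks together, the intended bo->ttm_resv lifecycle after this patch is roughly the following. This is a condensed sketch derived from the diff above, not the complete functions; parameters, error paths and unrelated work are elided:

/* Condensed post-patch lifecycle of bo->ttm_resv (sketch, not full code). */

int ttm_bo_init_reserved(/* ... */)
{
	/* ... */
	if (!resv)
		bo->resv = &bo->ttm_resv;
	/* initialized exactly once for every BO, whether or not an
	 * external reservation object was passed in */
	reservation_object_init(&bo->ttm_resv);
	/* ... */
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->resv == &bo->ttm_resv)
		return 0;

	/* ttm_resv already exists: just lock it and copy the fences,
	 * with no init/fini a concurrent waiter could race against */
	BUG_ON(!reservation_object_trylock(&bo->ttm_resv));

	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
	if (r)
		reservation_object_unlock(&bo->ttm_resv);

	return r;
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	/* ... */
	/* destroyed exactly once, when the last list reference goes
	 * away, so no other task can still be waiting on it */
	reservation_object_fini(&bo->ttm_resv);
	/* ... */
}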