drm/i915: Split i915_active.mutex into an irq-safe spinlock for the rbtree

Message ID 20191113185359.11690-1-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Series drm/i915: Split i915_active.mutex into an irq-safe spinlock for the rbtree

Commit Message

Chris Wilson Nov. 13, 2019, 6:53 p.m. UTC
As we want to be able to run inside atomic context for retiring the
i915_active, and we are no longer allowed to abuse mutex_trylock, split
the tree management portion of i915_active.mutex into an irq-safe
spinlock.

References: a0855d24fc22d ("locking/mutex: Complain upon mutex API misuse in IRQ contexts")
References: https://bugs.freedesktop.org/show_bug.cgi?id=111626
Fixes: 274cbf20fd10 ("drm/i915: Push the i915_active.retire into a worker")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/i915_active.c           | 57 ++++++++++----------
 drivers/gpu/drm/i915/i915_active_types.h     |  1 +
 drivers/gpu/drm/i915/selftests/i915_active.c |  4 +-
 3 files changed, 31 insertions(+), 31 deletions(-)
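
The heart of the change is in __active_retire(): the final reference
drop and the rbtree teardown must happen atomically, and may now run in
hardirq context, so the count is dropped with
atomic_dec_and_lock_irqsave(), which only takes the lock (saving the
irq state) when the count reaches zero. A minimal standalone sketch of
the idiom; the struct and function names below are illustrative, not
the driver code:

#include <linux/atomic.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>

struct sketch_active {
	atomic_t count;
	spinlock_t tree_lock;	/* protects tree (and cache, in i915) */
	struct rb_root tree;
};

static void sketch_retire(struct sketch_active *ref)
{
	struct rb_root root;
	unsigned long flags;

	/*
	 * Behaves as a plain atomic decrement unless the count hits
	 * zero, in which case it returns true with tree_lock held and
	 * local interrupts disabled (previous state saved in flags).
	 */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	/* Steal the tree under the lock ... */
	root = ref->tree;
	ref->tree = RB_ROOT;
	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* ... and free its nodes outside it, as __active_retire() does. */
}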

Comments

Tvrtko Ursulin Nov. 14, 2019, 4:55 p.m. UTC
On 13/11/2019 18:53, Chris Wilson wrote:
> As we want to be able to run inside atomic context for retiring the
> i915_active, and we are no longer allowed to abuse mutex_trylock, split
> the tree management portion of i915_active.mutex into an irq-safe
> spinlock.
> 
> References: a0855d24fc22d ("locking/mutex: Complain upon mutex API misuse in IRQ contexts")
> References: https://bugs.freedesktop.org/show_bug.cgi?id=111626
> Fixes: 274cbf20fd10 ("drm/i915: Push the i915_active.retire into a worker")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> Cc: Matthew Auld <matthew.auld@intel.com>
> [snip]

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko
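
A note on the barrier path: i915_active_acquire_barrier() now takes the
new tree_lock with spin_lock_irqsave_nested(..., SINGLE_DEPTH_NESTING),
carrying over the annotation from the mutex_lock_nested() it replaces.
The subclass tells lockdep that acquiring a second lock of the same
class while one is already held is deliberate, not a deadlock. A
hypothetical two-object sketch of the annotation, reusing the
illustrative sketch_active struct from above (not the driver's actual
nesting):

static void sketch_transfer(struct sketch_active *src,
			    struct sketch_active *dst)
{
	unsigned long flags;

	spin_lock_irqsave(&src->tree_lock, flags);
	/* Same lock class, one level deep: annotate for lockdep. */
	spin_lock_nested(&dst->tree_lock, SINGLE_DEPTH_NESTING);
	/* ... move nodes from src->tree to dst->tree ... */
	spin_unlock(&dst->tree_lock);
	spin_unlock_irqrestore(&src->tree_lock, flags);
}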

Patch

diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 207383dda84d..5448f37c8102 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -91,14 +91,15 @@ static void debug_active_init(struct i915_active *ref)
 
 static void debug_active_activate(struct i915_active *ref)
 {
-	lockdep_assert_held(&ref->mutex);
+	spin_lock_irq(&ref->tree_lock);
 	if (!atomic_read(&ref->count)) /* before the first inc */
 		debug_object_activate(ref, &active_debug_desc);
+	spin_unlock_irq(&ref->tree_lock);
 }
 
 static void debug_active_deactivate(struct i915_active *ref)
 {
-	lockdep_assert_held(&ref->mutex);
+	lockdep_assert_held(&ref->tree_lock);
 	if (!atomic_read(&ref->count)) /* after the last dec */
 		debug_object_deactivate(ref, &active_debug_desc);
 }
@@ -128,29 +129,22 @@ __active_retire(struct i915_active *ref)
 {
 	struct active_node *it, *n;
 	struct rb_root root;
-	bool retire = false;
+	unsigned long flags;
 
-	lockdep_assert_held(&ref->mutex);
 	GEM_BUG_ON(i915_active_is_idle(ref));
 
 	/* return the unused nodes to our slabcache -- flushing the allocator */
-	if (atomic_dec_and_test(&ref->count)) {
-		debug_active_deactivate(ref);
-		root = ref->tree;
-		ref->tree = RB_ROOT;
-		ref->cache = NULL;
-		retire = true;
-	}
-
-	mutex_unlock(&ref->mutex);
-	if (!retire)
+	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
 		return;
 
 	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
-	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
-		GEM_BUG_ON(i915_active_fence_isset(&it->base));
-		kmem_cache_free(global.slab_cache, it);
-	}
+	debug_active_deactivate(ref);
+
+	root = ref->tree;
+	ref->tree = RB_ROOT;
+	ref->cache = NULL;
+
+	spin_unlock_irqrestore(&ref->tree_lock, flags);
 
 	/* After the final retire, the entire struct may be freed */
 	if (ref->retire)
@@ -158,6 +152,11 @@ __active_retire(struct i915_active *ref)
 
 	/* ... except if you wait on it, you must manage your own references! */
 	wake_up_var(ref);
+
+	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
+		GEM_BUG_ON(i915_active_fence_isset(&it->base));
+		kmem_cache_free(global.slab_cache, it);
+	}
 }
 
 static void
@@ -169,7 +168,6 @@ active_work(struct work_struct *wrk)
 	if (atomic_add_unless(&ref->count, -1, 1))
 		return;
 
-	mutex_lock(&ref->mutex);
 	__active_retire(ref);
 }
 
@@ -180,9 +178,7 @@ active_retire(struct i915_active *ref)
 	if (atomic_add_unless(&ref->count, -1, 1))
 		return;
 
-	/* If we are inside interrupt context (fence signaling), defer */
-	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS ||
-	    !mutex_trylock(&ref->mutex)) {
+	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
 		queue_work(system_unbound_wq, &ref->work);
 		return;
 	}
@@ -227,7 +223,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
 	if (!prealloc)
 		return NULL;
 
-	mutex_lock(&ref->mutex);
+	spin_lock_irq(&ref->tree_lock);
 	GEM_BUG_ON(i915_active_is_idle(ref));
 
 	parent = NULL;
@@ -257,7 +253,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
 
 out:
 	ref->cache = node;
-	mutex_unlock(&ref->mutex);
+	spin_unlock_irq(&ref->tree_lock);
 
 	BUILD_BUG_ON(offsetof(typeof(*node), base));
 	return &node->base;
@@ -278,8 +274,10 @@ void __i915_active_init(struct i915_active *ref,
 	if (bits & I915_ACTIVE_MAY_SLEEP)
 		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
 
+	spin_lock_init(&ref->tree_lock);
 	ref->tree = RB_ROOT;
 	ref->cache = NULL;
+
 	init_llist_head(&ref->preallocated_barriers);
 	atomic_set(&ref->count, 0);
 	__mutex_init(&ref->mutex, "i915_active", key);
@@ -510,7 +508,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 	if (RB_EMPTY_ROOT(&ref->tree))
 		return NULL;
 
-	mutex_lock(&ref->mutex);
+	spin_lock_irq(&ref->tree_lock);
 	GEM_BUG_ON(i915_active_is_idle(ref));
 
 	/*
@@ -575,7 +573,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 			goto match;
 	}
 
-	mutex_unlock(&ref->mutex);
+	spin_unlock_irq(&ref->tree_lock);
 
 	return NULL;
 
@@ -583,7 +581,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
 	if (p == &ref->cache->node)
 		ref->cache = NULL;
-	mutex_unlock(&ref->mutex);
+	spin_unlock_irq(&ref->tree_lock);
 
 	return rb_entry(p, struct active_node, node);
 }
@@ -664,6 +662,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 void i915_active_acquire_barrier(struct i915_active *ref)
 {
 	struct llist_node *pos, *next;
+	unsigned long flags;
 
 	GEM_BUG_ON(i915_active_is_idle(ref));
 
@@ -673,7 +672,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 	 * populated by i915_request_add_active_barriers() to point to the
 	 * request that will eventually release them.
 	 */
-	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+	spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING);
 	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
 		struct active_node *node = barrier_from_ll(pos);
 		struct intel_engine_cs *engine = barrier_to_engine(node);
@@ -699,7 +698,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
 		intel_engine_pm_put(engine);
 	}
-	mutex_unlock(&ref->mutex);
+	spin_unlock_irqrestore(&ref->tree_lock, flags);
 }
 
 void i915_request_add_active_barriers(struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index d89a74c142c6..96aed0ee700a 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -48,6 +48,7 @@ struct i915_active {
 	atomic_t count;
 	struct mutex mutex;
 
+	spinlock_t tree_lock;
 	struct active_node *cache;
 	struct rb_root tree;
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index f3fa05c78d78..60290f78750d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -277,8 +277,8 @@ void i915_active_unlock_wait(struct i915_active *ref)
 	}
 
 	/* And wait for the retire callback */
-	mutex_lock(&ref->mutex);
-	mutex_unlock(&ref->mutex);
+	spin_lock_irq(&ref->tree_lock);
+	spin_unlock_irq(&ref->tree_lock);
 
 	/* ... which may have been on a thread instead */
 	flush_work(&ref->work);
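
The selftest hunk relies on a common idiom: taking and immediately
releasing tree_lock is not a no-op but a barrier. It cannot complete
until any concurrent holder of the lock (here, a retirer that won
atomic_dec_and_lock_irqsave()) has finished its critical section. In
isolation, with the same illustrative struct as above:

static void sketch_wait_for_retire(struct sketch_active *ref)
{
	/*
	 * Returns only once any in-flight retirement running under
	 * tree_lock has dropped the lock, i.e. after it has finished
	 * detaching the tree.
	 */
	spin_lock_irq(&ref->tree_lock);
	spin_unlock_irq(&ref->tree_lock);
}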