diff mbox series

drm/i915/gt: Pull intel_timeline.requests list under a spinlock

Message ID 20191212010353.736593-1-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show
Series drm/i915/gt: Pull intel_timeline.requests list under a spinlock | expand

Commit Message

Chris Wilson Dec. 12, 2019, 1:03 a.m. UTC
Currently we use the intel_timeline.mutex to guard constructing and
retiring requests in the timeline, including the adding and removing of
the request from the list of requests (intel_timeline.requests).
However, we want to peek at neighbouring elements in the request list
while constructing a request on another timeline (see
i915_request_await_start) and this implies nesting timeline mutexes. To
avoid the nested mutex, we currently use a mutex_trylock() but this is
fraught with a potential race causing an -EBUSY. We can eliminate the
nested mutex here with a spinlock guarding list operations within the
broader mutex, so callers can choose between locking everything with the
mutex or just the list with the spinlock. (The mutex caters for
virtually all of the current users, but with the ability to easily peek
at the request list, we may do so more often in the future.)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_timeline.c      |  1 +
 .../gpu/drm/i915/gt/intel_timeline_types.h    |  1 +
 .../gpu/drm/i915/gt/selftests/mock_timeline.c |  1 +
 drivers/gpu/drm/i915/i915_request.c           | 54 +++++++++++--------
 4 files changed, 34 insertions(+), 23 deletions(-)
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 038e05a6336c..06cbd0777a0c 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -256,6 +256,7 @@  int intel_timeline_init(struct intel_timeline *timeline,
 
 	INIT_ACTIVE_FENCE(&timeline->last_request);
 	INIT_LIST_HEAD(&timeline->requests);
+	spin_lock_init(&timeline->request_lock);
 
 	i915_syncmap_init(&timeline->sync);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index aaf15cbe1ce1..7c9f49f46626 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -57,6 +57,7 @@  struct intel_timeline {
 	 * outstanding.
 	 */
 	struct list_head requests;
+	spinlock_t request_lock;
 
 	/*
 	 * Contains an RCU guarded pointer to the last request. No reference is
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
index aeb1d1f616e8..540729250fef 100644
--- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
@@ -17,6 +17,7 @@  void mock_timeline_init(struct intel_timeline *timeline, u64 context)
 
 	INIT_ACTIVE_FENCE(&timeline->last_request);
 	INIT_LIST_HEAD(&timeline->requests);
+	spin_lock_init(&timeline->request_lock);
 
 	i915_syncmap_init(&timeline->sync);
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 51bb8a0812a1..219e1e3ed440 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -184,6 +184,27 @@  remove_from_client(struct i915_request *request)
 	rcu_read_unlock();
 }
 
+static inline void remove_from_timeline(struct i915_request *rq)
+{
+	struct intel_timeline *tl = i915_request_timeline(rq);
+
+	/*
+	 * We know the GPU must have read the request to have
+	 * sent us the seqno + interrupt, so use the position
+	 * of tail of the request to update the last known position
+	 * of the GPU head.
+	 *
+	 * Note this requires that we are always called in request
+	 * completion order.
+	 */
+	GEM_BUG_ON(!list_is_first(&rq->link, &tl->requests));
+	rq->ring->head = rq->postfix;
+
+	spin_lock(&tl->request_lock);
+	list_del(&rq->link);
+	spin_unlock(&tl->request_lock);
+}
+
 static void free_capture_list(struct i915_request *request)
 {
 	struct i915_capture_list *capture;
@@ -231,19 +252,6 @@  bool i915_request_retire(struct i915_request *rq)
 	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
 	trace_i915_request_retire(rq);
 
-	/*
-	 * We know the GPU must have read the request to have
-	 * sent us the seqno + interrupt, so use the position
-	 * of tail of the request to update the last known position
-	 * of the GPU head.
-	 *
-	 * Note this requires that we are always called in request
-	 * completion order.
-	 */
-	GEM_BUG_ON(!list_is_first(&rq->link,
-				  &i915_request_timeline(rq)->requests));
-	rq->ring->head = rq->postfix;
-
 	/*
 	 * We only loosely track inflight requests across preemption,
 	 * and so we may find ourselves attempting to retire a _completed_
@@ -270,7 +278,7 @@  bool i915_request_retire(struct i915_request *rq)
 	spin_unlock_irq(&rq->lock);
 
 	remove_from_client(rq);
-	list_del(&rq->link);
+	remove_from_timeline(rq);
 
 	intel_context_exit(rq->hw_context);
 	intel_context_unpin(rq->hw_context);
@@ -783,16 +791,14 @@  i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
 	if (!tl) /* already started or maybe even completed */
 		return 0;
 
-	fence = ERR_PTR(-EBUSY);
-	if (mutex_trylock(&tl->mutex)) {
-		fence = NULL;
-		if (!i915_request_started(signal) &&
-		    !list_is_first(&signal->link, &tl->requests)) {
-			signal = list_prev_entry(signal, link);
-			fence = dma_fence_get(&signal->fence);
-		}
-		mutex_unlock(&tl->mutex);
+	fence = NULL;
+	spin_lock(&tl->request_lock);
+	if (!i915_request_started(signal) &&
+	    !list_is_first(&signal->link, &tl->requests)) {
+		signal = list_prev_entry(signal, link);
+		fence = dma_fence_get(&signal->fence);
 	}
+	spin_unlock(&tl->request_lock);
 	intel_timeline_put(tl);
 	if (IS_ERR_OR_NULL(fence))
 		return PTR_ERR_OR_ZERO(fence);
@@ -1238,7 +1244,9 @@  __i915_request_add_to_timeline(struct i915_request *rq)
 							 0);
 	}
 
+	spin_lock(&timeline->request_lock);
 	list_add_tail(&rq->link, &timeline->requests);
+	spin_unlock(&timeline->request_lock);
 
 	/*
 	 * Make sure that no request gazumped us - if it was allocated after