drm/i915: Pull seqno started checks together

Message ID 20180806111121.12998-1-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson Aug. 6, 2018, 11:11 a.m. UTC
We have a few instances of checking seqno-1 to see if the HW has started
the request. Pull those together under a helper.

Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_request.c      |  9 +++---
 drivers/gpu/drm/i915/i915_request.h      | 39 +++++++++++++++---------
 drivers/gpu/drm/i915/intel_breadcrumbs.c |  6 ++--
 drivers/gpu/drm/i915/intel_engine_cs.c   |  3 +-
 drivers/gpu/drm/i915/intel_hangcheck.c   |  2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.h  | 32 +++++++++++++++----
 6 files changed, 59 insertions(+), 32 deletions(-)
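
For readers outside i915: the hardware writes the global seqno of the most recently completed request to the engine's status page, so "has request N started?" reduces to "has the hardware seqno passed N - 1?". Below is a minimal standalone sketch of that arithmetic — plain C with hypothetical names (seqno_passed, has_started, has_completed), not driver code; the real helpers are in the patch itself:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "a is at or after b", mirroring i915_seqno_passed(). */
static bool seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

/* A request with global seqno N has started once the hardware has
 * passed N - 1, i.e. everything queued before it has retired... */
static bool has_started(uint32_t hw_seqno, uint32_t rq_seqno)
{
	return seqno_passed(hw_seqno, rq_seqno - 1);
}

/* ...and has completed once the hardware has passed N itself. */
static bool has_completed(uint32_t hw_seqno, uint32_t rq_seqno)
{
	return seqno_passed(hw_seqno, rq_seqno);
}

int main(void)
{
	/* Hardware seqno is 41: request 42 has started, not completed. */
	printf("started=%d completed=%d\n",
	       has_started(41, 42), has_completed(41, 42));

	/* The signed subtraction keeps working across u32 wraparound. */
	printf("wrap: started=%d\n", has_started(2, 0xfffffffeu));
	return 0;
}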

Comments

Mika Kuoppala Aug. 6, 2018, 11:17 a.m. UTC | #1
Chris Wilson <chris@chris-wilson.co.uk> writes:

> We have a few instances of checking seqno-1 to see if the HW has started
> the request. Pull those together under a helper.
>

Could you elaborate why you want both completed and signaled?
Otherwise it looks good.
-Mika

Chris Wilson Aug. 6, 2018, 11:24 a.m. UTC | #2
Quoting Mika Kuoppala (2018-08-06 12:17:16)
> Chris Wilson <chris@chris-wilson.co.uk> writes:
> 
> > We have a few instances of checking seqno-1 to see if the HW has started
> > the request. Pull those together under a helper.
> >
> 
> Could you elaborate why you want both completed and signaled?

Purely a matter of context. In some cases we are talking about
signaling, and so signaled makes sense; in others we are at a higher
level, talking about requests being started or completed.
-Chris
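
For reference, all three names resolve to the same underlying seqno comparison. Condensed from the intel_ringbuffer.h hunk of the patch below (the GEM_BUG_ON assertion elided, comments added):

static inline bool intel_engine_signaled(struct intel_engine_cs *engine,
					 u32 seqno)
{
	/* Has the HW seqno on the status page reached this value? */
	return i915_seqno_passed(intel_engine_get_seqno(engine), seqno);
}

static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
					      u32 seqno)
{
	/* Same check, named for request-level callers. */
	return intel_engine_signaled(engine, seqno);
}

static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
					    u32 seqno)
{
	/* Started once everything before this request has signaled. */
	return intel_engine_signaled(engine, seqno - 1);
}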

Patch

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5c2c93cbab12..09ed48833b54 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -527,7 +527,7 @@ void __i915_request_submit(struct i915_request *request)
 
 	seqno = timeline_get_seqno(&engine->timeline);
 	GEM_BUG_ON(!seqno);
-	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
+	GEM_BUG_ON(intel_engine_signaled(engine, seqno));
 
 	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
@@ -579,8 +579,7 @@ void __i915_request_unsubmit(struct i915_request *request)
 	 */
 	GEM_BUG_ON(!request->global_seqno);
 	GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
-	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
-				     request->global_seqno));
+	GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
 	engine->timeline.seqno--;
 
 	/* We may be recursing from the signal callback of another i915 fence */
@@ -1205,7 +1204,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
 	 * it is a fair assumption that it will not complete within our
 	 * relatively short timeout.
 	 */
-	if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
+	if (!intel_engine_has_started(engine, seqno))
 		return false;
 
 	/*
@@ -1222,7 +1221,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
 	irq = READ_ONCE(engine->breadcrumbs.irq_count);
 	timeout_us += local_clock_us(&cpu);
 	do {
-		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
+		if (intel_engine_has_completed(engine, seqno))
 			return seqno == i915_request_global_seqno(rq);
 
 		/*
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index e1c9365dfefb..05888de2e045 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -272,7 +272,10 @@ long i915_request_wait(struct i915_request *rq,
 #define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
 #define I915_WAIT_FOR_IDLE_BOOST BIT(3)
 
-static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
+static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
+					    u32 seqno);
+static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
+					      u32 seqno);
 
 /**
  * Returns true if seq1 is later than seq2.
@@ -282,11 +285,31 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
 	return (s32)(seq1 - seq2) >= 0;
 }
 
+/**
+ * i915_request_started - check if the request has begun executing
+ * @rq: the request
+ *
+ * Returns true if the request has been submitted to hardware and the hardware
+ * has advanced past the end of the previous request, and so should either be
+ * currently processing the request (though it may have been preempted and is
+ * not necessarily the next request to complete) or have already completed it.
+ */
+static inline bool i915_request_started(const struct i915_request *rq)
+{
+	u32 seqno;
+
+	seqno = i915_request_global_seqno(rq);
+	if (!seqno) /* not yet submitted to HW */
+		return false;
+
+	return intel_engine_has_started(rq->engine, seqno);
+}
+
 static inline bool
 __i915_request_completed(const struct i915_request *rq, u32 seqno)
 {
 	GEM_BUG_ON(!seqno);
-	return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) &&
+	return intel_engine_has_completed(rq->engine, seqno) &&
 		seqno == i915_request_global_seqno(rq);
 }
 
@@ -301,18 +324,6 @@ static inline bool i915_request_completed(const struct i915_request *rq)
 	return __i915_request_completed(rq, seqno);
 }
 
-static inline bool i915_request_started(const struct i915_request *rq)
-{
-	u32 seqno;
-
-	seqno = i915_request_global_seqno(rq);
-	if (!seqno)
-		return false;
-
-	return i915_seqno_passed(intel_engine_get_seqno(rq->engine),
-				 seqno - 1);
-}
-
 static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
 {
 	const struct i915_request *rq =
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 1db6ba7d926e..84bf8d827136 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -256,8 +256,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
 	spin_unlock(&b->irq_lock);
 
 	rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
-		GEM_BUG_ON(!i915_seqno_passed(intel_engine_get_seqno(engine),
-					      wait->seqno));
+		GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno));
 		RB_CLEAR_NODE(&wait->node);
 		wake_up_process(wait->tsk);
 	}
@@ -508,8 +507,7 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
 		return armed;
 
 	/* Make the caller recheck if its request has already started. */
-	return i915_seqno_passed(intel_engine_get_seqno(engine),
-				 wait->seqno - 1);
+	return intel_engine_has_started(engine, wait->seqno);
 }
 
 static inline bool chain_wakeup(struct rb_node *rb, int priority)
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 67c4fc5d737c..99d5a24219c1 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -968,8 +968,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
 		return true;
 
 	/* Any inflight/incomplete requests? */
-	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
-			       intel_engine_last_submit(engine)))
+	if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
 		return false;
 
 	if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 2fc7a0dd0df9..e26d05a46451 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -142,7 +142,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
 	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
 		return -1;
 
-	if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
+	if (intel_engine_signaled(signaller, seqno))
 		return 1;
 
 	/* cursory check for an unkickable deadlock */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 57f3787ed6ec..057b88c149f7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -911,14 +911,10 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine);
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
 
-static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
-{
-	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
 static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
 {
-	/* We are only peeking at the tail of the submit queue (and not the
+	/*
+	 * We are only peeking at the tail of the submit queue (and not the
 	 * queue itself) in order to gain a hint as to the current active
 	 * state of the engine. Callers are not expected to be taking
 	 * engine->timeline->lock, nor are they expected to be concerned
@@ -928,6 +924,30 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
 	return READ_ONCE(engine->timeline.seqno);
 }
 
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
+{
+	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
+}
+
+static inline bool intel_engine_signaled(struct intel_engine_cs *engine,
+					 u32 seqno)
+{
+	GEM_BUG_ON(!seqno);
+	return i915_seqno_passed(intel_engine_get_seqno(engine), seqno);
+}
+
+static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
+					      u32 seqno)
+{
+	return intel_engine_signaled(engine, seqno);
+}
+
+static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
+					    u32 seqno)
+{
+	return intel_engine_signaled(engine, seqno - 1);
+}
+
 void intel_engine_get_instdone(struct intel_engine_cs *engine,
 			       struct intel_instdone *instdone);
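
Taken together, the helpers read naturally at their call sites. As one illustration, here is a condensed sketch of the busy-spin pattern from the patched __i915_spin_request() — the timing bookkeeping and the final global-seqno recheck are simplified away, and spin_for_request/loops are hypothetical names:

static bool spin_for_request(struct intel_engine_cs *engine, u32 seqno,
			     unsigned int loops)
{
	/*
	 * Only spin if the request is already executing on the HW;
	 * otherwise it will not complete within a short busy-wait.
	 */
	if (!intel_engine_has_started(engine, seqno))
		return false;

	while (loops--) {
		if (intel_engine_has_completed(engine, seqno))
			return true;
		cpu_relax();
	}
	return false;
}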