
[09/12] drm/i915/gt: Incorporate the virtual engine into timeslicing

Message ID 20200519063123.20673-9-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Series [01/12] drm/i915: Don't set queue-priority hint when supressing the reschedule

Commit Message

Chris Wilson May 19, 2020, 6:31 a.m. UTC
It was quite the oversight to only factor in the normal queue to decide
the timeslicing switch priority. By leaving out the next virtual request
from the priority decision, we would not timeslice the current engine if
there was an available virtual request.

Testcase: igt/gem_exec_balancer/sliced
Fixes: 3df2deed411e ("drm/i915/execlists: Enable timeslice on partial virtual engine dequeue")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 30 +++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)
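
In essence, need_timeslice() gains the first virtual-engine rb node as a third argument and folds the priority of that engine's next pending request into the switch hint, so a runnable virtual request can now force a timeslice on the physical engine. A condensed sketch of the new decision follows; it paraphrases the hunk below, and the trailing comparison against the running request is not part of this diff and is only assumed here:

static bool
need_timeslice_sketch(const struct intel_engine_cs *engine,
		      const struct i915_request *rq,
		      const struct rb_node *rb)
{
	int hint = engine->execlists.queue_priority_hint;

	if (rb) {
		/* rb is the next virtual engine queued against this engine */
		const struct virtual_engine *ve =
			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
		const struct intel_engine_cs *inflight =
			intel_context_inflight(&ve->context);

		/* Only count it if it is idle or already bound to us */
		if (!inflight || inflight == engine) {
			struct i915_request *next;

			rcu_read_lock();
			next = READ_ONCE(ve->request);
			if (next)
				hint = max(hint, rq_prio(next));
			rcu_read_unlock();
		}
	}

	/* Plus the next request already on this engine's active list */
	if (!list_is_last(&rq->sched.link, &engine->active.requests))
		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));

	/* Assumed: slice if anything waiting is at least as important */
	return hint >= rq_prio(rq);
}

The start_timeslice() change is the matching plumbing: callers now pass the priority they already know (the queue hint, or the blocked virtual request's priority) instead of recomputing it from the queue alone.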

Comments

Tvrtko Ursulin May 19, 2020, 9:40 a.m. UTC | #1
On 19/05/2020 07:31, Chris Wilson wrote:
> It was quite the oversight to only factor in the normal queue to decide
> the timeslicing switch priority. By leaving out the next virtual request
> from the priority decision, we would not timeslice the current engine if
> there was an available virtual request.
> 
> Testcase: igt/gem_exec_balancer/sliced
> Fixes: 3df2deed411e ("drm/i915/execlists: Enable timeslice on partial virtual engine dequeue")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>   drivers/gpu/drm/i915/gt/intel_lrc.c | 30 +++++++++++++++++++++++------
>   1 file changed, 24 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index 35e7ae3c049c..42cb0cae2845 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -1898,7 +1898,8 @@ static void defer_active(struct intel_engine_cs *engine)
>   
>   static bool
>   need_timeslice(const struct intel_engine_cs *engine,
> -	       const struct i915_request *rq)
> +	       const struct i915_request *rq,
> +	       const struct rb_node *rb)
>   {
>   	int hint;
>   
> @@ -1906,6 +1907,24 @@ need_timeslice(const struct intel_engine_cs *engine,
>   		return false;
>   
>   	hint = engine->execlists.queue_priority_hint;
> +
> +	if (rb) {
> +		const struct virtual_engine *ve =
> +			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
> +		const struct intel_engine_cs *inflight =
> +			intel_context_inflight(&ve->context);
> +
> +		if (!inflight || inflight == engine) {
> +			struct i915_request *next;
> +
> +			rcu_read_lock();
> +			next = READ_ONCE(ve->request);
> +			if (next)
> +				hint = max(hint, rq_prio(next));
> +			rcu_read_unlock();
> +		}
> +	}
> +
>   	if (!list_is_last(&rq->sched.link, &engine->active.requests))
>   		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
>   
> @@ -1980,10 +1999,9 @@ static void set_timeslice(struct intel_engine_cs *engine)
>   	set_timer_ms(&engine->execlists.timer, duration);
>   }
>   
> -static void start_timeslice(struct intel_engine_cs *engine)
> +static void start_timeslice(struct intel_engine_cs *engine, int prio)
>   {
>   	struct intel_engine_execlists *execlists = &engine->execlists;
> -	const int prio = queue_prio(execlists);
>   	unsigned long duration;
>   
>   	if (!intel_engine_has_timeslices(engine))
> @@ -2143,7 +2161,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   			__unwind_incomplete_requests(engine);
>   
>   			last = NULL;
> -		} else if (need_timeslice(engine, last) &&
> +		} else if (need_timeslice(engine, last, rb) &&
>   			   timeslice_expired(execlists, last)) {
>   			if (i915_request_completed(last)) {
>   				tasklet_hi_schedule(&execlists->tasklet);
> @@ -2191,7 +2209,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   				 * Even if ELSP[1] is occupied and not worthy
>   				 * of timeslices, our queue might be.
>   				 */
> -				start_timeslice(engine);
> +				start_timeslice(engine, queue_prio(execlists));
>   				return;
>   			}
>   		}
> @@ -2226,7 +2244,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   
>   			if (last && !can_merge_rq(last, rq)) {
>   				spin_unlock(&ve->base.active.lock);
> -				start_timeslice(engine);
> +				start_timeslice(engine, rq_prio(rq));
>   				return; /* leave this for another sibling */
>   			}
>   
> 

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko

Patch

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 35e7ae3c049c..42cb0cae2845 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1898,7 +1898,8 @@  static void defer_active(struct intel_engine_cs *engine)
 
 static bool
 need_timeslice(const struct intel_engine_cs *engine,
-	       const struct i915_request *rq)
+	       const struct i915_request *rq,
+	       const struct rb_node *rb)
 {
 	int hint;
 
@@ -1906,6 +1907,24 @@  need_timeslice(const struct intel_engine_cs *engine,
 		return false;
 
 	hint = engine->execlists.queue_priority_hint;
+
+	if (rb) {
+		const struct virtual_engine *ve =
+			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+		const struct intel_engine_cs *inflight =
+			intel_context_inflight(&ve->context);
+
+		if (!inflight || inflight == engine) {
+			struct i915_request *next;
+
+			rcu_read_lock();
+			next = READ_ONCE(ve->request);
+			if (next)
+				hint = max(hint, rq_prio(next));
+			rcu_read_unlock();
+		}
+	}
+
 	if (!list_is_last(&rq->sched.link, &engine->active.requests))
 		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
 
@@ -1980,10 +1999,9 @@  static void set_timeslice(struct intel_engine_cs *engine)
 	set_timer_ms(&engine->execlists.timer, duration);
 }
 
-static void start_timeslice(struct intel_engine_cs *engine)
+static void start_timeslice(struct intel_engine_cs *engine, int prio)
 {
 	struct intel_engine_execlists *execlists = &engine->execlists;
-	const int prio = queue_prio(execlists);
 	unsigned long duration;
 
 	if (!intel_engine_has_timeslices(engine))
@@ -2143,7 +2161,7 @@  static void execlists_dequeue(struct intel_engine_cs *engine)
 			__unwind_incomplete_requests(engine);
 
 			last = NULL;
-		} else if (need_timeslice(engine, last) &&
+		} else if (need_timeslice(engine, last, rb) &&
 			   timeslice_expired(execlists, last)) {
 			if (i915_request_completed(last)) {
 				tasklet_hi_schedule(&execlists->tasklet);
@@ -2191,7 +2209,7 @@  static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * Even if ELSP[1] is occupied and not worthy
 				 * of timeslices, our queue might be.
 				 */
-				start_timeslice(engine);
+				start_timeslice(engine, queue_prio(execlists));
 				return;
 			}
 		}
@@ -2226,7 +2244,7 @@  static void execlists_dequeue(struct intel_engine_cs *engine)
 
 			if (last && !can_merge_rq(last, rq)) {
 				spin_unlock(&ve->base.active.lock);
-				start_timeslice(engine);
+				start_timeslice(engine, rq_prio(rq));
 				return; /* leave this for another sibling */
 			}