
[01/16] drm/i915: Don't set queue_priority_hint if we don't kick the submission

Message ID: 20191021080226.537-1-chris@chris-wilson.co.uk
State: New, archived
Series: [01/16] drm/i915: Don't set queue_priority_hint if we don't kick the submission

Commit Message

Chris Wilson Oct. 21, 2019, 8:02 a.m. UTC
If we change the priority of the active context, it has no impact on
the decision of whether to preempt the active context -- we don't
preempt the context with itself. In this situation, we elide the
tasklet rescheduling and should *not* be marking up the
queue_priority_hint, as that may mask a later submission where we
decide we don't have to kick the tasklet because a higher priority
submission appears to already be pending (spoiler alert, it was not).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_scheduler.c | 37 ++++++++++++++++-----------
 1 file changed, 22 insertions(+), 15 deletions(-)
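
To make the masking concrete, here is a minimal standalone model of the
old flow (the function name, the priority values, and the reduction of
kick_submission() to a single flag are illustrative assumptions, not the
i915 code itself):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for engine->execlists.queue_priority_hint and the tasklet. */
static int queue_priority_hint;
static bool tasklet_kicked;

/*
 * Old flow: __i915_schedule() raised the hint before calling
 * kick_submission(), even when the kick itself was elided because the
 * bumped request belongs to the already-executing context.
 */
static void schedule_old(int prio, bool is_active_context)
{
	if (prio <= queue_priority_hint)
		return;			/* assume a kick is already pending */
	queue_priority_hint = prio;	/* hint raised unconditionally... */
	if (!is_active_context)
		tasklet_kicked = true;	/* ...but the kick may be elided */
}

int main(void)
{
	schedule_old(2, true);	/* bump the active context: hint = 2, no kick */
	schedule_old(1, false);	/* later submission at prio 1: masked, no kick */
	printf("tasklet kicked? %s\n", tasklet_kicked ? "yes" : "no");
	return 0;
}

With the patch applied, the hint is only updated on the path that also
evaluates the kick, so the second submission above is no longer masked
by a hint that never produced one.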

Comments

Mika Kuoppala Oct. 21, 2019, 9:49 a.m. UTC | #1
Chris Wilson <chris@chris-wilson.co.uk> writes:

> If we change the priority of the active context, it has no impact on
> the decision of whether to preempt the active context -- we don't
> preempt the context with itself. In this situation, we elide the
> tasklet rescheduling and should *not* be marking up the
> queue_priority_hint, as that may mask a later submission where we
> decide we don't have to kick the tasklet because a higher priority
> submission appears to already be pending (spoiler alert, it was not).
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> ---
>  drivers/gpu/drm/i915/i915_scheduler.c | 37 ++++++++++++++++-----------
>  1 file changed, 22 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> index 0ca40f6bf08c..d2edb527dcb8 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> @@ -189,22 +189,34 @@ static inline bool need_preempt(int prio, int active)
>  	return prio >= max(I915_PRIORITY_NORMAL, active);
>  }
>  
> -static void kick_submission(struct intel_engine_cs *engine, int prio)
> +static void kick_submission(struct intel_engine_cs *engine,
> +			    const struct i915_request *rq,
> +			    int prio)
>  {
> -	const struct i915_request *inflight =
> -		execlists_active(&engine->execlists);
> +	const struct i915_request *inflight;
> +
> +	/*
> +	 * We only need to kick the tasklet once for the high priority
> +	 * new context we add into the queue.
> +	 */
> +	if (prio <= engine->execlists.queue_priority_hint)
> +		return;
> +
> +	/* Nothing currently active? We're overdue for a submission! */
> +	inflight = execlists_active(&engine->execlists);
> +	if (!inflight)
> +		return;
>  
>  	/*
>  	 * If we are already the currently executing context, don't
> -	 * bother evaluating if we should preempt ourselves, or if
> -	 * we expect nothing to change as a result of running the
> -	 * tasklet, i.e. we have not change the priority queue
> -	 * sufficiently to oust the running context.
> +	 * bother evaluating if we should preempt ourselves.
>  	 */
> -	if (!inflight || !need_preempt(prio, rq_prio(inflight)))
> +	if (inflight->hw_context == rq->hw_context)

If there is a tail update at this moment, does the hardware
take it into account or do we need to kick?

-Mika


>  		return;
>  
> -	tasklet_hi_schedule(&engine->execlists.tasklet);
> +	engine->execlists.queue_priority_hint = prio;
> +	if (need_preempt(prio, rq_prio(inflight)))
> +		tasklet_hi_schedule(&engine->execlists.tasklet);
>  }
>  
>  static void __i915_schedule(struct i915_sched_node *node,
> @@ -330,13 +342,8 @@ static void __i915_schedule(struct i915_sched_node *node,
>  			list_move_tail(&node->link, cache.priolist);
>  		}
>  
> -		if (prio <= engine->execlists.queue_priority_hint)
> -			continue;
> -
> -		engine->execlists.queue_priority_hint = prio;
> -
>  		/* Defer (tasklet) submission until after all of our updates. */
> -		kick_submission(engine, prio);
> +		kick_submission(engine, node_to_request(node), prio);
>  	}
>  
>  	spin_unlock(&engine->active.lock);
> -- 
> 2.24.0.rc0
Chris Wilson Oct. 21, 2019, 9:56 a.m. UTC | #2
Quoting Mika Kuoppala (2019-10-21 10:49:14)
> Chris Wilson <chris@chris-wilson.co.uk> writes:
> 
> > If we change the priority of the active context, it has no impact on
> > the decision of whether to preempt the active context -- we don't
> > preempt the context with itself. In this situation, we elide the
> > tasklet rescheduling and should *not* be marking up the
> > queue_priority_hint, as that may mask a later submission where we
> > decide we don't have to kick the tasklet because a higher priority
> > submission appears to already be pending (spoiler alert, it was not).
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> > ---
> >  drivers/gpu/drm/i915/i915_scheduler.c | 37 ++++++++++++++++-----------
> >  1 file changed, 22 insertions(+), 15 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> > index 0ca40f6bf08c..d2edb527dcb8 100644
> > --- a/drivers/gpu/drm/i915/i915_scheduler.c
> > +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> > @@ -189,22 +189,34 @@ static inline bool need_preempt(int prio, int active)
> >       return prio >= max(I915_PRIORITY_NORMAL, active);
> >  }
> >  
> > -static void kick_submission(struct intel_engine_cs *engine, int prio)
> > +static void kick_submission(struct intel_engine_cs *engine,
> > +                         const struct i915_request *rq,
> > +                         int prio)
> >  {
> > -     const struct i915_request *inflight =
> > -             execlists_active(&engine->execlists);
> > +     const struct i915_request *inflight;
> > +
> > +     /*
> > +      * We only need to kick the tasklet once for the high priority
> > +      * new context we add into the queue.
> > +      */
> > +     if (prio <= engine->execlists.queue_priority_hint)
> > +             return;
> > +
> > +     /* Nothing currently active? We're overdue for a submission! */
> > +     inflight = execlists_active(&engine->execlists);
> > +     if (!inflight)
> > +             return;
> >  
> >       /*
> >        * If we are already the currently executing context, don't
> > -      * bother evaluating if we should preempt ourselves, or if
> > -      * we expect nothing to change as a result of running the
> > -      * tasklet, i.e. we have not change the priority queue
> > -      * sufficiently to oust the running context.
> > +      * bother evaluating if we should preempt ourselves.
> >        */
> > -     if (!inflight || !need_preempt(prio, rq_prio(inflight)))
> > +     if (inflight->hw_context == rq->hw_context)
> 
> If there is a tail update at this moment, does the hardware
> take it into account or do we need to kick?

We are holding the engine->active.lock, so we can't submit at this
moment. If we are inside process_csb (which runs outside of the lock),
then this stale value is of no consequence as we are inside the tasklet
already. So if we suppress the kick, we are inside the tasklet and
didn't need the kick. The other outcome, giving a kick even though the
HW was about ready, is just one kick too many. We are just trying to
reduce the number of unnecessary tasklet executions; the ideal is 0
false kicks, but any small number is better than kicking on every loop
through the priority node updates.
-Chris
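
Chris's locking argument, as a rough standalone sketch (pthreads
standing in for the kernel spinlock, all names assumed):

#include <pthread.h>
#include <stddef.h>

/* Stand-ins for engine->active.lock and the execlists_active() value. */
static pthread_mutex_t active_lock = PTHREAD_MUTEX_INITIALIZER;
static const void *inflight;

/* Submission path: installs a new in-flight request under the lock. */
static void submit_path(const void *next)
{
	pthread_mutex_lock(&active_lock);
	inflight = next;
	pthread_mutex_unlock(&active_lock);
}

/*
 * __i915_schedule() equivalent: samples the in-flight request under
 * the same lock, so no new submission can overtake the sample.
 */
static const void *schedule_path(void)
{
	const void *seen;

	pthread_mutex_lock(&active_lock);
	/*
	 * In the real driver, process_csb() also updates the in-flight
	 * request outside this lock, but it runs from the tasklet -- the
	 * very thing a kick would schedule -- so a stale sample there is
	 * harmless, exactly as argued above.
	 */
	seen = inflight;
	pthread_mutex_unlock(&active_lock);
	return seen;
}

int main(void)
{
	submit_path("rq");
	return schedule_path() ? 0 : 1;
}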
Mika Kuoppala Oct. 21, 2019, 10:01 a.m. UTC | #3
Chris Wilson <chris@chris-wilson.co.uk> writes:

> Quoting Mika Kuoppala (2019-10-21 10:49:14)
>> Chris Wilson <chris@chris-wilson.co.uk> writes:
>> 
>> > If we change the priority of the active context, it has no impact on
>> > the decision of whether to preempt the active context -- we don't
>> > preempt the context with itself. In this situation, we elide the
>> > tasklet rescheduling and should *not* be marking up the
>> > queue_priority_hint, as that may mask a later submission where we
>> > decide we don't have to kick the tasklet because a higher priority
>> > submission appears to already be pending (spoiler alert, it was not).
>> >
>> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
>> > ---
>> >  drivers/gpu/drm/i915/i915_scheduler.c | 37 ++++++++++++++++-----------
>> >  1 file changed, 22 insertions(+), 15 deletions(-)
>> >
>> > diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
>> > index 0ca40f6bf08c..d2edb527dcb8 100644
>> > --- a/drivers/gpu/drm/i915/i915_scheduler.c
>> > +++ b/drivers/gpu/drm/i915/i915_scheduler.c
>> > @@ -189,22 +189,34 @@ static inline bool need_preempt(int prio, int active)
>> >       return prio >= max(I915_PRIORITY_NORMAL, active);
>> >  }
>> >  
>> > -static void kick_submission(struct intel_engine_cs *engine, int prio)
>> > +static void kick_submission(struct intel_engine_cs *engine,
>> > +                         const struct i915_request *rq,
>> > +                         int prio)
>> >  {
>> > -     const struct i915_request *inflight =
>> > -             execlists_active(&engine->execlists);
>> > +     const struct i915_request *inflight;
>> > +
>> > +     /*
>> > +      * We only need to kick the tasklet once for the high priority
>> > +      * new context we add into the queue.
>> > +      */
>> > +     if (prio <= engine->execlists.queue_priority_hint)
>> > +             return;
>> > +
>> > +     /* Nothing currently active? We're overdue for a submission! */
>> > +     inflight = execlists_active(&engine->execlists);
>> > +     if (!inflight)
>> > +             return;
>> >  
>> >       /*
>> >        * If we are already the currently executing context, don't
>> > -      * bother evaluating if we should preempt ourselves, or if
>> > -      * we expect nothing to change as a result of running the
>> > -      * tasklet, i.e. we have not change the priority queue
>> > -      * sufficiently to oust the running context.
>> > +      * bother evaluating if we should preempt ourselves.
>> >        */
>> > -     if (!inflight || !need_preempt(prio, rq_prio(inflight)))
>> > +     if (inflight->hw_context == rq->hw_context)
>> 
>> If there is a tail update at this moment, does the hardware
>> take it into account or do we need to kick?
>
> We are holding the engine->active.lock, so we can't submit at this
> moment. If we are inside process_csb (which runs outside of the lock),
> then this stale value is of no consequence as we are inside the tasklet
> already. So if we suppress the kick, we are inside the tasklet and
> didn't need the kick. The other outcome, giving a kick even though the
> HW was about ready, is just one kick too many. We are just trying to
> reduce the number of unnecessary tasklet executions; the ideal is 0
> false kicks, but any small number is better than kicking on every loop
> through the priority node updates.

Ok, we can't submit nor change prio. My prime concern was one
kick too few.

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

Patch

diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 0ca40f6bf08c..d2edb527dcb8 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -189,22 +189,34 @@  static inline bool need_preempt(int prio, int active)
 	return prio >= max(I915_PRIORITY_NORMAL, active);
 }
 
-static void kick_submission(struct intel_engine_cs *engine, int prio)
+static void kick_submission(struct intel_engine_cs *engine,
+			    const struct i915_request *rq,
+			    int prio)
 {
-	const struct i915_request *inflight =
-		execlists_active(&engine->execlists);
+	const struct i915_request *inflight;
+
+	/*
+	 * We only need to kick the tasklet once for the high priority
+	 * new context we add into the queue.
+	 */
+	if (prio <= engine->execlists.queue_priority_hint)
+		return;
+
+	/* Nothing currently active? We're overdue for a submission! */
+	inflight = execlists_active(&engine->execlists);
+	if (!inflight)
+		return;
 
 	/*
 	 * If we are already the currently executing context, don't
-	 * bother evaluating if we should preempt ourselves, or if
-	 * we expect nothing to change as a result of running the
-	 * tasklet, i.e. we have not change the priority queue
-	 * sufficiently to oust the running context.
+	 * bother evaluating if we should preempt ourselves.
 	 */
-	if (!inflight || !need_preempt(prio, rq_prio(inflight)))
+	if (inflight->hw_context == rq->hw_context)
 		return;
 
-	tasklet_hi_schedule(&engine->execlists.tasklet);
+	engine->execlists.queue_priority_hint = prio;
+	if (need_preempt(prio, rq_prio(inflight)))
+		tasklet_hi_schedule(&engine->execlists.tasklet);
 }
 
 static void __i915_schedule(struct i915_sched_node *node,
@@ -330,13 +342,8 @@  static void __i915_schedule(struct i915_sched_node *node,
 			list_move_tail(&node->link, cache.priolist);
 		}
 
-		if (prio <= engine->execlists.queue_priority_hint)
-			continue;
-
-		engine->execlists.queue_priority_hint = prio;
-
 		/* Defer (tasklet) submission until after all of our updates. */
-		kick_submission(engine, prio);
+		kick_submission(engine, node_to_request(node), prio);
 	}
 
 	spin_unlock(&engine->active.lock);