diff mbox series

[11/13] drm/i915: Pass i915_sched_node around internally

Message ID 20190503115225.30831-11-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show
Series [01/13] drm/i915: Assert breadcrumbs are correctly ordered in the signal handler | expand

Commit Message

Chris Wilson May 3, 2019, 11:52 a.m. UTC
To simplify the next patch, update bump_priority and schedule to accept
the internal i915_sched_node directly and not expect a request pointer.

add/remove: 0/0 grow/shrink: 2/1 up/down: 8/-15 (-7)
Function                                     old     new   delta
i915_schedule_bump_priority                  109     113      +4
i915_schedule                                 50      54      +4
__i915_schedule                              922     907     -15

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_scheduler.c | 33 +++++++++++++++------------
 1 file changed, 18 insertions(+), 15 deletions(-)

Comments

Tvrtko Ursulin May 7, 2019, 12:12 p.m. UTC | #1
On 03/05/2019 12:52, Chris Wilson wrote:
> To simplify the next patch, update bump_priority and schedule to accept
> the internal i915_sched_node directly and not expect a request pointer.
> 
> add/remove: 0/0 grow/shrink: 2/1 up/down: 8/-15 (-7)
> Function                                     old     new   delta
> i915_schedule_bump_priority                  109     113      +4
> i915_schedule                                 50      54      +4
> __i915_schedule                              922     907     -15
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_scheduler.c | 33 +++++++++++++++------------
>   1 file changed, 18 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> index 4a95cf2201a7..380cb7343a10 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> @@ -189,7 +189,7 @@ static void kick_submission(struct intel_engine_cs *engine, int prio)
>   	tasklet_hi_schedule(&engine->execlists.tasklet);
>   }
>   
> -static void __i915_schedule(struct i915_request *rq,
> +static void __i915_schedule(struct i915_sched_node *rq,

Can you not use rq for sched node, but perhaps node?

>   			    const struct i915_sched_attr *attr)
>   {
>   	struct intel_engine_cs *engine;
> @@ -203,13 +203,13 @@ static void __i915_schedule(struct i915_request *rq,
>   	lockdep_assert_held(&schedule_lock);
>   	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
>   
> -	if (i915_request_completed(rq))
> +	if (prio <= READ_ONCE(rq->attr.priority))
>   		return;
>   
> -	if (prio <= READ_ONCE(rq->sched.attr.priority))
> +	if (node_signaled(rq))

And refrain from re-ordering the sequence in this patch please.

>   		return;
>   
> -	stack.signaler = &rq->sched;
> +	stack.signaler = rq;
>   	list_add(&stack.dfs_link, &dfs);
>   
>   	/*
> @@ -260,9 +260,9 @@ static void __i915_schedule(struct i915_request *rq,
>   	 * execlists_submit_request()), we can set our own priority and skip
>   	 * acquiring the engine locks.
>   	 */
> -	if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
> -		GEM_BUG_ON(!list_empty(&rq->sched.link));
> -		rq->sched.attr = *attr;
> +	if (rq->attr.priority == I915_PRIORITY_INVALID) {
> +		GEM_BUG_ON(!list_empty(&rq->link));
> +		rq->attr = *attr;
>   
>   		if (stack.dfs_link.next == stack.dfs_link.prev)
>   			return;
> @@ -271,7 +271,7 @@ static void __i915_schedule(struct i915_request *rq,
>   	}
>   
>   	memset(&cache, 0, sizeof(cache));
> -	engine = rq->engine;
> +	engine = node_to_request(rq)->engine;
>   	spin_lock(&engine->timeline.lock);
>   
>   	/* Fifo and depth-first replacement ensure our deps execute before us */
> @@ -322,13 +322,20 @@ static void __i915_schedule(struct i915_request *rq,
>   void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
>   {
>   	spin_lock_irq(&schedule_lock);
> -	__i915_schedule(rq, attr);
> +	__i915_schedule(&rq->sched, attr);
>   	spin_unlock_irq(&schedule_lock);
>   }
>   
> +static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
> +{
> +	struct i915_sched_attr attr = node->attr;
> +
> +	attr.priority |= bump;
> +	__i915_schedule(node, &attr);
> +}
> +
>   void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
>   {
> -	struct i915_sched_attr attr;
>   	unsigned long flags;
>   
>   	GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
> @@ -337,11 +344,7 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
>   		return;
>   
>   	spin_lock_irqsave(&schedule_lock, flags);
> -
> -	attr = rq->sched.attr;
> -	attr.priority |= bump;
> -	__i915_schedule(rq, &attr);
> -
> +	__bump_priority(&rq->sched, bump);
>   	spin_unlock_irqrestore(&schedule_lock, flags);
>   }
>   
> 

Regards,

Tvrtko
Chris Wilson May 7, 2019, 12:26 p.m. UTC | #2
Quoting Tvrtko Ursulin (2019-05-07 13:12:05)
> 
> On 03/05/2019 12:52, Chris Wilson wrote:
> > To simplify the next patch, update bump_priority and schedule to accept
> > the internal i915_sched_node directly and not expect a request pointer.
> > 
> > add/remove: 0/0 grow/shrink: 2/1 up/down: 8/-15 (-7)
> > Function                                     old     new   delta
> > i915_schedule_bump_priority                  109     113      +4
> > i915_schedule                                 50      54      +4
> > __i915_schedule                              922     907     -15
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> >   drivers/gpu/drm/i915/i915_scheduler.c | 33 +++++++++++++++------------
> >   1 file changed, 18 insertions(+), 15 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> > index 4a95cf2201a7..380cb7343a10 100644
> > --- a/drivers/gpu/drm/i915/i915_scheduler.c
> > +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> > @@ -189,7 +189,7 @@ static void kick_submission(struct intel_engine_cs *engine, int prio)
> >       tasklet_hi_schedule(&engine->execlists.tasklet);
> >   }
> >   
> > -static void __i915_schedule(struct i915_request *rq,
> > +static void __i915_schedule(struct i915_sched_node *rq,
> 
> Can you not use rq for sched node, but perhaps node?

We use node later on. I kept with rq to try and keep the patch small,
and stick to the current semantics. We could reuse node... That looks
like it is semantically clean.
-Chris
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 4a95cf2201a7..380cb7343a10 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -189,7 +189,7 @@  static void kick_submission(struct intel_engine_cs *engine, int prio)
 	tasklet_hi_schedule(&engine->execlists.tasklet);
 }
 
-static void __i915_schedule(struct i915_request *rq,
+static void __i915_schedule(struct i915_sched_node *rq,
 			    const struct i915_sched_attr *attr)
 {
 	struct intel_engine_cs *engine;
@@ -203,13 +203,13 @@  static void __i915_schedule(struct i915_request *rq,
 	lockdep_assert_held(&schedule_lock);
 	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
 
-	if (i915_request_completed(rq))
+	if (prio <= READ_ONCE(rq->attr.priority))
 		return;
 
-	if (prio <= READ_ONCE(rq->sched.attr.priority))
+	if (node_signaled(rq))
 		return;
 
-	stack.signaler = &rq->sched;
+	stack.signaler = rq;
 	list_add(&stack.dfs_link, &dfs);
 
 	/*
@@ -260,9 +260,9 @@  static void __i915_schedule(struct i915_request *rq,
 	 * execlists_submit_request()), we can set our own priority and skip
 	 * acquiring the engine locks.
 	 */
-	if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
-		GEM_BUG_ON(!list_empty(&rq->sched.link));
-		rq->sched.attr = *attr;
+	if (rq->attr.priority == I915_PRIORITY_INVALID) {
+		GEM_BUG_ON(!list_empty(&rq->link));
+		rq->attr = *attr;
 
 		if (stack.dfs_link.next == stack.dfs_link.prev)
 			return;
@@ -271,7 +271,7 @@  static void __i915_schedule(struct i915_request *rq,
 	}
 
 	memset(&cache, 0, sizeof(cache));
-	engine = rq->engine;
+	engine = node_to_request(rq)->engine;
 	spin_lock(&engine->timeline.lock);
 
 	/* Fifo and depth-first replacement ensure our deps execute before us */
@@ -322,13 +322,20 @@  static void __i915_schedule(struct i915_request *rq,
 void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
 {
 	spin_lock_irq(&schedule_lock);
-	__i915_schedule(rq, attr);
+	__i915_schedule(&rq->sched, attr);
 	spin_unlock_irq(&schedule_lock);
 }
 
+static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
+{
+	struct i915_sched_attr attr = node->attr;
+
+	attr.priority |= bump;
+	__i915_schedule(node, &attr);
+}
+
 void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
 {
-	struct i915_sched_attr attr;
 	unsigned long flags;
 
 	GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
@@ -337,11 +344,7 @@  void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
 		return;
 
 	spin_lock_irqsave(&schedule_lock, flags);
-
-	attr = rq->sched.attr;
-	attr.priority |= bump;
-	__i915_schedule(rq, &attr);
-
+	__bump_priority(&rq->sched, bump);
 	spin_unlock_irqrestore(&schedule_lock, flags);
 }