[12/51] drm/i915/guc: Ensure request ordering via completion fences

Message ID: 20210716201724.54804-13-matthew.brost@intel.com
State: New, archived
Series: GuC submission support

Commit Message

Matthew Brost July 16, 2021, 8:16 p.m. UTC
If two requests are on the same ring, they are explicitly ordered by the
HW. So, a submission fence is sufficient to ensure ordering when using
the new GuC submission interface. Conversely, if two requests share a
timeline and are on the same physical engine but in different contexts,
this doesn't ensure ordering on the new GuC submission interface. So, a
completion fence needs to be used to ensure ordering.

Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c |  1 -
 drivers/gpu/drm/i915/i915_request.c               | 12 ++++++++++--
 2 files changed, 10 insertions(+), 3 deletions(-)
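
To make the rule concrete, the condition added to
__i915_request_add_to_timeline() below can be condensed into the
following sketch. This is illustrative only, not code from the patch;
can_use_submit_fence() is a hypothetical helper name:

/*
 * Hypothetical helper condensing the patch's ordering rule: may rq be
 * ordered after prev with just a submission fence?
 */
static bool can_use_submit_fence(const struct i915_request *prev,
				 const struct i915_request *rq)
{
	if (intel_engine_uses_guc(rq->engine))
		/*
		 * GuC submission: only requests on the same context share
		 * a ring, and only the ring is explicitly ordered by the HW.
		 */
		return prev->context == rq->context;

	/*
	 * Execlists: both requests being pinned to one and the same
	 * physical engine is sufficient.
	 */
	return is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask);
}

When the condition fails, the request must wait on prev's completion
fence rather than its submission fence.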

Comments

Daniele Ceraolo Spurio July 19, 2021, 11:46 p.m. UTC | #1
On 7/16/2021 1:16 PM, Matthew Brost wrote:
> If two requests are on the same ring, they are explicitly ordered by the
> HW. So, a submission fence is sufficient to ensure ordering when using
> the new GuC submission interface. Conversely, if two requests share a
> timeline and are on the same physical engine but in different contexts,
> this doesn't ensure ordering on the new GuC submission interface. So, a
> completion fence needs to be used to ensure ordering.
>
> Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>   drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c |  1 -
>   drivers/gpu/drm/i915/i915_request.c               | 12 ++++++++++--
>   2 files changed, 10 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index 9dc1a256e185..4443cc6f5320 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -933,7 +933,6 @@ static void guc_context_sched_disable(struct intel_context *ce)
>   	 * a request before we set the 'context_pending_disable' flag here.
>   	 */
>   	if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
> -		spin_unlock_irqrestore(&ce->guc_state.lock, flags);

incorrect spinlock drop is still here. Everything else looks ok (my
suggestion to use an engine flag stands, but can be addressed as a
follow-up).

Daniele

>   		return;
>   	}
>   	guc_id = prep_context_pending_disable(ce);
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index b48c4905d3fc..2b2b63cba06c 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -432,6 +432,7 @@ void i915_request_retire_upto(struct i915_request *rq)
>   
>   	do {
>   		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
> +		GEM_BUG_ON(!i915_request_completed(tmp));
>   	} while (i915_request_retire(tmp) && tmp != rq);
>   }
>   
> @@ -1380,6 +1381,9 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
>   	return err;
>   }
>   
> +static int
> +i915_request_await_request(struct i915_request *to, struct i915_request *from);
> +
>   int
>   i915_request_await_execution(struct i915_request *rq,
>   			     struct dma_fence *fence)
> @@ -1465,7 +1469,8 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
>   			return ret;
>   	}
>   
> -	if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
> +	if (!intel_engine_uses_guc(to->engine) &&
> +	    is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
>   		ret = await_request_submit(to, from);
>   	else
>   		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
> @@ -1626,6 +1631,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
>   	prev = to_request(__i915_active_fence_set(&timeline->last_request,
>   						  &rq->fence));
>   	if (prev && !__i915_request_is_complete(prev)) {
> +		bool uses_guc = intel_engine_uses_guc(rq->engine);
> +
>   		/*
>   		 * The requests are supposed to be kept in order. However,
>   		 * we need to be wary in case the timeline->last_request
> @@ -1636,7 +1643,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
>   			   i915_seqno_passed(prev->fence.seqno,
>   					     rq->fence.seqno));
>   
> -		if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
> +		if ((!uses_guc && is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) ||
> +		    (uses_guc && prev->context == rq->context))
>   			i915_sw_fence_await_sw_fence(&rq->submit,
>   						     &prev->submit,
>   						     &rq->submitq);
Matthew Brost July 20, 2021, 2:48 a.m. UTC | #2
On Mon, Jul 19, 2021 at 04:46:57PM -0700, Daniele Ceraolo Spurio wrote:
> 
> 
> On 7/16/2021 1:16 PM, Matthew Brost wrote:
> > If two requests are on the same ring, they are explicitly ordered by the
> > HW. So, a submission fence is sufficient to ensure ordering when using
> > the new GuC submission interface. Conversely, if two requests share a
> > timeline and are on the same physical engine but in different contexts,
> > this doesn't ensure ordering on the new GuC submission interface. So, a
> > completion fence needs to be used to ensure ordering.
> > 
> > Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> >   drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c |  1 -
> >   drivers/gpu/drm/i915/i915_request.c               | 12 ++++++++++--
> >   2 files changed, 10 insertions(+), 3 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > index 9dc1a256e185..4443cc6f5320 100644
> > --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > @@ -933,7 +933,6 @@ static void guc_context_sched_disable(struct intel_context *ce)
> >   	 * a request before we set the 'context_pending_disable' flag here.
> >   	 */
> >   	if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
> > -		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
> 
> incorrect spinlock drop is still here. Everything else looks ok (my

No it isn't. See the return directly below the drop of the spin lock.
Matt

> suggestion to use an engine flag stands, but can be addressed as a
> follow-up).
> 

Not sure I follow this one, but we can sync and address it in a
follow-up if needed.

Matt

> Daniele
> 
> >   		return;
> >   	}
> >   	guc_id = prep_context_pending_disable(ce);
> > diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> > index b48c4905d3fc..2b2b63cba06c 100644
> > --- a/drivers/gpu/drm/i915/i915_request.c
> > +++ b/drivers/gpu/drm/i915/i915_request.c
> > @@ -432,6 +432,7 @@ void i915_request_retire_upto(struct i915_request *rq)
> >   	do {
> >   		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
> > +		GEM_BUG_ON(!i915_request_completed(tmp));
> >   	} while (i915_request_retire(tmp) && tmp != rq);
> >   }
> > @@ -1380,6 +1381,9 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
> >   	return err;
> >   }
> > +static int
> > +i915_request_await_request(struct i915_request *to, struct i915_request *from);
> > +
> >   int
> >   i915_request_await_execution(struct i915_request *rq,
> >   			     struct dma_fence *fence)
> > @@ -1465,7 +1469,8 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
> >   			return ret;
> >   	}
> > -	if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
> > +	if (!intel_engine_uses_guc(to->engine) &&
> > +	    is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
> >   		ret = await_request_submit(to, from);
> >   	else
> >   		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
> > @@ -1626,6 +1631,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
> >   	prev = to_request(__i915_active_fence_set(&timeline->last_request,
> >   						  &rq->fence));
> >   	if (prev && !__i915_request_is_complete(prev)) {
> > +		bool uses_guc = intel_engine_uses_guc(rq->engine);
> > +
> >   		/*
> >   		 * The requests are supposed to be kept in order. However,
> >   		 * we need to be wary in case the timeline->last_request
> > @@ -1636,7 +1643,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
> >   			   i915_seqno_passed(prev->fence.seqno,
> >   					     rq->fence.seqno));
> > -		if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
> > +		if ((!uses_guc && is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) ||
> > +		    (uses_guc && prev->context == rq->context))
> >   			i915_sw_fence_await_sw_fence(&rq->submit,
> >   						     &prev->submit,
> >   						     &rq->submitq);
>
Matthew Brost July 20, 2021, 2:50 a.m. UTC | #3
On Mon, Jul 19, 2021 at 07:48:17PM -0700, Matthew Brost wrote:
> On Mon, Jul 19, 2021 at 04:46:57PM -0700, Daniele Ceraolo Spurio wrote:
> > 
> > 
> > On 7/16/2021 1:16 PM, Matthew Brost wrote:
> > > If two requests are on the same ring, they are explicitly ordered by the
> > > HW. So, a submission fence is sufficient to ensure ordering when using
> > > the new GuC submission interface. Conversely, if two requests share a
> > > timeline and are on the same physical engine but in different contexts,
> > > this doesn't ensure ordering on the new GuC submission interface. So, a
> > > completion fence needs to be used to ensure ordering.
> > > 
> > > Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
> > > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > > ---
> > >   drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c |  1 -
> > >   drivers/gpu/drm/i915/i915_request.c               | 12 ++++++++++--
> > >   2 files changed, 10 insertions(+), 3 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > > index 9dc1a256e185..4443cc6f5320 100644
> > > --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > > +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > > @@ -933,7 +933,6 @@ static void guc_context_sched_disable(struct intel_context *ce)
> > >   	 * a request before we set the 'context_pending_disable' flag here.
> > >   	 */
> > >   	if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
> > > -		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
> > 
> > incorrect spinlock drop is still here. Everything else looks ok (my
> 
> No it isn't. See the return directly below the drop of the spin lock.
> Matt
> 

Ugh, misread this. Yes, this deletion shouldn't be here. Will fix.

Matt
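
For reference, the shape of guc_context_sched_disable() around the
disputed hunk is roughly as follows. This is a sketch reconstructed
from the diff context only, assuming ce->guc_state.lock is taken
earlier in the function (which the unlock being deleted implies) and
dropped again on the main path, which is not shown in the hunk:

spin_lock_irqsave(&ce->guc_state.lock, flags);
/* ... checks done while holding the lock ... */
if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
	/*
	 * The lock is still held on this early-exit path, so the
	 * unlock the patch deletes is in fact required.
	 */
	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
	return;
}
guc_id = prep_context_pending_disable(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);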

> > suggestion to use an engine flag stands, but can be addressed as a
> > follow-up).
> > 
> 
> Not sure I follow this one, but we can sync and address it in a
> follow-up if needed.
> 
> Matt
> 
> > Daniele
> > 
> > >   		return;
> > >   	}
> > >   	guc_id = prep_context_pending_disable(ce);
> > > diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> > > index b48c4905d3fc..2b2b63cba06c 100644
> > > --- a/drivers/gpu/drm/i915/i915_request.c
> > > +++ b/drivers/gpu/drm/i915/i915_request.c
> > > @@ -432,6 +432,7 @@ void i915_request_retire_upto(struct i915_request *rq)
> > >   	do {
> > >   		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
> > > +		GEM_BUG_ON(!i915_request_completed(tmp));
> > >   	} while (i915_request_retire(tmp) && tmp != rq);
> > >   }
> > > @@ -1380,6 +1381,9 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
> > >   	return err;
> > >   }
> > > +static int
> > > +i915_request_await_request(struct i915_request *to, struct i915_request *from);
> > > +
> > >   int
> > >   i915_request_await_execution(struct i915_request *rq,
> > >   			     struct dma_fence *fence)
> > > @@ -1465,7 +1469,8 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
> > >   			return ret;
> > >   	}
> > > -	if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
> > > +	if (!intel_engine_uses_guc(to->engine) &&
> > > +	    is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
> > >   		ret = await_request_submit(to, from);
> > >   	else
> > >   		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
> > > @@ -1626,6 +1631,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
> > >   	prev = to_request(__i915_active_fence_set(&timeline->last_request,
> > >   						  &rq->fence));
> > >   	if (prev && !__i915_request_is_complete(prev)) {
> > > +		bool uses_guc = intel_engine_uses_guc(rq->engine);
> > > +
> > >   		/*
> > >   		 * The requests are supposed to be kept in order. However,
> > >   		 * we need to be wary in case the timeline->last_request
> > > @@ -1636,7 +1643,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
> > >   			   i915_seqno_passed(prev->fence.seqno,
> > >   					     rq->fence.seqno));
> > > -		if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
> > > +		if ((!uses_guc && is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) ||
> > > +		    (uses_guc && prev->context == rq->context))
> > >   			i915_sw_fence_await_sw_fence(&rq->submit,
> > >   						     &prev->submit,
> > >   						     &rq->submitq);
> >

Patch

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 9dc1a256e185..4443cc6f5320 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -933,7 +933,6 @@ static void guc_context_sched_disable(struct intel_context *ce)
 	 * a request before we set the 'context_pending_disable' flag here.
 	 */
 	if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
-		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 		return;
 	}
 	guc_id = prep_context_pending_disable(ce);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index b48c4905d3fc..2b2b63cba06c 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -432,6 +432,7 @@ void i915_request_retire_upto(struct i915_request *rq)
 
 	do {
 		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
+		GEM_BUG_ON(!i915_request_completed(tmp));
 	} while (i915_request_retire(tmp) && tmp != rq);
 }
 
@@ -1380,6 +1381,9 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
 	return err;
 }
 
+static int
+i915_request_await_request(struct i915_request *to, struct i915_request *from);
+
 int
 i915_request_await_execution(struct i915_request *rq,
 			     struct dma_fence *fence)
@@ -1465,7 +1469,8 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 			return ret;
 	}
 
-	if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
+	if (!intel_engine_uses_guc(to->engine) &&
+	    is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
 		ret = await_request_submit(to, from);
 	else
 		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
@@ -1626,6 +1631,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
 	prev = to_request(__i915_active_fence_set(&timeline->last_request,
 						  &rq->fence));
 	if (prev && !__i915_request_is_complete(prev)) {
+		bool uses_guc = intel_engine_uses_guc(rq->engine);
+
 		/*
 		 * The requests are supposed to be kept in order. However,
 		 * we need to be wary in case the timeline->last_request
@@ -1636,7 +1643,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
 			   i915_seqno_passed(prev->fence.seqno,
 					     rq->fence.seqno));
 
-		if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
+		if ((!uses_guc && is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) ||
+		    (uses_guc && prev->context == rq->context))
 			i915_sw_fence_await_sw_fence(&rq->submit,
 						     &prev->submit,
 						     &rq->submitq);