drm/i915: Shrink search list for active timelines

Message ID 20180513134656.3342-1-chris@chris-wilson.co.uk
State New, archived

Commit Message

Chris Wilson May 13, 2018, 1:46 p.m. UTC
When switching to the kernel context, we force the switch to occur after
all currently active requests (so that we know the GPU won't switch
immediately away and the kernel context remains current as we work). To
do so we have to inspect all the timelines and add a fence from the
active work to queue our switch afterwards. We can use the tracked set
of active rings to shrink our search for active timelines.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_context.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)
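
For reference, the helper named in the hunk header below, last_request_on_engine(), sits just outside the diff context. A rough sketch of what it does, reconstructed from the surrounding code (the exact body may differ):

static struct i915_request *
last_request_on_engine(struct i915_timeline *timeline,
		       struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	/* Peek at the timeline's last request under struct_mutex */
	rq = i915_gem_active_raw(&timeline->last_request,
				 &engine->i915->drm.struct_mutex);
	if (rq && rq->engine == engine)
		return rq;

	return NULL;
}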

Comments

Tvrtko Ursulin May 15, 2018, 1:42 p.m. UTC | #1
On 13/05/2018 14:46, Chris Wilson wrote:
> When switching to the kernel context, we force the switch to occur after
> all currently active requests (so that we know the GPU won't switch
> immediately away and the kernel context remains current as we work). To
> do so we have to inspect all the timelines and add a fence from the
> active work to queue our switch afterwards. We can use the tracked set
> of active rings to shrink our search for active timelines.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>   drivers/gpu/drm/i915/i915_gem_context.c | 23 ++++++++++++-----------
>   1 file changed, 12 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 33f8a4b3c981..48254483c4a6 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -596,41 +596,42 @@ last_request_on_engine(struct i915_timeline *timeline,
>   
>   static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
>   {
> -	struct i915_timeline *timeline;
> +	struct intel_ring *ring;
>   
> -	list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
> -		if (last_request_on_engine(timeline, engine))
> +	lockdep_assert_held(&engine->i915->drm.struct_mutex);
> +	list_for_each_entry(ring, &engine->i915->gt.active_rings, active_link) {
> +		if (last_request_on_engine(ring->timeline, engine))
>   			return false;
>   	}

Prettier if i915 passed directly to the function?
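
For illustration, the suggested variant might look like this (a hypothetical sketch, not code from the thread):

static bool
engine_has_idle_kernel_context(struct drm_i915_private *i915,
			       struct intel_engine_cs *engine)
{
	struct intel_ring *ring;

	/* Hypothetical: take i915 explicitly rather than chasing engine->i915 */
	lockdep_assert_held(&i915->drm.struct_mutex);
	list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
		if (last_request_on_engine(ring->timeline, engine))
			return false;
	}

	return intel_engine_has_kernel_context(engine);
}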

>   
>   	return intel_engine_has_kernel_context(engine);
>   }
>   
> -int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
> +int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
>   {
>   	struct intel_engine_cs *engine;
> -	struct i915_timeline *timeline;
>   	enum intel_engine_id id;
>   
> -	lockdep_assert_held(&dev_priv->drm.struct_mutex);
> +	lockdep_assert_held(&i915->drm.struct_mutex);
>   
> -	i915_retire_requests(dev_priv);
> +	i915_retire_requests(i915);
>   
> -	for_each_engine(engine, dev_priv, id) {
> +	for_each_engine(engine, i915, id) {
> +		struct intel_ring *ring;
>   		struct i915_request *rq;
>   
>   		if (engine_has_idle_kernel_context(engine))
>   			continue;
>   
> -		rq = i915_request_alloc(engine, dev_priv->kernel_context);
> +		rq = i915_request_alloc(engine, i915->kernel_context);
>   		if (IS_ERR(rq))
>   			return PTR_ERR(rq);
>   
>   		/* Queue this switch after all other activity */
> -		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
> +		list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
>   			struct i915_request *prev;
>   
> -			prev = last_request_on_engine(timeline, engine);
> +			prev = last_request_on_engine(ring->timeline, engine);
>   			if (prev)
>   				i915_sw_fence_await_sw_fence_gfp(&rq->submit,
>   								 &prev->submit,
> 

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko
Chris Wilson May 15, 2018, 1:48 p.m. UTC | #2
Quoting Tvrtko Ursulin (2018-05-15 14:42:02)
> 
> On 13/05/2018 14:46, Chris Wilson wrote:
> > When switching to the kernel context, we force the switch to occur after
> > all currently active requests (so that we know the GPU won't switch
> > immediately away and the kernel context remains current as we work). To
> > do so we have to inspect all the timelines and add a fence from the
> > active work to queue our switch afterwards. We can use the tracked set
> > of active rings to shrink our search for active timelines.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > ---
> >   drivers/gpu/drm/i915/i915_gem_context.c | 23 ++++++++++++-----------
> >   1 file changed, 12 insertions(+), 11 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> > index 33f8a4b3c981..48254483c4a6 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_context.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> > @@ -596,41 +596,42 @@ last_request_on_engine(struct i915_timeline *timeline,
> >   
> >   static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
> >   {
> > -     struct i915_timeline *timeline;
> > +     struct intel_ring *ring;
> >   
> > -     list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
> > -             if (last_request_on_engine(timeline, engine))
> > +     lockdep_assert_held(&engine->i915->drm.struct_mutex);
> > +     list_for_each_entry(ring, &engine->i915->gt.active_rings, active_link) {
> > +             if (last_request_on_engine(ring->timeline, engine))
> >                       return false;
> >       }
> 
> Prettier if i915 passed directly to the function?

A compromise would be to pull the list out into a local. Yes, it is a bit
ugly; I just valued the function name more :)
-Chris
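
The compromise Chris describes might read roughly as follows (a sketch, not necessarily the committed code):

static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
{
	/* Pull the list head into a local to keep the loop line short */
	struct list_head *active_rings = &engine->i915->gt.active_rings;
	struct intel_ring *ring;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);
	list_for_each_entry(ring, active_rings, active_link) {
		if (last_request_on_engine(ring->timeline, engine))
			return false;
	}

	return intel_engine_has_kernel_context(engine);
}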

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 33f8a4b3c981..48254483c4a6 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -596,41 +596,42 @@ last_request_on_engine(struct i915_timeline *timeline,
 
 static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
 {
-	struct i915_timeline *timeline;
+	struct intel_ring *ring;
 
-	list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
-		if (last_request_on_engine(timeline, engine))
+	lockdep_assert_held(&engine->i915->drm.struct_mutex);
+	list_for_each_entry(ring, &engine->i915->gt.active_rings, active_link) {
+		if (last_request_on_engine(ring->timeline, engine))
 			return false;
 	}
 
 	return intel_engine_has_kernel_context(engine);
 }
 
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
-	struct i915_timeline *timeline;
 	enum intel_engine_id id;
 
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	lockdep_assert_held(&i915->drm.struct_mutex);
 
-	i915_retire_requests(dev_priv);
+	i915_retire_requests(i915);
 
-	for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
+		struct intel_ring *ring;
 		struct i915_request *rq;
 
 		if (engine_has_idle_kernel_context(engine))
 			continue;
 
-		rq = i915_request_alloc(engine, dev_priv->kernel_context);
+		rq = i915_request_alloc(engine, i915->kernel_context);
 		if (IS_ERR(rq))
 			return PTR_ERR(rq);
 
 		/* Queue this switch after all other activity */
-		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+		list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
 			struct i915_request *prev;
 
-			prev = last_request_on_engine(timeline, engine);
+			prev = last_request_on_engine(ring->timeline, engine);
 			if (prev)
 				i915_sw_fence_await_sw_fence_gfp(&rq->submit,
 								 &prev->submit,
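
For context, i915_gem_switch_to_kernel_context() expects struct_mutex to be held, as the lockdep assertion enforces. A hypothetical caller (the real call sites live elsewhere in the driver, e.g. on the idle and suspend paths) might look like:

static int switch_to_kernel_context_locked(struct drm_i915_private *i915)
{
	int err;

	/* Hypothetical wrapper: take struct_mutex around the switch */
	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_switch_to_kernel_context(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}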