diff mbox series

[13/46] drm/i915: Compute the global scheduler caps

Message ID 20190206130356.18771-14-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show
Series [01/46] drm/i915: Hack and slash, throttle execbuffer hogs | expand

Commit Message

Chris Wilson Feb. 6, 2019, 1:03 p.m. UTC
Do a pass over all the engines upon starting to determine the global
scheduler capability flags (those that are agreed upon by all).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c         |  2 ++
 drivers/gpu/drm/i915/intel_engine_cs.c  | 39 +++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_lrc.c        |  6 ----
 drivers/gpu/drm/i915/intel_ringbuffer.h |  2 ++
 4 files changed, 43 insertions(+), 6 deletions(-)

Comments

Tvrtko Ursulin Feb. 11, 2019, 12:24 p.m. UTC | #1
On 06/02/2019 13:03, Chris Wilson wrote:
> Do a pass over all the engines upon starting to determine the global
> scheduler capability flags (those that are agreed upon by all).
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_gem.c         |  2 ++
>   drivers/gpu/drm/i915/intel_engine_cs.c  | 39 +++++++++++++++++++++++++
>   drivers/gpu/drm/i915/intel_lrc.c        |  6 ----
>   drivers/gpu/drm/i915/intel_ringbuffer.h |  2 ++
>   4 files changed, 43 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index d18c4ccff370..04fa184fdff5 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -4728,6 +4728,8 @@ static int __i915_gem_restart_engines(void *data)
>   		}
>   	}
>   
> +	intel_engines_set_scheduler_caps(i915);
> +
>   	return 0;
>   }
>   
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index 49fa43ff02ba..02ee86159adc 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -614,6 +614,45 @@ int intel_engine_setup_common(struct intel_engine_cs *engine)
>   	return err;
>   }
>   
> +void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
> +{
> +	static const struct {
> +		u8 engine;
> +		u8 sched;
> +	} map[] = {
> +#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) }
> +		MAP(PREEMPTION, PREEMPTION),
> +#undef MAP
> +	};
> +	struct intel_engine_cs *engine;
> +	enum intel_engine_id id;
> +	u32 enabled, disabled;
> +
> +	enabled = 0;
> +	disabled = 0;
> +	for_each_engine(engine, i915, id) { /* all engines must agree! */
> +		int i;
> +
> +		if (engine->schedule)
> +			enabled |= (I915_SCHEDULER_CAP_ENABLED |
> +				    I915_SCHEDULER_CAP_PRIORITY);
> +		else
> +			disabled |= (I915_SCHEDULER_CAP_ENABLED |
> +				     I915_SCHEDULER_CAP_PRIORITY);
> +
> +		for (i = 0; i < ARRAY_SIZE(map); i++) {
> +			if (engine->flags & BIT(map[i].engine))
> +				enabled |= BIT(map[i].sched);
> +			else
> +				disabled |= BIT(map[i].sched);
> +		}
> +	}
> +
> +	i915->caps.scheduler = enabled & ~disabled;
> +	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
> +		i915->caps.scheduler = 0;

This effectively means that as soon as engine->schedule is NULL for one 
engine, the scheduler caps will be zero. I am wondering whether it would 
read more clearly to just return from the else branch of the 
if (engine->schedule) check in that case.

May or may not need to zero i915->caps.scheduler at the beginning of the 
function then - depending on whether we think configuration can change 
dynamically at runtime.

Regards,

Tvrtko


> +}
> +
>   static void __intel_context_unpin(struct i915_gem_context *ctx,
>   				  struct intel_engine_cs *engine)
>   {
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index da5120283263..59891cca35c1 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -2341,12 +2341,6 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
>   	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
>   	if (engine->i915->preempt_context)
>   		engine->flags |= I915_ENGINE_HAS_PREEMPTION;
> -
> -	engine->i915->caps.scheduler =
> -		I915_SCHEDULER_CAP_ENABLED |
> -		I915_SCHEDULER_CAP_PRIORITY;
> -	if (intel_engine_has_preemption(engine))
> -		engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
>   }
>   
>   static void
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index e7d85aaee415..19faa19f2529 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -574,6 +574,8 @@ intel_engine_has_preemption(const struct intel_engine_cs *engine)
>   	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
>   }
>   
> +void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
> +
>   static inline bool __execlists_need_preempt(int prio, int last)
>   {
>   	/*
>
Chris Wilson Feb. 11, 2019, 12:33 p.m. UTC | #2
Quoting Tvrtko Ursulin (2019-02-11 12:24:31)
> 
> On 06/02/2019 13:03, Chris Wilson wrote:
> > Do a pass over all the engines upon starting to determine the global
> > scheduler capability flags (those that are agreed upon by all).
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> >   drivers/gpu/drm/i915/i915_gem.c         |  2 ++
> >   drivers/gpu/drm/i915/intel_engine_cs.c  | 39 +++++++++++++++++++++++++
> >   drivers/gpu/drm/i915/intel_lrc.c        |  6 ----
> >   drivers/gpu/drm/i915/intel_ringbuffer.h |  2 ++
> >   4 files changed, 43 insertions(+), 6 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> > index d18c4ccff370..04fa184fdff5 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -4728,6 +4728,8 @@ static int __i915_gem_restart_engines(void *data)
> >               }
> >       }
> >   
> > +     intel_engines_set_scheduler_caps(i915);
> > +
> >       return 0;
> >   }
> >   
> > diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> > index 49fa43ff02ba..02ee86159adc 100644
> > --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> > +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> > @@ -614,6 +614,45 @@ int intel_engine_setup_common(struct intel_engine_cs *engine)
> >       return err;
> >   }
> >   
> > +void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
> > +{
> > +     static const struct {
> > +             u8 engine;
> > +             u8 sched;
> > +     } map[] = {
> > +#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) }
> > +             MAP(PREEMPTION, PREEMPTION),
> > +#undef MAP
> > +     };
> > +     struct intel_engine_cs *engine;
> > +     enum intel_engine_id id;
> > +     u32 enabled, disabled;
> > +
> > +     enabled = 0;
> > +     disabled = 0;
> > +     for_each_engine(engine, i915, id) { /* all engines must agree! */
> > +             int i;
> > +
> > +             if (engine->schedule)
> > +                     enabled |= (I915_SCHEDULER_CAP_ENABLED |
> > +                                 I915_SCHEDULER_CAP_PRIORITY);
> > +             else
> > +                     disabled |= (I915_SCHEDULER_CAP_ENABLED |
> > +                                  I915_SCHEDULER_CAP_PRIORITY);
> > +
> > +             for (i = 0; i < ARRAY_SIZE(map); i++) {
> > +                     if (engine->flags & BIT(map[i].engine))
> > +                             enabled |= BIT(map[i].sched);
> > +                     else
> > +                             disabled |= BIT(map[i].sched);
> > +             }
> > +     }
> > +
> > +     i915->caps.scheduler = enabled & ~disabled;
> > +     if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
> > +             i915->caps.scheduler = 0;
> 
> This effectively means that as soon as engine->schedule is NULL for one 
> engine, the scheduler caps will be zero. I am wondering whether it would 
> read more clearly to just return from the else branch of the 
> if (engine->schedule) check in that case.

I thought it was nice to have the same pattern throughout the loop with
the final fixup of making sure that all caps were zero if the global
scheduler was disabled. Whether that fixup actually makes sense is
debatable, as it seems a little overprotective given that we already have
an explicit on/off bit (with the rest showing what you could have won!).
-Chris
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d18c4ccff370..04fa184fdff5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4728,6 +4728,8 @@  static int __i915_gem_restart_engines(void *data)
 		}
 	}
 
+	intel_engines_set_scheduler_caps(i915);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 49fa43ff02ba..02ee86159adc 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -614,6 +614,45 @@  int intel_engine_setup_common(struct intel_engine_cs *engine)
 	return err;
 }
 
+void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
+{
+	static const struct {
+		u8 engine;
+		u8 sched;
+	} map[] = {
+#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) }
+		MAP(PREEMPTION, PREEMPTION),
+#undef MAP
+	};
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	u32 enabled, disabled;
+
+	enabled = 0;
+	disabled = 0;
+	for_each_engine(engine, i915, id) { /* all engines must agree! */
+		int i;
+
+		if (engine->schedule)
+			enabled |= (I915_SCHEDULER_CAP_ENABLED |
+				    I915_SCHEDULER_CAP_PRIORITY);
+		else
+			disabled |= (I915_SCHEDULER_CAP_ENABLED |
+				     I915_SCHEDULER_CAP_PRIORITY);
+
+		for (i = 0; i < ARRAY_SIZE(map); i++) {
+			if (engine->flags & BIT(map[i].engine))
+				enabled |= BIT(map[i].sched);
+			else
+				disabled |= BIT(map[i].sched);
+		}
+	}
+
+	i915->caps.scheduler = enabled & ~disabled;
+	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
+		i915->caps.scheduler = 0;
+}
+
 static void __intel_context_unpin(struct i915_gem_context *ctx,
 				  struct intel_engine_cs *engine)
 {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index da5120283263..59891cca35c1 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2341,12 +2341,6 @@  void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
 	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
 	if (engine->i915->preempt_context)
 		engine->flags |= I915_ENGINE_HAS_PREEMPTION;
-
-	engine->i915->caps.scheduler =
-		I915_SCHEDULER_CAP_ENABLED |
-		I915_SCHEDULER_CAP_PRIORITY;
-	if (intel_engine_has_preemption(engine))
-		engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index e7d85aaee415..19faa19f2529 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -574,6 +574,8 @@  intel_engine_has_preemption(const struct intel_engine_cs *engine)
 	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
 }
 
+void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
+
 static inline bool __execlists_need_preempt(int prio, int last)
 {
 	/*