
[1/8] drm/i915: Move saturated workload detection back to the context

Message ID 20200518081440.17948-1-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Series [1/8] drm/i915: Move saturated workload detection back to the context

Commit Message

Chris Wilson May 18, 2020, 8:14 a.m. UTC
When we introduced the saturated workload detection to tell us to back
off from semaphore usage [semaphores have a noticeable impact on
contended bus cycles with the CPU for some heavy workloads], we first
introduced it as a per-context tracker. This allows individual contexts
to try and optimise their own usage, but we found that with the local
tracking and the no-semaphore boosting, the first context to disable
semaphores got a massive priority boost and so would starve the rest and
all new contexts (as they started with semaphores enabled and lower
priority). Hence we moved the saturated workload detection to the
engine, and as a consequence had to disable semaphores on virtual engines.

Now that we do not have semaphore priority boosting, we can move the
tracking back to the context and virtual engines can now utilise the
faster inter-engine synchronisation.

References: 44d89409a12e ("drm/i915: Make the semaphore saturation mask global")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_context.c       |  1 +
 drivers/gpu/drm/i915/gt/intel_context_types.h |  2 ++
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     |  2 --
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |  2 --
 drivers/gpu/drm/i915/gt/intel_lrc.c           | 15 ---------------
 drivers/gpu/drm/i915/i915_request.c           |  4 ++--
 6 files changed, 5 insertions(+), 21 deletions(-)
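
In short, the patch moves a saturation bitmask from struct intel_engine_cs into
struct intel_context: a bit is set for any engine whose semaphore turned out to
be already signaled by the time the request reached HW submission, and later
requests from that context consult the mask to avoid emitting further semaphore
busywaits. Below is a minimal standalone C sketch of that bookkeeping; the types
and helpers here (engine_mask_t, struct context, struct request,
note_saturation(), should_skip_semaphore()) are simplified stand-ins for
illustration only, not the driver's actual intel_engine_mask_t,
__i915_request_submit() or already_busywaiting().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t engine_mask_t;          /* stand-in for intel_engine_mask_t */

struct context {
	engine_mask_t saturated;         /* engines whose semaphores arrived too late */
};

struct request {
	struct context *context;
	engine_mask_t semaphores;        /* engines this request busywaits on */
	bool semaphore_signaled;         /* semaphore already passed at submission? */
};

/*
 * At HW submission: if the semaphore was already signaled, the busywait was
 * pure overhead, so record the offending engines in the context's mask
 * (mirrors the hunk in __i915_request_submit()).
 */
static void note_saturation(struct request *rq)
{
	if (rq->semaphores && rq->semaphore_signaled)
		rq->context->saturated |= rq->semaphores;
}

/*
 * Before emitting a new semaphore wait: skip it if this context is already
 * busywaiting or has seen late semaphores (mirrors already_busywaiting()).
 */
static bool should_skip_semaphore(const struct request *rq)
{
	return rq->semaphores | rq->context->saturated;
}

int main(void)
{
	struct context ctx = { .saturated = 0 };
	struct request rq = { .context = &ctx, .semaphores = 0x2,
			      .semaphore_signaled = true };
	struct request next = { .context = &ctx, .semaphores = 0,
				.semaphore_signaled = false };

	note_saturation(&rq);    /* engine 1's semaphore arrived too late */

	/* next has no pending busywait, but the context is now saturated */
	printf("saturated mask: 0x%x, skip semaphore for next: %d\n",
	       (unsigned int)ctx.saturated, should_skip_semaphore(&next));
	return 0;
}

With the mask living on the context, it is cleared in __intel_context_active()
when the context is (re)activated rather than in __engine_park() when the whole
engine idles, so one saturated context no longer forces every other context on
that engine off semaphores.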

Comments

Tvrtko Ursulin May 18, 2020, 9:53 a.m. UTC | #1
On 18/05/2020 09:14, Chris Wilson wrote:
> When we introduced the saturated workload detection to tell us to back
> off from semaphore usage [semaphores have a noticeable impact on
> contended bus cycles with the CPU for some heavy workloads], we first
> introduced it as a per-context tracker. This allows individual contexts
> to try and optimise their own usage, but we found that with the local
> tracking and the no-semaphore boosting, the first context to disable
> semaphores got a massive priority boost and so would starve the rest and
> all new contexts (as they started with semaphores enabled and lower
> priority). Hence we moved the saturated workload detection to the
> engine, and as a consequence had to disable semaphores on virtual engines.
> 
> Now that we do not have semaphore priority boosting, we can move the
> tracking back to the context and virtual engines can now utilise the
> faster inter-engine synchronisation.
> 
> References: 44d89409a12e ("drm/i915: Make the semaphore saturation mask global")

We'd need to dig out the bug report which the above commit fixed and see 
what tests need to be run to check for no regressions. Sounds tricky to 
find without a tag. I certainly don't remember it from a year ago. :(

Regards,

Tvrtko

> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/gt/intel_context.c       |  1 +
>   drivers/gpu/drm/i915/gt/intel_context_types.h |  2 ++
>   drivers/gpu/drm/i915/gt/intel_engine_pm.c     |  2 --
>   drivers/gpu/drm/i915/gt/intel_engine_types.h  |  2 --
>   drivers/gpu/drm/i915/gt/intel_lrc.c           | 15 ---------------
>   drivers/gpu/drm/i915/i915_request.c           |  4 ++--
>   6 files changed, 5 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
> index e4aece20bc80..762a251d553b 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.c
> +++ b/drivers/gpu/drm/i915/gt/intel_context.c
> @@ -268,6 +268,7 @@ static int __intel_context_active(struct i915_active *active)
>   	if (err)
>   		goto err_timeline;
>   
> +	ce->saturated = 0;
>   	return 0;
>   
>   err_timeline:
> diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
> index 4954b0df4864..aed26d93c2ca 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
> @@ -78,6 +78,8 @@ struct intel_context {
>   	} lrc;
>   	u32 tag; /* cookie passed to HW to track this context on submission */
>   
> +	intel_engine_mask_t saturated; /* submitting semaphores too late? */
> +
>   	/* Time on GPU as tracked by the hw. */
>   	struct {
>   		struct ewma_runtime avg;
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
> index d0a1078ef632..6d7fdba5adef 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
> @@ -229,8 +229,6 @@ static int __engine_park(struct intel_wakeref *wf)
>   	struct intel_engine_cs *engine =
>   		container_of(wf, typeof(*engine), wakeref);
>   
> -	engine->saturated = 0;
> -
>   	/*
>   	 * If one and only one request is completed between pm events,
>   	 * we know that we are inside the kernel context and it is
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> index 2b6cdf47d428..c443b6bb884b 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> @@ -332,8 +332,6 @@ struct intel_engine_cs {
>   
>   	struct intel_context *kernel_context; /* pinned */
>   
> -	intel_engine_mask_t saturated; /* submitting semaphores too late? */
> -
>   	struct {
>   		struct delayed_work work;
>   		struct i915_request *systole;
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index 87e6c5bdd2dc..e597325d04f1 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -5630,21 +5630,6 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
>   	ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
>   	ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
>   
> -	/*
> -	 * The decision on whether to submit a request using semaphores
> -	 * depends on the saturated state of the engine. We only compute
> -	 * this during HW submission of the request, and we need for this
> -	 * state to be globally applied to all requests being submitted
> -	 * to this engine. Virtual engines encompass more than one physical
> -	 * engine and so we cannot accurately tell in advance if one of those
> -	 * engines is already saturated and so cannot afford to use a semaphore
> -	 * and be pessimized in priority for doing so -- if we are the only
> -	 * context using semaphores after all other clients have stopped, we
> -	 * will be starved on the saturated system. Such a global switch for
> -	 * semaphores is less than ideal, but alas is the current compromise.
> -	 */
> -	ve->base.saturated = ALL_ENGINES;
> -
>   	snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
>   
>   	intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 526c1e9acbd5..31ef683d27b4 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -467,7 +467,7 @@ bool __i915_request_submit(struct i915_request *request)
>   	 */
>   	if (request->sched.semaphores &&
>   	    i915_sw_fence_signaled(&request->semaphore))
> -		engine->saturated |= request->sched.semaphores;
> +		request->context->saturated |= request->sched.semaphores;
>   
>   	engine->emit_fini_breadcrumb(request,
>   				     request->ring->vaddr + request->postfix);
> @@ -919,7 +919,7 @@ already_busywaiting(struct i915_request *rq)
>   	 *
>   	 * See the are-we-too-late? check in __i915_request_submit().
>   	 */
> -	return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
> +	return rq->sched.semaphores | READ_ONCE(rq->context->saturated);
>   }
>   
>   static int
>
Chris Wilson May 18, 2020, 10:11 a.m. UTC | #2
Quoting Tvrtko Ursulin (2020-05-18 10:53:22)
> 
> On 18/05/2020 09:14, Chris Wilson wrote:
> > When we introduced the saturated workload detection to tell us to back
> > off from semaphore usage [semaphores have a noticeable impact on
> > contended bus cycles with the CPU for some heavy workloads], we first
> > introduced it as a per-context tracker. This allows individual contexts
> > to try and optimise their own usage, but we found that with the local
> > tracking and the no-semaphore boosting, the first context to disable
> > semaphores got a massive priority boost and so would starve the rest and
> > all new contexts (as they started with semaphores enabled and lower
> > priority). Hence we moved the saturated workload detection to the
> > engine, and as a consequence had to disable semaphores on virtual engines.
> > 
> > Now that we do not have semaphore priority boosting, we can move the
> > tracking back to the context and virtual engines can now utilise the
> > faster inter-engine synchronisation.
> > 
> > References: 44d89409a12e ("drm/i915: Make the semaphore saturation mask global")
> 
> We'd need to dig out the bug report which the above commit fixed and see 
> what tests need to be run to check for no regressions. Sounds tricky to 
> find without a tag. I certainly don't remember it from a year ago. :(

This is all about the semaphore priority boosting and the inversions it
caused. The situation was that we would turn off semaphore usage for
existing contexts, but new contexts would arrive, try to use semaphores
and be demoted in priority. Thus the new contexts would be starved.

Without semaphore boosting the playing field is level again, and -b i915 is
no longer slower than -b busy/context/etc for unsaturated workloads.

I wanted to try to remove the saturation tracking entirely. The impact on the
perf_density tests seems to be much lower than before, but I think that
is due to other mitigating factors.
-Chris

Patch

diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index e4aece20bc80..762a251d553b 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -268,6 +268,7 @@  static int __intel_context_active(struct i915_active *active)
 	if (err)
 		goto err_timeline;
 
+	ce->saturated = 0;
 	return 0;
 
 err_timeline:
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 4954b0df4864..aed26d93c2ca 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -78,6 +78,8 @@  struct intel_context {
 	} lrc;
 	u32 tag; /* cookie passed to HW to track this context on submission */
 
+	intel_engine_mask_t saturated; /* submitting semaphores too late? */
+
 	/* Time on GPU as tracked by the hw. */
 	struct {
 		struct ewma_runtime avg;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index d0a1078ef632..6d7fdba5adef 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -229,8 +229,6 @@  static int __engine_park(struct intel_wakeref *wf)
 	struct intel_engine_cs *engine =
 		container_of(wf, typeof(*engine), wakeref);
 
-	engine->saturated = 0;
-
 	/*
 	 * If one and only one request is completed between pm events,
 	 * we know that we are inside the kernel context and it is
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 2b6cdf47d428..c443b6bb884b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -332,8 +332,6 @@  struct intel_engine_cs {
 
 	struct intel_context *kernel_context; /* pinned */
 
-	intel_engine_mask_t saturated; /* submitting semaphores too late? */
-
 	struct {
 		struct delayed_work work;
 		struct i915_request *systole;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 87e6c5bdd2dc..e597325d04f1 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -5630,21 +5630,6 @@  intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 	ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
 	ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
 
-	/*
-	 * The decision on whether to submit a request using semaphores
-	 * depends on the saturated state of the engine. We only compute
-	 * this during HW submission of the request, and we need for this
-	 * state to be globally applied to all requests being submitted
-	 * to this engine. Virtual engines encompass more than one physical
-	 * engine and so we cannot accurately tell in advance if one of those
-	 * engines is already saturated and so cannot afford to use a semaphore
-	 * and be pessimized in priority for doing so -- if we are the only
-	 * context using semaphores after all other clients have stopped, we
-	 * will be starved on the saturated system. Such a global switch for
-	 * semaphores is less than ideal, but alas is the current compromise.
-	 */
-	ve->base.saturated = ALL_ENGINES;
-
 	snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
 
 	intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 526c1e9acbd5..31ef683d27b4 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -467,7 +467,7 @@  bool __i915_request_submit(struct i915_request *request)
 	 */
 	if (request->sched.semaphores &&
 	    i915_sw_fence_signaled(&request->semaphore))
-		engine->saturated |= request->sched.semaphores;
+		request->context->saturated |= request->sched.semaphores;
 
 	engine->emit_fini_breadcrumb(request,
 				     request->ring->vaddr + request->postfix);
@@ -919,7 +919,7 @@  already_busywaiting(struct i915_request *rq)
 	 *
 	 * See the are-we-too-late? check in __i915_request_submit().
 	 */
-	return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
+	return rq->sched.semaphores | READ_ONCE(rq->context->saturated);
 }
 
 static int