
[4/5] drm/i915/execlists: Direct submission from irq handler

Message ID 20180509142801.28130-4-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson May 9, 2018, 2:28 p.m. UTC
Continuing the theme of bypassing ksoftirqd latency, also first try to
submit directly from the CS interrupt handler to clear the ELSP and
queue the next request.
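
In outline, the idiom is "run the tasklet body inline if we can take the
tasklet lock from the irq handler, otherwise fall back to scheduling it".
A minimal sketch of that pattern using the plain kernel tasklet API (the
patch wraps this, plus the enabled/reset checks, in the i915_tasklet
helpers below):

#include <linux/interrupt.h>

/* Sketch of the "submit directly from the irq handler" idiom. */
static void run_or_schedule(struct tasklet_struct *t)
{
	if (tasklet_trylock(t)) {
		/* We own the tasklet: run the submission body inline. */
		t->func(t->data);
		tasklet_unlock(t);
		return;
	}

	/* Already running elsewhere; defer to the softirq path. */
	tasklet_schedule(t);
}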

In the past, we have been hesitant to do this as the context-switch
processing has been quite heavy, requiring forcewaked mmio. However, as
we can now read the GPU state from the cacheable HWSP, it is relatively
cheap!
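
By way of comparison, a sketch of the cost difference (csb_mmio_read()
and hwsp_csb here are illustrative stand-ins, not the real symbols):

/* Old: each context-status read is an uncached, forcewaked mmio access,
 * far too heavy to do repeatedly from the interrupt handler. */
static u32 csb_read_old(struct intel_engine_cs *engine, int entry)
{
	return csb_mmio_read(engine, entry);
}

/* New: the GPU mirrors the status words into the cacheable HWSP, so the
 * read is just a load from (likely cache-hot) memory. */
static u32 csb_read_new(const u32 *hwsp_csb, int entry)
{
	return READ_ONCE(hwsp_csb[entry]);
}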

v2: Explain why we test_bit(IRQ_EXECLIST) after doing notify_ring() (it's
because notify_ring() may itself trigger direct submission, clearing
the bit)
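
That is, the order in the handler ends up as below (excerpted from the
gen8_cs_irq_handler hunk in this patch):

	if (iir & GT_RENDER_USER_INTERRUPT) {
		notify_ring(engine); /* may submit directly, clearing the bit */
		if (!test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
			tasklet = USES_GUC_SUBMISSION(engine->i915);
	}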

Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_irq.c             | 21 ++++++++++++++-------
 drivers/gpu/drm/i915/i915_tasklet.h         | 21 +++++++++++++++++++++
 drivers/gpu/drm/i915/intel_guc_submission.c |  2 ++
 3 files changed, 37 insertions(+), 7 deletions(-)

Comments

Chris Wilson May 10, 2018, 12:02 p.m. UTC | #1
Quoting Chris Wilson (2018-05-09 15:28:00)
> Continuing the theme of bypassing ksoftirqd latency, also first try to
> submit directly from the CS interrupt handler to clear the ELSP and
> queue the next request.
> 
> In the past, we have been hesitant to do this as the context-switch
> processing has been quite heavy, requiring forcewaked mmio. However, as
> we can now read the GPU state from the cacheable HWSP, it is relatively
> cheap!
> 
> v2: Explain why we test_bit(IRQ_EXECLIST) after doing notify_ring() (it's
> because notify_ring() may itself trigger direct submission, clearing
> the bit)
> 
> Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Tvrtko has some extra tests for igt/gem_exec_latency that show the
impact that triggering ksoftirqd has on submission latency. It's not
pretty. Fortunately, this pair of patches makes a huge difference.
-Chris

Patch

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f8aff5a5aa83..e1b3a7575fe7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1465,19 +1465,26 @@  gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	bool tasklet = false;
 
-	if (iir & GT_CONTEXT_SWITCH_INTERRUPT) {
-		if (READ_ONCE(engine->execlists.active))
-			tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST,
-						    &engine->irq_posted);
-	}
+	if (iir & GT_CONTEXT_SWITCH_INTERRUPT && READ_ONCE(execlists->active))
+		tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST,
+					    &engine->irq_posted);
 
 	if (iir & GT_RENDER_USER_INTERRUPT) {
 		notify_ring(engine);
-		tasklet |= USES_GUC_SUBMISSION(engine->i915);
+		/*
+		 * notify_ring() may trigger direct submission onto this
+		 * engine, clearing the ENGINE_IRQ_EXECLIST bit. In that
+		 * case, we don't want to resubmit and so clear the tasklet
+		 * boolean. GuC never sets the ENGINE_IRQ_EXECLIST bit and
+		 * so when using the GuC this equates to an unconditional
+		 * setting of tasklet to true.
+		 */
+		if (!test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
+			tasklet = USES_GUC_SUBMISSION(engine->i915);
 	}
 
 	if (tasklet)
-		i915_tasklet_schedule(&execlists->tasklet);
+		i915_tasklet(&execlists->tasklet);
 }
 
 static void gen8_gt_irq_ack(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/i915_tasklet.h b/drivers/gpu/drm/i915/i915_tasklet.h
index 99e2fa2241ba..c532282551b7 100644
--- a/drivers/gpu/drm/i915/i915_tasklet.h
+++ b/drivers/gpu/drm/i915/i915_tasklet.h
@@ -99,4 +99,25 @@  static inline void i915_tasklet_run(const struct i915_tasklet *t)
 	__i915_tasklet_run(t);
 }
 
+static inline bool i915_tasklet_try(struct i915_tasklet *t)
+{
+	if (unlikely(!tasklet_trylock(&t->base)))
+		return false;
+
+	if (i915_tasklet_is_enabled(t))
+		i915_tasklet_run(t);
+
+	tasklet_unlock(&t->base);
+	return true;
+}
+
+static inline void i915_tasklet(struct i915_tasklet *t)
+{
+	if (!i915_tasklet_is_enabled(t)) /* GPU reset active */
+		return;
+
+	if (!i915_tasklet_try(t))
+		i915_tasklet_schedule(t);
+}
+
 #endif /* _I915_TASKLET_H_ */
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index f2ded1796523..4e09abf7e206 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -780,6 +780,8 @@  static void guc_submission_tasklet(unsigned long data)
 	struct execlist_port *port = execlists->port;
 	struct i915_request *rq;
 
+	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+
 	rq = port_request(port);
 	while (rq && i915_request_completed(rq)) {
 		trace_i915_request_out(rq);