@@ -1465,19 +1465,26 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
struct intel_engine_execlists * const execlists = &engine->execlists;
bool tasklet = false;
- if (iir & GT_CONTEXT_SWITCH_INTERRUPT) {
- if (READ_ONCE(engine->execlists.active))
- tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST,
- &engine->irq_posted);
- }
+ if (iir & GT_CONTEXT_SWITCH_INTERRUPT && READ_ONCE(execlists->active))
+ tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST,
+ &engine->irq_posted);
if (iir & GT_RENDER_USER_INTERRUPT) {
notify_ring(engine);
- tasklet |= USES_GUC_SUBMISSION(engine->i915);
+ /*
+ * notify_ring() may trigger direct submission onto this
+ * engine, clearing the ENGINE_IRQ_EXECLIST bit. In that
+ * case, we don't want to resubmit and so clear the tasklet
+ * boolean. GuC never sets the ENGINE_IRQ_EXECLIST bit and
+ * so when using the GuC this equates to an unconditional
+ * setting of tasklet to true.
+ */
+ if (!test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
+ tasklet = USES_GUC_SUBMISSION(engine->i915);
}
if (tasklet)
- i915_tasklet_schedule(&execlists->tasklet);
+ i915_tasklet(&execlists->tasklet);
}
static void gen8_gt_irq_ack(struct drm_i915_private *i915,
@@ -105,4 +105,38 @@ static inline void i915_tasklet_run(const struct i915_tasklet *t)
__i915_tasklet_run(t);
}
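+/*
+ * i915_tasklet_try() - attempt to run the tasklet inline on this cpu.
+ *
+ * Returns false if the tasklet is already running on another cpu, in
+ * which case the caller should fall back to scheduling it. Returns true
+ * once the run-lock is owned, invoking the callback only while the
+ * tasklet is enabled.
+ */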
+static inline bool i915_tasklet_try(struct i915_tasklet *t)
+{
+ if (unlikely(!tasklet_trylock(&t->base)))
+ return false;
+
+ if (i915_tasklet_is_enabled(t))
+ i915_tasklet_run(t);
+
+ tasklet_unlock(&t->base);
+ return true;
+}
+
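+/*
+ * i915_tasklet() - run the tasklet inline now if convenient, otherwise
+ * schedule it for softirq execution. A disabled tasklet (e.g. during a
+ * GPU reset) is left untouched.
+ */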
+static inline void i915_tasklet(struct i915_tasklet *t)
+{
+ if (!i915_tasklet_is_enabled(t)) /* GPU reset active */
+ return;
+
+ if (!i915_tasklet_try(t))
+ i915_tasklet_schedule(t);
+}
+
#endif /* _I915_TASKLET_H_ */
@@ -780,6 +780,8 @@ static void guc_submission_tasklet(unsigned long data)
struct execlist_port *port = execlists->port;
struct i915_request *rq;
+ clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+
rq = port_request(port);
while (rq && i915_request_completed(rq)) {
trace_i915_request_out(rq);
Continuing the theme of bypassing ksoftirqd latency, also first try to
directly submit from the CS interrupt handler to clear the ELSP and
queue the next.

In the past, we have been hesitant to do this as the context switch
processing has been quite heavy, requiring forcewaked mmio. However, as
we can now read the GPU state from the cacheable HWSP, it is relatively
cheap!

v2: Explain why we test_bit(IRQ_EXECLIST) after doing notify_ring (it's
because notify_ring() may itself trigger direct submission, clearing
the bit).

Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_irq.c             | 21 ++++++++++++++-------
 drivers/gpu/drm/i915/i915_tasklet.h         | 34 ++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_guc_submission.c |  2 ++
 3 files changed, 50 insertions(+), 7 deletions(-)
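For anyone wanting to experiment with the same trick outside the
i915_tasklet wrappers, below is a minimal sketch of the "run inline if
uncontended, otherwise defer to softirq" pattern using only the stock
<linux/interrupt.h> tasklet API. irq_kick_tasklet() is a hypothetical
name used for illustration only, and the sketch assumes the classic
tasklet ABI of this era, where the callback is void (*func)(unsigned
long) invoked with t->data; the i915 wrappers above express the same
flow via i915_tasklet_is_enabled()/i915_tasklet_run().

#include <linux/interrupt.h>

/* Hypothetical helper for illustration; not part of this patch. */
static void irq_kick_tasklet(struct tasklet_struct *t)
{
	if (atomic_read(&t->count)) /* disabled, e.g. across a GPU reset */
		return;

	if (tasklet_trylock(t)) {
		/*
		 * We own the run-lock, so invoke the callback right here
		 * in hardirq context and skip the ksoftirqd round trip.
		 * Recheck the disable count now that we hold the lock.
		 */
		if (!atomic_read(&t->count))
			t->func(t->data);
		tasklet_unlock(t);
	} else {
		/* Running on another cpu; requeue so nothing is lost. */
		tasklet_hi_schedule(t);
	}
}

The fallback keeps the worst case identical to the old behaviour, a
plain tasklet_hi_schedule() whenever the run-lock is contended; the win
is the common uncontended case, where the CSB events can be processed
immediately from the cacheable HWSP instead of waiting for ksoftirqd to
be woken.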