@@ -25,3 +25,15 @@ config DRM_I915_SPIN_REQUEST
May be 0 to disable the initial spin. In practice, we estimate
the cost of enabling the interrupt (if currently disabled) to be
a few microseconds.
+
+config DRM_I915_PREEMPT_TIMEOUT
+ int "Preempt timeout (ms)"
+ default 100 # milliseconds
+ help
+ How long to wait (in milliseconds) for a preemption event to occur
+ when submitting a new context via execlists. If the current context
+ does not hit an arbitration point and yield to HW before the timer
+ expires, the HW will be reset to allow the more important context
+ to execute.
+
+ May be 0 to disable the timeout.
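A note on how the new option is consumed by the later hunks: a Kconfig "int" symbol is emitted by Kbuild into <generated/autoconf.h> as a plain preprocessor define (e.g. #define CONFIG_DRM_I915_PREEMPT_TIMEOUT 100), so it reads as an ordinary C integer constant. That is what allows the submission code below to test it with a plain if () and have the compiler drop the timer handling entirely when the timeout is configured to 0. A minimal sketch; preempt_timeout_enabled() is a hypothetical helper for illustration only, not part of the patch:

/* The configured value behaves like any integer constant; 0 means the
 * forced-preemption timer is compiled out of the submission path.
 */
static inline bool preempt_timeout_enabled(void)
{
	return CONFIG_DRM_I915_PREEMPT_TIMEOUT != 0;
}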
@@ -945,6 +945,21 @@ static void record_preemption(struct intel_engine_execlists *execlists)
(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
}

+static unsigned long preempt_expires(void)
+{
+ const unsigned long timeout =
+ msecs_to_jiffies_timeout(CONFIG_DRM_I915_PREEMPT_TIMEOUT);
+
+ /*
+ * Paranoia to make sure the compiler computes the timeout before
+ * loading 'jiffies' as jiffies is volatile and may be updated in
+ * the background by a timer tick. All to reduce the complexity
+ * of the addition and reduce the risk of losing a jiffie.
+ */
+ barrier();
+ return jiffies + timeout;
+}
+
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -1283,6 +1298,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
*port = execlists_schedule_in(last, port - execlists->pending);
memset(port + 1, 0, (last_port - port) * sizeof(*port));
execlists_submit_ports(engine);
+ if (CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+ mod_timer(&execlists->timer, preempt_expires());
} else {
ring_set_paused(engine, 0);
}
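The hunk above arms execlists->timer via mod_timer() right after the new context is written to the ELSP ports, so the timer expires roughly CONFIG_DRM_I915_PREEMPT_TIMEOUT milliseconds (rounded up to jiffies by preempt_expires()) after the preemption was requested. Once it fires, timer_pending() turns false; all the expiry handler needs to do is kick the submission tasklet, where preempt_timeout() below then reports the overdue preemption and triggers the reset. A sketch of such an expiry handler, assuming the pre-existing execlists_submission_timer() callback (visible as context in the final hunk) simply reschedules the tasklet:

static void execlists_submission_timer(struct timer_list *timer)
{
	struct intel_engine_cs *engine =
		from_timer(engine, timer, execlists.timer);

	/* Kick the tasklet; it decides whether the preemption timed out. */
	tasklet_hi_schedule(&engine->execlists.tasklet);
}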
@@ -1467,13 +1484,45 @@ static void process_csb(struct intel_engine_cs *engine)
invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
}

-static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
+static bool __execlists_submission_tasklet(struct intel_engine_cs *const engine)
{
lockdep_assert_held(&engine->active.lock);

process_csb(engine);
- if (!engine->execlists.pending[0])
+ if (!engine->execlists.pending[0]) {
execlists_dequeue(engine);
+ return true;
+ }
+
+ return false;
+}
+
+static void preempt_reset(struct intel_engine_cs *engine)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
+ if (test_and_set_bit(bit, lock))
+ return;
+
+ /* Mark this tasklet as disabled to avoid waiting for it to complete */
+ tasklet_disable_nosync(&engine->execlists.tasklet);
+
+ intel_engine_reset(engine, "preemption time out");
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+}
+
+static bool preempt_timeout(struct intel_engine_cs *const engine)
+{
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+ return false;
+
+ if (!intel_engine_has_preemption(engine))
+ return false;
+
+ return !timer_pending(&engine->execlists.timer);
}

/*
@@ -1484,10 +1533,17 @@ static void execlists_submission_tasklet(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
unsigned long flags;
+ bool reset = false;

spin_lock_irqsave(&engine->active.lock, flags);
- __execlists_submission_tasklet(engine);
+
+ if (!__execlists_submission_tasklet(engine) && preempt_timeout(engine))
+ reset = true;
+
spin_unlock_irqrestore(&engine->active.lock, flags);
+
+ if (reset)
+ preempt_reset(engine);
}

static void execlists_submission_timer(struct timer_list *timer)
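Note that preempt_reset() runs only after engine->active.lock has been dropped: the reset machinery is not safe to call under the submission spinlock, and because it is invoked from the submission tasklet itself, the tasklet is marked disabled with tasklet_disable_nosync() (the _nosync variant, since a synchronous disable would wait for the very tasklet we are running in, per the comment in preempt_reset()). Competing reset paths are serialised through the per-engine I915_RESET_ENGINE bit in gt->reset.flags with the usual test-and-set / clear-and-wake pattern, sketched below; try_engine_reset() is a hypothetical helper for illustration only:

static bool try_engine_reset(unsigned long *lock, unsigned int bit)
{
	if (test_and_set_bit(bit, lock))
		return false;	/* another path already owns this reset */

	/* ... perform the engine reset while holding the bit ... */

	clear_and_wake_up_bit(bit, lock);	/* release and wake waiters */
	return true;
}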