@@ -558,6 +558,7 @@ void i915_request_submit(struct i915_request *request)
spin_lock_irqsave(&engine->timeline.lock, flags);
engine->request_stats.runnable++;
+ atomic_dec(&engine->request_stats.queued);
__i915_request_submit(request);
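This hunk is the hand-over point: once __i915_request_submit() passes the request to the backend, the request stops counting as queued and starts counting as runnable. Note the split in protection: runnable is a plain counter only ever touched under the engine timeline lock (taken here via spin_lock_irqsave()), while queued is an atomic_t so the add path can update it without that lock. Below is a minimal userspace sketch of the transition, with invented names and C11 atomics standing in for the kernel's atomic_t; it is not i915 code.

#include <stdatomic.h>

struct engine_stats {
	atomic_uint queued;    /* added by userspace, not yet handed to the backend */
	unsigned int runnable; /* handed to the backend; lock protected in the real code */
};

/* Called with the (elided) engine timeline lock held, as in the hunk above. */
static void stats_request_submitted(struct engine_stats *stats)
{
	stats->runnable++;                    /* plain increment, serialised by the lock */
	atomic_fetch_sub(&stats->queued, 1);  /* atomic, may race with the add path */
}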
@@ -1109,6 +1110,8 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
}
request->emitted_jiffies = jiffies;
+ atomic_inc(&engine->request_stats.queued);
+
/*
* Let the backend know a new request has arrived that may need
* to adjust the existing execution schedule due to a high priority
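The matching increment happens in __i915_request_add(), after the request has been fully emitted to the ring, so queued counts requests that have been added but have not yet reached the backend. A small self-contained sketch of the resulting lifecycle follows; the names are made up, and atomic_fetch_add()/atomic_fetch_sub() stand in for the kernel's atomic_inc()/atomic_dec().

#include <stdatomic.h>
#include <stdio.h>

struct engine_stats {
	atomic_uint queued;    /* added, still waiting to reach the backend */
	unsigned int runnable; /* submitted to the backend */
};

static void stats_request_added(struct engine_stats *s)
{
	atomic_fetch_add(&s->queued, 1);      /* __i915_request_add() side */
}

static void stats_request_submitted(struct engine_stats *s)
{
	s->runnable++;                        /* i915_request_submit() side */
	atomic_fetch_sub(&s->queued, 1);
}

int main(void)
{
	struct engine_stats s;

	atomic_init(&s.queued, 0);
	s.runnable = 0;

	stats_request_added(&s);      /* queued = 1, runnable = 0 */
	stats_request_submitted(&s);  /* queued = 0, runnable = 1 */

	printf("queued %u, runnable %u\n", atomic_load(&s.queued), s.runnable);
	return 0;
}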
@@ -1420,11 +1420,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (i915_terminally_wedged(&engine->i915->gpu_error))
drm_printf(m, "*** WEDGED ***\n");
- drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], runnable %u\n",
+ drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], queued %u, runnable %u\n",
intel_engine_get_seqno(engine),
intel_engine_last_submit(engine),
engine->hangcheck.seqno,
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
+ atomic_read(&engine->request_stats.queued),
engine->request_stats.runnable);
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
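For the debug dump the exact pairing of the two numbers does not matter, so intel_engine_dump() just snapshots the atomic with atomic_read() and reads runnable directly; the two values may be momentarily inconsistent with each other. A hedged userspace sketch of that kind of best-effort diagnostic read (illustrative names only, printf() standing in for drm_printf()):

#include <stdatomic.h>
#include <stdio.h>

struct engine_stats {
	atomic_uint queued;
	unsigned int runnable;
};

/* Best-effort snapshot for a debug dump; no locking, as in the hunk above. */
static void dump_engine_stats(struct engine_stats *s)
{
	printf("\tqueued %u, runnable %u\n",
	       atomic_load(&s->queued),   /* analogue of atomic_read() */
	       s->runnable);              /* read without the timeline lock */
}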
@@ -1201,7 +1201,9 @@ static void execlists_submit_request(struct i915_request *request)
queue_request(engine, &request->sched, rq_prio(request));
submit_queue(engine, rq_prio(request));
+
engine->request_stats.runnable++;
+ atomic_dec(&engine->request_stats.queued);
GEM_BUG_ON(!engine->execlists.first);
GEM_BUG_ON(list_empty(&request->sched.link));
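The execlists path does the same queued -> runnable accounting, but only after queue_request()/submit_queue() have handed the request to the scheduler, so the counters never claim the backend owns a request it has not yet seen. Every decrement here must be paired with an earlier increment from __i915_request_add(); the sketch below (invented names, C11 atomics, and an assert that is purely illustrative and not part of the patch) makes that pairing explicit.

#include <assert.h>
#include <stdatomic.h>

struct engine_stats {
	atomic_uint queued;
	unsigned int runnable;
};

static void stats_request_submitted_checked(struct engine_stats *s)
{
	unsigned int old;

	s->runnable++;
	old = atomic_fetch_sub(&s->queued, 1);
	assert(old > 0);  /* every decrement must match an earlier increment */
}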
@@ -345,6 +345,14 @@ struct intel_engine_cs {
void *pinned_default_state;
struct {
+ /**
+ * @queued: Number of requests waiting on dependencies.
+ *
+ * Count of requests which have been added to the engine but are still
+ * waiting for their dependencies to complete before they can be
+ * submitted to the backend.
+ */
+ atomic_t queued;
+
/**
* @runnable: Number of runnable requests sent to the backend.
*