@@ -511,6 +511,7 @@ void i915_request_submit(struct i915_request *request)
spin_lock_irqsave(&engine->timeline->lock, flags);
engine->request_stats.runnable++;
+ atomic_dec(&engine->request_stats.queued);
__i915_request_submit(request);
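This is the first of the two backend submission paths updated by the patch: once the submit fence has signalled and the request is handed to the backend under engine->timeline->lock, it stops counting as queued and starts counting as runnable. A minimal sketch of that transition as a shared helper (the helper name is hypothetical and not part of the patch; it assumes the request_stats layout added in the final hunk below):

	static void request_stats_make_runnable(struct intel_engine_cs *engine)
	{
		/* Both decrement sites (here and in execlists_submit_request)
		 * run under the engine timeline lock, which is what protects
		 * the plain ->runnable counter. */
		lockdep_assert_held(&engine->timeline->lock);

		engine->request_stats.runnable++;

		/* ->queued is an atomic_t because the matching increment in
		 * __i915_request_add() happens outside this lock. */
		atomic_dec(&engine->request_stats.queued);
	}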
@@ -1072,6 +1073,8 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
engine->schedule(request, request->ctx->priority);
rcu_read_unlock();
+ atomic_inc(&engine->request_stats.queued);
+
local_bh_disable();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
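Placing the increment ahead of i915_sw_fence_commit() matters: if the request has no outstanding dependencies, committing the submit fence may invoke the backend's ->submit_request() synchronously, and the matching atomic_dec() there has to observe a non-zero count. The same lines again, with that assumption spelled out as comments (annotation only, not an additional change):

	/* Account the request as queued before the submit fence can fire:
	 * with no dependencies, the commit below may call the backend's
	 * ->submit_request() immediately, which performs the matching
	 * atomic_dec(). */
	atomic_inc(&engine->request_stats.queued);

	local_bh_disable();
	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */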
@@ -1934,12 +1934,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (i915_terminally_wedged(&engine->i915->gpu_error))
drm_printf(m, "*** WEDGED ***\n");
- drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d, runnable %u\n",
+ drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d, queued %u, runnable %u\n",
intel_engine_get_seqno(engine),
intel_engine_last_submit(engine),
engine->hangcheck.seqno,
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
engine->timeline->inflight_seqnos,
+ atomic_read(&engine->request_stats.queued),
engine->request_stats.runnable);
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
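For reference, the per-engine line in the debug dump now reads roughly as follows (all values made up for illustration):

	current seqno 5c2, last 5c4, hangcheck 5c2 [12 ms], inflight 2, queued 3, runnable 1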
@@ -1124,7 +1124,9 @@ static void execlists_submit_request(struct i915_request *request)
queue_request(engine, &request->priotree, rq_prio(request));
submit_queue(engine, rq_prio(request));
+
engine->request_stats.runnable++;
+ atomic_dec(&engine->request_stats.queued);
GEM_BUG_ON(!engine->execlists.first);
GEM_BUG_ON(list_empty(&request->priotree.link));
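This is the second and final decrement site, the execlists-backend counterpart of the i915_request_submit() change above; both could share a helper along the lines of the earlier sketch. If underflow were ever a concern, a debug-only check could sit next to the decrement (hypothetical, not part of the patch):

	atomic_dec(&engine->request_stats.queued);
	GEM_BUG_ON(atomic_read(&engine->request_stats.queued) < 0);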
@@ -339,6 +339,14 @@ struct intel_engine_cs {
struct drm_i915_gem_object *default_state;
struct {
+ /**
+ * @queued: Number of requests waiting to be submitted to the backend.
+ *
+ * Count of requests added via __i915_request_add() which are still
+ * waiting on their dependencies before reaching the backend.
+ */
+ atomic_t queued;
+
/**
* @runnable: Number of runnable requests sent to the backend.
*