@@ -634,6 +634,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
rcu_read_lock();
request->engine->submit_request(request);
rcu_read_unlock();
+ atomic_dec(&request->engine->request_stats.queued);
break;

case FENCE_FREE:
@@ -1112,6 +1113,8 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
engine->schedule(request, request->ctx->priority);
rcu_read_unlock();

+ atomic_inc(&engine->request_stats.queued);
+
local_bh_disable();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
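Taken together, the two hunks above bracket the lifetime of the new counter: it is incremented in __i915_request_add() once the request has been queued (and, where available, handed to the scheduler), and decremented in submit_notify() when the submit fence signals and the request is passed to engine->submit_request(). The stand-alone sketch below only illustrates that pairing; it uses C11 atomics and invented names (mock_engine_stats, mock_request_add, mock_request_submit), not driver code.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-alone illustration only: mirrors the inc/dec pairing added by the
 * patch, not the driver's actual data structures. */
struct mock_engine_stats {
	atomic_int queued;	/* added, still waiting on dependencies */
};

/* Counterpart of the increment in __i915_request_add(). */
static void mock_request_add(struct mock_engine_stats *stats)
{
	atomic_fetch_add(&stats->queued, 1);
}

/* Counterpart of the decrement in submit_notify(): dependencies have
 * resolved and the request is handed to the backend. */
static void mock_request_submit(struct mock_engine_stats *stats)
{
	atomic_fetch_sub(&stats->queued, 1);
}

int main(void)
{
	struct mock_engine_stats stats = { 0 };

	mock_request_add(&stats);
	mock_request_add(&stats);
	mock_request_submit(&stats);

	/* One request is still queued behind its dependencies. */
	printf("queued %d\n", atomic_load(&stats.queued));
	return 0;
}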
@@ -1917,12 +1917,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (i915_terminally_wedged(&engine->i915->gpu_error))
drm_printf(m, "*** WEDGED ***\n");
- drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d, runnable %u\n",
+ drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d, queued %u, runnable %u\n",
intel_engine_get_seqno(engine),
intel_engine_last_submit(engine),
engine->hangcheck.seqno,
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
engine->timeline->inflight_seqnos,
+ atomic_read(&engine->request_stats.queued),
engine->request_stats.runnable);
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
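The extra column shows up in the engine dump that is also reachable through debugfs (the i915_engine_info node, assuming the usual debugfs layout; this patch does not touch that plumbing), so the counter can be watched from userspace with nothing more elaborate than the sketch below:

#include <stdio.h>

int main(void)
{
	/* Path assumes debugfs mounted at the default location and card 0. */
	FILE *f = fopen("/sys/kernel/debug/dri/0/i915_engine_info", "r");
	char line[512];

	if (!f) {
		perror("i915_engine_info");
		return 1;
	}

	/* Each engine's summary line now carries "queued %u" as well. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return 0;
}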
@@ -339,6 +339,14 @@ struct intel_engine_cs {
struct drm_i915_gem_object *default_state;

struct {
+ /**
+ * @queued: Number of submitted requests with dependencies.
+ *
+ * Count of requests waiting for dependencies before they can be
+ * submitted to the backend.
+ */
+ atomic_t queued;
+
/**
* @runnable: Number of runnable requests sent to the backend.
*
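Since the field is a bare atomic_t, readers outside the dump path need nothing more than atomic_read(); a helper along these lines (hypothetical, not part of the patch, and the name is an assumption) would keep that access in one place:

/* Hypothetical accessor, not introduced by this patch: snapshot the number
 * of requests on @engine still waiting on their dependencies. */
static inline int intel_engine_queued_count(const struct intel_engine_cs *engine)
{
	return atomic_read(&engine->request_stats.queued);
}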