@@ -474,6 +474,7 @@ void i915_request_submit(struct i915_request *request)
spin_lock_irqsave(&engine->timeline.lock, flags);
engine->request_stats.runnable++;
+ atomic_dec(&engine->request_stats.queued);
__i915_request_submit(request);
@@ -1036,6 +1037,8 @@ void i915_request_add(struct i915_request *request)
}
request->emitted_jiffies = jiffies;
+ atomic_inc(&engine->request_stats.queued);
+
/*
* Let the backend know a new request has arrived that may need
* to adjust the existing execution schedule due to a high priority
@@ -1460,11 +1460,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (i915_terminally_wedged(&engine->i915->gpu_error))
drm_printf(m, "*** WEDGED ***\n");
- drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], runnable %u\n",
+ drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], queued %u, runnable %u\n",
intel_engine_get_seqno(engine),
intel_engine_last_submit(engine),
engine->hangcheck.seqno,
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
+ atomic_read(&engine->request_stats.queued),
engine->request_stats.runnable);
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
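
With the hunk above, the engine dump line gains a "queued" field next to
"runnable". An illustrative example of the resulting output (values are
hypothetical, shown only for the shape of the line):

	current seqno 2a, last 2c, hangcheck 2a [1200 ms], queued 1, runnable 2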
@@ -1024,6 +1024,7 @@ static void queue_request(struct intel_engine_cs *engine,
{
list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
engine->request_stats.runnable++;
+ atomic_dec(&engine->request_stats.queued);
}
static void __submit_queue_imm(struct intel_engine_cs *engine)
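
Taken together with the i915_request_add() hunk, each request makes a
simple queued -> runnable transition. A minimal sketch of the intended
accounting (annotation only, not part of the patch; the rq_stats_*
helper names are hypothetical):

	/* i915_request_add(): request accepted, waiting on dependencies.
	 * Runs outside engine->timeline.lock, hence queued is an atomic_t.
	 */
	static void rq_stats_on_add(struct intel_engine_cs *engine)
	{
		atomic_inc(&engine->request_stats.queued);
	}

	/* i915_request_submit() (legacy ring) or queue_request()
	 * (scheduler): dependencies resolved, request handed to the
	 * backend. Both paths hold engine->timeline.lock, so the plain
	 * runnable counter needs no atomics, and the paths are mutually
	 * exclusive for a given request, so queued is decremented once.
	 */
	static void rq_stats_on_submit(struct intel_engine_cs *engine)
	{
		atomic_dec(&engine->request_stats.queued);
		engine->request_stats.runnable++;
	}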
@@ -381,6 +381,14 @@ struct intel_engine_cs {
void *pinned_default_state;
struct {
+ /**
+ * @queued: Number of requests not yet submitted to the backend.
+ *
+ * Count of requests added via i915_request_add() which are waiting
+ * for their dependencies before they can be submitted to the backend.
+ */
+ atomic_t queued;
+
/**
* @runnable: Number of runnable requests sent to the backend.
*