@@ -3258,6 +3258,81 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
return 0;
}
+static int i915_scheduler_info(struct seq_file *m, void *unused) /* debugfs read handler: dump scheduler statistics as a table, one column per ring */
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_scheduler *scheduler = dev_priv->scheduler; /* NOTE(review): not NULL-checked — confirm the scheduler is always allocated when this debugfs file is registered */
+ struct i915_scheduler_stats *stats = scheduler->stats; /* per-ring counter array, indexed by ring id */
+ struct i915_scheduler_stats_nodes node_stats[I915_NUM_RINGS]; /* per-state node counts, filled by i915_scheduler_query_stats() below */
+ struct intel_engine_cs *ring;
+ char str[50 * (I915_NUM_RINGS + 1)], name[50], *ptr; /* row buffer: ~50 chars for the label plus 50 per ring — assumes each printed field fits in its column */
+ int ret, i, r;
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+ /* Emit one table row: a 22-char label then a 10-char value per ring. Relies on 'ring'/'r' being the loop cursors that 'var' may reference. */
+#define PRINT_VAR(name, fmt, var) \
+ do { \
+ sprintf(str, "%-22s", name); \
+ ptr = str + strlen(str); \
+ for_each_ring(ring, dev_priv, r) { \
+ sprintf(ptr, " %10" fmt, var); \
+ ptr += strlen(ptr); \
+ } \
+ seq_printf(m, "%s\n", str); \
+ } while (0)
+ /* Column headers: ring name and the last seqno the hardware reports. */
+ PRINT_VAR("Ring name:", "s", dev_priv->ring[r].name);
+ PRINT_VAR(" Ring seqno", "d", ring->get_seqno(ring, false));
+ seq_putc(m, '\n');
+
+ seq_puts(m, "Batch submissions:\n");
+ PRINT_VAR(" Queued", "u", stats[r].queued);
+ PRINT_VAR(" Submitted", "u", stats[r].submitted);
+ PRINT_VAR(" Completed", "u", stats[r].completed);
+ PRINT_VAR(" Expired", "u", stats[r].expired);
+ seq_putc(m, '\n');
+
+ seq_puts(m, "Flush counts:\n");
+ PRINT_VAR(" By object", "u", stats[r].flush_obj);
+ PRINT_VAR(" By request", "u", stats[r].flush_req);
+ PRINT_VAR(" Blanket", "u", stats[r].flush_all);
+ PRINT_VAR(" Entries bumped", "u", stats[r].flush_bump);
+ PRINT_VAR(" Entries submitted", "u", stats[r].flush_submit);
+ seq_putc(m, '\n');
+
+ seq_puts(m, "Miscellaneous:\n");
+ PRINT_VAR(" ExecEarly retry", "u", stats[r].exec_early);
+ PRINT_VAR(" ExecFinal requeue", "u", stats[r].exec_again);
+ PRINT_VAR(" ExecFinal killed", "u", stats[r].exec_dead);
+ PRINT_VAR(" Fence wait", "u", stats[r].fence_wait);
+ PRINT_VAR(" Fence wait again", "u", stats[r].fence_again);
+ PRINT_VAR(" Fence wait ignore", "u", stats[r].fence_ignore);
+ PRINT_VAR(" Fence supplied", "u", stats[r].fence_got);
+ PRINT_VAR(" Hung flying", "u", stats[r].kill_flying);
+ PRINT_VAR(" Hung queued", "u", stats[r].kill_queued);
+ seq_putc(m, '\n');
+
+ seq_puts(m, "Queue contents:\n");
+ for_each_ring(ring, dev_priv, i)
+ i915_scheduler_query_stats(ring, node_stats + ring->id);
+ /* One row per node state, including the final "Invalid" overflow bucket. */
+ for (i = 0; i < (i915_sqs_MAX + 1); i++) {
+ sprintf(name, " %s", i915_scheduler_queue_status_str(i));
+ PRINT_VAR(name, "d", node_stats[r].counts[i]);
+ }
+ seq_putc(m, '\n');
+
+#undef PRINT_VAR
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
@@ -5250,6 +5325,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_semaphore_status", i915_semaphore_status, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
+ {"i915_scheduler_info", i915_scheduler_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
{"i915_ddb_info", i915_ddb_info, 0},
{"i915_sseu_status", i915_sseu_status, 0},
@@ -1507,8 +1507,15 @@ static int i915_early_fence_wait(struct intel_engine_cs *ring, int fence_fd)
}
if (atomic_read(&fence->status) == 0) {
- if (!i915_safe_to_ignore_fence(ring, fence))
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+
+ if (i915_safe_to_ignore_fence(ring, fence))
+ scheduler->stats[ring->id].fence_ignore++;
+ else {
+ scheduler->stats[ring->id].fence_wait++;
ret = sync_fence_wait(fence, 1000);
+ }
}
sync_fence_put(fence);
@@ -1922,6 +1929,8 @@ pre_mutex_err:
args->rsvd2 = (__u64) -1;
}
+ dev_priv->scheduler->stats[ring->id].exec_early++;
+
return ret;
}
@@ -120,6 +120,9 @@ const char *i915_scheduler_queue_status_str(
case i915_sqs_dead:
return "Dead";
+ case i915_sqs_MAX:
+ return "Invalid";
+
default:
break;
}
@@ -211,10 +214,14 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
BUG_ON(!scheduler);
+ if (qe->params.fence_wait)
+ scheduler->stats[ring->id].fence_got++;
+
if (i915.scheduler_override & i915_so_direct_submit) {
int ret;
qe->scheduler_index = scheduler->index++;
+ scheduler->stats[qe->params.ring->id].queued++;
trace_i915_scheduler_queue(qe->params.ring, qe);
@@ -225,6 +232,7 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
scheduler->flags[qe->params.ring->id] |= i915_sf_submitting;
ret = dev_priv->gt.execbuf_final(&qe->params);
+ scheduler->stats[qe->params.ring->id].submitted++;
scheduler->flags[qe->params.ring->id] &= ~i915_sf_submitting;
/*
@@ -260,6 +268,8 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
sync_fence_put(qe->params.fence_wait);
#endif
+ scheduler->stats[qe->params.ring->id].expired++;
+
return 0;
}
@@ -372,6 +382,8 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
not_flying = i915_scheduler_count_flying(scheduler, ring) <
scheduler->min_flying;
+ scheduler->stats[ring->id].queued++;
+
trace_i915_scheduler_queue(ring, node);
trace_i915_scheduler_node_state_change(ring, node);
@@ -516,10 +528,12 @@ void i915_scheduler_kill_all(struct drm_device *dev)
case I915_SQS_CASE_FLYING:
i915_scheduler_node_kill(node);
+ scheduler->stats[r].kill_flying++;
break;
case I915_SQS_CASE_QUEUED:
i915_scheduler_node_kill_queued(node);
+ scheduler->stats[r].kill_queued++;
break;
default:
@@ -594,6 +608,7 @@ static void i915_scheduler_seqno_complete(struct intel_engine_cs *ring, uint32_t
/* Node was in flight so mark it as complete. */
node->status = i915_sqs_complete;
trace_i915_scheduler_node_state_change(ring, node);
+ scheduler->stats[ring->id].completed++;
got_changes = true;
}
@@ -743,6 +758,7 @@ static int i915_scheduler_remove(struct intel_engine_cs *ring)
list_del(&node->link);
list_add(&node->link, &remove);
+ scheduler->stats[ring->id].expired++;
/* Strip the dependency info while the mutex is still locked */
i915_scheduler_remove_dependent(scheduler, node);
@@ -985,6 +1001,35 @@ static int i915_scheduler_dump_locked(struct intel_engine_cs *ring, const char *
return 0;
}
+int i915_scheduler_query_stats(struct intel_engine_cs *ring,
+ struct i915_scheduler_stats_nodes *stats) /* Fill 'stats' with a per-state count of the nodes currently on 'ring's queue. Always returns 0. */
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+ struct i915_scheduler_queue_entry *node;
+ unsigned long flags;
+
+ memset(stats, 0x00, sizeof(*stats)); /* caller's buffer may be uninitialised */
+
+ spin_lock_irqsave(&scheduler->lock, flags); /* walk the queue atomically w.r.t. submission/completion paths */
+
+ list_for_each_entry(node, &scheduler->node_queue[ring->id], link) {
+ if (node->status >= i915_sqs_MAX) { /* unknown/corrupt state: log it and tally in the overflow bucket */
+ DRM_DEBUG_DRIVER("Invalid node state: %d! [uniq = %d, seqno = %d]\n",
+ node->status, node->params.request->uniq, node->params.request->seqno);
+
+ stats->counts[i915_sqs_MAX]++;
+ continue;
+ }
+
+ stats->counts[node->status]++;
+ }
+
+ spin_unlock_irqrestore(&scheduler->lock, flags);
+
+ return 0; /* no failure path; int return kept for future use */
+}
+
int i915_scheduler_flush_request(struct drm_i915_gem_request *req,
bool is_locked)
{
@@ -1021,16 +1066,21 @@ int i915_scheduler_flush_request(struct drm_i915_gem_request *req,
spin_lock_irqsave(&scheduler->lock, flags);
+ scheduler->stats[ring_id].flush_req++;
+
i915_scheduler_priority_bump_clear(scheduler);
flush_count = i915_scheduler_priority_bump(scheduler,
req->scheduler_qe, scheduler->priority_level_max);
+ scheduler->stats[ring_id].flush_bump += flush_count;
spin_unlock_irqrestore(&scheduler->lock, flags);
if (flush_count) {
DRM_DEBUG_DRIVER("<%s> Bumped %d entries\n", req->ring->name, flush_count);
flush_count = i915_scheduler_submit_max_priority(req->ring, is_locked);
+ if (flush_count > 0)
+ scheduler->stats[ring_id].flush_submit += flush_count;
}
return flush_count;
@@ -1057,6 +1107,8 @@ int i915_scheduler_flush(struct intel_engine_cs *ring, bool is_locked)
BUG_ON(is_locked && (scheduler->flags[ring->id] & i915_sf_submitting));
+ scheduler->stats[ring->id].flush_all++;
+
do {
found = false;
spin_lock_irqsave(&scheduler->lock, flags);
@@ -1071,6 +1123,7 @@ int i915_scheduler_flush(struct intel_engine_cs *ring, bool is_locked)
if (found) {
ret = i915_scheduler_submit(ring, is_locked);
+ scheduler->stats[ring->id].flush_submit++;
if (ret < 0)
return ret;
@@ -1213,15 +1266,20 @@ static void i915_scheduler_wait_fence_signaled(struct sync_fence *fence,
static bool i915_scheduler_async_fence_wait(struct drm_device *dev,
struct i915_scheduler_queue_entry *node)
{
+ struct drm_i915_private *dev_priv = node->params.ring->dev->dev_private;
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
struct i915_sync_fence_waiter *fence_waiter;
struct sync_fence *fence = node->params.fence_wait;
int signaled;
bool success = true;
- if ((node->flags & i915_qef_fence_waiting) == 0)
+ if ((node->flags & i915_qef_fence_waiting) == 0) {
node->flags |= i915_qef_fence_waiting;
- else
+ scheduler->stats[node->params.ring->id].fence_wait++;
+ } else {
+ scheduler->stats[node->params.ring->id].fence_again++;
return true;
+ }
if (fence == NULL)
return false;
@@ -1291,8 +1349,10 @@ static int i915_scheduler_pop_from_queue_locked(struct intel_engine_cs *ring,
else
signalled = true;
- if (!signalled)
+ if (!signalled) {
signalled = i915_safe_to_ignore_fence(ring, node->params.fence_wait);
+ scheduler->stats[node->params.ring->id].fence_ignore++;
+ }
#endif // CONFIG_SYNC
has_local = false;
@@ -1434,6 +1494,8 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
* list. So add it back in and mark it as in flight. */
i915_scheduler_fly_node(node);
+ scheduler->stats[ring->id].submitted++;
+
scheduler->flags[ring->id] |= i915_sf_submitting;
spin_unlock_irqrestore(&scheduler->lock, flags);
ret = dev_priv->gt.execbuf_final(&node->params);
@@ -1452,6 +1514,7 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
case ENOENT:
/* Fatal errors. Kill the node. */
requeue = -1;
+ scheduler->stats[ring->id].exec_dead++;
break;
case EAGAIN:
@@ -1461,12 +1524,14 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
case ERESTARTSYS:
case EINTR:
/* Supposedly recoverable errors. */
+ scheduler->stats[ring->id].exec_again++;
break;
default:
DRM_DEBUG_DRIVER("<%s> Got unexpected error from execfinal(): %d!\n",
ring->name, ret);
/* Assume it is recoverable and hope for the best. */
+ scheduler->stats[ring->id].exec_again++;
break;
}
@@ -80,6 +80,36 @@ struct i915_scheduler_queue_entry {
};
const char *i915_qe_state_str(struct i915_scheduler_queue_entry *node);
+struct i915_scheduler_stats_nodes {
+ uint32_t counts[i915_sqs_MAX + 1]; /* one bucket per queue-entry state, plus a final bucket for invalid states */
+};
+
+struct i915_scheduler_stats { /* per-ring lifetime counters, dumped by the i915_scheduler_info debugfs file */
+ /* Batch buffer counts: */
+ uint32_t queued; /* batches added to the scheduler queue */
+ uint32_t submitted; /* batches handed to execbuf_final() */
+ uint32_t completed; /* in-flight nodes whose seqno completed */
+ uint32_t expired; /* nodes removed after completion (or direct-submit teardown) */
+
+ /* Other stuff: */
+ uint32_t flush_obj; /* NOTE(review): not incremented anywhere in this patch — confirm the by-object flush path exists */
+ uint32_t flush_req; /* flushes targeting a specific request */
+ uint32_t flush_all; /* blanket flushes of a whole ring */
+ uint32_t flush_bump; /* entries priority-bumped by a flush */
+ uint32_t flush_submit; /* entries submitted as a result of a flush */
+
+ uint32_t exec_early; /* execbuffer calls that bailed out before final submission */
+ uint32_t exec_again; /* execbuf_final() returns treated as recoverable (requeued) */
+ uint32_t exec_dead; /* nodes killed after a fatal execbuf_final() error */
+ uint32_t kill_flying; /* in-flight nodes killed by kill_all */
+ uint32_t kill_queued; /* queued nodes killed by kill_all */
+
+ uint32_t fence_wait; /* waits actually performed on an incoming fence */
+ uint32_t fence_again; /* repeat waits on a fence already being waited upon */
+ uint32_t fence_ignore; /* fences deemed safe to ignore */
+ uint32_t fence_got; /* batches that arrived with a fence to wait on */
+};
+
struct i915_scheduler {
struct list_head node_queue[I915_NUM_RINGS];
uint32_t flags[I915_NUM_RINGS];
@@ -92,6 +122,9 @@ struct i915_scheduler {
uint32_t priority_level_preempt;
uint32_t min_flying;
uint32_t file_queue_max;
+
+ /* Statistics: */
+ struct i915_scheduler_stats stats[I915_NUM_RINGS];
};
/* Flag bits for i915_scheduler::flags */
@@ -135,6 +168,8 @@ int i915_scheduler_dump(struct intel_engine_cs *ring,
int i915_scheduler_dump_all(struct drm_device *dev, const char *msg);
bool i915_scheduler_is_request_tracked(struct drm_i915_gem_request *req,
bool *completed, bool *busy);
+int i915_scheduler_query_stats(struct intel_engine_cs *ring,
+ struct i915_scheduler_stats_nodes *stats);
bool i915_scheduler_file_queue_is_full(struct drm_file *file);
#endif /* _I915_SCHEDULER_H_ */