@@ -1693,6 +1693,37 @@ int i915_scheduler_closefile(struct drm_device *dev, struct drm_file *file)
return 0;
}
+/*
+ * Used by TDR to distinguish hung rings (stalled with batches still in
+ * flight) from idle rings (stalled because there is no work queued).
+ */
+bool i915_scheduler_is_ring_flying(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+ struct i915_scheduler_queue_entry *node;
+ unsigned long flags;
+ bool found = false;
+
+ /* Bypass mode tracks no queue state; conservatively report busy. */
+ if (i915.scheduler_override & i915_so_direct_submit) {
+ return true;
+ }
+
+ spin_lock_irqsave(&scheduler->lock, flags);
+
+ list_for_each_entry(node, &scheduler->node_queue[ring->id], link) {
+ if (I915_SQS_IS_FLYING(node)) {
+ found = true;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&scheduler->lock, flags);
+
+ return found;
+}
+
bool i915_scheduler_file_queue_is_full(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -154,6 +154,7 @@ int i915_scheduler_closefile(struct drm_device *dev,
void i915_gem_scheduler_clean_node(struct i915_scheduler_queue_entry *node);
int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe);
int i915_scheduler_handle_irq(struct intel_engine_cs *ring);
+bool i915_scheduler_is_ring_flying(struct intel_engine_cs *ring);
void i915_scheduler_kill_all(struct drm_device *dev);
void i915_gem_scheduler_work_handler(struct work_struct *work);
#ifdef CONFIG_SYNC