@@ -3327,6 +3327,8 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
 	}
 
 	intel_ring_init_seqno(engine, engine->last_submitted_seqno);
+
+	i915_scheduler_reset_cleanup(engine);
 }
 
 void i915_gem_reset(struct drm_device *dev)
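
Note (not part of the patch): i915_gem_reset_engine_cleanup() is driven once per engine from the device-wide reset path, so the new hook runs for every engine whose outstanding requests are being torn down. The sketch below is illustrative only; the name example_gem_reset() is hypothetical, and it assumes the for_each_engine() iterator of this era of the driver. It mirrors the shape of the i915_gem_reset() function visible in the context above.

/* Illustrative only: how the per-engine cleanup above is typically driven
 * from the device-wide reset path.
 */
void example_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;

	/* Tear down per-engine state, including the scheduler's irq hold. */
	for_each_engine(engine, dev_priv)
		i915_gem_reset_engine_cleanup(dev_priv, engine);
}
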
@@ -821,6 +821,17 @@ void i915_scheduler_clean_node(struct i915_scheduler_queue_entry *node)
 	}
 }
 
+void i915_scheduler_reset_cleanup(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct i915_scheduler *scheduler = dev_priv->scheduler;
+
+	if (scheduler->flags[engine->id] & I915_SF_INTERRUPTS_ENABLED) {
+		engine->irq_put(engine);
+		scheduler->flags[engine->id] &= ~I915_SF_INTERRUPTS_ENABLED;
+	}
+}
+
 static bool i915_scheduler_remove(struct i915_scheduler *scheduler,
 				  struct intel_engine_cs *engine,
 				  struct list_head *remove)
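
Note (not part of the patch): the new function is the release half of a get/put pair. The sketch below is illustrative only; the name example_scheduler_irq_get() is hypothetical, and it assumes the engine's paired irq_get()/irq_put() hooks of this era of the driver. It shows the acquire side that i915_scheduler_reset_cleanup() unwinds, so the engine's user-interrupt refcount stays balanced when a GPU reset throws away the scheduler's tracking state.

/* Illustrative sketch only, not taken from this series: take a single
 * user-interrupt reference per engine and remember that fact in the
 * scheduler's per-engine flags, which is exactly what the reset cleanup
 * above undoes.
 */
static void example_scheduler_irq_get(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = to_i915(engine->dev);
	struct i915_scheduler *scheduler = dev_priv->scheduler;

	/* Only ever hold one reference per engine. */
	if (scheduler->flags[engine->id] & I915_SF_INTERRUPTS_ENABLED)
		return;

	/* irq_get() reports whether the user interrupt could be enabled. */
	if (engine->irq_get(engine))
		scheduler->flags[engine->id] |= I915_SF_INTERRUPTS_ENABLED;
}
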
@@ -106,6 +106,7 @@ bool i915_scheduler_is_enabled(struct drm_device *dev);
 int i915_scheduler_init(struct drm_device *dev);
 void i915_scheduler_destroy(struct drm_i915_private *dev_priv);
 void i915_scheduler_closefile(struct drm_device *dev, struct drm_file *file);
+void i915_scheduler_reset_cleanup(struct intel_engine_cs *engine);
 void i915_scheduler_clean_node(struct i915_scheduler_queue_entry *node);
 int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe);
 bool i915_scheduler_notify_request(struct drm_i915_gem_request *req);