@@ -1116,6 +1116,7 @@ int i915_driver_unload(struct drm_device *dev)
/* Free error state after interrupts are fully disabled. */
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_destroy_error_state(dev);
+ cancel_work_sync(&dev_priv->gpu_error.work);
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
@@ -1245,6 +1245,7 @@ struct i915_gpu_error {
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
struct drm_i915_error_state *first_error;
+ struct work_struct work;
unsigned long missed_irq_rings;
@@ -2300,15 +2300,18 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
}
/**
- * i915_reset_and_wakeup - do process context error handling work
+ * i915_error_work_func - do process context error handling work
*
* Fire an error uevent so userspace can see that a hang or error
* was detected.
*/
-static void i915_reset_and_wakeup(struct drm_device *dev)
+static void i915_error_work_func(struct work_struct *work)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_gpu_error *error = &dev_priv->gpu_error;
+ struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
+ work);
+ struct drm_i915_private *dev_priv =
+ container_of(error, struct drm_i915_private, gpu_error);
+ struct drm_device *dev = dev_priv->dev;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
@@ -2668,7 +2671,21 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask, bool wedged,
i915_error_wake_up(dev_priv, false);
}
- i915_reset_and_wakeup(dev);
+ /*
+ * Gen 7:
+ *
+ * Our reset work can grab modeset locks (since it needs to reset the
+ * state of outstanding pageflips). Hence it must not be run on our own
+ * dev_priv->wq work queue for otherwise the flush_work in the pageflip
+ * code will deadlock.
+ * If the error work is already in the work queue then it will not be added
+ * again. It hasn't yet executed so it will see the reset flags when
+ * it is scheduled. If it isn't in the queue or it is currently
+ * executing then this call will add it to the queue again so that
+ * even if it misses the reset flags during the current call it is
+ * guaranteed to see them on the next call.
+ */
+ schedule_work(&dev_priv->gpu_error.work);
}
/* Called from drm generic code, passed 'crtc' which
@@ -4389,6 +4406,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);