@@ -736,22 +736,81 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
	return i915_drm_suspend_late(dev, false);
}
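+
+/*
+ * Book-keeping for an asynchronous resume: @cookie identifies the async
+ * work last scheduled on the device's async domain so that the resume
+ * completion step can wait for it; @kref manages the context's lifetime
+ * (see resume_context_get/put below).
+ */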
+struct resume_context {
+	struct kref kref;
+	struct drm_i915_private *i915;
+
+	async_cookie_t cookie;
+};
+
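+/*
+ * Return the resume context for @dev, allocating it on first use and
+ * caching it in dev_priv->async_context; returns NULL if the
+ * allocation fails.
+ */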
+static struct resume_context *resume_context(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct resume_context *ctx;
+
+	ctx = dev_priv->async_context;
+	if (!ctx) {
+		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+		if (!ctx)
+			return NULL;
+
+		kref_init(&ctx->kref);
+		ctx->i915 = dev_priv;
+
+		dev_priv->async_context = ctx;
+	}
+
+	return ctx;
+}
+
+static void resume_context_free(struct kref *kref)
+{
+	struct resume_context *ctx = container_of(kref, typeof(*ctx), kref);
+
+	kfree(ctx);
+}
+
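+/* Reference helpers; the final put frees the context. */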
+static struct resume_context *resume_context_get(struct resume_context *ctx)
+{
+	kref_get(&ctx->kref);
+	return ctx;
+}
+
+static void resume_context_put(struct resume_context *ctx)
+{
+	kref_put(&ctx->kref, resume_context_free);
+}
+
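+/*
+ * Wait for any asynchronous resume work still in flight, then drop the
+ * cached resume context so the next resume cycle starts afresh.
+ */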
static void i915_drm_complete(struct drm_device *dev)
{
+	struct resume_context *ctx;
+
+	ctx = resume_context(dev);
+	if (!ctx)
+		return;
+
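+	/*
+	 * async_synchronize_cookie_domain() waits for all work queued
+	 * prior to the given cookie, so pass cookie + 1 to include the
+	 * work scheduled under ctx->cookie itself.
+	 */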
+	if (ctx->cookie)
+		async_synchronize_cookie_domain(ctx->cookie + 1,
+						&ctx->i915->async_domain);
+
+	ctx->i915->async_context = NULL;
+	resume_context_put(ctx);
}

static int i915_drm_resume(struct drm_device *dev)
{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
+	struct resume_context *ctx;

-	disable_rpm_wakeref_asserts(dev_priv);
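+	/*
+	 * The context is cached in dev_priv->async_context; the reference
+	 * is not dropped until i915_drm_complete().
+	 */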
+	ctx = resume_context(dev);
+	if (!ctx)
+		return -ENOMEM;

-	ret = i915_ggtt_enable_hw(dev);
-	if (ret)
+	disable_rpm_wakeref_asserts(ctx->i915);
+
+	if (i915_ggtt_enable_hw(dev))
		DRM_ERROR("failed to re-enable GGTT\n");

-	intel_csr_ucode_resume(dev_priv);
+	intel_csr_ucode_resume(ctx->i915);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);
@@ -771,12 +830,12 @@ static int i915_drm_resume(struct drm_device *dev)
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
-	intel_runtime_pm_enable_interrupts(dev_priv);
+	intel_runtime_pm_enable_interrupts(ctx->i915);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_WEDGED, &ctx->i915->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);
@@ -784,10 +843,10 @@ static int i915_drm_resume(struct drm_device *dev)
	intel_modeset_init_hw(dev);

-	spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&ctx->i915->irq_lock);
+	if (ctx->i915->display.hpd_irq_setup)
+		ctx->i915->display.hpd_irq_setup(ctx->i915);
+	spin_unlock_irq(&ctx->i915->irq_lock);

	intel_dp_mst_resume(dev);
@@ -799,7 +858,7 @@ static int i915_drm_resume(struct drm_device *dev)
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 * */
-	intel_hpd_init(dev_priv);
+	intel_hpd_init(ctx->i915);

	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);
@@ -807,24 +866,28 @@ static int i915_drm_resume(struct drm_device *dev)
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

-	mutex_lock(&dev_priv->modeset_restore_lock);
-	dev_priv->modeset_restore = MODESET_DONE;
-	mutex_unlock(&dev_priv->modeset_restore_lock);
+	mutex_lock(&ctx->i915->modeset_restore_lock);
+	ctx->i915->modeset_restore = MODESET_DONE;
+	mutex_unlock(&ctx->i915->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	drm_kms_helper_poll_enable(dev);

-	enable_rpm_wakeref_asserts(dev_priv);
+	enable_rpm_wakeref_asserts(ctx->i915);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct resume_context *ctx;
	int ret;

+	ctx = resume_context(dev);
+	if (!ctx)
+		return -ENOMEM;
+
	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
@@ -871,34 +934,34 @@ static int i915_drm_resume_early(struct drm_device *dev)
	pci_set_master(dev->pdev);

-	disable_rpm_wakeref_asserts(dev_priv);
+	disable_rpm_wakeref_asserts(ctx->i915);

-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		ret = vlv_resume_prepare(dev_priv, false);
+	if (IS_VALLEYVIEW(ctx->i915) || IS_CHERRYVIEW(ctx->i915))
+		ret = vlv_resume_prepare(ctx->i915, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);
-	intel_uncore_early_sanitize(dev_priv, true);
+	intel_uncore_early_sanitize(ctx->i915, true);

-	if (IS_BROXTON(dev_priv)) {
-		if (!dev_priv->suspended_to_idle)
-			gen9_sanitize_dc_state(dev_priv);
-		bxt_disable_dc9(dev_priv);
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		hsw_disable_pc8(dev_priv);
+	if (IS_BROXTON(ctx->i915)) {
+		if (!ctx->i915->suspended_to_idle)
+			gen9_sanitize_dc_state(ctx->i915);
+		bxt_disable_dc9(ctx->i915);
+	} else if (IS_HASWELL(ctx->i915) || IS_BROADWELL(ctx->i915)) {
+		hsw_disable_pc8(ctx->i915);
	}

-	intel_uncore_sanitize(dev_priv);
+	intel_uncore_sanitize(ctx->i915);

-	if (IS_BROXTON(dev_priv) ||
-	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
-		intel_power_domains_init_hw(dev_priv, true);
+	if (IS_BROXTON(ctx->i915) ||
+	    !(ctx->i915->suspended_to_idle && ctx->i915->csr.dmc_payload))
+		intel_power_domains_init_hw(ctx->i915, true);

-	enable_rpm_wakeref_asserts(dev_priv);
+	enable_rpm_wakeref_asserts(ctx->i915);

out:
-	dev_priv->suspended_to_idle = false;
+	ctx->i915->suspended_to_idle = false;

	return ret;
}