@@ -4742,6 +4742,13 @@ int i915_gem_freeze(struct drm_i915_private *dev_priv)
i915_gem_shrink_all(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ /*
+ * Cannot call synchronize_rcu() while holding struct_mutex: it may
+ * block until pending workqueue items complete, and a running work
+ * item may itself be blocked waiting to take struct_mutex.
+ */
+ synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
+
intel_runtime_pm_put(dev_priv);
return 0;
@@ -4781,6 +4788,8 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
}
mutex_unlock(&dev_priv->drm.struct_mutex);
+ synchronize_rcu_expedited();
+
return 0;
}
@@ -235,9 +235,6 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if (unlock)
mutex_unlock(&dev_priv->drm.struct_mutex);
- /* expedite the RCU grace period to free some request slabs */
- synchronize_rcu_expedited();
-
return count;
}
@@ -263,7 +260,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE);
- synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
return freed;
}
@@ -321,8 +317,17 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
sc->nr_to_scan - freed,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
- if (unlock)
+ if (unlock) {
mutex_unlock(&dev->struct_mutex);
+ /*
+ * If reclaim was invoked by an allocation made while struct_mutex
+ * was already held, we cannot call synchronize_rcu_expedited():
+ * it depends on workqueues making progress, but a running work
+ * item may itself be blocked waiting for us to release struct_mutex.
+ * So only wait here, after dropping the lock we took ourselves.
+ */
+ synchronize_rcu_expedited();
+ }
return freed;
}
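
The comments above describe a classic deadlock shape: a thread holding a lock waits for work that can only complete once that lock is dropped. As a rough illustration, the minimal userspace sketch below models why the wait has to happen after the unlock. It uses pthreads, not i915 or RCU code, and every name in it (big_lock, do_deferred_free) is an invented stand-in for struct_mutex and the RCU/workqueue machinery.

/*
 * Toy userspace sketch, not i915 code: big_lock and do_deferred_free()
 * are invented stand-ins for struct_mutex and a workqueue item, used
 * only to show the ordering rule the patch enforces.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for a workqueue item that needs the lock to make progress. */
static void *do_deferred_free(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&big_lock);	/* blocks while main() holds it */
	puts("deferred free ran");
	pthread_mutex_unlock(&big_lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_mutex_lock(&big_lock);
	/* ... shrink objects, queue deferred frees ... */
	pthread_create(&worker, NULL, do_deferred_free, NULL);

	/*
	 * Joining the worker here, while still holding big_lock, would
	 * deadlock: it cannot finish until we unlock.  That is the shape
	 * of calling synchronize_rcu_expedited() under struct_mutex when
	 * the grace period needs a workqueue that is blocked on the lock.
	 */
	pthread_mutex_unlock(&big_lock);
	pthread_join(worker, NULL);	/* wait only after dropping the lock */

	return 0;
}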