@@ -4124,7 +4124,7 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_FREED) {
synchronize_rcu();
- flush_work(&dev_priv->mm.free_work);
+ i915_gem_drain_freed_objects(dev_priv);
}
return ret;
@@ -545,8 +545,7 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
i915_gem_context_fini(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
- rcu_barrier();
- flush_work(&dev_priv->mm.free_work);
+ i915_gem_drain_freed_objects(dev_priv);
WARN_ON(!list_empty(&dev_priv->context_list));
}
@@ -3208,6 +3208,19 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);
+static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+{
+ /* A single pass should suffice to release all the freed objects (along
+ * most call paths), but be a little more paranoid here: freeing the
+ * objects does take a small amount of time, during which the RCU
+ * callbacks could have added new objects onto the freed list and
+ * armed the work again.
+ */
+ do {
+ rcu_barrier();
+ } while (flush_work(&i915->mm.free_work));
+}
+
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
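For context (not part of this patch): the drain loop above is only needed because object free is deferred through an RCU callback, which pushes the object onto a lockless list and (re)arms mm.free_work. The sketch below illustrates that producer side in simplified form; the function and field names (free_object_rcu, obj->rcu, obj->freed, mm.free_list) are illustrative assumptions, not the exact i915 implementation.

/* Simplified, hypothetical sketch of the deferred-free producer side:
 * the RCU callback queues the freed object onto a lockless list and
 * arms the worker that actually releases it. Because this can happen
 * while the drain is in progress, each rcu_barrier()/flush_work()
 * round may leave fresh work behind, hence the loop in the helper.
 */
static void free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* Add to the freed list; if it was empty, (re)arm the worker. */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		schedule_work(&i915->mm.free_work);
}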
@@ -4263,8 +4263,14 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->gt.retire_work);
- flush_delayed_work(&dev_priv->gt.idle_work);
- flush_work(&dev_priv->mm.free_work);
+
+ /* As the idle_work re-arms itself if it detects a race, play safe and
+ * repeat the flush until it is definitely idle.
+ */
+ while (flush_delayed_work(&dev_priv->gt.idle_work))
+ ;
+
+ i915_gem_drain_freed_objects(dev_priv);
/* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.