[v3,1/2] drm/i915: Reduce recursive mutex locking from the shrinker

Message ID: 20190104145446.18678-1-tvrtko.ursulin@linux.intel.com

Commit Message

Tvrtko Ursulin Jan. 4, 2019, 2:54 p.m. UTC
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

In two codepaths internal to the shrinker we know we will end up taking
the recursive mutex path.

It feels more elegant to avoid this altogether and not call
mutex_trylock_recursive in those cases.
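
For reference, the recursion handling lives in shrinker_lock(), which is
built around mutex_trylock_recursive(). A simplified sketch of that
pattern (not part of this patch, and eliding the spin-on-contention the
real helper does on trylock failure):

static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock)
{
	switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) {
	case MUTEX_TRYLOCK_RECURSIVE:
		/*
		 * struct_mutex is already held by this task, so the
		 * caller must not unlock it on the way out.
		 */
		*unlock = false;
		return true;

	case MUTEX_TRYLOCK_FAILED:
		/* Held by another task - give up (simplified). */
		*unlock = false;
		return false;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;
	}

	return false;
}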

We achieve this by extracting the shrinking logic into __i915_gem_shrink,
which is wrapped by the struct_mutex-taking i915_gem_shrink.

At the same time, move taking the runtime pm reference so it always
happens in the usual order, before struct_mutex.
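
In other words, the entry point now nests as follows (a sketch ignoring
error handling and the I915_SHRINK_BOUND conditionals; the full version
is in the diff below):

	intel_runtime_pm_get_if_in_use(i915);	/* 1: wakeref first */
	shrinker_lock(i915, &unlock);		/* 2: then struct_mutex */
	count = __i915_gem_shrink(i915, target, nr_scanned, flags);
	shrinker_unlock(i915, unlock);
	intel_runtime_pm_put(i915);		/* drop in reverse order */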

v2:
 * Don't use flags, instead split i915_gem_shrink into locked and
   unlocked variants.

v3:
 * Fix whitespace and checkpatch reported errors.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h          |   2 +-
 drivers/gpu/drm/i915/i915_gem_shrinker.c | 154 ++++++++++++-----------
 2 files changed, 85 insertions(+), 71 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7fa2a405c5fe..e8514c4afbb2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3178,7 +3178,7 @@  i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
 unsigned long i915_gem_shrink(struct drm_i915_private *i915,
 			      unsigned long target,
 			      unsigned long *nr_scanned,
-			      unsigned flags);
+			      unsigned int flags);
 #define I915_SHRINK_PURGEABLE 0x1
 #define I915_SHRINK_UNBOUND 0x2
 #define I915_SHRINK_BOUND 0x4
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index ea90d3a0d511..ba0e0ca31d30 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -117,36 +117,11 @@  static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
 	return !i915_gem_object_has_pages(obj);
 }
 
-/**
- * i915_gem_shrink - Shrink buffer object caches
- * @i915: i915 device
- * @target: amount of memory to make available, in pages
- * @nr_scanned: optional output for number of pages scanned (incremental)
- * @flags: control flags for selecting cache types
- *
- * This function is the main interface to the shrinker. It will try to release
- * up to @target pages of main memory backing storage from buffer objects.
- * Selection of the specific caches can be done with @flags. This is e.g. useful
- * when purgeable objects should be removed from caches preferentially.
- *
- * Note that it's not guaranteed that released amount is actually available as
- * free system memory - the pages might still be in-used to due to other reasons
- * (like cpu mmaps) or the mm core has reused them before we could grab them.
- * Therefore code that needs to explicitly shrink buffer objects caches (e.g. to
- * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
- *
- * Also note that any kind of pinning (both per-vma address space pins and
- * backing storage pins at the buffer object level) result in the shrinker code
- * having to skip the object.
- *
- * Returns:
- * The number of pages of backing storage actually released.
- */
-unsigned long
-i915_gem_shrink(struct drm_i915_private *i915,
-		unsigned long target,
-		unsigned long *nr_scanned,
-		unsigned flags)
+static unsigned long
+__i915_gem_shrink(struct drm_i915_private *i915,
+		  unsigned long target,
+		  unsigned long *nr_scanned,
+		  unsigned int flags)
 {
 	const struct {
 		struct list_head *list;
@@ -158,10 +133,8 @@  i915_gem_shrink(struct drm_i915_private *i915,
 	}, *phase;
 	unsigned long count = 0;
 	unsigned long scanned = 0;
-	bool unlock;
 
-	if (!shrinker_lock(i915, &unlock))
-		return 0;
+	lockdep_assert_held(&i915->drm.struct_mutex);
 
 	/*
 	 * When shrinking the active list, also consider active contexts.
@@ -177,18 +150,8 @@  i915_gem_shrink(struct drm_i915_private *i915,
 				       I915_WAIT_LOCKED,
 				       MAX_SCHEDULE_TIMEOUT);
 
-	trace_i915_gem_shrink(i915, target, flags);
 	i915_retire_requests(i915);
 
-	/*
-	 * Unbinding of objects will require HW access; Let us not wake the
-	 * device just to recover a little memory. If absolutely necessary,
-	 * we will force the wake during oom-notifier.
-	 */
-	if ((flags & I915_SHRINK_BOUND) &&
-	    !intel_runtime_pm_get_if_in_use(i915))
-		flags &= ~I915_SHRINK_BOUND;
-
 	/*
 	 * As we may completely rewrite the (un)bound list whilst unbinding
 	 * (due to retiring requests) we have to strictly process only
@@ -267,15 +230,70 @@  i915_gem_shrink(struct drm_i915_private *i915,
 		spin_unlock(&i915->mm.obj_lock);
 	}
 
-	if (flags & I915_SHRINK_BOUND)
-		intel_runtime_pm_put(i915);
-
 	i915_retire_requests(i915);
 
-	shrinker_unlock(i915, unlock);
-
 	if (nr_scanned)
 		*nr_scanned += scanned;
+
+	return count;
+}
+
+/**
+ * i915_gem_shrink - Shrink buffer object caches
+ * @i915: i915 device
+ * @target: amount of memory to make available, in pages
+ * @nr_scanned: optional output for number of pages scanned (incremental)
+ * @flags: control flags for selecting cache types
+ *
+ * This function is the main interface to the shrinker. It will try to release
+ * up to @target pages of main memory backing storage from buffer objects.
+ * Selection of the specific caches can be done with @flags. This is e.g. useful
+ * when purgeable objects should be removed from caches preferentially.
+ *
+ * Note that the released amount is not guaranteed to actually be available as
+ * free system memory - the pages might still be in use due to other reasons
+ * (like cpu mmaps) or the mm core has reused them before we could grab them.
+ * Therefore code that needs to explicitly shrink buffer object caches (e.g. to
+ * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
+ *
+ * Also note that any kind of pinning (both per-vma address space pins and
+ * backing storage pins at the buffer object level) results in the shrinker code
+ * having to skip the object.
+ *
+ * Returns:
+ * The number of pages of backing storage actually released.
+ */
+unsigned long
+i915_gem_shrink(struct drm_i915_private *i915,
+		unsigned long target,
+		unsigned long *nr_scanned,
+		unsigned int flags)
+{
+	unsigned long count = 0;
+	bool unlock;
+
+	trace_i915_gem_shrink(i915, target, flags);
+
+	/*
+	 * Unbinding of objects will require HW access; Let us not wake the
+	 * device just to recover a little memory. If absolutely necessary,
+	 * we will force the wake during oom-notifier.
+	 */
+	if ((flags & I915_SHRINK_BOUND) &&
+	    !intel_runtime_pm_get_if_in_use(i915))
+		flags &= ~I915_SHRINK_BOUND;
+
+	if (!shrinker_lock(i915, &unlock))
+		goto out;
+
+	count = __i915_gem_shrink(i915, target, nr_scanned, flags);
+
+	shrinker_unlock(i915, unlock);
+
+out:
+	if (flags & I915_SHRINK_BOUND)
+		intel_runtime_pm_put(i915);
+
 	return count;
 }
 
@@ -352,6 +370,7 @@  i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *i915 =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
+	const unsigned int flags = I915_SHRINK_BOUND | I915_SHRINK_UNBOUND;
 	unsigned long freed;
 	bool unlock;
 
@@ -360,26 +379,21 @@  i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	if (!shrinker_lock(i915, &unlock))
 		return SHRINK_STOP;
 
-	freed = i915_gem_shrink(i915,
-				sc->nr_to_scan,
-				&sc->nr_scanned,
-				I915_SHRINK_BOUND |
-				I915_SHRINK_UNBOUND |
-				I915_SHRINK_PURGEABLE);
+	freed = __i915_gem_shrink(i915,
+				  sc->nr_to_scan,
+				  &sc->nr_scanned,
+				  flags | I915_SHRINK_PURGEABLE);
 	if (sc->nr_scanned < sc->nr_to_scan)
-		freed += i915_gem_shrink(i915,
-					 sc->nr_to_scan - sc->nr_scanned,
-					 &sc->nr_scanned,
-					 I915_SHRINK_BOUND |
-					 I915_SHRINK_UNBOUND);
+		freed += __i915_gem_shrink(i915,
+					   sc->nr_to_scan - sc->nr_scanned,
+					   &sc->nr_scanned,
+					   flags);
 	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
 		intel_runtime_pm_get(i915);
-		freed += i915_gem_shrink(i915,
-					 sc->nr_to_scan - sc->nr_scanned,
-					 &sc->nr_scanned,
-					 I915_SHRINK_ACTIVE |
-					 I915_SHRINK_BOUND |
-					 I915_SHRINK_UNBOUND);
+		freed += __i915_gem_shrink(i915,
+					   sc->nr_to_scan - sc->nr_scanned,
+					   &sc->nr_scanned,
+					   flags | I915_SHRINK_ACTIVE);
 		intel_runtime_pm_put(i915);
 	}
 
@@ -477,11 +491,11 @@  i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 		goto out;
 
 	intel_runtime_pm_get(i915);
-	freed_pages += i915_gem_shrink(i915, -1UL, NULL,
-				       I915_SHRINK_BOUND |
-				       I915_SHRINK_UNBOUND |
-				       I915_SHRINK_ACTIVE |
-				       I915_SHRINK_VMAPS);
+	freed_pages += __i915_gem_shrink(i915, -1UL, NULL,
+					 I915_SHRINK_BOUND |
+					 I915_SHRINK_UNBOUND |
+					 I915_SHRINK_ACTIVE |
+					 I915_SHRINK_VMAPS);
 	intel_runtime_pm_put(i915);
 
 	/* We also want to clear any cached iomaps as they wrap vmap */
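
With __i915_gem_shrink() asserting rather than taking struct_mutex, any
future internal caller has to make its locking explicit at the call
site. A hypothetical same-file caller (example_reclaim() is illustrative
only, not part of this patch) would follow the same pattern as the paths
above:

static void example_reclaim(struct drm_i915_private *i915)
{
	bool unlock;

	/* Runtime pm reference first, then struct_mutex - the usual order. */
	intel_runtime_pm_get(i915);

	if (!shrinker_lock(i915, &unlock))
		goto out;

	/*
	 * The lockdep_assert_held() inside __i915_gem_shrink() catches
	 * any caller that reaches here without struct_mutex held.
	 */
	__i915_gem_shrink(i915, -1UL, NULL,
			  I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);

	shrinker_unlock(i915, unlock);
out:
	intel_runtime_pm_put(i915);
}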