[RFC,1/4] drm/i915: Reduce recursive mutex locking from the shrinker

Message ID 20190103122329.19948-2-tvrtko.ursulin@linux.intel.com (mailing list archive)
State New, archived
Series Shrinker tweaks/fixes?

Commit Message

Tvrtko Ursulin Jan. 3, 2019, 12:23 p.m. UTC
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

In two codepaths internal to the shrinker we know we will end up taking
the recursive mutex path.

It feels more elegant to avoid this altogether and not call
mutex_trylock_recursive in those cases.

We achieve this by adding a new I915_SHRINK_LOCKED flag which gets passed
to i915_gem_shrink by the internal callers.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h          | 11 ++++++-----
 drivers/gpu/drm/i915/i915_gem_shrinker.c | 18 ++++++++----------
 2 files changed, 14 insertions(+), 15 deletions(-)
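
For context, the "recursive mutex path" mentioned above is shrinker_lock()
in i915_gem_shrinker.c, which is built around mutex_trylock_recursive().
A simplified sketch of that pattern (not the verbatim i915 code; the real
MUTEX_TRYLOCK_FAILED case also spin-waits briefly before giving up):

static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock)
{
	switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) {
	case MUTEX_TRYLOCK_RECURSIVE:
		/* struct_mutex is already held by this task: keep it held. */
		*unlock = false;
		return true;
	case MUTEX_TRYLOCK_SUCCESS:
		/* We took the lock: drop it again when shrinking is done. */
		*unlock = true;
		return true;
	case MUTEX_TRYLOCK_FAILED:
	default:
		/* Held by another task: we cannot shrink safely. */
		return false;
	}
}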

Comments

Chris Wilson Jan. 3, 2019, 12:29 p.m. UTC | #1
Quoting Tvrtko Ursulin (2019-01-03 12:23:26)
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> In two codepaths internal to the shrinker we know we will end up taking
> the recursive mutex path.
> 
> It feels more elegant to avoid this altogether and not call
> mutex_trylock_recursive in those cases.
> 
> We achieve this by adding a new I915_SHRINK_LOCKED flag which gets passed
> to i915_gem_shrink by the internal callers.

Since we reach here from many paths with struct_mutex either locked or
unlocked, special casing one such seems silly. Especially when removing
struct_mutex from here entirely is within our grasp.
-Chris
Tvrtko Ursulin Jan. 3, 2019, 1:09 p.m. UTC | #2
On 03/01/2019 12:29, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2019-01-03 12:23:26)
>> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>
>> In two codepaths internal to the shrinker we know we will end up taking
>> the recursive mutex path.
>>
>> It feels more elegant to avoid this altogether and not call
>> mutex_trylock_recursive in those cases.
>>
>> We achieve this by adding a new I915_SHRINK_LOCKED flag which gets passed
>> to i915_gem_shrink by the internal callers.
> 
> Since we reach here from many paths with struct_mutex either locked or
> unlocked, special casing one such seems silly. Especially when removing
> struct_mutex from here entirely is within our grasp.

Hm, for me it is silly that we rely on mutex_trylock_recursive to handle
what the code already knows. Would you be happy instead with an approach
where __i915_gem_shrink does no locking and i915_gem_shrink wraps it
with locking (roughly as sketched below)?

Regards,

Tvrtko
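
A rough sketch of the wrapper split suggested here (hypothetical, not
part of the posted patch; the helper names only illustrate the proposal,
and shrinker_unlock() is assumed to be the existing unlock helper in
this file):

static unsigned long
__i915_gem_shrink(struct drm_i915_private *i915,
		  unsigned long target,
		  unsigned long *nr_scanned,
		  unsigned flags)
{
	/* Caller guarantees struct_mutex is held; no trylock games here. */
	lockdep_assert_held(&i915->drm.struct_mutex);

	/* ... the existing shrink loop, minus all locking ... */
	return 0;
}

unsigned long
i915_gem_shrink(struct drm_i915_private *i915,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned flags)
{
	unsigned long count;
	bool unlock;

	/* External entry point: take (or detect) struct_mutex, then shrink. */
	if (!shrinker_lock(i915, &unlock))
		return 0;

	count = __i915_gem_shrink(i915, target, nr_scanned, flags);

	shrinker_unlock(i915, unlock);

	return count;
}

Internal callers that already hold struct_mutex would then call
__i915_gem_shrink() directly, making the I915_SHRINK_LOCKED flag
unnecessary.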

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7fa2a405c5fe..63e0951561fc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3179,11 +3179,12 @@  unsigned long i915_gem_shrink(struct drm_i915_private *i915,
 			      unsigned long target,
 			      unsigned long *nr_scanned,
 			      unsigned flags);
-#define I915_SHRINK_PURGEABLE 0x1
-#define I915_SHRINK_UNBOUND 0x2
-#define I915_SHRINK_BOUND 0x4
-#define I915_SHRINK_ACTIVE 0x8
-#define I915_SHRINK_VMAPS 0x10
+#define I915_SHRINK_LOCKED	BIT(0)
+#define I915_SHRINK_PURGEABLE	BIT(1)
+#define I915_SHRINK_UNBOUND	BIT(2)
+#define I915_SHRINK_BOUND	BIT(3)
+#define I915_SHRINK_ACTIVE	BIT(4)
+#define I915_SHRINK_VMAPS	BIT(5)
 unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
 void i915_gem_shrinker_register(struct drm_i915_private *i915);
 void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index ea90d3a0d511..d07ee5921e5e 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -158,9 +158,9 @@  i915_gem_shrink(struct drm_i915_private *i915,
 	}, *phase;
 	unsigned long count = 0;
 	unsigned long scanned = 0;
-	bool unlock;
+	bool unlock = false;
 
-	if (!shrinker_lock(i915, &unlock))
+	if (!(flags & I915_SHRINK_LOCKED) && !shrinker_lock(i915, &unlock))
 		return 0;
 
 	/*
@@ -352,6 +352,8 @@  i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *i915 =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
+	const unsigned flags =
+		I915_SHRINK_LOCKED | I915_SHRINK_BOUND | I915_SHRINK_UNBOUND;
 	unsigned long freed;
 	bool unlock;
 
@@ -363,23 +365,18 @@  i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	freed = i915_gem_shrink(i915,
 				sc->nr_to_scan,
 				&sc->nr_scanned,
-				I915_SHRINK_BOUND |
-				I915_SHRINK_UNBOUND |
-				I915_SHRINK_PURGEABLE);
+				flags | I915_SHRINK_PURGEABLE);
 	if (sc->nr_scanned < sc->nr_to_scan)
 		freed += i915_gem_shrink(i915,
 					 sc->nr_to_scan - sc->nr_scanned,
 					 &sc->nr_scanned,
-					 I915_SHRINK_BOUND |
-					 I915_SHRINK_UNBOUND);
+					 flags);
 	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
 		intel_runtime_pm_get(i915);
 		freed += i915_gem_shrink(i915,
 					 sc->nr_to_scan - sc->nr_scanned,
 					 &sc->nr_scanned,
-					 I915_SHRINK_ACTIVE |
-					 I915_SHRINK_BOUND |
-					 I915_SHRINK_UNBOUND);
+					 flags | I915_SHRINK_ACTIVE);
 		intel_runtime_pm_put(i915);
 	}
 
@@ -478,6 +475,7 @@  i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 
 	intel_runtime_pm_get(i915);
 	freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+				       I915_SHRINK_LOCKED |
 				       I915_SHRINK_BOUND |
 				       I915_SHRINK_UNBOUND |
 				       I915_SHRINK_ACTIVE |
 				       I915_SHRINK_VMAPS);