[09/19] drm/i915: Change shrink ordering to use locking around unbinding.

Message ID 20210830121006.2978297-10-maarten.lankhorst@linux.intel.com (mailing list archive)
State: New, archived
Series: drm/i915: Short-term pinning and async eviction.

Commit Message

Maarten Lankhorst Aug. 30, 2021, 12:09 p.m. UTC
Call drop_pages() with the gem object lock held, instead of only taking
the lock afterwards. This will allow us to drop the vma bindings with
the gem object lock held.

We plan to require the object lock for unpinning in the future,
and this is an easy target.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 42 ++++++++++----------
 1 file changed, 21 insertions(+), 21 deletions(-)
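
In outline, the loop in i915_gem_shrink() flips from "drop the pages,
then take the object lock for put_pages" to "take the object lock, then
drop the pages under it". A simplified sketch of the two orderings (not
the literal hunks; the conditionals, writeback and error handling are
omitted):

	/* Old ordering: pages dropped before the object lock is taken */
	unsafe_drop_pages(obj, shrink, trylock_vm);
	i915_gem_object_lock(obj, ww);		/* trylock when !ww */
	__i915_gem_object_put_pages(obj);
	i915_gem_object_unlock(obj);

	/*
	 * New ordering: object lock first, so drop_pages() and the vma
	 * unbinding it does happen under the same lock as put_pages().
	 */
	i915_gem_object_lock(obj, ww);		/* trylock when !ww */
	drop_pages(obj, shrink, trylock_vm);
	__i915_gem_object_put_pages(obj);
	i915_gem_object_unlock(obj);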

Comments

Niranjana Vishwanathapura Sept. 8, 2021, 1:04 a.m. UTC | #1
On Mon, Aug 30, 2021 at 02:09:56PM +0200, Maarten Lankhorst wrote:
>Call drop_pages() with the gem object lock held, instead of only taking
>the lock afterwards. This will allow us to drop the vma bindings with
>the gem object lock held.
>
>We plan to require the object lock for unpinning in the future,
>and this is an easy target.
>
>Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>

Looks good to me; we need this for the upcoming vm_bind as well.
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 5ab136ffdeb2..7f7849b6296d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -37,8 +37,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
 }
 
-static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
-			      unsigned long shrink, bool trylock_vm)
+static int drop_pages(struct drm_i915_gem_object *obj,
+		       unsigned long shrink, bool trylock_vm)
 {
 	unsigned long flags;
 
@@ -209,27 +209,27 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 
 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 
-			err = 0;
-			if (unsafe_drop_pages(obj, shrink, trylock_vm)) {
-				/* May arrive from get_pages on another bo */
-				if (!ww) {
-					if (!i915_gem_object_trylock(obj))
-						goto skip;
-				} else {
-					err = i915_gem_object_lock(obj, ww);
-					if (err)
-						goto skip;
-				}
-
-				if (!__i915_gem_object_put_pages(obj)) {
-					try_to_writeback(obj, shrink);
-					count += obj->base.size >> PAGE_SHIFT;
-				}
-				if (!ww)
-					i915_gem_object_unlock(obj);
+			/* May arrive from get_pages on another bo */
+			if (!ww) {
+				if (!i915_gem_object_trylock(obj))
+					goto skip;
+			} else {
+				err = i915_gem_object_lock(obj, ww);
+				if (err)
+					goto skip;
 			}
 
-			dma_resv_prune(obj->base.resv);
+			if (drop_pages(obj, shrink, trylock_vm) &&
+			    !__i915_gem_object_put_pages(obj)) {
+				try_to_writeback(obj, shrink);
+				count += obj->base.size >> PAGE_SHIFT;
+			}
+
+			if (dma_resv_test_signaled(obj->base.resv, true))
+				dma_resv_add_excl_fence(obj->base.resv, NULL);
+
+			if (!ww)
+				i915_gem_object_unlock(obj);
 
 			scanned += obj->base.size >> PAGE_SHIFT;
 skip: