[2/2] drm/i915/gem: Rework i915_gem_object_unbind to rely on the object lock

Message ID: 20220704135249.8241-3-thomas.hellstrom@linux.intel.com
State: New, archived
Series: Protect vma destruction with the object lock

Commit Message

Thomas Hellstrom July 4, 2022, 1:52 p.m. UTC
Since vma destruction from the vm destruction path now takes the
object lock, we no longer need to take a vm reference from
i915_gem_object_unbind() to keep the vmas alive; the object lock
that callers are required to hold across i915_gem_object_unbind()
is sufficient.

Also drop the I915_GEM_OBJECT_UNBIND_BARRIER flag and its retry
loop, since we can no longer race with vm destruction.
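
For illustration (a sketch, not part of the patch): the calling
pattern this change assumes. The caller holds the object lock across
the unbind, and that lock is what now keeps the vmas on obj->vma.list
alive, because the vm destruction path takes the same lock before
destroying a vma. i915_gem_object_lock()/i915_gem_object_unlock() are
the existing i915 helpers for the object's dma-resv lock:

	int err;

	/* Take the object (dma-resv) lock; NULL means no ww context. */
	err = i915_gem_object_lock(obj, NULL);
	if (err)
		return err;

	/*
	 * Safe without a vm reference: vma destruction also takes the
	 * object lock, so the vma list cannot be torn down under us.
	 */
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);

	i915_gem_object_unlock(obj);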

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_domain.c |  4 +---
 drivers/gpu/drm/i915/i915_drv.h            |  7 +++----
 drivers/gpu/drm/i915/i915_gem.c            | 18 ------------------
 3 files changed, 4 insertions(+), 25 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 1674b0c5802b..cf1f39f80603 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -279,9 +279,7 @@  int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	}
 
 	/* The cache-level will be applied when each vma is rebound. */
-	return i915_gem_object_unbind(obj,
-				      I915_GEM_OBJECT_UNBIND_ACTIVE |
-				      I915_GEM_OBJECT_UNBIND_BARRIER);
+	return i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
 }
 
 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c22f29c3faa0..6034e6319b6c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1415,10 +1415,9 @@  i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 			   unsigned long flags);
 #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
-#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
-#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
-#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)
-#define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4)
+#define I915_GEM_OBJECT_UNBIND_TEST BIT(1)
+#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(2)
+#define I915_GEM_OBJECT_UNBIND_ASYNC BIT(3)
 
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 702e5b89be22..df16a5e8f5c7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -137,7 +137,6 @@  int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 	 */
 	wakeref = intel_runtime_pm_get(rpm);
 
-try_again:
 	ret = 0;
 	spin_lock(&obj->vma.lock);
 	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
@@ -152,17 +151,6 @@  int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 			break;
 		}
 
-		/*
-		 * Requiring the vm destructor to take the object lock
-		 * before destroying a vma would help us eliminate the
-		 * i915_vm_tryget() here, AND thus also the barrier stuff
-		 * at the end. That's an easy fix, but sleeping locks in
-		 * a kthread should generally be avoided.
-		 */
-		ret = -EAGAIN;
-		if (!i915_vm_tryget(vma->vm))
-			break;
-
 		spin_unlock(&obj->vma.lock);
 
 		/*
@@ -189,17 +177,11 @@  int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 			}
 		}
 
-		i915_vm_put(vma->vm);
 		spin_lock(&obj->vma.lock);
 	}
 	list_splice_init(&still_in_list, &obj->vma.list);
 	spin_unlock(&obj->vma.lock);
 
-	if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
-		rcu_barrier(); /* flush the i915_vm_release() */
-		goto try_again;
-	}
-
 	intel_runtime_pm_put(rpm, wakeref);
 
 	return ret;
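
A possible follow-up (a sketch only, not in this patch): since the
function now relies on the caller holding the object lock, the new
requirement could be made explicit at the top of
i915_gem_object_unbind() with the existing assert_object_held()
helper, which checks the object's dma-resv lock:

	/*
	 * Sketch, not part of this patch: assert_object_held() expands
	 * to dma_resv_assert_held() on the object's reservation lock,
	 * which is what keeps obj->vma.list stable against concurrent
	 * vma destruction after this change.
	 */
	assert_object_held(obj);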