
[RFC,087/162] drm/i915: Delay publishing objects on the eviction lists

Message ID 20201127120718.454037-88-matthew.auld@intel.com (mailing list archive)
State New, archived
Series DG1 + LMEM enabling

Commit Message

Matthew Auld Nov. 27, 2020, 12:06 p.m. UTC
From: Thomas Hellström <thomas.hellstrom@intel.com>

Once an object is published on an eviction list, it becomes a candidate
for eviction and can be locked by other threads. That is strictly
unnecessary until the object actually has pages. To cut down on
eviction lookups that merely find and discard a pageless object, and to
extend the window during which we can lock the object in isolation
(trylock or ww lock with no risk of deadlock or interruption), delay
eviction list publishing until pages are set. Likewise, take the object
off the eviction lists when its pages are unset. Finally, ensure that
an object is either locked or isolated whenever the eviction lists are
manipulated.
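
For illustration only (an assumption about how eviction code later in
the series might consume the lists, not part of this patch): once
publishing is delayed, every entry on mem->objects.list is guaranteed
to have pages, so a walker can trylock entries under the region mutex
without first having to filter out half-constructed objects. A minimal
sketch, where evict_one() is a hypothetical helper:

	struct drm_i915_gem_object *obj, *next;

	mutex_lock(&mem->objects.lock);
	list_for_each_entry_safe(obj, next, &mem->objects.list,
				 mm.region_link) {
		/* Entries are only published once pages are set. */
		if (!dma_resv_trylock(obj->base.resv))
			continue;

		evict_one(obj);	/* hypothetical: migrate or swap out */
		dma_resv_unlock(obj->base.resv);
	}
	mutex_unlock(&mem->objects.lock);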

Signed-off-by: Thomas Hellström <thomas.hellstrom@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_object.c |  2 ++
 drivers/gpu/drm/i915/gem/i915_gem_pages.c  | 22 +++++++++++++++++++++-
 drivers/gpu/drm/i915/gem/i915_gem_region.c | 18 ++----------------
 3 files changed, 25 insertions(+), 17 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 08d806bbf48e..5326b4b5a9f7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -66,6 +66,7 @@  void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&obj->vma.list);
 
 	INIT_LIST_HEAD(&obj->mm.link);
+	INIT_LIST_HEAD(&obj->mm.region_link);
 
 	INIT_LIST_HEAD(&obj->lut_list);
 	spin_lock_init(&obj->lut_lock);
@@ -79,6 +80,7 @@  void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
 	obj->flags = flags;
 
+	obj->mm.region = NULL;
 	obj->mm.madv = I915_MADV_WILLNEED;
 	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
 	mutex_init(&obj->mm.get_page.lock);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 4a8be759832b..eacad971b955 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -16,6 +16,8 @@  void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	unsigned long supported = INTEL_INFO(i915)->page_sizes;
+	struct intel_memory_region *mem;
+	struct list_head *list;
 	int i;
 
 	assert_object_held_shared(obj);
@@ -64,7 +66,6 @@  void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
 
 	if (i915_gem_object_is_shrinkable(obj)) {
-		struct list_head *list;
 		unsigned long flags;
 
 		assert_object_held(obj);
@@ -82,6 +83,18 @@  void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 		atomic_set(&obj->mm.shrink_pin, 0);
 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
+
+	mem = obj->mm.region;
+	if (mem) {
+		mutex_lock(&mem->objects.lock);
+		GEM_WARN_ON(!list_empty(&obj->mm.region_link));
+		if (obj->mm.madv != I915_MADV_WILLNEED)
+			list = &mem->objects.purgeable;
+		else
+			list = &mem->objects.list;
+		list_move_tail(&obj->mm.region_link, list);
+		mutex_unlock(&mem->objects.lock);
+	}
 }
 
 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
@@ -192,6 +205,7 @@  static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
 struct sg_table *
 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
+	struct intel_memory_region *mem = obj->mm.region;
 	struct sg_table *pages;
 
 	assert_object_held_shared(obj);
@@ -205,6 +219,12 @@  __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 
 	i915_gem_object_make_unshrinkable(obj);
 
+	if (mem) {
+		mutex_lock(&mem->objects.lock);
+		list_del_init(&obj->mm.region_link);
+		mutex_unlock(&mem->objects.lock);
+	}
+
 	if (obj->mm.mapping) {
 		unmap_object(obj, page_mask_bits(obj->mm.mapping));
 		obj->mm.mapping = NULL;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 6a96741253b3..58bf5f9e3199 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -105,30 +105,16 @@  void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
 					struct intel_memory_region *mem)
 {
 	INIT_LIST_HEAD(&obj->mm.blocks);
+	WARN_ON(i915_gem_object_has_pages(obj));
 	obj->mm.region = intel_memory_region_get(mem);
 
 	if (obj->base.size <= mem->min_page_size)
 		obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
-
-	mutex_lock(&mem->objects.lock);
-
-	if (obj->flags & I915_BO_ALLOC_VOLATILE)
-		list_add(&obj->mm.region_link, &mem->objects.purgeable);
-	else
-		list_add(&obj->mm.region_link, &mem->objects.list);
-
-	mutex_unlock(&mem->objects.lock);
 }
 
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
 {
-	struct intel_memory_region *mem = obj->mm.region;
-
-	mutex_lock(&mem->objects.lock);
-	list_del(&obj->mm.region_link);
-	mutex_unlock(&mem->objects.lock);
-
-	intel_memory_region_put(mem);
+	intel_memory_region_put(obj->mm.region);
 }
 
 struct drm_i915_gem_object *
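
A side note on why i915_gem_object_release_memory_region() can drop the
list removal entirely: mm.region_link is initialized as an empty list
head at object init and detached with list_del_init() when pages are
unset, so by release time the link is always a self-pointing empty
head. With the kernel list API that makes the release path safe without
taking mem->objects.lock. Illustrative only:

	INIT_LIST_HEAD(&obj->mm.region_link);
	GEM_BUG_ON(!list_empty(&obj->mm.region_link));	/* holds */
	list_del_init(&obj->mm.region_link);		/* safe no-op */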