diff mbox series

[RFC,155/162] drm/i915: Use a ww transaction in the fault handler

Message ID 20201127120718.454037-156-matthew.auld@intel.com (mailing list archive)
State New, archived
Headers show
Series DG1 + LMEM enabling | expand

Commit Message

Matthew Auld Nov. 27, 2020, 12:07 p.m. UTC
From: Thomas Hellström <thomas.hellstrom@intel.com>

Prefer a ww transaction over a single object lock so that sleeping
lock eviction is possible when the object is reached from the fault
handler.

Signed-off-by: Thomas Hellström <thomas.hellstrom@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c | 45 +++++++++++++-----------
 1 file changed, 24 insertions(+), 21 deletions(-)
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 33ccd4d665d4..a9526cc309d3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -238,6 +238,7 @@  static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
 	struct vm_area_struct *area = vmf->vma;
 	struct i915_mmap_offset *mmo = area->vm_private_data;
 	struct drm_i915_gem_object *obj = mmo->obj;
+	struct i915_gem_ww_ctx ww;
 	resource_size_t iomap;
 	int err;
 
@@ -246,33 +247,35 @@  static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
 		     area->vm_flags & VM_WRITE))
 		return VM_FAULT_SIGBUS;
 
-	if (i915_gem_object_lock_interruptible(obj, NULL))
-		return VM_FAULT_NOPAGE;
+	for_i915_gem_ww(&ww, err, true) {
+		err = i915_gem_object_lock(obj, &ww);
+		if (err)
+			continue;
 
-	err = i915_gem_object_pin_pages(obj);
-	if (err)
-		goto out;
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			continue;
 
-	iomap = -1;
-	if (!i915_gem_object_has_struct_page(obj)) {
-		iomap = obj->mm.region->iomap.base;
-		iomap -= obj->mm.region->region.start;
-	}
+		iomap = -1;
+		if (!i915_gem_object_has_struct_page(obj)) {
+			iomap = obj->mm.region->iomap.base;
+			iomap -= obj->mm.region->region.start;
+		}
 
-	/* PTEs are revoked in obj->ops->put_pages() */
-	err = remap_io_sg(area,
-			  area->vm_start, area->vm_end - area->vm_start,
-			  obj->mm.pages->sgl, iomap);
+		/* PTEs are revoked in obj->ops->put_pages() */
+		err = remap_io_sg(area,
+				  area->vm_start, area->vm_end - area->vm_start,
+				  obj->mm.pages->sgl, iomap);
 
-	if (area->vm_flags & VM_WRITE) {
-		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-		obj->mm.dirty = true;
-	}
+		if (area->vm_flags & VM_WRITE) {
+			GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+			obj->mm.dirty = true;
+		}
 
-	i915_gem_object_unpin_pages(obj);
+		i915_gem_object_unpin_pages(obj);
+		/* Implicit unlock */
+	}
 
-out:
-	i915_gem_object_unlock(obj);
 	return i915_error_to_vmf_fault(err);
 }