
drm/i915: Split out GTT fault handler to make it generic

Message ID 20190604103723.23041-1-abdiel.janulgue@linux.intel.com
State New, archived
Series drm/i915: Split out GTT fault handler to make it generic

Commit Message

Abdiel Janulgue June 4, 2019, 10:37 a.m. UTC
Allow reuse of the fault-handling code in preparation for having
multiple fault handlers depending on the mmapping type and backing
storage.

Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c | 208 ++++++++++++++---------
 1 file changed, 132 insertions(+), 76 deletions(-)
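
The split factors the handler into a generic prepare/epilogue pair
(__prepare_object_fault() / __object_fault_fini()) around a type-specific
body. As a rough illustration of the intended reuse, a later CPU-mmap fault
handler might be built on the same helpers; the handler name and
page-insertion step below are hypothetical, not part of this patch:

static vm_fault_t i915_gem_fault_cpu(struct vm_fault *vmf)
{
	struct gem_object_fault fault;
	struct page *page;
	int ret;

	/* Generic preamble: pin pages, grab rpm wakeref and reset srcu. */
	if (__prepare_object_fault(vmf, false, &fault))
		return fault.error_ret;

	/* Type-specific body: insert a page from the backing store. */
	page = i915_gem_object_get_page(fault.obj, fault.page_offset);
	ret = vm_insert_page(vmf->vma, vmf->address, page);

	/* Generic epilogue: drop locks/refs, map errno to vm_fault_t. */
	return __object_fault_fini(ret, &fault);
}

On success vm_insert_page() returns 0, which __vm_fault_from_error() maps
to VM_FAULT_NOPAGE, the right answer for a handler that installs the PTE
itself.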

Comments

Chris Wilson June 4, 2019, 11 a.m. UTC | #1
Quoting Abdiel Janulgue (2019-06-04 11:37:23)
> Allow reuse of the fault-handling code in preparation for having
> multiple fault handlers depending on the mmapping type and backing
> storage.
> 
> Cc: Matthew Auld <matthew.auld@intel.com>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> 
> Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 208 ++++++++++++++---------
>  1 file changed, 132 insertions(+), 76 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> index c7b9b34de01b..ed20fab66d2f 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> @@ -197,6 +197,133 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
>         return view;
>  }
>  
> +struct gem_object_fault {
> +       struct drm_i915_gem_object *obj;
> +       pgoff_t page_offset;
> +       intel_wakeref_t wakeref;
> +       int srcu;
> +       bool pin_vma;
> +       vm_fault_t error_ret;
> +};
> +
> +static vm_fault_t __vm_fault_from_error(struct drm_i915_private *i915,
> +                                       int fault_ret)
> +{
> +       switch (fault_ret) {
> +       case -EIO:
> +               /*
> +                * We eat errors when the gpu is terminally wedged to avoid
> +                * userspace unduly crashing (gl has no provisions for mmaps to
> +                * fail). But any other -EIO isn't ours (e.g. swap in failure)
> +                * and so needs to be reported.
> +                */
> +               if (!i915_terminally_wedged(i915))
> +                       return VM_FAULT_SIGBUS;
> +               /* else: fall through */
> +       case -EAGAIN:
> +               /*
> +                * EAGAIN means the gpu is hung and we'll wait for the error
> +                * handler to reset everything when re-faulting in
> +                * i915_mutex_lock_interruptible.
> +                */
> +       case 0:
> +       case -ERESTARTSYS:
> +       case -EINTR:
> +       case -EBUSY:
> +               /*
> +                * EBUSY is ok: this just means that another thread
> +                * already did the job.
> +                */
> +               return VM_FAULT_NOPAGE;
> +       case -ENOMEM:
> +               return VM_FAULT_OOM;
> +       case -ENOSPC:
> +       case -EFAULT:
> +               return VM_FAULT_SIGBUS;
> +       default:
> +               WARN_ONCE(fault_ret, "unhandled error in %s: %i\n", __func__,
> +                         fault_ret);
> +               return VM_FAULT_SIGBUS;
> +       }
> +}
> +
> +static int __prepare_object_fault(struct vm_fault *vmf,
> +                                 bool pin_vma,
> +                                 struct gem_object_fault *f)
> +{
> +       struct vm_area_struct *area = vmf->vma;
> +       struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
> +       struct drm_device *dev = obj->base.dev;
> +       struct drm_i915_private *dev_priv = to_i915(dev);
> +       bool write = area->vm_flags & VM_WRITE;
> +       int ret;
> +
> +       /* Sanity check that we allow writing into this object */
> +       if (i915_gem_object_is_readonly(obj) && write) {
> +               f->error_ret = VM_FAULT_SIGBUS;
> +               return -EACCES;
> +       }
> +
> +       f->obj = obj;
> +       /* We don't use vmf->pgoff since that has the fake offset */
> +       f->page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
> +
> +       trace_i915_gem_object_fault(obj, f->page_offset, true, write);
> +
> +       ret = i915_gem_object_pin_pages(obj);
> +       if (ret)
> +               goto err;
> +
> +       f->wakeref = intel_runtime_pm_get(dev_priv);

Not every path requires the device reset.

> +
> +       f->srcu = i915_reset_trylock(dev_priv);

Not every path requires interaction with reset handling.

> +       if (f->srcu < 0) {
> +               ret = f->srcu;
> +               goto err_rpm;
> +       }
> +
> +       f->pin_vma = pin_vma;
> +       if (f->pin_vma) {
> +               ret = i915_mutex_lock_interruptible(dev);

Bah.

> +               if (ret)
> +                       goto err_reset;
> +       }
> +
> +       /* Access to snoopable pages through the GTT is incoherent. */
> +       if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {

And that is very, very specific to one path.

From the sampling here, this is not generic preamble.
-Chris
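
One way to make the preamble opt-in per path, sketched here with
hypothetical I915_FAULT_* flags rather than code from the posted series,
is to let each caller request only the locks it needs:

#define I915_FAULT_WAKEREF BIT(0)
#define I915_FAULT_RESET   BIT(1)

static int __prepare_object_fault(struct vm_fault *vmf,
				  unsigned int flags,
				  struct gem_object_fault *f)
{
	/* ... object lookup and pin_pages as before ... */

	if (flags & I915_FAULT_WAKEREF)
		f->wakeref = intel_runtime_pm_get(dev_priv);

	if (flags & I915_FAULT_RESET) {
		f->srcu = i915_reset_trylock(dev_priv);
		if (f->srcu < 0)
			return f->srcu;
	}

	/* ... */
	return 0;
}
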
Abdiel Janulgue June 4, 2019, 11:20 a.m. UTC | #2
On 06/04/2019 02:00 PM, Chris Wilson wrote:

>> +
>> +       /* Access to snoopable pages through the GTT is incoherent. */
>> +       if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
> 
> And that is very, very specific to one path.
> 
Oops, yep, that should be GTT-fault specific.


> From the sampling here, this is not generic preamble.
> -Chris
>
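
Following up on the review, the snoop-coherency check would move out of the
generic __prepare_object_fault() and back into the GTT-specific handler. A
minimal sketch of that shape (illustrative only, not a posted revision):

vm_fault_t i915_gem_fault(struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj =
		to_intel_bo(vmf->vma->vm_private_data);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct gem_object_fault fault;
	int ret = 0;

	if (__prepare_object_fault(vmf, true, &fault))
		return fault.error_ret;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
		ret = -EFAULT;
		goto out;
	}

	/* ... pin into the GGTT and insert PTEs, as in the patch ... */
out:
	return __object_fault_fini(ret, &fault);
}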

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c7b9b34de01b..ed20fab66d2f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -197,6 +197,133 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
 	return view;
 }
 
+struct gem_object_fault {
+	struct drm_i915_gem_object *obj;
+	pgoff_t page_offset;
+	intel_wakeref_t wakeref;
+	int srcu;
+	bool pin_vma;
+	vm_fault_t error_ret;
+};
+
+static vm_fault_t __vm_fault_from_error(struct drm_i915_private *i915,
+					int fault_ret)
+{
+	switch (fault_ret) {
+	case -EIO:
+		/*
+		 * We eat errors when the gpu is terminally wedged to avoid
+		 * userspace unduly crashing (gl has no provisions for mmaps to
+		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
+		 * and so needs to be reported.
+		 */
+		if (!i915_terminally_wedged(i915))
+			return VM_FAULT_SIGBUS;
+		/* else: fall through */
+	case -EAGAIN:
+		/*
+		 * EAGAIN means the gpu is hung and we'll wait for the error
+		 * handler to reset everything when re-faulting in
+		 * i915_mutex_lock_interruptible.
+		 */
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+	case -EBUSY:
+		/*
+		 * EBUSY is ok: this just means that another thread
+		 * already did the job.
+		 */
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	case -ENOSPC:
+	case -EFAULT:
+		return VM_FAULT_SIGBUS;
+	default:
+		WARN_ONCE(fault_ret, "unhandled error in %s: %i\n", __func__,
+			  fault_ret);
+		return VM_FAULT_SIGBUS;
+	}
+}
+
+static int __prepare_object_fault(struct vm_fault *vmf,
+				  bool pin_vma,
+				  struct gem_object_fault *f)
+{
+	struct vm_area_struct *area = vmf->vma;
+	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	bool write = area->vm_flags & VM_WRITE;
+	int ret;
+
+	/* Sanity check that we allow writing into this object */
+	if (i915_gem_object_is_readonly(obj) && write) {
+		f->error_ret = VM_FAULT_SIGBUS;
+		return -EACCES;
+	}
+
+	f->obj = obj;
+	/* We don't use vmf->pgoff since that has the fake offset */
+	f->page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+
+	trace_i915_gem_object_fault(obj, f->page_offset, true, write);
+
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		goto err;
+
+	f->wakeref = intel_runtime_pm_get(dev_priv);
+
+	f->srcu = i915_reset_trylock(dev_priv);
+	if (f->srcu < 0) {
+		ret = f->srcu;
+		goto err_rpm;
+	}
+
+	f->pin_vma = pin_vma;
+	if (f->pin_vma) {
+		ret = i915_mutex_lock_interruptible(dev);
+		if (ret)
+			goto err_reset;
+	}
+
+	/* Access to snoopable pages through the GTT is incoherent. */
+	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
+		ret = -EFAULT;
+		goto err_unlock;
+	}
+
+	return 0;
+
+err_unlock:
+	if (f->pin_vma)
+		mutex_unlock(&dev->struct_mutex);
+err_reset:
+	i915_reset_unlock(dev_priv, f->srcu);
+err_rpm:
+	intel_runtime_pm_put(dev_priv, f->wakeref);
+	i915_gem_object_unpin_pages(obj);
+err:
+	f->error_ret = __vm_fault_from_error(dev_priv, ret);
+	return ret;
+}
+
+static vm_fault_t __object_fault_fini(int fault_ret, struct gem_object_fault *f)
+{
+	struct drm_device *dev = f->obj->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	if (f->pin_vma)
+		mutex_unlock(&dev->struct_mutex);
+	i915_reset_unlock(dev_priv, f->srcu);
+	intel_runtime_pm_put(dev_priv, f->wakeref);
+	i915_gem_object_unpin_pages(f->obj);
+
+	return __vm_fault_from_error(dev_priv, fault_ret);
+}
+
 /**
  * i915_gem_fault - fault a page into the GTT
  * @vmf: fault info
@@ -223,43 +350,13 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *i915 = to_i915(dev);
 	struct i915_ggtt *ggtt = &i915->ggtt;
-	bool write = area->vm_flags & VM_WRITE;
-	intel_wakeref_t wakeref;
 	struct i915_vma *vma;
 	pgoff_t page_offset;
-	int srcu;
+	struct gem_object_fault fault;
 	int ret;
 
-	/* Sanity check that we allow writing into this object */
-	if (i915_gem_object_is_readonly(obj) && write)
-		return VM_FAULT_SIGBUS;
-
-	/* We don't use vmf->pgoff since that has the fake offset */
-	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
-
-	trace_i915_gem_object_fault(obj, page_offset, true, write);
-
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		goto err;
-
-	wakeref = intel_runtime_pm_get(i915);
-
-	srcu = i915_reset_trylock(i915);
-	if (srcu < 0) {
-		ret = srcu;
-		goto err_rpm;
-	}
-
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto err_reset;
-
-	/* Access to snoopable pages through the GTT is incoherent. */
-	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
-		ret = -EFAULT;
-		goto err_unlock;
-	}
+	if (__prepare_object_fault(vmf, true, &fault))
+		return fault.error_ret;
 
 	/* Now pin it into the GTT as needed */
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -291,7 +388,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	}
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
-		goto err_unlock;
+		goto err;
 	}
 
 	ret = i915_vma_pin_fence(vma);
@@ -322,49 +419,8 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	i915_vma_unpin_fence(vma);
 err_unpin:
 	__i915_vma_unpin(vma);
-err_unlock:
-	mutex_unlock(&dev->struct_mutex);
-err_reset:
-	i915_reset_unlock(i915, srcu);
-err_rpm:
-	intel_runtime_pm_put(i915, wakeref);
-	i915_gem_object_unpin_pages(obj);
 err:
-	switch (ret) {
-	case -EIO:
-		/*
-		 * We eat errors when the gpu is terminally wedged to avoid
-		 * userspace unduly crashing (gl has no provisions for mmaps to
-		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
-		 * and so needs to be reported.
-		 */
-		if (!i915_terminally_wedged(i915))
-			return VM_FAULT_SIGBUS;
-		/* else: fall through */
-	case -EAGAIN:
-		/*
-		 * EAGAIN means the gpu is hung and we'll wait for the error
-		 * handler to reset everything when re-faulting in
-		 * i915_mutex_lock_interruptible.
-		 */
-	case 0:
-	case -ERESTARTSYS:
-	case -EINTR:
-	case -EBUSY:
-		/*
-		 * EBUSY is ok: this just means that another thread
-		 * already did the job.
-		 */
-		return VM_FAULT_NOPAGE;
-	case -ENOMEM:
-		return VM_FAULT_OOM;
-	case -ENOSPC:
-	case -EFAULT:
-		return VM_FAULT_SIGBUS;
-	default:
-		WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
-		return VM_FAULT_SIGBUS;
-	}
+	return __object_fault_fini(ret, &fault);
 }
 
 void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)