
[v3] gpu: drm: i915: Change return type to vm_fault_t

Message ID 20180516143826.GA29156@jordon-HP-15-Notebook-PC (mailing list archive)
State New, archived

Commit Message

Souptick Joarder May 16, 2018, 2:38 p.m. UTC
Use the new return type vm_fault_t for the fault handler. For now,
this just documents that the function returns a VM_FAULT value
rather than an errno. Once all instances are converted, vm_fault_t
will become a distinct type.

See commit 1c8f422059ae ("mm: change return type to vm_fault_t").

Fixed one checkpatch.pl warning inside WARN_ONCE.

Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
---
v2: Updated the change log

v3: Corrected variable name to err

 drivers/gpu/drm/i915/i915_drv.h |  3 ++-
 drivers/gpu/drm/i915/i915_gem.c | 37 +++++++++++++++++++------------------
 2 files changed, 21 insertions(+), 19 deletions(-)
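
For background, the sketch below (not part of the patch) illustrates the
err/ret split this conversion applies: internal helpers keep reporting an
errno through a plain int, and only the value handed back to the fault
path is a vm_fault_t code. The vm_fault_t typedef itself comes from
<linux/mm_types.h>, which is why the patch adds that include. The names
my_fault() and my_pin_backing_store() are hypothetical and used purely
for illustration.

#include <linux/mm.h>
#include <linux/mm_types.h>

/* Hypothetical helper: returns 0 on success or a negative errno. */
static int my_pin_backing_store(struct vm_fault *vmf)
{
	return vmf->pgoff ? 0 : -ENOMEM;
}

/* The errno stays in 'err'; only a VM_FAULT_* code is stored in 'ret'. */
static vm_fault_t my_fault(struct vm_fault *vmf)
{
	vm_fault_t ret;
	int err;

	err = my_pin_backing_store(vmf);
	switch (err) {
	case 0:
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ERESTARTSYS:
	case -EINTR:
		/* The fault is retried once we return to userspace. */
		ret = VM_FAULT_NOPAGE;
		break;
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		ret = VM_FAULT_SIGBUS;
		break;
	}
	return ret;
}

Keeping the two variables separate is what later allows vm_fault_t to be
made a distinct type (eventually a __bitwise type that sparse can check),
which is the goal stated in the commit message.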

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a42deeb..95b0d50 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -51,6 +51,7 @@ 
 #include <drm/drm_gem.h>
 #include <drm/drm_auth.h>
 #include <drm/drm_cache.h>
+#include <linux/mm_types.h>
 
 #include "i915_params.h"
 #include "i915_reg.h"
@@ -3363,7 +3364,7 @@  int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
 			   unsigned int flags);
 int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
-int i915_gem_fault(struct vm_fault *vmf);
+vm_fault_t i915_gem_fault(struct vm_fault *vmf);
 int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 			 unsigned int flags,
 			 long timeout,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dd89abd..b81b785 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1882,7 +1882,7 @@  int i915_gem_mmap_gtt_version(void)
  * The current feature set supported by i915_gem_fault() and thus GTT mmaps
  * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
  */
-int i915_gem_fault(struct vm_fault *vmf)
+vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
 	struct vm_area_struct *area = vmf->vma;
@@ -1894,7 +1894,8 @@  int i915_gem_fault(struct vm_fault *vmf)
 	struct i915_vma *vma;
 	pgoff_t page_offset;
 	unsigned int flags;
-	int ret;
+	int err;
+	vm_fault_t ret;
 
 	/* We don't use vmf->pgoff since that has the fake offset */
 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
@@ -1906,26 +1907,26 @@  int i915_gem_fault(struct vm_fault *vmf)
 	 * repeat the flush holding the lock in the normal manner to catch cases
 	 * where we are gazumped.
 	 */
-	ret = i915_gem_object_wait(obj,
+	err = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE,
 				   MAX_SCHEDULE_TIMEOUT,
 				   NULL);
-	if (ret)
+	if (err)
 		goto err;
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
 		goto err;
 
 	intel_runtime_pm_get(dev_priv);
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
+	err = i915_mutex_lock_interruptible(dev);
+	if (err)
 		goto err_rpm;
 
 	/* Access to snoopable pages through the GTT is incoherent. */
 	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
-		ret = -EFAULT;
+		err = -EFAULT;
 		goto err_unlock;
 	}
 
@@ -1952,25 +1953,25 @@  int i915_gem_fault(struct vm_fault *vmf)
 		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 	}
 	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
+		err = PTR_ERR(vma);
 		goto err_unlock;
 	}
 
-	ret = i915_gem_object_set_to_gtt_domain(obj, write);
-	if (ret)
+	err = i915_gem_object_set_to_gtt_domain(obj, write);
+	if (err)
 		goto err_unpin;
 
-	ret = i915_vma_pin_fence(vma);
-	if (ret)
+	err = i915_vma_pin_fence(vma);
+	if (err)
 		goto err_unpin;
 
 	/* Finally, remap it using the new GTT offset */
-	ret = remap_io_mapping(area,
+	err = remap_io_mapping(area,
 			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
 			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
 			       min_t(u64, vma->size, area->vm_end - area->vm_start),
 			       &ggtt->iomap);
-	if (ret)
+	if (err)
 		goto err_fence;
 
 	/* Mark as being mmapped into userspace for later revocation */
@@ -1991,7 +1992,7 @@  int i915_gem_fault(struct vm_fault *vmf)
 	intel_runtime_pm_put(dev_priv);
 	i915_gem_object_unpin_pages(obj);
 err:
-	switch (ret) {
+	switch (err) {
 	case -EIO:
 		/*
 		 * We eat errors when the gpu is terminally wedged to avoid
@@ -2027,7 +2028,7 @@  int i915_gem_fault(struct vm_fault *vmf)
 		ret = VM_FAULT_SIGBUS;
 		break;
 	default:
-		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
+		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
 		ret = VM_FAULT_SIGBUS;
 		break;
 	}