
[v2,5/5] drm/i915: Add cpu fault handler for mmap_offset

Message ID 20191007091920.2176-5-abdiel.janulgue@linux.intel.com (mailing list archive)
State New, archived
Series [v2,1/5] drm/i915: Allow i915 to manage the vma offset nodes instead of drm core

Commit Message

Abdiel Janulgue Oct. 7, 2019, 9:19 a.m. UTC
Add a CPU fault handler to service missing pages for shmem-backed objects.

Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c | 125 ++++++++++++++++++-----
 1 file changed, 100 insertions(+), 25 deletions(-)
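
For orientation, a hypothetical userspace sketch of the path this handler serves; it is not part of the patch. The struct and flag names follow the mmap_offset uapi as it eventually landed upstream and may differ in this revision of the series, and fd, handle and obj_size stand in for an open DRM device fd, a GEM object handle and the object size.

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <drm/i915_drm.h>

	/* Hypothetical usage sketch, not part of this patch. */
	static void *map_object_wb(int fd, uint32_t handle, size_t obj_size)
	{
		struct drm_i915_gem_mmap_offset arg = {
			.handle = handle,              /* GEM handle of a shmem-backed object */
			.flags  = I915_MMAP_OFFSET_WB, /* ask for a cacheable CPU mapping */
		};

		if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
			return NULL;

		/* The first CPU touch of each page faults into i915_gem_fault_cpu(). */
		return mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, arg.offset);
	}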

Comments

Chris Wilson Oct. 7, 2019, 10:10 a.m. UTC | #1
Quoting Abdiel Janulgue (2019-10-07 10:19:20)
> +static vm_fault_t i915_gem_fault_cpu(struct vm_fault *vmf)
> +{
> +       struct vm_area_struct *area = vmf->vma;
> +       struct i915_mmap_offset *priv = area->vm_private_data;
> +       struct drm_i915_gem_object *obj = priv->obj;
> +       vm_fault_t vmf_ret;
> +       unsigned long size = area->vm_end - area->vm_start;
> +       bool write = area->vm_flags & VM_WRITE;
> +       int i, ret;
> +
> +       /* Sanity check that we allow writing into this object */
> +       if (i915_gem_object_is_readonly(obj) && write)
> +               return VM_FAULT_SIGBUS;
> +
> +       ret = i915_gem_object_pin_pages(obj);
> +       if (ret)
> +               return i915_error_to_vmf_fault(ret);
> +
> +       for (i = 0; i < size >> PAGE_SHIFT; i++) {

int i!

No, no, no.
-Chris
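
For readers outside the thread: the objection is to the signed int loop counter, which indexes pages of a mapping whose size is an unsigned long. A minimal sketch of the kind of change being asked for (an illustration only, not necessarily the fix that was eventually applied) keeps the index in an unsigned type:

	/* Sketch only: the page count comes from an unsigned long VMA size,
	 * so keep the index unsigned as well instead of a signed int.
	 */
	unsigned long i, n_pages = size >> PAGE_SHIFT;

	for (i = 0; i < n_pages; i++) {
		struct page *page = i915_gem_object_get_page(obj, i);

		vmf_ret = vmf_insert_pfn(area,
					 area->vm_start + i * PAGE_SIZE,
					 page_to_pfn(page));
		if (vmf_ret & VM_FAULT_ERROR)
			break;
	}
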
Chris Wilson Oct. 7, 2019, 10:20 a.m. UTC | #2
Quoting Abdiel Janulgue (2019-10-07 10:19:20)
> +static vm_fault_t i915_gem_fault_cpu(struct vm_fault *vmf)
> +{
> +       struct vm_area_struct *area = vmf->vma;
> +       struct i915_mmap_offset *priv = area->vm_private_data;
> +       struct drm_i915_gem_object *obj = priv->obj;
> +       vm_fault_t vmf_ret;
> +       unsigned long size = area->vm_end - area->vm_start;
> +       bool write = area->vm_flags & VM_WRITE;
> +       int i, ret;
> +
> +       /* Sanity check that we allow writing into this object */
> +       if (i915_gem_object_is_readonly(obj) && write)
> +               return VM_FAULT_SIGBUS;
> +
> +       ret = i915_gem_object_pin_pages(obj);
> +       if (ret)
> +               return i915_error_to_vmf_fault(ret);
> +
> +       for (i = 0; i < size >> PAGE_SHIFT; i++) {
> +               struct page *page = i915_gem_object_get_page(obj, i);
> +
> +               vmf_ret = vmf_insert_pfn(area,
> +                                        (unsigned long)area->vm_start + i * PAGE_SIZE,
> +                                        page_to_pfn(page));
> +               if (vmf_ret & VM_FAULT_ERROR)
> +                       break;

So why are we using vmf_insert_pfn + VM_PFNMAP for this? It does seem to
be page backed. And since you are prefaulting, you may legitimately try
to double insert the same page and hit an error. You should only bail if
you fail to insert the fault address.
-Chris
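
A hedged sketch of the shape Chris is suggesting (an illustration only, not the revision that eventually landed): keep prefaulting the neighbouring pages speculatively, but let only the page covering vmf->address decide the value returned to the fault core, so that an already-populated neighbouring PTE cannot turn the whole fault into an error:

	/* Illustrative only: speculative insertions may fail (e.g. a racing
	 * fault already populated that PTE); only the address that actually
	 * faulted determines what we report back.
	 */
	unsigned long fault_addr = vmf->address & PAGE_MASK;
	vm_fault_t vmf_ret = VM_FAULT_NOPAGE;
	unsigned long i;

	for (i = 0; i < size >> PAGE_SHIFT; i++) {
		struct page *page = i915_gem_object_get_page(obj, i);
		unsigned long addr = area->vm_start + i * PAGE_SIZE;
		vm_fault_t ret = vmf_insert_pfn(area, addr, page_to_pfn(page));

		if (addr == fault_addr)
			vmf_ret = ret;
	}
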

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index a9604d0db606..02afdae812ba 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -5,6 +5,7 @@ 
  */
 
 #include <linux/mman.h>
+#include <linux/pfn_t.h>
 #include <linux/sizes.h>
 
 #include "gt/intel_gt.h"
@@ -200,6 +201,67 @@  compute_partial_view(const struct drm_i915_gem_object *obj,
 	return view;
 }
 
+static vm_fault_t i915_error_to_vmf_fault(int err)
+{
+	switch (err) {
+	default:
+		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
+		/* fallthrough */
+	case -EIO: /* shmemfs failure from swap device */
+	case -EFAULT: /* purged object */
+	case -ENODEV: /* bad object, how did you get here! */
+		return VM_FAULT_SIGBUS;
+
+	case -ENOSPC: /* shmemfs allocation failure */
+	case -ENOMEM: /* our allocation failure */
+		return VM_FAULT_OOM;
+
+	case 0:
+	case -EAGAIN:
+	case -ERESTARTSYS:
+	case -EINTR:
+	case -EBUSY:
+		/*
+		 * EBUSY is ok: this just means that another thread
+		 * already did the job.
+		 */
+		return VM_FAULT_NOPAGE;
+	}
+}
+
+static vm_fault_t i915_gem_fault_cpu(struct vm_fault *vmf)
+{
+	struct vm_area_struct *area = vmf->vma;
+	struct i915_mmap_offset *priv = area->vm_private_data;
+	struct drm_i915_gem_object *obj = priv->obj;
+	vm_fault_t vmf_ret;
+	unsigned long size = area->vm_end - area->vm_start;
+	bool write = area->vm_flags & VM_WRITE;
+	int i, ret;
+
+	/* Sanity check that we allow writing into this object */
+	if (i915_gem_object_is_readonly(obj) && write)
+		return VM_FAULT_SIGBUS;
+
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		return i915_error_to_vmf_fault(ret);
+
+	for (i = 0; i < size >> PAGE_SHIFT; i++) {
+		struct page *page = i915_gem_object_get_page(obj, i);
+
+		vmf_ret = vmf_insert_pfn(area,
+					 (unsigned long)area->vm_start + i * PAGE_SIZE,
+					 page_to_pfn(page));
+		if (vmf_ret & VM_FAULT_ERROR)
+			break;
+	}
+
+	i915_gem_object_unpin_pages(obj);
+
+	return vmf_ret;
+}
+
 /**
  * i915_gem_fault - fault a page into the GTT
  * @vmf: fault info
@@ -342,30 +404,7 @@  vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	intel_runtime_pm_put(rpm, wakeref);
 	i915_gem_object_unpin_pages(obj);
 err:
-	switch (ret) {
-	default:
-		WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
-		/* fallthrough */
-	case -EIO: /* shmemfs failure from swap device */
-	case -EFAULT: /* purged object */
-	case -ENODEV: /* bad object, how did you get here! */
-		return VM_FAULT_SIGBUS;
-
-	case -ENOSPC: /* shmemfs allocation failure */
-	case -ENOMEM: /* our allocation failure */
-		return VM_FAULT_OOM;
-
-	case 0:
-	case -EAGAIN:
-	case -ERESTARTSYS:
-	case -EINTR:
-	case -EBUSY:
-		/*
-		 * EBUSY is ok: this just means that another thread
-		 * already did the job.
-		 */
-		return VM_FAULT_NOPAGE;
-	}
+	return i915_error_to_vmf_fault(ret);
 }
 
 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
@@ -660,6 +699,33 @@  static const struct vm_operations_struct i915_gem_gtt_vm_ops = {
 	.close = i915_gem_vm_close,
 };
 
+static const struct vm_operations_struct i915_gem_cpu_vm_ops = {
+	.fault = i915_gem_fault_cpu,
+	.open = i915_gem_vm_open,
+	.close = i915_gem_vm_close,
+};
+
+static void set_vmdata_mmap_offset(struct i915_mmap_offset *mmo, struct vm_area_struct *vma)
+{
+	switch (mmo->mmap_type) {
+	case I915_MMAP_TYPE_OFFSET_WC:
+		vma->vm_page_prot =
+			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+		break;
+	case I915_MMAP_TYPE_OFFSET_WB:
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+		break;
+	case I915_MMAP_TYPE_OFFSET_UC:
+		vma->vm_page_prot =
+			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+		break;
+	default:
+		break;
+	}
+
+	vma->vm_ops = &i915_gem_cpu_vm_ops;
+}
+
 /* This overcomes the limitation in drm_gem_mmap's assignment of a
  * drm_gem_object as the vma->vm_private_data. Since we need to
  * be able to resolve multiple mmap offsets which could be tied
@@ -727,7 +793,16 @@  int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 	vma->vm_private_data = mmo;
 
-	vma->vm_ops = &i915_gem_gtt_vm_ops;
+	switch (mmo->mmap_type) {
+	case I915_MMAP_TYPE_OFFSET_WC:
+	case I915_MMAP_TYPE_OFFSET_WB:
+	case I915_MMAP_TYPE_OFFSET_UC:
+		set_vmdata_mmap_offset(mmo, vma);
+		break;
+	case I915_MMAP_TYPE_GTT:
+		vma->vm_ops = &i915_gem_gtt_vm_ops;
+		break;
+	}
 
 	return 0;
 }