[3/5] drm/i915: Prevent writing into a read-only object via a GGTT mmap

Message ID 20180614192404.24534-3-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson June 14, 2018, 7:24 p.m. UTC
If the user has created a read-only object, they should not be allowed
to circumvent the write protection by using a GGTT mmapping. Deny it.

Also most machines do not support read-only GGTT PTEs, so again we have
to reject attempted writes. Fortunately, this is known a priori, so we
can at least reject in the call to create the mmap (with a sanity check
in the fault handler).

v2: Check the vma->vm_flags during mmap() to allow readonly access.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: David Herrmann <dh.herrmann@gmail.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> #v1
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com> #v1
Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
---
 drivers/gpu/drm/drm_gem.c                         |  5 +++++
 drivers/gpu/drm/i915/i915_gem.c                   |  4 ++++
 drivers/gpu/drm/i915/i915_gem_gtt.c               | 12 +++++++-----
 drivers/gpu/drm/i915/i915_gem_object.h            | 13 ++++++++++++-
 drivers/gpu/drm/i915/intel_ringbuffer.c           |  2 +-
 drivers/gpu/drm/i915/selftests/i915_gem_context.c |  5 +++--
 include/drm/drm_vma_manager.h                     |  1 +
 7 files changed, 33 insertions(+), 9 deletions(-)
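
For context only (not part of the patch), here is a rough userspace sketch of the behaviour being enforced. It assumes "fd" is an open i915 DRM fd and "handle" names a GEM object the kernel already treats as read-only (for example via the read-only userptr path added elsewhere in this series); error handling is minimal and the helper name is made up.

/*
 * Illustrative only: a writable GGTT mmap of a read-only object should now
 * be refused at mmap() time, while a read-only mapping still succeeds.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static int check_ro_gtt_mmap(int fd, uint32_t handle, size_t size)
{
        struct drm_i915_gem_mmap_gtt arg;
        void *ptr;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;
        if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                return -errno;

        /* A writable GGTT mmap of a read-only object is rejected... */
        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd, arg.offset);
        if (ptr != MAP_FAILED) {
                munmap(ptr, size);
                return -1; /* write access should not have been granted */
        }

        /* ...while a read-only mapping is still allowed (the v2 change). */
        ptr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, arg.offset);
        if (ptr == MAP_FAILED)
                return -errno;

        munmap(ptr, size);
        return 0;
}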

Comments

Joonas Lahtinen June 15, 2018, 8:08 a.m. UTC | #1
Quoting Chris Wilson (2018-06-14 22:24:02)
> If the user has created a read-only object, they should not be allowed
> to circumvent the write protection by using a GGTT mmapping. Deny it.
> 
> Also most machines do not support read-only GGTT PTEs, so again we have
> to reject attempted writes. Fortunately, this is known a priori, so we
> can at least reject in the call to create the mmap (with a sanity check
> in the fault handler).
> 
> v2: Check the vma->vm_flags during mmap() to allow readonly access.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Matthew Auld <matthew.william.auld@gmail.com>
> Cc: David Herrmann <dh.herrmann@gmail.com>
> Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> #v1
> Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com> #v1
> Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>

<SNIP>

> +++ b/drivers/gpu/drm/drm_gem.c
> @@ -1036,6 +1036,11 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
>                 return -EACCES;
>         }
>  
> +       if (vma->vm_flags & VM_WRITE && node->readonly) {
> +               drm_gem_object_put_unlocked(obj);
> +               return -EINVAL;
> +       }
> +

Pretty sure you want to split this patch and Cc: dri-devel. With that,
both are:

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas
Chris Wilson June 15, 2018, 8:33 a.m. UTC | #2
Quoting Joonas Lahtinen (2018-06-15 09:08:54)
> Quoting Chris Wilson (2018-06-14 22:24:02)
> > <SNIP>
> 
> > +++ b/drivers/gpu/drm/drm_gem.c
> > @@ -1036,6 +1036,11 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
> >                 return -EACCES;
> >         }
> >  
> > +       if (vma->vm_flags & VM_WRITE && node->readonly) {
> > +               drm_gem_object_put_unlocked(obj);
> > +               return -EINVAL;
> > +       }
> > +
> 
> Pretty sure you want to split this patch and Cc: dri-devel. With that,
> both are:

I thought I cc'ed dri-devel on the previous sending. (At least I thought
about it.) Besides, until ttm differentiates between read/write access,
it's a moot point.
-Chris

Patch

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4a16d7b26c89..230863813905 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1036,6 +1036,11 @@  int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 		return -EACCES;
 	}
 
+	if (vma->vm_flags & VM_WRITE && node->readonly) {
+		drm_gem_object_put_unlocked(obj);
+		return -EINVAL;
+	}
+
 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
 			       vma);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8dd4d35655af..2bfb16e83af2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2009,6 +2009,10 @@  vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	unsigned int flags;
 	int ret;
 
+	/* Sanity check that we allow writing into this object */
+	if (i915_gem_object_is_readonly(obj) && write)
+		return VM_FAULT_SIGBUS;
+
 	/* We don't use vmf->pgoff since that has the fake offset */
 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index bfe23e10a127..8591e7051f0d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -206,7 +206,7 @@  static int ppgtt_bind_vma(struct i915_vma *vma,
 
 	/* Applicable to VLV, and gen8+ */
 	pte_flags = 0;
-	if (vma->obj->gt_ro)
+	if (i915_gem_object_is_readonly(vma->obj))
 		pte_flags |= PTE_READ_ONLY;
 
 	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
@@ -2423,8 +2423,10 @@  static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
 	dma_addr_t addr;
 
-	/* The GTT does not support read-only mappings */
-	GEM_BUG_ON(flags & PTE_READ_ONLY);
+	/*
+	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+	 * not to allow the user to override access to a read only page.
+	 */
 
 	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
 	gtt_entries += vma->node.start >> PAGE_SHIFT;
@@ -2663,7 +2665,7 @@  static int ggtt_bind_vma(struct i915_vma *vma,
 
 	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
 	pte_flags = 0;
-	if (obj->gt_ro)
+	if (i915_gem_object_is_readonly(obj))
 		pte_flags |= PTE_READ_ONLY;
 
 	intel_runtime_pm_get(i915);
@@ -2701,7 +2703,7 @@  static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
 	/* Currently applicable only to VLV */
 	pte_flags = 0;
-	if (vma->obj->gt_ro)
+	if (i915_gem_object_is_readonly(vma->obj))
 		pte_flags |= PTE_READ_ONLY;
 
 	if (flags & I915_VMA_LOCAL_BIND) {
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 54f00b350779..fd703d768b70 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -141,7 +141,6 @@  struct drm_i915_gem_object {
 	 * Is the object to be mapped as read-only to the GPU
 	 * Only honoured if hardware has relevant pte bit
 	 */
-	unsigned long gt_ro:1;
 	unsigned int cache_level:3;
 	unsigned int cache_coherent:2;
 #define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
@@ -367,6 +366,18 @@  static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
 	reservation_object_unlock(obj->resv);
 }
 
+static inline void
+i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
+{
+	obj->base.vma_node.readonly = true;
+}
+
+static inline bool
+i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
+{
+	return obj->base.vma_node.readonly;
+}
+
 static inline bool
 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8853a5e6d421..9d54c3c24b10 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1112,7 +1112,7 @@  intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
 	 * if supported by the platform's GGTT.
 	 */
 	if (vm->has_read_only)
-		obj->gt_ro = 1;
+		i915_gem_object_set_readonly(obj);
 
 	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma))
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 5bae52068926..fea447b1a74f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -510,7 +510,8 @@  static int igt_ctx_readonly(void *arg)
 					goto out_unlock;
 				}
 
-				obj->gt_ro = prandom_u32_state(&prng);
+				if (prandom_u32_state(&prng) & 1)
+					i915_gem_object_set_readonly(obj);
 			}
 
 			intel_runtime_pm_get(i915);
@@ -539,7 +540,7 @@  static int igt_ctx_readonly(void *arg)
 		unsigned int rem =
 			min_t(unsigned int, ndwords - dw, max_dwords(obj));
 
-		if (obj->gt_ro)
+		if (i915_gem_object_is_readonly(obj))
 			err = ro_check(obj, rem);
 		else
 			err = cpu_check(obj, rem);
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 8758df94e9a0..c7987daeaed0 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -41,6 +41,7 @@  struct drm_vma_offset_node {
 	rwlock_t vm_lock;
 	struct drm_mm_node vm_node;
 	struct rb_root vm_files;
+	bool readonly:1;
 };
 
 struct drm_vma_offset_manager {
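
As a footnote to the drm core piece of this patch (the new drm_vma_offset_node.readonly bit plus the check in drm_gem_mmap()), below is a minimal, hypothetical sketch of how any GEM driver could use it; the function name is invented for illustration and is not part of this series.

/*
 * Hypothetical driver-side usage of the new readonly bit. Once set,
 * drm_gem_mmap() refuses VM_WRITE mappings of the object's fake offset
 * with -EINVAL.
 */
#include <drm/drm_gem.h>

static int foo_gem_expose_readonly(struct drm_gem_object *obj)
{
        int ret;

        /* Allocate the fake mmap offset backing obj->vma_node. */
        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                return ret;

        /* Writable CPU mmaps of this object are now denied by the core. */
        obj->vma_node.readonly = true;
        return 0;
}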