[2/3] drm/i915: Fix userptr deadlock with MAP_FIXED

Message ID 1435683333-17844-2-git-send-email-chris@chris-wilson.co.uk
State New

Commit Message

Chris Wilson June 30, 2015, 4:55 p.m. UTC
Michał Winiarski found a really evil way to trigger a struct_mutex
deadlock with userptr. He found that if he allocated a userptr bo and
then GTT mmaped another bo, or even itself, at the same address as the
userptr using MAP_FIXED, he could then cause a deadlock any time we
had to invalidate the GTT mmappings (so at will).
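
For illustration, the rough shape of the trigger looks like this
(create_userptr_bo(), create_bo() and gtt_mmap_offset() are
hypothetical helpers standing in for the usual ioctl plumbing; the
real reproducer is the igt testcase referenced below):

  /* Hypothetical sketch of the trigger, not the igt testcase itself. */
  void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  uint32_t userptr_bo = create_userptr_bo(fd, ptr, size); /* hypothetical */
  uint32_t other_bo = create_bo(fd, size);                /* hypothetical */

  /* Alias the userptr range with a GTT mmapping of another bo (or of
   * the userptr bo itself) at the same address using MAP_FIXED.
   */
  mmap(ptr, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
       fd, gtt_mmap_offset(fd, other_bo));                /* hypothetical */

  /* From now on, any invalidation of GTT mmappings fires the mmu
   * notifier for the userptr range with struct_mutex already held.
   */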

To counteract the deadlock, we make the observation that when the
MAP_FIXED mmapping is made we would have an invalidate_range event for
our object. After that we should no longer alias with the rogue
mmapping. If we are then able to mark the object as no longer in use
after the first invalidate, we do not need to grab the struct_mutex for
the subsequent invalidations.

The patch makes one eye-catching change: the removal of serial=0
after detecting a to-be-freed object inside the invalidate walker. I
felt setting serial=0 was a questionable pessimisation: it denies us the
chance to reuse the current iterator for the next loop (before it is
freed), and being explicit makes the reader question the validity of the
locking (since the object-free race could occur elsewhere). The
serialisation of the iterator is through the spinlock; if the object is
freed before the next loop then the notifier.serial will be incremented
and we start the walk from the beginning as we detect the invalid cache.
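
Schematically, the walker's iterator caching looks like this (a
sketch of the pattern, not the exact code):

  /* Sketch only: the cached iterator is trusted while mn->serial is
   * unchanged; any object freed in between bumps mn->serial under
   * mn->lock and forces the walk to restart from the beginning.
   */
  spin_lock(&mn->lock);
  if (serial == mn->serial)
          it = interval_tree_iter_next(it, start, end);
  else
          it = interval_tree_iter_first(&mn->objects, start, end);
  /* ... inspect the object under the lock ... */
  serial = mn->serial;
  spin_unlock(&mn->lock);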

v2: Grammar fixes
v3: Reorder set-active so that it is only set when obj->pages is set
(and so needs cancellation). Only the order of setting obj->pages and
the active-flag is crucial. Calling gup after invalidate-range begin
means the userptr sees the new set of backing storage (and so will not
need to invalidate its new pages), but we have to be careful not to set
the active-flag prior to successfully establishing obj->pages.

Reported-by: Michał Winiarski <michal.winiarski@intel.com>
Testcase: igt/gem_userptr_blits/map-fixed*
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/i915/i915_gem_userptr.c | 43 +++++++++++++++++++++++++++------
 1 file changed, 36 insertions(+), 7 deletions(-)

Comments

Tvrtko Ursulin July 1, 2015, 11:14 a.m. UTC | #1
On 06/30/2015 05:55 PM, Chris Wilson wrote:
> Michał Winiarski found a really evil way to trigger a struct_mutex
> deadlock with userptr. He found that if he allocated a userptr bo and
> then GTT mmaped another bo, or even itself, at the same address as the
> userptr using MAP_FIXED, he could then cause a deadlock any time we
> had to invalidate the GTT mmappings (so at will).
>
> To counteract the deadlock, we make the observation that when the
> MAP_FIXED mmapping is made we would have an invalidate_range event for
> our object. After that we should no longer alias with the rogue
> mmapping. If we are then able to mark the object as no longer in use
> after the first invalidate, we do not need to grab the struct_mutex for
> the subsequent invalidations.
>
> The patch makes one eye-catching change: the removal of serial=0
> after detecting a to-be-freed object inside the invalidate walker. I
> felt setting serial=0 was a questionable pessimisation: it denies us the
> chance to reuse the current iterator for the next loop (before it is
> freed), and being explicit makes the reader question the validity of the
> locking (since the object-free race could occur elsewhere). The
> serialisation of the iterator is through the spinlock; if the object is
> freed before the next loop then the notifier.serial will be incremented
> and we start the walk from the beginning as we detect the invalid cache.
>
> v2: Grammar fixes
> v3: Reorder set-active so that it is only set when obj->pages is set
> (and so needs cancellation). Only the order of setting obj->pages and
> the active-flag is crucial. Calling gup after invalidate-range begin
> means the userptr sees the new set of backing storage (and so will not
> need to invalidate its new pages), but we have to be careful not to set
> the active-flag prior to successfully establishing obj->pages.
>
> Reported-by: Michał Winiarski <michal.winiarski@intel.com>
> Testcase: igt/gem_userptr_blits/map-fixed*
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Michał Winiarski <michal.winiarski@intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: stable@vger.kernel.org
> ---
>   drivers/gpu/drm/i915/i915_gem_userptr.c | 43 +++++++++++++++++++++++++++------
>   1 file changed, 36 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
> index cb367d9f7909..d3213fdefafc 100644
> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
> @@ -59,6 +59,7 @@ struct i915_mmu_object {
>   	struct interval_tree_node it;
>   	struct list_head link;
>   	struct drm_i915_gem_object *obj;
> +	bool active;
>   	bool is_linear;
>   };
>
> @@ -114,7 +115,8 @@ restart:
>
>   		obj = mo->obj;
>
> -		if (!kref_get_unless_zero(&obj->base.refcount))
> +		if (!mo->active ||
> +		    !kref_get_unless_zero(&obj->base.refcount))
>   			continue;
>
>   		spin_unlock(&mn->lock);
> @@ -151,7 +153,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
>   		else
>   			it = interval_tree_iter_first(&mn->objects, start, end);
>   		if (it != NULL) {
> -			obj = container_of(it, struct i915_mmu_object, it)->obj;
> +			struct i915_mmu_object *mo =
> +				container_of(it, struct i915_mmu_object, it);
>
>   			/* The mmu_object is released late when destroying the
>   			 * GEM object so it is entirely possible to gain a
> @@ -160,11 +163,9 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
>   			 * the struct_mutex - and consequently use it after it
>   			 * is freed and then double free it.
>   			 */
> -			if (!kref_get_unless_zero(&obj->base.refcount)) {
> -				spin_unlock(&mn->lock);
> -				serial = 0;
> -				continue;
> -			}
> +			if (mo->active &&
> +			    kref_get_unless_zero(&mo->obj->base.refcount))
> +				obj = mo->obj;
>
>   			serial = mn->serial;
>   		}
> @@ -546,6 +547,30 @@ err:
>   }
>
>   static void
> +__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
> +			      bool value)
> +{
> +	/* During mm_invalidate_range we need to cancel any userptr that
> +	 * overlaps the range being invalidated. Doing so requires the
> +	 * struct_mutex, and that risks recursion. In order to cause
> +	 * recursion, the user must alias the userptr address space with
> +	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
> +	 * to invalidate that mmapping, mm_invalidate_range is called with
> +	 * the userptr address *and* the struct_mutex held.  To prevent that
> +	 * we set a flag under the i915_mmu_notifier spinlock to indicate
> +	 * whether this object is valid.
> +	 */
> +#if defined(CONFIG_MMU_NOTIFIER)
> +	if (obj->userptr.mmu_object == NULL)
> +		return;
> +
> +	spin_lock(&obj->userptr.mmu_object->mn->lock);
> +	obj->userptr.mmu_object->active = value;
> +	spin_unlock(&obj->userptr.mmu_object->mn->lock);
> +#endif

Would this be more obvious as atomic_t? Since I suspect the spinlock
is just there for the memory barrier, if that. Hm.. What if we get an
invalidate while get_pages is running, before it has set active but
after gup has succeeded?

> +
> +static void
>   __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
>   {
>   	struct get_pages_work *work = container_of(_work, typeof(*work), work);
> @@ -585,6 +610,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
>   		if (pinned == num_pages) {
>   			ret = st_set_pages(&obj->pages, pvec, num_pages);
>   			if (ret == 0) {
> +				__i915_gem_userptr_set_active(obj, true);
>   				list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
>   				pinned = 0;
>   			}
> @@ -699,6 +725,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
>   	} else {
>   		ret = st_set_pages(&obj->pages, pvec, num_pages);
>   		if (ret == 0) {
> +			__i915_gem_userptr_set_active(obj, true);
>   			obj->userptr.work = NULL;
>   			pinned = 0;
>   		}
> @@ -732,6 +759,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
>
>   	sg_free_table(obj->pages);
>   	kfree(obj->pages);
> +
> +	__i915_gem_userptr_set_active(obj, false);
>   }
>
>   static void
>

Regards,

Tvrtko
Chris Wilson July 1, 2015, 11:29 a.m. UTC | #2
On Wed, Jul 01, 2015 at 12:14:34PM +0100, Tvrtko Ursulin wrote:
> >  static void
> >+__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
> >+			      bool value)
> >+{
> >+	/* During mm_invalidate_range we need to cancel any userptr that
> >+	 * overlaps the range being invalidated. Doing so requires the
> >+	 * struct_mutex, and that risks recursion. In order to cause
> >+	 * recursion, the user must alias the userptr address space with
> >+	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
> >+	 * to invalidate that mmapping, mm_invalidate_range is called with
> >+	 * the userptr address *and* the struct_mutex held.  To prevent that
> >+	 * we set a flag under the i915_mmu_notifier spinlock to indicate
> >+	 * whether this object is valid.
> >+	 */
> >+#if defined(CONFIG_MMU_NOTIFIER)
> >+	if (obj->userptr.mmu_object == NULL)
> >+		return;
> >+
> >+	spin_lock(&obj->userptr.mmu_object->mn->lock);
> >+	obj->userptr.mmu_object->active = value;
> >+	spin_unlock(&obj->userptr.mmu_object->mn->lock);
> >+#endif
> 
> Would this be more obvious as atomic_t? Since I suspect the spinlock
> is just there for the memory barrier, if that.

Yes, with a little thought you could probably get away with just a
memory barrier. But since one side is guarded by the spinlock, I think
it is easier to reuse that. Especially when the expression becomes a
little more complicated.

> Hm.. What if we get an invalidate
> while get_pages is running, before it has set active but after gup
> has succeeded?

Yeah, that actually is a problem. Especially thinking about the gup
worker, which we need to cancel.

Back to setting active earlier and clearing it along error paths.
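
Roughly (a sketch of the idea only, not the actual follow-up patch):

  /* Sketch: mark active before gup so a concurrent invalidate cannot
   * slip through the window, and cancel along every error path.
   */
  __i915_gem_userptr_set_active(obj, true);
  pinned = get_user_pages(...);
  if (pinned == num_pages)
          ret = st_set_pages(&obj->pages, pvec, num_pages);
  else
          ret = -EFAULT;
  if (ret)
          __i915_gem_userptr_set_active(obj, false);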
-Chris

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index cb367d9f7909..d3213fdefafc 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -59,6 +59,7 @@  struct i915_mmu_object {
 	struct interval_tree_node it;
 	struct list_head link;
 	struct drm_i915_gem_object *obj;
+	bool active;
 	bool is_linear;
 };
 
@@ -114,7 +115,8 @@  restart:
 
 		obj = mo->obj;
 
-		if (!kref_get_unless_zero(&obj->base.refcount))
+		if (!mo->active ||
+		    !kref_get_unless_zero(&obj->base.refcount))
 			continue;
 
 		spin_unlock(&mn->lock);
@@ -151,7 +153,8 @@  static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 		else
 			it = interval_tree_iter_first(&mn->objects, start, end);
 		if (it != NULL) {
-			obj = container_of(it, struct i915_mmu_object, it)->obj;
+			struct i915_mmu_object *mo =
+				container_of(it, struct i915_mmu_object, it);
 
 			/* The mmu_object is released late when destroying the
 			 * GEM object so it is entirely possible to gain a
@@ -160,11 +163,9 @@  static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 			 * the struct_mutex - and consequently use it after it
 			 * is freed and then double free it.
 			 */
-			if (!kref_get_unless_zero(&obj->base.refcount)) {
-				spin_unlock(&mn->lock);
-				serial = 0;
-				continue;
-			}
+			if (mo->active &&
+			    kref_get_unless_zero(&mo->obj->base.refcount))
+				obj = mo->obj;
 
 			serial = mn->serial;
 		}
@@ -546,6 +547,30 @@  err:
 }
 
 static void
+__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
+			      bool value)
+{
+	/* During mm_invalidate_range we need to cancel any userptr that
+	 * overlaps the range being invalidated. Doing so requires the
+	 * struct_mutex, and that risks recursion. In order to cause
+	 * recursion, the user must alias the userptr address space with
+	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
+	 * to invalidate that mmapping, mm_invalidate_range is called with
+	 * the userptr address *and* the struct_mutex held.  To prevent that
+	 * we set a flag under the i915_mmu_notifier spinlock to indicate
+	 * whether this object is valid.
+	 */
+#if defined(CONFIG_MMU_NOTIFIER)
+	if (obj->userptr.mmu_object == NULL)
+		return;
+
+	spin_lock(&obj->userptr.mmu_object->mn->lock);
+	obj->userptr.mmu_object->active = value;
+	spin_unlock(&obj->userptr.mmu_object->mn->lock);
+#endif
+}
+
+static void
 __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 {
 	struct get_pages_work *work = container_of(_work, typeof(*work), work);
@@ -585,6 +610,7 @@  __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 		if (pinned == num_pages) {
 			ret = st_set_pages(&obj->pages, pvec, num_pages);
 			if (ret == 0) {
+				__i915_gem_userptr_set_active(obj, true);
 				list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
 				pinned = 0;
 			}
@@ -699,6 +725,7 @@  i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	} else {
 		ret = st_set_pages(&obj->pages, pvec, num_pages);
 		if (ret == 0) {
+			__i915_gem_userptr_set_active(obj, true);
 			obj->userptr.work = NULL;
 			pinned = 0;
 		}
@@ -732,6 +759,8 @@  i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 
 	sg_free_table(obj->pages);
 	kfree(obj->pages);
+
+	__i915_gem_userptr_set_active(obj, false);
 }
 
 static void