
[v2] drm/i915: Fix userptr deadlock with MAP_FIXED

Message ID 1435576653-17973-1-git-send-email-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson June 29, 2015, 11:17 a.m. UTC
Michał Winiarski found a really evil way to trigger a struct_mutex
deadlock with userptr. He found that if he allocated a userptr bo and
then GTT mmaped another bo, or even itself, at the same address as the
userptr using MAP_FIXED, he could then cause a deadlock any time we then
had to invalidate the GTT mmappings (so at will).
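
For illustration, a minimal userspace sketch of the trigger (using the
stock i915 uAPI; fd, bo and OBJECT_SIZE are placeholders, error
handling is elided, and this is not the igt testcase itself):

  void *ptr = mmap(NULL, OBJECT_SIZE, PROT_READ | PROT_WRITE,
                   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

  /* Back a userptr bo with the anonymous range. */
  struct drm_i915_gem_userptr userptr = {
          .user_ptr = (uintptr_t)ptr,
          .user_size = OBJECT_SIZE,
  };
  ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);

  /* Get a GTT mmap offset for another bo (or even this one). */
  struct drm_i915_gem_mmap_gtt gtt = { .handle = bo };
  ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gtt);

  /* Alias the GTT mmapping over the userptr range. Any later
   * invalidation of GTT mmappings now calls back into our mmu
   * notifier for this range with struct_mutex already held. */
  mmap(ptr, OBJECT_SIZE, PROT_READ | PROT_WRITE,
       MAP_SHARED | MAP_FIXED, fd, gtt.offset);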

To counteract the deadlock, we make the observation that when the
MAP_FIXED mmap is made we would have an invalidate_range event for our
object. After that we should no longer alias with the rogue mmapping. If
we are then able to mark the object as no longer in use after the first
invalidate, we do not need to grab the struct_mutex for the subsequent
invalidations.

The patch makes one eye-catching change: the removal of serial=0
after detecting a to-be-freed object inside the invalidate walker. I
felt setting serial=0 was a questionable pessimisation: it denies us the
chance to reuse the current iterator for the next loop (before it is
freed) and being explicit makes the reader question the validity of the
locking (since the object-free race could occur elsewhere). The
serialisation of the iterator is through the spinlock; if the object is
freed before the next loop then the notifier.serial will be incremented
and we start the walk from the beginning as we detect the invalid cache.
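
For reference, the shape of the walker's revalidation, paraphrased
from the loop around the hunk below (not the exact driver code; serial
and it are the walker's cached state):

  spin_lock(&mn->lock);
  if (serial == mn->serial)
          /* Cache still valid: reuse the iterator for the next loop. */
          it = interval_tree_iter_next(it, start, end);
  else
          /* An object was freed and bumped the serial: restart. */
          it = interval_tree_iter_first(&mn->objects, start, end);
  ...
  serial = mn->serial;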

v2: Grammar fixes

Reported-by: Michał Winiarski <michal.winiarski@intel.com>
Testcase: igt/gem_userptr_blits/map-fixed*
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/i915/i915_gem_userptr.c | 43 +++++++++++++++++++++++++++------
 1 file changed, 36 insertions(+), 7 deletions(-)

Comments

Michał Winiarski June 29, 2015, 3:57 p.m. UTC | #1
On Mon, Jun 29, 2015 at 12:17:33PM +0100, Chris Wilson wrote:
> Michał Winiarski found a really evil way to trigger a struct_mutex
> deadlock with userptr. He found that if he allocated a userptr bo and
> then GTT mmaped another bo, or even itself, at the same address as the
> userptr using MAP_FIXED, he could then cause a deadlock any time we then
> had to invalidate the GTT mmappings (so at will).
> 
> To counteract the deadlock, we make the observation that when the
> MAP_FIXED mmap is made we would have an invalidate_range event for our
> object. After that we should no longer alias with the rogue mmapping. If
> we are then able to mark the object as no longer in use after the first
> invalidate, we do not need to grab the struct_mutex for the subsequent
> invalidations.
> 
> The patch makes one eye-catching change: the removal of serial=0
> after detecting a to-be-freed object inside the invalidate walker. I
> felt setting serial=0 was a questionable pessimisation: it denies us the
> chance to reuse the current iterator for the next loop (before it is
> freed) and being explicit makes the reader question the validity of the
> locking (since the object-free race could occur elsewhere). The
> serialisation of the iterator is through the spinlock; if the object is
> freed before the next loop then the notifier.serial will be incremented
> and we start the walk from the beginning as we detect the invalid cache.
> 
> v2: Grammar fixes
> 
> Reported-by: Michał Winiarski <michal.winiarski@intel.com>
> Testcase: igt/gem_userptr_blits/map-fixed*
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Michał Winiarski <michal.winiarski@intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: stable@vger.kernel.org
> ---
>  drivers/gpu/drm/i915/i915_gem_userptr.c | 43 +++++++++++++++++++++++++++------
>  1 file changed, 36 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
> index cb367d9f7909..e1965d8c08c8 100644
> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
> @@ -59,6 +59,7 @@ struct i915_mmu_object {
>  	struct interval_tree_node it;
>  	struct list_head link;
>  	struct drm_i915_gem_object *obj;
> +	bool active;
>  	bool is_linear;
>  };
>  
> @@ -114,7 +115,8 @@ restart:
>  
>  		obj = mo->obj;
>  
> -		if (!kref_get_unless_zero(&obj->base.refcount))
> +		if (!mo->active ||
> +		    !kref_get_unless_zero(&obj->base.refcount))
>  			continue;
>  
>  		spin_unlock(&mn->lock);
> @@ -151,7 +153,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
>  		else
>  			it = interval_tree_iter_first(&mn->objects, start, end);
>  		if (it != NULL) {
> -			obj = container_of(it, struct i915_mmu_object, it)->obj;
> +			struct i915_mmu_object *mo =
> +				container_of(it, struct i915_mmu_object, it);
>  
>  			/* The mmu_object is released late when destroying the
>  			 * GEM object so it is entirely possible to gain a
> @@ -160,11 +163,9 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
>  			 * the struct_mutex - and consequently use it after it
>  			 * is freed and then double free it.
>  			 */
> -			if (!kref_get_unless_zero(&obj->base.refcount)) {
> -				spin_unlock(&mn->lock);
> -				serial = 0;
> -				continue;
> -			}
> +			if (mo->active &&
> +			    kref_get_unless_zero(&mo->obj->base.refcount))
> +				obj = mo->obj;
>  
>  			serial = mn->serial;
>  		}
> @@ -606,6 +607,20 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
>  		wake_up_all(&to_i915(dev)->mm.queue);
>  }
>  
> +static void
> +__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
> +			      bool value)
> +{
> +#if defined(CONFIG_MMU_NOTIFIER)
> +	if (obj->userptr.mmu_object == NULL)
> +		return;
> +
> +	spin_lock(&obj->userptr.mmu_object->mn->lock);
> +	obj->userptr.mmu_object->active = value;
> +	spin_unlock(&obj->userptr.mmu_object->mn->lock);
> +#endif
> +}
> +
>  static int
>  i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
>  {
> @@ -613,6 +628,18 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
>  	struct page **pvec;
>  	int pinned, ret;
>  
> +	/* During mm_invalidate_range we need to cancel any userptr that
> +	 * overlaps the range being invalidated. Doing so requires the
> +	 * struct_mutex, and that risks recursion. In order to cause
> +	 * recursion, the user must alias the userptr address space with
> +	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
> +	 * to invalidate that mmaping, mm_invalidate_range is called with
> +	 * the userptr address *and* the struct_mutex held.  To prevent that
> +	 * we set a flag under the i915_mmu_notifier spinlock to indicate
> +	 * whether this object is valid.
> +	 */
> +	__i915_gem_userptr_set_active(obj, true);
> +

This will set mmu_object to active even if we're exiting early from here
(because of error handling), creating another possibility for deadlock.
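
Schematically (paraphrasing the patched get_pages, not the exact
code):

  __i915_gem_userptr_set_active(obj, true);

  ret = ...; /* e.g. the existing overlap/pinning checks */
  if (ret)
          return ret;     /* early exit: the object stays marked active,
                           * so invalidate_range will still take
                           * struct_mutex for pages we never acquired */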

-Michał

>  	/* If userspace should engineer that these pages are replaced in
>  	 * the vma between us binding this page into the GTT and completion
>  	 * of rendering... Their loss. If they change the mapping of their
> @@ -732,6 +759,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
>  
>  	sg_free_table(obj->pages);
>  	kfree(obj->pages);
> +
> +	__i915_gem_userptr_set_active(obj, false);
>  }
>  
>  static void
> -- 
> 2.1.4
>
Tvrtko Ursulin June 30, 2015, 2:52 p.m. UTC | #2
On 06/29/2015 04:57 PM, Michał Winiarski wrote:
> On Mon, Jun 29, 2015 at 12:17:33PM +0100, Chris Wilson wrote:
>> Michał Winiarski found a really evil way to trigger a struct_mutex
>> deadlock with userptr. He found that if he allocated a userptr bo and
>> then GTT mmaped another bo, or even itself, at the same address as the
>> userptr using MAP_FIXED, he could then cause a deadlock any time we then
>> had to invalidate the GTT mmappings (so at will).
>>
>> To counteract the deadlock, we make the observation that when the
>> MAP_FIXED mmap is made we would have an invalidate_range event for our
>> object. After that we should no longer alias with the rogue mmapping. If
>> we are then able to mark the object as no longer in use after the first
>> invalidate, we do not need to grab the struct_mutex for the subsequent
>> invalidations.
>>
>> The patch makes one eye-catching change: the removal of serial=0
>> after detecting a to-be-freed object inside the invalidate walker. I
>> felt setting serial=0 was a questionable pessimisation: it denies us the
>> chance to reuse the current iterator for the next loop (before it is
>> freed) and being explicit makes the reader question the validity of the
>> locking (since the object-free race could occur elsewhere). The
>> serialisation of the iterator is through the spinlock; if the object is
>> freed before the next loop then the notifier.serial will be incremented
>> and we start the walk from the beginning as we detect the invalid cache.
>>
>> v2: Grammar fixes
>>
>> Reported-by: Michał Winiarski <michal.winiarski@intel.com>
>> Testcase: igt/gem_userptr_blits/map-fixed*
>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>> Cc: Michał Winiarski <michal.winiarski@intel.com>
>> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>> Cc: stable@vger.kernel.org
>> ---
>>   drivers/gpu/drm/i915/i915_gem_userptr.c | 43 +++++++++++++++++++++++++++------
>>   1 file changed, 36 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
>> index cb367d9f7909..e1965d8c08c8 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
>> @@ -59,6 +59,7 @@ struct i915_mmu_object {
>>   	struct interval_tree_node it;
>>   	struct list_head link;
>>   	struct drm_i915_gem_object *obj;
>> +	bool active;
>>   	bool is_linear;
>>   };
>>
>> @@ -114,7 +115,8 @@ restart:
>>
>>   		obj = mo->obj;
>>
>> -		if (!kref_get_unless_zero(&obj->base.refcount))
>> +		if (!mo->active ||
>> +		    !kref_get_unless_zero(&obj->base.refcount))
>>   			continue;
>>
>>   		spin_unlock(&mn->lock);
>> @@ -151,7 +153,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
>>   		else
>>   			it = interval_tree_iter_first(&mn->objects, start, end);
>>   		if (it != NULL) {
>> -			obj = container_of(it, struct i915_mmu_object, it)->obj;
>> +			struct i915_mmu_object *mo =
>> +				container_of(it, struct i915_mmu_object, it);
>>
>>   			/* The mmu_object is released late when destroying the
>>   			 * GEM object so it is entirely possible to gain a
>> @@ -160,11 +163,9 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
>>   			 * the struct_mutex - and consequently use it after it
>>   			 * is freed and then double free it.
>>   			 */
>> -			if (!kref_get_unless_zero(&obj->base.refcount)) {
>> -				spin_unlock(&mn->lock);
>> -				serial = 0;
>> -				continue;
>> -			}
>> +			if (mo->active &&
>> +			    kref_get_unless_zero(&mo->obj->base.refcount))
>> +				obj = mo->obj;
>>
>>   			serial = mn->serial;
>>   		}
>> @@ -606,6 +607,20 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
>>   		wake_up_all(&to_i915(dev)->mm.queue);
>>   }
>>
>> +static void
>> +__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
>> +			      bool value)
>> +{
>> +#if defined(CONFIG_MMU_NOTIFIER)
>> +	if (obj->userptr.mmu_object == NULL)
>> +		return;
>> +
>> +	spin_lock(&obj->userptr.mmu_object->mn->lock);
>> +	obj->userptr.mmu_object->active = value;
>> +	spin_unlock(&obj->userptr.mmu_object->mn->lock);
>> +#endif
>> +}
>> +
>>   static int
>>   i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
>>   {
>> @@ -613,6 +628,18 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
>>   	struct page **pvec;
>>   	int pinned, ret;
>>
>> +	/* During mm_invalidate_range we need to cancel any userptr that
>> +	 * overlaps the range being invalidated. Doing so requires the
>> +	 * struct_mutex, and that risks recursion. In order to cause
>> +	 * recursion, the user must alias the userptr address space with
>> +	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
>> +	 * to invalidate that mmaping, mm_invalidate_range is called with
>> +	 * the userptr address *and* the struct_mutex held.  To prevent that
>> +	 * we set a flag under the i915_mmu_notifier spinlock to indicate
>> +	 * whether this object is valid.
>> +	 */
>> +	__i915_gem_userptr_set_active(obj, true);
>> +
>
> This will set mmu_object to active even if we're exiting early from here
> (because of error handling), creating another possibility for deadlock.

I think this deadlock is reproducible without MAP_FIXED, so the commit
message should probably be reworded to allow for the more generic case.

I reproduced it like this:

1. mmap, gem_userptr, munmap
2. Create a normal bo.
3. Loop a bit mmapping the above until it hits the same address as userptr.
4. Write to the BO mmap to set fault_mappable.
5. set_tiling on normal bo handle.
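
Roughly, in (hypothetical) igt-style helpers - not a drop-in test:

  ptr = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
             MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  userptr = gem_userptr(fd, ptr, SZ);               /* 1 */
  munmap(ptr, SZ);

  bo = gem_create(fd, SZ);                          /* 2 */
  do {                                              /* 3 */
          map = gem_mmap__gtt(fd, bo, SZ, PROT_WRITE);
  } while (map != ptr);  /* in practice, keep the misses mapped
                          * so the search advances */

  *map = 0;                                         /* 4 */
  gem_set_tiling(fd, bo, I915_TILING_X, 512);       /* 5: deadlock */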

I am still thinking about this active flag in the above scenario.

userptr->get_pages hasn't been called above so active == false. If 
between steps 4 and 5 we trigger get_pages, userptr transitions to 
active and set_tiling deadlocks. Or am I still missing something?

Regards,

Tvrtko
Chris Wilson June 30, 2015, 3:31 p.m. UTC | #3
On Tue, Jun 30, 2015 at 03:52:52PM +0100, Tvrtko Ursulin wrote:
> I think this deadlock is reproducible without MAP_FIXED, so commit
> message should be probably reworded to allow for the more generic
> case.

Ok, but it is still aliasing the address range; MAP_FIXED is the
easiest way to demonstrate that.
 
> I reproduced it like this:
> 
> 1. mmap, gem_userptr, munmap
> 2. Create a normal bo.
> 3. Loop a bit mmapping the above until it hits the same address as userptr.
> 4. Write to the BO mmap to set fault_mappable.
> 5. set_tiling on normal bo handle.
> 
> I am still thinking about this active flag in the above scenario.
> 
> userptr->get_pages hasn't been called above so active == false. If
> between steps 4 and 5 we trigger get_pages, userptr transitions to
> active and set_tiling deadlocks. Or I still missing something?

That suggests MAP_FIXED is special in invalidating the range unlike a
normal mmap().  This is arguing that we must always make any access after
invalidate_range be EFAULT. The danger here is that I am not sure if there
are any circumstances where invalidate_range is called and the vma
survives. Looking again at mm/, I can't see any place where we can
legally be expecting to reuse the same address for a userptr after the
invalidate.

I'm sure you have a testcase waiting for igt :)
-Chris
Chris Wilson June 30, 2015, 3:47 p.m. UTC | #4
On Tue, Jun 30, 2015 at 04:31:15PM +0100, Chris Wilson wrote:
> That suggests MAP_FIXED is special in invalidating the range unlike a
> normal mmap().  This is arguing that we must always make any access after
> invalidate_range be EFAULT. The danger here is that I am not sure if there
> are any circumstances where invalidate_range is called and the vma
> survives. Looking again at mm/, I can't see any place where we can
> legally be expecting to reuse the same address for a userptr after the
> invalidate.

Sigh, changing page protections (e.g. after a fork), if I am not
mistaken, also generates an invalidate_range. Back to the drawing
board here, I am afraid, as I think I need a worker to do
cancel_userptr, with all the complications of coordinating between the
multiple workers.
-Chris

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index cb367d9f7909..e1965d8c08c8 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -59,6 +59,7 @@  struct i915_mmu_object {
 	struct interval_tree_node it;
 	struct list_head link;
 	struct drm_i915_gem_object *obj;
+	bool active;
 	bool is_linear;
 };
 
@@ -114,7 +115,8 @@  restart:
 
 		obj = mo->obj;
 
-		if (!kref_get_unless_zero(&obj->base.refcount))
+		if (!mo->active ||
+		    !kref_get_unless_zero(&obj->base.refcount))
 			continue;
 
 		spin_unlock(&mn->lock);
@@ -151,7 +153,8 @@  static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 		else
 			it = interval_tree_iter_first(&mn->objects, start, end);
 		if (it != NULL) {
-			obj = container_of(it, struct i915_mmu_object, it)->obj;
+			struct i915_mmu_object *mo =
+				container_of(it, struct i915_mmu_object, it);
 
 			/* The mmu_object is released late when destroying the
 			 * GEM object so it is entirely possible to gain a
@@ -160,11 +163,9 @@  static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 			 * the struct_mutex - and consequently use it after it
 			 * is freed and then double free it.
 			 */
-			if (!kref_get_unless_zero(&obj->base.refcount)) {
-				spin_unlock(&mn->lock);
-				serial = 0;
-				continue;
-			}
+			if (mo->active &&
+			    kref_get_unless_zero(&mo->obj->base.refcount))
+				obj = mo->obj;
 
 			serial = mn->serial;
 		}
@@ -606,6 +607,20 @@  __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 		wake_up_all(&to_i915(dev)->mm.queue);
 }
 
+static void
+__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
+			      bool value)
+{
+#if defined(CONFIG_MMU_NOTIFIER)
+	if (obj->userptr.mmu_object == NULL)
+		return;
+
+	spin_lock(&obj->userptr.mmu_object->mn->lock);
+	obj->userptr.mmu_object->active = value;
+	spin_unlock(&obj->userptr.mmu_object->mn->lock);
+#endif
+}
+
 static int
 i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
@@ -613,6 +628,18 @@  i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	struct page **pvec;
 	int pinned, ret;
 
+	/* During mm_invalidate_range we need to cancel any userptr that
+	 * overlaps the range being invalidated. Doing so requires the
+	 * struct_mutex, and that risks recursion. In order to cause
+	 * recursion, the user must alias the userptr address space with
+	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
+	 * to invalidate that mmaping, mm_invalidate_range is called with
+	 * the userptr address *and* the struct_mutex held.  To prevent that
+	 * we set a flag under the i915_mmu_notifier spinlock to indicate
+	 * whether this object is valid.
+	 */
+	__i915_gem_userptr_set_active(obj, true);
+
 	/* If userspace should engineer that these pages are replaced in
 	 * the vma between us binding this page into the GTT and completion
 	 * of rendering... Their loss. If they change the mapping of their
@@ -732,6 +759,8 @@  i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 
 	sg_free_table(obj->pages);
 	kfree(obj->pages);
+
+	__i915_gem_userptr_set_active(obj, false);
 }
 
 static void