
fixup! drm/msm: Separate locking of buffer resources from struct_mutex

Message ID 20170616142207.20821-1-robdclark@gmail.com (mailing list archive)
State Not Applicable, archived
Delegated to: Andy Gross

Commit Message

Rob Clark June 16, 2017, 2:22 p.m. UTC
---
Ok, 2nd fixup to handle vmap shrinking.

 drivers/gpu/drm/msm/msm_gem.c | 44 +++++++++++++++++++++++++++++++++++--------
 1 file changed, 36 insertions(+), 8 deletions(-)

Comments

Sushmita Susheelendra June 16, 2017, 9:32 p.m. UTC | #1
Hi Rob,

This looks good to me!

Just one nit: msm_gem_vunmap becomes very shrinker-specific, since it
takes msm_obj->lock with the shrinker lock class. Should we have the
caller, i.e. msm_gem_shrinker_vmap, hold it instead? Or change its name
to msm_gem_vunmap_shrinker or something like that...?

It does make sense to do the madv/priv->inactive_list locking cleanup 
separately.

Thanks,
Sushmita
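
(For illustration only, not part of the patch: a minimal sketch of the
caller-side alternative suggested above, using a hypothetical
vunmap_one_from_shrinker() helper rather than the real
msm_gem_shrinker_vmap callback.)

static void vunmap_one_from_shrinker(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	/* the shrinker path takes msm_obj->lock with the shrinker
	 * subclass itself and calls the _locked helper, so a generic
	 * msm_gem_vunmap() would not need to know about the subclass
	 */
	mutex_lock_nested(&msm_obj->lock, OBJ_LOCK_SHRINKER);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}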


On 2017-06-16 08:22, Rob Clark wrote:
> [...]

Rob Clark June 16, 2017, 9:44 p.m. UTC | #2
On Fri, Jun 16, 2017 at 5:32 PM,  <ssusheel@codeaurora.org> wrote:
> Hi Rob,
>
> This looks good to me!
>
> Just one nit: msm_gem_vunmap becomes very shrinker-specific, since it takes
> the msm_obj->lock with the shrinker lock class. Should we have the caller, i.e.
> msm_gem_shrinker_vmap, hold it instead? Or change its name to
> msm_gem_vunmap_shrinker or something like that...?

Hmm, I suppose I could pass the lock class in to msm_gem_vunmap() in
case we ever want to call it elsewhere (iirc, i915 did something like
that)

BR,
-R
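
(A minimal sketch of the variant suggested above, purely illustrative
and not part of this patch: the lockdep subclass becomes a parameter,
so callers other than the shrinker could reuse the helper.)

void msm_gem_vunmap(struct drm_gem_object *obj, unsigned int subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	/* caller picks the lockdep subclass, e.g. OBJ_LOCK_SHRINKER */
	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

The shrinker path would then call msm_gem_vunmap(obj, OBJ_LOCK_SHRINKER).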

Sushmita Susheelendra June 16, 2017, 11:04 p.m. UTC | #3
Sounds good.

On 2017-06-16 15:44, Rob Clark wrote:
> On Fri, Jun 16, 2017 at 5:32 PM,  <ssusheel@codeaurora.org> wrote:
>> [...]
> 
> Hmm, I suppose I could pass the lock class in to msm_gem_vunmap() in
> case we ever want to call it elsewhere (iirc, i915 did something like
> that)
> 
> BR,
> -R

Patch

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f5d1f84..3190e05 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -42,6 +42,9 @@  enum {
 	OBJ_LOCK_SHRINKER,
 };
 
+static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
+
+
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -484,6 +487,7 @@  int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	int ret = 0;
 
 	mutex_lock(&msm_obj->lock);
 
@@ -492,22 +496,35 @@  void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 		return ERR_PTR(-EBUSY);
 	}
 
+	/* increment vmap_count *before* vmap() call, so shrinker can
+	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
+	 * This guarantees that we won't try to msm_gem_vunmap() this
+	 * same object from within the vmap() call (while we already
+	 * hold msm_obj->lock)
+	 */
+	msm_obj->vmap_count++;
+
 	if (!msm_obj->vaddr) {
 		struct page **pages = get_pages(obj);
 		if (IS_ERR(pages)) {
-			mutex_unlock(&msm_obj->lock);
-			return ERR_CAST(pages);
+			ret = PTR_ERR(pages);
+			goto fail;
 		}
 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 		if (msm_obj->vaddr == NULL) {
-			mutex_unlock(&msm_obj->lock);
-			return ERR_PTR(-ENOMEM);
+			ret = -ENOMEM;
+			goto fail;
 		}
 	}
-	msm_obj->vmap_count++;
+
 	mutex_unlock(&msm_obj->lock);
 	return msm_obj->vaddr;
+
+fail:
+	msm_obj->vmap_count--;
+	mutex_unlock(&msm_obj->lock);
+	return ERR_PTR(ret);
 }
 
 void msm_gem_put_vaddr(struct drm_gem_object *obj)
@@ -554,7 +571,7 @@  void msm_gem_purge(struct drm_gem_object *obj)
 
 	put_iova(obj);
 
-	msm_gem_vunmap(obj);
+	msm_gem_vunmap_locked(obj);
 
 	put_pages(obj);
 
@@ -576,10 +593,12 @@  void msm_gem_purge(struct drm_gem_object *obj)
 	mutex_unlock(&msm_obj->lock);
 }
 
-void msm_gem_vunmap(struct drm_gem_object *obj)
+static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
 	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
 		return;
 
@@ -587,6 +606,15 @@  void msm_gem_vunmap(struct drm_gem_object *obj)
 	msm_obj->vaddr = NULL;
 }
 
+void msm_gem_vunmap(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	mutex_lock_nested(&msm_obj->lock, OBJ_LOCK_SHRINKER);
+	msm_gem_vunmap_locked(obj);
+	mutex_unlock(&msm_obj->lock);
+}
+
 /* must be called before _move_to_active().. */
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive)
@@ -799,7 +827,7 @@  void msm_gem_free_object(struct drm_gem_object *obj)
 
 		drm_prime_gem_destroy(obj, msm_obj->sgt);
 	} else {
-		msm_gem_vunmap(obj);
+		msm_gem_vunmap_locked(obj);
 		put_pages(obj);
 	}