
[9/9] drm/shmem-helpers: Simplify dma-buf importing

Message ID: 20200511093554.211493-10-daniel.vetter@ffwll.ch (mailing list archive)
State: New, archived
Series: shmem helper untangling

Commit Message

Daniel Vetter May 11, 2020, 9:35 a.m. UTC
- Ditch the ->pages array
- Make it a private gem bo, which means no shmem object, which means
  fireworks if anyone calls drm_gem_object_get_pages. But we've just
  made sure that's all covered.

Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Noralf Trønnes <noralf@tronnes.org>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
---
 drivers/gpu/drm/drm_gem_shmem_helper.c | 59 ++++++++++----------------
 1 file changed, 23 insertions(+), 36 deletions(-)

Comments

Thomas Zimmermann May 14, 2020, 7:44 a.m. UTC | #1
Hi

Am 11.05.20 um 11:35 schrieb Daniel Vetter:
> - Ditch the ->pages array
> - Make it a private gem bo, which means no shmem object, which means
>   fireworks if anyone calls drm_gem_object_get_pages. But we've just
>   made sure that's all covered.
> 
> Cc: Gerd Hoffmann <kraxel@redhat.com>
> Cc: Rob Herring <robh@kernel.org>
> Cc: Noralf Trønnes <noralf@tronnes.org>
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> ---
>  drivers/gpu/drm/drm_gem_shmem_helper.c | 59 ++++++++++----------------
>  1 file changed, 23 insertions(+), 36 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index f7011338813e..8c7d4f422b7b 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -35,22 +35,12 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
>  	.mmap = drm_gem_shmem_mmap,
>  };
>  
> -/**
> - * drm_gem_shmem_create - Allocate an object with the given size
> - * @dev: DRM device
> - * @size: Size of the object to allocate
> - *
> - * This function creates a shmem GEM object.
> - *
> - * Returns:
> - * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
> - * error code on failure.
> - */
> -struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
> +static struct drm_gem_shmem_object *
> +__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
>  {
>  	struct drm_gem_shmem_object *shmem;
>  	struct drm_gem_object *obj;
> -	int ret;
> +	int ret = 0;
>  
>  	size = PAGE_ALIGN(size);
>  
> @@ -64,7 +54,10 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
>  	if (!obj->funcs)
>  		obj->funcs = &drm_gem_shmem_funcs;
>  
> -	ret = drm_gem_object_init(dev, obj, size);
> +	if (private)
> +		drm_gem_private_object_init(dev, obj, size);
> +	else
> +		ret = drm_gem_object_init(dev, obj, size);
>  	if (ret)
>  		goto err_free;
>  
> @@ -96,6 +89,21 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
>  
>  	return ERR_PTR(ret);
>  }
> +/**
> + * drm_gem_shmem_create - Allocate an object with the given size
> + * @dev: DRM device
> + * @size: Size of the object to allocate
> + *
> + * This function creates a shmem GEM object.
> + *
> + * Returns:
> + * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
> + * error code on failure.
> + */
> +struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
> +{
> +	return __drm_gem_shmem_create(dev, size, false);
> +}
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
>  
>  /**
> @@ -115,7 +123,6 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
>  	if (obj->import_attach) {
>  		shmem->pages_use_count--;
>  		drm_prime_gem_destroy(obj, shmem->sgt);
> -		kvfree(shmem->pages);
>  	} else {
>  		if (shmem->sgt) {
>  			dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
> @@ -371,7 +378,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
>  	struct drm_gem_shmem_object *shmem;
>  	int ret;
>  
> -	shmem = drm_gem_shmem_create(dev, size);
> +	shmem = __drm_gem_shmem_create(dev, size, true);
>  	if (IS_ERR(shmem))
>  		return shmem;
>  
> @@ -695,36 +702,16 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
>  				    struct sg_table *sgt)
>  {
>  	size_t size = PAGE_ALIGN(attach->dmabuf->size);
> -	size_t npages = size >> PAGE_SHIFT;
>  	struct drm_gem_shmem_object *shmem;
> -	int ret;
>  
>  	shmem = drm_gem_shmem_create(dev, size);
>  	if (IS_ERR(shmem))
>  		return ERR_CAST(shmem);
>  
> -	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
> -	if (!shmem->pages) {
> -		ret = -ENOMEM;
> -		goto err_free_gem;
> -	}
> -
> -	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
> -	if (ret < 0)
> -		goto err_free_array;
> -
>  	shmem->sgt = sgt;
> -	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */

This counter protected drm_gem_shmem_get_pages() from being executed on
imported buffers. I guess that previous patches sorted out all the
instances where this could occur. If so, the current patch looks
correct. I'm not sure if the overall code is really better than what we
have ATM, but anyway
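
For context, the guard being discussed is the refcount fast path in the
helper's get_pages code — a simplified sketch of the pre-patch logic in
drm_gem_shmem_helper.c, not the verbatim source:

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	/* pages_use_count == 1 on imports turned this into a no-op ... */
	if (shmem->pages_use_count++ > 0)
		return 0;

	/* ... so drm_gem_get_pages(), which needs a shmem-backed obj->filp,
	 * was never reached for imported buffers.
	 */
	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	shmem->pages = pages;

	return 0;
}

With imports now created as private GEM objects that implicit protection is
gone, and callers have to stay away from this path entirely, which is what
the earlier patches in the series are meant to ensure.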

Acked-by: Thomas Zimmermann <tzimmermann@suse.de>

>  
>  	DRM_DEBUG_PRIME("size = %zu\n", size);
>  
>  	return &shmem->base;
> -
> -err_free_array:
> -	kvfree(shmem->pages);
> -err_free_gem:
> -	drm_gem_object_put_unlocked(&shmem->base);
> -
> -	return ERR_PTR(ret);
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
>
Daniel Vetter May 14, 2020, 12:55 p.m. UTC | #2
On Thu, May 14, 2020 at 09:44:02AM +0200, Thomas Zimmermann wrote:
> Hi
> 
> Am 11.05.20 um 11:35 schrieb Daniel Vetter:
> > - Ditch the ->pages array
> > - Make it a private gem bo, which means no shmem object, which means
> >   fireworks if anyone calls drm_gem_object_get_pages. But we've just
> >   made sure that's all covered.
> > 
> > Cc: Gerd Hoffmann <kraxel@redhat.com>
> > Cc: Rob Herring <robh@kernel.org>
> > Cc: Noralf Trønnes <noralf@tronnes.org>
> > Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> > ---
> >  drivers/gpu/drm/drm_gem_shmem_helper.c | 59 ++++++++++----------------
> >  1 file changed, 23 insertions(+), 36 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> > index f7011338813e..8c7d4f422b7b 100644
> > --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> > +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> > @@ -35,22 +35,12 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
> >  	.mmap = drm_gem_shmem_mmap,
> >  };
> >  
> > -/**
> > - * drm_gem_shmem_create - Allocate an object with the given size
> > - * @dev: DRM device
> > - * @size: Size of the object to allocate
> > - *
> > - * This function creates a shmem GEM object.
> > - *
> > - * Returns:
> > - * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
> > - * error code on failure.
> > - */
> > -struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
> > +static struct drm_gem_shmem_object *
> > +__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
> >  {
> >  	struct drm_gem_shmem_object *shmem;
> >  	struct drm_gem_object *obj;
> > -	int ret;
> > +	int ret = 0;
> >  
> >  	size = PAGE_ALIGN(size);
> >  
> > @@ -64,7 +54,10 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
> >  	if (!obj->funcs)
> >  		obj->funcs = &drm_gem_shmem_funcs;
> >  
> > -	ret = drm_gem_object_init(dev, obj, size);
> > +	if (private)
> > +		drm_gem_private_object_init(dev, obj, size);
> > +	else
> > +		ret = drm_gem_object_init(dev, obj, size);
> >  	if (ret)
> >  		goto err_free;
> >  
> > @@ -96,6 +89,21 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
> >  
> >  	return ERR_PTR(ret);
> >  }
> > +/**
> > + * drm_gem_shmem_create - Allocate an object with the given size
> > + * @dev: DRM device
> > + * @size: Size of the object to allocate
> > + *
> > + * This function creates a shmem GEM object.
> > + *
> > + * Returns:
> > + * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
> > + * error code on failure.
> > + */
> > +struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
> > +{
> > +	return __drm_gem_shmem_create(dev, size, false);
> > +}
> >  EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
> >  
> >  /**
> > @@ -115,7 +123,6 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
> >  	if (obj->import_attach) {
> >  		shmem->pages_use_count--;
> >  		drm_prime_gem_destroy(obj, shmem->sgt);
> > -		kvfree(shmem->pages);
> >  	} else {
> >  		if (shmem->sgt) {
> >  			dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
> > @@ -371,7 +378,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
> >  	struct drm_gem_shmem_object *shmem;
> >  	int ret;
> >  
> > -	shmem = drm_gem_shmem_create(dev, size);
> > +	shmem = __drm_gem_shmem_create(dev, size, true);
> >  	if (IS_ERR(shmem))
> >  		return shmem;
> >  
> > @@ -695,36 +702,16 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
> >  				    struct sg_table *sgt)
> >  {
> >  	size_t size = PAGE_ALIGN(attach->dmabuf->size);
> > -	size_t npages = size >> PAGE_SHIFT;
> >  	struct drm_gem_shmem_object *shmem;
> > -	int ret;
> >  
> >  	shmem = drm_gem_shmem_create(dev, size);
> >  	if (IS_ERR(shmem))
> >  		return ERR_CAST(shmem);
> >  
> > -	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
> > -	if (!shmem->pages) {
> > -		ret = -ENOMEM;
> > -		goto err_free_gem;
> > -	}
> > -
> > -	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
> > -	if (ret < 0)
> > -		goto err_free_array;
> > -
> >  	shmem->sgt = sgt;
> > -	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
> 
> This counter protected drm_gem_shmem_get_pages() from being executed on
> imported buffers. I guess that previous patches sorted out all the
> instances where this could occur. If so, the current patch looks
> correct. I'm not sure if the overall code is really better than what we
> have ATM, but anyway

The goal was to clearly sort these cases out; iirc we had callers of
get_pages doing the wrong thing, but I tried to review them all. Some got
removed while this series was hanging around in my tree somewhere.

What I wanted to do in the end is replace all mutex_lock with
dma_resv_lock, which now should be doable. Except I need to audit all the
drivers, and some want a _locked variant since they are already holding the
lock. That's roughly the point where I gave up on this endeavour, at
least for now.

But if we'd get there then all the various helpers we have (cma, shmem,
vram) would more or less properly use dma_resv_lock as their protection
concept. That's kinda neat since with the dynamic dma-buf stuff
dma_resv_lock really becomes _the_ buffer lock for drivers, so some
motivation to move towards that.
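
For illustration, the conversion described here could look roughly like the
following at one of the helper entry points. This is a hypothetical sketch,
not code from this series; it assumes the per-object pages_lock mutex is
what gets replaced and reuses existing names from drm_gem_shmem_helper.c:

/* context: drm_gem_shmem_helper.c, with <linux/dma-resv.h> available */

int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	/* was: mutex_lock(&shmem->pages_lock) inside drm_gem_shmem_get_pages() */
	ret = dma_resv_lock_interruptible(obj->resv, NULL);
	if (ret)
		return ret;

	ret = drm_gem_shmem_get_pages_locked(shmem);

	dma_resv_unlock(obj->resv);

	return ret;
}

Drivers that already hold the reservation lock (e.g. in their job submission
path) would call the _locked variant directly, which is the audit mentioned
above.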

Anyway if you don't feel like this is all that useful without the
dma_resv_lock work on top, I guess I can merge up to the doc patch and
leave the others out. Not sure myself, thoughts?

Thanks for taking a look.
-Daniel

> 
> Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
> 
> >  
> >  	DRM_DEBUG_PRIME("size = %zu\n", size);
> >  
> >  	return &shmem->base;
> > -
> > -err_free_array:
> > -	kvfree(shmem->pages);
> > -err_free_gem:
> > -	drm_gem_object_put_unlocked(&shmem->base);
> > -
> > -	return ERR_PTR(ret);
> >  }
> >  EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
> > 
> 
> -- 
> Thomas Zimmermann
> Graphics Driver Developer
> SUSE Software Solutions Germany GmbH
> Maxfeldstr. 5, 90409 Nürnberg, Germany
> (HRB 36809, AG Nürnberg)
> Geschäftsführer: Felix Imendörffer
>
Thomas Zimmermann May 14, 2020, 3:26 p.m. UTC | #3
Hi

Am 14.05.20 um 14:55 schrieb Daniel Vetter:
> On Thu, May 14, 2020 at 09:44:02AM +0200, Thomas Zimmermann wrote:
>> Hi
>>
>> Am 11.05.20 um 11:35 schrieb Daniel Vetter:
>>> - Ditch the ->pages array
>>> - Make it a private gem bo, which means no shmem object, which means
>>>   fireworks if anyone calls drm_gem_object_get_pages. But we've just
>>>   made sure that's all covered.
>>>
>>> Cc: Gerd Hoffmann <kraxel@redhat.com>
>>> Cc: Rob Herring <robh@kernel.org>
>>> Cc: Noralf Trønnes <noralf@tronnes.org>
>>> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
>>> ---
>>>  drivers/gpu/drm/drm_gem_shmem_helper.c | 59 ++++++++++----------------
>>>  1 file changed, 23 insertions(+), 36 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
>>> index f7011338813e..8c7d4f422b7b 100644
>>> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
>>> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
>>> @@ -35,22 +35,12 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
>>>  	.mmap = drm_gem_shmem_mmap,
>>>  };
>>>  
>>> -/**
>>> - * drm_gem_shmem_create - Allocate an object with the given size
>>> - * @dev: DRM device
>>> - * @size: Size of the object to allocate
>>> - *
>>> - * This function creates a shmem GEM object.
>>> - *
>>> - * Returns:
>>> - * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
>>> - * error code on failure.
>>> - */
>>> -struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
>>> +static struct drm_gem_shmem_object *
>>> +__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
>>>  {
>>>  	struct drm_gem_shmem_object *shmem;
>>>  	struct drm_gem_object *obj;
>>> -	int ret;
>>> +	int ret = 0;
>>>  
>>>  	size = PAGE_ALIGN(size);
>>>  
>>> @@ -64,7 +54,10 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
>>>  	if (!obj->funcs)
>>>  		obj->funcs = &drm_gem_shmem_funcs;
>>>  
>>> -	ret = drm_gem_object_init(dev, obj, size);
>>> +	if (private)
>>> +		drm_gem_private_object_init(dev, obj, size);
>>> +	else
>>> +		ret = drm_gem_object_init(dev, obj, size);
>>>  	if (ret)
>>>  		goto err_free;
>>>  
>>> @@ -96,6 +89,21 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
>>>  
>>>  	return ERR_PTR(ret);
>>>  }
>>> +/**
>>> + * drm_gem_shmem_create - Allocate an object with the given size
>>> + * @dev: DRM device
>>> + * @size: Size of the object to allocate
>>> + *
>>> + * This function creates a shmem GEM object.
>>> + *
>>> + * Returns:
>>> + * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
>>> + * error code on failure.
>>> + */
>>> +struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
>>> +{
>>> +	return __drm_gem_shmem_create(dev, size, false);
>>> +}
>>>  EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
>>>  
>>>  /**
>>> @@ -115,7 +123,6 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
>>>  	if (obj->import_attach) {
>>>  		shmem->pages_use_count--;
>>>  		drm_prime_gem_destroy(obj, shmem->sgt);
>>> -		kvfree(shmem->pages);
>>>  	} else {
>>>  		if (shmem->sgt) {
>>>  			dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
>>> @@ -371,7 +378,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
>>>  	struct drm_gem_shmem_object *shmem;
>>>  	int ret;
>>>  
>>> -	shmem = drm_gem_shmem_create(dev, size);
>>> +	shmem = __drm_gem_shmem_create(dev, size, true);
>>>  	if (IS_ERR(shmem))
>>>  		return shmem;
>>>  
>>> @@ -695,36 +702,16 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
>>>  				    struct sg_table *sgt)
>>>  {
>>>  	size_t size = PAGE_ALIGN(attach->dmabuf->size);
>>> -	size_t npages = size >> PAGE_SHIFT;
>>>  	struct drm_gem_shmem_object *shmem;
>>> -	int ret;
>>>  
>>>  	shmem = drm_gem_shmem_create(dev, size);
>>>  	if (IS_ERR(shmem))
>>>  		return ERR_CAST(shmem);
>>>  
>>> -	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
>>> -	if (!shmem->pages) {
>>> -		ret = -ENOMEM;
>>> -		goto err_free_gem;
>>> -	}
>>> -
>>> -	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
>>> -	if (ret < 0)
>>> -		goto err_free_array;
>>> -
>>>  	shmem->sgt = sgt;
>>> -	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
>>
>> This counter protected drm_gem_shmem_get_pages() from being executed on
>> imported buffers. I guess that previous patches sorted out all the
>> instances where this could occur. If so, the current patch looks
>> correct. I'm not sure if the overall code is really better than what we
>> have ATM, but anyway
> 
> The goal was to clearly sort these cases out; iirc we had callers of
> get_pages doing the wrong thing, but I tried to review them all. Some got
> removed while this series was hanging around in my tree somewhere.
> 
> What I wanted to do in the end is replace all mutex_lock with
> dma_resv_lock, which now should be doable. Except I need to audit all the
> drivers, and some want a _locked variant since they are already holding the
> lock. That's roughly the point where I gave up on this endeavour, at
> least for now.
> 
> But if we'd get there then all the various helpers we have (cma, shmem,
> vram) would more or less properly use dma_resv_lock as their protection
> concept. That's kinda neat since with the dynamic dma-buf stuff
> dma_resv_lock really becomes _the_ buffer lock for drivers, so some
> motivation to move towards that.
> 
> Anyway if you don't feel like this is all that useful without the
> dma_resv_lock work on top, I guess I can merge up to the doc patch and
> leave the others out. Not sure myself, thoughts?

Don't get me wrong, it's useful. The dma-buf support is just always a
bit special and bolted-on in all memory managers.

Please go ahead and merge it if you like. The clean separation of
dma-buf calls is better than what I did in my shmem patches. I'll rebase
my stuff on top of whatever you end up merging.

Best regards
Thomas

> 
> Thanks for taking a look.
> -Daniel
> 
>>
>> Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
>>
>>>  
>>>  	DRM_DEBUG_PRIME("size = %zu\n", size);
>>>  
>>>  	return &shmem->base;
>>> -
>>> -err_free_array:
>>> -	kvfree(shmem->pages);
>>> -err_free_gem:
>>> -	drm_gem_object_put_unlocked(&shmem->base);
>>> -
>>> -	return ERR_PTR(ret);
>>>  }
>>>  EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
>>>
>>
>> -- 
>> Thomas Zimmermann
>> Graphics Driver Developer
>> SUSE Software Solutions Germany GmbH
>> Maxfeldstr. 5, 90409 Nürnberg, Germany
>> (HRB 36809, AG Nürnberg)
>> Geschäftsführer: Felix Imendörffer
>>
> 
> 
> 
>
Thomas Zimmermann June 15, 2020, 8:23 a.m. UTC | #4
Hi Daniel

this patch causes a segmentation fault.

Am 11.05.20 um 11:35 schrieb Daniel Vetter:
> - Ditch the ->pages array
> - Make it a private gem bo, which means no shmem object, which means
>   fireworks if anyone calls drm_gem_object_get_pages. But we've just
>   made sure that's all covered.
> 
> Cc: Gerd Hoffmann <kraxel@redhat.com>
> Cc: Rob Herring <robh@kernel.org>
> Cc: Noralf Trønnes <noralf@tronnes.org>
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> ---
>  drivers/gpu/drm/drm_gem_shmem_helper.c | 59 ++++++++++----------------
>  1 file changed, 23 insertions(+), 36 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index f7011338813e..8c7d4f422b7b 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -35,22 +35,12 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
>  	.mmap = drm_gem_shmem_mmap,
>  };
>  
> -/**
> - * drm_gem_shmem_create - Allocate an object with the given size
> - * @dev: DRM device
> - * @size: Size of the object to allocate
> - *
> - * This function creates a shmem GEM object.
> - *
> - * Returns:
> - * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
> - * error code on failure.
> - */
> -struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
> +static struct drm_gem_shmem_object *
> +__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
>  {
>  	struct drm_gem_shmem_object *shmem;
>  	struct drm_gem_object *obj;
> -	int ret;
> +	int ret = 0;
>  
>  	size = PAGE_ALIGN(size);
>  
> @@ -64,7 +54,10 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
>  	if (!obj->funcs)
>  		obj->funcs = &drm_gem_shmem_funcs;
>  
> -	ret = drm_gem_object_init(dev, obj, size);
> +	if (private)
> +		drm_gem_private_object_init(dev, obj, size);

This call doesn't set obj->filp, which is dereferenced further below at
[1]. This happens from dumb_create().

Best regards
Thomas

[1]
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/gpu/drm/drm_gem_shmem_helper.c?h=v5.8-rc1#n87
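
A hedged reconstruction of the failing path, assuming the dereference at [1]
is the mapping_set_gfp_mask() call on obj->filp->f_mapping further down in
__drm_gem_shmem_create(): dumb_create() now reaches
__drm_gem_shmem_create(dev, size, true) via drm_gem_shmem_create_with_handle(),
drm_gem_private_object_init() leaves obj->filp NULL, and the unconditional
access oopses. A guard along these lines (or passing private=true only for
the import path, as the commit message suggests) would avoid the NULL
dereference — an illustration, not necessarily the eventual fix:

	if (!private) {
		/* Only shmem-backed objects have obj->filp; private objects
		 * (dma-buf imports) must not touch the shmem mapping at all.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping,
				     GFP_HIGHUSER | __GFP_RECLAIMABLE);
	}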

> +	else
> +		ret = drm_gem_object_init(dev, obj, size);
>  	if (ret)
>  		goto err_free;
>  
> @@ -96,6 +89,21 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
>  
>  	return ERR_PTR(ret);
>  }
> +/**
> + * drm_gem_shmem_create - Allocate an object with the given size
> + * @dev: DRM device
> + * @size: Size of the object to allocate
> + *
> + * This function creates a shmem GEM object.
> + *
> + * Returns:
> + * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
> + * error code on failure.
> + */
> +struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
> +{
> +	return __drm_gem_shmem_create(dev, size, false);
> +}
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
>  
>  /**
> @@ -115,7 +123,6 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
>  	if (obj->import_attach) {
>  		shmem->pages_use_count--;
>  		drm_prime_gem_destroy(obj, shmem->sgt);
> -		kvfree(shmem->pages);
>  	} else {
>  		if (shmem->sgt) {
>  			dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
> @@ -371,7 +378,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
>  	struct drm_gem_shmem_object *shmem;
>  	int ret;
>  
> -	shmem = drm_gem_shmem_create(dev, size);
> +	shmem = __drm_gem_shmem_create(dev, size, true);
>  	if (IS_ERR(shmem))
>  		return shmem;
>  
> @@ -695,36 +702,16 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
>  				    struct sg_table *sgt)
>  {
>  	size_t size = PAGE_ALIGN(attach->dmabuf->size);
> -	size_t npages = size >> PAGE_SHIFT;
>  	struct drm_gem_shmem_object *shmem;
> -	int ret;
>  
>  	shmem = drm_gem_shmem_create(dev, size);
>  	if (IS_ERR(shmem))
>  		return ERR_CAST(shmem);
>  
> -	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
> -	if (!shmem->pages) {
> -		ret = -ENOMEM;
> -		goto err_free_gem;
> -	}
> -
> -	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
> -	if (ret < 0)
> -		goto err_free_array;
> -
>  	shmem->sgt = sgt;
> -	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
>  
>  	DRM_DEBUG_PRIME("size = %zu\n", size);
>  
>  	return &shmem->base;
> -
> -err_free_array:
> -	kvfree(shmem->pages);
> -err_free_gem:
> -	drm_gem_object_put_unlocked(&shmem->base);
> -
> -	return ERR_PTR(ret);
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
>

Patch

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index f7011338813e..8c7d4f422b7b 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -35,22 +35,12 @@  static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
 	.mmap = drm_gem_shmem_mmap,
 };
 
-/**
- * drm_gem_shmem_create - Allocate an object with the given size
- * @dev: DRM device
- * @size: Size of the object to allocate
- *
- * This function creates a shmem GEM object.
- *
- * Returns:
- * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
- * error code on failure.
- */
-struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
+static struct drm_gem_shmem_object *
+__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
 {
 	struct drm_gem_shmem_object *shmem;
 	struct drm_gem_object *obj;
-	int ret;
+	int ret = 0;
 
 	size = PAGE_ALIGN(size);
 
@@ -64,7 +54,10 @@  struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
 	if (!obj->funcs)
 		obj->funcs = &drm_gem_shmem_funcs;
 
-	ret = drm_gem_object_init(dev, obj, size);
+	if (private)
+		drm_gem_private_object_init(dev, obj, size);
+	else
+		ret = drm_gem_object_init(dev, obj, size);
 	if (ret)
 		goto err_free;
 
@@ -96,6 +89,21 @@  struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
 
 	return ERR_PTR(ret);
 }
+/**
+ * drm_gem_shmem_create - Allocate an object with the given size
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ *
+ * This function creates a shmem GEM object.
+ *
+ * Returns:
+ * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
+{
+	return __drm_gem_shmem_create(dev, size, false);
+}
 EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
 
 /**
@@ -115,7 +123,6 @@  void drm_gem_shmem_free_object(struct drm_gem_object *obj)
 	if (obj->import_attach) {
 		shmem->pages_use_count--;
 		drm_prime_gem_destroy(obj, shmem->sgt);
-		kvfree(shmem->pages);
 	} else {
 		if (shmem->sgt) {
 			dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
@@ -371,7 +378,7 @@  drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
 	struct drm_gem_shmem_object *shmem;
 	int ret;
 
-	shmem = drm_gem_shmem_create(dev, size);
+	shmem = __drm_gem_shmem_create(dev, size, true);
 	if (IS_ERR(shmem))
 		return shmem;
 
@@ -695,36 +702,16 @@  drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
 				    struct sg_table *sgt)
 {
 	size_t size = PAGE_ALIGN(attach->dmabuf->size);
-	size_t npages = size >> PAGE_SHIFT;
 	struct drm_gem_shmem_object *shmem;
-	int ret;
 
 	shmem = drm_gem_shmem_create(dev, size);
 	if (IS_ERR(shmem))
 		return ERR_CAST(shmem);
 
-	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-	if (!shmem->pages) {
-		ret = -ENOMEM;
-		goto err_free_gem;
-	}
-
-	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
-	if (ret < 0)
-		goto err_free_array;
-
 	shmem->sgt = sgt;
-	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
 
 	DRM_DEBUG_PRIME("size = %zu\n", size);
 
 	return &shmem->base;
-
-err_free_array:
-	kvfree(shmem->pages);
-err_free_gem:
-	drm_gem_object_put_unlocked(&shmem->base);
-
-	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);