
[6/9] drm/shmem-helpers: Don't call get/put_pages on imported dma-buf in vmap

Message ID: 20200511093554.211493-7-daniel.vetter@ffwll.ch
State: New, archived
Series: shmem helper untangling

Commit Message

Daniel Vetter May 11, 2020, 9:35 a.m. UTC
There's no direct harm, because for the shmem helpers these are no-ops
on imported buffers. The trouble is in the locks these calls take - I
want to change dma_buf_vmap locking, and so need to make sure that we
only ever take certain locks on one side of the dma-buf interface:
either for exporters, or for importers.

Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Noralf Trønnes <noralf@tronnes.org>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
---
 drivers/gpu/drm/drm_gem_shmem_helper.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)
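
For reference, with this patch applied the locked vmap path ends up looking
roughly like this (reconstructed from the diff below; the obj and ret
declarations at the top are assumed from the existing helper and are not
touched by the patch):

static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	/* vmap is refcounted, only the first caller does the actual work */
	if (shmem->vmap_use_count++ > 0)
		return shmem->vaddr;

	if (obj->import_attach) {
		/* imported dma-buf: the exporter provides the vmap, no
		 * shmem page refcounting on the importer side */
		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
	} else {
		pgprot_t prot = PAGE_KERNEL;

		/* native shmem object: pin the pages and vmap them */
		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err;

		if (!shmem->map_cached)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);

		/* vmap failed: drop the pages reference we just took */
		if (!shmem->vaddr)
			drm_gem_shmem_put_pages(shmem);
	}

	if (!shmem->vaddr) {
		DRM_DEBUG_KMS("Failed to vmap pages\n");
		ret = -ENOMEM;
		goto err;
	}

	return shmem->vaddr;

err:
	shmem->vmap_use_count = 0;

	return ERR_PTR(ret);
}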

Comments

Thomas Zimmermann May 14, 2020, 7:16 a.m. UTC | #1
Hi

Am 11.05.20 um 11:35 schrieb Daniel Vetter:
> There's no direct harm, because for the shmem helpers these are noops
> on imported buffers. The trouble is in the locks these take - I want
> to change dma_buf_vmap locking, and so need to make sure that we only
> ever take certain locks on one side of the dma-buf interface: Either
> for exporters, or for importers.
> 
> Cc: Gerd Hoffmann <kraxel@redhat.com>
> Cc: Rob Herring <robh@kernel.org>
> Cc: Noralf Trønnes <noralf@tronnes.org>
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> ---
>  drivers/gpu/drm/drm_gem_shmem_helper.c | 17 +++++++++--------
>  1 file changed, 9 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 2a70159d50ef..b9cba5cc61c3 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -252,32 +252,33 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
>  	if (shmem->vmap_use_count++ > 0)
>  		return shmem->vaddr;
>  
> -	ret = drm_gem_shmem_get_pages(shmem);
> -	if (ret)
> -		goto err_zero_use;
> -
>  	if (obj->import_attach) {
>  		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
>  	} else {
>  		pgprot_t prot = PAGE_KERNEL;
>  
> +		ret = drm_gem_shmem_get_pages(shmem);
> +		if (ret)
> +			goto err;
> +
>  		if (!shmem->map_cached)
>  			prot = pgprot_writecombine(prot);
>  		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
>  				    VM_MAP, prot);
> +
> +		if (!shmem->vaddr)
> +			drm_gem_shmem_put_pages(shmem);
>  	}
>  
>  	if (!shmem->vaddr) {
>  		DRM_DEBUG_KMS("Failed to vmap pages\n");
>  		ret = -ENOMEM;
> -		goto err_put_pages;
> +		goto err;
>  	}
>  
>  	return shmem->vaddr;
>  
> -err_put_pages:
> -	drm_gem_shmem_put_pages(shmem);

I found the new code to be less readable. Maybe keep the error rollback
as-is and protect _put_pages() with if (!import_attach).

In any case

Acked-by: Thomas Zimmermann <tzimmermann@suse.de>

> -err_zero_use:
> +err:
>  	shmem->vmap_use_count = 0;
>  
>  	return ERR_PTR(ret);
>
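
A rough, untested sketch of the alternative Thomas suggests (keep the
existing err_put_pages/err_zero_use rollback and only skip the page
refcounting for imported buffers) could look like the following; this only
illustrates the review comment and is not part of the posted patch:

	/* ... refcount check at the top of the function unchanged ... */

	if (obj->import_attach) {
		/* imported dma-buf: the exporter provides the vmap */
		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
	} else {
		pgprot_t prot = PAGE_KERNEL;

		/* only native shmem objects have pages to pin */
		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (!shmem->map_cached)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
	}

	if (!shmem->vaddr) {
		DRM_DEBUG_KMS("Failed to vmap pages\n");
		ret = -ENOMEM;
		goto err_put_pages;
	}

	return shmem->vaddr;

err_put_pages:
	/* imported buffers never took a pages reference, so don't drop one */
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ERR_PTR(ret);
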
Daniel Vetter May 14, 2020, 12:49 p.m. UTC | #2
On Thu, May 14, 2020 at 09:16:54AM +0200, Thomas Zimmermann wrote:
> Hi
> 
> Am 11.05.20 um 11:35 schrieb Daniel Vetter:
> > There's no direct harm, because for the shmem helpers these are noops
> > on imported buffers. The trouble is in the locks these take - I want
> > to change dma_buf_vmap locking, and so need to make sure that we only
> > ever take certain locks on one side of the dma-buf interface: Either
> > for exporters, or for importers.
> > 
> > Cc: Gerd Hoffmann <kraxel@redhat.com>
> > Cc: Rob Herring <robh@kernel.org>
> > Cc: Noralf Trønnes <noralf@tronnes.org>
> > Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> > ---
> >  drivers/gpu/drm/drm_gem_shmem_helper.c | 17 +++++++++--------
> >  1 file changed, 9 insertions(+), 8 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> > index 2a70159d50ef..b9cba5cc61c3 100644
> > --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> > +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> > @@ -252,32 +252,33 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
> >  	if (shmem->vmap_use_count++ > 0)
> >  		return shmem->vaddr;
> >  
> > -	ret = drm_gem_shmem_get_pages(shmem);
> > -	if (ret)
> > -		goto err_zero_use;
> > -
> >  	if (obj->import_attach) {
> >  		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
> >  	} else {
> >  		pgprot_t prot = PAGE_KERNEL;
> >  
> > +		ret = drm_gem_shmem_get_pages(shmem);
> > +		if (ret)
> > +			goto err;
> > +
> >  		if (!shmem->map_cached)
> >  			prot = pgprot_writecombine(prot);
> >  		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
> >  				    VM_MAP, prot);
> > +
> > +		if (!shmem->vaddr)
> > +			drm_gem_shmem_put_pages(shmem);
> >  	}
> >  
> >  	if (!shmem->vaddr) {
> >  		DRM_DEBUG_KMS("Failed to vmap pages\n");
> >  		ret = -ENOMEM;
> > -		goto err_put_pages;
> > +		goto err;
> >  	}
> >  
> >  	return shmem->vaddr;
> >  
> > -err_put_pages:
> > -	drm_gem_shmem_put_pages(shmem);
> 
> I found the new code to be less readable. Maybe keep the error rollback
> as-is and protect _put_pages() with if (!import_attach).

Hm, yeah, I guess I can mostly leave this as-is; that at least makes the
diff smaller. Imo it all looks a bit awkward, but what I've done isn't
clearly better than just leaving things mostly where they were.
-Daniel

> 
> In any case
> 
> Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
> 
> > -err_zero_use:
> > +err:
> >  	shmem->vmap_use_count = 0;
> >  
> >  	return ERR_PTR(ret);
> > 
> 
> -- 
> Thomas Zimmermann
> Graphics Driver Developer
> SUSE Software Solutions Germany GmbH
> Maxfeldstr. 5, 90409 Nürnberg, Germany
> (HRB 36809, AG Nürnberg)
> Geschäftsführer: Felix Imendörffer
>

Patch

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 2a70159d50ef..b9cba5cc61c3 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -252,32 +252,33 @@  static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
 	if (shmem->vmap_use_count++ > 0)
 		return shmem->vaddr;
 
-	ret = drm_gem_shmem_get_pages(shmem);
-	if (ret)
-		goto err_zero_use;
-
 	if (obj->import_attach) {
 		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
 	} else {
 		pgprot_t prot = PAGE_KERNEL;
 
+		ret = drm_gem_shmem_get_pages(shmem);
+		if (ret)
+			goto err;
+
 		if (!shmem->map_cached)
 			prot = pgprot_writecombine(prot);
 		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
 				    VM_MAP, prot);
+
+		if (!shmem->vaddr)
+			drm_gem_shmem_put_pages(shmem);
 	}
 
 	if (!shmem->vaddr) {
 		DRM_DEBUG_KMS("Failed to vmap pages\n");
 		ret = -ENOMEM;
-		goto err_put_pages;
+		goto err;
 	}
 
 	return shmem->vaddr;
 
-err_put_pages:
-	drm_gem_shmem_put_pages(shmem);
-err_zero_use:
+err:
 	shmem->vmap_use_count = 0;
 
 	return ERR_PTR(ret);