
[1/2] dma-buf: start caching of sg_table objects v2

Message ID 20190514140515.35566-2-christian.koenig@amd.com (mailing list archive)
State New, archived
Series [1/2] dma-buf: start caching of sg_table objects v2

Commit Message

Christian König May 14, 2019, 2:05 p.m. UTC
To allow a smooth transition from pinning buffer objects to dynamic
invalidation we first start to cache the sg_table for an attachment.

v2: keep closer to the DRM implementation

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-buf.c | 27 +++++++++++++++++++++++++--
 include/linux/dma-buf.h   | 13 +++++++++++++
 2 files changed, 38 insertions(+), 2 deletions(-)

Comments

Daniel Vetter May 15, 2019, 8:58 a.m. UTC | #1
On Tue, May 14, 2019 at 04:05:14PM +0200, Christian König wrote:
> To allow a smooth transition from pinning buffer objects to dynamic
> invalidation we first start to cache the sg_table for an attachment.
> 
> v2: keep closer to the DRM implementation
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>

i915 doesn't use the prime helpers, so this would be a change for us. Not
sure that matters. Since we can't use this as an easy way out of the
locking problem around reservation_object I'm not sure how useful it is to
move this ahead. At least until we have a clearer picture on how we plan
to roll out the dynamic dma-buf stuff. Once we have a bit more clarity
there I'm all for merging prep work, but as-is I'd be cautious ... This
stuff is a lot more tricky than it seems at first :-/
-Daniel
> [...]
Christian König May 15, 2019, 9:25 a.m. UTC | #2
Am 15.05.19 um 10:58 schrieb Daniel Vetter:
> On Tue, May 14, 2019 at 04:05:14PM +0200, Christian König wrote:
>> To allow a smooth transition from pinning buffer objects to dynamic
>> invalidation we first start to cache the sg_table for an attachment.
>>
>> v2: keep closer to the DRM implementation
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
> i915 doesn't use the prime helpers, so this would be a change for us.

Not a problem at all, see below. I've added a cache_sgt_mapping member
into the dma_buf_ops structure. The functionality is only enabled when
this member is true.
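
For illustration, a minimal sketch of how an exporter would opt in; the
my_exporter_* names are placeholders, only the cache_sgt_mapping field
comes from this patch:

/* Hypothetical exporter ops; only .cache_sgt_mapping is new here. */
static const struct dma_buf_ops my_exporter_dma_buf_ops = {
	.cache_sgt_mapping = true,	/* let dma-buf cache the first mapping */
	.map_dma_buf	   = my_exporter_map_dma_buf,
	.unmap_dma_buf	   = my_exporter_unmap_dma_buf,
	.release	   = my_exporter_release,
	/* ... remaining callbacks unchanged ... */
};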

So I'm just moving code around in those two patches and no functional 
change at all.

Regards,
Christian.

> [...]
Daniel Vetter May 16, 2019, 6:26 a.m. UTC | #3
On Wed, May 15, 2019 at 11:25 AM Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
>
> Am 15.05.19 um 10:58 schrieb Daniel Vetter:
> > On Tue, May 14, 2019 at 04:05:14PM +0200, Christian König wrote:
> >> To allow a smooth transition from pinning buffer objects to dynamic
> >> invalidation we first start to cache the sg_table for an attachment.
> >>
> >> v2: keep closer to the DRM implementation
> >>
> >> Signed-off-by: Christian König <christian.koenig@amd.com>
> > i915 doesn't use the prime helpers, so this would be a change for us.
>
> > Not a problem at all, see below. I've added a cache_sgt_mapping member
> > into the dma_buf_ops structure. The functionality is only enabled when
> > this member is true.

Hm right I missed that.

> So I'm just moving code around in those two patches and no functional
> change at all.

Still not sure. I'll look at your latest dma-buf series to see how it
all fits together.
-Daniel

> [...]


--
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
Daniel Vetter May 22, 2019, 8:11 a.m. UTC | #4
On Thu, May 16, 2019 at 08:26:36AM +0200, Daniel Vetter wrote:
> On Wed, May 15, 2019 at 11:25 AM Christian König
> <ckoenig.leichtzumerken@gmail.com> wrote:
> >
> > Am 15.05.19 um 10:58 schrieb Daniel Vetter:
> > > On Tue, May 14, 2019 at 04:05:14PM +0200, Christian König wrote:
> > >> To allow a smooth transition from pinning buffer objects to dynamic
> > >> invalidation we first start to cache the sg_table for an attachment.
> > >>
> > >> v2: keep closer to the DRM implementation
> > >>
> > >> Signed-off-by: Christian König <christian.koenig@amd.com>
> > > i915 doesn't use the prime helpers, so this would be a change for us.
> >
> > > Not a problem at all, see below. I've added a cache_sgt_mapping member
> > > into the dma_buf_ops structure. The functionality is only enabled when
> > > this member is true.
> 
> Hm right I missed that.
> 
> > So I'm just moving code around in those two patches and no functional
> > change at all.
> 
> Still not sure. I'll look at your latest dma-buf series to see how it
> all fits together.

Ok, I'm still procrastinating on that one, but I guess either way we need
some caching or other in dma-buf.c, and this series here at least looks
like it's moving in that direction.
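
For illustration, a minimal sketch of the importer-visible behaviour once
an exporter sets cache_sgt_mapping; error handling is omitted and dmabuf/dev
are placeholders for the shared buffer and the importing device:

	struct dma_buf_attachment *attach;
	struct sg_table *sgt, *sgt2;

	attach = dma_buf_attach(dmabuf, dev);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);

	/* With cache_sgt_mapping set this returns the cached sg_table again... */
	sgt2 = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);

	/* ...and unmapping the cached table is a no-op. */
	dma_buf_unmap_attachment(attach, sgt2, DMA_BIDIRECTIONAL);
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);

	/* The real unmap of the cached table happens in dma_buf_detach(). */
	dma_buf_detach(dmabuf, attach);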

On both patches Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

btw might be good to throw this at intel-gfx-trybot too, just to see
whether anything is amiss. I think we've beaten down all the -rc1
regressions now.
-Daniel


> [...]

Patch

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 7c858020d14b..a2d34c6b80a5 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -573,6 +573,7 @@  struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 	list_add(&attach->node, &dmabuf->attachments);
 
 	mutex_unlock(&dmabuf->lock);
+
 	return attach;
 
 err_attach:
@@ -595,6 +596,9 @@  void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 	if (WARN_ON(!dmabuf || !attach))
 		return;
 
+	if (attach->sgt)
+		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
+
 	mutex_lock(&dmabuf->lock);
 	list_del(&attach->node);
 	if (dmabuf->ops->detach)
@@ -630,10 +634,27 @@  struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 	if (WARN_ON(!attach || !attach->dmabuf))
 		return ERR_PTR(-EINVAL);
 
+	if (attach->sgt) {
+		/*
+		 * Two mappings with different directions for the same
+		 * attachment are not allowed.
+		 */
+		if (attach->dir != direction &&
+		    attach->dir != DMA_BIDIRECTIONAL)
+			return ERR_PTR(-EBUSY);
+
+		return attach->sgt;
+	}
+
 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 	if (!sg_table)
 		sg_table = ERR_PTR(-ENOMEM);
 
+	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
+		attach->sgt = sg_table;
+		attach->dir = direction;
+	}
+
 	return sg_table;
 }
 EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
@@ -657,8 +678,10 @@  void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
 		return;
 
-	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
-						direction);
+	if (attach->sgt == sg_table)
+		return;
+
+	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 58725f890b5b..45b9767e67ec 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -51,6 +51,15 @@  struct dma_buf_attachment;
  * @vunmap: [optional] unmaps a vmap from the buffer
  */
 struct dma_buf_ops {
+	/**
+	  * @cache_sgt_mapping:
+	  *
+	  * If true the framework will cache the first mapping made for each
+	  * attachment. This avoids creating mappings for attachments multiple
+	  * times.
+	  */
+	bool cache_sgt_mapping;
+
 	/**
 	 * @attach:
 	 *
@@ -307,6 +316,8 @@  struct dma_buf {
  * @dmabuf: buffer for this attachment.
  * @dev: device attached to the buffer.
  * @node: list of dma_buf_attachment.
+ * @sgt: cached mapping.
+ * @dir: direction of cached mapping.
  * @priv: exporter specific attachment data.
  *
  * This structure holds the attachment information between the dma_buf buffer
@@ -322,6 +333,8 @@  struct dma_buf_attachment {
 	struct dma_buf *dmabuf;
 	struct device *dev;
 	struct list_head node;
+	struct sg_table *sgt;
+	enum dma_data_direction dir;
 	void *priv;
 };