
[RFC,12/15] videobuf2: add begin/end cpu_access callbacks to dma-sg

Message ID 20191217032034.54897-13-senozhatsky@chromium.org (mailing list archive)
State New, archived
Series Implement V4L2_BUF_FLAG_NO_CACHE_* flags

Commit Message

Sergey Senozhatsky Dec. 17, 2019, 3:20 a.m. UTC
Provide begin_cpu_access() and end_cpu_access() dma_buf_ops
callbacks for cache synchronisation on exported buffers.

Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
 .../media/common/videobuf2/videobuf2-dma-sg.c | 22 +++++++++++++++++++
 1 file changed, 22 insertions(+)
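
For context, these callbacks are what a dma-buf importer (or the dma-buf core, acting on a userspace DMA_BUF_IOCTL_SYNC request) ends up calling around CPU accesses to the exported buffer. A minimal kernel-side sketch of an importer bracketing a CPU read; importer_read_buffer() and the surrounding driver are placeholders, not part of this series:

  #include <linux/dma-buf.h>

  /* Sketch only: "dbuf" is a dma-buf imported from the vb2 dma-sg exporter. */
  static int importer_read_buffer(struct dma_buf *dbuf)
  {
          int ret;

          /* Ends up in the exporter's .begin_cpu_access (dma_sync_sg_for_cpu here). */
          ret = dma_buf_begin_cpu_access(dbuf, DMA_FROM_DEVICE);
          if (ret)
                  return ret;

          /* ... CPU reads the buffer contents (e.g. through vmap/mmap) ... */

          /* Ends up in the exporter's .end_cpu_access (dma_sync_sg_for_device here). */
          return dma_buf_end_cpu_access(dbuf, DMA_FROM_DEVICE);
  }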

Comments

Hans Verkuil Jan. 10, 2020, 10:13 a.m. UTC | #1
On 12/17/19 4:20 AM, Sergey Senozhatsky wrote:
> Provide begin_cpu_access() and end_cpu_access() dma_buf_ops
> callbacks for cache synchronisation on exported buffers.
> 
> Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
> ---
>  .../media/common/videobuf2/videobuf2-dma-sg.c | 22 +++++++++++++++++++
>  1 file changed, 22 insertions(+)
> 
> diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> index 6db60e9d5183..bfc99a0cb7b9 100644
> --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> @@ -470,6 +470,26 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
>  	vb2_dma_sg_put(dbuf->priv);
>  }
>  

There is no corresponding vb2_sg_buffer_consistent function here.

Looking more closely I see that vb2_dma_sg_alloc doesn't pass the dma_attrs
argument to dma_map_sg_attrs, thus V4L2_FLAG_MEMORY_NON_CONSISTENT has no
effect on dma-sg buffers.

Is there a reason why dma_attrs isn't passed on to dma_map_sg_attrs()?

I suspect it was just laziness in the past, and that it should be wired
up, just as for dma-contig.

Regards,

	Hans
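
A hypothetical sketch of what "wiring it up" would look like in vb2_dma_sg_alloc(), shown only to make the question concrete; as the follow-ups note, the attribute in question actually applies at allocation time rather than at mapping time. "attrs" stands in for the caller-supplied dma_attrs:

  /* Hypothetical: forward the caller's dma_attrs when creating the mapping. */
  sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                buf->dma_dir, attrs);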

> +static int vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
> +					enum dma_data_direction direction)
> +{
> +	struct vb2_dma_sg_buf *buf = dbuf->priv;
> +	struct sg_table *sgt = buf->dma_sgt;
> +
> +	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
> +	return 0;
> +}
> +
> +static int vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
> +					enum dma_data_direction direction)
> +{
> +	struct vb2_dma_sg_buf *buf = dbuf->priv;
> +	struct sg_table *sgt = buf->dma_sgt;
> +
> +	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
> +	return 0;
> +}
> +
>  static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
>  {
>  	struct vb2_dma_sg_buf *buf = dbuf->priv;
> @@ -488,6 +508,8 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
>  	.detach = vb2_dma_sg_dmabuf_ops_detach,
>  	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
>  	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
> +	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
> +	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
>  	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
>  	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
>  	.release = vb2_dma_sg_dmabuf_ops_release,
>
Sergey Senozhatsky Jan. 22, 2020, 6:37 a.m. UTC | #2
On (20/01/10 11:13), Hans Verkuil wrote:
> On 12/17/19 4:20 AM, Sergey Senozhatsky wrote:
> > Provide begin_cpu_access() and end_cpu_access() dma_buf_ops
> > callbacks for cache synchronisation on exported buffers.
> > 
> > Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
> > ---
> >  .../media/common/videobuf2/videobuf2-dma-sg.c | 22 +++++++++++++++++++
> >  1 file changed, 22 insertions(+)
> > 
> > diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> > index 6db60e9d5183..bfc99a0cb7b9 100644
> > --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> > +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> > @@ -470,6 +470,26 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
> >  	vb2_dma_sg_put(dbuf->priv);
> >  }
> >  
> 
> There is no corresponding vb2_sg_buffer_consistent function here.
> 
> Looking more closely I see that vb2_dma_sg_alloc doesn't pass the dma_attrs
> argument to dma_map_sg_attrs, thus V4L2_FLAG_MEMORY_NON_CONSISTENT has no
> effect on dma-sg buffers.
> 
> Is there a reason why dma_attrs isn't passed on to dma_map_sg_attrs()?

Laziness.

> I suspect it was just laziness in the past, and that it should be wired
> up, just as for dma-contig.

OK, I can include it into v2.

	-ss
Tomasz Figa Jan. 28, 2020, 4:38 a.m. UTC | #3
On Fri, Jan 10, 2020 at 7:13 PM Hans Verkuil <hverkuil@xs4all.nl> wrote:
>
> On 12/17/19 4:20 AM, Sergey Senozhatsky wrote:
> > Provide begin_cpu_access() and end_cpu_access() dma_buf_ops
> > callbacks for cache synchronisation on exported buffers.
> >
> > Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
> > ---
> >  .../media/common/videobuf2/videobuf2-dma-sg.c | 22 +++++++++++++++++++
> >  1 file changed, 22 insertions(+)
> >
> > diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> > index 6db60e9d5183..bfc99a0cb7b9 100644
> > --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> > +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> > @@ -470,6 +470,26 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
> >       vb2_dma_sg_put(dbuf->priv);
> >  }
> >
>
> There is no corresponding vb2_sg_buffer_consistent function here.
>
> Looking more closely I see that vb2_dma_sg_alloc doesn't pass the dma_attrs
> argument to dma_map_sg_attrs, thus V4L2_FLAG_MEMORY_NON_CONSISTENT has no
> effect on dma-sg buffers.

videobuf2-dma-sg allocates the memory using the page allocator
directly, which means that there is no memory consistency guarantee.

>
> Is there a reason why dma_attrs isn't passed on to dma_map_sg_attrs()?
>

V4L2_FLAG_MEMORY_NON_CONSISTENT is a flag for dma_alloc_attrs(). It
isn't supposed to do anything for dma_map_sg_attrs(), which is only
supposed to create the device (e.g. IOMMU) mapping for already
allocated memory.
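
A hedged sketch of that split, with placeholder names rather than the actual vb2 code: the attribute matters when the memory is allocated, while the mapping call only sets up the device-side translation.

  /* Allocation: this is where DMA_ATTR_NON_CONSISTENT (and hence the
   * proposed V4L2 flag) has a meaning -- it is an allocation attribute.
   */
  vaddr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
                          DMA_ATTR_NON_CONSISTENT);

  /* Mapping: only creates the device (e.g. IOMMU) mapping for pages
   * allocated elsewhere; attrs here do not change memory consistency.
   */
  nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, 0);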

> I suspect it was just laziness in the past, and that it should be wired
> up, just as for dma-contig.
>
> Regards,
>
>         Hans
>
> > +static int vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
> > +                                     enum dma_data_direction direction)
> > +{
> > +     struct vb2_dma_sg_buf *buf = dbuf->priv;
> > +     struct sg_table *sgt = buf->dma_sgt;
> > +
> > +     dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
> > +     return 0;
> > +}
> > +
> > +static int vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
> > +                                     enum dma_data_direction direction)
> > +{
> > +     struct vb2_dma_sg_buf *buf = dbuf->priv;
> > +     struct sg_table *sgt = buf->dma_sgt;
> > +
> > +     dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
> > +     return 0;
> > +}
> > +
> >  static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
> >  {
> >       struct vb2_dma_sg_buf *buf = dbuf->priv;
> > @@ -488,6 +508,8 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
> >       .detach = vb2_dma_sg_dmabuf_ops_detach,
> >       .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
> >       .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
> > +     .begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
> > +     .end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
> >       .vmap = vb2_dma_sg_dmabuf_ops_vmap,
> >       .mmap = vb2_dma_sg_dmabuf_ops_mmap,
> >       .release = vb2_dma_sg_dmabuf_ops_release,
> >
>
Hans Verkuil Jan. 28, 2020, 8:36 a.m. UTC | #4
On 1/28/20 5:38 AM, Tomasz Figa wrote:
> On Fri, Jan 10, 2020 at 7:13 PM Hans Verkuil <hverkuil@xs4all.nl> wrote:
>>
>> On 12/17/19 4:20 AM, Sergey Senozhatsky wrote:
>>> Provide begin_cpu_access() and end_cpu_access() dma_buf_ops
>>> callbacks for cache synchronisation on exported buffers.
>>>
>>> Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
>>> ---
>>>  .../media/common/videobuf2/videobuf2-dma-sg.c | 22 +++++++++++++++++++
>>>  1 file changed, 22 insertions(+)
>>>
>>> diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
>>> index 6db60e9d5183..bfc99a0cb7b9 100644
>>> --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
>>> +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
>>> @@ -470,6 +470,26 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
>>>       vb2_dma_sg_put(dbuf->priv);
>>>  }
>>>
>>
>> There is no corresponding vb2_sg_buffer_consistent function here.
>>
>> Looking more closely I see that vb2_dma_sg_alloc doesn't pass the dma_attrs
>> argument to dma_map_sg_attrs, thus V4L2_FLAG_MEMORY_NON_CONSISTENT has no
>> effect on dma-sg buffers.
> 
> videobuf2-dma-sg allocates the memory using the page allocator
> directly, which means that there is no memory consistency guarantee.
> 
>>
>> Is there a reason why dma_attrs isn't passed on to dma_map_sg_attrs()?
>>
> 
> V4L2_FLAG_MEMORY_NON_CONSISTENT is a flag for dma_alloc_attrs(). It
> isn't supposed to do anything for dma_map_sg_attrs(), which is only
> supposed to create the device (e.g. IOMMU) mapping for already
> allocated memory.

Ah, right.

But could vb2_dma_sg_alloc_compacted() be modified so that it uses
dma_alloc_attrs() instead of alloc_pages()? Sorry, that might be a stupid
question, I'm not an expert in this area. All I know is that I hate inconsistent
APIs where something works for one thing, but not another.

Regards,

	Hans

> 
>> I suspect it was just laziness in the past, and that it should be wired
>> up, just as for dma-contig.
>>
>> Regards,
>>
>>         Hans
>>
>>> +static int vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
>>> +                                     enum dma_data_direction direction)
>>> +{
>>> +     struct vb2_dma_sg_buf *buf = dbuf->priv;
>>> +     struct sg_table *sgt = buf->dma_sgt;
>>> +
>>> +     dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
>>> +     return 0;
>>> +}
>>> +
>>> +static int vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
>>> +                                     enum dma_data_direction direction)
>>> +{
>>> +     struct vb2_dma_sg_buf *buf = dbuf->priv;
>>> +     struct sg_table *sgt = buf->dma_sgt;
>>> +
>>> +     dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
>>> +     return 0;
>>> +}
>>> +
>>>  static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
>>>  {
>>>       struct vb2_dma_sg_buf *buf = dbuf->priv;
>>> @@ -488,6 +508,8 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
>>>       .detach = vb2_dma_sg_dmabuf_ops_detach,
>>>       .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
>>>       .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
>>> +     .begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
>>> +     .end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
>>>       .vmap = vb2_dma_sg_dmabuf_ops_vmap,
>>>       .mmap = vb2_dma_sg_dmabuf_ops_mmap,
>>>       .release = vb2_dma_sg_dmabuf_ops_release,
>>>
>>
Tomasz Figa Jan. 30, 2020, 11:02 a.m. UTC | #5
On Tue, Jan 28, 2020 at 5:36 PM Hans Verkuil <hverkuil@xs4all.nl> wrote:
>
> On 1/28/20 5:38 AM, Tomasz Figa wrote:
> > On Fri, Jan 10, 2020 at 7:13 PM Hans Verkuil <hverkuil@xs4all.nl> wrote:
> >>
> >> On 12/17/19 4:20 AM, Sergey Senozhatsky wrote:
> >>> Provide begin_cpu_access() and end_cpu_access() dma_buf_ops
> >>> callbacks for cache synchronisation on exported buffers.
> >>>
> >>> Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
> >>> ---
> >>>  .../media/common/videobuf2/videobuf2-dma-sg.c | 22 +++++++++++++++++++
> >>>  1 file changed, 22 insertions(+)
> >>>
> >>> diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> >>> index 6db60e9d5183..bfc99a0cb7b9 100644
> >>> --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> >>> +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> >>> @@ -470,6 +470,26 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
> >>>       vb2_dma_sg_put(dbuf->priv);
> >>>  }
> >>>
> >>
> >> There is no corresponding vb2_sg_buffer_consistent function here.
> >>
> >> Looking more closely I see that vb2_dma_sg_alloc doesn't pass the dma_attrs
> >> argument to dma_map_sg_attrs, thus V4L2_FLAG_MEMORY_NON_CONSISTENT has no
> >> effect on dma-sg buffers.
> >
> > videobuf2-dma-sg allocates the memory using the page allocator
> > directly, which means that there is no memory consistency guarantee.
> >
> >>
> >> Is there a reason why dma_attrs isn't passed on to dma_map_sg_attrs()?
> >>
> >
> > V4L2_FLAG_MEMORY_NON_CONSISTENT is a flag for dma_alloc_attrs(). It
> > isn't supposed to do anything for dma_map_sg_attrs(), which is only
> > supposed to create the device (e.g. IOMMU) mapping for already
> > allocated memory.
>
> Ah, right.
>
> But could vb2_dma_sg_alloc_compacted() be modified so that it uses
> dma_alloc_attrs() instead of alloc_pages()? Sorry, that might be a stupid
> question, I'm not an expert in this area. All I know is that I hate inconsistent
> APIs where something works for one thing, but not another.
>

dma_alloc_attrs() would allocate contiguous memory, which kind of goes
against the vb2_dma_sg allocator idea. Technically we could call it N
times with size = 1 page to achieve what we want, but is this really
what we want?

Given that vb2_dma_sg has always been returning non-consistent memory,
do we have any good reason to add consistent memory to it? If so,
perhaps we could still do that in an incremental patch, to avoid
complicating this already complex series? :)

Best regards,
Tomasz
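
A simplified sketch of the contrast being drawn here: what the dma-sg allocator conceptually does today versus the page-at-a-time dma_alloc_attrs() variant mentioned above. Names and error handling are placeholders.

  /* Today (simplified): independent pages straight from the page allocator. */
  for (i = 0; i < nr_pages; i++)
          pages[i] = alloc_page(GFP_KERNEL);

  /* Hypothetical alternative: one dma_alloc_attrs() call per page so DMA
   * attributes could apply -- technically possible, but it turns one
   * scatter-gather buffer into nr_pages tiny DMA allocations, which is
   * exactly what is being questioned here.
   */
  for (i = 0; i < nr_pages; i++)
          vaddrs[i] = dma_alloc_attrs(dev, PAGE_SIZE, &dma_addrs[i],
                                      GFP_KERNEL, attrs);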

> Regards,
>
>         Hans
>
> >
> >> I suspect it was just laziness in the past, and that it should be wired
> >> up, just as for dma-contig.
> >>
> >> Regards,
> >>
> >>         Hans
> >>
> >>> +static int vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
> >>> +                                     enum dma_data_direction direction)
> >>> +{
> >>> +     struct vb2_dma_sg_buf *buf = dbuf->priv;
> >>> +     struct sg_table *sgt = buf->dma_sgt;
> >>> +
> >>> +     dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
> >>> +     return 0;
> >>> +}
> >>> +
> >>> +static int vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
> >>> +                                     enum dma_data_direction direction)
> >>> +{
> >>> +     struct vb2_dma_sg_buf *buf = dbuf->priv;
> >>> +     struct sg_table *sgt = buf->dma_sgt;
> >>> +
> >>> +     dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
> >>> +     return 0;
> >>> +}
> >>> +
> >>>  static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
> >>>  {
> >>>       struct vb2_dma_sg_buf *buf = dbuf->priv;
> >>> @@ -488,6 +508,8 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
> >>>       .detach = vb2_dma_sg_dmabuf_ops_detach,
> >>>       .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
> >>>       .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
> >>> +     .begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
> >>> +     .end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
> >>>       .vmap = vb2_dma_sg_dmabuf_ops_vmap,
> >>>       .mmap = vb2_dma_sg_dmabuf_ops_mmap,
> >>>       .release = vb2_dma_sg_dmabuf_ops_release,
> >>>
> >>
>
Hans Verkuil Jan. 30, 2020, 12:18 p.m. UTC | #6
On 1/30/20 12:02 PM, Tomasz Figa wrote:
> On Tue, Jan 28, 2020 at 5:36 PM Hans Verkuil <hverkuil@xs4all.nl> wrote:
>>
>> On 1/28/20 5:38 AM, Tomasz Figa wrote:
>>> On Fri, Jan 10, 2020 at 7:13 PM Hans Verkuil <hverkuil@xs4all.nl> wrote:
>>>>
>>>> On 12/17/19 4:20 AM, Sergey Senozhatsky wrote:
>>>>> Provide begin_cpu_access() and end_cpu_access() dma_buf_ops
>>>>> callbacks for cache synchronisation on exported buffers.
>>>>>
>>>>> Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
>>>>> ---
>>>>>  .../media/common/videobuf2/videobuf2-dma-sg.c | 22 +++++++++++++++++++
>>>>>  1 file changed, 22 insertions(+)
>>>>>
>>>>> diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
>>>>> index 6db60e9d5183..bfc99a0cb7b9 100644
>>>>> --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
>>>>> +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
>>>>> @@ -470,6 +470,26 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
>>>>>       vb2_dma_sg_put(dbuf->priv);
>>>>>  }
>>>>>
>>>>
>>>> There is no corresponding vb2_sg_buffer_consistent function here.
>>>>
>>>> Looking more closely I see that vb2_dma_sg_alloc doesn't pass the dma_attrs
>>>> argument to dma_map_sg_attrs, thus V4L2_FLAG_MEMORY_NON_CONSISTENT has no
>>>> effect on dma-sg buffers.
>>>
>>> videobuf2-dma-sg allocates the memory using the page allocator
>>> directly, which means that there is no memory consistency guarantee.
>>>
>>>>
>>>> Is there a reason why dma_attrs isn't passed on to dma_map_sg_attrs()?
>>>>
>>>
>>> V4L2_FLAG_MEMORY_NON_CONSISTENT is a flag for dma_alloc_attrs(). It
>>> isn't supposed to do anything for dma_map_sg_attrs(), which is only
>>> supposed to create the device (e.g. IOMMU) mapping for already
>>> allocated memory.
>>
>> Ah, right.
>>
>> But could vb2_dma_sg_alloc_compacted() be modified so that it uses
>> dma_alloc_attrs() instead of alloc_pages()? Sorry, that might be a stupid
>> question, I'm not an expert in this area. All I know is that I hate inconsistent
>> APIs where something works for one thing, but not another.
>>
> 
> dma_alloc_attrs() would allocate contiguous memory, which kind of goes
> against the vb2_dma_sg allocator idea. Technically we could call it N
> times with size = 1 page to achieve what we want, but is this really
> what we want?
> 
> Given that vb2_dma_sg has always been returning non-consistent memory,
> do we have any good reason to add consistent memory to it? If so,
> perhaps we could still do that in an incremental patch, to avoid
> complicating this already complex series? :)

I very much agree with that. But this should be very clearly documented.
Should V4L2_CAP_MEMORY_NON_CONSISTENT always be set in this case?

Regards,

	Hans

> 
> Best regards,
> Tomasz
> 
>> Regards,
>>
>>         Hans
>>
>>>
>>>> I suspect it was just laziness in the past, and that it should be wired
>>>> up, just as for dma-contig.
>>>>
>>>> Regards,
>>>>
>>>>         Hans
>>>>
>>>>> +static int vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
>>>>> +                                     enum dma_data_direction direction)
>>>>> +{
>>>>> +     struct vb2_dma_sg_buf *buf = dbuf->priv;
>>>>> +     struct sg_table *sgt = buf->dma_sgt;
>>>>> +
>>>>> +     dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
>>>>> +     return 0;
>>>>> +}
>>>>> +
>>>>> +static int vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
>>>>> +                                     enum dma_data_direction direction)
>>>>> +{
>>>>> +     struct vb2_dma_sg_buf *buf = dbuf->priv;
>>>>> +     struct sg_table *sgt = buf->dma_sgt;
>>>>> +
>>>>> +     dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
>>>>> +     return 0;
>>>>> +}
>>>>> +
>>>>>  static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
>>>>>  {
>>>>>       struct vb2_dma_sg_buf *buf = dbuf->priv;
>>>>> @@ -488,6 +508,8 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
>>>>>       .detach = vb2_dma_sg_dmabuf_ops_detach,
>>>>>       .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
>>>>>       .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
>>>>> +     .begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
>>>>> +     .end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
>>>>>       .vmap = vb2_dma_sg_dmabuf_ops_vmap,
>>>>>       .mmap = vb2_dma_sg_dmabuf_ops_mmap,
>>>>>       .release = vb2_dma_sg_dmabuf_ops_release,
>>>>>
>>>>
>>
Tomasz Figa Feb. 3, 2020, 10:04 a.m. UTC | #7
On Thu, Jan 30, 2020 at 9:18 PM Hans Verkuil <hverkuil@xs4all.nl> wrote:
>
> On 1/30/20 12:02 PM, Tomasz Figa wrote:
> > On Tue, Jan 28, 2020 at 5:36 PM Hans Verkuil <hverkuil@xs4all.nl> wrote:
> >>
> >> On 1/28/20 5:38 AM, Tomasz Figa wrote:
> >>> On Fri, Jan 10, 2020 at 7:13 PM Hans Verkuil <hverkuil@xs4all.nl> wrote:
> >>>>
> >>>> On 12/17/19 4:20 AM, Sergey Senozhatsky wrote:
> >>>>> Provide begin_cpu_access() and end_cpu_access() dma_buf_ops
> >>>>> callbacks for cache synchronisation on exported buffers.
> >>>>>
> >>>>> Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
> >>>>> ---
> >>>>>  .../media/common/videobuf2/videobuf2-dma-sg.c | 22 +++++++++++++++++++
> >>>>>  1 file changed, 22 insertions(+)
> >>>>>
> >>>>> diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> >>>>> index 6db60e9d5183..bfc99a0cb7b9 100644
> >>>>> --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> >>>>> +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> >>>>> @@ -470,6 +470,26 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
> >>>>>       vb2_dma_sg_put(dbuf->priv);
> >>>>>  }
> >>>>>
> >>>>
> >>>> There is no corresponding vb2_sg_buffer_consistent function here.
> >>>>
> >>>> Looking more closely I see that vb2_dma_sg_alloc doesn't pass the dma_attrs
> >>>> argument to dma_map_sg_attrs, thus V4L2_FLAG_MEMORY_NON_CONSISTENT has no
> >>>> effect on dma-sg buffers.
> >>>
> >>> videobuf2-dma-sg allocates the memory using the page allocator
> >>> directly, which means that there is no memory consistency guarantee.
> >>>
> >>>>
> >>>> Is there a reason why dma_attrs isn't passed on to dma_map_sg_attrs()?
> >>>>
> >>>
> >>> V4L2_FLAG_MEMORY_NON_CONSISTENT is a flag for dma_alloc_attrs(). It
> >>> isn't supposed to do anything for dma_map_sg_attrs(), which is only
> >>> supposed to create the device (e.g. IOMMU) mapping for already
> >>> allocated memory.
> >>
> >> Ah, right.
> >>
> >> But could vb2_dma_sg_alloc_compacted() be modified so that it uses
> >> dma_alloc_attrs() instead of alloc_pages()? Sorry, that might be a stupid
> >> question, I'm not an expert in this area. All I know is that I hate inconsistent
> >> APIs where something works for one thing, but not another.
> >>
> >
> > dma_alloc_attrs() would allocate contiguous memory, which kind of goes
> > against the vb2_dma_sg allocator idea. Technically we could call it N
> > times with size = 1 page to achieve what we want, but is this really
> > what we want?
> >
> > Given that vb2_dma_sg has always been returning non-consistent memory,
> > do we have any good reason to add consistent memory to it? If so,
> > perhaps we could still do that in an incremental patch, to avoid
> > complicating this already complex series? :)
>
> I very much agree with that. But this should be very clearly documented.
> Should V4L2_CAP_MEMORY_NON_CONSISTENT always be set in this case?
>

Yes, IMHO that would make sense. My understanding is that currently
the consistency of allocated memory is unspecified, so it can be
either. With V4L2_FLAG_MEMORY_NON_CONSISTENT, the userspace can
explicitly ask for inconsistent memory.

Moreover, I'd vote for setting V4L2_CAP_MEMORY_NON_CONSISTENT when
V4L2_FLAG_MEMORY_NON_CONSISTENT is guaranteed to return inconsistent
memory to avoid "optional" features or "hints" without guaranteed
behavior.

> Regards,
>
>         Hans
>
> >
> > Best regards,
> > Tomasz
> >
> >> Regards,
> >>
> >>         Hans
> >>
> >>>
> >>>> I suspect it was just laziness in the past, and that it should be wired
> >>>> up, just as for dma-contig.
> >>>>
> >>>> Regards,
> >>>>
> >>>>         Hans
> >>>>
> >>>>> +static int vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
> >>>>> +                                     enum dma_data_direction direction)
> >>>>> +{
> >>>>> +     struct vb2_dma_sg_buf *buf = dbuf->priv;
> >>>>> +     struct sg_table *sgt = buf->dma_sgt;
> >>>>> +
> >>>>> +     dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
> >>>>> +     return 0;
> >>>>> +}
> >>>>> +
> >>>>> +static int vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
> >>>>> +                                     enum dma_data_direction direction)
> >>>>> +{
> >>>>> +     struct vb2_dma_sg_buf *buf = dbuf->priv;
> >>>>> +     struct sg_table *sgt = buf->dma_sgt;
> >>>>> +
> >>>>> +     dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
> >>>>> +     return 0;
> >>>>> +}
> >>>>> +
> >>>>>  static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
> >>>>>  {
> >>>>>       struct vb2_dma_sg_buf *buf = dbuf->priv;
> >>>>> @@ -488,6 +508,8 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
> >>>>>       .detach = vb2_dma_sg_dmabuf_ops_detach,
> >>>>>       .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
> >>>>>       .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
> >>>>> +     .begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
> >>>>> +     .end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
> >>>>>       .vmap = vb2_dma_sg_dmabuf_ops_vmap,
> >>>>>       .mmap = vb2_dma_sg_dmabuf_ops_mmap,
> >>>>>       .release = vb2_dma_sg_dmabuf_ops_release,
> >>>>>
> >>>>
> >>
>
Sergey Senozhatsky Feb. 4, 2020, 2:50 a.m. UTC | #8
On (20/02/03 19:04), Tomasz Figa wrote:
[..]
> > I very much agree with that. But this should be very clearly documented.
> > Should V4L2_CAP_MEMORY_NON_CONSISTENT always be set in this case?
> >
> 
> Yes, IMHO that would make sense. My understanding is that currently
> the consistency of allocated memory is unspecified, so it can be
> either. With V4L2_FLAG_MEMORY_NON_CONSISTENT, the userspace can
> explicitly ask for inconsistent memory.
> 
> Moreover, I'd vote for setting V4L2_CAP_MEMORY_NON_CONSISTENT when
> V4L2_FLAG_MEMORY_NON_CONSISTENT is guaranteed to return inconsistent
> memory to avoid "optional" features or "hints" without guaranteed
> behavior.

Documentation/DMA-attributes.txt says the following

  DMA_ATTR_NON_CONSISTENT
  -----------------------

  DMA_ATTR_NON_CONSISTENT lets the platform to choose to return either
  consistent or non-consistent memory as it sees fit.  By using this API,
  you are guaranteeing to the platform that you have all the correct and
  necessary sync points for this memory in the driver.

	-ss
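
To make the "sync points" guarantee from that text concrete, a minimal sketch of generic DMA API usage (not vb2 code) for a driver opting into DMA_ATTR_NON_CONSISTENT around a device-write/CPU-read cycle:

  buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
                        DMA_ATTR_NON_CONSISTENT);

  /* ... device DMA-writes into the buffer ... */

  /* Sync point before the CPU looks at what the device wrote. */
  dma_sync_single_for_cpu(dev, dma_handle, size, DMA_FROM_DEVICE);
  /* ... CPU reads/parses the data ... */

  /* Sync point before handing ownership back to the device. */
  dma_sync_single_for_device(dev, dma_handle, size, DMA_FROM_DEVICE);
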
Tomasz Figa Feb. 6, 2020, 8:51 a.m. UTC | #9
On Tue, Feb 4, 2020 at 11:50 AM Sergey Senozhatsky
<senozhatsky@chromium.org> wrote:
>
> On (20/02/03 19:04), Tomasz Figa wrote:
> [..]
> > > I very much agree with that. But this should be very clearly documented.
> > > Should V4L2_CAP_MEMORY_NON_CONSISTENT always be set in this case?
> > >
> >
> > Yes, IMHO that would make sense. My understanding is that currently
> > the consistency of allocated memory is unspecified, so it can be
> > either. With V4L2_FLAG_MEMORY_NON_CONSISTENT, the userspace can
> > explicitly ask for inconsistent memory.
> >
> > Moreover, I'd vote for setting V4L2_CAP_MEMORY_NON_CONSISTENT when
> > V4L2_FLAG_MEMORY_NON_CONSISTENT is guaranteed to return inconsistent
> > memory to avoid "optional" features or "hints" without guaranteed
> > behavior.
>
> Documentation/DMA-attributes.txt says the following
>
>   DMA_ATTR_NON_CONSISTENT
>   -----------------------
>
>   DMA_ATTR_NON_CONSISTENT lets the platform to choose to return either
>   consistent or non-consistent memory as it sees fit.  By using this API,
>   you are guaranteeing to the platform that you have all the correct and
>   necessary sync points for this memory in the driver.

Good point. And I also realized that some platforms just have no way
to make the memory inconsistent, because they may have hardware
coherency.

Then we need to keep it a hint only.

Best regards,
Tomasz

Patch

diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 6db60e9d5183..bfc99a0cb7b9 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -470,6 +470,26 @@  static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
 	vb2_dma_sg_put(dbuf->priv);
 }
 
+static int vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
+					enum dma_data_direction direction)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+	return 0;
+}
+
+static int vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+					enum dma_data_direction direction)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+	return 0;
+}
+
 static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
 {
 	struct vb2_dma_sg_buf *buf = dbuf->priv;
@@ -488,6 +508,8 @@  static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
 	.detach = vb2_dma_sg_dmabuf_ops_detach,
 	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
+	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
+	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
 	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
 	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
 	.release = vb2_dma_sg_dmabuf_ops_release,
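
For completeness, a hedged userspace-side sketch of how the two new callbacks get exercised once a vb2 dma-sg buffer has been exported (e.g. via VIDIOC_EXPBUF): the standard DMA_BUF_IOCTL_SYNC ioctl brackets CPU access and ends up in .begin_cpu_access / .end_cpu_access above. The file descriptor and function name are placeholders.

  #include <linux/dma-buf.h>
  #include <sys/ioctl.h>

  /* "dbuf_fd" is a dma-buf file descriptor obtained from the exporter. */
  static int sync_cpu_access(int dbuf_fd)
  {
          struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW };

          if (ioctl(dbuf_fd, DMA_BUF_IOCTL_SYNC, &sync))  /* -> .begin_cpu_access */
                  return -1;

          /* ... read/write the mmap()ed view of dbuf_fd here ... */

          sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
          return ioctl(dbuf_fd, DMA_BUF_IOCTL_SYNC, &sync); /* -> .end_cpu_access */
  }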