
[RFC,PATCHv2,11/12] videobuf2: add begin/end cpu_access callbacks to dma-sg

Message ID 20200204025641.218376-12-senozhatsky@chromium.org (mailing list archive)
State New, archived
Series Implement V4L2_BUF_FLAG_NO_CACHE_* flags

Commit Message

Sergey Senozhatsky Feb. 4, 2020, 2:56 a.m. UTC
Provide begin_cpu_access() and end_cpu_access() dma_buf_ops
callbacks for cache synchronisation on exported buffers.

V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers.
dma-sg allocates memory using the page allocator directly, so
there is no memory consistency guarantee.

Change-Id: Ia0d9d72a8c2a9fe3264ac148f59201573289ed2c
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
 .../media/common/videobuf2/videobuf2-dma-sg.c | 22 +++++++++++++++++++
 1 file changed, 22 insertions(+)
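
For reference, the new callbacks are what back the DMA_BUF_IOCTL_SYNC ioctl on the exported buffer's file descriptor. A minimal userspace sketch of how they would be exercised (the helper name is made up for illustration; it assumes `fd` is a dmabuf fd obtained via VIDIOC_EXPBUF from a dma-sg queue, with error handling trimmed):

	#include <sys/ioctl.h>
	#include <linux/dma-buf.h>

	/* Bracket CPU reads of the exported buffer: SYNC_START reaches
	 * vb2_dma_sg_dmabuf_ops_begin_cpu_access(), SYNC_END reaches
	 * vb2_dma_sg_dmabuf_ops_end_cpu_access(). */
	static int cpu_read_dmabuf(int fd)
	{
		struct dma_buf_sync sync = { 0 };

		sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ;
		if (ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
			return -1;

		/* ... read from the mmap()ed buffer here ... */

		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
		return ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
	}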

Comments

Hans Verkuil Feb. 19, 2020, 8:35 a.m. UTC | #1
On 2/4/20 3:56 AM, Sergey Senozhatsky wrote:
> Provide begin_cpu_access() and end_cpu_access() dma_buf_ops
> callbacks for cache synchronisation on exported buffers.
> 
> V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers.
> dma-sg allocates memory using the page allocator directly, so
> there is no memory consistency guarantee.

This should also be a comment in the code.

Regards,

	Hans

Sergey Senozhatsky Feb. 19, 2020, 8:42 a.m. UTC | #2
On (20/02/19 09:35), Hans Verkuil wrote:
> On 2/4/20 3:56 AM, Sergey Senozhatsky wrote:
> > Provide begin_cpu_access() and end_cpu_access() dma_buf_ops
> > callbacks for cache synchronisation on exported buffers.
> > 
> > V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers.
> > dma-sg allocates memory using the page allocator directly, so
> > there is no memory consistency guarantee.
> 
> This should also be a comment in the code.

OK.

	-ss
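
A possible form of the requested comment, sketched here for illustration only (it is not part of this version of the series), placed above vb2_dma_sg_dmabuf_ops:

	/*
	 * dma-sg allocates memory via the page allocator directly, so there
	 * is no memory consistency guarantee; V4L2_FLAG_MEMORY_NON_CONSISTENT
	 * therefore has no effect on dma-sg buffers, and CPU access to an
	 * exported buffer must be bracketed by begin/end_cpu_access calls.
	 */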

Patch

diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 6db60e9d5183..bfc99a0cb7b9 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -470,6 +470,26 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
 	vb2_dma_sg_put(dbuf->priv);
 }
 
+static int vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
+					enum dma_data_direction direction)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+	return 0;
+}
+
+static int vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+					enum dma_data_direction direction)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+	return 0;
+}
+
 static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
 {
 	struct vb2_dma_sg_buf *buf = dbuf->priv;
@@ -488,6 +508,8 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
 	.detach = vb2_dma_sg_dmabuf_ops_detach,
 	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
+	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
+	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
 	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
 	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
 	.release = vb2_dma_sg_dmabuf_ops_release,
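
In-kernel importers reach these callbacks through the dma-buf core rather than by calling the ops directly. A hypothetical importer-side sketch (the function name and the DMA_FROM_DEVICE direction are illustrative assumptions):

	#include <linux/dma-buf.h>
	#include <linux/dma-direction.h>

	static int example_cpu_read(struct dma_buf *dbuf)
	{
		int err;

		/* Dispatches to vb2_dma_sg_dmabuf_ops_begin_cpu_access(). */
		err = dma_buf_begin_cpu_access(dbuf, DMA_FROM_DEVICE);
		if (err)
			return err;

		/* ... CPU reads of the buffer contents ... */

		/* Dispatches to vb2_dma_sg_dmabuf_ops_end_cpu_access(). */
		return dma_buf_end_cpu_access(dbuf, DMA_FROM_DEVICE);
	}

Note that in this patch the callbacks ignore the direction argument passed down from the core and instead sync with buf->dma_dir, the direction the buffer was originally mapped with.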