diff mbox series

[v5,7/8] iio: buffer-dmaengine: Support new DMABUF based userspace API

Message ID 20231219175009.65482-8-paul@crapouillou.net (mailing list archive)
State New, archived
Headers show
Series iio: new DMABUF based API, v5 | expand

Commit Message

Paul Cercueil Dec. 19, 2023, 5:50 p.m. UTC
Use the functions provided by the buffer-dma core to implement the
DMABUF userspace API in the buffer-dmaengine IIO buffer implementation.

Since we want to be able to transfer an arbitrary number of bytes and
not necessarily the full DMABUF, the associated scatterlist is converted
to an array of DMA addresses + lengths, which is then passed to
dmaengine_prep_slave_dma_array().

Signed-off-by: Paul Cercueil <paul@crapouillou.net>

---
v3: Use the new dmaengine_prep_slave_dma_array(), and adapt the code to
    work with the new functions introduced in industrialio-buffer-dma.c.

v5: - Use the new dmaengine_prep_slave_dma_vec().
    - Restrict to input buffers, since output buffers are not yet
      supported by IIO buffers.
---
 .../buffer/industrialio-buffer-dmaengine.c    | 52 ++++++++++++++++---
 1 file changed, 46 insertions(+), 6 deletions(-)

Comments

Jonathan Cameron Dec. 21, 2023, 4:12 p.m. UTC | #1
On Tue, 19 Dec 2023 18:50:08 +0100
Paul Cercueil <paul@crapouillou.net> wrote:

> Use the functions provided by the buffer-dma core to implement the
> DMABUF userspace API in the buffer-dmaengine IIO buffer implementation.
> 
> Since we want to be able to transfer an arbitrary number of bytes and
> not necesarily the full DMABUF, the associated scatterlist is converted
> to an array of DMA addresses + lengths, which is then passed to
> dmaengine_prep_slave_dma_array().
> 
> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
One question inline. Otherwise looks fine to me.

J
> 
> ---
> v3: Use the new dmaengine_prep_slave_dma_array(), and adapt the code to
>     work with the new functions introduced in industrialio-buffer-dma.c.
> 
> v5: - Use the new dmaengine_prep_slave_dma_vec().
>     - Restrict to input buffers, since output buffers are not yet
>       supported by IIO buffers.
> ---
>  .../buffer/industrialio-buffer-dmaengine.c    | 52 ++++++++++++++++---
>  1 file changed, 46 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> index 5f85ba38e6f6..825d76a24a67 100644
> --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> @@ -64,15 +64,51 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
>  	struct dmaengine_buffer *dmaengine_buffer =
>  		iio_buffer_to_dmaengine_buffer(&queue->buffer);
>  	struct dma_async_tx_descriptor *desc;
> +	unsigned int i, nents;
> +	struct scatterlist *sgl;
> +	struct dma_vec *vecs;
> +	size_t max_size;
>  	dma_cookie_t cookie;
> +	size_t len_total;
>  
> -	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
> -	block->bytes_used = round_down(block->bytes_used,
> -			dmaengine_buffer->align);
> +	if (queue->buffer.direction != IIO_BUFFER_DIRECTION_IN) {
> +		/* We do not yet support output buffers. */
> +		return -EINVAL;
> +	}
>  
> -	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
> -		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
> -		DMA_PREP_INTERRUPT);
> +	if (block->sg_table) {
> +		sgl = block->sg_table->sgl;
> +		nents = sg_nents_for_len(sgl, block->bytes_used);

Are we guaranteed the length in the sglist is enough?  If not this
can return an error code.


> +
> +		vecs = kmalloc_array(nents, sizeof(*vecs), GFP_KERNEL);
> +		if (!vecs)
> +			return -ENOMEM;
> +
> +		len_total = block->bytes_used;
> +
> +		for (i = 0; i < nents; i++) {
> +			vecs[i].addr = sg_dma_address(sgl);
> +			vecs[i].len = min(sg_dma_len(sgl), len_total);
> +			len_total -= vecs[i].len;
> +
> +			sgl = sg_next(sgl);
> +		}
> +
> +		desc = dmaengine_prep_slave_dma_vec(dmaengine_buffer->chan,
> +						    vecs, nents, DMA_DEV_TO_MEM,
> +						    DMA_PREP_INTERRUPT);
> +		kfree(vecs);
> +	} else {
> +		max_size = min(block->size, dmaengine_buffer->max_size);
> +		max_size = round_down(max_size, dmaengine_buffer->align);
> +		block->bytes_used = max_size;
> +
> +		desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
> +						   block->phys_addr,
> +						   block->bytes_used,
> +						   DMA_DEV_TO_MEM,
> +						   DMA_PREP_INTERRUPT);
> +	}
>  	if (!desc)
>  		return -ENOMEM;
>
Paul Cercueil Dec. 21, 2023, 5:30 p.m. UTC | #2
Hi Jonathan,

Le jeudi 21 décembre 2023 à 16:12 +0000, Jonathan Cameron a écrit :
> On Tue, 19 Dec 2023 18:50:08 +0100
> Paul Cercueil <paul@crapouillou.net> wrote:
> 
> > Use the functions provided by the buffer-dma core to implement the
> > DMABUF userspace API in the buffer-dmaengine IIO buffer
> > implementation.
> > 
> > Since we want to be able to transfer an arbitrary number of bytes
> > and
> > not necesarily the full DMABUF, the associated scatterlist is
> > converted
> > to an array of DMA addresses + lengths, which is then passed to
> > dmaengine_prep_slave_dma_array().
> > 
> > Signed-off-by: Paul Cercueil <paul@crapouillou.net>
> One question inline. Otherwise looks fine to me.
> 
> J
> > 
> > ---
> > v3: Use the new dmaengine_prep_slave_dma_array(), and adapt the
> > code to
> >     work with the new functions introduced in industrialio-buffer-
> > dma.c.
> > 
> > v5: - Use the new dmaengine_prep_slave_dma_vec().
> >     - Restrict to input buffers, since output buffers are not yet
> >       supported by IIO buffers.
> > ---
> >  .../buffer/industrialio-buffer-dmaengine.c    | 52
> > ++++++++++++++++---
> >  1 file changed, 46 insertions(+), 6 deletions(-)
> > 
> > diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> > b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> > index 5f85ba38e6f6..825d76a24a67 100644
> > --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> > +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> > @@ -64,15 +64,51 @@ static int
> > iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue
> > *queue,
> >  	struct dmaengine_buffer *dmaengine_buffer =
> >  		iio_buffer_to_dmaengine_buffer(&queue->buffer);
> >  	struct dma_async_tx_descriptor *desc;
> > +	unsigned int i, nents;
> > +	struct scatterlist *sgl;
> > +	struct dma_vec *vecs;
> > +	size_t max_size;
> >  	dma_cookie_t cookie;
> > +	size_t len_total;
> >  
> > -	block->bytes_used = min(block->size, dmaengine_buffer-
> > >max_size);
> > -	block->bytes_used = round_down(block->bytes_used,
> > -			dmaengine_buffer->align);
> > +	if (queue->buffer.direction != IIO_BUFFER_DIRECTION_IN) {
> > +		/* We do not yet support output buffers. */
> > +		return -EINVAL;
> > +	}
> >  
> > -	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
> > -		block->phys_addr, block->bytes_used,
> > DMA_DEV_TO_MEM,
> > -		DMA_PREP_INTERRUPT);
> > +	if (block->sg_table) {
> > +		sgl = block->sg_table->sgl;
> > +		nents = sg_nents_for_len(sgl, block->bytes_used);
> 
> Are we guaranteed the length in the sglist is enough?  If not this
> can return an error code.

The length of the sglist will always be enough, the
iio_buffer_enqueue_dmabuf() function already checks that block-
>bytes_used is equal to or smaller than the size of the DMABUF.

It is quite a few functions above in the call stack though, so I can
handle the errors of sg_nents_for_len() here if you think it makes sense.

Cheers,
-Paul

> > +
> > +		vecs = kmalloc_array(nents, sizeof(*vecs),
> > GFP_KERNEL);
> > +		if (!vecs)
> > +			return -ENOMEM;
> > +
> > +		len_total = block->bytes_used;
> > +
> > +		for (i = 0; i < nents; i++) {
> > +			vecs[i].addr = sg_dma_address(sgl);
> > +			vecs[i].len = min(sg_dma_len(sgl),
> > len_total);
> > +			len_total -= vecs[i].len;
> > +
> > +			sgl = sg_next(sgl);
> > +		}
> > +
> > +		desc =
> > dmaengine_prep_slave_dma_vec(dmaengine_buffer->chan,
> > +						    vecs, nents,
> > DMA_DEV_TO_MEM,
> > +						   
> > DMA_PREP_INTERRUPT);
> > +		kfree(vecs);
> > +	} else {
> > +		max_size = min(block->size, dmaengine_buffer-
> > >max_size);
> > +		max_size = round_down(max_size, dmaengine_buffer-
> > >align);
> > +		block->bytes_used = max_size;
> > +
> > +		desc =
> > dmaengine_prep_slave_single(dmaengine_buffer->chan,
> > +						   block-
> > >phys_addr,
> > +						   block-
> > >bytes_used,
> > +						   DMA_DEV_TO_MEM,
> > +						  
> > DMA_PREP_INTERRUPT);
> > +	}
> >  	if (!desc)
> >  		return -ENOMEM;
> >  
>
Nuno Sá Dec. 22, 2023, 8:58 a.m. UTC | #3
On Thu, 2023-12-21 at 18:30 +0100, Paul Cercueil wrote:
> Hi Jonathan,
> 
> Le jeudi 21 décembre 2023 à 16:12 +0000, Jonathan Cameron a écrit :
> > On Tue, 19 Dec 2023 18:50:08 +0100
> > Paul Cercueil <paul@crapouillou.net> wrote:
> > 
> > > Use the functions provided by the buffer-dma core to implement the
> > > DMABUF userspace API in the buffer-dmaengine IIO buffer
> > > implementation.
> > > 
> > > Since we want to be able to transfer an arbitrary number of bytes
> > > and
> > > not necesarily the full DMABUF, the associated scatterlist is
> > > converted
> > > to an array of DMA addresses + lengths, which is then passed to
> > > dmaengine_prep_slave_dma_array().
> > > 
> > > Signed-off-by: Paul Cercueil <paul@crapouillou.net>
> > One question inline. Otherwise looks fine to me.
> > 
> > J
> > > 
> > > ---
> > > v3: Use the new dmaengine_prep_slave_dma_array(), and adapt the
> > > code to
> > >     work with the new functions introduced in industrialio-buffer-
> > > dma.c.
> > > 
> > > v5: - Use the new dmaengine_prep_slave_dma_vec().
> > >     - Restrict to input buffers, since output buffers are not yet
> > >       supported by IIO buffers.
> > > ---
> > >  .../buffer/industrialio-buffer-dmaengine.c    | 52
> > > ++++++++++++++++---
> > >  1 file changed, 46 insertions(+), 6 deletions(-)
> > > 
> > > diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> > > b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> > > index 5f85ba38e6f6..825d76a24a67 100644
> > > --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> > > +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> > > @@ -64,15 +64,51 @@ static int
> > > iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue
> > > *queue,
> > >         struct dmaengine_buffer *dmaengine_buffer =
> > >                 iio_buffer_to_dmaengine_buffer(&queue->buffer);
> > >         struct dma_async_tx_descriptor *desc;
> > > +       unsigned int i, nents;
> > > +       struct scatterlist *sgl;
> > > +       struct dma_vec *vecs;
> > > +       size_t max_size;
> > >         dma_cookie_t cookie;
> > > +       size_t len_total;
> > >  
> > > -       block->bytes_used = min(block->size, dmaengine_buffer-
> > > > max_size);
> > > -       block->bytes_used = round_down(block->bytes_used,
> > > -                       dmaengine_buffer->align);
> > > +       if (queue->buffer.direction != IIO_BUFFER_DIRECTION_IN) {
> > > +               /* We do not yet support output buffers. */
> > > +               return -EINVAL;
> > > +       }
> > >  
> > > -       desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
> > > -               block->phys_addr, block->bytes_used,
> > > DMA_DEV_TO_MEM,
> > > -               DMA_PREP_INTERRUPT);
> > > +       if (block->sg_table) {
> > > +               sgl = block->sg_table->sgl;
> > > +               nents = sg_nents_for_len(sgl, block->bytes_used);
> > 
> > Are we guaranteed the length in the sglist is enough?  If not this
> > can return an error code.
> 
> The length of the sglist will always be enough, the
> iio_buffer_enqueue_dmabuf() function already checks that block-
> > bytes_used is equal or smaller than the size of the DMABUF.
> 
> It is quite a few functions above in the call stack though, so I can
> handle the errors of sg_nents_for_len() here if you think makes sense.

Maybe putting something like the above in a comment?

- Nuno Sá
Jonathan Cameron Dec. 26, 2023, 3:31 p.m. UTC | #4
On Fri, 22 Dec 2023 09:58:29 +0100
Nuno Sá <noname.nuno@gmail.com> wrote:

> On Thu, 2023-12-21 at 18:30 +0100, Paul Cercueil wrote:
> > Hi Jonathan,
> > 
> > Le jeudi 21 décembre 2023 à 16:12 +0000, Jonathan Cameron a écrit :  
> > > On Tue, 19 Dec 2023 18:50:08 +0100
> > > Paul Cercueil <paul@crapouillou.net> wrote:
> > >   
> > > > Use the functions provided by the buffer-dma core to implement the
> > > > DMABUF userspace API in the buffer-dmaengine IIO buffer
> > > > implementation.
> > > > 
> > > > Since we want to be able to transfer an arbitrary number of bytes
> > > > and
> > > > not necesarily the full DMABUF, the associated scatterlist is
> > > > converted
> > > > to an array of DMA addresses + lengths, which is then passed to
> > > > dmaengine_prep_slave_dma_array().
> > > > 
> > > > Signed-off-by: Paul Cercueil <paul@crapouillou.net>  
> > > One question inline. Otherwise looks fine to me.
> > > 
> > > J  
> > > > 
> > > > ---
> > > > v3: Use the new dmaengine_prep_slave_dma_array(), and adapt the
> > > > code to
> > > >     work with the new functions introduced in industrialio-buffer-
> > > > dma.c.
> > > > 
> > > > v5: - Use the new dmaengine_prep_slave_dma_vec().
> > > >     - Restrict to input buffers, since output buffers are not yet
> > > >       supported by IIO buffers.
> > > > ---
> > > >  .../buffer/industrialio-buffer-dmaengine.c    | 52
> > > > ++++++++++++++++---
> > > >  1 file changed, 46 insertions(+), 6 deletions(-)
> > > > 
> > > > diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> > > > b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> > > > index 5f85ba38e6f6..825d76a24a67 100644
> > > > --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> > > > +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> > > > @@ -64,15 +64,51 @@ static int
> > > > iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue
> > > > *queue,
> > > >         struct dmaengine_buffer *dmaengine_buffer =
> > > >                 iio_buffer_to_dmaengine_buffer(&queue->buffer);
> > > >         struct dma_async_tx_descriptor *desc;
> > > > +       unsigned int i, nents;
> > > > +       struct scatterlist *sgl;
> > > > +       struct dma_vec *vecs;
> > > > +       size_t max_size;
> > > >         dma_cookie_t cookie;
> > > > +       size_t len_total;
> > > >  
> > > > -       block->bytes_used = min(block->size, dmaengine_buffer-  
> > > > > max_size);  
> > > > -       block->bytes_used = round_down(block->bytes_used,
> > > > -                       dmaengine_buffer->align);
> > > > +       if (queue->buffer.direction != IIO_BUFFER_DIRECTION_IN) {
> > > > +               /* We do not yet support output buffers. */
> > > > +               return -EINVAL;
> > > > +       }
> > > >  
> > > > -       desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
> > > > -               block->phys_addr, block->bytes_used,
> > > > DMA_DEV_TO_MEM,
> > > > -               DMA_PREP_INTERRUPT);
> > > > +       if (block->sg_table) {
> > > > +               sgl = block->sg_table->sgl;
> > > > +               nents = sg_nents_for_len(sgl, block->bytes_used);  
> > > 
> > > Are we guaranteed the length in the sglist is enough?  If not this
> > > can return an error code.  
> > 
> > The length of the sglist will always be enough, the
> > iio_buffer_enqueue_dmabuf() function already checks that block-  
> > > bytes_used is equal or smaller than the size of the DMABUF.  
> > 
> > It is quite a few functions above in the call stack though, so I can
> > handle the errors of sg_nents_for_len() here if you think makes sense.  
> 
> Maybe putting something like the above in a comment?
Either comment, or an explicit check so we don't need the comment is
fine as far as I'm concerned.

Jonathan

> 
> - Nuno Sá
> 
>
diff mbox series

Patch

diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 5f85ba38e6f6..825d76a24a67 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -64,15 +64,51 @@  static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
 	struct dmaengine_buffer *dmaengine_buffer =
 		iio_buffer_to_dmaengine_buffer(&queue->buffer);
 	struct dma_async_tx_descriptor *desc;
+	unsigned int i, nents;
+	struct scatterlist *sgl;
+	struct dma_vec *vecs;
+	size_t max_size;
 	dma_cookie_t cookie;
+	size_t len_total;
 
-	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
-	block->bytes_used = round_down(block->bytes_used,
-			dmaengine_buffer->align);
+	if (queue->buffer.direction != IIO_BUFFER_DIRECTION_IN) {
+		/* We do not yet support output buffers. */
+		return -EINVAL;
+	}
 
-	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
-		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
-		DMA_PREP_INTERRUPT);
+	if (block->sg_table) {
+		sgl = block->sg_table->sgl;
+		nents = sg_nents_for_len(sgl, block->bytes_used);
+
+		vecs = kmalloc_array(nents, sizeof(*vecs), GFP_KERNEL);
+		if (!vecs)
+			return -ENOMEM;
+
+		len_total = block->bytes_used;
+
+		for (i = 0; i < nents; i++) {
+			vecs[i].addr = sg_dma_address(sgl);
+			vecs[i].len = min(sg_dma_len(sgl), len_total);
+			len_total -= vecs[i].len;
+
+			sgl = sg_next(sgl);
+		}
+
+		desc = dmaengine_prep_slave_dma_vec(dmaengine_buffer->chan,
+						    vecs, nents, DMA_DEV_TO_MEM,
+						    DMA_PREP_INTERRUPT);
+		kfree(vecs);
+	} else {
+		max_size = min(block->size, dmaengine_buffer->max_size);
+		max_size = round_down(max_size, dmaengine_buffer->align);
+		block->bytes_used = max_size;
+
+		desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
+						   block->phys_addr,
+						   block->bytes_used,
+						   DMA_DEV_TO_MEM,
+						   DMA_PREP_INTERRUPT);
+	}
 	if (!desc)
 		return -ENOMEM;
 
@@ -120,6 +156,10 @@  static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
 	.data_available = iio_dma_buffer_data_available,
 	.release = iio_dmaengine_buffer_release,
 
+	.enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
+	.attach_dmabuf = iio_dma_buffer_attach_dmabuf,
+	.detach_dmabuf = iio_dma_buffer_detach_dmabuf,
+
 	.modes = INDIO_BUFFER_HARDWARE,
 	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
 };