
[v3,01/11] dmaengine: Add API function dmaengine_prep_slave_dma_array()

Message ID 20230403154800.215924-2-paul@crapouillou.net (mailing list archive)
State Changes Requested
Series iio: new DMABUF based API, v3

Commit Message

Paul Cercueil April 3, 2023, 3:47 p.m. UTC
This function can be used to initiate a scatter-gather DMA transfer
where the DMA addresses and lengths are located inside arrays.

The major difference from dmaengine_prep_slave_sg() is that it supports
specifying the length of each individual DMA transfer; overriding the
transfer lengths with dmaengine_prep_slave_sg() is a very tedious
process. The introduction of a new API function is also justified by the
fact that scatterlists are on their way out.

Signed-off-by: Paul Cercueil <paul@crapouillou.net>

---
v3: New patch
---
 include/linux/dmaengine.h | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
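
For illustration, a minimal consumer-side sketch of how the new helper could
be used (the queue_scattered_tx() wrapper and its parameters are hypothetical
and not part of this patch; the channel is assumed to be already requested
and configured, and error handling is reduced to the bare minimum):

#include <linux/dmaengine.h>

/*
 * Hypothetical consumer-side sketch: submit a set of scattered DMA
 * buffers, described by parallel address/length arrays, to a device.
 */
static int queue_scattered_tx(struct dma_chan *chan, dma_addr_t *addrs,
			      size_t *lengths, size_t nb)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_dma_array(chan, addrs, lengths, nb,
					      DMA_MEM_TO_DEV,
					      DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;	/* channel lacks device_prep_slave_dma_array */

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}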

Comments

Vinod Koul April 12, 2023, 5:23 p.m. UTC | #1
On 03-04-23, 17:47, Paul Cercueil wrote:
> This function can be used to initiate a scatter-gather DMA transfer
> where the DMA addresses and lengths are located inside arrays.
> 
> The major difference from dmaengine_prep_slave_sg() is that it supports
> specifying the length of each individual DMA transfer; overriding the
> transfer lengths with dmaengine_prep_slave_sg() is a very tedious
> process. The introduction of a new API function is also justified by the
> fact that scatterlists are on their way out.

Do we need a new API for this? Why not use device_prep_interleaved_dma?

Paul Cercueil April 13, 2023, 7:59 a.m. UTC | #2
Hi Vinod,

On Wednesday, April 12, 2023 at 22:53 +0530, Vinod Koul wrote:
> On 03-04-23, 17:47, Paul Cercueil wrote:
> > This function can be used to initiate a scatter-gather DMA transfer
> > where the DMA addresses and lengths are located inside arrays.
> > 
> > The major difference from dmaengine_prep_slave_sg() is that it
> > supports specifying the length of each individual DMA transfer;
> > overriding the transfer lengths with dmaengine_prep_slave_sg() is
> > a very tedious process. The introduction of a new API function is
> > also justified by the fact that scatterlists are on their way out.
> 
> Do we need a new API for this? Why not use
> device_prep_interleaved_dma?

I admit that I discarded the interleaved DMA API without trying it: from
reading the documentation, e.g. the one for "struct data_chunk", it looked
unusable when the DMA addresses are scattered in memory, since it assumes
that each DMA address always comes after the previous one.
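
As a rough illustration of that constraint (a simplified sketch based on the
struct definitions in include/linux/dmaengine.h, ignoring the src_inc/src_sgl
variants; not code from this series), the address of each chunk in a
dma_interleaved_template is derived from src_start plus the sizes and
inter-chunk gaps of all preceding chunks, so it can only ever grow:

#include <linux/dmaengine.h>

/*
 * Simplified walk of one frame of a dma_interleaved_template for a
 * MEM_TO_DEV transfer.  Chunk addresses are derived, never specified,
 * and the inter-chunk gap (icg) only moves forward, which is why
 * scattered, non-monotonic buffer addresses cannot be described.
 */
static void walk_interleaved_frame(const struct dma_interleaved_template *xt)
{
	dma_addr_t addr = xt->src_start;
	size_t i;

	for (i = 0; i < xt->frame_size; i++) {
		/* chunk i covers [addr, addr + sgl[i].size) ... */
		addr += xt->sgl[i].size;
		/* ... and the next chunk starts after a forward-only gap */
		addr += xt->sgl[i].icg;
	}
}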

Cheers,
-Paul 


Patch

diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c3656e590213..62efa28c009a 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -912,6 +912,11 @@ struct dma_device {
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan, unsigned long flags);
 
+	struct dma_async_tx_descriptor *(*device_prep_slave_dma_array)(
+		struct dma_chan *chan, dma_addr_t *addrs,
+		size_t *lengths, size_t nb,
+		enum dma_transfer_direction direction,
+		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
 		struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_transfer_direction direction,
@@ -974,6 +979,17 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
 						  dir, flags, NULL);
 }
 
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_dma_array(
+	struct dma_chan *chan, dma_addr_t *addrs, size_t *lengths,
+	size_t nb, enum dma_transfer_direction dir, unsigned long flags)
+{
+	if (!chan || !chan->device || !chan->device->device_prep_slave_dma_array)
+		return NULL;
+
+	return chan->device->device_prep_slave_dma_array(chan, addrs, lengths,
+							 nb, dir, flags);
+}
+
 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sgl,	unsigned int sg_len,
 	enum dma_transfer_direction dir, unsigned long flags)
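
For context, a hypothetical sketch of what a controller driver implementing
the new callback might look like (my_chan, my_desc, to_my_chan(),
my_alloc_desc() and my_append_segment() are made-up placeholders for a
driver's existing descriptor machinery; only is_slave_direction() and
vchan_tx_prep() are existing helpers):

/* Hypothetical provider-side sketch of the proposed callback. */
static struct dma_async_tx_descriptor *
my_prep_slave_dma_array(struct dma_chan *chan, dma_addr_t *addrs,
			size_t *lengths, size_t nb,
			enum dma_transfer_direction direction,
			unsigned long flags)
{
	struct my_chan *mchan = to_my_chan(chan);	/* placeholder */
	struct my_desc *desc;				/* placeholder */
	size_t i;

	if (!is_slave_direction(direction))
		return NULL;

	desc = my_alloc_desc(mchan, nb);		/* placeholder */
	if (!desc)
		return NULL;

	/* One hardware segment per (address, length) pair. */
	for (i = 0; i < nb; i++)
		my_append_segment(desc, addrs[i], lengths[i], direction);

	return vchan_tx_prep(&mchan->vc, &desc->vdesc, flags);
}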