
[V2,03/12] DMA: PL330: Add DMA capabilities

Message ID 1310546857-6304-4-git-send-email-kgene.kim@samsung.com (mailing list archive)
State New, archived

Commit Message

Kim Kukjin July 13, 2011, 8:47 a.m. UTC
From: Boojin Kim <boojin.kim@samsung.com>

This patch adds the DMA_CYCLIC capability, used by the audio driver,
and the SLAVE_CONFIG capability for transfers between device and memory.

Signed-off-by: Boojin Kim <boojin.kim@samsung.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Kukjin Kim <kgene.kim@samsung.com>
---
 drivers/dma/pl330.c |  187 +++++++++++++++++++++++++++++++++++++++++++++++----
 1 files changed, 173 insertions(+), 14 deletions(-)
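
For context, a rough sketch (not part of the patch) of how a client such
as an audio driver could exercise the two new capabilities through the
dmaengine API of this era; the function and variable names below are
illustrative only:

	#include <linux/dmaengine.h>

	static int start_cyclic_tx(struct dma_chan *chan, dma_addr_t buf,
				   size_t buf_len, size_t period_len,
				   dma_addr_t fifo_addr)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_TO_DEVICE,
			.dst_addr	= fifo_addr,
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		struct dma_async_tx_descriptor *desc;

		/* DMA_SLAVE_CONFIG: describe the peripheral FIFO */
		dmaengine_slave_config(chan, &cfg);

		/* DMA_CYCLIC: the buffer is replayed until terminated */
		desc = chan->device->device_prep_dma_cyclic(chan, buf,
				buf_len, period_len, DMA_TO_DEVICE);
		if (!desc)
			return -ENOMEM;

		desc->tx_submit(desc);
		return 0;
	}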

Comments

Russell King - ARM Linux July 13, 2011, 9:14 a.m. UTC | #1
On Wed, Jul 13, 2011 at 05:47:28PM +0900, Kukjin Kim wrote:
> +static void pl330_tasklet_cyclic(unsigned long data)
> +{
> +	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
> +	struct dma_pl330_desc *desc, *_dt;
> +	unsigned long flags;
> +	LIST_HEAD(list);
> +
> +	spin_lock_irqsave(&pch->lock, flags);
> +
> +	/* Pick up ripe tomatoes */
> +	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
> +		if ((desc->status == DONE) && desc->cyclic) {
> +			dma_async_tx_callback callback;
> +
> +			list_move_tail(&desc->node, &pch->work_list);
> +			pch->completed = desc->txd.cookie;
> +
> +			desc->status = PREP;
> +
> +			/* Try to submit a req imm.
> +			next to the last completed cookie */
> +			fill_queue(pch);
> +
> +			/* Make sure the PL330 Channel thread is active */
> +			pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
> +
> +			callback = desc->txd.callback;
> +			if (callback)
> +				callback(desc->txd.callback_param);

How does this work when callbacks are allowed to queue new requests?
Aren't you going to deadlock on the spinlock?

I don't see 'list' being used in this function either.

> +
> +		}
> +
> +	spin_unlock_irqrestore(&pch->lock, flags);
> +}
> +
> +static void pl330_cyclic_free(struct dma_pl330_chan *pch)
> +{
> +	struct dma_pl330_dmac *pdmac = pch->dmac;
> +	struct dma_pl330_desc *desc, *_dt;
> +	unsigned long flags;
> +	LIST_HEAD(list);
> +
> +	spin_lock_irqsave(&pdmac->pool_lock, flags);
> +
> +	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
> +	if (desc->cyclic)
> +		list_move_tail(&desc->node, &list);
> +
> +	list_splice_tail_init(&list, &pdmac->desc_pool);

As you're not using 'list' after this point, would 'list_splice_tail'
do here?

> +
> +	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
> +	pch->cyclic_task = NULL;
> +}
> +
>  static void pl330_tasklet(unsigned long data)
>  {
>  	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
> @@ -227,6 +285,9 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
>  
>  	spin_unlock_irqrestore(&pch->lock, flags);
>  
> +	if (pch->cyclic_task)
> +		tasklet_schedule(pch->cyclic_task);
> +	else
>  	tasklet_schedule(&pch->task);

This 'tasklet_schedule' wants to be indented.
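
i.e., after the fix:

	if (pch->cyclic_task)
		tasklet_schedule(pch->cyclic_task);
	else
		tasklet_schedule(&pch->task);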

>  }
>  
> @@ -256,25 +317,58 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
>  static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
>  {
>  	struct dma_pl330_chan *pch = to_pchan(chan);
> -	struct dma_pl330_desc *desc;
> +	struct dma_pl330_desc *desc, *_dt;
>  	unsigned long flags;
> +	struct dma_pl330_dmac *pdmac = pch->dmac;
> +	struct dma_slave_config *slave_config;
> +	struct dma_pl330_peri *peri;
> +	int i;
> +	LIST_HEAD(list);
>  
> -	/* Only supports DMA_TERMINATE_ALL */
> -	if (cmd != DMA_TERMINATE_ALL)
> -		return -ENXIO;
> -
> -	spin_lock_irqsave(&pch->lock, flags);
> -
> -	/* FLUSH the PL330 Channel thread */
> -	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
> +	switch (cmd) {
> +	case DMA_TERMINATE_ALL:
> +		spin_lock_irqsave(&pch->lock, flags);
>  
> -	/* Mark all desc done */
> -	list_for_each_entry(desc, &pch->work_list, node)
> -		desc->status = DONE;
> +		/* FLUSH the PL330 Channel thread */
> +		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
>  
> -	spin_unlock_irqrestore(&pch->lock, flags);
> +		/* Mark all desc done */
> +		list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
> +			desc->status = DONE;
> +			pch->completed = desc->txd.cookie;
> +			list_move_tail(&desc->node, &list);
> +		}
>  
> -	pl330_tasklet((unsigned long) pch);
> +		list_splice_tail_init(&list, &pdmac->desc_pool);

Again, would list_splice_tail() do here?

> +		spin_unlock_irqrestore(&pch->lock, flags);
> +		break;
> +	case DMA_SLAVE_CONFIG:
> +		slave_config = (struct dma_slave_config *)arg;
> +		peri = pch->chan.private;
> +
> +		if (slave_config->direction == DMA_TO_DEVICE) {
> +			if (slave_config->dst_addr)
> +				peri->fifo_addr = slave_config->dst_addr;
> +			if (slave_config->dst_addr_width) {
> +				i = 0;
> +				while (slave_config->dst_addr_width != (1 << i))
> +					i++;
> +				peri->burst_sz = i;
> +			}
> +		} else if (slave_config->direction == DMA_FROM_DEVICE) {
> +			if (slave_config->src_addr)
> +				peri->fifo_addr = slave_config->src_addr;
> +			if (slave_config->src_addr_width) {
> +				i = 0;
> +				while (slave_config->src_addr_width != (1 << i))
> +					i++;
> +				peri->burst_sz = i;
> +			}
> +		}

It would make more sense to store the M2P and P2M address/width/burst size
separately, so you don't have to make DMA_SLAVE_CONFIG calls before every
transfer.
boojin.kim July 13, 2011, 11:04 a.m. UTC | #2
This patch adds the DMA cyclic capability and slave configuration.
The DMA cyclic capability is only used for the audio circular buffer.

Russell King wrote:
> On Wed, Jul 13, 2011 at 05:47:28PM +0900, Kukjin Kim wrote:
> > +static void pl330_tasklet_cyclic(unsigned long data)
> > +{
> > +	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
> > +	struct dma_pl330_desc *desc, *_dt;
> > +	unsigned long flags;
> > +	LIST_HEAD(list);
> > +
> > +	spin_lock_irqsave(&pch->lock, flags);
> > +
> > +	/* Pick up ripe tomatoes */
> > +	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
> > +		if ((desc->status == DONE) && desc->cyclic) {
> > +			dma_async_tx_callback callback;
> > +
> > +			list_move_tail(&desc->node, &pch->work_list);
> > +			pch->completed = desc->txd.cookie;
> > +
> > +			desc->status = PREP;
> > +
> > +			/* Try to submit a req imm.
> > +			next to the last completed cookie */
> > +			fill_queue(pch);
> > +
> > +			/* Make sure the PL330 Channel thread is active */
> > +			pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
> > +
> > +			callback = desc->txd.callback;
> > +			if (callback)
> > +				callback(desc->txd.callback_param);
> 
> How does this work when callbacks are allowed to queue new requests?
> Aren't you going to deadlock on the spinlock?
> 
> I don't see 'list' being used in this function either.
> 
>
The cyclic capability re-uses the requests that were submitted through
tx_submit(). This is possible because cyclic transfers always use the
same DMA configuration. pl330_tasklet_cyclic() changes the status of a
completed request back to 'PREP' and re-submits it to the PL330.
There is no deadlock, because the callback function only records the
current transmit position; it does not queue new requests. Also, the
cyclic capability does not use 'list' to queue new requests.
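
For reference, if a callback ever did need to queue new requests, the
conventional pattern would be to drop the channel lock around the
invocation -- a sketch only, not what this patch does:

	callback = desc->txd.callback;
	if (callback) {
		spin_unlock_irqrestore(&pch->lock, flags);
		callback(desc->txd.callback_param);
		spin_lock_irqsave(&pch->lock, flags);
	}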

> > +
> > +		}
> > +
> > +	spin_unlock_irqrestore(&pch->lock, flags);
> > +}
> > +
> > +static void pl330_cyclic_free(struct dma_pl330_chan *pch)
> > +{
> > +	struct dma_pl330_dmac *pdmac = pch->dmac;
> > +	struct dma_pl330_desc *desc, *_dt;
> > +	unsigned long flags;
> > +	LIST_HEAD(list);
> > +
> > +	spin_lock_irqsave(&pdmac->pool_lock, flags);
> > +
> > +	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
> > +	if (desc->cyclic)
> > +		list_move_tail(&desc->node, &list);
> > +
> > +	list_splice_tail_init(&list, &pdmac->desc_pool);
> 
> As you're not using 'list' after this point, would 'list_splice_tail'
> do here?
Yes, you're right. 'list' isn't used after device_free_chan_resources(),
so I will release it to desc_pool using 'list_splice_tail' instead.
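
For reference, the only difference is that the _init variant also
reinitialises the source list head afterwards:

	/* join 'list' onto the tail of the pool; 'list' itself is left
	 * in an undefined state -- fine for an on-stack LIST_HEAD that
	 * is about to go out of scope */
	list_splice_tail(&list, &pdmac->desc_pool);

	/* same join, but 'list' is re-initialised and stays usable */
	list_splice_tail_init(&list, &pdmac->desc_pool);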

> 
> > +
> > +	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
> > +	pch->cyclic_task = NULL;
> > +}
> > +
> >  static void pl330_tasklet(unsigned long data)
> >  {
> >  	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
> > @@ -227,6 +285,9 @@ static void dma_pl330_rqcb(void *token, enum
> pl330_op_err err)
> >
> >  	spin_unlock_irqrestore(&pch->lock, flags);
> >
> > +	if (pch->cyclic_task)
> > +		tasklet_schedule(pch->cyclic_task);
> > +	else
> >  	tasklet_schedule(&pch->task);
> 
> This 'tasklet_schedule' wants to be indented.
I will address your comment. Thanks.

> 
> >  }
> >
> > @@ -256,25 +317,58 @@ static int pl330_alloc_chan_resources(struct
> dma_chan *chan)
> >  static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
> unsigned long arg)
> >  {
> >  	struct dma_pl330_chan *pch = to_pchan(chan);
> > -	struct dma_pl330_desc *desc;
> > +	struct dma_pl330_desc *desc, *_dt;
> >  	unsigned long flags;
> > +	struct dma_pl330_dmac *pdmac = pch->dmac;
> > +	struct dma_slave_config *slave_config;
> > +	struct dma_pl330_peri *peri;
> > +	int i;
> > +	LIST_HEAD(list);
> >
> > -	/* Only supports DMA_TERMINATE_ALL */
> > -	if (cmd != DMA_TERMINATE_ALL)
> > -		return -ENXIO;
> > -
> > -	spin_lock_irqsave(&pch->lock, flags);
> > -
> > -	/* FLUSH the PL330 Channel thread */
> > -	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
> > +	switch (cmd) {
> > +	case DMA_TERMINATE_ALL:
> > +		spin_lock_irqsave(&pch->lock, flags);
> >
> > -	/* Mark all desc done */
> > -	list_for_each_entry(desc, &pch->work_list, node)
> > -		desc->status = DONE;
> > +		/* FLUSH the PL330 Channel thread */
> > +		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
> >
> > -	spin_unlock_irqrestore(&pch->lock, flags);
> > +		/* Mark all desc done */
> > +		list_for_each_entry_safe(desc, _dt, &pch->work_list , node)
> {
> > +			desc->status = DONE;
> > +			pch->completed = desc->txd.cookie;
> > +			list_move_tail(&desc->node, &list);
> > +		}
> >
> > -	pl330_tasklet((unsigned long) pch);
> > +		list_splice_tail_init(&list, &pdmac->desc_pool);
> 
> Again, would list_splice_tail() do here?
This code implements the 'flush' operation, which releases all of the
pre-loaded requests. I use 'list_splice_tail_init' for it directly,
instead of calling 'pl330_tasklet', which also ends up calling
'list_splice_tail_init'.

> 
> > +		spin_unlock_irqrestore(&pch->lock, flags);
> > +		break;
> > +	case DMA_SLAVE_CONFIG:
> > +		slave_config = (struct dma_slave_config *)arg;
> > +		peri = pch->chan.private;
> > +
> > +		if (slave_config->direction == DMA_TO_DEVICE) {
> > +			if (slave_config->dst_addr)
> > +				peri->fifo_addr = slave_config->dst_addr;
> > +			if (slave_config->dst_addr_width) {
> > +				i = 0;
> > +				while (slave_config->dst_addr_width != (1 <<
i))
> > +					i++;
> > +				peri->burst_sz = i;
> > +			}
> > +		} else if (slave_config->direction == DMA_FROM_DEVICE) {
> > +			if (slave_config->src_addr)
> > +				peri->fifo_addr = slave_config->src_addr;
> > +			if (slave_config->src_addr_width) {
> > +				i = 0;
> > +				while (slave_config->src_addr_width != (1 <<
i))
> > +					i++;
> > +				peri->burst_sz = i;
> > +			}
> > +		}
> 
> It would make more sense to store the M2P and P2M address/width/burst size
> separately, so you don't have to make DMA_SLAVE_CONFIG calls before every
> transfer.
DMA_SLAVE_CONFIG is only called the first time the DMA client driver
requests the DMA channel, not before every transfer.
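
A sketch of Russell's suggestion (the struct and helper names here are
hypothetical): keep the two directions' settings side by side so that
one DMA_SLAVE_CONFIG per direction is enough for the lifetime of the
channel, and derive the burst size with __ffs() instead of the
open-coded loop:

	#include <linux/bitops.h>

	struct pl330_slave_cfg {
		dma_addr_t	fifo_addr;
		unsigned int	burst_sz;	/* log2 of width in bytes */
	};

	struct dma_pl330_peri_cfg {
		struct pl330_slave_cfg	to_dev;		/* M2P */
		struct pl330_slave_cfg	from_dev;	/* P2M */
	};

	static unsigned int width_to_burst_sz(u32 addr_width)
	{
		/* addr_width is a power of two, so __ffs() == log2 */
		return __ffs(addr_width);
	}
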
Chanho Park July 15, 2011, 4:45 a.m. UTC | #3
Hello,

2011/7/13 Kukjin Kim <kgene.kim@samsung.com>:
> From: Boojin Kim <boojin.kim@samsung.com>
>
> This patch adds the DMA_CYCLIC capability, used by the audio driver,
> and the SLAVE_CONFIG capability for transfers between device and memory.
>
> Signed-off-by: Boojin Kim <boojin.kim@samsung.com>
> Cc: Vinod Koul <vinod.koul@intel.com>
> Cc: Dan Williams <dan.j.williams@intel.com>
> Signed-off-by: Kukjin Kim <kgene.kim@samsung.com>
> ---
>  drivers/dma/pl330.c |  187 +++++++++++++++++++++++++++++++++++++++++++++++----
>  1 files changed, 173 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
> index 9bdda7b..2162ac5 100644
> --- a/drivers/dma/pl330.c
> +++ b/drivers/dma/pl330.c
> @@ -69,6 +69,9 @@ struct dma_pl330_chan {
>         * NULL if the channel is available to be acquired.
>         */
>        void *pl330_chid;
> +
> +       /* task for cyclic capability */
> +       struct tasklet_struct *cyclic_task;
>  };
>
>  struct dma_pl330_dmac {
> @@ -105,6 +108,7 @@ struct dma_pl330_desc {
>
>        /* The channel which currently holds this desc */
>        struct dma_pl330_chan *pchan;
> +       bool cyclic;
>  };
>
>  static inline struct dma_pl330_chan *
> @@ -184,6 +188,60 @@ static inline void fill_queue(struct dma_pl330_chan *pch)
>        }
>  }
>
> +static void pl330_tasklet_cyclic(unsigned long data)
> +{
> +       struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
> +       struct dma_pl330_desc *desc, *_dt;
> +       unsigned long flags;
> +       LIST_HEAD(list);
> +
> +       spin_lock_irqsave(&pch->lock, flags);
> +
> +       /* Pick up ripe tomatoes */
> +       list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
> +               if ((desc->status == DONE) && desc->cyclic) {
> +                       dma_async_tx_callback callback;
> +
> +                       list_move_tail(&desc->node, &pch->work_list);
> +                       pch->completed = desc->txd.cookie;
> +
> +                       desc->status = PREP;
> +
> +                       /* Try to submit a req imm.
> +                       next to the last completed cookie */
> +                       fill_queue(pch);
> +
> +                       /* Make sure the PL330 Channel thread is active */
> +                       pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
> +
> +                       callback = desc->txd.callback;
> +                       if (callback)
> +                               callback(desc->txd.callback_param);
> +
> +               }
> +
> +       spin_unlock_irqrestore(&pch->lock, flags);
> +}
> +
> +static void pl330_cyclic_free(struct dma_pl330_chan *pch)
> +{
> +       struct dma_pl330_dmac *pdmac = pch->dmac;
> +       struct dma_pl330_desc *desc, *_dt;
> +       unsigned long flags;
> +       LIST_HEAD(list);
> +
> +       spin_lock_irqsave(&pdmac->pool_lock, flags);
> +
> +       list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
> +       if (desc->cyclic)
> +               list_move_tail(&desc->node, &list);

nitpick: the loop body needs another level of indentation after the for_each
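
i.e.:

	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->cyclic)
			list_move_tail(&desc->node, &list);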

> +
> +       list_splice_tail_init(&list, &pdmac->desc_pool);
> +
> +       spin_unlock_irqrestore(&pdmac->pool_lock, flags);
> +       pch->cyclic_task = NULL;
> +}
> +
>  static void pl330_tasklet(unsigned long data)
>  {
>        struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
> @@ -227,6 +285,9 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
>
>        spin_unlock_irqrestore(&pch->lock, flags);
>
> +       if (pch->cyclic_task)
> +               tasklet_schedule(pch->cyclic_task);
> +       else
>        tasklet_schedule(&pch->task);
>  }
>
> @@ -256,25 +317,58 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
>  static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
>  {
>        struct dma_pl330_chan *pch = to_pchan(chan);
> -       struct dma_pl330_desc *desc;
> +       struct dma_pl330_desc *desc, *_dt;
>        unsigned long flags;
> +       struct dma_pl330_dmac *pdmac = pch->dmac;
> +       struct dma_slave_config *slave_config;
> +       struct dma_pl330_peri *peri;
> +       int i;
> +       LIST_HEAD(list);
>
> -       /* Only supports DMA_TERMINATE_ALL */
> -       if (cmd != DMA_TERMINATE_ALL)
> -               return -ENXIO;
> -
> -       spin_lock_irqsave(&pch->lock, flags);
> -
> -       /* FLUSH the PL330 Channel thread */
> -       pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
> +       switch (cmd) {
> +       case DMA_TERMINATE_ALL:
> +               spin_lock_irqsave(&pch->lock, flags);
>
> -       /* Mark all desc done */
> -       list_for_each_entry(desc, &pch->work_list, node)
> -               desc->status = DONE;
> +               /* FLUSH the PL330 Channel thread */
> +               pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
>
> -       spin_unlock_irqrestore(&pch->lock, flags);
> +               /* Mark all desc done */
> +               list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
> +                       desc->status = DONE;
> +                       pch->completed = desc->txd.cookie;
> +                       list_move_tail(&desc->node, &list);
> +               }
>
> -       pl330_tasklet((unsigned long) pch);
> +               list_splice_tail_init(&list, &pdmac->desc_pool);
> +               spin_unlock_irqrestore(&pch->lock, flags);
> +               break;
> +       case DMA_SLAVE_CONFIG:
> +               slave_config = (struct dma_slave_config *)arg;
> +               peri = pch->chan.private;
> +
> +               if (slave_config->direction == DMA_TO_DEVICE) {
> +                       if (slave_config->dst_addr)
> +                               peri->fifo_addr = slave_config->dst_addr;
> +                       if (slave_config->dst_addr_width) {
> +                               i = 0;
> +                               while (slave_config->dst_addr_width != (1 << i))
> +                                       i++;
> +                               peri->burst_sz = i;
> +                       }
> +               } else if (slave_config->direction == DMA_FROM_DEVICE) {
> +                       if (slave_config->src_addr)
> +                               peri->fifo_addr = slave_config->src_addr;
> +                       if (slave_config->src_addr_width) {
> +                               i = 0;
> +                               while (slave_config->src_addr_width != (1 << i))
> +                                       i++;
> +                               peri->burst_sz = i;
> +                       }
> +               }
> +               break;
> +       default:
> +               return -ENXIO;

Printing an error message in the default case would be better.
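
e.g., a sketch:

	default:
		dev_err(pch->dmac->pif.dev, "Not supported command: %d\n", cmd);
		return -ENXIO;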

> +       }
>
>        return 0;
>  }
> @@ -291,6 +385,9 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
>        pl330_release_channel(pch->pl330_chid);
>        pch->pl330_chid = NULL;
>
> +       if (pch->cyclic_task)
> +               pl330_cyclic_free(pch);
> +
>        spin_unlock_irqrestore(&pch->lock, flags);
>  }
>
> @@ -522,6 +619,66 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
>        return burst_len;
>  }
>
> +static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
> +               struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
> +               size_t period_len, enum dma_data_direction direction)
> +{
> +       struct dma_pl330_desc *desc;
> +       struct dma_pl330_chan *pch = to_pchan(chan);
> +       struct dma_pl330_peri *peri = chan->private;
> +       dma_addr_t dst;
> +       dma_addr_t src;
> +
> +       pch = to_pchan(chan);
> +       if (!pch) {
> +               dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
> +                       __func__, __LINE__);
> +               return NULL;
> +       }

'pch' is already assigned where it is defined, so this is duplicated.
Also, dev_err() has an incorrect parameter: if pch is NULL here,
accessing pch->dmac will fail.
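
Something like this sketch would avoid both problems:

	struct dma_pl330_chan *pch = to_pchan(chan);

	if (!pch) {
		pr_err("%s:%d Invalid dma channel\n", __func__, __LINE__);
		return NULL;
	}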

> +
> +       desc = pl330_get_desc(pch);
> +       if (!desc) {
> +               dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
> +                       __func__, __LINE__);
> +               return NULL;
> +       }
> +
> +       switch (direction) {
> +       case DMA_TO_DEVICE:
> +               desc->rqcfg.src_inc = 1;
> +               desc->rqcfg.dst_inc = 0;
> +               src = dma_addr;
> +               dst = peri->fifo_addr;
> +               break;
> +       case DMA_FROM_DEVICE:
> +               desc->rqcfg.src_inc = 0;
> +               desc->rqcfg.dst_inc = 1;
> +               src = peri->fifo_addr;
> +               dst = dma_addr;
> +               break;
> +       default:
> +               dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
> +               __func__, __LINE__);
> +               return NULL;
> +       }
> +
> +       desc->rqcfg.brst_size = peri->burst_sz;
> +       desc->rqcfg.brst_len = 1;
> +
> +       if (!pch->cyclic_task) {
> +               pch->cyclic_task =
> +                       kmalloc(sizeof(struct tasklet_struct), GFP_KERNEL);
> +               tasklet_init(pch->cyclic_task,
> +                       pl330_tasklet_cyclic, (unsigned int)pch);
> +       }
> +
> +       desc->cyclic = true;
> +
> +       fill_px(&desc->px, dst, src, period_len);
> +
> +       return &desc->txd;
> +}
> +
>  static struct dma_async_tx_descriptor *
>  pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
>                dma_addr_t src, size_t len, unsigned long flags)
> @@ -756,6 +913,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
>                        case MEMTODEV:
>                        case DEVTOMEM:
>                                dma_cap_set(DMA_SLAVE, pd->cap_mask);
> +                               dma_cap_set(DMA_CYCLIC, pd->cap_mask);
>                                break;
>                        default:
>                                dev_err(&adev->dev, "DEVTODEV Not Supported\n");
> @@ -781,6 +939,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
>        pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
>        pd->device_free_chan_resources = pl330_free_chan_resources;
>        pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
> +       pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
>        pd->device_tx_status = pl330_tx_status;
>        pd->device_prep_slave_sg = pl330_prep_slave_sg;
>        pd->device_control = pl330_control;
> --
> 1.7.1
>
boojin.kim July 16, 2011, 1:11 a.m. UTC | #4
Chanho Park wrote:

> 2011/7/13 Kukjin Kim <kgene.kim@samsung.com>:
> > From: Boojin Kim <boojin.kim@samsung.com>
> >
> > This patch adds the DMA_CYCLIC capability, used by the audio driver,
> > and the SLAVE_CONFIG capability for transfers between device and memory.
> >
> > Signed-off-by: Boojin Kim <boojin.kim@samsung.com>
> > Cc: Vinod Koul <vinod.koul@intel.com>
> > Cc: Dan Williams <dan.j.williams@intel.com>
> > Signed-off-by: Kukjin Kim <kgene.kim@samsung.com>
> > ---
> >  drivers/dma/pl330.c |  187
> +++++++++++++++++++++++++++++++++++++++++++++++----
> >  1 files changed, 173 insertions(+), 14 deletions(-)
> >
> > diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
> > index 9bdda7b..2162ac5 100644
> > --- a/drivers/dma/pl330.c
> > +++ b/drivers/dma/pl330.c
> > @@ -69,6 +69,9 @@ struct dma_pl330_chan {
> >         * NULL if the channel is available to be acquired.
> >         */
> >        void *pl330_chid;
> > +
> > +       /* task for cyclic capability */
> > +       struct tasklet_struct *cyclic_task;
> >  };
> >
> >  struct dma_pl330_dmac {
> > @@ -105,6 +108,7 @@ struct dma_pl330_desc {
> >
> >        /* The channel which currently holds this desc */
> >        struct dma_pl330_chan *pchan;
> > +       bool cyclic;
> >  };
> >
> >  static inline struct dma_pl330_chan *
> > @@ -184,6 +188,60 @@ static inline void fill_queue(struct dma_pl330_chan
> *pch)
> >        }
> >  }
> >
> > +static void pl330_tasklet_cyclic(unsigned long data)
> > +{
> > +       struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
> > +       struct dma_pl330_desc *desc, *_dt;
> > +       unsigned long flags;
> > +       LIST_HEAD(list);
> > +
> > +       spin_lock_irqsave(&pch->lock, flags);
> > +
> > +       /* Pick up ripe tomatoes */
> > +       list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
> > +               if ((desc->status == DONE) && desc->cyclic) {
> > +                       dma_async_tx_callback callback;
> > +
> > +                       list_move_tail(&desc->node, &pch->work_list);
> > +                       pch->completed = desc->txd.cookie;
> > +
> > +                       desc->status = PREP;
> > +
> > +                       /* Try to submit a req imm.
> > +                       next to the last completed cookie */
> > +                       fill_queue(pch);
> > +
> > +                       /* Make sure the PL330 Channel thread is active
*/
> > +                       pl330_chan_ctrl(pch->pl330_chid,
PL330_OP_START);
> > +
> > +                       callback = desc->txd.callback;
> > +                       if (callback)
> > +                               callback(desc->txd.callback_param);
> > +
> > +               }
> > +
> > +       spin_unlock_irqrestore(&pch->lock, flags);
> > +}
> > +
> > +static void pl330_cyclic_free(struct dma_pl330_chan *pch)
> > +{
> > +       struct dma_pl330_dmac *pdmac = pch->dmac;
> > +       struct dma_pl330_desc *desc, *_dt;
> > +       unsigned long flags;
> > +       LIST_HEAD(list);
> > +
> > +       spin_lock_irqsave(&pdmac->pool_lock, flags);
> > +
> > +       list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
> > +       if (desc->cyclic)
> > +               list_move_tail(&desc->node, &list);
> 
> nitpick: the loop body needs another level of indentation after the for_each
> 

I will address your comment.

> > +
> > +       list_splice_tail_init(&list, &pdmac->desc_pool);
> > +
> > +       spin_unlock_irqrestore(&pdmac->pool_lock, flags);
> > +       pch->cyclic_task = NULL;
> > +}
> > +
> >  static void pl330_tasklet(unsigned long data)
> >  {
> >        struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
> > @@ -227,6 +285,9 @@ static void dma_pl330_rqcb(void *token, enum
> pl330_op_err err)
> >
> >        spin_unlock_irqrestore(&pch->lock, flags);
> >
> > +       if (pch->cyclic_task)
> > +               tasklet_schedule(pch->cyclic_task);
> > +       else
> >        tasklet_schedule(&pch->task);
> >  }
> >
> > @@ -256,25 +317,58 @@ static int pl330_alloc_chan_resources(struct
> dma_chan *chan)
> >  static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
> unsigned long arg)
> >  {
> >        struct dma_pl330_chan *pch = to_pchan(chan);
> > -       struct dma_pl330_desc *desc;
> > +       struct dma_pl330_desc *desc, *_dt;
> >        unsigned long flags;
> > +       struct dma_pl330_dmac *pdmac = pch->dmac;
> > +       struct dma_slave_config *slave_config;
> > +       struct dma_pl330_peri *peri;
> > +       int i;
> > +       LIST_HEAD(list);
> >
> > -       /* Only supports DMA_TERMINATE_ALL */
> > -       if (cmd != DMA_TERMINATE_ALL)
> > -               return -ENXIO;
> > -
> > -       spin_lock_irqsave(&pch->lock, flags);
> > -
> > -       /* FLUSH the PL330 Channel thread */
> > -       pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
> > +       switch (cmd) {
> > +       case DMA_TERMINATE_ALL:
> > +               spin_lock_irqsave(&pch->lock, flags);
> >
> > -       /* Mark all desc done */
> > -       list_for_each_entry(desc, &pch->work_list, node)
> > -               desc->status = DONE;
> > +               /* FLUSH the PL330 Channel thread */
> > +               pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
> >
> > -       spin_unlock_irqrestore(&pch->lock, flags);
> > +               /* Mark all desc done */
> > +               list_for_each_entry_safe(desc, _dt, &pch->work_list ,
node)
> {
> > +                       desc->status = DONE;
> > +                       pch->completed = desc->txd.cookie;
> > +                       list_move_tail(&desc->node, &list);
> > +               }
> >
> > -       pl330_tasklet((unsigned long) pch);
> > +               list_splice_tail_init(&list, &pdmac->desc_pool);
> > +               spin_unlock_irqrestore(&pch->lock, flags);
> > +               break;
> > +       case DMA_SLAVE_CONFIG:
> > +               slave_config = (struct dma_slave_config *)arg;
> > +               peri = pch->chan.private;
> > +
> > +               if (slave_config->direction == DMA_TO_DEVICE) {
> > +                       if (slave_config->dst_addr)
> > +                               peri->fifo_addr =
slave_config->dst_addr;
> > +                       if (slave_config->dst_addr_width) {
> > +                               i = 0;
> > +                               while (slave_config->dst_addr_width !=
(1 <<
> i))
> > +                                       i++;
> > +                               peri->burst_sz = i;
> > +                       }
> > +               } else if (slave_config->direction == DMA_FROM_DEVICE) {
> > +                       if (slave_config->src_addr)
> > +                               peri->fifo_addr =
slave_config->src_addr;
> > +                       if (slave_config->src_addr_width) {
> > +                               i = 0;
> > +                               while (slave_config->src_addr_width !=
(1 <<
> i))
> > +                                       i++;
> > +                               peri->burst_sz = i;
> > +                       }
> > +               }
> > +               break;
> > +       default:
> > +               return -ENXIO;
> 
> Printing an error message in the default case would be better.
> 

I will address your comment.

> > +       }
> >
> >        return 0;
> >  }
> > @@ -291,6 +385,9 @@ static void pl330_free_chan_resources(struct
> dma_chan *chan)
> >        pl330_release_channel(pch->pl330_chid);
> >        pch->pl330_chid = NULL;
> >
> > +       if (pch->cyclic_task)
> > +               pl330_cyclic_free(pch);
> > +
> >        spin_unlock_irqrestore(&pch->lock, flags);
> >  }
> >
> > @@ -522,6 +619,66 @@ static inline int get_burst_len(struct
> dma_pl330_desc *desc, size_t len)
> >        return burst_len;
> >  }
> >
> > +static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
> > +               struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
> > +               size_t period_len, enum dma_data_direction direction)
> > +{
> > +       struct dma_pl330_desc *desc;
> > +       struct dma_pl330_chan *pch = to_pchan(chan);
> > +       struct dma_pl330_peri *peri = chan->private;
> > +       dma_addr_t dst;
> > +       dma_addr_t src;
> > +
> > +       pch = to_pchan(chan);
> > +       if (!pch) {
> > +               dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch
desc\n",
> > +                       __func__, __LINE__);
> > +               return NULL;
> > +       }
> 
> 'pch' is already assigned where it is defined, so this is duplicated.
> Also, dev_err() has an incorrect parameter: if pch is NULL here,
> accessing pch->dmac will fail.

I will address your comment.

> 
> > +
> > +       desc = pl330_get_desc(pch);
> > +       if (!desc) {
> > +               dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch
desc\n",
> > +                       __func__, __LINE__);
> > +               return NULL;
> > +       }
> > +
> > +       switch (direction) {
> > +       case DMA_TO_DEVICE:
> > +               desc->rqcfg.src_inc = 1;
> > +               desc->rqcfg.dst_inc = 0;
> > +               src = dma_addr;
> > +               dst = peri->fifo_addr;
> > +               break;
> > +       case DMA_FROM_DEVICE:
> > +               desc->rqcfg.src_inc = 0;
> > +               desc->rqcfg.dst_inc = 1;
> > +               src = peri->fifo_addr;
> > +               dst = dma_addr;
> > +               break;
> > +       default:
> > +               dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma
> direction\n",
> > +               __func__, __LINE__);
> > +               return NULL;
> > +       }
> > +
> > +       desc->rqcfg.brst_size = peri->burst_sz;
> > +       desc->rqcfg.brst_len = 1;
> > +
> > +       if (!pch->cyclic_task) {
> > +               pch->cyclic_task =
> > +                       kmalloc(sizeof(struct tasklet_struct),
GFP_KERNEL);
> > +               tasklet_init(pch->cyclic_task,
> > +                       pl330_tasklet_cyclic, (unsigned int)pch);
> > +       }
> > +
> > +       desc->cyclic = true;
> > +
> > +       fill_px(&desc->px, dst, src, period_len);
> > +
> > +       return &desc->txd;
> > +}
> > +
> >  static struct dma_async_tx_descriptor *
> >  pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
> >                dma_addr_t src, size_t len, unsigned long flags)
> > @@ -756,6 +913,7 @@ pl330_probe(struct amba_device *adev, const struct
> amba_id *id)
> >                        case MEMTODEV:
> >                        case DEVTOMEM:
> >                                dma_cap_set(DMA_SLAVE, pd->cap_mask);
> > +                               dma_cap_set(DMA_CYCLIC, pd->cap_mask);
> >                                break;
> >                        default:
> >                                dev_err(&adev->dev, "DEVTODEV Not
> Supported\n");
> > @@ -781,6 +939,7 @@ pl330_probe(struct amba_device *adev, const struct
> amba_id *id)
> >        pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
> >        pd->device_free_chan_resources = pl330_free_chan_resources;
> >        pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
> > +       pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
> >        pd->device_tx_status = pl330_tx_status;
> >        pd->device_prep_slave_sg = pl330_prep_slave_sg;
> >        pd->device_control = pl330_control;
> > --
> > 1.7.1
> >
> 
> --
> Best Regards,
> Chanho Park

Patch

diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 9bdda7b..2162ac5 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -69,6 +69,9 @@  struct dma_pl330_chan {
 	 * NULL if the channel is available to be acquired.
 	 */
 	void *pl330_chid;
+
+	/* task for cyclic capability */
+	struct tasklet_struct *cyclic_task;
 };
 
 struct dma_pl330_dmac {
@@ -105,6 +108,7 @@  struct dma_pl330_desc {
 
 	/* The channel which currently holds this desc */
 	struct dma_pl330_chan *pchan;
+	bool cyclic;
 };
 
 static inline struct dma_pl330_chan *
@@ -184,6 +188,60 @@  static inline void fill_queue(struct dma_pl330_chan *pch)
 	}
 }
 
+static void pl330_tasklet_cyclic(unsigned long data)
+{
+	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
+	struct dma_pl330_desc *desc, *_dt;
+	unsigned long flags;
+	LIST_HEAD(list);
+
+	spin_lock_irqsave(&pch->lock, flags);
+
+	/* Pick up ripe tomatoes */
+	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
+		if ((desc->status == DONE) && desc->cyclic) {
+			dma_async_tx_callback callback;
+
+			list_move_tail(&desc->node, &pch->work_list);
+			pch->completed = desc->txd.cookie;
+
+			desc->status = PREP;
+
+			/* Try to submit a req imm.
+			next to the last completed cookie */
+			fill_queue(pch);
+
+			/* Make sure the PL330 Channel thread is active */
+			pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
+
+			callback = desc->txd.callback;
+			if (callback)
+				callback(desc->txd.callback_param);
+
+		}
+
+	spin_unlock_irqrestore(&pch->lock, flags);
+}
+
+static void pl330_cyclic_free(struct dma_pl330_chan *pch)
+{
+	struct dma_pl330_dmac *pdmac = pch->dmac;
+	struct dma_pl330_desc *desc, *_dt;
+	unsigned long flags;
+	LIST_HEAD(list);
+
+	spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
+	if (desc->cyclic)
+		list_move_tail(&desc->node, &list);
+
+	list_splice_tail_init(&list, &pdmac->desc_pool);
+
+	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+	pch->cyclic_task = NULL;
+}
+
 static void pl330_tasklet(unsigned long data)
 {
 	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
@@ -227,6 +285,9 @@  static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
 
 	spin_unlock_irqrestore(&pch->lock, flags);
 
+	if (pch->cyclic_task)
+		tasklet_schedule(pch->cyclic_task);
+	else
 	tasklet_schedule(&pch->task);
 }
 
@@ -256,25 +317,58 @@  static int pl330_alloc_chan_resources(struct dma_chan *chan)
 static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
-	struct dma_pl330_desc *desc;
+	struct dma_pl330_desc *desc, *_dt;
 	unsigned long flags;
+	struct dma_pl330_dmac *pdmac = pch->dmac;
+	struct dma_slave_config *slave_config;
+	struct dma_pl330_peri *peri;
+	int i;
+	LIST_HEAD(list);
 
-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
-	spin_lock_irqsave(&pch->lock, flags);
-
-	/* FLUSH the PL330 Channel thread */
-	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		spin_lock_irqsave(&pch->lock, flags);
 
-	/* Mark all desc done */
-	list_for_each_entry(desc, &pch->work_list, node)
-		desc->status = DONE;
+		/* FLUSH the PL330 Channel thread */
+		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
 
-	spin_unlock_irqrestore(&pch->lock, flags);
+		/* Mark all desc done */
+		list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
+			desc->status = DONE;
+			pch->completed = desc->txd.cookie;
+			list_move_tail(&desc->node, &list);
+		}
 
-	pl330_tasklet((unsigned long) pch);
+		list_splice_tail_init(&list, &pdmac->desc_pool);
+		spin_unlock_irqrestore(&pch->lock, flags);
+		break;
+	case DMA_SLAVE_CONFIG:
+		slave_config = (struct dma_slave_config *)arg;
+		peri = pch->chan.private;
+
+		if (slave_config->direction == DMA_TO_DEVICE) {
+			if (slave_config->dst_addr)
+				peri->fifo_addr = slave_config->dst_addr;
+			if (slave_config->dst_addr_width) {
+				i = 0;
+				while (slave_config->dst_addr_width != (1 << i))
+					i++;
+				peri->burst_sz = i;
+			}
+		} else if (slave_config->direction == DMA_FROM_DEVICE) {
+			if (slave_config->src_addr)
+				peri->fifo_addr = slave_config->src_addr;
+			if (slave_config->src_addr_width) {
+				i = 0;
+				while (slave_config->src_addr_width != (1 << i))
+					i++;
+				peri->burst_sz = i;
+			}
+		}
+		break;
+	default:
+		return -ENXIO;
+	}
 
 	return 0;
 }
@@ -291,6 +385,9 @@  static void pl330_free_chan_resources(struct dma_chan *chan)
 	pl330_release_channel(pch->pl330_chid);
 	pch->pl330_chid = NULL;
 
+	if (pch->cyclic_task)
+		pl330_cyclic_free(pch);
+
 	spin_unlock_irqrestore(&pch->lock, flags);
 }
 
@@ -522,6 +619,66 @@  static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
 	return burst_len;
 }
 
+static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
+		size_t period_len, enum dma_data_direction direction)
+{
+	struct dma_pl330_desc *desc;
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct dma_pl330_peri *peri = chan->private;
+	dma_addr_t dst;
+	dma_addr_t src;
+
+	pch = to_pchan(chan);
+	if (!pch) {
+		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+			__func__, __LINE__);
+		return NULL;
+	}
+
+	desc = pl330_get_desc(pch);
+	if (!desc) {
+		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+			__func__, __LINE__);
+		return NULL;
+	}
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		desc->rqcfg.src_inc = 1;
+		desc->rqcfg.dst_inc = 0;
+		src = dma_addr;
+		dst = peri->fifo_addr;
+		break;
+	case DMA_FROM_DEVICE:
+		desc->rqcfg.src_inc = 0;
+		desc->rqcfg.dst_inc = 1;
+		src = peri->fifo_addr;
+		dst = dma_addr;
+		break;
+	default:
+		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
+		__func__, __LINE__);
+		return NULL;
+	}
+
+	desc->rqcfg.brst_size = peri->burst_sz;
+	desc->rqcfg.brst_len = 1;
+
+	if (!pch->cyclic_task) {
+		pch->cyclic_task =
+			kmalloc(sizeof(struct tasklet_struct), GFP_KERNEL);
+		tasklet_init(pch->cyclic_task,
+			pl330_tasklet_cyclic, (unsigned int)pch);
+	}
+
+	desc->cyclic = true;
+
+	fill_px(&desc->px, dst, src, period_len);
+
+	return &desc->txd;
+}
+
 static struct dma_async_tx_descriptor *
 pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 		dma_addr_t src, size_t len, unsigned long flags)
@@ -756,6 +913,7 @@  pl330_probe(struct amba_device *adev, const struct amba_id *id)
 			case MEMTODEV:
 			case DEVTOMEM:
 				dma_cap_set(DMA_SLAVE, pd->cap_mask);
+				dma_cap_set(DMA_CYCLIC, pd->cap_mask);
 				break;
 			default:
 				dev_err(&adev->dev, "DEVTODEV Not Supported\n");
@@ -781,6 +939,7 @@  pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
 	pd->device_free_chan_resources = pl330_free_chan_resources;
 	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
+	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
 	pd->device_tx_status = pl330_tx_status;
 	pd->device_prep_slave_sg = pl330_prep_slave_sg;
 	pd->device_control = pl330_control;