@@ -601,6 +601,9 @@ static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan)
static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
{
struct zynqmp_dma_desc_sw *desc, *next;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&chan->lock, irqflags);

list_for_each_entry_safe(desc, next, &chan->done_list, node) {
dma_async_tx_callback callback;
@@ -617,6 +620,8 @@ static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
/* Run any dependencies, then free the descriptor */
zynqmp_dma_free_descriptor(chan, desc);
}
+
+ spin_unlock_irqrestore(&chan->lock, irqflags);
}

/**
@@ -656,9 +661,13 @@ static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
*/
static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&chan->lock, irqflags);
zynqmp_dma_free_desc_list(chan, &chan->active_list);
zynqmp_dma_free_desc_list(chan, &chan->pending_list);
zynqmp_dma_free_desc_list(chan, &chan->done_list);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
}

/**
@@ -668,11 +677,8 @@ static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
{
struct zynqmp_dma_chan *chan = to_chan(dchan);
- unsigned long irqflags;

- spin_lock_irqsave(&chan->lock, irqflags);
zynqmp_dma_free_descriptors(chan);
- spin_unlock_irqrestore(&chan->lock, irqflags);
dma_free_coherent(chan->dev,
(2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
chan->desc_pool_v, chan->desc_pool_p);
@@ -687,11 +693,16 @@ static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
*/
static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan)
{
+ unsigned long irqflags;
+
writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);

+ spin_lock_irqsave(&chan->lock, irqflags);
zynqmp_dma_complete_descriptor(chan);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
zynqmp_dma_chan_desc_cleanup(chan);
zynqmp_dma_free_descriptors(chan);
+
zynqmp_dma_init(chan);
}

@@ -747,28 +758,27 @@ static void zynqmp_dma_do_tasklet(struct tasklet_struct *t)
u32 count;
unsigned long irqflags;

- spin_lock_irqsave(&chan->lock, irqflags);
-
if (chan->err) {
zynqmp_dma_reset(chan);
chan->err = false;
- goto unlock;
+ return;
}

+ spin_lock_irqsave(&chan->lock, irqflags);
count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
-
while (count) {
zynqmp_dma_complete_descriptor(chan);
count--;
}
+ spin_unlock_irqrestore(&chan->lock, irqflags);

zynqmp_dma_chan_desc_cleanup(chan);

- if (chan->idle) {
+ if (chan->idle) {
+ spin_lock_irqsave(&chan->lock, irqflags);
zynqmp_dma_start_transfer(chan);
-
-unlock:
- spin_unlock_irqrestore(&chan->lock, irqflags);
+ spin_unlock_irqrestore(&chan->lock, irqflags);
+ }
}

/**
@@ -780,12 +790,9 @@ static void zynqmp_dma_do_tasklet(struct tasklet_struct *t)
static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
{
struct zynqmp_dma_chan *chan = to_chan(dchan);
- unsigned long irqflags;

- spin_lock_irqsave(&chan->lock, irqflags);
writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
zynqmp_dma_free_descriptors(chan);
- spin_unlock_irqrestore(&chan->lock, irqflags);

return 0;
}
The descriptor lists are locked for the entire tasklet that completes
the descriptors. This is not necessary, because the lock only protects
the descriptor lists. Make the spin lock more fine-grained and take it
only in the functions that actually operate on the descriptor lists.
This decreases the time during which the lock is held.

Signed-off-by: Michael Tretter <m.tretter@pengutronix.de>
---
 drivers/dma/xilinx/zynqmp_dma.c | 35 ++++++++++++++++++++-------------
 1 file changed, 21 insertions(+), 14 deletions(-)
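
For illustration, the locking pattern this patch moves to can be sketched
in isolation. The following is a minimal, self-contained sketch, not code
from the driver: all names (demo_desc, demo_chan, demo_cleanup_done_list)
are invented for the example, and it only assumes the same layout as
above, i.e. a per-channel spinlock that guards nothing but the descriptor
lists.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_desc {
	struct list_head node;
	void (*callback)(void *param);
	void *param;
};

struct demo_chan {
	spinlock_t lock;		/* protects only the descriptor lists */
	struct list_head done_list;	/* completed, callback still pending */
};

/*
 * Each helper that walks a descriptor list takes the lock itself
 * (chan->lock is assumed to have been spin_lock_init()ed at channel
 * setup). Callers such as the completion tasklet therefore no longer
 * hold the lock for their entire runtime, only for the list walks.
 * As in the patch above, the callback runs under the lock here, so a
 * callback must not take chan->lock itself.
 */
static void demo_cleanup_done_list(struct demo_chan *chan)
{
	struct demo_desc *desc, *next;
	unsigned long irqflags;

	spin_lock_irqsave(&chan->lock, irqflags);
	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		list_del(&desc->node);
		if (desc->callback)
			desc->callback(desc->param);
		kfree(desc);
	}
	spin_unlock_irqrestore(&chan->lock, irqflags);
}

With helpers shaped like this, a caller such as zynqmp_dma_do_tasklet
becomes a sequence of short critical sections: drain the completion
count under the lock, drop the lock, run the cleanup, and retake the
lock only to restart the engine if the channel is idle.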