[1/2] dmaengine: shdma: restore partial transfer calculation

Message ID: Pine.LNX.4.64.1207302119320.28003@axis700.grange
State:      Accepted
Commit:     4f46f8ac80416b0e8fd3aba6a0d842205fb29140

Commit Message

Guennadi Liakhovetski, July 30, 2012, 7:28 p.m. UTC
The recent shdma driver split has mistakenly removed support for partial
DMA transfer size calculation on forced termination. This patch restores
it.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---
 drivers/dma/sh/shdma-base.c |    9 +++++++++
 drivers/dma/sh/shdma.c      |   12 ++++++++++++
 include/linux/shdma-base.h  |    2 ++
 3 files changed, 23 insertions(+), 0 deletions(-)
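
For context: when a client driver issues DMA_TERMINATE_ALL, shdma_control()
in the patch below halts the channel and, with this fix, records in
desc->partial how many bytes of the descriptor at the head of ld_queue had
already been transferred. A minimal client-side sketch of how that value
might be consumed follows; it assumes the descriptor layout from
include/linux/shdma-base.h, the helper name stop_and_get_received() is
illustrative only (not part of this patch), and descriptor-lifetime
details are ignored for brevity:

#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

/*
 * Hypothetical client helper (not part of this patch): halt the channel,
 * then report how many bytes of the in-flight descriptor completed
 * before the stop.
 */
static size_t stop_and_get_received(struct dma_chan *chan,
				    struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *sdesc = container_of(tx, struct shdma_desc,
						async_tx);

	/* Ends up in shdma_control(), which records sdesc->partial */
	dmaengine_terminate_all(chan);

	return sdesc->partial;
}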

Patch

diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 27f5c78..f4cd946 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -483,6 +483,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
 	new->mark = DESC_PREPARED;
 	new->async_tx.flags = flags;
 	new->direction = direction;
+	new->partial = 0;
 
 	*len -= copy_size;
 	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
@@ -644,6 +645,14 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	case DMA_TERMINATE_ALL:
 		spin_lock_irqsave(&schan->chan_lock, flags);
 		ops->halt_channel(schan);
+
+		if (ops->get_partial && !list_empty(&schan->ld_queue)) {
+			/* Record partial transfer */
+			struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
+						struct shdma_desc, node);
+			desc->partial = ops->get_partial(schan, desc);
+		}
+
 		spin_unlock_irqrestore(&schan->chan_lock, flags);
 
 		shdma_chan_ld_cleanup(schan, true);
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index 027c9be..f41bcc5 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -381,6 +381,17 @@ static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
 	return true;
 }
 
+static size_t sh_dmae_get_partial(struct shdma_chan *schan,
+				  struct shdma_desc *sdesc)
+{
+	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+						    shdma_chan);
+	struct sh_dmae_desc *sh_desc = container_of(sdesc,
+					struct sh_dmae_desc, shdma_desc);
+	return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
+		sh_chan->xmit_shift;
+}
+
 /* Called from error IRQ or NMI */
 static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 {
@@ -632,6 +643,7 @@ static const struct shdma_ops sh_dmae_shdma_ops = {
 	.start_xfer = sh_dmae_start_xfer,
 	.embedded_desc = sh_dmae_embedded_desc,
 	.chan_irq = sh_dmae_chan_irq,
+	.get_partial = sh_dmae_get_partial,
 };
 
 static int __devinit sh_dmae_probe(struct platform_device *pdev)
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 93f9821..a3728bf 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -50,6 +50,7 @@ struct shdma_desc {
 	struct list_head node;
 	struct dma_async_tx_descriptor async_tx;
 	enum dma_transfer_direction direction;
+	size_t partial;
 	dma_cookie_t cookie;
 	int chunks;
 	int mark;
@@ -98,6 +99,7 @@ struct shdma_ops {
 	void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
 	struct shdma_desc *(*embedded_desc)(void *, int);
 	bool (*chan_irq)(struct shdma_chan *, int);
+	size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *);
 };
 
 struct shdma_dev {
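
A note on the arithmetic in sh_dmae_get_partial(): the TCR register counts
down as the controller makes progress, so subtracting the read-back value
from the originally programmed count and shifting by xmit_shift (log2 of
the transfer unit size in bytes) yields the number of bytes already moved.
A worked example with illustrative values, assuming both counts are
expressed in transfer units of 1 << xmit_shift bytes:

	u32 programmed = 0x100;	/* sh_desc->hw.tcr: count as programmed */
	u32 remaining  = 0x40;	/* TCR read-back: units still pending */
	int xmit_shift = 2;	/* log2(4): channel moves 4-byte units */

	size_t partial = (programmed - remaining) << xmit_shift;
	/* (0x100 - 0x40) << 2 == 0x300 == 768 bytes already transferred */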