
dmaengine: sprd: Set request pending flag when DMA controller is active

Message ID 02adbe4364ec436ec2c5bc8fd2386bab98edd884.1584019223.git.baolin.wang7@gmail.com (mailing list archive)
State Accepted

Commit Message

Baolin Wang March 12, 2020, 1:26 p.m. UTC
From: Zhenfang Wang <zhenfang.wang@unisoc.com>

On new Spreadtrum platforms, when the CPU enters idle, it gates the
DMA controller's clock to save power if the controller is not busy.
The controller's busy signal, in turn, depends on both the DMA enable
flag and the request pending flag.

When the DMA controller starts a transfer, the DMA enable flag has
already been set; we should now also set the request pending flag,
otherwise the CPU may fail to detect the controller's busy signal and
gate the DMA clock in the middle of a transfer.

Signed-off-by: Zhenfang Wang <zhenfang.wang@unisoc.com>
Signed-off-by: Baolin Wang <baolin.wang7@gmail.com>
---
 drivers/dma/sprd-dma.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)
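The fix reduces to mirroring a channel's state into one bit of a
pending-enable register pair. As a self-contained illustration of the
bit selection used in the patch below (the register offsets here are
made-up placeholders, not the driver's real values), request ids 0-31
land in the first register and ids 32 and up in the second:

#include <stdint.h>
#include <stdio.h>

#define SPRD_DMA_SOFTWARE_UID		0
/* Placeholder offsets; the real ones live in drivers/dma/sprd-dma.c. */
#define SPRD_DMA_GLB_REQ_PEND0_EN	0x50
#define SPRD_DMA_GLB_REQ_PEND1_EN	0x54

/* Map a channel's device id to its pending-enable register and bit,
 * mirroring the selection logic sprd_dma_set_pending() adds below. */
static int pending_bit(uint32_t dev_id, uint32_t *reg, uint32_t *bit)
{
	uint32_t req_id;

	if (dev_id == SPRD_DMA_SOFTWARE_UID)
		return -1;	/* software-triggered channels have no request line */

	req_id = dev_id - 1;	/* hardware request ids start from 0 */
	if (req_id < 32) {
		*reg = SPRD_DMA_GLB_REQ_PEND0_EN;
		*bit = 1u << req_id;
	} else {
		*reg = SPRD_DMA_GLB_REQ_PEND1_EN;
		*bit = 1u << (req_id - 32);
	}
	return 0;
}

int main(void)
{
	uint32_t reg, bit;

	if (pending_bit(33, &reg, &bit) == 0)	/* req_id 32 -> PEND1, bit 0 */
		printf("reg 0x%x, bit mask 0x%x\n", reg, bit);
	return 0;
}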

Comments

Vinod Koul March 23, 2020, 6:07 a.m. UTC | #1
On 12-03-20, 21:26, Baolin Wang wrote:
> From: Zhenfang Wang <zhenfang.wang@unisoc.com>
> 
> On new Spreadtrum platforms, when the CPU enters idle, it gates the
> DMA controller's clock to save power if the controller is not busy.
> The controller's busy signal, in turn, depends on both the DMA enable
> flag and the request pending flag.
> 
> When the DMA controller starts a transfer, the DMA enable flag has
> already been set; we should now also set the request pending flag,
> otherwise the CPU may fail to detect the controller's busy signal and
> gate the DMA clock in the middle of a transfer.

Applied, thanks

Patch

diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 9a31a315..c327eaa 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -486,6 +486,28 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
 	return 0;
 }
 
+static void sprd_dma_set_pending(struct sprd_dma_chn *schan, bool enable)
+{
+	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
+	u32 reg, val, req_id;
+
+	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
+		return;
+
+	/* The DMA request id always starts from 0. */
+	req_id = schan->dev_id - 1;
+
+	if (req_id < 32) {
+		reg = SPRD_DMA_GLB_REQ_PEND0_EN;
+		val = BIT(req_id);
+	} else {
+		reg = SPRD_DMA_GLB_REQ_PEND1_EN;
+		val = BIT(req_id - 32);
+	}
+
+	sprd_dma_glb_update(sdev, reg, val, enable ? val : 0);
+}
+
 static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
 				    struct sprd_dma_desc *sdesc)
 {
@@ -532,6 +554,7 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
 	 */
 	sprd_dma_set_chn_config(schan, schan->cur_desc);
 	sprd_dma_set_uid(schan);
+	sprd_dma_set_pending(schan, true);
 	sprd_dma_enable_chn(schan);
 
 	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
@@ -543,6 +566,7 @@ static void sprd_dma_stop(struct sprd_dma_chn *schan)
 static void sprd_dma_stop(struct sprd_dma_chn *schan)
 {
 	sprd_dma_stop_and_disable(schan);
+	sprd_dma_set_pending(schan, false);
 	sprd_dma_unset_uid(schan);
 	sprd_dma_clear_int(schan);
 	schan->cur_desc = NULL;
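
For reference, sprd_dma_glb_update() used by the new helper is, from
the shape of the call site, a masked read-modify-write on the
controller's global register block. A minimal sketch of that pattern,
assuming this form for the helper (the driver's actual implementation
may differ in detail):

#include <linux/io.h>

/* Assumed shape of sprd_dma_glb_update(): read-modify-write of one
 * global register, touching only the bits selected by @mask. */
static void glb_update(void __iomem *glb_base, u32 reg, u32 mask, u32 val)
{
	u32 tmp = readl(glb_base + reg);

	tmp = (tmp & ~mask) | (val & mask);
	writel(tmp, glb_base + reg);
}

With mask equal to the bit and val either the bit or 0, the call
sprd_dma_glb_update(sdev, reg, val, enable ? val : 0) in the patch sets
the pending-enable bit on start and clears it on stop.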