From patchwork Tue Jul 16 08:26:53 2019
X-Patchwork-Submitter: Peter Ujfalusi
X-Patchwork-Id: 11045545
From: Peter Ujfalusi
Subject: [PATCH v5 1/3] dmaengine: ti: edma: Clean up the 2x32bit array register accesses
Date: Tue, 16 Jul 2019 11:26:53 +0300
Message-ID: <20190716082655.1620-2-peter.ujfalusi@ti.com>
In-Reply-To: <20190716082655.1620-1-peter.ujfalusi@ti.com>
References: <20190716082655.1620-1-peter.ujfalusi@ti.com>
X-Mailing-List: dmaengine@vger.kernel.org

Introduce defines for getting the array index and the bit number within
the 64bit array register pairs.
Signed-off-by: Peter Ujfalusi
---
 drivers/dma/ti/edma.c | 106 ++++++++++++++++++++++++------------
 1 file changed, 61 insertions(+), 45 deletions(-)

diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index ceabdea40ae0..a39f817b3888 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -133,6 +133,17 @@
 #define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
 #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
 
+/*
+ * 64bit array registers are split into two 32bit registers:
+ * reg0: channel/event 0-31
+ * reg1: channel/event 32-63
+ *
+ * bit 5 in the channel number tells the array index (0/1)
+ * bit 0-4 (0x1f) is the bit offset within the register
+ */
+#define EDMA_REG_ARRAY_INDEX(channel)	((channel) >> 5)
+#define EDMA_CHANNEL_BIT(channel)	(BIT((channel) & 0x1f))
+
 /* PaRAM slots are laid out like this */
 struct edmacc_param {
 	u32 opt;
@@ -441,15 +452,14 @@ static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
 {
 	struct edma_cc *ecc = echan->ecc;
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
+	int idx = EDMA_REG_ARRAY_INDEX(channel);
+	int ch_bit = EDMA_CHANNEL_BIT(channel);
 
 	if (enable) {
-		edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
-					 BIT(channel & 0x1f));
-		edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
-					 BIT(channel & 0x1f));
+		edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
+		edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit);
 	} else {
-		edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
-					 BIT(channel & 0x1f));
+		edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit);
 	}
 }
 
@@ -587,26 +597,26 @@ static void edma_start(struct edma_chan *echan)
 {
 	struct edma_cc *ecc = echan->ecc;
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
-	int j = (channel >> 5);
-	unsigned int mask = BIT(channel & 0x1f);
+	int idx = EDMA_REG_ARRAY_INDEX(channel);
+	int ch_bit = EDMA_CHANNEL_BIT(channel);
 
 	if (!echan->hw_triggered) {
 		/* EDMA channels without event association */
-		dev_dbg(ecc->dev, "ESR%d %08x\n", j,
-			edma_shadow0_read_array(ecc, SH_ESR, j));
-		edma_shadow0_write_array(ecc, SH_ESR, j, mask);
+		dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
+			edma_shadow0_read_array(ecc, SH_ESR, idx));
+		edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
 	} else {
 		/* EDMA channel with event association */
-		dev_dbg(ecc->dev, "ER%d %08x\n", j,
-			edma_shadow0_read_array(ecc, SH_ER, j));
+		dev_dbg(ecc->dev, "ER%d %08x\n", idx,
+			edma_shadow0_read_array(ecc, SH_ER, idx));
 		/* Clear any pending event or error */
-		edma_write_array(ecc, EDMA_ECR, j, mask);
-		edma_write_array(ecc, EDMA_EMCR, j, mask);
+		edma_write_array(ecc, EDMA_ECR, idx, ch_bit);
+		edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
 		/* Clear any SER */
-		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
-		edma_shadow0_write_array(ecc, SH_EESR, j, mask);
-		dev_dbg(ecc->dev, "EER%d %08x\n", j,
-			edma_shadow0_read_array(ecc, SH_EER, j));
+		edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
+		edma_shadow0_write_array(ecc, SH_EESR, idx, ch_bit);
+		dev_dbg(ecc->dev, "EER%d %08x\n", idx,
+			edma_shadow0_read_array(ecc, SH_EER, idx));
 	}
 }
 
@@ -614,19 +624,19 @@ static void edma_stop(struct edma_chan *echan)
 {
 	struct edma_cc *ecc = echan->ecc;
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
-	int j = (channel >> 5);
-	unsigned int mask = BIT(channel & 0x1f);
+	int idx = EDMA_REG_ARRAY_INDEX(channel);
+	int ch_bit = EDMA_CHANNEL_BIT(channel);
 
-	edma_shadow0_write_array(ecc, SH_EECR, j, mask);
-	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
-	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
-	edma_write_array(ecc, EDMA_EMCR, j, mask);
+	edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit);
+	edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
+	edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
+	edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
 
 	/* clear possibly pending completion interrupt */
-	edma_shadow0_write_array(ecc, SH_ICR, j, mask);
+	edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
 
-	dev_dbg(ecc->dev, "EER%d %08x\n", j,
-		edma_shadow0_read_array(ecc, SH_EER, j));
+	dev_dbg(ecc->dev, "EER%d %08x\n", idx,
+		edma_shadow0_read_array(ecc, SH_EER, idx));
 
 	/* REVISIT: consider guarding against inappropriate event
 	 * chaining by overwriting with dummy_paramset.
@@ -640,45 +650,49 @@ static void edma_stop(struct edma_chan *echan)
 static void edma_pause(struct edma_chan *echan)
 {
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
-	unsigned int mask = BIT(channel & 0x1f);
 
-	edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
+	edma_shadow0_write_array(echan->ecc, SH_EECR,
+				 EDMA_REG_ARRAY_INDEX(channel),
+				 EDMA_CHANNEL_BIT(channel));
 }
 
 /* Re-enable EDMA hardware events on the specified channel.  */
 static void edma_resume(struct edma_chan *echan)
 {
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
-	unsigned int mask = BIT(channel & 0x1f);
 
-	edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
+	edma_shadow0_write_array(echan->ecc, SH_EESR,
+				 EDMA_REG_ARRAY_INDEX(channel),
+				 EDMA_CHANNEL_BIT(channel));
 }
 
 static void edma_trigger_channel(struct edma_chan *echan)
 {
 	struct edma_cc *ecc = echan->ecc;
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
-	unsigned int mask = BIT(channel & 0x1f);
+	int idx = EDMA_REG_ARRAY_INDEX(channel);
+	int ch_bit = EDMA_CHANNEL_BIT(channel);
 
-	edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);
+	edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
 
-	dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
-		edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
+	dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
+		edma_shadow0_read_array(ecc, SH_ESR, idx));
 }
 
 static void edma_clean_channel(struct edma_chan *echan)
 {
 	struct edma_cc *ecc = echan->ecc;
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
-	int j = (channel >> 5);
-	unsigned int mask = BIT(channel & 0x1f);
+	int idx = EDMA_REG_ARRAY_INDEX(channel);
+	int ch_bit = EDMA_CHANNEL_BIT(channel);
 
-	dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
-	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
+	dev_dbg(ecc->dev, "EMR%d %08x\n", idx,
+		edma_read_array(ecc, EDMA_EMR, idx));
+	edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
 	/* Clear the corresponding EMR bits */
-	edma_write_array(ecc, EDMA_EMCR, j, mask);
+	edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
 	/* Clear any SER */
-	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
+	edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
 	edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
 }
 
@@ -708,7 +722,8 @@ static int edma_alloc_channel(struct edma_chan *echan,
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
 
 	/* ensure access through shadow region 0 */
-	edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
+	edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
+		       EDMA_CHANNEL_BIT(channel));
 
 	/* ensure no events are pending */
 	edma_stop(echan);
@@ -2482,8 +2497,9 @@ static int edma_pm_resume(struct device *dev)
 	for (i = 0; i < ecc->num_channels; i++) {
 		if (echan[i].alloced) {
 			/* ensure access through shadow region 0 */
-			edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
-				       BIT(i & 0x1f));
+			edma_or_array2(ecc, EDMA_DRAE, 0,
+				       EDMA_REG_ARRAY_INDEX(i),
+				       EDMA_CHANNEL_BIT(i));
 
 			edma_setup_interrupt(&echan[i], true);
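To illustrate the mapping that the two new macros encode, here is a minimal standalone sketch (userspace C, illustrative only and not part of the driver; BIT() is expanded to a plain shift). Channel 35, for example, lands in the second 32bit register (index 1) with bit mask 0x00000008.

#include <stdio.h>

/* Userspace mirrors of the driver macros, for illustration only */
#define EDMA_REG_ARRAY_INDEX(channel)	((channel) >> 5)
#define EDMA_CHANNEL_BIT(channel)	(1u << ((channel) & 0x1f))

int main(void)
{
	static const unsigned int examples[] = { 0, 31, 32, 35, 63 };
	unsigned int i;

	for (i = 0; i < sizeof(examples) / sizeof(examples[0]); i++)
		printf("channel %2u -> reg index %u, bit mask 0x%08x\n",
		       examples[i],
		       EDMA_REG_ARRAY_INDEX(examples[i]),
		       EDMA_CHANNEL_BIT(examples[i]));
	return 0;
}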
From patchwork Tue Jul 16 08:26:54 2019
X-Patchwork-Submitter: Peter Ujfalusi
X-Patchwork-Id: 11045553
From: Peter Ujfalusi
Subject: [PATCH v5 2/3] dmaengine: ti: edma: Correct the residue calculation (fix for memcpy)
Date: Tue, 16 Jul 2019 11:26:54 +0300
Message-ID: <20190716082655.1620-3-peter.ujfalusi@ti.com>
In-Reply-To: <20190716082655.1620-1-peter.ujfalusi@ti.com>
References: <20190716082655.1620-1-peter.ujfalusi@ti.com>
X-Mailing-List: dmaengine@vger.kernel.org

For memcpy we never stored the start address of the transfer for the
pset, which rendered the memcpy residue calculation completely broken.

In the edma_residue() function we also need to make some corrections to
the calculation: instead of waiting for all EDMA channels to be idle
(in a busy system it can take a few iterations to hit a point when all
queues are idle), wait for the event pending on the given channel
(SH_ER for hw synchronized channels, SH_ESR for manually triggered
channels).

If the position returned by EDMA is 0 it implies that the last PaRAM
set has been consumed and we are at the closing dummy set, thus we can
conclude that the transfer is completed and we can return 0 as residue.

Signed-off-by: Peter Ujfalusi
---
 drivers/dma/ti/edma.c | 31 +++++++++++++++++++++++--------
 1 file changed, 23 insertions(+), 8 deletions(-)

diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index a39f817b3888..5b8cbd6d7610 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -1026,6 +1026,7 @@ static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
 		src_cidx = cidx;
 		dst_bidx = acnt;
 		dst_cidx = cidx;
+		epset->addr = src_addr;
 	} else {
 		dev_err(dev, "%s: direction not implemented yet\n", __func__);
 		return -EINVAL;
@@ -1736,7 +1737,11 @@ static u32 edma_residue(struct edma_desc *edesc)
 	int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
 	struct edma_chan *echan = edesc->echan;
 	struct edma_pset *pset = edesc->pset;
-	dma_addr_t done, pos;
+	dma_addr_t done, pos, pos_old;
+	int channel = EDMA_CHAN_SLOT(echan->ch_num);
+	int idx = EDMA_REG_ARRAY_INDEX(channel);
+	int ch_bit = EDMA_CHANNEL_BIT(channel);
+	int event_reg;
 	int i;
 
 	/*
@@ -1749,16 +1754,20 @@ static u32 edma_residue(struct edma_desc *edesc)
 	 * "pos" may represent a transfer request that is still being
 	 * processed by the EDMACC or EDMATC. We will busy wait until
 	 * any one of the situations occurs:
-	 * 1. the DMA hardware is idle
-	 * 2. a new transfer request is setup
+	 * 1. while and event is pending for the channel
+	 * 2. a position updated
 	 * 3. we hit the loop limit
 	 */
-	while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
-		/* check if a new transfer request is setup */
-		if (edma_get_position(echan->ecc,
-				      echan->slot[0], dst) != pos) {
+	if (is_slave_direction(edesc->direction))
+		event_reg = SH_ER;
+	else
+		event_reg = SH_ESR;
+
+	pos_old = pos;
+	while (edma_shadow0_read_array(echan->ecc, event_reg, idx) & ch_bit) {
+		pos = edma_get_position(echan->ecc, echan->slot[0], dst);
+		if (pos != pos_old)
 			break;
-		}
 
 		if (!--loop_count) {
 			dev_dbg_ratelimited(echan->vchan.chan.device->dev,
@@ -1783,6 +1792,12 @@ static u32 edma_residue(struct edma_desc *edesc)
 		return edesc->residue_stat;
 	}
 
+	/*
+	 * If the position is 0, then EDMA loaded the closing dummy slot, the
+	 * transfer is completed
+	 */
+	if (!pos)
+		return 0;
 	/*
 	 * For SG operation we catch up with the last processed
 	 * status.
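The residue rule introduced above can be condensed into a small conceptual sketch (not driver code; total_len, pset_start and pos are illustrative names). With the start address of the pset stored, the memcpy residue is the total length minus the bytes already moved, and a returned position of 0 means the closing dummy set was loaded, i.e. the transfer is done:

/*
 * Conceptual sketch of the residue rule described in the commit
 * message; this is not the driver implementation.
 */
static unsigned int memcpy_residue_sketch(unsigned int total_len,
					  unsigned long long pset_start,
					  unsigned long long pos)
{
	/* pos == 0: the closing dummy PaRAM set was loaded, transfer done */
	if (pos == 0)
		return 0;

	/* bytes not yet transferred: total minus what has been moved */
	return total_len - (unsigned int)(pos - pset_start);
}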
From patchwork Tue Jul 16 08:26:55 2019
X-Patchwork-Submitter: Peter Ujfalusi
X-Patchwork-Id: 11045539
From: Peter Ujfalusi
Subject: [PATCH v5 3/3] dmaengine: ti: edma: Support for polled (memcpy) completion
Date: Tue, 16 Jul 2019 11:26:55 +0300
Message-ID: <20190716082655.1620-4-peter.ujfalusi@ti.com>
In-Reply-To: <20190716082655.1620-1-peter.ujfalusi@ti.com>
References: <20190716082655.1620-1-peter.ujfalusi@ti.com>
X-Mailing-List: dmaengine@vger.kernel.org
When a DMA client driver does not set DMA_PREP_INTERRUPT (because it
does not want to use interrupts for DMA completion, or because it
cannot rely on DMA interrupts when it executes the memcpy with
interrupts disabled) it will poll the status of the transfer.

Since no EDMA register tells us that the transfer is completed (we can
only tell that the PaRAM set has been sent to the TPTC for processing),
we need to check the residue of the transfer: if it is 0, the transfer
is completed.

Signed-off-by: Peter Ujfalusi
---
 drivers/dma/ti/edma.c | 37 +++++++++++++++++++++++++++++++++----
 1 file changed, 33 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 5b8cbd6d7610..bcd431283d8a 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -180,6 +180,7 @@ struct edma_desc {
 	struct list_head node;
 	enum dma_transfer_direction direction;
 	int cyclic;
+	bool polled;
 	int absync;
 	int pset_nr;
 	struct edma_chan *echan;
@@ -1227,8 +1228,9 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 	edesc->pset[0].param.opt |= ITCCHEN;
 
 	if (nslots == 1) {
-		/* Enable transfer complete interrupt */
-		edesc->pset[0].param.opt |= TCINTEN;
+		/* Enable transfer complete interrupt if requested */
+		if (tx_flags & DMA_PREP_INTERRUPT)
+			edesc->pset[0].param.opt |= TCINTEN;
 	} else {
 		/* Enable transfer complete chaining for the first slot */
 		edesc->pset[0].param.opt |= TCCHEN;
@@ -1255,9 +1257,14 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 		}
 
 		edesc->pset[1].param.opt |= ITCCHEN;
-		edesc->pset[1].param.opt |= TCINTEN;
+		/* Enable transfer complete interrupt if requested */
+		if (tx_flags & DMA_PREP_INTERRUPT)
+			edesc->pset[1].param.opt |= TCINTEN;
 	}
 
+	if (!(tx_flags & DMA_PREP_INTERRUPT))
+		edesc->polled = true;
+
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
 }
 
@@ -1827,18 +1834,40 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
 {
 	struct edma_chan *echan = to_edma_chan(chan);
 	struct virt_dma_desc *vdesc;
+	struct dma_tx_state txstate_tmp;
 	enum dma_status ret;
 	unsigned long flags;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_COMPLETE || !txstate)
+
+	if (ret == DMA_COMPLETE)
 		return ret;
 
+	/* Provide a dummy dma_tx_state for completion checking */
+	if (!txstate)
+		txstate = &txstate_tmp;
+
+	txstate->residue = 0;
 
 	spin_lock_irqsave(&echan->vchan.lock, flags);
 	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
 		txstate->residue = edma_residue(echan->edesc);
 	else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
 		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
+
+	/*
+	 * Mark the cookie completed if the residue is 0 for non cyclic
+	 * transfers
+	 */
+	if (ret != DMA_COMPLETE && !txstate->residue &&
+	    echan->edesc && echan->edesc->polled &&
+	    echan->edesc->vdesc.tx.cookie == cookie) {
+		edma_stop(echan);
+		vchan_cookie_complete(&echan->edesc->vdesc);
+		echan->edesc = NULL;
+		edma_execute(echan);
+		ret = DMA_COMPLETE;
+	}
+
 	spin_unlock_irqrestore(&echan->vchan.lock, flags);
 
 	return ret;
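For reference, a sketch of how a dmaengine client could use such a polled memcpy: prepare the descriptor without DMA_PREP_INTERRUPT, then poll the cookie status until the driver reports completion. The function below is hypothetical client code (chan, dst, src and len are assumed to be set up by the caller; timeout handling is omitted), not part of this patch:

#include <linux/dmaengine.h>

/* Hypothetical polled memcpy client, shown for illustration only */
static int polled_memcpy_sketch(struct dma_chan *chan, dma_addr_t dst,
				dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	enum dma_status status;
	dma_cookie_t cookie;

	/* No DMA_PREP_INTERRUPT: completion will be detected by polling */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return -EIO;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);

	/* Poll until the driver no longer reports the transfer in flight */
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	} while (status == DMA_IN_PROGRESS);

	return status == DMA_COMPLETE ? 0 : -EIO;
}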