From patchwork Fri Mar 25 13:36:13 2011
X-Patchwork-Submitter: Guennadi Liakhovetski
X-Patchwork-Id: 662181
From: Guennadi Liakhovetski
Date: Fri, 25 Mar 2011 14:36:13 +0100 (CET)
To: linux-sh@vger.kernel.org
Cc: linux-mmc@vger.kernel.org, Ian Molton, Chris Ball
Subject: [PATCH 1/2 v2] mmc: tmio-mmc: Improve DMA stability on sh-mobile

diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index ab1adea..e88627b 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -485,7 +485,10 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 	unsigned int count;
 	unsigned long flags;
 
-	if (!data) {
+	if (host->chan_tx || host->chan_rx) {
+		pr_err("PIO IRQ in DMA mode!\n");
+		return;
+	} else if (!data) {
 		pr_debug("Spurious PIO IRQ\n");
 		return;
 	}
@@ -648,6 +651,8 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 		if (host->data->flags & MMC_DATA_READ) {
 			if (!host->chan_rx)
 				enable_mmc_irqs(host, TMIO_MASK_READOP);
+			else
+				tasklet_schedule(&host->dma_issue);
 		} else {
 			if (!host->chan_tx)
 				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
@@ -779,18 +784,6 @@ static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
 #endif
 }
 
-static void tmio_dma_complete(void *arg)
-{
-	struct tmio_mmc_host *host = arg;
-
-	dev_dbg(&host->pdev->dev, "Command completed\n");
-
-	if (!host->data)
-		dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
-	else
-		enable_mmc_irqs(host, TMIO_STAT_DATAEND);
-}
-
 static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 {
 	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
@@ -817,6 +810,8 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 		goto pio;
 	}
 
+	disable_mmc_irqs(host, TMIO_STAT_RXRDY);
+
 	/* The only sg element can be unaligned, use our bounce buffer then */
 	if (!aligned) {
 		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
@@ -827,14 +822,11 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
 	if (ret > 0)
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			DMA_FROM_DEVICE, DMA_CTRL_ACK);
 
-	if (desc) {
-		desc->callback = tmio_dma_complete;
-		desc->callback_param = host;
+	if (desc)
 		cookie = dmaengine_submit(desc);
-		dma_async_issue_pending(chan);
-	}
+
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
 		__func__, host->sg_len, ret, cookie, host->mrq);
@@ -886,6 +878,8 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 		goto pio;
 	}
 
+	disable_mmc_irqs(host, TMIO_STAT_TXRQ);
+
 	/* The only sg element can be unaligned, use our bounce buffer then */
 	if (!aligned) {
 		unsigned long flags;
@@ -900,13 +894,11 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
 	if (ret > 0)
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			DMA_TO_DEVICE, DMA_CTRL_ACK);
 
-	if (desc) {
-		desc->callback = tmio_dma_complete;
-		desc->callback_param = host;
+	if (desc)
 		cookie = dmaengine_submit(desc);
-	}
+
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
 		__func__, host->sg_len, ret, cookie, host->mrq);
@@ -947,17 +939,30 @@ static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
 static void tmio_issue_tasklet_fn(unsigned long priv)
 {
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
-	struct dma_chan *chan = host->chan_tx;
+	struct dma_chan *chan = NULL;
+
+	spin_lock_irq(&host->lock);
+
+	if (host && host->data) {
+		if (host->data->flags & MMC_DATA_READ)
+			chan = host->chan_rx;
+		else
+			chan = host->chan_tx;
+	}
+
+	spin_unlock_irq(&host->lock);
 
-	dma_async_issue_pending(chan);
+	enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+
+	if (chan)
+		dma_async_issue_pending(chan);
 }
 
 static void tmio_tasklet_fn(unsigned long arg)
 {
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
-	unsigned long flags;
 
-	spin_lock_irqsave(&host->lock, flags);
+	spin_lock_irq(&host->lock);
 
 	if (!host->data)
 		goto out;
@@ -973,7 +978,7 @@ static void tmio_tasklet_fn(unsigned long arg)
 	tmio_mmc_do_data_irq(host);
 
 out:
-	spin_unlock_irqrestore(&host->lock, flags);
+	spin_unlock_irq(&host->lock);
 }
 
 /* It might be necessary to make filter MFD specific */
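
Taken together, the hunks above change the DMA completion model: descriptors
are now prepared without DMA_PREP_INTERRUPT and without a completion callback
(tmio_dma_complete() is removed), completion is signalled by the controller's
own DATAEND interrupt, and dma_async_issue_pending() is deferred to the
dma_issue tasklet, which tmio_mmc_cmd_irq() schedules once the command has
completed. The sketch below condenses the resulting RX submission path; it is
illustrative only, not part of the patch: the function name is made up for
the example, and the PIO fallback and bounce-buffer handling are omitted.

/* Illustrative sketch, condensed from the hunks above; error handling
 * and the unaligned (bounce-buffer) case are left out.
 */
static void tmio_mmc_start_dma_rx_sketch(struct tmio_mmc_host *host)
{
	struct dma_chan *chan = host->chan_rx;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sg = host->sg_ptr;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	/* Mask RXRDY so the PIO handler cannot race with the DMA transfer */
	disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		/* No DMA_PREP_INTERRUPT and no callback: the DATAEND IRQ,
		 * enabled later from tmio_issue_tasklet_fn(), now signals
		 * completion */
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
				DMA_FROM_DEVICE, DMA_CTRL_ACK);

	if (desc)
		/* Queue only; dma_async_issue_pending() runs later in the
		 * dma_issue tasklet, scheduled from the command IRQ */
		cookie = dmaengine_submit(desc);

	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, host->sg_len, ret, cookie);
}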