From patchwork Tue Oct 2 13:12:01 2012
X-Patchwork-Submitter: Bastian Hecht
X-Patchwork-Id: 1536961
From: Bastian Hecht
To: linux-mtd@lists.infradead.org
Cc: Magnus Damm, Guennadi Liakhovetski, Vikram Narayanan,
    linux-sh@vger.kernel.org, linux-arm-kernel@lists.infradead.org
Subject: [PATCH 1/2] mtd: sh_flctl: Add DMA capability
Date: Tue, 2 Oct 2012 15:12:01 +0200
Message-Id: <1349183522-15321-2-git-send-email-hechtb@gmail.com>
In-Reply-To: <1349183522-15321-1-git-send-email-hechtb@gmail.com>
References: <1349183522-15321-1-git-send-email-hechtb@gmail.com>
X-Mailer: git-send-email 1.7.5.4

The code probes whether DMA channels can be allocated and tears them down
on driver removal or on failure, as needed. If channels are available, they
are used to transfer the data part of a page (but not the ECC data); on
failure we fall back to PIO mode.

Based on Guennadi Liakhovetski's code from the sh_mmcif driver.
Signed-off-by: Bastian Hecht
Reviewed-by: Guennadi Liakhovetski
---
 drivers/mtd/nand/sh_flctl.c  | 174 +++++++++++++++++++++++++++++++++++++++++-
 include/linux/mtd/sh_flctl.h |  12 +++
 2 files changed, 183 insertions(+), 3 deletions(-)

diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 4fbfe96..0fead2a 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -24,10 +24,13 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -106,6 +109,84 @@ static void wait_completion(struct sh_flctl *flctl)
         writeb(0x0, FLTRCR(flctl));
 }
 
+static void flctl_dma_complete(void *param)
+{
+        struct sh_flctl *flctl = param;
+
+        complete(&flctl->dma_complete);
+}
+
+static void flctl_release_dma(struct sh_flctl *flctl)
+{
+        if (flctl->chan_fifo0_rx) {
+                dma_release_channel(flctl->chan_fifo0_rx);
+                flctl->chan_fifo0_rx = NULL;
+        }
+        if (flctl->chan_fifo0_tx) {
+                dma_release_channel(flctl->chan_fifo0_tx);
+                flctl->chan_fifo0_tx = NULL;
+        }
+}
+
+static void flctl_setup_dma(struct sh_flctl *flctl)
+{
+        dma_cap_mask_t mask;
+        struct dma_slave_config cfg;
+        struct platform_device *pdev = flctl->pdev;
+        struct sh_flctl_platform_data *pdata = pdev->dev.platform_data;
+        int ret;
+
+        if (!pdata)
+                return;
+
+        if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
+                return;
+
+        /* We can only either use DMA for both Tx and Rx or not use it at all */
+        dma_cap_zero(mask);
+        dma_cap_set(DMA_SLAVE, mask);
+
+        flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
+                                (void *)pdata->slave_id_fifo0_tx);
+        dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
+                flctl->chan_fifo0_tx);
+
+        if (!flctl->chan_fifo0_tx)
+                return;
+
+        memset(&cfg, 0, sizeof(cfg));
+        cfg.slave_id = pdata->slave_id_fifo0_tx;
+        cfg.direction = DMA_MEM_TO_DEV;
+        cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
+        cfg.src_addr = 0;
+        ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
+        if (ret < 0)
+                goto err;
+
+        flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
+                                (void *)pdata->slave_id_fifo0_rx);
+        dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
+                flctl->chan_fifo0_rx);
+
+        if (!flctl->chan_fifo0_rx)
+                goto err;
+
+        cfg.slave_id = pdata->slave_id_fifo0_rx;
+        cfg.direction = DMA_DEV_TO_MEM;
+        cfg.dst_addr = 0;
+        cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
+        ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
+        if (ret < 0)
+                goto err;
+
+        init_completion(&flctl->dma_complete);
+
+        return;
+
+err:
+        flctl_release_dma(flctl);
+}
+
 static void set_addr(struct mtd_info *mtd, int column, int page_addr)
 {
         struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -261,6 +342,70 @@ static void wait_wecfifo_ready(struct sh_flctl *flctl)
         timeout_error(flctl, __func__);
 }
 
+static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
+                                        int len, enum dma_data_direction dir)
+{
+        struct dma_async_tx_descriptor *desc = NULL;
+        struct dma_chan *chan;
+        enum dma_transfer_direction tr_dir;
+        dma_addr_t dma_addr;
+        dma_cookie_t cookie = -EINVAL;
+        uint32_t reg;
+        int ret;
+
+        if (dir == DMA_FROM_DEVICE) {
+                chan = flctl->chan_fifo0_rx;
+                tr_dir = DMA_DEV_TO_MEM;
+        } else {
+                chan = flctl->chan_fifo0_tx;
+                tr_dir = DMA_MEM_TO_DEV;
+        }
+
+        dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
+
+        if (dma_addr)
+                desc = dmaengine_prep_slave_single(chan, dma_addr, len,
+                        tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+        if (desc) {
+                reg = readl(FLINTDMACR(flctl));
+                reg |= DREQ0EN;
+                writel(reg, FLINTDMACR(flctl));
+
+                desc->callback = flctl_dma_complete;
+                desc->callback_param = flctl;
+                cookie = dmaengine_submit(desc);
+
+                dma_async_issue_pending(chan);
+        } else {
+                /* DMA failed, fall back to PIO */
+                flctl_release_dma(flctl);
+                dev_warn(&flctl->pdev->dev,
+                         "DMA failed, falling back to PIO\n");
+                ret = -EIO;
+                goto out;
+        }
+
+        ret =
+        wait_for_completion_timeout(&flctl->dma_complete,
+                                msecs_to_jiffies(3000));
+
+        if (ret <= 0) {
+                chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+                dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
+        }
+
+out:
+        reg = readl(FLINTDMACR(flctl));
+        reg &= ~DREQ0EN;
+        writel(reg, FLINTDMACR(flctl));
+
+        dma_unmap_single(chan->device->dev, dma_addr, len, dir);
+
+        /* ret > 0 is success */
+        return ret;
+}
+
 static void read_datareg(struct sh_flctl *flctl, int offset)
 {
         unsigned long data;
@@ -279,11 +424,20 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
 
         len_4align = (rlen + 3) / 4;
 
+        /* initiate DMA transfer */
+        if (flctl->chan_fifo0_rx && rlen >= 32 &&
+                flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
+                        goto convert;   /* DMA success */
+
+        /* do polling transfer */
         for (i = 0; i < len_4align; i++) {
                 wait_rfifo_ready(flctl);
                 buf[i] = readl(FLDTFIFO(flctl));
-                buf[i] = be32_to_cpu(buf[i]);
         }
+
+convert:
+        for (i = 0; i < len_4align; i++)
+                buf[i] = be32_to_cpu(buf[i]);
 }
 
 static enum flctl_ecc_res_t read_ecfiforeg
@@ -308,13 +462,23 @@ static enum flctl_ecc_res_t read_ecfiforeg
 static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
 {
         int i, len_4align;
-        unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
+        unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
         void *fifo_addr = (void *)FLDTFIFO(flctl);
 
         len_4align = (rlen + 3) / 4;
+
+        for (i = 0; i < len_4align; i++)
+                buf[i] = cpu_to_be32(buf[i]);
+
+        /* initiate DMA transfer */
+        if (flctl->chan_fifo0_tx && rlen >= 32 &&
+                flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
+                        return; /* DMA success */
+
+        /* do polling transfer */
         for (i = 0; i < len_4align; i++) {
                 wait_wfifo_ready(flctl);
-                writel(cpu_to_be32(data[i]), fifo_addr);
+                writel(buf[i], fifo_addr);
         }
 }
 
@@ -930,6 +1094,8 @@ static int __devinit flctl_probe(struct platform_device *pdev)
         pm_runtime_enable(&pdev->dev);
         pm_runtime_resume(&pdev->dev);
 
+        flctl_setup_dma(flctl);
+
         ret = nand_scan_ident(flctl_mtd, 1, NULL);
         if (ret)
                 goto err_chip;
@@ -947,6 +1113,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
         return 0;
 
 err_chip:
+        flctl_release_dma(flctl);
         pm_runtime_disable(&pdev->dev);
         free_irq(irq, flctl);
 err_flste:
@@ -960,6 +1127,7 @@ static int __devexit flctl_remove(struct platform_device *pdev)
 {
         struct sh_flctl *flctl = platform_get_drvdata(pdev);
 
+        flctl_release_dma(flctl);
         nand_release(&flctl->mtd);
         pm_runtime_disable(&pdev->dev);
         free_irq(platform_get_irq(pdev, 0), flctl);
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index 01e4b15..e98fe7e 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -20,6 +20,7 @@
 #ifndef __SH_FLCTL_H__
 #define __SH_FLCTL_H__
 
+#include
 #include
 #include
 #include
@@ -107,6 +108,7 @@
 #define ESTERINTE       (0x1 << 24)     /* ECC error interrupt enable */
 #define AC1CLR          (0x1 << 19)     /* ECC FIFO clear */
 #define AC0CLR          (0x1 << 18)     /* Data FIFO clear */
+#define DREQ0EN         (0x1 << 16)     /* FLDTFIFODMA Request Enable */
 #define ECERB           (0x1 << 9)      /* ECC error */
 #define STERB           (0x1 << 8)      /* Status error */
 #define STERINTE        (0x1 << 4)      /* Status error enable */
@@ -138,6 +140,8 @@ enum flctl_ecc_res_t {
         FL_TIMEOUT
 };
 
+struct dma_chan;
+
 struct sh_flctl {
         struct mtd_info mtd;
         struct nand_chip chip;
@@ -161,6 +165,11 @@ struct sh_flctl {
         unsigned hwecc:1;       /* Hardware ECC (0 = disabled, 1 = enabled) */
         unsigned holden:1;      /* Hardware has FLHOLDCR and HOLDEN is set */
         unsigned qos_request:1; /* QoS request to prevent deep power shutdown */
+
+        /* DMA related objects */
+        struct dma_chan         *chan_fifo0_rx;
+        struct dma_chan         *chan_fifo0_tx;
+        struct completion       dma_complete;
 };
 
 struct sh_flctl_platform_data {
@@ -170,6 +179,9 @@ struct sh_flctl_platform_data {
 
         unsigned has_hwecc:1;
         unsigned use_holden:1;
+
+        unsigned int            slave_id_fifo0_tx;
+        unsigned int            slave_id_fifo0_rx;
 };
 
 static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo)
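
Usage note: the DMA path is only taken when the board passes positive FIFO0
slave IDs through sh_flctl_platform_data; otherwise flctl_setup_dma() returns
early and the driver keeps using the polling loops. A minimal board-side
sketch is shown below. It is an illustration and not part of this patch: the
SHDMA_SLAVE_FLCTL0_TX/RX identifiers are placeholders for whatever slave IDs
the SoC's shdma setup defines, and the memory/IRQ resources, partition table
and flcmncr_val setup are omitted for brevity.

/*
 * Hypothetical board code enabling the FLCTL DMA path via platform data.
 * The slave ID macros are placeholders; flctl_setup_dma() ignores values
 * that are <= 0 and the driver then stays on PIO.
 */
#include <linux/platform_device.h>
#include <linux/mtd/sh_flctl.h>

static struct sh_flctl_platform_data flctl_pdata = {
        .has_hwecc              = 1,
        .use_holden             = 1,
        .slave_id_fifo0_tx      = SHDMA_SLAVE_FLCTL0_TX,        /* placeholder */
        .slave_id_fifo0_rx      = SHDMA_SLAVE_FLCTL0_RX,        /* placeholder */
};

static struct platform_device flctl_device = {
        .name   = "sh_flctl",
        .id     = -1,
        .dev    = {
                .platform_data  = &flctl_pdata,
        },
        /* .resource / .num_resources for the FLCTL MMIO and IRQ omitted */
};

With platform data like this, flctl_setup_dma(), called from flctl_probe(),
requests the Tx and Rx channels through shdma_chan_filter(); if either
allocation or dmaengine_slave_config() fails, flctl_release_dma() drops the
channels and read_fiforeg()/write_fiforeg() simply keep using the polling
transfers.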