From patchwork Mon Jun 13 17:46:53 2016
X-Patchwork-Submitter: Michal
X-Patchwork-Id: 9173711
Date: 13 Jun 2016 17:46:53 -0000
From: "Michal"
Subject: [PATCH v3 13/13] spi: sun4i: add DMA support
To: linux-sunxi@googlegroups.com, Rob Herring, Pawel Moll, Mark Rutland,
    Ian Campbell, Kumar Gala, Maxime Ripard, Chen-Yu Tsai, Russell King,
    Mark Brown, Michal Suchanek, Arnd Bergmann, Olof Johansson,
    Krzysztof Kozlowski, Javier Martinez Canillas, Simon Horman,
    Sjoerd Simons, Thierry Reding, Alison Wang, Timo Sigurdsson,
    Jonathan Liu, Gerhard Bertelsmann, Priit Laes,
    devicetree@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
    linux-kernel@vger.kernel.org, linux-spi@vger.kernel.org

From: Emilio López

This patch adds support for transfers of 64 bytes or more on the sun4i
SPI controller. Such transfers are performed via DMA.
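For illustration (not part of the patch): with this change an in-kernel
client can submit a transfer larger than the FIFO and have it serviced
by DMA instead of being rejected with -EMSGSIZE. A minimal, hypothetical
sketch; the function name is invented and "spi" is assumed to be a bound
struct spi_device:

#include <linux/slab.h>
#include <linux/spi/spi.h>

/* Hypothetical example: 128 bytes >= the 64-byte FIFO, so the core maps
 * the buffer into a scatterlist and sunxi_spi_can_dma() picks DMA. */
static int example_large_xfer(struct spi_device *spi)
{
	struct spi_transfer xfer = { };
	u8 *buf;
	int ret;

	buf = kzalloc(128, GFP_KERNEL);	/* kmalloc'd memory is DMA-safe */
	if (!buf)
		return -ENOMEM;

	xfer.tx_buf = buf;
	xfer.len = 128;

	ret = spi_sync_transfer(spi, &xfer, 1);
	kfree(buf);
	return ret;
}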
Signed-off-by: Emilio López
Signed-off-by: Michal Suchanek
---
v2:
 - fall back to previous behaviour when DMA initialization fails
v3:
 - adjust to merged driver
 - add bit set/unset helpers
 - add wait_for_dma (default=1) so the driver does not randomly load without DMA
 - use SUNXI_CNT_MASK as the transfer size limit
---
 drivers/spi/spi-sun4i.c | 247 ++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 230 insertions(+), 17 deletions(-)

diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index c76f8e4..fd6b1a8 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -14,6 +14,8 @@
 #include
 #include
 #include
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include
 #include
 #include
@@ -50,6 +52,12 @@
 #define SUNXI_FIFO_STA_TF_CNT_MASK	0x7f
 #define SUNXI_FIFO_STA_TF_CNT_BITS	16
 
+static int wait_for_dma = 1;
+module_param(wait_for_dma, int, 0644);
+MODULE_PARM_DESC(wait_for_dma,
+		 "If acquiring a DMA channel returns -EPROBE_DEFER, return it and let the kernel defer the SPI master probe.\n"
+		 "Otherwise non-DMA operation is used (default: wait for the DMA driver to load).");
+
 enum SPI_SUNXI_TYPE {
 	SPI_SUN4I = 1,
 	SPI_SUN6I,
@@ -61,6 +69,7 @@ enum SUNXI_REG_ENUM {
 	SUNXI_TFR_CTL_REG,
 	SUNXI_INT_CTL_REG,
 	SUNXI_INT_STA_REG,
+	SUNXI_DMA_CTL_REG,
 	SUNXI_WAIT_REG,
 	SUNXI_CLK_CTL_REG,
 	SUNXI_BURST_CNT_REG,
@@ -79,6 +88,7 @@ static int sun4i_regmap[SUNXI_NUM_REGS] = {
 /* SUNXI_TFR_CTL_REG */		0x08,
 /* SUNXI_INT_CTL_REG */		0x0c,
 /* SUNXI_INT_STA_REG */		0x10,
+/* SUNXI_DMA_CTL_REG */		0x14,
 /* SUNXI_WAIT_REG */		0x18,
 /* SUNXI_CLK_CTL_REG */		0x1c,
 /* SUNXI_BURST_CNT_REG */	0x20,
@@ -93,6 +103,7 @@ static int sun6i_regmap[SUNXI_NUM_REGS] = {
 /* SUNXI_TFR_CTL_REG */		0x08,
 /* SUNXI_INT_CTL_REG */		0x10,
 /* SUNXI_INT_STA_REG */		0x14,
+/* SUNXI_DMA_CTL_REG */		-1,
 /* SUNXI_WAIT_REG */		0x20,
 /* SUNXI_CLK_CTL_REG */		0x24,
 /* SUNXI_BURST_CNT_REG */	0x30,
@@ -110,6 +121,7 @@ enum SUNXI_BITMAP_ENUM {
 	SUNXI_TFR_CTL_CPHA,
 	SUNXI_TFR_CTL_CPOL,
 	SUNXI_TFR_CTL_CS_ACTIVE_LOW,
+	SUNXI_CTL_DMA_DEDICATED,
 	SUNXI_TFR_CTL_FBS,
 	SUNXI_CTL_TF_RST,
 	SUNXI_CTL_RF_RST,
@@ -121,6 +133,9 @@ enum SUNXI_BITMAP_ENUM {
 	SUNXI_TFR_CTL_CS_LEVEL,
 	SUNXI_CTL_TP,
 	SUNXI_INT_CTL_TC,
+	SUNXI_CTL_DMA_RF_READY,
+	SUNXI_CTL_DMA_TF_NOT_FULL,
+	SUNXI_CTL_DMA_TF_HALF,
 	SUNXI_BITMAP_SIZE
 };
 
@@ -130,6 +145,7 @@ static int sun4i_bitmap[SUNXI_BITMAP_SIZE] = {
 /* SUNXI_TFR_CTL_CPHA */	BIT(2),
 /* SUNXI_TFR_CTL_CPOL */	BIT(3),
 /* SUNXI_TFR_CTL_CS_ACTIVE_LOW */ BIT(4),
+/* SUNXI_CTL_DMA_DEDICATED */	BIT(5),
 /* SUNXI_TFR_CTL_FBS */		BIT(6),
 /* SUNXI_CTL_TF_RST */		BIT(8),
 /* SUNXI_CTL_RF_RST */		BIT(9),
@@ -141,6 +157,9 @@ static int sun4i_bitmap[SUNXI_BITMAP_SIZE] = {
 /* SUNXI_TFR_CTL_CS_LEVEL */	BIT(17),
 /* SUNXI_CTL_TP */		BIT(18),
 /* SUNXI_INT_CTL_TC */		BIT(16),
+/* SUNXI_CTL_DMA_RF_READY */	BIT(0),
+/* SUNXI_CTL_DMA_TF_NOT_FULL */	BIT(10),
+/* SUNXI_CTL_DMA_TF_HALF */	BIT(9),
 };
 
 static int sun6i_bitmap[SUNXI_BITMAP_SIZE] = {
@@ -149,6 +168,12 @@ static int sun6i_bitmap[SUNXI_BITMAP_SIZE] = {
 /* SUNXI_TFR_CTL_CPHA */	BIT(0),
 /* SUNXI_TFR_CTL_CPOL */	BIT(1),
 /* SUNXI_TFR_CTL_CS_ACTIVE_LOW */ BIT(2),
+/*
+ * Bit 9 is documented as the dedicated DMA control bit for RX. No
+ * dedicated DMA control bit is documented for TX, and bit 25, at the
+ * corresponding logical position, is listed as unused.
+ */
+/* SUNXI_CTL_DMA_DEDICATED */	BIT(9)|BIT(25),
 /* SUNXI_TFR_CTL_FBS */		BIT(12),
 /* SUNXI_CTL_TF_RST */		BIT(31),
 /* SUNXI_CTL_RF_RST */		BIT(15),
@@ -160,6 +185,15 @@ static int sun6i_bitmap[SUNXI_BITMAP_SIZE] = {
 /* SUNXI_TFR_CTL_CS_LEVEL */	BIT(7),
 /* SUNXI_CTL_TP */		BIT(7),
 /* SUNXI_INT_CTL_TC */		BIT(12),
+/*
+ * On sun4i there are separate bits that enable a DMA request at
+ * different FIFO levels. On sun6i there is a level field plus an enable
+ * bit that enables the request at that FIFO level. Only one level is
+ * ever used, so just pack the relevant bits into one constant.
+ */
+/* SUNXI_CTL_DMA_RF_READY */	BIT(0)|BIT(8),
+/* SUNXI_CTL_DMA_TF_NOT_FULL */	(0x7f << 16)|BIT(24),
+/* SUNXI_CTL_DMA_TF_HALF */	BIT(23)|BIT(24),
 };
 
 struct sunxi_spi {
@@ -207,6 +241,20 @@ static inline u32 sspi_bits(struct sunxi_spi *sspi,
 	return (*sspi->bitmap)[name];
 }
 
+static inline void sunxi_spi_set(struct sunxi_spi *sspi, u32 reg, u32 value)
+{
+	u32 orig = sunxi_spi_read(sspi, reg);
+
+	sunxi_spi_write(sspi, reg, orig | value);
+}
+
+static inline void sunxi_spi_unset(struct sunxi_spi *sspi, u32 reg, u32 value)
+{
+	u32 orig = sunxi_spi_read(sspi, reg);
+
+	sunxi_spi_write(sspi, reg, orig & ~value);
+}
+
 static inline void sunxi_spi_drain_fifo(struct sunxi_spi *sspi, int len)
 {
 	u32 reg, cnt;
@@ -243,6 +291,15 @@ static inline void sunxi_spi_fill_fifo(struct sunxi_spi *sspi, int len)
 	}
 }
 
+static bool sunxi_spi_can_dma(struct spi_master *master,
+			      struct spi_device *spi,
+			      struct spi_transfer *tfr)
+{
+	struct sunxi_spi *sspi = spi_master_get_devdata(master);
+
+	return tfr->len >= sspi->fifo_depth;
+}
+
 static void sunxi_spi_set_cs(struct spi_device *spi, bool enable)
 {
 	struct sunxi_spi *sspi = spi_master_get_devdata(spi->master);
@@ -284,6 +341,8 @@ static size_t sunxi_spi_max_transfer_size(struct spi_device *spi)
 	struct spi_master *master = spi->master;
 	struct sunxi_spi *sspi = spi_master_get_devdata(master);
 
+	if (master->can_dma)
+		return SUNXI_CNT_MASK;
 	return sspi->fifo_depth - 1;
 }
 
@@ -292,22 +351,27 @@ static int sunxi_spi_transfer_one(struct spi_master *master,
 				  struct spi_transfer *tfr)
 {
 	struct sunxi_spi *sspi = spi_master_get_devdata(master);
+	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
 	unsigned int mclk_rate, div, timeout;
 	unsigned int start, end, tx_time;
 	unsigned int tx_len = 0;
 	int ret = 0;
-	u32 reg;
+	u32 reg, trigger = 0;
+
+	if (!master->can_dma) {
+		/* We don't support transfers larger than the FIFO */
+		if (tfr->len > sspi->fifo_depth)
+			return -EMSGSIZE;
+		/*
+		 * Filling the FIFO fully causes a timeout for some reason,
+		 * at least on spi2 on the A10s.
+		 */
+		if ((sspi->type == SPI_SUN4I) &&
+		    tfr->tx_buf && tfr->len >= sspi->fifo_depth)
+			return -EMSGSIZE;
+	}
 
-	/* We don't support transfer larger than the FIFO */
-	if (tfr->len > sspi->fifo_depth)
-		return -EMSGSIZE;
-
-	/*
-	 * Filling the FIFO fully causes timeout for some reason
-	 * at least on spi2 on A10s
-	 */
-	if ((sspi->type == SPI_SUN4I) &&
-	    tfr->tx_buf && tfr->len >= sspi->fifo_depth)
+	if (tfr->len > SUNXI_CNT_MASK)
 		return -EMSGSIZE;
 
 	reinit_completion(&sspi->done);
@@ -405,17 +469,81 @@ static int sunxi_spi_transfer_one(struct spi_master *master,
 	sunxi_spi_write(sspi, SUNXI_BURST_CTL_CNT_REG,
 			SUNXI_BURST_CTL_CNT_STC(tx_len));
 
-	/* Fill the TX FIFO */
-	sunxi_spi_fill_fifo(sspi, sspi->fifo_depth);
+	/* Set up the transfer buffers */
+	if (sunxi_spi_can_dma(master, spi, tfr)) {
+		dev_dbg(&sspi->master->dev, "Using DMA mode for transfer\n");
+
+		if (sspi->tx_buf) {
+			desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
+					tfr->tx_sg.sgl, tfr->tx_sg.nents,
+					DMA_TO_DEVICE,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			if (!desc_tx) {
+				dev_err(&sspi->master->dev,
+					"Couldn't prepare DMA slave\n");
+				ret = -EIO;
+				goto out;
+			}
+
+			if (sspi->type == SPI_SUN4I)
+				trigger |= sspi_bits(sspi, SUNXI_CTL_DMA_TF_NOT_FULL);
+			else
+				trigger |= sspi_bits(sspi, SUNXI_CTL_DMA_TF_HALF);
+
+			dmaengine_submit(desc_tx);
+			dma_async_issue_pending(master->dma_tx);
+		}
+
+		if (sspi->rx_buf) {
+			desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
+					tfr->rx_sg.sgl, tfr->rx_sg.nents,
+					DMA_FROM_DEVICE,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			if (!desc_rx) {
+				dev_err(&sspi->master->dev,
+					"Couldn't prepare DMA slave\n");
+				ret = -EIO;
+				goto out;
+			}
+
+			trigger |= sspi_bits(sspi, SUNXI_CTL_DMA_RF_READY);
+
+			dmaengine_submit(desc_rx);
+			dma_async_issue_pending(master->dma_rx);
+		}
+
+		/* Enable dedicated DMA requests */
+		if (sspi->type == SPI_SUN4I) {
+			sunxi_spi_set(sspi, SUNXI_TFR_CTL_REG,
+				      sspi_bits(sspi, SUNXI_CTL_DMA_DEDICATED));
+			sunxi_spi_write(sspi, SUNXI_DMA_CTL_REG, trigger);
+		} else {
+			trigger |= sspi_bits(sspi, SUNXI_CTL_DMA_DEDICATED);
+			sunxi_spi_write(sspi, SUNXI_FIFO_CTL_REG, trigger);
+		}
+	} else {
+		dev_dbg(&sspi->master->dev, "Using PIO mode for transfer\n");
+
+		/* Disable DMA requests */
+		if (sspi->type == SPI_SUN4I) {
+			sunxi_spi_unset(sspi, SUNXI_TFR_CTL_REG,
+					sspi_bits(sspi, SUNXI_CTL_DMA_DEDICATED));
+			sunxi_spi_write(sspi, SUNXI_DMA_CTL_REG, 0);
+		} else {
+			sunxi_spi_write(sspi, SUNXI_FIFO_CTL_REG, 0);
+		}
+
+		/* Fill the TX FIFO */
+		sunxi_spi_fill_fifo(sspi, sspi->fifo_depth);
+	}
 
 	/* Enable the interrupts */
 	sunxi_spi_write(sspi, SUNXI_INT_CTL_REG,
 			sspi_bits(sspi, SUNXI_INT_CTL_TC));
 
 	/* Start the transfer */
-	reg = sunxi_spi_read(sspi, SUNXI_TFR_CTL_REG);
-	sunxi_spi_write(sspi, SUNXI_TFR_CTL_REG,
-			reg | sspi_bits(sspi, SUNXI_TFR_CTL_XCH));
+	sunxi_spi_set(sspi, SUNXI_TFR_CTL_REG,
+		      sspi_bits(sspi, SUNXI_TFR_CTL_XCH));
 
 	tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
 	start = jiffies;
@@ -431,9 +559,23 @@ static int sunxi_spi_transfer_one(struct spi_master *master,
 		goto out;
 	}
 
+out:
+	if (ret < 0 && sunxi_spi_can_dma(master, spi, tfr)) {
+		dev_dbg(&master->dev, "DMA channel teardown\n");
+		if (sspi->tx_buf)
+			dmaengine_terminate_sync(master->dma_tx);
+		if (sspi->rx_buf)
+			dmaengine_terminate_sync(master->dma_rx);
+	}
+
+	/*
+	 * By this time the transfer has either completed and the FIFO holds
+	 * data from a PIO RX transfer, or the FIFO is empty, or something
+	 * has failed. Drain the FIFO either way so no garbage is left behind.
+	 */
 	sunxi_spi_drain_fifo(sspi, sspi->fifo_depth);
 
-out:
 	sunxi_spi_write(sspi, SUNXI_INT_CTL_REG, 0);
 
 	return ret;
@@ -515,6 +657,7 @@ static int sunxi_spi_runtime_suspend(struct device *dev)
 
 static int sunxi_spi_probe(struct platform_device *pdev)
 {
+	struct dma_slave_config dma_sconfig;
 	struct spi_master *master;
 	struct sunxi_spi *sspi;
 	struct resource *res;
@@ -625,6 +768,54 @@ static int sunxi_spi_probe(struct platform_device *pdev)
 		}
 	}
 
+	master->dma_tx = dma_request_slave_channel_reason(&pdev->dev, "tx");
+	if (IS_ERR(master->dma_tx)) {
+		dev_err(&pdev->dev, "Unable to acquire DMA channel TX\n");
+		ret = PTR_ERR(master->dma_tx);
+		goto err_dma_chan;
+	}
+
+	dma_sconfig.direction = DMA_MEM_TO_DEV;
+	dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	dma_sconfig.dst_addr = res->start + sspi_reg(sspi, SUNXI_TXDATA_REG);
+	dma_sconfig.src_maxburst = 1;
+	dma_sconfig.dst_maxburst = 1;
+
+	ret = dmaengine_slave_config(master->dma_tx, &dma_sconfig);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to configure TX DMA slave\n");
+		goto err_tx_dma_release;
+	}
+
+	master->dma_rx = dma_request_slave_channel_reason(&pdev->dev, "rx");
+	if (IS_ERR(master->dma_rx)) {
+		dev_err(&pdev->dev, "Unable to acquire DMA channel RX\n");
+		ret = PTR_ERR(master->dma_rx);
+		goto err_tx_dma_release;
+	}
+
+	dma_sconfig.direction = DMA_DEV_TO_MEM;
+	dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	dma_sconfig.src_addr = res->start + sspi_reg(sspi, SUNXI_RXDATA_REG);
+	dma_sconfig.src_maxburst = 1;
+	dma_sconfig.dst_maxburst = 1;
+
+	ret = dmaengine_slave_config(master->dma_rx, &dma_sconfig);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to configure RX DMA slave\n");
+		goto err_rx_dma_release;
+	}
+
+	/*
+	 * This is a bit dodgy. If you set can_dma, then map_msg in spi.c
+	 * apparently dereferences your DMA channels if they are non-NULL,
+	 * even if your can_dma never returns true (and crashes if a channel
+	 * is an error pointer). So just don't set can_dma unless both
+	 * channels are valid.
+	 */
+	master->can_dma = sunxi_spi_can_dma;
+wakeup:
 	/*
 	 * This wake-up/shutdown pattern is to be able to have the
 	 * device woken up, even if runtime_pm is disabled
@@ -665,18 +856,40 @@ static int sunxi_spi_probe(struct platform_device *pdev)
 
 	return 0;
 
+err_rx_dma_release:
+	dma_release_channel(master->dma_rx);
+err_tx_dma_release:
+	dma_release_channel(master->dma_tx);
+err_dma_chan:
+	master->dma_tx = NULL;
+	master->dma_rx = NULL;
+	if ((ret == -EPROBE_DEFER) && wait_for_dma)
+		goto err_free_master;
+	goto wakeup;
+
 err_pm_disable:
 	pm_runtime_disable(&pdev->dev);
 	sunxi_spi_runtime_suspend(&pdev->dev);
 
 err_free_master:
+	if (master->can_dma) {
+		dma_release_channel(master->dma_rx);
+		dma_release_channel(master->dma_tx);
+	}
 	spi_master_put(master);
 	return ret;
 }
 
 static int sunxi_spi_remove(struct platform_device *pdev)
 {
+	struct spi_master *master = platform_get_drvdata(pdev);
+
 	pm_runtime_disable(&pdev->dev);
 
+	if (master->can_dma) {
+		dma_release_channel(master->dma_rx);
+		dma_release_channel(master->dma_tx);
+	}
+
 	return 0;
 }
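
Not part of the patch, but a possible way to test it: the new path can be
exercised from userspace through spidev, assuming a spidev node such as
/dev/spidev0.0 is bound on this bus. With dynamic debug enabled for the
driver, a 128-byte transfer should log "Using DMA mode for transfer",
while a transfer shorter than the FIFO logs the PIO message instead:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

int main(void)
{
	unsigned char tx[128] = { 0 }, rx[128];
	struct spi_ioc_transfer tr = {
		.tx_buf = (unsigned long)tx,
		.rx_buf = (unsigned long)rx,
		.len = sizeof(tx),	/* >= fifo_depth, so the DMA path */
		.speed_hz = 1000000,
		.bits_per_word = 8,
	};
	int fd = open("/dev/spidev0.0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, SPI_IOC_MESSAGE(1), &tr) < 1) {
		perror("SPI_IOC_MESSAGE");
		return 1;
	}
	return 0;
}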