From patchwork Thu Aug 28 06:56:50 2014
From: Simon Horman
X-Patchwork-Submitter: Simon Horman
X-Patchwork-Id: 4799131
To: ltsi-dev@lists.linuxfoundation.org
Cc: Magnus Damm
Date: Thu, 28 Aug 2014 15:56:50 +0900
Message-Id: <1409209620-24487-285-git-send-email-horms+renesas@verge.net.au>
In-Reply-To: <1409209620-24487-1-git-send-email-horms+renesas@verge.net.au>
References: <1409209620-24487-1-git-send-email-horms+renesas@verge.net.au>
X-Mailer: git-send-email 2.0.1
Subject: [LTSI-dev] [PATCH LTSI-3.14 284/894] spi: rspi: Use SPI core DMA mapping framework

From: Geert Uytterhoeven

Use the SPI core DMA mapping framework instead of our own.
If available, DMA is used for transfers larger than the FIFO size (8 or
32 bytes).
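For background, the core framework referred to above works like this: the
driver fills in master->dma_tx, master->dma_rx and a can_dma() callback, and
for every transfer where can_dma() returns true the SPI core maps
tx_buf/rx_buf into the xfer->tx_sg/rx_sg scatter-gather tables before calling
transfer_one() and unmaps them again afterwards. That is why the hand-rolled
rspi_dma_map_sg()/rspi_dma_unmap_sg() helpers can be dropped below. The
following is only an illustrative sketch of that contract, using a
hypothetical foo_* driver rather than code from this patch:

/* Illustrative sketch only; foo_* names and FOO_FIFO_SIZE are hypothetical. */
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#define FOO_FIFO_SIZE	8	/* assumed FIFO depth, for illustration only */

/* The core calls this per transfer to decide whether it should DMA-map it. */
static bool foo_can_dma(struct spi_master *master, struct spi_device *spi,
			struct spi_transfer *xfer)
{
	return xfer->len > FOO_FIFO_SIZE;
}

static int foo_transfer_one(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	if (master->can_dma && foo_can_dma(master, spi, xfer)) {
		/* The core already mapped the buffers: xfer->tx_sg is valid. */
		struct dma_async_tx_descriptor *desc;

		desc = dmaengine_prep_slave_sg(master->dma_tx, xfer->tx_sg.sgl,
					       xfer->tx_sg.nents, DMA_MEM_TO_DEV,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)
			return -EIO;
		dmaengine_submit(desc);
		dma_async_issue_pending(master->dma_tx);
		return 0;	/* a real driver waits for completion here */
	}

	return -EINVAL;		/* PIO path omitted from this sketch */
}

static int foo_probe(struct platform_device *pdev)
{
	struct spi_master *master = spi_alloc_master(&pdev->dev, 0);

	if (!master)
		return -ENOMEM;

	master->dma_tx = dma_request_slave_channel(&pdev->dev, "tx");
	master->dma_rx = dma_request_slave_channel(&pdev->dev, "rx");
	if (master->dma_tx && master->dma_rx)
		master->can_dma = foo_can_dma;	/* core maps/unmaps sg for us */
	master->transfer_one = foo_transfer_one;

	return devm_spi_register_master(&pdev->dev, master);
}

The fifo_size field added to struct spi_ops plays the role of FOO_FIFO_SIZE
here: transfers that fit entirely in the FIFO stay on the PIO path, so the
DMA mapping and descriptor setup cost is only paid when it is likely to win.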
Signed-off-by: Geert Uytterhoeven
Signed-off-by: Mark Brown
(cherry picked from commit 2f777ec91aa0623e058c43dd4aaf0b3325d3c3e8)
Signed-off-by: Simon Horman
---
 drivers/spi/spi-rspi.c | 140 ++++++++++++++++++-------------------------------
 1 file changed, 50 insertions(+), 90 deletions(-)

diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 7b993f7..753ac7b 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -195,10 +195,6 @@ struct rspi_data {
 	int rx_irq, tx_irq;
 	const struct spi_ops *ops;
 
-	/* for dmaengine */
-	struct dma_chan *chan_tx;
-	struct dma_chan *chan_rx;
-
 	unsigned dma_callbacked:1;
 	unsigned byte_access:1;
 };
@@ -251,6 +247,7 @@ struct spi_ops {
 			    struct spi_transfer *xfer);
 	u16 mode_bits;
 	u16 flags;
+	u16 fifo_size;
 };
 
 /*
@@ -466,39 +463,16 @@ static void rspi_dma_complete(void *arg)
 	wake_up_interruptible(&rspi->wait);
 }
 
-static int rspi_dma_map_sg(struct scatterlist *sg, const void *buf,
-			   unsigned len, struct dma_chan *chan,
-			   enum dma_transfer_direction dir)
-{
-	sg_init_table(sg, 1);
-	sg_set_buf(sg, buf, len);
-	sg_dma_len(sg) = len;
-	return dma_map_sg(chan->device->dev, sg, 1, dir);
-}
-
-static void rspi_dma_unmap_sg(struct scatterlist *sg, struct dma_chan *chan,
-			      enum dma_transfer_direction dir)
-{
-	dma_unmap_sg(chan->device->dev, sg, 1, dir);
-}
-
 static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
 {
-	struct scatterlist sg;
-	const void *buf = t->tx_buf;
 	struct dma_async_tx_descriptor *desc;
-	unsigned int len = t->len;
-	int ret = 0;
-
-	if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE))
-		return -EFAULT;
+	int ret;
 
-	desc = dmaengine_prep_slave_sg(rspi->chan_tx, &sg, 1, DMA_TO_DEVICE,
+	desc = dmaengine_prep_slave_sg(rspi->master->dma_tx, t->tx_sg.sgl,
+				       t->tx_sg.nents, DMA_TO_DEVICE,
 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!desc) {
-		ret = -EIO;
-		goto end;
-	}
+	if (!desc)
+		return -EIO;
 
 	/*
 	 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
@@ -513,7 +487,7 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
 	desc->callback = rspi_dma_complete;
 	desc->callback_param = rspi;
 	dmaengine_submit(desc);
-	dma_async_issue_pending(rspi->chan_tx);
+	dma_async_issue_pending(rspi->master->dma_tx);
 
 	ret = wait_event_interruptible_timeout(rspi->wait,
 					       rspi->dma_callbacked, HZ);
@@ -524,9 +498,6 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
 	rspi_disable_irq(rspi, SPCR_SPTIE);
 
 	enable_irq(rspi->tx_irq);
-
-end:
-	rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE);
 	return ret;
 }
 
@@ -562,39 +533,22 @@ static void qspi_receive_init(const struct rspi_data *rspi)
 static int rspi_send_receive_dma(struct rspi_data *rspi,
 				 struct spi_transfer *t)
 {
-	struct scatterlist sg_rx, sg_tx;
-	const void *tx_buf = t->tx_buf;
-	void *rx_buf = t->rx_buf;
 	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
-	unsigned int len = t->len;
-	int ret = 0;
+	int ret;
 
 	/* prepare transmit transfer */
-	if (!rspi_dma_map_sg(&sg_tx, tx_buf, len, rspi->chan_tx,
-			     DMA_TO_DEVICE))
-		return -EFAULT;
-
-	desc_tx = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_tx, 1,
-			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!desc_tx) {
-		ret = -EIO;
-		goto end_tx_mapped;
-	}
+	desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx, t->tx_sg.sgl,
+					  t->tx_sg.nents, DMA_TO_DEVICE,
+					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc_tx)
+		return -EIO;
 
 	/* prepare receive transfer */
-	if (!rspi_dma_map_sg(&sg_rx, rx_buf, len, rspi->chan_rx,
-			     DMA_FROM_DEVICE)) {
-		ret = -EFAULT;
-		goto end_tx_mapped;
-
-	}
-	desc_rx = dmaengine_prep_slave_sg(rspi->chan_rx, &sg_rx, 1,
-					  DMA_FROM_DEVICE,
+	desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx, t->rx_sg.sgl,
+					  t->rx_sg.nents, DMA_FROM_DEVICE,
 					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!desc_rx) {
-		ret = -EIO;
-		goto end;
-	}
+	if (!desc_rx)
+		return -EIO;
 
 	rspi_receive_init(rspi);
 
@@ -613,11 +567,11 @@ static int rspi_send_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
 	desc_rx->callback = rspi_dma_complete;
 	desc_rx->callback_param = rspi;
 	dmaengine_submit(desc_rx);
-	dma_async_issue_pending(rspi->chan_rx);
+	dma_async_issue_pending(rspi->master->dma_rx);
 
 	desc_tx->callback = NULL;	/* No callback */
 	dmaengine_submit(desc_tx);
-	dma_async_issue_pending(rspi->chan_tx);
+	dma_async_issue_pending(rspi->master->dma_tx);
 
 	ret = wait_event_interruptible_timeout(rspi->wait,
 					       rspi->dma_callbacked, HZ);
@@ -631,19 +585,21 @@ static int rspi_send_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
 	if (rspi->rx_irq != rspi->tx_irq)
 		enable_irq(rspi->rx_irq);
 
-end:
-	rspi_dma_unmap_sg(&sg_rx, rspi->chan_rx, DMA_FROM_DEVICE);
-end_tx_mapped:
-	rspi_dma_unmap_sg(&sg_tx, rspi->chan_tx, DMA_TO_DEVICE);
 	return ret;
 }
 
-static int rspi_is_dma(const struct rspi_data *rspi, struct spi_transfer *t)
+static bool __rspi_can_dma(const struct rspi_data *rspi,
+			   const struct spi_transfer *xfer)
 {
-	if (rspi->chan_tx)
-		return 1;
+	return xfer->len > rspi->ops->fifo_size;
+}
 
-	return 0;
+static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
+			 struct spi_transfer *xfer)
+{
+	struct rspi_data *rspi = spi_master_get_devdata(master);
+
+	return __rspi_can_dma(rspi, xfer);
 }
 
 static int rspi_transfer_out_in(struct rspi_data *rspi,
@@ -676,7 +632,7 @@ static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
 {
 	struct rspi_data *rspi = spi_master_get_devdata(master);
 
-	if (!rspi_is_dma(rspi, xfer))
+	if (!master->can_dma || !__rspi_can_dma(rspi, xfer))
 		return rspi_transfer_out_in(rspi, xfer);
 
 	if (xfer->rx_buf)
@@ -976,7 +932,7 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev,
 	return chan;
 }
 
-static int rspi_request_dma(struct device *dev, struct rspi_data *rspi,
+static int rspi_request_dma(struct device *dev, struct spi_master *master,
 			    const struct resource *res)
 {
 	const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
@@ -984,31 +940,32 @@ static int rspi_request_dma(struct device *dev, struct rspi_data *rspi,
 	if (!rspi_pd || !rspi_pd->dma_rx_id || !rspi_pd->dma_tx_id)
 		return 0;	/* The driver assumes no error. */
 
-	rspi->chan_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM,
-					      rspi_pd->dma_rx_id,
-					      res->start + RSPI_SPDR);
-	if (!rspi->chan_rx)
+	master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM,
+					       rspi_pd->dma_rx_id,
+					       res->start + RSPI_SPDR);
+	if (!master->dma_rx)
 		return -ENODEV;
 
-	rspi->chan_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV,
-					      rspi_pd->dma_tx_id,
-					      res->start + RSPI_SPDR);
-	if (!rspi->chan_tx) {
-		dma_release_channel(rspi->chan_rx);
-		rspi->chan_rx = NULL;
+	master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV,
+					       rspi_pd->dma_tx_id,
+					       res->start + RSPI_SPDR);
+	if (!master->dma_tx) {
+		dma_release_channel(master->dma_rx);
+		master->dma_rx = NULL;
		return -ENODEV;
 	}
+	master->can_dma = rspi_can_dma;
 
 	dev_info(dev, "DMA available");
 	return 0;
 }
 
 static void rspi_release_dma(struct rspi_data *rspi)
 {
-	if (rspi->chan_tx)
-		dma_release_channel(rspi->chan_tx);
-	if (rspi->chan_rx)
-		dma_release_channel(rspi->chan_rx);
+	if (rspi->master->dma_tx)
+		dma_release_channel(rspi->master->dma_tx);
+	if (rspi->master->dma_rx)
+		dma_release_channel(rspi->master->dma_rx);
 }
 
 static int rspi_remove(struct platform_device *pdev)
@@ -1026,6 +983,7 @@ static const struct spi_ops rspi_ops = {
 	.transfer_one = rspi_transfer_one,
 	.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
 	.flags = SPI_MASTER_MUST_TX,
+	.fifo_size = 8,
 };
 
 static const struct spi_ops rspi_rz_ops = {
@@ -1033,6 +991,7 @@ static const struct spi_ops rspi_rz_ops = {
 	.transfer_one = rspi_rz_transfer_one,
 	.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
 	.flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
+	.fifo_size = 8,	/* 8 for TX, 32 for RX */
 };
 
 static const struct spi_ops qspi_ops = {
@@ -1042,6 +1001,7 @@ static const struct spi_ops qspi_ops = {
 	.transfer_one = qspi_transfer_one,
 	.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
 		     SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD,
 	.flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
+	.fifo_size = 32,
 };
 
 #ifdef CONFIG_OF
@@ -1199,7 +1159,7 @@ static int rspi_probe(struct platform_device *pdev)
 		goto error2;
 	}
 
-	ret = rspi_request_dma(&pdev->dev, rspi, res);
+	ret = rspi_request_dma(&pdev->dev, master, res);
 	if (ret < 0)
 		dev_warn(&pdev->dev, "DMA not available, using PIO\n");