From patchwork Thu Oct 23 03:14:13 2014
X-Patchwork-Submitter: Cao Minh Hiep
X-Patchwork-Id: 5137891
From: Cao Minh Hiep
To: geert+renesas@glider.be
Cc: broonie@kernel.org, linux-spi@vger.kernel.org,
	kuninori.morimoto.gx@renesas.com, yoshihiro.shimoda.uh@renesas.com,
	ryusuke.sakato.bx@renesas.com, linux-sh@vger.kernel.org
Subject: [PATCH v3 1/1] spi: Using Trigger number to transmit/receive data
Date: Thu, 23 Oct 2014 12:14:13 +0900
Message-Id: <1414034053-14750-2-git-send-email-cm-hiep@jinso.co.jp>
X-Mailer: git-send-email 1.9.1
In-Reply-To: <1414034053-14750-1-git-send-email-cm-hiep@jinso.co.jp>
References: <1414034053-14750-1-git-send-email-cm-hiep@jinso.co.jp>
X-Mailing-List: linux-sh@vger.kernel.org

From: Hiep Cao Minh

When 32 bytes of data have been prepared in the Transmit/Receive Buffer,
transmit/receive them as a single 32-byte burst by using the
Transmit/Receive Buffer Data Triggering Number, instead of
transmitting/receiving the data one byte at a time. This improves the
data transfer speed.
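For illustration only (this sketch is not part of the diff below): with the
triggering number set to 32 bytes, the driver waits for one buffer-empty or
buffer-full event per 32-byte chunk and then bursts the whole chunk, instead
of waiting once per byte. The helpers rspi_wait_for_tx_empty(),
rspi_write_data(), rspi_wait_for_rx_full(), rspi_read_data(),
rspi_pio_transfer() and QSPI_BUFFER_SIZE are the driver's/patch's own; the
wrapper name qspi_burst_sketch() is made up here, and the actual
implementation is qspi_trigger_transfer_out_int() in the patch.

/*
 * Sketch: assumes the TX/RX triggering number in SPBFCR has already been
 * set to 32 bytes (SPBFCR_TXTRG_32B / SPBFCR_RXTRG_32B).
 */
static int qspi_burst_sketch(struct rspi_data *rspi, const u8 *tx, u8 *rx,
			     unsigned int len)
{
	unsigned int i;
	int ret;

	while (len >= QSPI_BUFFER_SIZE) {
		/* one TX-empty event now means 32 free slots ... */
		ret = rspi_wait_for_tx_empty(rspi);
		if (ret < 0)
			return ret;
		/* ... so 32 bytes can be written back to back */
		for (i = 0; i < QSPI_BUFFER_SIZE; i++)
			rspi_write_data(rspi, *tx++);

		/* likewise, one RX-full event means 32 bytes are ready */
		ret = rspi_wait_for_rx_full(rspi);
		if (ret < 0)
			return ret;
		for (i = 0; i < QSPI_BUFFER_SIZE; i++)
			*rx++ = rspi_read_data(rspi);

		len -= QSPI_BUFFER_SIZE;
	}

	/* any remainder (< 32 bytes) stays on the 1-byte trigger / PIO path */
	return len ? rspi_pio_transfer(rspi, tx, rx, len) : 0;
}

Compared with the 1-byte trigger, this reduces the number of status waits per
32 bytes from 32 to 1, which is where the speed improvement comes from.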
Signed-off-by: Hiep Cao Minh
---
 drivers/spi/spi-rspi.c | 124 ++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 117 insertions(+), 7 deletions(-)

diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index ad87a98..32a2515 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -182,6 +182,13 @@
 #define SPBFCR_RXRST		0x40	/* Receive Buffer Data Reset */
 #define SPBFCR_TXTRG_MASK	0x30	/* Transmit Buffer Data Triggering Number */
 #define SPBFCR_RXTRG_MASK	0x07	/* Receive Buffer Data Triggering Number */
+/* QSPI on R-Car Gen2 */
+#define SPBFCR_TXTRG_1B		0x00	/* 31 bytes (1 byte available) */
+#define SPBFCR_TXTRG_32B	0x30	/* 0 byte (32 bytes available) */
+#define SPBFCR_RXTRG_1B		0x00	/* 1 byte (31 bytes available) */
+#define SPBFCR_RXTRG_32B	0x07	/* 32 bytes (0 byte available) */
+
+#define QSPI_BUFFER_SIZE	32u
 
 struct rspi_data {
 	void __iomem *addr;
@@ -371,6 +378,52 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
 	return 0;
 }
 
+static void qspi_update(const struct rspi_data *rspi, u8 mask, u8 val, u8 reg)
+{
+	u8 data;
+
+	data = rspi_read8(rspi, reg);
+	data &= ~mask;
+	data |= (val & mask);
+	rspi_write8(rspi, data, reg);
+}
+
+static int qspi_set_send_trigger(struct rspi_data *rspi, unsigned int len)
+{
+	unsigned int n;
+
+	n = min(len, QSPI_BUFFER_SIZE);
+
+	if (len >= QSPI_BUFFER_SIZE) {
+		/* sets triggering number to 32 bytes */
+		qspi_update(rspi, SPBFCR_TXTRG_MASK,
+			    SPBFCR_TXTRG_32B, QSPI_SPBFCR);
+	} else {
+		/* sets triggering number to 1 byte */
+		qspi_update(rspi, SPBFCR_TXTRG_MASK,
+			    SPBFCR_TXTRG_1B, QSPI_SPBFCR);
+	}
+
+	return n;
+}
+
+static void qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
+{
+	unsigned int n;
+
+	n = min(len, QSPI_BUFFER_SIZE);
+
+	if (len >= QSPI_BUFFER_SIZE) {
+		/* sets triggering number to 32 bytes */
+		qspi_update(rspi, SPBFCR_RXTRG_MASK,
+			    SPBFCR_RXTRG_32B, QSPI_SPBFCR);
+	} else {
+		/* sets triggering number to 1 byte */
+		qspi_update(rspi, SPBFCR_RXTRG_MASK,
+			    SPBFCR_RXTRG_1B, QSPI_SPBFCR);
+	}
+}
+
 #define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
 
 static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
@@ -614,19 +667,29 @@ static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
 	return __rspi_can_dma(rspi, xfer);
 }
 
-static int rspi_common_transfer(struct rspi_data *rspi,
-				struct spi_transfer *xfer)
+static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
+					struct spi_transfer *xfer)
 {
-	int ret;
-
 	if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
 		/* rx_buf can be NULL on RSPI on SH in TX-only Mode */
-		ret = rspi_dma_transfer(rspi, &xfer->tx_sg,
+		int ret = rspi_dma_transfer(rspi, &xfer->tx_sg,
					xfer->rx_buf ? &xfer->rx_sg : NULL);
 		if (ret != -EAGAIN)
-			return ret;
+			return 0;
 	}
 
+	return -EAGAIN;
+}
+
+static int rspi_common_transfer(struct rspi_data *rspi,
+				struct spi_transfer *xfer)
+{
+	int ret;
+
+	ret = rspi_dma_check_then_transfer(rspi, xfer);
+	if (ret != -EAGAIN)
+		return ret;
+
 	ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
 	if (ret < 0)
 		return ret;
@@ -666,12 +729,59 @@ static int rspi_rz_transfer_one(struct spi_master *master,
 	return rspi_common_transfer(rspi, xfer);
 }
 
+static int qspi_trigger_transfer_out_int(struct rspi_data *rspi, const u8 *tx,
+					 u8 *rx, unsigned int len)
+{
+	unsigned int i, n;
+	int ret, error;
+
+	while (len > 0) {
+		n = qspi_set_send_trigger(rspi, len);
+		qspi_set_receive_trigger(rspi, len);
+		if (n == QSPI_BUFFER_SIZE) {
+			error = rspi_wait_for_tx_empty(rspi);
+			if (error < 0) {
+				dev_err(&rspi->master->dev, "transmit timeout\n");
+				return error;
+			}
+			for (i = 0; i < n; i++)
+				rspi_write_data(rspi, *tx++);
+
+			error = rspi_wait_for_rx_full(rspi);
+			if (error < 0) {
+				dev_err(&rspi->master->dev, "receive timeout\n");
+				return error;
+			}
+			for (i = 0; i < n; i++)
+				*rx++ = rspi_read_data(rspi);
+		} else {
+			ret = rspi_pio_transfer(rspi, tx, rx, n);
+			if (ret < 0)
+				return ret;
+		}
+		len -= n;
+	}
+
+	return 0;
+}
+
 static int qspi_transfer_out_in(struct rspi_data *rspi,
 				struct spi_transfer *xfer)
 {
+	int ret;
+
 	qspi_receive_init(rspi);
 
-	return rspi_common_transfer(rspi, xfer);
+	ret = rspi_dma_check_then_transfer(rspi, xfer);
+	if (ret != -EAGAIN)
+		return ret;
+
+	ret = qspi_trigger_transfer_out_int(rspi, xfer->tx_buf,
+					    xfer->rx_buf, xfer->len);
+	if (ret < 0)
+		return ret;
+
+	return 0;
 }
 
 static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)