
[4/4] spi: tegra210-quad: Enable TPM wait polling

Message ID 20230202161750.21210-5-kyarlagadda@nvidia.com (mailing list archive)
State New, archived
Series Tegra TPM driver with hw flow control

Commit Message

Krishna Yarlagadda Feb. 2, 2023, 4:17 p.m. UTC
Trusted Platform Module defines a flow control protocol where the
slave drives the data line at specified clock cycles. Tegra241 has
TPM wait state detection support when using combined sequence
transfers. Enable the feature based on a device tree flag.
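
For reference, a minimal sketch of how the flag might look in a device
tree, assuming the binding in this series adds a "nvidia,wait-polling"
boolean property (as read via device_property_read_bool() below) on the
TPM child node of the QSPI controller. The node label, compatible
string, reg and clock rate are only illustrative:

    &qspi0 {                                     /* illustrative controller label */
            tpm@0 {
                    compatible = "tcg,tpm_tis-spi";  /* illustrative TPM compatible */
                    reg = <0>;
                    spi-max-frequency = <18000000>;  /* illustrative clock rate */
                    nvidia,wait-polling;             /* enable TPM wait state detection */
            };
    };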

Signed-off-by: Krishna Yarlagadda <kyarlagadda@nvidia.com>
---
 drivers/spi/spi-tegra210-quad.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

Patch

diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 9f356612ba7e..ea8a08a3d838 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -142,6 +142,7 @@ 
 
 #define QSPI_GLOBAL_CONFIG			0X1a4
 #define QSPI_CMB_SEQ_EN				BIT(0)
+#define QSPI_TPM_WAIT_POLL_EN			BIT(1)
 
 #define QSPI_CMB_SEQ_ADDR			0x1a8
 #define QSPI_ADDRESS_VALUE_SET(X)		(((x) & 0xFFFF) << 0)
@@ -170,6 +171,7 @@  struct tegra_qspi_soc_data {
 struct tegra_qspi_client_data {
 	int tx_clk_tap_delay;
 	int rx_clk_tap_delay;
+	bool wait_polling;
 };
 
 struct tegra_qspi {
@@ -934,6 +936,8 @@  static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_devic
 				 &cdata->tx_clk_tap_delay);
 	device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
 				 &cdata->rx_clk_tap_delay);
+	cdata->wait_polling =
+		device_property_read_bool(&spi->dev, "nvidia,wait-polling");
 
 	return cdata;
 }
@@ -991,6 +995,14 @@  static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
 	dev_dbg(tqspi->dev, "TRANS_STAT:  0x%08x | FIFO_STATUS: 0x%08x\n",
 		tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
 		tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
+	dev_dbg(tqspi->dev, "GLOBAL_CFG: 0x%08x\n",
+		tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG));
+	dev_dbg(tqspi->dev, "CMB_CMD: 0x%08x | CMB_CMD_CFG: 0x%08x\n",
+		tegra_qspi_readl(tqspi, QSPI_CMB_SEQ_CMD),
+		tegra_qspi_readl(tqspi, QSPI_CMB_SEQ_CMD_CFG));
+	dev_dbg(tqspi->dev, "CMB_ADDR: 0x%08x | CMB_ADDR_CFG: 0x%08x\n",
+		tegra_qspi_readl(tqspi, QSPI_CMB_SEQ_ADDR),
+		tegra_qspi_readl(tqspi, QSPI_CMB_SEQ_ADDR_CFG));
 }
 
 static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
@@ -1056,6 +1068,7 @@  static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
 	bool is_first_msg = true;
 	struct spi_transfer *xfer;
 	struct spi_device *spi = msg->spi;
+	struct tegra_qspi_client_data *cdata = spi->controller_data;
 	u8 transfer_phase = 0;
 	u32 cmd1 = 0, dma_ctl = 0;
 	int ret = 0;
@@ -1065,6 +1078,8 @@  static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
 
 	/* Enable Combined sequence mode */
 	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
+	if (cdata->wait_polling)
+		val |= QSPI_TPM_WAIT_POLL_EN;
 	val |= QSPI_CMB_SEQ_EN;
 	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
 	/* Process individual transfer list */
@@ -1192,6 +1207,7 @@  static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
 	/* Disable Combined sequence mode */
 	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
 	val &= ~QSPI_CMB_SEQ_EN;
+	val &= ~QSPI_TPM_WAIT_POLL_EN;
 	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
 	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
 		struct spi_transfer *xfer = transfer;