
[v2,SPI,for-next,2/3] spi: mchp-pci1xxxx: DMA Read support for copying data into SPI Buf

Message ID 20240206034118.748801-3-thangaraj.s@microchip.com
State New
Series DMA Support for SPI in PCI1xxxx

Commit Message

Thangaraj Samynathan Feb. 6, 2024, 3:41 a.m. UTC
pci1xxxx_spi_transfer_with_dma is registered as the transfer_one callback
when DMA is supported. It performs a DMA read operation that copies
data from the host CPU buffer into the SPI Tx buffer.
On the DMA read completion interrupt, the SPI transaction is initiated
in the ISR. Helper functions pci1xxxx_spi_setup, pci1xxxx_spi_setup_dma_read
and pci1xxxx_start_spi_xfer are added to set up the SPI and DMA read
operations and start the SPI transfer. Existing code is replaced with
these helpers wherever applicable.

Signed-off-by: Thangaraj Samynathan <thangaraj.s@microchip.com>
---
 drivers/spi/spi-pci1xxxx.c | 275 +++++++++++++++++++++++++++++++++----
 1 file changed, 248 insertions(+), 27 deletions(-)

Comments

Mark Brown Feb. 6, 2024, 12:38 p.m. UTC | #1
On Tue, Feb 06, 2024 at 09:11:17AM +0530, Thangaraj Samynathan wrote:

> pci1xxxx_spi_transfer_with_dma is registered as the transfer_one callback
> when DMA is supported. It performs a DMA read operation that copies
> data from the host CPU buffer into the SPI Tx buffer.
> On the DMA read completion interrupt, the SPI transaction is initiated
> in the ISR. Helper functions pci1xxxx_spi_setup, pci1xxxx_spi_setup_dma_read
> and pci1xxxx_start_spi_xfer are added to set up the SPI and DMA read
> operations and start the SPI transfer. Existing code is replaced with
> these helpers wherever applicable.

This description is kind of hard to follow, and it seems like the
hardware is quite weirdly designed, which doesn't help.  What you appear
to be saying here is that the DMA is transmit only as far as SPI is
concerned (it's a bit confusing that you call it read DMA, but AFAICT
it's reading from CPU memory and writing to the SPI controller).  This
sits a bit awkwardly with the core DMA support: the core assumes that
DMA will be bidirectional and maps both the TX and RX buffers for DMA,
which isn't great if the RX path is PIO.  If that's the case then you
might be better off open coding the mapping of the buffers.
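
To make that concrete: when can_dma() returns true, the SPI core maps
both directions for every transfer.  A simplified sketch of what
__spi_map_msg() in drivers/spi/spi.c effectively does:

	/* Simplified: the core maps TX and RX whenever ctlr->can_dma()
	 * says yes, even if one direction is actually driven by PIO.
	 */
	if (xfer->tx_buf)
		spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
			    (void *)xfer->tx_buf, xfer->len, DMA_TO_DEVICE);
	if (xfer->rx_buf)
		spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
			    xfer->rx_buf, xfer->len, DMA_FROM_DEVICE);

Open coding a transmit-only mapping might look roughly like the sketch
below; the tx_dma_addr field and the helper name are hypothetical and
illustrative only, not taken from the driver:

	static int pci1xxxx_map_tx_buf(struct pci1xxxx_spi *par,
				       struct pci1xxxx_spi_internal *p,
				       struct spi_transfer *xfer)
	{
		/* Map only the TX buffer for DMA; the RX path stays PIO. */
		p->tx_dma_addr = dma_map_single(&par->dev->dev,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE);
		if (dma_mapping_error(&par->dev->dev, p->tx_dma_addr))
			return -ENOMEM;
		return 0;
	}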

> +static irqreturn_t pci1xxxx_spi_isr_dma(int irq, void *dev)
> +{

> +	if (regval & SPI_INTR) {
> +		rx_buf = p->rx_buf;
> +		memcpy_fromio(rx_buf + p->bytes_recvd, p->parent->reg_base +
> +				      SPI_MST_RSP_BUF_OFFSET(p->hw_inst), p->tx_sgl_len);
> +		p->bytes_recvd += p->tx_sgl_len;
> +
> +		p->tx_sgl = sg_next(p->tx_sgl);

If we're doing DMA why do we need to have a memcpy() here?  That would
tie in with the DMA being transmit only.
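
That transmit-only reading would also explain why there is no RX DMA
path: the read channel only moves TX data from host memory into the
command buffer, so the response must be drained by the CPU.  Purely as a
hypothetical sketch, an RX (write) DMA setup would mirror
pci1xxxx_spi_setup_dma_read() with the SAR/DAR roles swapped; wr_base
below is an assumed write-channel register block, not taken from any
datasheet:

	/* Hypothetical: source is the SPI response buffer, destination
	 * is host memory (rx_dma_addr).
	 */
	writel(lower_32_bits(SPI_PERI_ADDR_BASE +
			     SPI_MST_RSP_BUF_OFFSET(p->hw_inst)),
	       wr_base + SPI_DMA_CH_SAR_LO_OFFSET);
	writel(lower_32_bits(rx_dma_addr),
	       wr_base + SPI_DMA_CH_DAR_LO_OFFSET);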

Patch

diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
index 85a6068b244d..2b2f55a0b5a5 100644
--- a/drivers/spi/spi-pci1xxxx.c
+++ b/drivers/spi/spi-pci1xxxx.c
@@ -5,6 +5,7 @@ 
 //          Kumaravel Thiagarajan <Kumaravel.Thiagarajan@microchip.com>
 
 
+#include <linux/bitfield.h>
 #include <linux/dma-mapping.h>
 #include <linux/iopoll.h>
 #include <linux/irq.h>
@@ -12,6 +13,7 @@ 
 #include <linux/msi.h>
 #include <linux/pci_regs.h>
 #include <linux/pci.h>
+#include <linux/spinlock.h>
 #include <linux/spi/spi.h>
 #include <linux/delay.h>
 
@@ -37,6 +39,7 @@ 
 #define	SPI_MST_CTL_MODE_SEL		(BIT(2))
 #define	SPI_MST_CTL_GO			(BIT(0))
 
+#define SPI_PERI_ADDR_BASE		(0x160000)
 #define SPI_SYSTEM_ADDR_BASE		(0x2000)
 #define	SPI_MST1_ADDR_BASE		(0x800)
 
@@ -48,22 +51,49 @@ 
 #define DEV_REV_MASK			(GENMASK(7, 0))
 
 #define SPI_SYSLOCK			BIT(4)
+#define SPI0				(0)
+#define SPI1				(1)
 
 /* DMA Related Registers */
 #define SPI_DMA_ADDR_BASE		(0x1000)
 #define SPI_DMA_GLOBAL_WR_ENGINE_EN	(SPI_DMA_ADDR_BASE + 0x0C)
 #define SPI_DMA_GLOBAL_RD_ENGINE_EN	(SPI_DMA_ADDR_BASE + 0x2C)
+#define SPI_DMA_RD_DOORBELL_REG		(SPI_DMA_ADDR_BASE + 0x30)
 #define SPI_DMA_INTR_IMWR_WDONE_LOW	(SPI_DMA_ADDR_BASE + 0x60)
 #define SPI_DMA_INTR_IMWR_WDONE_HIGH	(SPI_DMA_ADDR_BASE + 0x64)
 #define SPI_DMA_INTR_IMWR_WABORT_LOW	(SPI_DMA_ADDR_BASE + 0x68)
 #define SPI_DMA_INTR_IMWR_WABORT_HIGH	(SPI_DMA_ADDR_BASE + 0x6C)
 #define SPI_DMA_INTR_WR_IMWR_DATA	(SPI_DMA_ADDR_BASE + 0x70)
+#define SPI_DMA_INTR_RD_STS		(SPI_DMA_ADDR_BASE + 0xA0)
+#define SPI_DMA_RD_INT_MASK		(SPI_DMA_ADDR_BASE + 0xA8)
+#define SPI_DMA_INTR_RD_CLR		(SPI_DMA_ADDR_BASE + 0xAC)
+#define SPI_DMA_ERR_RD_STS		(SPI_DMA_ADDR_BASE + 0xB8)
 #define SPI_DMA_INTR_IMWR_RDONE_LOW	(SPI_DMA_ADDR_BASE + 0xCC)
 #define SPI_DMA_INTR_IMWR_RDONE_HIGH	(SPI_DMA_ADDR_BASE + 0xD0)
 #define SPI_DMA_INTR_IMWR_RABORT_LOW	(SPI_DMA_ADDR_BASE + 0xD4)
 #define SPI_DMA_INTR_IMWR_RABORT_HIGH	(SPI_DMA_ADDR_BASE + 0xD8)
 #define SPI_DMA_INTR_RD_IMWR_DATA	(SPI_DMA_ADDR_BASE + 0xDC)
 
+#define SPI_DMA_CH0_RD_BASE		(SPI_DMA_ADDR_BASE + 0x300)
+#define SPI_DMA_CH1_RD_BASE		(SPI_DMA_ADDR_BASE + 0x500)
+
+#define SPI_DMA_CH_CTL1_OFFSET		(0x00)
+#define SPI_DMA_CH_XFER_LEN_OFFSET	(0x08)
+#define SPI_DMA_CH_SAR_LO_OFFSET	(0x0C)
+#define SPI_DMA_CH_SAR_HI_OFFSET	(0x10)
+#define SPI_DMA_CH_DAR_LO_OFFSET	(0x14)
+#define SPI_DMA_CH_DAR_HI_OFFSET	(0x18)
+
+#define SPI_DMA_CH0_DONE_INT		BIT(0)
+#define SPI_DMA_CH1_DONE_INT		BIT(1)
+#define SPI_DMA_CH0_ABORT_INT		BIT(16)
+#define SPI_DMA_CH1_ABORT_INT		BIT(17)
+#define SPI_DMA_DONE_INT_MASK		(SPI_DMA_CH0_DONE_INT | SPI_DMA_CH1_DONE_INT)
+#define SPI_DMA_ABORT_INT_MASK		(SPI_DMA_CH0_ABORT_INT | SPI_DMA_CH1_ABORT_INT)
+#define DMA_CH_CONTROL_LIE		BIT(3)
+#define DMA_CH_CONTROL_RIE		BIT(4)
+#define DMA_INTR_EN			(DMA_CH_CONTROL_RIE | DMA_CH_CONTROL_LIE)
+
 /* x refers to SPI Host Controller HW instance id in the below macros - 0 or 1 */
 
 #define	SPI_MST_CMD_BUF_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x00)
@@ -82,6 +112,7 @@ 
 #define PCI1XXXX_SPI_TIMEOUT			(msecs_to_jiffies(100))
 #define SYSLOCK_RETRY_CNT			(1000)
 #define SPI_DMA_ENGINE_EN			(0x1)
+#define SPI_DMA_ENGINE_DIS			(0x0)
 
 #define SPI_INTR		BIT(8)
 #define SPI_FORCE_CE		BIT(4)
@@ -94,11 +125,19 @@ 
 
 struct pci1xxxx_spi_internal {
 	u8 hw_inst;
-	bool spi_xfer_in_progress;
+	u8 clkdiv;
 	int irq;
+	int mode;
+	bool spi_xfer_in_progress;
+	bool dma_aborted_rd;
+	u32 bytes_recvd;
+	u32 tx_sgl_len;
+	struct scatterlist *tx_sgl, *rx_sgl;
 	struct completion spi_xfer_done;
 	struct spi_controller *spi_host;
 	struct pci1xxxx_spi *parent;
+	struct spi_transfer *xfer;
+	void *rx_buf;
 	struct {
 		unsigned int dev_sel : 3;
 		unsigned int msi_vector_sel : 1;
@@ -111,6 +150,8 @@  struct pci1xxxx_spi {
 	u8 dev_rev;
 	void __iomem *reg_base;
 	void __iomem *dma_offset_bar;
+	/* lock to safely access the DMA registers in isr */
+	spinlock_t dma_reg_lock;
 	bool can_dma;
 	struct pci1xxxx_spi_internal *spi_int[] __counted_by(total_hw_instances);
 };
@@ -230,6 +271,7 @@  static int pci1xxxx_spi_dma_init(struct pci1xxxx_spi *spi_bus, int irq)
 	if (ret)
 		return ret;
 
+	spin_lock_init(&spi_bus->dma_reg_lock);
 	get_cached_msi_msg(irq, &msi);
 	writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN);
 	writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
@@ -243,6 +285,7 @@  static int pci1xxxx_spi_dma_init(struct pci1xxxx_spi *spi_bus, int irq)
 	writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_LOW);
 	writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA);
 	writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA);
+	dma_set_max_seg_size(&spi_bus->dev->dev, PCI1XXXX_SPI_BUFFER_SIZE);
 	spi_bus->can_dma = true;
 	return 0;
 }
@@ -287,12 +330,59 @@  static u8 pci1xxxx_get_clock_div(u32 hz)
 	return val;
 }
 
-static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
-				     struct spi_device *spi, struct spi_transfer *xfer)
+static void pci1xxxx_spi_setup_dma_read(struct pci1xxxx_spi_internal *p,
+					dma_addr_t dma_addr, u32 len)
+{
+	void __iomem *base;
+
+	if (!p->hw_inst)
+		base = p->parent->dma_offset_bar + SPI_DMA_CH0_RD_BASE;
+	else
+		base = p->parent->dma_offset_bar + SPI_DMA_CH1_RD_BASE;
+
+	writel(DMA_INTR_EN, base + SPI_DMA_CH_CTL1_OFFSET);
+	writel(len, base + SPI_DMA_CH_XFER_LEN_OFFSET);
+	writel(lower_32_bits(dma_addr), base + SPI_DMA_CH_SAR_LO_OFFSET);
+	writel(upper_32_bits(dma_addr), base + SPI_DMA_CH_SAR_HI_OFFSET);
+	/* Destination is the SPI command buffer registers */
+	writel(lower_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_CMD_BUF_OFFSET(p->hw_inst)),
+	       base + SPI_DMA_CH_DAR_LO_OFFSET);
+	writel(upper_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_CMD_BUF_OFFSET(p->hw_inst)),
+	       base + SPI_DMA_CH_DAR_HI_OFFSET);
+}
+
+static void pci1xxxx_spi_setup(struct pci1xxxx_spi *par, u8 hw_inst, u32 mode,
+			       u8 clkdiv, u32 len)
+{
+	u32 regval;
+
+	regval = readl(par->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+	regval &= ~(SPI_MST_CTL_MODE_SEL | SPI_MST_CTL_CMD_LEN_MASK |
+		    SPI_MST_CTL_SPEED_MASK);
+
+	if (mode == SPI_MODE_3)
+		regval |= SPI_MST_CTL_MODE_SEL;
+
+	regval |= FIELD_PREP(SPI_MST_CTL_CMD_LEN_MASK, len);
+	regval |= FIELD_PREP(SPI_MST_CTL_SPEED_MASK, clkdiv);
+	writel(regval, par->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+}
+
+static void pci1xxxx_start_spi_xfer(struct pci1xxxx_spi_internal *p, u8 hw_inst)
+{
+	u32 regval;
+
+	regval = readl(p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+	regval |= SPI_MST_CTL_GO;
+	writel(regval, p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+}
+
+static int pci1xxxx_spi_transfer_with_io(struct spi_controller *spi_ctlr,
+					 struct spi_device *spi, struct spi_transfer *xfer)
 {
 	struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr);
-	int mode, len, loop_iter, transfer_len;
 	struct pci1xxxx_spi *par = p->parent;
+	int len, loop_iter, transfer_len;
 	unsigned long bytes_transfered;
 	unsigned long bytes_recvd;
 	unsigned long loop_count;
@@ -302,7 +392,7 @@  static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
 	u8 clkdiv;
 
 	p->spi_xfer_in_progress = true;
-	mode = spi->mode;
+	p->bytes_recvd = 0;
 	clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz);
 	tx_buf = xfer->tx_buf;
 	rx_buf = xfer->rx_buf;
@@ -327,26 +417,8 @@  static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
 			memcpy_toio(par->reg_base + SPI_MST_CMD_BUF_OFFSET(p->hw_inst),
 				    &tx_buf[bytes_transfered], len);
 			bytes_transfered += len;
-			regval = readl(par->reg_base +
-				       SPI_MST_CTL_REG_OFFSET(p->hw_inst));
-			regval &= ~(SPI_MST_CTL_MODE_SEL | SPI_MST_CTL_CMD_LEN_MASK |
-				    SPI_MST_CTL_SPEED_MASK);
-
-			if (mode == SPI_MODE_3)
-				regval |= SPI_MST_CTL_MODE_SEL;
-			else
-				regval &= ~SPI_MST_CTL_MODE_SEL;
-
-			regval |= (clkdiv << 5);
-			regval &= ~SPI_MST_CTL_CMD_LEN_MASK;
-			regval |= (len << 8);
-			writel(regval, par->reg_base +
-			       SPI_MST_CTL_REG_OFFSET(p->hw_inst));
-			regval = readl(par->reg_base +
-				       SPI_MST_CTL_REG_OFFSET(p->hw_inst));
-			regval |= SPI_MST_CTL_GO;
-			writel(regval, par->reg_base +
-			       SPI_MST_CTL_REG_OFFSET(p->hw_inst));
+			pci1xxxx_spi_setup(par, p->hw_inst, spi->mode, clkdiv, len);
+			pci1xxxx_start_spi_xfer(p, p->hw_inst);
 
 			/* Wait for DMA_TERM interrupt */
 			result = wait_for_completion_timeout(&p->spi_xfer_done,
@@ -366,7 +438,83 @@  static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
 	return 0;
 }
 
-static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
+static int pci1xxxx_spi_transfer_with_dma(struct spi_controller *spi_ctlr,
+					  struct spi_device *spi,
+					  struct spi_transfer *xfer)
+{
+	struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr);
+	struct pci1xxxx_spi *par = p->parent;
+	dma_addr_t tx_dma_addr = 0;
+	int ret = 0;
+	u32 regval;
+
+	p->spi_xfer_in_progress = true;
+	p->tx_sgl = xfer->tx_sg.sgl;
+
+	if (!xfer->tx_buf || !p->tx_sgl) {
+		ret = -EINVAL;
+		goto error;
+	}
+	p->xfer = xfer;
+	p->mode = spi->mode;
+	p->clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz);
+	p->bytes_recvd = 0;
+	p->rx_buf = xfer->rx_buf;
+	regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+	writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+
+	tx_dma_addr = sg_dma_address(p->tx_sgl);
+	p->tx_sgl_len = sg_dma_len(p->tx_sgl);
+	pci1xxxx_spi_setup(par, p->hw_inst, p->mode, p->clkdiv, p->tx_sgl_len);
+
+	pci1xxxx_spi_setup_dma_read(p, tx_dma_addr, p->tx_sgl_len);
+	writel(p->hw_inst, par->dma_offset_bar + SPI_DMA_RD_DOORBELL_REG);
+
+	reinit_completion(&p->spi_xfer_done);
+	/* Wait for DMA_TERM interrupt */
+	ret = wait_for_completion_timeout(&p->spi_xfer_done, PCI1XXXX_SPI_TIMEOUT);
+	if (!ret) {
+		ret = -ETIMEDOUT;
+		if (p->dma_aborted_rd) {
+			writel(SPI_DMA_ENGINE_DIS,
+			       par->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
+			/*
+			 * DMA engine reset takes time if any TLP
+			 * completion is in progress; wait until
+			 * the DMA engine reset has completed.
+			 */
+			ret = readl_poll_timeout(par->dma_offset_bar +
+						 SPI_DMA_GLOBAL_RD_ENGINE_EN, regval,
+						 (regval == 0x0), 0, USEC_PER_MSEC);
+			if (ret) {
+				ret = -ECANCELED;
+				goto error;
+			}
+			writel(SPI_DMA_ENGINE_EN,
+			       par->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
+			p->dma_aborted_rd = false;
+			ret = -ECANCELED;
+		}
+		goto error;
+	}
+	ret = 0;
+
+error:
+	p->spi_xfer_in_progress = false;
+
+	return ret;
+}
+
+static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
+				     struct spi_device *spi, struct spi_transfer *xfer)
+{
+	if (spi_ctlr->can_dma(spi_ctlr, spi, xfer) && spi_ctlr->cur_msg_mapped)
+		return pci1xxxx_spi_transfer_with_dma(spi_ctlr, spi, xfer);
+	else
+		return pci1xxxx_spi_transfer_with_io(spi_ctlr, spi, xfer);
+}
+
+static irqreturn_t pci1xxxx_spi_isr_io(int irq, void *dev)
 {
 	struct pci1xxxx_spi_internal *p = dev;
 	irqreturn_t spi_int_fired = IRQ_NONE;
@@ -379,12 +527,83 @@  static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
 		complete(&p->spi_xfer_done);
 		spi_int_fired = IRQ_HANDLED;
 	}
-
 	writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+	return spi_int_fired;
+}
 
+static irqreturn_t pci1xxxx_spi_isr_dma(int irq, void *dev)
+{
+	struct pci1xxxx_spi_internal *p = dev;
+	irqreturn_t spi_int_fired = IRQ_NONE;
+	dma_addr_t tx_dma_addr = 0;
+	void *rx_buf = NULL;
+	unsigned long flags;
+	u32 regval;
+
+	spin_lock_irqsave(&p->parent->dma_reg_lock, flags);
+	/* Clear the DMA read interrupt and start the SPI transfer */
+	regval = readl(p->parent->dma_offset_bar + SPI_DMA_INTR_RD_STS);
+	if (regval & SPI_DMA_DONE_INT_MASK) {
+		if (regval & SPI_DMA_CH0_DONE_INT)
+			pci1xxxx_start_spi_xfer(p, SPI0);
+		if (regval & SPI_DMA_CH1_DONE_INT)
+			pci1xxxx_start_spi_xfer(p, SPI1);
+		spi_int_fired = IRQ_HANDLED;
+	}
+	if (regval & SPI_DMA_ABORT_INT_MASK) {
+		p->dma_aborted_rd = true;
+		spi_int_fired = IRQ_HANDLED;
+	}
+	writel(regval, p->parent->dma_offset_bar + SPI_DMA_INTR_RD_CLR);
+	spin_unlock_irqrestore(&p->parent->dma_reg_lock, flags);
+
+	/* Clear the SPI GO_BIT Interrupt */
+	regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+	if (regval & SPI_INTR) {
+		rx_buf = p->rx_buf;
+		memcpy_fromio(rx_buf + p->bytes_recvd, p->parent->reg_base +
+				      SPI_MST_RSP_BUF_OFFSET(p->hw_inst), p->tx_sgl_len);
+		p->bytes_recvd += p->tx_sgl_len;
+
+		p->tx_sgl = sg_next(p->tx_sgl);
+		if (!p->tx_sgl) {
+			/* Signal transfer completion */
+			complete(&p->spi_xfer_done);
+			spi_int_fired = IRQ_HANDLED;
+		} else {
+			tx_dma_addr = sg_dma_address(p->tx_sgl);
+			p->tx_sgl_len = sg_dma_len(p->tx_sgl);
+			pci1xxxx_spi_setup(p->parent, p->hw_inst, p->mode, p->clkdiv,
+					   p->tx_sgl_len);
+			pci1xxxx_spi_setup_dma_read(p, tx_dma_addr, p->tx_sgl_len);
+			writel(p->hw_inst, p->parent->dma_offset_bar +
+			       SPI_DMA_RD_DOORBELL_REG);
+		}
+	}
+	writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
 	return spi_int_fired;
 }
 
+static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
+{
+	struct pci1xxxx_spi_internal *p = dev;
+
+	if (p->spi_host->can_dma(p->spi_host, NULL, p->xfer))
+		return pci1xxxx_spi_isr_dma(irq, dev);
+	else
+		return pci1xxxx_spi_isr_io(irq, dev);
+}
+
+static bool pci1xxxx_spi_can_dma(struct spi_controller *host,
+				 struct spi_device *spi,
+				 struct spi_transfer *xfer)
+{
+	struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(host);
+	struct pci1xxxx_spi *par = p->parent;
+
+	return par->can_dma;
+}
+
 static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	u8 hw_inst_cnt, iter, start, only_sec_inst;
@@ -505,7 +724,9 @@  static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
 		spi_host->num_chipselect = SPI_CHIP_SEL_COUNT;
 		spi_host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_RX_DUAL |
 				      SPI_TX_DUAL | SPI_LOOP;
+		spi_host->can_dma = pci1xxxx_spi_can_dma;
 		spi_host->transfer_one = pci1xxxx_spi_transfer_one;
+
 		spi_host->set_cs = pci1xxxx_spi_set_cs;
 		spi_host->bits_per_word_mask = SPI_BPW_MASK(8);
 		spi_host->max_speed_hz = PCI1XXXX_SPI_MAX_CLOCK_HZ;