dmaengine: idxd: add missing callback function to support DMA_INTERRUPT

Message ID: 165101232637.3951447.15765792791591763119.stgit@djiang5-desk3.ch.intel.com
State: Accepted
Commit: 2112b8f4fb5cc35d1c384324763765953186b81f
Series: dmaengine: idxd: add missing callback function to support DMA_INTERRUPT

Commit Message

Dave Jiang April 26, 2022, 10:32 p.m. UTC
When setting the DMA_INTERRUPT capability, a callback function
dma->device_prep_dma_interrupt() must also be provided. Without the
callback, dma_async_device_register() will fail the dma capability
check.

Fixes: 4e5a4eb20393 ("dmaengine: idxd: set DMA_INTERRUPT cap bit")
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
 drivers/dma/idxd/dma.c |   22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)
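
For reference, the check that trips without this patch lives in
dma_async_device_register(); a simplified sketch of that validation
(paraphrased from drivers/dma/dmaengine.c, not a verbatim copy):

	/*
	 * Simplified sketch of the capability validation in
	 * dma_async_device_register(); paraphrased for illustration.
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
	    !device->device_prep_dma_interrupt) {
		dev_err(device->dev,
			"Device claims capability DMA_INTERRUPT, but op is not defined\n");
		return -EIO;
	}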

Comments

Dave Jiang May 16, 2022, 5:02 p.m. UTC | #1
On 4/26/2022 3:32 PM, Dave Jiang wrote:
> When setting the DMA_INTERRUPT capability, a callback function
> dma->device_prep_dma_interrupt() must also be provided. Without the
> callback, dma_async_device_register() will fail the dma capability
> check.
>
> Fixes: 4e5a4eb20393 ("dmaengine: idxd: set DMA_INTERRUPT cap bit")
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>


Hi Vinod, can you please make sure this patch goes into the merge
window? It fixes the patch that's in dmaengine/next. Thanks!

> [ diff snipped ]

Vinod Koul May 16, 2022, 5:48 p.m. UTC | #2
On 26-04-22, 15:32, Dave Jiang wrote:
> When setting the DMA_INTERRUPT capability, a callback function
> dma->device_prep_dma_interrupt() must also be provided. Without the
> callback, dma_async_device_register() will fail the dma capability
> check.

Applied, thanks

Patch

diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index 950f06c8aad5..d66cef5a918e 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -87,6 +87,27 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
 	hw->completion_addr = compl;
 }
 
+static struct dma_async_tx_descriptor *
+idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
+{
+	struct idxd_wq *wq = to_idxd_wq(c);
+	u32 desc_flags;
+	struct idxd_desc *desc;
+
+	if (wq->state != IDXD_WQ_ENABLED)
+		return NULL;
+
+	op_flag_setup(flags, &desc_flags);
+	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+	if (IS_ERR(desc))
+		return NULL;
+
+	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
+			      0, 0, 0, desc->compl_dma, desc_flags);
+	desc->txd.flags = flags;
+	return &desc->txd;
+}
+
 static struct dma_async_tx_descriptor *
 idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
 		       dma_addr_t dma_src, size_t len, unsigned long flags)
@@ -198,6 +219,7 @@ int idxd_register_dma_device(struct idxd_device *idxd)
 	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
 	dma->device_release = idxd_dma_release;
 
+	dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
 	if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
 		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
 		dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
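
For context, a dmaengine client would exercise the new prep routine
roughly as follows. This is a hypothetical client-side sketch, not part
of the patch: my_done_callback, my_ctx, and an already-requested chan
are assumed for illustration, and error handling is trimmed:

	/* Hypothetical client-side sketch; not from this patch. */
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	tx->callback = my_done_callback;	/* assumed completion handler */
	tx->callback_param = my_ctx;		/* assumed caller context */

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);

Because idxd backs this descriptor with DSA_OPCODE_NOOP and zero-length
source/destination addresses, no data is moved; the descriptor exists
purely to produce a completion, which is what makes DMA_INTERRUPT
usable for interrupt-only signaling.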