
[V2,6/6] dmaengine: xilinx_dma: Program interrupt delay timeout

Message ID 20221124102745.2620370-7-sarath.babu.naidu.gaddam@amd.com (mailing list archive)
State New, archived
Series Xilinx DMA enhancements and optimization

Commit Message

Sarath Babu Naidu Gaddam Nov. 24, 2022, 10:27 a.m. UTC
From: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>

Program IRQDelay for AXI DMA. The interrupt timeout mechanism causes
the DMA engine to generate an interrupt after the delay time period
has expired. It enables the dmaengine to respond in real time even
when interrupt coalescing is configured. It also removes the placeholder
for the delay interrupt and merges its handling with the frame completion
interrupt. Since the interrupt delay timeout is disabled by default,
this feature addition has no functional impact on the VDMA and CDMA IPs.

Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
Signed-off-by: Sarath Babu Naidu Gaddam <sarath.babu.naidu.gaddam@amd.com>
---
 drivers/dma/xilinx/xilinx_dma.c | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)
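
To make the register layout concrete, below is a minimal, self-contained C
sketch (illustration only, not driver code; the function and macro names are
made up for the example, and only the bit positions are taken from the
constants this patch adds: coalesce count in DMACR bits 23:16, delay timeout
in bits 31:24). A delay value of 0 leaves the timeout disabled.

/* Illustration only: pack an interrupt coalesce count and a delay timeout
 * into a DMACR-style value using the bit positions from this patch. */
#include <stdint.h>
#include <stdio.h>

#define CR_COALESCE_MASK   0x00ff0000u   /* GENMASK(23, 16) */
#define CR_COALESCE_SHIFT  16
#define CR_DELAY_MASK      0xff000000u   /* GENMASK(31, 24) */
#define CR_DELAY_SHIFT     24

static uint32_t pack_dmacr(uint32_t reg, uint8_t coalesce, uint8_t delay)
{
	/* Clear both fields, then program the new 8-bit values. */
	reg &= ~(CR_COALESCE_MASK | CR_DELAY_MASK);
	reg |= (uint32_t)coalesce << CR_COALESCE_SHIFT;
	reg |= (uint32_t)delay << CR_DELAY_SHIFT;
	return reg;
}

int main(void)
{
	/* Example: interrupt after 16 completed descriptors, or earlier if
	 * the delay timer (10 ticks here) expires with completions pending. */
	printf("DMACR = 0x%08x\n", pack_dmacr(0, 16, 10));
	return 0;
}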

Comments

Vinod Koul Dec. 28, 2022, 11 a.m. UTC | #1
On 24-11-22, 15:57, Sarath Babu Naidu Gaddam wrote:
> From: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
> 
> Program IRQDelay for AXI DMA. The interrupt timeout mechanism causes
> the DMA engine to generate an interrupt after the delay time period
> has expired. It enables the dmaengine to respond in real time even
> when interrupt coalescing is configured. It also removes the placeholder
> for the delay interrupt and merges its handling with the frame completion
> interrupt. Since the interrupt delay timeout is disabled by default,
> this feature addition has no functional impact on the VDMA and CDMA IPs.
> 
> Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
> Signed-off-by: Sarath Babu Naidu Gaddam <sarath.babu.naidu.gaddam@amd.com>
> ---
>  drivers/dma/xilinx/xilinx_dma.c | 20 +++++++++++---------
>  1 file changed, 11 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
> index ce0c151d8f61..333d68ee3559 100644
> --- a/drivers/dma/xilinx/xilinx_dma.c
> +++ b/drivers/dma/xilinx/xilinx_dma.c
> @@ -173,8 +173,10 @@
>  #define XILINX_DMA_MAX_TRANS_LEN_MAX	23
>  #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
>  #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
> +#define XILINX_DMA_CR_DELAY_MAX		GENMASK(31, 24)
>  #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
>  #define XILINX_DMA_CR_COALESCE_SHIFT	16
> +#define XILINX_DMA_CR_DELAY_SHIFT	24
>  #define XILINX_DMA_BD_SOP		BIT(27)
>  #define XILINX_DMA_BD_EOP		BIT(26)
>  #define XILINX_DMA_COALESCE_MAX		255
> @@ -410,6 +412,7 @@ struct xilinx_dma_tx_descriptor {
>   * @stop_transfer: Differentiate b/w DMA IP's quiesce
>   * @tdest: TDEST value for mcdma
>   * @has_vflip: S2MM vertical flip
> + * @irq_delay: Interrupt delay timeout
>   */
>  struct xilinx_dma_chan {
>  	struct xilinx_dma_device *xdev;
> @@ -448,6 +451,7 @@ struct xilinx_dma_chan {
>  	int (*stop_transfer)(struct xilinx_dma_chan *chan);
>  	u16 tdest;
>  	bool has_vflip;
> +	u8 irq_delay;
>  };
>  
>  /**
> @@ -1560,6 +1564,9 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
>  	if (chan->has_sg)
>  		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
>  			     head_desc->async_tx.phys);
> +	reg  &= ~XILINX_DMA_CR_DELAY_MAX;
> +	reg  |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
> +	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
>  
>  	xilinx_dma_start(chan);
>  
> @@ -1887,15 +1894,8 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
>  		}
>  	}
>  
> -	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
> -		/*
> -		 * Device takes too long to do the transfer when user requires
> -		 * responsiveness.
> -		 */
> -		dev_dbg(chan->dev, "Inter-packet latency too long\n");
> -	}
> -
> -	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
> +	if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ |
> +		      XILINX_DMA_DMASR_DLY_CNT_IRQ)) {
>  		spin_lock(&chan->lock);
>  		xilinx_dma_complete_descriptor(chan);
>  		chan->idle = true;
> @@ -2822,6 +2822,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
>  	/* Retrieve the channel properties from the device tree */
>  	has_dre = of_property_read_bool(node, "xlnx,include-dre");
>  
> +	of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay);

Same question here too

> +
>  	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
>  
>  	err = of_property_read_u32(node, "xlnx,datawidth", &value);
> -- 
> 2.25.1
Sarath Babu Naidu Gaddam Feb. 2, 2023, 7:52 a.m. UTC | #2
> -----Original Message-----
> From: Vinod Koul <vkoul@kernel.org>
> Sent: Wednesday, December 28, 2022 4:31 PM
> To: Gaddam, Sarath Babu Naidu
> <sarath.babu.naidu.gaddam@amd.com>
> Cc: robh+dt@kernel.org; krzysztof.kozlowski+dt@linaro.org;
> lars@metafoo.de; adrianml@alumnos.upm.es;
> dmaengine@vger.kernel.org; devicetree@vger.kernel.org; linux-arm-
> kernel@lists.infradead.org; linux-kernel@vger.kernel.org; Simek, Michal
> <michal.simek@amd.com>; Pandey, Radhey Shyam
> <radhey.shyam.pandey@amd.com>; Sarangi, Anirudha
> <anirudha.sarangi@amd.com>; Katakam, Harini
> <harini.katakam@amd.com>; git (AMD-Xilinx) <git@amd.com>
> Subject: Re: [PATCH V2 6/6] dmaengine: xilinx_dma: Program interrupt
> delay timeout
> 
> On 24-11-22, 15:57, Sarath Babu Naidu Gaddam wrote:
> > From: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
> >
> > Program IRQDelay for AXI DMA. The interrupt timeout mechanism causes
> > the DMA engine to generate an interrupt after the delay time period
> > has expired. It enables the dmaengine to respond in real time even
> > when interrupt coalescing is configured. It also removes the
> > placeholder for the delay interrupt and merges its handling with the
> > frame completion interrupt. Since the interrupt delay timeout is
> > disabled by default, this feature addition has no functional impact
> > on the VDMA and CDMA IPs.
> >
> > Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
> > Signed-off-by: Sarath Babu Naidu Gaddam <sarath.babu.naidu.gaddam@amd.com>
> > ---
> >  drivers/dma/xilinx/xilinx_dma.c | 20 +++++++++++---------
> >  1 file changed, 11 insertions(+), 9 deletions(-)
> >
> > diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
> > index ce0c151d8f61..333d68ee3559 100644
> > --- a/drivers/dma/xilinx/xilinx_dma.c
> > +++ b/drivers/dma/xilinx/xilinx_dma.c
> > @@ -173,8 +173,10 @@
> >  #define XILINX_DMA_MAX_TRANS_LEN_MAX	23
> >  #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
> >  #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
> > +#define XILINX_DMA_CR_DELAY_MAX		GENMASK(31, 24)
> >  #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
> >  #define XILINX_DMA_CR_COALESCE_SHIFT	16
> > +#define XILINX_DMA_CR_DELAY_SHIFT	24
> >  #define XILINX_DMA_BD_SOP		BIT(27)
> >  #define XILINX_DMA_BD_EOP		BIT(26)
> >  #define XILINX_DMA_COALESCE_MAX		255
> > @@ -410,6 +412,7 @@ struct xilinx_dma_tx_descriptor {
> >   * @stop_transfer: Differentiate b/w DMA IP's quiesce
> >   * @tdest: TDEST value for mcdma
> >   * @has_vflip: S2MM vertical flip
> > + * @irq_delay: Interrupt delay timeout
> >   */
> >  struct xilinx_dma_chan {
> >  	struct xilinx_dma_device *xdev;
> > @@ -448,6 +451,7 @@ struct xilinx_dma_chan {
> >  	int (*stop_transfer)(struct xilinx_dma_chan *chan);
> >  	u16 tdest;
> >  	bool has_vflip;
> > +	u8 irq_delay;
> >  };
> >
> >  /**
> > @@ -1560,6 +1564,9 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
> >  	if (chan->has_sg)
> >  		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
> >  			     head_desc->async_tx.phys);
> > +	reg  &= ~XILINX_DMA_CR_DELAY_MAX;
> > +	reg  |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
> > +	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
> >
> >  	xilinx_dma_start(chan);
> >
> > @@ -1887,15 +1894,8 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
> >  		}
> >  	}
> >
> > -	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
> > -		/*
> > -		 * Device takes too long to do the transfer when user requires
> > -		 * responsiveness.
> > -		 */
> > -		dev_dbg(chan->dev, "Inter-packet latency too long\n");
> > -	}
> > -
> > -	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
> > +	if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ |
> > +		      XILINX_DMA_DMASR_DLY_CNT_IRQ)) {
> >  		spin_lock(&chan->lock);
> >  		xilinx_dma_complete_descriptor(chan);
> >  		chan->idle = true;
> > @@ -2822,6 +2822,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
> >  	/* Retrieve the channel properties from the device tree */
> >  	has_dre = of_property_read_bool(node, "xlnx,include-dre");
> >
> > +	of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay);
> 
> Same question here too

This is an optional property. If it is not present, the driver programs
this value as zero, so it does not break existing functionality.
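
To spell out why ignoring the return value is safe here, a small stand-alone
C sketch (plain C, not kernel code; read_optional_u8() is a hypothetical
stand-in for of_property_read_u8()): the channel structure is zero-initialised
at allocation, so a missing property leaves irq_delay at 0 and the delay
timeout stays disabled.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct chan {
	uint8_t irq_delay;	/* 0 = interrupt delay timeout disabled */
};

/* Stand-in for of_property_read_u8(): fills *out only when the property
 * is present, otherwise returns an error and leaves *out untouched. */
static int read_optional_u8(int present, uint8_t value, uint8_t *out)
{
	if (!present)
		return -1;
	*out = value;
	return 0;
}

int main(void)
{
	struct chan c;

	memset(&c, 0, sizeof(c));		/* like a zeroing allocation */
	read_optional_u8(0, 10, &c.irq_delay);	/* property absent */
	printf("absent:  irq_delay = %d (timeout stays disabled)\n", c.irq_delay);

	memset(&c, 0, sizeof(c));
	read_optional_u8(1, 10, &c.irq_delay);	/* property present */
	printf("present: irq_delay = %d\n", c.irq_delay);
	return 0;
}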

> 
> > +
> >  	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
> >
> >  	err = of_property_read_u32(node, "xlnx,datawidth", &value);
> > --
> > 2.25.1
> 
> --
> ~Vinod

Patch

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index ce0c151d8f61..333d68ee3559 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -173,8 +173,10 @@ 
 #define XILINX_DMA_MAX_TRANS_LEN_MAX	23
 #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
 #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
+#define XILINX_DMA_CR_DELAY_MAX		GENMASK(31, 24)
 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
 #define XILINX_DMA_CR_COALESCE_SHIFT	16
+#define XILINX_DMA_CR_DELAY_SHIFT	24
 #define XILINX_DMA_BD_SOP		BIT(27)
 #define XILINX_DMA_BD_EOP		BIT(26)
 #define XILINX_DMA_COALESCE_MAX		255
@@ -410,6 +412,7 @@  struct xilinx_dma_tx_descriptor {
  * @stop_transfer: Differentiate b/w DMA IP's quiesce
  * @tdest: TDEST value for mcdma
  * @has_vflip: S2MM vertical flip
+ * @irq_delay: Interrupt delay timeout
  */
 struct xilinx_dma_chan {
 	struct xilinx_dma_device *xdev;
@@ -448,6 +451,7 @@  struct xilinx_dma_chan {
 	int (*stop_transfer)(struct xilinx_dma_chan *chan);
 	u16 tdest;
 	bool has_vflip;
+	u8 irq_delay;
 };
 
 /**
@@ -1560,6 +1564,9 @@  static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 	if (chan->has_sg)
 		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
 			     head_desc->async_tx.phys);
+	reg  &= ~XILINX_DMA_CR_DELAY_MAX;
+	reg  |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
+	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 
 	xilinx_dma_start(chan);
 
@@ -1887,15 +1894,8 @@  static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
 		}
 	}
 
-	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
-		/*
-		 * Device takes too long to do the transfer when user requires
-		 * responsiveness.
-		 */
-		dev_dbg(chan->dev, "Inter-packet latency too long\n");
-	}
-
-	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
+	if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ |
+		      XILINX_DMA_DMASR_DLY_CNT_IRQ)) {
 		spin_lock(&chan->lock);
 		xilinx_dma_complete_descriptor(chan);
 		chan->idle = true;
@@ -2822,6 +2822,8 @@  static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 	/* Retrieve the channel properties from the device tree */
 	has_dre = of_property_read_bool(node, "xlnx,include-dre");
 
+	of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay);
+
 	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
 
 	err = of_property_read_u32(node, "xlnx,datawidth", &value);