diff mbox series

dmaengine: ti: edma: Enable support for polled (memcpy) completion

Message ID 20190514080909.10306-1-peter.ujfalusi@ti.com (mailing list archive)
State New, archived
Headers show
Series dmaengine: ti: edma: Enable support for polled (memcpy) completion | expand

Commit Message

Peter Ujfalusi May 14, 2019, 8:09 a.m. UTC
When a DMA client driver decides that it is not providing callback for
completion of a transfer (and/or does not set the DMA_PREP_INTERRUPT) but
it will poll the status of the transfer (in case of short memcpy for
example) we will not get interrupt for the completion of the transfer and
will not mark the transaction as done.

Check the event registers (ER and EER) and if the channel is inactive then
return with DMA_COMPLETE to let the client know that the transfer is
completed.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
---
 drivers/dma/ti/edma.c | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

Comments

Vinod Koul May 21, 2019, 5:04 a.m. UTC | #1
On 14-05-19, 11:09, Peter Ujfalusi wrote:
> When a DMA client driver decides that it is not providing callback for
> completion of a transfer (and/or does not set the DMA_PREP_INTERRUPT) but
> it will poll the status of the transfer (in case of short memcpy for
> example) we will not get interrupt for the completion of the transfer and
> will not mark the transaction as done.
> 
> Check the event registers (ER and EER) and if the channel is inactive then
> return wioth DMA_COMPLETE to let the client know that the transfer is
        ^^^^^
Typo

> completed.
> 
> Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
> ---
>  drivers/dma/ti/edma.c | 23 ++++++++++++++++++++---
>  1 file changed, 20 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
> index ceabdea40ae0..7501445af069 100644
> --- a/drivers/dma/ti/edma.c
> +++ b/drivers/dma/ti/edma.c
> @@ -1211,8 +1211,9 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
>  
>  	edesc->pset[0].param.opt |= ITCCHEN;
>  	if (nslots == 1) {
> -		/* Enable transfer complete interrupt */
> -		edesc->pset[0].param.opt |= TCINTEN;
> +		/* Enable transfer complete interrupt if requested */
> +		if (tx_flags & DMA_PREP_INTERRUPT)
> +			edesc->pset[0].param.opt |= TCINTEN;
>  	} else {
>  		/* Enable transfer complete chaining for the first slot */
>  		edesc->pset[0].param.opt |= TCCHEN;
> @@ -1239,7 +1240,9 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
>  		}
>  
>  		edesc->pset[1].param.opt |= ITCCHEN;
> -		edesc->pset[1].param.opt |= TCINTEN;
> +		/* Enable transfer complete interrupt if requested */
> +		if (tx_flags & DMA_PREP_INTERRUPT)
> +			edesc->pset[1].param.opt |= TCINTEN;
>  	}
>  
>  	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
> @@ -1801,6 +1804,20 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
>  	unsigned long flags;
>  
>  	ret = dma_cookie_status(chan, cookie, txstate);
> +
> +	if (ret != DMA_COMPLETE && echan->edesc && !echan->edesc->cyclic) {
> +		struct edma_cc *ecc = echan->ecc;
> +		int channel = EDMA_CHAN_SLOT(echan->ch_num);
> +		int j = (channel >> 5);
> +		unsigned int mask = BIT(channel & 0x1f);

GENMASK or define a macro instead of a magic number?

> +		unsigned int sh_er = edma_shadow0_read_array(ecc, SH_ER, j);
> +		unsigned int sh_eer = edma_shadow0_read_array(ecc, SH_EER, j);
> +
> +		/* The channel is no longer active */
> +		if (!(sh_er & mask) && !(sh_eer & mask))
> +			ret = DMA_COMPLETE;
> +	}
> +
>  	if (ret == DMA_COMPLETE || !txstate)
>  		return ret;
>  
> -- 
> Peter
> 
> Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
> Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki
Peter Ujfalusi May 21, 2019, 6:16 a.m. UTC | #2
On 21/05/2019 8.04, Vinod Koul wrote:
> On 14-05-19, 11:09, Peter Ujfalusi wrote:
>> When a DMA client driver decides that it is not providing callback for
>> completion of a transfer (and/or does not set the DMA_PREP_INTERRUPT) but
>> it will poll the status of the transfer (in case of short memcpy for
>> example) we will not get interrupt for the completion of the transfer and
>> will not mark the transaction as done.
>>
>> Check the event registers (ER and EER) and if the channel is inactive then
>> return wioth DMA_COMPLETE to let the client know that the transfer is
>         ^^^^^
> Typo

Ok

> 
>> completed.
>>
>> Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
>> ---
>>  drivers/dma/ti/edma.c | 23 ++++++++++++++++++++---
>>  1 file changed, 20 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
>> index ceabdea40ae0..7501445af069 100644
>> --- a/drivers/dma/ti/edma.c
>> +++ b/drivers/dma/ti/edma.c
>> @@ -1211,8 +1211,9 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
>>  
>>  	edesc->pset[0].param.opt |= ITCCHEN;
>>  	if (nslots == 1) {
>> -		/* Enable transfer complete interrupt */
>> -		edesc->pset[0].param.opt |= TCINTEN;
>> +		/* Enable transfer complete interrupt if requested */
>> +		if (tx_flags & DMA_PREP_INTERRUPT)
>> +			edesc->pset[0].param.opt |= TCINTEN;
>>  	} else {
>>  		/* Enable transfer complete chaining for the first slot */
>>  		edesc->pset[0].param.opt |= TCCHEN;
>> @@ -1239,7 +1240,9 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
>>  		}
>>  
>>  		edesc->pset[1].param.opt |= ITCCHEN;
>> -		edesc->pset[1].param.opt |= TCINTEN;
>> +		/* Enable transfer complete interrupt if requested */
>> +		if (tx_flags & DMA_PREP_INTERRUPT)
>> +			edesc->pset[1].param.opt |= TCINTEN;
>>  	}
>>  
>>  	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
>> @@ -1801,6 +1804,20 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
>>  	unsigned long flags;
>>  
>>  	ret = dma_cookie_status(chan, cookie, txstate);
>> +
>> +	if (ret != DMA_COMPLETE && echan->edesc && !echan->edesc->cyclic) {
>> +		struct edma_cc *ecc = echan->ecc;
>> +		int channel = EDMA_CHAN_SLOT(echan->ch_num);
>> +		int j = (channel >> 5);
>> +		unsigned int mask = BIT(channel & 0x1f);
> 
> GENMASK or define a macro instead of a magic number?

So it is better to change the other places first from where I have just
copied this.

> 
>> +		unsigned int sh_er = edma_shadow0_read_array(ecc, SH_ER, j);
>> +		unsigned int sh_eer = edma_shadow0_read_array(ecc, SH_EER, j);
>> +
>> +		/* The channel is no longer active */
>> +		if (!(sh_er & mask) && !(sh_eer & mask))
>> +			ret = DMA_COMPLETE;
>> +	}
>> +
>>  	if (ret == DMA_COMPLETE || !txstate)
>>  		return ret;
>>  
>> -- 
>> Peter
>>
>> Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
>> Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki
> 

- Péter

Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki
Vinod Koul May 21, 2019, 7:21 a.m. UTC | #3
On 21-05-19, 09:16, Peter Ujfalusi wrote:
> On 21/05/2019 8.04, Vinod Koul wrote:
> > On 14-05-19, 11:09, Peter Ujfalusi wrote:

> >>  	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
> >> @@ -1801,6 +1804,20 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
> >>  	unsigned long flags;
> >>  
> >>  	ret = dma_cookie_status(chan, cookie, txstate);
> >> +
> >> +	if (ret != DMA_COMPLETE && echan->edesc && !echan->edesc->cyclic) {
> >> +		struct edma_cc *ecc = echan->ecc;
> >> +		int channel = EDMA_CHAN_SLOT(echan->ch_num);
> >> +		int j = (channel >> 5);
> >> +		unsigned int mask = BIT(channel & 0x1f);
> > 
> > GENMASK or define a macro instead of a magic number?
> 
> So it is better to change the other places first from where I have just
> copied this.

That would be nice as well :)
diff mbox series

Patch

diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index ceabdea40ae0..7501445af069 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -1211,8 +1211,9 @@  static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 
 	edesc->pset[0].param.opt |= ITCCHEN;
 	if (nslots == 1) {
-		/* Enable transfer complete interrupt */
-		edesc->pset[0].param.opt |= TCINTEN;
+		/* Enable transfer complete interrupt if requested */
+		if (tx_flags & DMA_PREP_INTERRUPT)
+			edesc->pset[0].param.opt |= TCINTEN;
 	} else {
 		/* Enable transfer complete chaining for the first slot */
 		edesc->pset[0].param.opt |= TCCHEN;
@@ -1239,7 +1240,9 @@  static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 		}
 
 		edesc->pset[1].param.opt |= ITCCHEN;
-		edesc->pset[1].param.opt |= TCINTEN;
+		/* Enable transfer complete interrupt if requested */
+		if (tx_flags & DMA_PREP_INTERRUPT)
+			edesc->pset[1].param.opt |= TCINTEN;
 	}
 
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
@@ -1801,6 +1804,20 @@  static enum dma_status edma_tx_status(struct dma_chan *chan,
 	unsigned long flags;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
+
+	if (ret != DMA_COMPLETE && echan->edesc && !echan->edesc->cyclic) {
+		struct edma_cc *ecc = echan->ecc;
+		int channel = EDMA_CHAN_SLOT(echan->ch_num);
+		int j = (channel >> 5);
+		unsigned int mask = BIT(channel & 0x1f);
+		unsigned int sh_er = edma_shadow0_read_array(ecc, SH_ER, j);
+		unsigned int sh_eer = edma_shadow0_read_array(ecc, SH_EER, j);
+
+		/* The channel is no longer active */
+		if (!(sh_er & mask) && !(sh_eer & mask))
+			ret = DMA_COMPLETE;
+	}
+
 	if (ret == DMA_COMPLETE || !txstate)
 		return ret;