diff mbox series

[v2,4/4] dmaengine: xilinx: xdma: Add terminate_all/synchronize callbacks

Message ID 20231130111315.729430-5-miquel.raynal@bootlin.com (mailing list archive)
State Accepted
Commit f5c392d106e7cc58c7705799ef4c36c3b2f60b31
Headers show
Series dmaengine: xilinx: Misc (cyclic) transfers fixes | expand

Commit Message

Miquel Raynal Nov. 30, 2023, 11:13 a.m. UTC
The driver is capable of starting scatter-gather transfers and needs to
wait until their end. It is also capable of starting cyclic transfers,
which are only "reset" the next time the channel is reused. In practice
we usually hear no audio glitch because the sound card stops the flow on
its side, so the pending DMA transfers are simply discarded. There are
however some cases (when playing with the number of frames and with a
discontinuous sound file) where the sound card seems slightly too slow
at stopping the flow, leading to an audible glitch.

In all cases, we need to gain better control of the DMA engine, and
adding proper ->device_terminate_all() and ->device_synchronize()
callbacks is the natural way to do it. With these two callbacks, no
glitch can be heard anymore.

Fixes: cd8c732ce1a5 ("dmaengine: xilinx: xdma: Support cyclic transfers")
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
---

This was only tested with cyclic transfers.
---
 drivers/dma/xilinx/xdma.c | 68 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 68 insertions(+)
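
For reference, a minimal sketch (not part of the patch) of how a dmaengine
client is expected to reach these two new callbacks through the generic API;
the function name and channel handle below are assumptions made for
illustration only:

	#include <linux/dmaengine.h>

	/* Stop a running (e.g. cyclic) transfer from a client driver. */
	static void example_stop_transfer(struct dma_chan *chan)
	{
		/* Ends up in ->device_terminate_all(), i.e. xdma_terminate_all(). */
		dmaengine_terminate_async(chan);

		/* Ends up in ->device_synchronize(), i.e. xdma_synchronize();
		 * returns once no completion callback can still be running.
		 */
		dmaengine_synchronize(chan);
	}

dmaengine_terminate_sync() combines both steps when the caller is allowed to
sleep.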

Comments

Lizhi Hou Nov. 30, 2023, 5:28 p.m. UTC | #1
Added Jan Kuliga who submitted a similar change.

https://lore.kernel.org/dmaengine/20231124192524.134989-1-jankul@alatek.krakow.pl/T/#m20c1ca4bba291f6ca07a8e5fbcaeed9fd0a6f008


Thanks,

Lizhi

On 11/30/23 03:13, Miquel Raynal wrote:
> The driver is capable of starting scatter-gather transfers and needs to
> wait until their end. It is also capable of starting cyclic transfers
> and will only be "reset" next time the channel will be reused. In
> practice most of the time we hear no audio glitch because the sound card
> stops the flow on its side so the DMA transfers are just
> discarded. There are however some cases (when playing a bit with a
> number of frames and with a discontinuous sound file) when the sound
> card seems to be slightly too slow at stopping the flow, leading to a
> glitch that can be heard.
>
> In all cases, we need to earn better control of the DMA engine and
> adding proper ->device_terminate_all() and ->device_synchronize()
> callbacks feels totally relevant. With these two callbacks, no glitch
> can be heard anymore.
>
> Fixes: cd8c732ce1a5 ("dmaengine: xilinx: xdma: Support cyclic transfers")
> Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
> ---
>
> This was only tested with cyclic transfers.
> ---
>   drivers/dma/xilinx/xdma.c | 68 +++++++++++++++++++++++++++++++++++++++
>   1 file changed, 68 insertions(+)
>
> diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
> index e931ff42209c..290bb5d2d1e2 100644
> --- a/drivers/dma/xilinx/xdma.c
> +++ b/drivers/dma/xilinx/xdma.c
> @@ -371,6 +371,31 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
>   		return ret;
>   
>   	xchan->busy = true;
> +
> +	return 0;
> +}
> +
> +/**
> + * xdma_xfer_stop - Stop DMA transfer
> + * @xchan: DMA channel pointer
> + */
> +static int xdma_xfer_stop(struct xdma_chan *xchan)
> +{
> +	struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
> +	struct xdma_device *xdev = xchan->xdev_hdl;
> +	int ret;
> +
> +	if (!vd || !xchan->busy)
> +		return -EINVAL;
> +
> +	/* clear run stop bit to prevent any further auto-triggering */
> +	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
> +			   CHAN_CTRL_RUN_STOP);
> +	if (ret)
> +		return ret;
> +
> +	xchan->busy = false;
> +
>   	return 0;
>   }
>   
> @@ -475,6 +500,47 @@ static void xdma_issue_pending(struct dma_chan *chan)
>   	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
>   }
>   
> +/**
> + * xdma_terminate_all - Terminate all transactions
> + * @chan: DMA channel pointer
> + */
> +static int xdma_terminate_all(struct dma_chan *chan)
> +{
> +	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
> +	struct xdma_desc *desc = NULL;
> +	struct virt_dma_desc *vd;
> +	unsigned long flags;
> +	LIST_HEAD(head);
> +
> +	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
> +	xdma_xfer_stop(xdma_chan);
> +
> +	vd = vchan_next_desc(&xdma_chan->vchan);
> +	if (vd)
> +		desc = to_xdma_desc(vd);
> +	if (desc) {
> +		dma_cookie_complete(&desc->vdesc.tx);
> +		vchan_terminate_vdesc(&desc->vdesc);
> +	}
> +
> +	vchan_get_all_descriptors(&xdma_chan->vchan, &head);
> +	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
> +	vchan_dma_desc_free_list(&xdma_chan->vchan, &head);
> +
> +	return 0;
> +}
> +
> +/**
> + * xdma_synchronize - Synchronize terminated transactions
> + * @chan: DMA channel pointer
> + */
> +static void xdma_synchronize(struct dma_chan *chan)
> +{
> +	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
> +
> +	vchan_synchronize(&xdma_chan->vchan);
> +}
> +
>   /**
>    * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
>    * @chan: DMA channel pointer
> @@ -1088,6 +1154,8 @@ static int xdma_probe(struct platform_device *pdev)
>   	xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
>   	xdev->dma_dev.device_config = xdma_device_config;
>   	xdev->dma_dev.device_issue_pending = xdma_issue_pending;
> +	xdev->dma_dev.device_terminate_all = xdma_terminate_all;
> +	xdev->dma_dev.device_synchronize = xdma_synchronize;
>   	xdev->dma_dev.filter.map = pdata->device_map;
>   	xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
>   	xdev->dma_dev.filter.fn = xdma_filter_fn;
Jan Kuliga Nov. 30, 2023, 7:06 p.m. UTC | #2
Hi,

On 30.11.2023 18:28, Lizhi Hou wrote:
> Added Jan Kuliga who submitted a similar change.
>
Thanks for CC'ing me on the other patchset. I'm currently working on an 
interleaved-DMA transfer implementation for XDMA. While testing it, 
I've come across a flaw in my patch that you mentioned here (and it also 
exists in Miquel's patch).

> https://lore.kernel.org/dmaengine/20231124192524.134989-1-jankul@alatek.krakow.pl/T/#m20c1ca4bba291f6ca07a8e5fbcaeed9fd0a6f008
>
> Thanks,
> 
> Lizhi
> 
> On 11/30/23 03:13, Miquel Raynal wrote:
>> The driver is capable of starting scatter-gather transfers and needs to
>> wait until their end. It is also capable of starting cyclic transfers
>> and will only be "reset" next time the channel will be reused. In
>> practice most of the time we hear no audio glitch because the sound card
>> stops the flow on its side so the DMA transfers are just
>> discarded. There are however some cases (when playing a bit with a
>> number of frames and with a discontinuous sound file) when the sound
>> card seems to be slightly too slow at stopping the flow, leading to a
>> glitch that can be heard.
>>
>> In all cases, we need to earn better control of the DMA engine and
>> adding proper ->device_terminate_all() and ->device_synchronize()
>> callbacks feels totally relevant. With these two callbacks, no glitch
>> can be heard anymore.
>>
>> Fixes: cd8c732ce1a5 ("dmaengine: xilinx: xdma: Support cyclic transfers")
>> Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
>> ---
>>
>> This was only tested with cyclic transfers.
>> ---
>>   drivers/dma/xilinx/xdma.c | 68 +++++++++++++++++++++++++++++++++++++++
>>   1 file changed, 68 insertions(+)
>>
>> diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
>> index e931ff42209c..290bb5d2d1e2 100644
>> --- a/drivers/dma/xilinx/xdma.c
>> +++ b/drivers/dma/xilinx/xdma.c
>> @@ -371,6 +371,31 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
>>           return ret;
>>       xchan->busy = true;
>> +
>> +    return 0;
>> +}
>> +
>> +/**
>> + * xdma_xfer_stop - Stop DMA transfer
>> + * @xchan: DMA channel pointer
>> + */
>> +static int xdma_xfer_stop(struct xdma_chan *xchan)
>> +{
>> +    struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
>> +    struct xdma_device *xdev = xchan->xdev_hdl;
>> +    int ret;
>> +
>> +    if (!vd || !xchan->busy)
>> +        return -EINVAL;
>> +
>> +    /* clear run stop bit to prevent any further auto-triggering */
>> +    ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
>> +               CHAN_CTRL_RUN_STOP);
>> +    if (ret)
>> +        return ret;

Shouldn't the status register be cleared before it is used next time? It can 
be cleared on read by doing a read from a separate register (offset 0x44).
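
A rough sketch of what this suggestion could look like inside xdma_xfer_stop(),
assuming the clear-on-read status register at offset 0x44 is exposed as
XDMA_CHAN_STATUS_RC (register name assumed for illustration, not taken from
this patch):

	u32 st;

	/* Assumed clear-on-read register at offset 0x44: reading it once
	 * after stopping drops stale status bits before the next start.
	 */
	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
	if (ret)
		return ret;
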
>> +
>> +    xchan->busy = false;
>> +
>>       return 0;
>>   }
>> @@ -475,6 +500,47 @@ static void xdma_issue_pending(struct dma_chan 
>> *chan)
>>       spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
>>   }
>> +/**
>> + * xdma_terminate_all - Terminate all transactions
>> + * @chan: DMA channel pointer
>> + */
>> +static int xdma_terminate_all(struct dma_chan *chan)
>> +{
>> +    struct xdma_chan *xdma_chan = to_xdma_chan(chan);
>> +    struct xdma_desc *desc = NULL;
>> +    struct virt_dma_desc *vd;
>> +    unsigned long flags;
>> +    LIST_HEAD(head);
>> +
>> +    spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
>> +    xdma_xfer_stop(xdma_chan);
>> +
>> +    vd = vchan_next_desc(&xdma_chan->vchan);
>> +    if (vd)
>> +        desc = to_xdma_desc(vd);
>> +    if (desc) {
>> +        dma_cookie_complete(&desc->vdesc.tx);
Prior to a call to vchan_terminate_vdesc(), the vd node has to be 
deleted from the vc.desc_issued list. Otherwise, if there is more than 
one descriptor present on that list, its link with the list's head is 
lost and freeing the resources associated with it becomes impossible 
(which later shows up as a dma_pool_destroy() failure). I noticed it 
when I was playing with a large number of interleaved DMA TXs.
>> +        vchan_terminate_vdesc(&desc->vdesc);
>> +    }
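
A minimal sketch of the fix described above, assuming the descriptor is still
linked on the vc.desc_issued list at this point (an illustration of the idea
only, not the exact patch Jan later submitted):

	vd = vchan_next_desc(&xdma_chan->vchan);
	if (vd) {
		/* Unlink from desc_issued first: vchan_terminate_vdesc() does a
		 * list_add_tail() onto desc_terminated, so moving a node still
		 * linked on desc_issued would corrupt both lists.
		 */
		list_del(&vd->node);
		vchan_terminate_vdesc(vd);
	}
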
>> +
>> +    vchan_get_all_descriptors(&xdma_chan->vchan, &head);
>> +    spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
>> +    vchan_dma_desc_free_list(&xdma_chan->vchan, &head);
>> +
>> +    return 0;
>> +}
>> +
>> +/**
>> + * xdma_synchronize - Synchronize terminated transactions
>> + * @chan: DMA channel pointer
>> + */
>> +static void xdma_synchronize(struct dma_chan *chan)
>> +{
>> +    struct xdma_chan *xdma_chan = to_xdma_chan(chan);
>> +
>> +    vchan_synchronize(&xdma_chan->vchan);
>> +}
>> +
>>   /**
>>    * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
>>    * @chan: DMA channel pointer
>> @@ -1088,6 +1154,8 @@ static int xdma_probe(struct platform_device *pdev)
>>       xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
>>       xdev->dma_dev.device_config = xdma_device_config;
>>       xdev->dma_dev.device_issue_pending = xdma_issue_pending;
>> +    xdev->dma_dev.device_terminate_all = xdma_terminate_all;
>> +    xdev->dma_dev.device_synchronize = xdma_synchronize;
>>       xdev->dma_dev.filter.map = pdata->device_map;
>>       xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
>>       xdev->dma_dev.filter.fn = xdma_filter_fn;

I have already prepared a patch with an appropriate fix, which I'm going 
to submit with the whole patch series once I have interleaved DMA 
transfers properly sorted out (hopefully soon). Or should I post the 
patch with the fix immediately, as a reply to the one already sent? 
What do you prefer?

Thanks,
Jan
Miquel Raynal Nov. 30, 2023, 7:23 p.m. UTC | #3
Hi Jan,

jankul@alatek.krakow.pl wrote on Thu, 30 Nov 2023 20:06:51 +0100:

> Hi,
> 
> On 30.11.2023 18:28, Lizhi Hou wrote:
> > Added Jan Kuliga who submitted a similar change.
> >  
> Thanks for CC'ing me to the other patchset. I'm currently working on interleaved-DMA transfers implementation for XDMA. While testing it, I've come across a flaw in mine patch you mentioned here (and it also exists in the Miquel's patch).
> 
> > https://lore.kernel.org/dmaengine/20231124192524.134989-1-jankul@alatek.krakow.pl/T/#m20c1ca4bba291f6ca07a8e5fbcaeed9fd0a6f008
> >
> > Thanks,
> > 
> > Lizhi
> > 
> > On 11/30/23 03:13, Miquel Raynal wrote:
> >> [...]
> >> +    /* clear run stop bit to prevent any further auto-triggering */
> >> +    ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
> >> +               CHAN_CTRL_RUN_STOP);
> >> +    if (ret)
> >> +        return ret;
> 
> Shouldn't status register be cleared prior to using it next time? It can be cleared-on-read by doing a read from a separate register (offset 0x44).
> >> [...]
> >> +    if (desc) {
> >> +        dma_cookie_complete(&desc->vdesc.tx);
> Prior to a call to vchan_terminate_vdesc(), the vd node has to be deleted from vc.desc_issued list. Otherwise, if there is more than one descriptor present on that list, its link with list's head is going to be lost and freeing resources associated with it will become impossible (doing so results in dma_pool_destroy() failure). I noticed it when I was playing with a large number of interleaved DMA TXs.
> >> +        vchan_terminate_vdesc(&desc->vdesc);
> >> +    }
> >> [...]
> 
> I have already prepared a patch with an appropriate fix, which I'm going to submit with the whole patch series, once I have interleaved DMA transfers properly sorted out (hopefully soon). Or maybe should I post this patch with fix, immediately as a reply to the already sent one? What do you prefer?

I see. Well in the case of cyclic transfers it looks like this is enough
(I don't have any way to test interleaved/SG transfers) so maybe
maintainers can take this now as it is ready and fixes cyclic
transfers, so when the interleaved transfers are ready you can
improve these functions with a series on top of it?

Thanks,
Miquèl
Jan Kuliga Dec. 4, 2023, 9:34 a.m. UTC | #4
Hi Miquel,

On 30.11.2023 20:23, Miquel Raynal wrote:
> Hi Jan,
> 
> jankul@alatek.krakow.pl wrote on Thu, 30 Nov 2023 20:06:51 +0100:
> 
>> Hi,
>>
>> On 30.11.2023 18:28, Lizhi Hou wrote:
>>> Added Jan Kuliga who submitted a similar change.
>>>   
>> Thanks for CC'ing me to the other patchset. I'm currently working on interleaved-DMA transfers implementation for XDMA. While testing it, I've come across a flaw in mine patch you mentioned here (and it also exists in the Miquel's patch).
>>
>>> https://lore.kernel.org/dmaengine/20231124192524.134989-1-jankul@alatek.krakow.pl/T/#m20c1ca4bba291f6ca07a8e5fbcaeed9fd0a6f008
>>>
>>> Thanks,
>>>
>>> Lizhi
>>>
>>> On 11/30/23 03:13, Miquel Raynal wrote:
>>>> [...]
>>>> +    /* clear run stop bit to prevent any further auto-triggering */
>>>> +    ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
>>>> +               CHAN_CTRL_RUN_STOP);
>>>> +    if (ret)
>>>> +        return ret;
>>
>> Shouldn't status register be cleared prior to using it next time? It can be cleared-on-read by doing a read from a separate register (offset 0x44).
>>>> [...]
>>>> +    if (desc) {
>>>> +        dma_cookie_complete(&desc->vdesc.tx);
>> Prior to a call to vchan_terminate_vdesc(), the vd node has to be deleted from vc.desc_issued list. Otherwise, if there is more than one descriptor present on that list, its link with list's head is going to be lost and freeing resources associated with it will become impossible (doing so results in dma_pool_destroy() failure). I noticed it when I was playing with a large number of interleaved DMA TXs.
>>>> +        vchan_terminate_vdesc(&desc->vdesc);
>>>> +    }
>>>> [...]
>>
>> I have already prepared a patch with an appropriate fix, which I'm going to submit with the whole patch series, once I have interleaved DMA transfers properly sorted out (hopefully soon). Or maybe should I post this patch with fix, immediately as a reply to the already sent one? What do you prefer?
> 
> I see. Well in the case of cyclic transfers it looks like this is enough
> (I don't have any way to test interleaved/SG transfers) so maybe
> maintainers can take this now as it is ready and fixes cyclic
> transfers, so when the interleaved transfers are ready you can
> improve these functions with a series on top of it?
> 
So I decided to base my new patchset on my previous one, as I haven't 
seen any ack from any maintainer yet on either my patchset or yours. I'm 
going to submit it this week.

This specific commit of yours (PATCH 4/4) basically does the same thing 
as my patch, so there will be no difference in functionality, i.e. 
it will also fix cyclic transfers.

> Thanks,
> Miquèl
Thanks,
Jan
Miquel Raynal Dec. 4, 2023, 11:02 a.m. UTC | #5
Hi Jan,

> >>>> [...]

Not related, but if you could fix your mailer, it is a bit hard to
track your answers.

> >>
> >> I have already prepared a patch with an appropriate fix, which I'm going to submit with the whole patch series, once I have interleaved DMA transfers properly sorted out (hopefully soon). Or maybe should I post this patch with fix, immediately as a reply to the already sent one? What do you prefer?
> > 
> > I see. Well in the case of cyclic transfers it looks like this is enough
> > (I don't have any way to test interleaved/SG transfers) so maybe
> > maintainers can take this now as it is ready and fixes cyclic
> > transfers, so when the interleaved transfers are ready you can
> > improve these functions with a series on top of it?
> > 
> So I decided to base my new patchset on my previous one, as I haven't seen any ack from any maintainer yet on both mine and your patchset. I'm going to submit it this week.

Well, the difference between the two approaches is that I am fixing
something upstream, and you're adding a new feature, which is not
ready yet. I don't mind about using your patch though, I just want
upstream to be fixed.

> This specific commit of yours (PATCH 4/4) basically does the same thing as mine patch, so there will be no difference in its functionality, i.e. it will also fix cyclic transfers.

Thanks,
Miquèl
Jan Kuliga Dec. 4, 2023, 1:13 p.m. UTC | #6
Hi Miquel,                                                                                               

On 4.12.2023 12:02, Miquel Raynal wrote:
> Hi Jan,
> 
>>>>>> [...]
> 
> Not related, but if you could fix your mailer, it is a bit hard to 
> track your answers.
> 
Thanks for pointing this out, I didn't notice it. From now on it should be okay.

>>>> 
>>>> I have already prepared a patch with an appropriate fix, which I'm going to submit with the whole patch series, once I have interleaved DMA transfers properly sorted out (hopefully soon). Or maybe should I post this patch with fix, immediately as a reply to the already sent one? What do you prefer?
>>> 
>>> I see. Well in the case of cyclic transfers it looks like this is enough
>>> (I don't have any way to test interleaved/SG transfers) so maybe
>>> maintainers can take this now as it is ready and fixes cyclic
>>> transfers, so when the interleaved transfers are ready you can
>>> improve these functions with a series on top of it?
>>> 
>> So I decided to base my new patchset on my previous one, as I 
>> haven't seen any ack from any maintainer yet on both mine and your 
>> patchset. I'm going to submit it this week.
> 
> Well, the difference between the two approaches is that I am fixing 
> something upstream, and you're adding a new feature, which is not 
> ready yet. I don't mind about using your patch though, I just want 
> upstream to be fixed.
> 
>> This specific commit of yours (PATCH 4/4) basically does the same 
>> thing as mine patch, so there will be no difference in its 
>> functionality, i.e. it will also fix cyclic transfers.
> 
Okay, so as far as I understand, you'd like me to submit my patchset on top of yours.
I guess the maintainers will be fine with that (so am I). If so, what is the proper way to post my next
patch series? Should I post it as a reply to your patchset, or as a completely new thread
with a note that it is based on this patchset? I don't want to hold back the submission
and get no feedback until your patches are upstreamed.

> Thanks,
> Miquèl

Thanks,
Jan
Miquel Raynal Dec. 4, 2023, 2:36 p.m. UTC | #7
Hi Jan,

jankul@alatek.krakow.pl wrote on Mon, 4 Dec 2023 14:13:13 +0100:

> Hi Miquel,                                                                                               
> 
> On 4.12.2023 12:02, Miquel Raynal wrote:
> > Hi Jan,
> >   
> >>>>>> [...]
> > 
> > Not related, but if you could fix your mailer, it is a bit hard to 
> > track your answers.
> >   
> Thanks for pointing this out, I didn't notice it. From now on it should be okay.
> 
> >>>> 
> >>>> I have already prepared a patch with an appropriate fix, which I'm going to submit with the whole patch series, once I have interleaved DMA transfers properly sorted out (hopefully soon). Or maybe should I post this patch with fix, immediately as a reply to the already sent one? What do you prefer?
> >>> 
> >>> I see. Well in the case of cyclic transfers it looks like this
> >>> is enough
> >>> (I don't have any way to test interleaved/SG transfers) so maybe
> >>>  maintainers can take this now as it is ready and fixes cyclic 
> >>> transfers, so when the interleaved transfers are ready you can 
> >>> improve these functions with a series on top of it?
> >>>   
> >> So I decided to base my new patchset on my previous one, as I 
> >> haven't seen any ack from any maintainer yet on both mine and your 
> >> patchset. I'm going to submit it this week.  
> > 
> > Well, the difference between the two approaches is that I am fixing 
> > something upstream, and you're adding a new feature, which is not 
> > ready yet. I don't mind about using your patch though, I just want 
> > upstream to be fixed.
> >   
> >> This specific commit of yours (PATCH 4/4) basically does the same 
> >> thing as mine patch, so there will be no difference in its 
> >> functionality, i.e. it will also fix cyclic transfers.  
> >   
> Okay, so as far as I understand, you'd like me to submit my patchset based on the top of yours.

That would be ideal, unless my series gets postponed for any reason.
I believe the maintainers will soon give their feedback, we'll do what
they prefer.

I believe Lizhi will also give a Tested-by -or not-.

> I guess maintainers will be fine with that (so do I). If so, what is the proper way to post my next
> patch series? Should I post it as a reply to your patchset, or as a completely new thread
> with a information that it is based on this patchset?

You can definitely send an individual patchset and just point out that
it applies on top of the few fixes I sent.

> I don't want to wait with submission
> without getting any feedback until your patches are going to be upstreamed.

Of course.

Thanks,
Miquèl
Lizhi Hou Dec. 4, 2023, 4:41 p.m. UTC | #8
On 12/4/23 06:36, Miquel Raynal wrote:
> Hi Jan,
>
> jankul@alatek.krakow.pl wrote on Mon, 4 Dec 2023 14:13:13 +0100:
>
>> Hi Miquel,
>>
>> On 4.12.2023 12:02, Miquel Raynal wrote:
>>> Hi Jan,
>>>    
>>>>>>>> [...]
>>> Not related, but if you could fix your mailer, it is a bit hard to
>>> track your answers.
>>>    
>> Thanks for pointing this out, I didn't notice it. From now on it should be okay.
>>
>>>>>> I have already prepared a patch with an appropriate fix, which I'm going to submit with the whole patch series, once I have interleaved DMA transfers properly sorted out (hopefully soon). Or maybe should I post this patch with fix, immediately as a reply to the already sent one? What do you prefer?
>>>>> I see. Well in the case of cyclic transfers it looks like this
>>>>> is enough
>>>>> (I don't have any way to test interleaved/SG transfers) so maybe
>>>>>   maintainers can take this now as it is ready and fixes cyclic
>>>>> transfers, so when the interleaved transfers are ready you can
>>>>> improve these functions with a series on top of it?
>>>>>    
>>>> So I decided to base my new patchset on my previous one, as I
>>>> haven't seen any ack from any maintainer yet on both mine and your
>>>> patchset. I'm going to submit it this week.
>>> Well, the difference between the two approaches is that I am fixing
>>> something upstream, and you're adding a new feature, which is not
>>> ready yet. I don't mind about using your patch though, I just want
>>> upstream to be fixed.
>>>    
>>>> This specific commit of yours (PATCH 4/4) basically does the same
>>>> thing as mine patch, so there will be no difference in its
>>>> functionality, i.e. it will also fix cyclic transfers.
>>>    
>> Okay, so as far as I understand, you'd like me to submit my patchset based on the top of yours.
> That would be ideal, unless my series get postponed for any reason.
> I believe the maintainers will soon give their feedback, we'll do what
> they prefer.
>
> I believe Lizhi will also give a Tested-by -or not-.

Yes, I verified this patch set with the sg list test and it passed.

Tested-by: Lizhi Hou <lizhi.hou@amd.com>

>
>> I guess maintainers will be fine with that (so do I). If so, what is the proper way to post my next
>> patch series? Should I post it as a reply to your patchset, or as a completely new thread
>> with a information that it is based on this patchset?
> You can definitely send an individual patchset and just point out that
> it applies on top of the few fixes I sent.
>
>> I don't want to wait with submission
>> without getting any feedback until your patches are going to be upstreamed.
> Of course.
>
> Thanks,
> Miquèl
Jan Kuliga Dec. 8, 2023, 2:27 p.m. UTC | #9
Hi,

Here [1] you can find my new patchset, based on Miquel's one, as we agreed earlier this week. As my patches touch some common driver code, I'd appreciate if you could run them against your testcases and leave some feedback.

Thanks,
Jan

[1] https://lore.kernel.org/dmaengine/20231208134838.49500-1-jankul@alatek.krakow.pl/T/#t 

On 4.12.2023 17:41, Lizhi Hou wrote:
> 
> On 12/4/23 06:36, Miquel Raynal wrote:
>> Hi Jan,
>>
>> jankul@alatek.krakow.pl wrote on Mon, 4 Dec 2023 14:13:13 +0100:
>>
>>> Hi Miquel,
>>>
>>> On 4.12.2023 12:02, Miquel Raynal wrote:
>>>> Hi Jan,
>>>>   
>>>>>>>>> [...]
>>>> Not related, but if you could fix your mailer, it is a bit hard to
>>>> track your answers.
>>>>    
>>> Thanks for pointing this out, I didn't notice it. From now on it should be okay.
>>>
>>>>>>> I have already prepared a patch with an appropriate fix, which I'm going to submit with the whole patch series, once I have interleaved DMA transfers properly sorted out (hopefully soon). Or maybe should I post this patch with fix, immediately as a reply to the already sent one? What do you prefer?
>>>>>> I see. Well in the case of cyclic transfers it looks like this
>>>>>> is enough
>>>>>> (I don't have any way to test interleaved/SG transfers) so maybe
>>>>>>   maintainers can take this now as it is ready and fixes cyclic
>>>>>> transfers, so when the interleaved transfers are ready you can
>>>>>> improve these functions with a series on top of it?
>>>>>>    
>>>>> So I decided to base my new patchset on my previous one, as I
>>>>> haven't seen any ack from any maintainer yet on both mine and your
>>>>> patchset. I'm going to submit it this week.
>>>> Well, the difference between the two approaches is that I am fixing
>>>> something upstream, and you're adding a new feature, which is not
>>>> ready yet. I don't mind about using your patch though, I just want
>>>> upstream to be fixed.
>>>>   
>>>>> This specific commit of yours (PATCH 4/4) basically does the same
>>>>> thing as mine patch, so there will be no difference in its
>>>>> functionality, i.e. it will also fix cyclic transfers.
>>>>    
>>> Okay, so as far as I understand, you'd like me to submit my patchset based on the top of yours.
>> That would be ideal, unless my series get postponed for any reason.
>> I believe the maintainers will soon give their feedback, we'll do what
>> they prefer.
>>
>> I believe Lizhi will also give a Tested-by -or not-.
> 
> Yes, I verified this patch set for sg list test and it passed.
> 
> Tested-by: Lizhi Hou <lizhi.hou@amd.com>
> 
>>
>>> I guess maintainers will be fine with that (so do I). If so, what is the proper way to post my next
>>> patch series? Should I post it as a reply to your patchset, or as a completely new thread
>>> with a information that it is based on this patchset?
>> You can definitely send an individual patchset and just point out that
>> it applies on top of the few fixes I sent.
>>
>>> I don't want to wait with submission
>>> without getting any feedback until your patches are going to be upstreamed.
>> Of course.
>>
>> Thanks,
>> Miquèl
diff mbox series

Patch

diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index e931ff42209c..290bb5d2d1e2 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -371,6 +371,31 @@  static int xdma_xfer_start(struct xdma_chan *xchan)
 		return ret;
 
 	xchan->busy = true;
+
+	return 0;
+}
+
+/**
+ * xdma_xfer_stop - Stop DMA transfer
+ * @xchan: DMA channel pointer
+ */
+static int xdma_xfer_stop(struct xdma_chan *xchan)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
+	struct xdma_device *xdev = xchan->xdev_hdl;
+	int ret;
+
+	if (!vd || !xchan->busy)
+		return -EINVAL;
+
+	/* clear run stop bit to prevent any further auto-triggering */
+	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+			   CHAN_CTRL_RUN_STOP);
+	if (ret)
+		return ret;
+
+	xchan->busy = false;
+
 	return 0;
 }
 
@@ -475,6 +500,47 @@  static void xdma_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
 }
 
+/**
+ * xdma_terminate_all - Terminate all transactions
+ * @chan: DMA channel pointer
+ */
+static int xdma_terminate_all(struct dma_chan *chan)
+{
+	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+	struct xdma_desc *desc = NULL;
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+	xdma_xfer_stop(xdma_chan);
+
+	vd = vchan_next_desc(&xdma_chan->vchan);
+	if (vd)
+		desc = to_xdma_desc(vd);
+	if (desc) {
+		dma_cookie_complete(&desc->vdesc.tx);
+		vchan_terminate_vdesc(&desc->vdesc);
+	}
+
+	vchan_get_all_descriptors(&xdma_chan->vchan, &head);
+	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+	vchan_dma_desc_free_list(&xdma_chan->vchan, &head);
+
+	return 0;
+}
+
+/**
+ * xdma_synchronize - Synchronize terminated transactions
+ * @chan: DMA channel pointer
+ */
+static void xdma_synchronize(struct dma_chan *chan)
+{
+	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+
+	vchan_synchronize(&xdma_chan->vchan);
+}
+
 /**
  * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
  * @chan: DMA channel pointer
@@ -1088,6 +1154,8 @@  static int xdma_probe(struct platform_device *pdev)
 	xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
 	xdev->dma_dev.device_config = xdma_device_config;
 	xdev->dma_dev.device_issue_pending = xdma_issue_pending;
+	xdev->dma_dev.device_terminate_all = xdma_terminate_all;
+	xdev->dma_dev.device_synchronize = xdma_synchronize;
 	xdev->dma_dev.filter.map = pdata->device_map;
 	xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
 	xdev->dma_dev.filter.fn = xdma_filter_fn;