
[v2,3/4] dmaengine: imx-sdma: implement channel termination via worker

Message ID 20180914170615.12659-3-l.stach@pengutronix.de (mailing list archive)
State Superseded
Series [v2,1/4] Revert "dmaengine: imx-sdma: Use GFP_NOWAIT for dma allocations"

Commit Message

Lucas Stach Sept. 14, 2018, 5:06 p.m. UTC
The dmaengine documentation states that device_terminate_all may be
asynchronous and need not wait until the active transfers have stopped.

This allows us to move most of the functionality currently implemented
in the sdma channel termination function to run in a worker, outside
of any atomic context. Moving this out of atomic context has two
benefits: we can now sleep while waiting for the channel to terminate
instead of busy waiting, and the freeing of the dma descriptors happens
with IRQs enabled, getting rid of a warning in the dma mapping code.

As the termination is now async, we need to implement the
device_synchronize dmaengine callback, which simply waits for the
worker to finish its execution.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
---
v2: Keep vchan_get_all_descriptors in the terminate_all call, so the
    worker doesn't corrupt the next transfer if that's already in
    the setup process.
---
 drivers/dma/imx-sdma.c | 42 ++++++++++++++++++++++++++++++++----------
 1 file changed, 32 insertions(+), 10 deletions(-)
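
For reference, a minimal client-side sketch of how the now-asynchronous termination pairs with the new synchronize callback. dmaengine_terminate_async() and dmaengine_synchronize() are the generic dmaengine client wrappers that dispatch to device_terminate_all and device_synchronize; the helper around them is hypothetical and not part of the patch:

#include <linux/dmaengine.h>

/* Hypothetical client helper, for illustration only. */
static void example_stop_channel(struct dma_chan *chan)
{
	/*
	 * May be called from atomic context: with this patch the SDMA
	 * driver only disables the channel and schedules the
	 * termination worker here.
	 */
	dmaengine_terminate_async(chan);

	/*
	 * Must be called from sleepable context before buffers are
	 * reused or freed: waits until the termination worker has run
	 * and the deferred descriptors have been freed.
	 */
	dmaengine_synchronize(chan);
}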

Comments

Robin Gong Sept. 17, 2018, 7:51 a.m. UTC | #1
> -----Original Message-----
> From: Lucas Stach <l.stach@pengutronix.de>
> Sent: September 15, 2018 1:06
> To: Vinod Koul <vkoul@kernel.org>
> Cc: dmaengine@vger.kernel.org; linux-kernel@vger.kernel.org; Robin Gong
> <yibin.gong@nxp.com>; dl-linux-imx <linux-imx@nxp.com>;
> kernel@pengutronix.de; patchwork-lst@pengutronix.de
> Subject: [PATCH v2 3/4] dmaengine: imx-sdma: implement channel termination
> via worker
> 
> The dmaengine documentation states that device_terminate_all may be
> asynchronous and need not wait for the active transfers have stopped.
> 
> This allows us to move most of the functionality currently implemented in the
> sdma channel termination function to run in a worker, outside of any atomic
> context. Moving this out of atomic context has two
> benefits: we can now sleep while waiting for the channel to terminate, instead
> of busy waiting and the freeing of the dma descriptors happens with IRQs
> enabled, getting rid of a warning in the dma mapping code.
> 
> As the termination is now asnc, we need to implement the device_synchronize
> dma engine function, which simply waits for the worker to finish its execution.
> 
> Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
> ---
> v2: Keep vchan_get_all_descriptors in the terminate_all call, so the
>     worker doesn't corrupt the next transfer if that's already in
>     the setup process.
> ---
>  drivers/dma/imx-sdma.c | 42 ++++++++++++++++++++++++++++++++----------
>  1 file changed, 32 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index
> 8d2fec8b16cc..da41e8fbf151 100644
> --- a/drivers/dma/imx-sdma.c
> +++ b/drivers/dma/imx-sdma.c
> @@ -32,6 +32,7 @@
>  #include <linux/of_address.h>
>  #include <linux/of_device.h>
>  #include <linux/of_dma.h>
> +#include <linux/workqueue.h>
> 
>  #include <asm/irq.h>
>  #include <linux/platform_data/dma-imx-sdma.h>
> @@ -375,6 +376,8 @@ struct sdma_channel {
>  	u32				shp_addr, per_addr;
>  	enum dma_status			status;
>  	struct imx_dma_data		data;
> +	struct work_struct		terminate_worker;
> +	struct list_head		deferred_desc_list;
>  };
> 
>  #define IMX_DMA_SG_LOOP		BIT(0)
> @@ -1025,31 +1028,47 @@ static int sdma_disable_channel(struct dma_chan
> *chan)
> 
>  	return 0;
>  }
> +static void sdma_channel_terminate(struct work_struct *work) {
> +	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
> +						  terminate_worker);
> +
> +	/*
> +	 * According to NXP R&D team a delay of one BD SDMA cost time
> +	 * (maximum is 1ms) should be added after disable of the channel
> +	 * bit, to ensure SDMA core has really been stopped after SDMA
> +	 * clients call .device_terminate_all.
> +	 */
> +	usleep_range(1000, 2000);
> +
> +	vchan_dma_desc_free_list(&sdmac->vc, &sdmac->deferred_desc_list);
> +	INIT_LIST_HEAD(&sdmac->deferred_desc_list);
> +}
> 
>  static int sdma_disable_channel_with_delay(struct dma_chan *chan)  {
>  	struct sdma_channel *sdmac = to_sdma_chan(chan);
>  	unsigned long flags;
> -	LIST_HEAD(head);
> 
>  	sdma_disable_channel(chan);
> +
>  	spin_lock_irqsave(&sdmac->vc.lock, flags);
> -	vchan_get_all_descriptors(&sdmac->vc, &head);
> +	vchan_get_all_descriptors(&sdmac->vc, &sdmac->deferred_desc_list);
>  	sdmac->desc = NULL;
>  	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
> -	vchan_dma_desc_free_list(&sdmac->vc, &head);
> 
> -	/*
> -	 * According to NXP R&D team a delay of one BD SDMA cost time
> -	 * (maximum is 1ms) should be added after disable of the channel
> -	 * bit, to ensure SDMA core has really been stopped after SDMA
> -	 * clients call .device_terminate_all.
> -	 */
> -	mdelay(1);
> +	schedule_work(&sdmac->terminate_worker);
> 
>  	return 0;
>  }
> 
> +static void sdma_channel_synchronize(struct dma_chan *chan) {
> +	struct sdma_channel *sdmac = to_sdma_chan(chan);
> +
Please add the line below to make sure the internal virt-dma tasklet is killed too (see the sketch after this quoted patch):
tasklet_kill(&sdmac->vc.task);
> +	flush_work(&sdmac->terminate_worker);
> +}
> +
>  static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
> {
>  	struct sdma_engine *sdma = sdmac->sdma; @@ -1993,6 +2012,8 @@
> static int sdma_probe(struct platform_device *pdev)
> 
>  		sdmac->channel = i;
>  		sdmac->vc.desc_free = sdma_desc_free;
> +		INIT_WORK(&sdmac->terminate_worker,
> sdma_channel_terminate);
> +		INIT_LIST_HEAD(&sdmac->deferred_desc_list);
>  		/*
>  		 * Add the channel to the DMAC list. Do not add channel 0 though
>  		 * because we need it internally in the SDMA driver. This also means
> @@ -2045,6 +2066,7 @@ static int sdma_probe(struct platform_device
> *pdev)
>  	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
>  	sdma->dma_device.device_config = sdma_config;
>  	sdma->dma_device.device_terminate_all =
> sdma_disable_channel_with_delay;
> +	sdma->dma_device.device_synchronize = sdma_channel_synchronize;
>  	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
>  	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
>  	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
> --
> 2.19.0
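
Applied to the new callback, Robin's suggestion would amount to something like the following sketch (not part of the posted v2 patch; whether it is safe here is what the rest of the thread debates):

/* Sketch of Robin's suggested addition; not part of the v2 patch. */
static void sdma_channel_synchronize(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);

	/* Wait for the virt-dma completion tasklet to stop running. */
	tasklet_kill(&sdmac->vc.task);

	flush_work(&sdmac->terminate_worker);
}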
Lucas Stach Sept. 17, 2018, 11:03 a.m. UTC | #2
Hi Robin,

Am Montag, den 17.09.2018, 07:51 +0000 schrieb Robin Gong:
> > -----Original Message-----
> > > > From: Lucas Stach <l.stach@pengutronix.de>
> > Sent: September 15, 2018 1:06
> > > > To: Vinod Koul <vkoul@kernel.org>
> > > > > > Cc: dmaengine@vger.kernel.org; linux-kernel@vger.kernel.org; Robin Gong
> > > > > > <yibin.gong@nxp.com>; dl-linux-imx <linux-imx@nxp.com>;
> > kernel@pengutronix.de; patchwork-lst@pengutronix.de
> > Subject: [PATCH v2 3/4] dmaengine: imx-sdma: implement channel termination
> > via worker
> > 
> > The dmaengine documentation states that device_terminate_all may be
> > asynchronous and need not wait for the active transfers have stopped.
> > 
> > This allows us to move most of the functionality currently implemented in the
> > sdma channel termination function to run in a worker, outside of any atomic
> > context. Moving this out of atomic context has two
> > benefits: we can now sleep while waiting for the channel to terminate, instead
> > of busy waiting and the freeing of the dma descriptors happens with IRQs
> > enabled, getting rid of a warning in the dma mapping code.
> > 
> > As the termination is now asnc, we need to implement the device_synchronize
> > dma engine function, which simply waits for the worker to finish its execution.
> > 
> > > > Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
> > ---
> > v2: Keep vchan_get_all_descriptors in the terminate_all call, so the
> >     worker doesn't corrupt the next transfer if that's already in
> >     the setup process.
> > ---
> >  drivers/dma/imx-sdma.c | 42 ++++++++++++++++++++++++++++++++----------
> >  1 file changed, 32 insertions(+), 10 deletions(-)
> > 
> > diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index
> > 8d2fec8b16cc..da41e8fbf151 100644
> > --- a/drivers/dma/imx-sdma.c
> > +++ b/drivers/dma/imx-sdma.c
> > @@ -32,6 +32,7 @@
> >  #include <linux/of_address.h>
> >  #include <linux/of_device.h>
> >  #include <linux/of_dma.h>
> > +#include <linux/workqueue.h>
> > 
> >  #include <asm/irq.h>
> >  #include <linux/platform_data/dma-imx-sdma.h>
> > @@ -375,6 +376,8 @@ struct sdma_channel {
> > > > > >  	u32				shp_addr, per_addr;
> > > > > >  	enum dma_status			status;
> > > > > >  	struct imx_dma_data		data;
> > > > > > +	struct work_struct		terminate_worker;
> > > > > > +	struct list_head		deferred_desc_list;
> >  };
> > 
> > > >  #define IMX_DMA_SG_LOOP		BIT(0)
> > @@ -1025,31 +1028,47 @@ static int sdma_disable_channel(struct dma_chan
> > *chan)
> > 
> > > >  	return 0;
> >  }
> > +static void sdma_channel_terminate(struct work_struct *work) {
> > > > +	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
> > > > +						  terminate_worker);
> > +
> > > > +	/*
> > > > +	 * According to NXP R&D team a delay of one BD SDMA cost time
> > > > +	 * (maximum is 1ms) should be added after disable of the channel
> > > > +	 * bit, to ensure SDMA core has really been stopped after SDMA
> > > > +	 * clients call .device_terminate_all.
> > > > +	 */
> > > > +	usleep_range(1000, 2000);
> > +
> > > > +	vchan_dma_desc_free_list(&sdmac->vc, &sdmac->deferred_desc_list);
> > > > +	INIT_LIST_HEAD(&sdmac->deferred_desc_list);
> > +}
> > 
> >  static int sdma_disable_channel_with_delay(struct dma_chan *chan)  {
> > > >  	struct sdma_channel *sdmac = to_sdma_chan(chan);
> > > >  	unsigned long flags;
> > > > -	LIST_HEAD(head);
> > 
> > > >  	sdma_disable_channel(chan);
> > +
> > > >  	spin_lock_irqsave(&sdmac->vc.lock, flags);
> > > > -	vchan_get_all_descriptors(&sdmac->vc, &head);
> > > > +	vchan_get_all_descriptors(&sdmac->vc, &sdmac->deferred_desc_list);
> > > >  	sdmac->desc = NULL;
> > > >  	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
> > > > -	vchan_dma_desc_free_list(&sdmac->vc, &head);
> > 
> > > > -	/*
> > > > -	 * According to NXP R&D team a delay of one BD SDMA cost time
> > > > -	 * (maximum is 1ms) should be added after disable of the channel
> > > > -	 * bit, to ensure SDMA core has really been stopped after SDMA
> > > > -	 * clients call .device_terminate_all.
> > > > -	 */
> > > > -	mdelay(1);
> > > > +	schedule_work(&sdmac->terminate_worker);
> > 
> > > >  	return 0;
> >  }
> > 
> > +static void sdma_channel_synchronize(struct dma_chan *chan) {
> > > > +	struct sdma_channel *sdmac = to_sdma_chan(chan);
> > +
> 
> Please add the below to make sure internal virt dma tasklet killed too.
> tasklet_kill(&sdmac->vc.task);

This is not possible. We cannot do anything that alters the current
state of the VC in the synchronize call. Otherwise we could run into
the same class of issues as v1 had, corrupting the state of the next
transfer being set up.

Regards,
Lucas
Robin Gong Sept. 19, 2018, 9:20 a.m. UTC | #3
> -----Original Message-----
> From: Lucas Stach <l.stach@pengutronix.de>
> Sent: September 17, 2018 19:04
> To: Robin Gong <yibin.gong@nxp.com>; Vinod Koul <vkoul@kernel.org>
> Cc: dmaengine@vger.kernel.org; linux-kernel@vger.kernel.org; dl-linux-imx
> <linux-imx@nxp.com>; kernel@pengutronix.de;
> patchwork-lst@pengutronix.de
> Subject: Re: [PATCH v2 3/4] dmaengine: imx-sdma: implement channel
> termination via worker
> 
> Hi Robin,
> 
> Am Montag, den 17.09.2018, 07:51 +0000 schrieb Robin Gong:
> > > -----Original Message-----
> > > > > From: Lucas Stach <l.stach@pengutronix.de>
> > > Sent: September 15, 2018 1:06
> > > > > To: Vinod Koul <vkoul@kernel.org>
> > > > > > > Cc: dmaengine@vger.kernel.org; linux-kernel@vger.kernel.org;
> > > > > > > Robin Gong <yibin.gong@nxp.com>; dl-linux-imx
> > > > > > > <linux-imx@nxp.com>;
> > > kernel@pengutronix.de; patchwork-lst@pengutronix.de
> > > Subject: [PATCH v2 3/4] dmaengine: imx-sdma: implement channel
> > > termination via worker
> > >
> > > The dmaengine documentation states that device_terminate_all may be
> > > asynchronous and need not wait for the active transfers have stopped.
> > >
> > > This allows us to move most of the functionality currently
> > > implemented in the sdma channel termination function to run in a
> > > worker, outside of any atomic context. Moving this out of atomic
> > > context has two
> > > benefits: we can now sleep while waiting for the channel to
> > > terminate, instead of busy waiting and the freeing of the dma
> > > descriptors happens with IRQs enabled, getting rid of a warning in the dma
> mapping code.
> > >
> > > As the termination is now asnc, we need to implement the
> > > device_synchronize dma engine function, which simply waits for the worker
> to finish its execution.
> > >
> > > > > Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
> > > ---
> > > v2: Keep vchan_get_all_descriptors in the terminate_all call, so the
> > >     worker doesn't corrupt the next transfer if that's already in
> > >     the setup process.
> > > ---
> > >  drivers/dma/imx-sdma.c | 42
> > > ++++++++++++++++++++++++++++++++----------
> > >  1 file changed, 32 insertions(+), 10 deletions(-)
> > >
> > > diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index
> > > 8d2fec8b16cc..da41e8fbf151 100644
> > > --- a/drivers/dma/imx-sdma.c
> > > +++ b/drivers/dma/imx-sdma.c
> > > @@ -32,6 +32,7 @@
> > >  #include <linux/of_address.h>
> > >  #include <linux/of_device.h>
> > >  #include <linux/of_dma.h>
> > > +#include <linux/workqueue.h>
> > >
> > >  #include <asm/irq.h>
> > >  #include <linux/platform_data/dma-imx-sdma.h>
> > > @@ -375,6 +376,8 @@ struct sdma_channel {
> > > > > > >  	u32				shp_addr, per_addr;
> > > > > > >  	enum dma_status			status;
> > > > > > >  	struct imx_dma_data		data;
> > > > > > > +	struct work_struct		terminate_worker;
> > > > > > > +	struct list_head		deferred_desc_list;
> > >  };
> > >
> > > > >  #define IMX_DMA_SG_LOOP		BIT(0)
> > > @@ -1025,31 +1028,47 @@ static int sdma_disable_channel(struct
> > > dma_chan
> > > *chan)
> > >
> > > > >  	return 0;
> > >  }
> > > +static void sdma_channel_terminate(struct work_struct *work) {
> > > > > +	struct sdma_channel *sdmac = container_of(work, struct
> sdma_channel,
> > > > > +						  terminate_worker);
> > > +
> > > > > +	/*
> > > > > +	 * According to NXP R&D team a delay of one BD SDMA cost time
> > > > > +	 * (maximum is 1ms) should be added after disable of the channel
> > > > > +	 * bit, to ensure SDMA core has really been stopped after SDMA
> > > > > +	 * clients call .device_terminate_all.
> > > > > +	 */
> > > > > +	usleep_range(1000, 2000);
> > > +
> > > > > +	vchan_dma_desc_free_list(&sdmac->vc,
> &sdmac->deferred_desc_list);
> > > > > +	INIT_LIST_HEAD(&sdmac->deferred_desc_list);
> > > +}
> > >
> > >  static int sdma_disable_channel_with_delay(struct dma_chan *chan)
> > > {
> > > > >  	struct sdma_channel *sdmac = to_sdma_chan(chan);
> > > > >  	unsigned long flags;
> > > > > -	LIST_HEAD(head);
> > >
> > > > >  	sdma_disable_channel(chan);
> > > +
> > > > >  	spin_lock_irqsave(&sdmac->vc.lock, flags);
> > > > > -	vchan_get_all_descriptors(&sdmac->vc, &head);
> > > > > +	vchan_get_all_descriptors(&sdmac->vc,
> > > > > +&sdmac->deferred_desc_list);
> > > > >  	sdmac->desc = NULL;
> > > > >  	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
> > > > > -	vchan_dma_desc_free_list(&sdmac->vc, &head);
> > >
> > > > > -	/*
> > > > > -	 * According to NXP R&D team a delay of one BD SDMA cost time
> > > > > -	 * (maximum is 1ms) should be added after disable of the channel
> > > > > -	 * bit, to ensure SDMA core has really been stopped after SDMA
> > > > > -	 * clients call .device_terminate_all.
> > > > > -	 */
> > > > > -	mdelay(1);
> > > > > +	schedule_work(&sdmac->terminate_worker);
> > >
> > > > >  	return 0;
> > >  }
> > >
> > > +static void sdma_channel_synchronize(struct dma_chan *chan) {
> > > > > +	struct sdma_channel *sdmac = to_sdma_chan(chan);
> > > +
> >
> > Please add the below to make sure internal virt dma tasklet killed too.
> > tasklet_kill(&sdmac->vc.task);
> 
> This is not possible. We can not do anything that alters the current state of the
> VC in the synchronize call. Otherwise we could be running into the the same
> class of issues as the v1 had, with corrupting the state of the next set up
> transfer.
tasklet_kill never alters the current state, it just waits for the running tasklet to finish.
But here 'vchan_synchronize()' may be better than calling 'tasklet_kill' directly.
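
For context, vchan_synchronize() is the virt-dma helper that (among other things) kills the vc.task tasklet, so the variant Robin appears to have in mind would look roughly like this sketch (again, not part of the posted patch):

/* Sketch only: using the virt-dma helper instead of tasklet_kill directly. */
static void sdma_channel_synchronize(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);

	vchan_synchronize(&sdmac->vc);

	flush_work(&sdmac->terminate_worker);
}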

For the v1 issue, I investigated it today and found it is caused by the uart driver calling
flush_buffer before TX start, which terminates the dma channel directly even though the channel
has not been set up yet. But in v1 the channel was terminated in the worker instead, so when the
TX dma finished setting up the transfer and was waiting for the dma done interrupt, the worker
was scheduled and set 'sdmac->desc = NULL', which left the dma channel stopped forever once the
dma done interrupt came. The one-line change below could fix your v1 issue:

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 2f2e5af..56d542b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1054,7 +1054,9 @@ static int sdma_disable_channel_with_delay(struct dma_chan *chan)
        struct sdma_channel *sdmac = to_sdma_chan(chan);

        sdma_disable_channel(chan);
-       schedule_work(&sdmac->terminate_worker);
+
+       if (sdmac->desc)
+               schedule_work(&sdmac->terminate_worker);

        return 0;
> 
> Regards,
> Lucas

Patch

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 8d2fec8b16cc..da41e8fbf151 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -32,6 +32,7 @@ 
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
+#include <linux/workqueue.h>
 
 #include <asm/irq.h>
 #include <linux/platform_data/dma-imx-sdma.h>
@@ -375,6 +376,8 @@  struct sdma_channel {
 	u32				shp_addr, per_addr;
 	enum dma_status			status;
 	struct imx_dma_data		data;
+	struct work_struct		terminate_worker;
+	struct list_head		deferred_desc_list;
 };
 
 #define IMX_DMA_SG_LOOP		BIT(0)
@@ -1025,31 +1028,47 @@  static int sdma_disable_channel(struct dma_chan *chan)
 
 	return 0;
 }
+static void sdma_channel_terminate(struct work_struct *work)
+{
+	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
+						  terminate_worker);
+
+	/*
+	 * According to NXP R&D team a delay of one BD SDMA cost time
+	 * (maximum is 1ms) should be added after disable of the channel
+	 * bit, to ensure SDMA core has really been stopped after SDMA
+	 * clients call .device_terminate_all.
+	 */
+	usleep_range(1000, 2000);
+
+	vchan_dma_desc_free_list(&sdmac->vc, &sdmac->deferred_desc_list);
+	INIT_LIST_HEAD(&sdmac->deferred_desc_list);
+}
 
 static int sdma_disable_channel_with_delay(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	unsigned long flags;
-	LIST_HEAD(head);
 
 	sdma_disable_channel(chan);
+
 	spin_lock_irqsave(&sdmac->vc.lock, flags);
-	vchan_get_all_descriptors(&sdmac->vc, &head);
+	vchan_get_all_descriptors(&sdmac->vc, &sdmac->deferred_desc_list);
 	sdmac->desc = NULL;
 	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
-	vchan_dma_desc_free_list(&sdmac->vc, &head);
 
-	/*
-	 * According to NXP R&D team a delay of one BD SDMA cost time
-	 * (maximum is 1ms) should be added after disable of the channel
-	 * bit, to ensure SDMA core has really been stopped after SDMA
-	 * clients call .device_terminate_all.
-	 */
-	mdelay(1);
+	schedule_work(&sdmac->terminate_worker);
 
 	return 0;
 }
 
+static void sdma_channel_synchronize(struct dma_chan *chan)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+
+	flush_work(&sdmac->terminate_worker);
+}
+
 static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
 {
 	struct sdma_engine *sdma = sdmac->sdma;
@@ -1993,6 +2012,8 @@  static int sdma_probe(struct platform_device *pdev)
 
 		sdmac->channel = i;
 		sdmac->vc.desc_free = sdma_desc_free;
+		INIT_WORK(&sdmac->terminate_worker, sdma_channel_terminate);
+		INIT_LIST_HEAD(&sdmac->deferred_desc_list);
 		/*
 		 * Add the channel to the DMAC list. Do not add channel 0 though
 		 * because we need it internally in the SDMA driver. This also means
@@ -2045,6 +2066,7 @@  static int sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
 	sdma->dma_device.device_config = sdma_config;
 	sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
+	sdma->dma_device.device_synchronize = sdma_channel_synchronize;
 	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
 	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
 	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;