
[9/9] dmaengine: imx-sdma: Fix memory leak

Message ID: 20191216105328.15198-10-s.hauer@pengutronix.de (mailing list archive)
State: Accepted
Series: dmaengine: virt-dma related fixes

Commit Message

Sascha Hauer Dec. 16, 2019, 10:53 a.m. UTC
The current descriptor is not on any list of the virtual DMA channel.
When sdma_terminate_all() is called while a descriptor is in flight,
that descriptor is never freed. We have to call vchan_terminate_vdesc()
on this descriptor to re-add it to the virt-dma lists so it gets freed.
Now that the currently running descriptor is freed as well, we can (and
actually have to) remove the current descriptor from its list in the
cyclic case, too.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 drivers/dma/imx-sdma.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)
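
For reference, the terminate path that results from this patch looks
roughly like the sketch below (condensed from the diff with comments
added to spell out why vchan_terminate_vdesc() is needed; it is not a
verbatim copy of the driver):

static int sdma_terminate_all(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	unsigned long flags;

	/* Take the vchan lock so this cannot race with sdma_start_desc() */
	spin_lock_irqsave(&sdmac->vc.lock, flags);

	sdma_disable_channel(chan);

	if (sdmac->desc) {
		/*
		 * The in-flight descriptor was removed from the vchan
		 * lists in sdma_start_desc(), so the terminate worker's
		 * vchan_get_all_descriptors() will never see it. Hand it
		 * back to the virt-dma core so it is freed rather than
		 * leaked.
		 */
		vchan_terminate_vdesc(&sdmac->desc->vd);
		sdmac->desc = NULL;
		schedule_work(&sdmac->terminate_worker);
	}

	spin_unlock_irqrestore(&sdmac->vc.lock, flags);

	return 0;
}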

Comments

Robin Gong Dec. 18, 2019, 10:09 a.m. UTC | #1
On 2019/12/16 18:53 Sascha Hauer <s.hauer@pengutronix.de> wrote:
> The current descriptor is not on any list of the virtual DMA channel.
> When sdma_terminate_all() is called while a descriptor is in flight,
> that descriptor is never freed. We have to call vchan_terminate_vdesc()
> on this descriptor to re-add it to the virt-dma lists so it gets freed.
> Now that the currently running descriptor is freed as well, we can (and
> actually have to) remove the current descriptor from its list in the
> cyclic case, too.
> 
> Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Thanks Sascha. Now that this patch set is applied, it would be good to
document or comment somewhere that vchan_terminate_vdesc() should be
called when a (cyclic) channel is terminated, unless the DMA controller
driver frees all descriptors by itself.
Reviewed-by: Robin Gong <yibin.gong@nxp.com>
Tested-by: Robin Gong <yibin.gong@nxp.com>
> ---
>  drivers/dma/imx-sdma.c | 19 +++++++++++--------
>  1 file changed, 11 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
> index 99dbfd9039cf..066b21a32232 100644
> --- a/drivers/dma/imx-sdma.c
> +++ b/drivers/dma/imx-sdma.c
> @@ -760,12 +760,8 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
>  		return;
>  	}
>  	sdmac->desc = desc = to_sdma_desc(&vd->tx);
> -	/*
> -	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
> -	 * the desc allocated will never be freed in vchan_dma_desc_free_list
> -	 */
> -	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
> -		list_del(&vd->node);
> +
> +	list_del(&vd->node);
> 
>  	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
>  	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
> @@ -1071,7 +1067,6 @@ static void sdma_channel_terminate_work(struct work_struct *work)
> 
>  	spin_lock_irqsave(&sdmac->vc.lock, flags);
>  	vchan_get_all_descriptors(&sdmac->vc, &head);
> -	sdmac->desc = NULL;
>  	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
>  	vchan_dma_desc_free_list(&sdmac->vc, &head);
>  	sdmac->context_loaded = false;
> @@ -1080,11 +1075,19 @@ static void sdma_channel_terminate_work(struct work_struct *work)
>  static int sdma_terminate_all(struct dma_chan *chan)
>  {
>  	struct sdma_channel *sdmac = to_sdma_chan(chan);
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&sdmac->vc.lock, flags);
> 
>  	sdma_disable_channel(chan);
> 
> -	if (sdmac->desc)
> +	if (sdmac->desc) {
> +		vchan_terminate_vdesc(&sdmac->desc->vd);
> +		sdmac->desc = NULL;
>  		schedule_work(&sdmac->terminate_worker);
> +	}
> +
> +	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
> 
>  	return 0;
>  }
> --
> 2.24.0
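
As a rough illustration of the kind of note Robin suggests (the wording
here is only an editor's sketch, not text taken from virt-dma), the rule
could be captured as a comment next to a driver's terminate path:

	/*
	 * virt-dma note: a descriptor that has already been taken off the
	 * vchan lists and is currently executing (including the cyclic
	 * case) must be handed back with vchan_terminate_vdesc() from
	 * device_terminate_all(), unless the driver frees that descriptor
	 * itself. vchan_get_all_descriptors() only walks the vchan lists,
	 * so a descriptor that is no longer on any of them would otherwise
	 * be leaked.
	 */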

Patch

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 99dbfd9039cf..066b21a32232 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -760,12 +760,8 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
 		return;
 	}
 	sdmac->desc = desc = to_sdma_desc(&vd->tx);
-	/*
-	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
-	 * the desc allocated will never be freed in vchan_dma_desc_free_list
-	 */
-	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
-		list_del(&vd->node);
+
+	list_del(&vd->node);
 
 	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
 	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
@@ -1071,7 +1067,6 @@ static void sdma_channel_terminate_work(struct work_struct *work)
 
 	spin_lock_irqsave(&sdmac->vc.lock, flags);
 	vchan_get_all_descriptors(&sdmac->vc, &head);
-	sdmac->desc = NULL;
 	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 	vchan_dma_desc_free_list(&sdmac->vc, &head);
 	sdmac->context_loaded = false;
@@ -1080,11 +1075,19 @@ static void sdma_channel_terminate_work(struct work_struct *work)
 static int sdma_terminate_all(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&sdmac->vc.lock, flags);
 
 	sdma_disable_channel(chan);
 
-	if (sdmac->desc)
+	if (sdmac->desc) {
+		vchan_terminate_vdesc(&sdmac->desc->vd);
+		sdmac->desc = NULL;
 		schedule_work(&sdmac->terminate_worker);
+	}
+
+	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 
 	return 0;
 }