
[v2,2/4] dmaengine: Add STM32 DMA driver

Message ID 1444745127-1105-3-git-send-email-cedric.madianga@gmail.com (mailing list archive)
State Changes Requested

Commit Message

M'boumba Cedric Madianga Oct. 13, 2015, 2:05 p.m. UTC
This patch adds support for the STM32 DMA controller.

Signed-off-by: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
---
 drivers/dma/Kconfig     |   12 +
 drivers/dma/Makefile    |    1 +
 drivers/dma/stm32-dma.c | 1175 +++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 1188 insertions(+)
 create mode 100644 drivers/dma/stm32-dma.c

Comments

Daniel Thompson Oct. 13, 2015, 2:34 p.m. UTC | #1
On 13/10/15 15:05, M'boumba Cedric Madianga wrote:
> This patch adds support for the STM32 DMA controller.
>
> Signed-off-by: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
> ---
>   drivers/dma/Kconfig     |   12 +
>   drivers/dma/Makefile    |    1 +
>   drivers/dma/stm32-dma.c | 1175 +++++++++++++++++++++++++++++++++++++++++++++++
>   3 files changed, 1188 insertions(+)
>   create mode 100644 drivers/dma/stm32-dma.c

> diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
> new file mode 100644
> index 0000000..031bab7
> --- /dev/null
> +++ b/drivers/dma/stm32-dma.c

> +enum stm32_dma_width {
> +	STM32_DMA_BYTE,
> +	STM32_DMA_HALF_WORD,
> +	STM32_DMA_WORD,
> +};
> +
> +enum stm32_dma_burst_size {
> +	STM32_DMA_BURST_SINGLE,
> +	STM32_DMA_BURST_INCR4,
> +	STM32_DMA_BURST_INCR8,
> +	STM32_DMA_BURST_INCR16,
> +};
> +
> +enum stm32_dma_channel_id {
> +	STM32_DMA_CHANNEL0,
> +	STM32_DMA_CHANNEL1,
> +	STM32_DMA_CHANNEL2,
> +	STM32_DMA_CHANNEL3,
> +	STM32_DMA_CHANNEL4,
> +	STM32_DMA_CHANNEL5,
> +	STM32_DMA_CHANNEL6,
> +	STM32_DMA_CHANNEL7,
> +};

Why have (unused) enumerations to map numeric symbols to their own 
natural values? Using normal integers would be better.


> +enum stm32_dma_request_id {
> +	STM32_DMA_REQUEST0,
> +	STM32_DMA_REQUEST1,
> +	STM32_DMA_REQUEST2,
> +	STM32_DMA_REQUEST3,
> +	STM32_DMA_REQUEST4,
> +	STM32_DMA_REQUEST5,
> +	STM32_DMA_REQUEST6,
> +	STM32_DMA_REQUEST7,
> +};

Ditto.


> +static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
> +{
> +	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
> +
> +	dev_dbg(chan2dev(chan), "SCR:   0x%08x\n",
> +		stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)));
> +	dev_dbg(chan2dev(chan), "NDTR:  0x%08x\n",
> +		stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id)));
> +	dev_dbg(chan2dev(chan), "SPAR:  0x%08x\n",
> +		stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id)));
> +	dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n",
> +		stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id)));
> +	dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n",
> +		stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id)));
> +	dev_dbg(chan2dev(chan), "SFCR:  0x%08x\n",
> +		stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id)));
> +}

Even at dev_dbg() this looks like log spam. This debug info gets issued 
every time we set up a transfer!


> +static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
> +{
> +	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
> +	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
> +	int ret;
> +
> +	chan->config_init = false;
> +	ret = clk_prepare_enable(dmadev->clk);
> +	if (ret < 0) {
> +		dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
> +		return ret;
> +	}
> +
> +	ret = stm32_dma_disable_chan(chan);
> +
> +	return ret;
> +}

The error path here looks like it will leak clock references.


> +static int stm32_dma_probe(struct platform_device *pdev)
> +{
> +	struct stm32_dma_chan *chan;
> +	struct stm32_dma_device *dmadev;
> +	struct dma_device *dd;
> +	const struct of_device_id *match;
> +	unsigned int i;
> +	struct resource *res;
> +	int ret;
> +
> +	match = of_match_device(stm32_dma_of_match, &pdev->dev);
> +	if (!match) {
> +		dev_err(&pdev->dev, "Error: No device match found\n");
> +		return -ENODEV;
> +	}
> +
> +	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
> +	if (!dmadev)
> +		return -ENOMEM;
> +
> +	dd = &dmadev->ddev;
> +
> +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
> +	if (IS_ERR(dmadev->base))
> +		return PTR_ERR(dmadev->base);
> +
> +	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
> +	if (IS_ERR(dmadev->clk)) {
> +		dev_err(&pdev->dev, "Error: Missing controller clock\n");
> +		return PTR_ERR(dmadev->clk);
> +	}
> +
> +	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
> +						"st,mem2mem");
> +
> +	dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
> +	if (!IS_ERR(dmadev->rst)) {
> +		reset_control_assert(dmadev->rst);
> +		udelay(2);
> +		reset_control_deassert(dmadev->rst);
> +	}
> +
> +	dma_cap_set(DMA_SLAVE, dd->cap_mask);
> +	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
> +	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
> +	dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
> +	dd->device_free_chan_resources = stm32_dma_free_chan_resources;
> +	dd->device_tx_status = stm32_dma_tx_status;
> +	dd->device_issue_pending = stm32_dma_issue_pending;
> +	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
> +	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
> +	dd->device_config = stm32_dma_slave_config;
> +	dd->device_terminate_all = stm32_dma_terminate_all;
> +	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
> +		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
> +		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
> +	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
> +		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
> +		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
> +	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
> +	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
> +	dd->dev = &pdev->dev;
> +	INIT_LIST_HEAD(&dd->channels);
> +
> +	if (dmadev->mem2mem) {
> +		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
> +		dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
> +		dd->directions |= BIT(DMA_MEM_TO_MEM);
> +	}
> +
> +	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
> +		chan = &dmadev->chan[i];
> +		chan->id = i;
> +		chan->vchan.desc_free = stm32_dma_desc_free;
> +		vchan_init(&chan->vchan, dd);
> +	}
> +
> +	ret = dma_async_device_register(dd);
> +	if (ret)
> +		return ret;
> +
> +	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
> +		chan = &dmadev->chan[i];
> +		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
> +		if (!res) {
> +			ret = -EINVAL;
> +			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
> +			goto err_unregister;
> +		}
> +		chan->irq = res->start;
> +		ret = devm_request_irq(&pdev->dev, chan->irq,
> +				       stm32_dma_chan_irq, 0,
> +				       dev_name(chan2dev(chan)), chan);
> +		if (ret) {
> +			dev_err(&pdev->dev,
> +				"request_irq failed with err %d channel %d\n",
> +				ret, i);
> +			goto err_unregister;
> +		}
> +	}
> +
> +	ret = of_dma_controller_register(pdev->dev.of_node,
> +					 stm32_dma_of_xlate, dmadev);
> +	if (ret < 0) {
> +		dev_err(&pdev->dev,
> +			"STM32 DMA DMA OF registration failed %d\n", ret);
> +		goto err_unregister;
> +	}
> +
> +	platform_set_drvdata(pdev, dmadev);
> +
> +	dev_info(&pdev->dev, "STM32 DMA driver registered\n");
> +
> +	return 0;
> +
> +err_unregister:
> +	dma_async_device_unregister(dd);
> +
> +	return ret;
> +}
> +
> +static int stm32_dma_remove(struct platform_device *pdev)
> +{
> +	struct stm32_dma_device *dmadev = platform_get_drvdata(pdev);
> +
> +	of_dma_controller_free(pdev->dev.of_node);
> +
> +	dma_async_device_unregister(&dmadev->ddev);
> +
> +	clk_disable_unprepare(dmadev->clk);

What is the purpose of disabling/unpreparing the clock here? 
stm32_dma_alloc_chan_resources() and stm32_dma_free_chan_resources() 
should pair up and the clock should already be stopped.


Daniel.
M'boumba Cedric Madianga Oct. 14, 2015, 7:54 a.m. UTC | #2
Hi Daniel,

2015-10-13 16:34 GMT+02:00 Daniel Thompson <daniel.thompson@linaro.org>:
> On 13/10/15 15:05, M'boumba Cedric Madianga wrote:
>>
>> This patch adds support for the STM32 DMA controller.
>>
>> Signed-off-by: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
>> ---
>>   drivers/dma/Kconfig     |   12 +
>>   drivers/dma/Makefile    |    1 +
>>   drivers/dma/stm32-dma.c | 1175
>> +++++++++++++++++++++++++++++++++++++++++++++++
>>   3 files changed, 1188 insertions(+)
>>   create mode 100644 drivers/dma/stm32-dma.c
>
>
>> diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
>> new file mode 100644
>> index 0000000..031bab7
>> --- /dev/null
>> +++ b/drivers/dma/stm32-dma.c
>
>
>> +enum stm32_dma_width {
>> +       STM32_DMA_BYTE,
>> +       STM32_DMA_HALF_WORD,
>> +       STM32_DMA_WORD,
>> +};
>> +
>> +enum stm32_dma_burst_size {
>> +       STM32_DMA_BURST_SINGLE,
>> +       STM32_DMA_BURST_INCR4,
>> +       STM32_DMA_BURST_INCR8,
>> +       STM32_DMA_BURST_INCR16,
>> +};
>> +
>> +enum stm32_dma_channel_id {
>> +       STM32_DMA_CHANNEL0,
>> +       STM32_DMA_CHANNEL1,
>> +       STM32_DMA_CHANNEL2,
>> +       STM32_DMA_CHANNEL3,
>> +       STM32_DMA_CHANNEL4,
>> +       STM32_DMA_CHANNEL5,
>> +       STM32_DMA_CHANNEL6,
>> +       STM32_DMA_CHANNEL7,
>> +};
>
>
> Why have (unused) enumerations to map numeric symbols to their own natural
> values? Using normal integers would be better.

Good point. I will remove these in the next version.
Thanks.

>
>
>> +enum stm32_dma_request_id {
>> +       STM32_DMA_REQUEST0,
>> +       STM32_DMA_REQUEST1,
>> +       STM32_DMA_REQUEST2,
>> +       STM32_DMA_REQUEST3,
>> +       STM32_DMA_REQUEST4,
>> +       STM32_DMA_REQUEST5,
>> +       STM32_DMA_REQUEST6,
>> +       STM32_DMA_REQUEST7,
>> +};
>
>
> Ditto.

As above, I will remove it in the next version. Thanks.

>
>
>> +static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
>> +{
>> +       struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
>> +
>> +       dev_dbg(chan2dev(chan), "SCR:   0x%08x\n",
>> +               stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)));
>> +       dev_dbg(chan2dev(chan), "NDTR:  0x%08x\n",
>> +               stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id)));
>> +       dev_dbg(chan2dev(chan), "SPAR:  0x%08x\n",
>> +               stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id)));
>> +       dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n",
>> +               stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id)));
>> +       dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n",
>> +               stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id)));
>> +       dev_dbg(chan2dev(chan), "SFCR:  0x%08x\n",
>> +               stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id)));
>> +}
>
>
> Even at dev_dbg() this looks like log spam. This debug info gets issued
> every time we set up a transfer!

Yes, this info will be logged after each transfer.
But it complements the other debug info printed when DMADEVICES_DEBUG is set.

>
>
>> +static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
>> +{
>> +       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
>> +       struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
>> +       int ret;
>> +
>> +       chan->config_init = false;
>> +       ret = clk_prepare_enable(dmadev->clk);
>> +       if (ret < 0) {
>> +               dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n",
>> ret);
>> +               return ret;
>> +       }
>> +
>> +       ret = stm32_dma_disable_chan(chan);
>> +
>> +       return ret;
>> +}
>
>
> The error path here looks like it will leak clock references.

Sorry, I didn't catch that. What does it mean?

>
>
>
>> +static int stm32_dma_probe(struct platform_device *pdev)
>> +{
>> +       struct stm32_dma_chan *chan;
>> +       struct stm32_dma_device *dmadev;
>> +       struct dma_device *dd;
>> +       const struct of_device_id *match;
>> +       unsigned int i;
>> +       struct resource *res;
>> +       int ret;
>> +
>> +       match = of_match_device(stm32_dma_of_match, &pdev->dev);
>> +       if (!match) {
>> +               dev_err(&pdev->dev, "Error: No device match found\n");
>> +               return -ENODEV;
>> +       }
>> +
>> +       dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
>> +       if (!dmadev)
>> +               return -ENOMEM;
>> +
>> +       dd = &dmadev->ddev;
>> +
>> +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
>> +       dmadev->base = devm_ioremap_resource(&pdev->dev, res);
>> +       if (IS_ERR(dmadev->base))
>> +               return PTR_ERR(dmadev->base);
>> +
>> +       dmadev->clk = devm_clk_get(&pdev->dev, NULL);
>> +       if (IS_ERR(dmadev->clk)) {
>> +               dev_err(&pdev->dev, "Error: Missing controller clock\n");
>> +               return PTR_ERR(dmadev->clk);
>> +       }
>> +
>> +       dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
>> +                                               "st,mem2mem");
>> +
>> +       dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
>> +       if (!IS_ERR(dmadev->rst)) {
>> +               reset_control_assert(dmadev->rst);
>> +               udelay(2);
>> +               reset_control_deassert(dmadev->rst);
>> +       }
>> +
>> +       dma_cap_set(DMA_SLAVE, dd->cap_mask);
>> +       dma_cap_set(DMA_PRIVATE, dd->cap_mask);
>> +       dma_cap_set(DMA_CYCLIC, dd->cap_mask);
>> +       dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
>> +       dd->device_free_chan_resources = stm32_dma_free_chan_resources;
>> +       dd->device_tx_status = stm32_dma_tx_status;
>> +       dd->device_issue_pending = stm32_dma_issue_pending;
>> +       dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
>> +       dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
>> +       dd->device_config = stm32_dma_slave_config;
>> +       dd->device_terminate_all = stm32_dma_terminate_all;
>> +       dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
>> +               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
>> +               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
>> +       dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
>> +               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
>> +               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
>> +       dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
>> +       dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
>> +       dd->dev = &pdev->dev;
>> +       INIT_LIST_HEAD(&dd->channels);
>> +
>> +       if (dmadev->mem2mem) {
>> +               dma_cap_set(DMA_MEMCPY, dd->cap_mask);
>> +               dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
>> +               dd->directions |= BIT(DMA_MEM_TO_MEM);
>> +       }
>> +
>> +       for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
>> +               chan = &dmadev->chan[i];
>> +               chan->id = i;
>> +               chan->vchan.desc_free = stm32_dma_desc_free;
>> +               vchan_init(&chan->vchan, dd);
>> +       }
>> +
>> +       ret = dma_async_device_register(dd);
>> +       if (ret)
>> +               return ret;
>> +
>> +       for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
>> +               chan = &dmadev->chan[i];
>> +               res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
>> +               if (!res) {
>> +                       ret = -EINVAL;
>> +                       dev_err(&pdev->dev, "No irq resource for chan
>> %d\n", i);
>> +                       goto err_unregister;
>> +               }
>> +               chan->irq = res->start;
>> +               ret = devm_request_irq(&pdev->dev, chan->irq,
>> +                                      stm32_dma_chan_irq, 0,
>> +                                      dev_name(chan2dev(chan)), chan);
>> +               if (ret) {
>> +                       dev_err(&pdev->dev,
>> +                               "request_irq failed with err %d channel
>> %d\n",
>> +                               ret, i);
>> +                       goto err_unregister;
>> +               }
>> +       }
>> +
>> +       ret = of_dma_controller_register(pdev->dev.of_node,
>> +                                        stm32_dma_of_xlate, dmadev);
>> +       if (ret < 0) {
>> +               dev_err(&pdev->dev,
>> +                       "STM32 DMA DMA OF registration failed %d\n", ret);
>> +               goto err_unregister;
>> +       }
>> +
>> +       platform_set_drvdata(pdev, dmadev);
>> +
>> +       dev_info(&pdev->dev, "STM32 DMA driver registered\n");
>> +
>> +       return 0;
>> +
>> +err_unregister:
>> +       dma_async_device_unregister(dd);
>> +
>> +       return ret;
>> +}
>> +
>> +static int stm32_dma_remove(struct platform_device *pdev)
>> +{
>> +       struct stm32_dma_device *dmadev = platform_get_drvdata(pdev);
>> +
>> +       of_dma_controller_free(pdev->dev.of_node);
>> +
>> +       dma_async_device_unregister(&dmadev->ddev);
>> +
>> +       clk_disable_unprepare(dmadev->clk);
>
>
> What is the purpose of disabling/unpreparing the clock here?
> stm32_dma_alloc_chan_resources() and stm32_dma_free_chan_resources() should
> pair up and the clock should already be stopped.

I agree with you. I will remove it in the next version. Thanks.

>
>
> Daniel.

Thanks for your review.

BR,
Cedric
Daniel Thompson Oct. 14, 2015, 8:52 a.m. UTC | #3
On 14/10/15 08:54, M'boumba Cedric Madianga wrote:
>>> +static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
>>> +{
>>> +       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
>>> +       struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
>>> +       int ret;
>>> +
>>> +       chan->config_init = false;
>>> +       ret = clk_prepare_enable(dmadev->clk);
>>> +       if (ret < 0) {
>>> +               dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n",
>>> ret);
>>> +               return ret;
>>> +       }
>>> +
>>> +       ret = stm32_dma_disable_chan(chan);
>>> +
>>> +       return ret;
>>> +}
>>
>>
>> The error path here looks like it will leak clock references.
>
> Sorry, I didn't catch that. What does it mean?

If stm32_dma_disable_chan() returns an error then we will not restore 
the original clock counts, causing them to "leak".
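A minimal sketch of the fix, reusing only symbols already present in the
patch (untested, just to illustrate the point):

static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
	int ret;

	chan->config_init = false;

	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		/* Drop the reference taken above so the counts stay balanced */
		clk_disable_unprepare(dmadev->clk);

	return ret;
}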


Daniel.
M'boumba Cedric Madianga Oct. 14, 2015, 8:57 a.m. UTC | #4
Hi Daniel,

2015-10-14 10:52 GMT+02:00 Daniel Thompson <daniel.thompson@linaro.org>:
> On 14/10/15 08:54, M'boumba Cedric Madianga wrote:
>>>>
>>>> +static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
>>>> +{
>>>> +       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
>>>> +       struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
>>>> +       int ret;
>>>> +
>>>> +       chan->config_init = false;
>>>> +       ret = clk_prepare_enable(dmadev->clk);
>>>> +       if (ret < 0) {
>>>> +               dev_err(chan2dev(chan), "clk_prepare_enable failed:
>>>> %d\n",
>>>> ret);
>>>> +               return ret;
>>>> +       }
>>>> +
>>>> +       ret = stm32_dma_disable_chan(chan);
>>>> +
>>>> +       return ret;
>>>> +}
>>>
>
>
> If stm32_dma_disable_chan() returns an error then we will not restore the
> original clock counts, causing them to "leak".

Ok got it. Thanks for the clarification.
I will fix it in the next version.

BR,
Cedric
Vinod Koul Oct. 14, 2015, 11:16 a.m. UTC | #5
On Tue, Oct 13, 2015 at 04:05:25PM +0200, M'boumba Cedric Madianga wrote:
> +#define STM32_DMA_LISR			0x0000 /* DMA Low Int Status Reg */
> +#define STM32_DMA_HISR			0x0004 /* DMA High Int Status Reg */
> +#define STM32_DMA_LIFCR			0x0008 /* DMA Low Int Flag Clear Reg */
> +#define STM32_DMA_HIFCR			0x000c /* DMA High Int Flag Clear Reg */
> +#define STM32_DMA_TCI			BIT(5) /* Transfer Complete Interrupt */
> +#define STM32_DMA_HTI			BIT(4) /* Half Transfer Interrupt */
> +#define STM32_DMA_TEI			BIT(3) /* Transfer Error Interrupt */
> +#define STM32_DMA_DMEI			BIT(2) /* Direct Mode Error Interrupt */
> +#define STM32_DMA_FEI			BIT(0) /* FIFO Error Interrupt */

Why not use BIT() for everything here and make it consistent?

Also, stick to the 80-char limit wherever you can, as above.

> +
> +/* DMA Stream x Configuration Register */
> +#define STM32_DMA_SCR(x)		(0x0010 + 0x18 * (x)) /* x = 0..7 */
> +#define STM32_DMA_SCR_REQ(n)		((n & 0x7) << 25)

This and the ones below look ugly and hard to maintain; are you sure the
spec doesn't have a formula for these?

> +static inline uint32_t stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)

This and a few others could be made more readable.

> +static struct stm32_dma_desc *stm32_dma_alloc_desc(unsigned int num_sgs)
> +{
> +	return kzalloc(sizeof(struct stm32_dma_desc) +
> +		       sizeof(struct stm32_dma_sg_req) * num_sgs, GFP_ATOMIC);

Not GFP_NOWAIT?

> +static enum stm32_dma_width stm32_get_dma_width(struct stm32_dma_chan *chan,
> +						enum dma_slave_buswidth width)
> +{
> +	switch (width) {
> +	case DMA_SLAVE_BUSWIDTH_1_BYTE:
> +		return STM32_DMA_BYTE;
> +	case DMA_SLAVE_BUSWIDTH_2_BYTES:
> +		return STM32_DMA_HALF_WORD;
> +	case DMA_SLAVE_BUSWIDTH_4_BYTES:
> +		return STM32_DMA_WORD;
> +	default:
> +		dev_warn(chan2dev(chan),
> +			 "Dma bus width not supported, using 32bits\n");
> +		return STM32_DMA_WORD;

Please return an error here. Silently assuming a default for a wrong
parameter can wreak havoc on the transfer, so it is not advisable.
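A sketch of what that could look like, with the return type widened to int
so -EINVAL can propagate (the burst mapping below could be reworked the same
way); callers would then need to check for negative values:

static int stm32_get_dma_width(struct stm32_dma_chan *chan,
			       enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return STM32_DMA_BYTE;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return STM32_DMA_HALF_WORD;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return STM32_DMA_WORD;
	default:
		dev_err(chan2dev(chan), "Dma bus width not supported\n");
		return -EINVAL;
	}
}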

> +static enum stm32_dma_burst_size stm32_get_dma_burst(
> +		struct stm32_dma_chan *chan, u32 maxburst)
> +{
> +	switch (maxburst) {
> +	case 0:
> +	case 1:
> +		return STM32_DMA_BURST_SINGLE;
> +	case 4:
> +		return STM32_DMA_BURST_INCR4;
> +	case 8:
> +		return STM32_DMA_BURST_INCR8;
> +	case 16:
> +		return STM32_DMA_BURST_INCR16;
> +	default:
> +		dev_warn(chan2dev(chan),
> +			 "Dma burst size not supported, using single\n");
> +		return STM32_DMA_BURST_SINGLE;

here too

> +	}
> +}
> +
> +static int stm32_dma_slave_config(struct dma_chan *c,
> +				  struct dma_slave_config *config)
> +{
> +	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
> +
> +	if (chan->busy) {
> +		dev_err(chan2dev(chan), "Configuration not allowed\n");
> +		return -EBUSY;
> +	}

That is a false condition. This configuration should be used for the next
descriptor prepare.
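I.e. something like the following, where the configuration is only stored
and then picked up by the next descriptor prepare (a sketch, not the final
code):

static int stm32_dma_slave_config(struct dma_chan *c,
				  struct dma_slave_config *config)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	/* Just store it; it takes effect on the next prepared descriptor */
	memcpy(&chan->dma_sconfig, config, sizeof(*config));

	chan->config_init = true;

	return 0;
}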

> +static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
> +{
> +	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
> +	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
> +	u32 dma_scr;
> +
> +	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
> +	if (dma_scr & STM32_DMA_SCR_EN) {
> +		dma_scr &= ~STM32_DMA_SCR_EN;
> +		stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
> +
> +		do {
> +			dma_scr = stm32_dma_read(dmadev,
> +						 STM32_DMA_SCR(chan->id));
> +			dma_scr &= STM32_DMA_SCR_EN;
> +			if (!dma_scr)
> +				break;

An empty line here would improve readability.

> +static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
> +{
> +	struct stm32_dma_chan *chan = devid;
> +	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
> +	u32 status, scr, sfcr;
> +
> +	spin_lock(&chan->vchan.lock);
> +
> +	status = stm32_dma_irq_status(chan);
> +	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
> +	sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
> +
> +	if ((status & STM32_DMA_HTI) && (scr & STM32_DMA_SCR_HTIE)) {
> +		stm32_dma_irq_clear(chan, STM32_DMA_HTI);
> +		vchan_cyclic_callback(&chan->desc->vdesc);
> +		spin_unlock(&chan->vchan.lock);
> +		return IRQ_HANDLED;

Blank line here please, and below.

> +	} else if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) {
> +		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
> +		stm32_dma_handle_chan_done(chan);
> +		spin_unlock(&chan->vchan.lock);
> +		return IRQ_HANDLED;
> +	} else if ((status & STM32_DMA_TEI) && (scr & STM32_DMA_SCR_TEIE)) {
> +		dev_err(chan2dev(chan), "DMA error: received TEI event\n");
> +		stm32_dma_irq_clear(chan, STM32_DMA_TEI);
> +		chan->status = DMA_ERROR;
> +		spin_unlock(&chan->vchan.lock);
> +		return IRQ_HANDLED;
> +	} else if ((status & STM32_DMA_FEI) && (sfcr & STM32_DMA_SFCR_FEIE)) {
> +		dev_err(chan2dev(chan), "DMA error: received FEI event\n");
> +		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
> +		chan->status = DMA_ERROR;
> +		spin_unlock(&chan->vchan.lock);
> +		return IRQ_HANDLED;

This is a repeat of the above apart from the error print!!

> +	} else if ((status & STM32_DMA_DMEI) && (scr & STM32_DMA_SCR_DMEIE)) {
> +		dev_err(chan2dev(chan), "DMA error: received DMEI event\n");
> +		stm32_dma_irq_clear(chan, STM32_DMA_DMEI);
> +		chan->status = DMA_ERROR;
> +		spin_unlock(&chan->vchan.lock);
> +		return IRQ_HANDLED;

same here :(

> +static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
> +					   dma_cookie_t cookie,
> +					   struct dma_tx_state *state)
> +{
> +	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
> +	struct virt_dma_desc *vdesc;
> +	enum dma_status status;
> +	unsigned long flags;
> +	unsigned int residue;
> +
> +	status = dma_cookie_status(c, cookie, state);
> +	if (status == DMA_COMPLETE)
> +		return status;
> +
> +	if (!state)
> +		return chan->status;
why channel status and not status from dma_cookie_status()?

> +static int stm32_dma_remove(struct platform_device *pdev)
> +{
> +	struct stm32_dma_device *dmadev = platform_get_drvdata(pdev);
> +
> +	of_dma_controller_free(pdev->dev.of_node);
> +
> +	dma_async_device_unregister(&dmadev->ddev);
> +
> +	clk_disable_unprepare(dmadev->clk);

and your irq is enabled and you can still receive interrupts and schedule
tasklets :(
M'boumba Cedric Madianga Oct. 14, 2015, 1:07 p.m. UTC | #6
Hi Vinod,


2015-10-14 13:16 GMT+02:00 Vinod Koul <vinod.koul@intel.com>:
> On Tue, Oct 13, 2015 at 04:05:25PM +0200, M'boumba Cedric Madianga wrote:
>> +#define STM32_DMA_LISR                       0x0000 /* DMA Low Int Status Reg */
>> +#define STM32_DMA_HISR                       0x0004 /* DMA High Int Status Reg */
>> +#define STM32_DMA_LIFCR                      0x0008 /* DMA Low Int Flag Clear Reg */
>> +#define STM32_DMA_HIFCR                      0x000c /* DMA High Int Flag Clear Reg */
>> +#define STM32_DMA_TCI                        BIT(5) /* Transfer Complete Interrupt */
>> +#define STM32_DMA_HTI                        BIT(4) /* Half Transfer Interrupt */
>> +#define STM32_DMA_TEI                        BIT(3) /* Transfer Error Interrupt */
>> +#define STM32_DMA_DMEI                       BIT(2) /* Direct Mode Error Interrupt */
>> +#define STM32_DMA_FEI                        BIT(0) /* FIFO Error Interrupt */
>
> Why not use BIT() for everything here and make it consistent?
>
> Also, stick to the 80-char limit wherever you can, as above.

In fact STM32_DMA_LISR, HISR, LIFCR and HIFCR are offsets used to access
registers, so BIT() cannot be used for them.

>
>> +
>> +/* DMA Stream x Configuration Register */
>> +#define STM32_DMA_SCR(x)             (0x0010 + 0x18 * (x)) /* x = 0..7 */
>> +#define STM32_DMA_SCR_REQ(n)         ((n & 0x7) << 25)
>
> This and the ones below look ugly and hard to maintain; are you sure the
> spec doesn't have a formula for these?

We don't have any formula in the spec to handle this.
It is clearly described that we have to access the registers this way
to address the right channel.
Please, see below an extract of the spec for more details:
DMA stream x configuration register (DMA_SxCR) (x = 0..7)
This register is used to configure the concerned stream.
Address offset: 0x10 + 0x18 × stream number
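The spec formula could, hypothetically, be encoded once so each per-stream
macro reuses it (STM32_DMA_SREG is an invented name, not from the patch):

#define STM32_DMA_SREG(off, x)	((off) + 0x18 * (x))	/* x = 0..7 */
#define STM32_DMA_SCR(x)	STM32_DMA_SREG(0x0010, x)
#define STM32_DMA_SNDTR(x)	STM32_DMA_SREG(0x0014, x)
#define STM32_DMA_SPAR(x)	STM32_DMA_SREG(0x0018, x)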

>
>> +static inline uint32_t stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
>
> This and a few others could be made more readable.

OK, I could replace uint32_t with u32. Is that what you expect?
For the others, I don't see how I could make them more readable.
Could you please give me more details to help me? Thanks in advance.

>
>> +static struct stm32_dma_desc *stm32_dma_alloc_desc(unsigned int num_sgs)
>> +{
>> +     return kzalloc(sizeof(struct stm32_dma_desc) +
>> +                    sizeof(struct stm32_dma_sg_req) * num_sgs, GFP_ATOMIC);
>
> Not GFP_NOWAIT?

You're right. We could use GFP_NOWAIT instead. Thanks.

>
>> +static enum stm32_dma_width stm32_get_dma_width(struct stm32_dma_chan *chan,
>> +                                             enum dma_slave_buswidth width)
>> +{
>> +     switch (width) {
>> +     case DMA_SLAVE_BUSWIDTH_1_BYTE:
>> +             return STM32_DMA_BYTE;
>> +     case DMA_SLAVE_BUSWIDTH_2_BYTES:
>> +             return STM32_DMA_HALF_WORD;
>> +     case DMA_SLAVE_BUSWIDTH_4_BYTES:
>> +             return STM32_DMA_WORD;
>> +     default:
>> +             dev_warn(chan2dev(chan),
>> +                      "Dma bus width not supported, using 32bits\n");
>> +             return STM32_DMA_WORD;
>
> Please return an error here. Silently assuming a default for a wrong
> parameter can wreak havoc on the transfer, so it is not advisable.

OK, I will fix it in the next version. Thanks for the explanation.

>
>> +static enum stm32_dma_burst_size stm32_get_dma_burst(
>> +             struct stm32_dma_chan *chan, u32 maxburst)
>> +{
>> +     switch (maxburst) {
>> +     case 0:
>> +     case 1:
>> +             return STM32_DMA_BURST_SINGLE;
>> +     case 4:
>> +             return STM32_DMA_BURST_INCR4;
>> +     case 8:
>> +             return STM32_DMA_BURST_INCR8;
>> +     case 16:
>> +             return STM32_DMA_BURST_INCR16;
>> +     default:
>> +             dev_warn(chan2dev(chan),
>> +                      "Dma burst size not supported, using single\n");
>> +             return STM32_DMA_BURST_SINGLE;
>
> here too

Ditto

>
>> +     }
>> +}
>> +
>> +static int stm32_dma_slave_config(struct dma_chan *c,
>> +                               struct dma_slave_config *config)
>> +{
>> +     struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
>> +
>> +     if (chan->busy) {
>> +             dev_err(chan2dev(chan), "Configuration not allowed\n");
>> +             return -EBUSY;
>> +     }
>
> That is a false condition. This configuration should be used for the next
> descriptor prepare.

OK. Will be fixed in the next patch version.

>
>> +static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
>> +{
>> +     struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
>> +     unsigned long timeout = jiffies + msecs_to_jiffies(5000);
>> +     u32 dma_scr;
>> +
>> +     dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
>> +     if (dma_scr & STM32_DMA_SCR_EN) {
>> +             dma_scr &= ~STM32_DMA_SCR_EN;
>> +             stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
>> +
>> +             do {
>> +                     dma_scr = stm32_dma_read(dmadev,
>> +                                              STM32_DMA_SCR(chan->id));
>> +                     dma_scr &= STM32_DMA_SCR_EN;
>> +                     if (!dma_scr)
>> +                             break;
>
> An empty line here would improve readability.

OK

>
>> +static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
>> +{
>> +     struct stm32_dma_chan *chan = devid;
>> +     struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
>> +     u32 status, scr, sfcr;
>> +
>> +     spin_lock(&chan->vchan.lock);
>> +
>> +     status = stm32_dma_irq_status(chan);
>> +     scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
>> +     sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
>> +
>> +     if ((status & STM32_DMA_HTI) && (scr & STM32_DMA_SCR_HTIE)) {
>> +             stm32_dma_irq_clear(chan, STM32_DMA_HTI);
>> +             vchan_cyclic_callback(&chan->desc->vdesc);
>> +             spin_unlock(&chan->vchan.lock);
>> +             return IRQ_HANDLED;
>
> Blank line here please, and below.

OK

>
>> +     } else if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) {
>> +             stm32_dma_irq_clear(chan, STM32_DMA_TCI);
>> +             stm32_dma_handle_chan_done(chan);
>> +             spin_unlock(&chan->vchan.lock);
>> +             return IRQ_HANDLED;
>> +     } else if ((status & STM32_DMA_TEI) && (scr & STM32_DMA_SCR_TEIE)) {
>> +             dev_err(chan2dev(chan), "DMA error: received TEI event\n");
>> +             stm32_dma_irq_clear(chan, STM32_DMA_TEI);
>> +             chan->status = DMA_ERROR;
>> +             spin_unlock(&chan->vchan.lock);
>> +             return IRQ_HANDLED;
>> +     } else if ((status & STM32_DMA_FEI) && (sfcr & STM32_DMA_SFCR_FEIE)) {
>> +             dev_err(chan2dev(chan), "DMA error: received FEI event\n");
>> +             stm32_dma_irq_clear(chan, STM32_DMA_FEI);
>> +             chan->status = DMA_ERROR;
>> +             spin_unlock(&chan->vchan.lock);
>> +             return IRQ_HANDLED;
>
> This is a repeat of the above apart from the error print!!

Not only for the print: we also need it to know which bit to clear
in the IRQ status.
That way we are sure to handle each interrupt event.

>
>> +     } else if ((status & STM32_DMA_DMEI) && (scr & STM32_DMA_SCR_DMEIE)) {
>> +             dev_err(chan2dev(chan), "DMA error: received DMEI event\n");
>> +             stm32_dma_irq_clear(chan, STM32_DMA_DMEI);
>> +             chan->status = DMA_ERROR;
>> +             spin_unlock(&chan->vchan.lock);
>> +             return IRQ_HANDLED;
>
> same here :(

Ditto

>
>> +static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
>> +                                        dma_cookie_t cookie,
>> +                                        struct dma_tx_state *state)
>> +{
>> +     struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
>> +     struct virt_dma_desc *vdesc;
>> +     enum dma_status status;
>> +     unsigned long flags;
>> +     unsigned int residue;
>> +
>> +     status = dma_cookie_status(c, cookie, state);
>> +     if (status == DMA_COMPLETE)
>> +             return status;
>> +
>> +     if (!state)
>> +             return chan->status;
> why channel status and not status from dma_cookie_status()?

If the status is different from DMA_COMPLETE it seems better to use the
channel status. That way, we can report a potential bus error via
DMA_ERROR.
But if I return the status from dma_cookie_status(), I will always report
DMA_IN_PROGRESS.

>
>> +static int stm32_dma_remove(struct platform_device *pdev)
>> +{
>> +     struct stm32_dma_device *dmadev = platform_get_drvdata(pdev);
>> +
>> +     of_dma_controller_free(pdev->dev.of_node);
>> +
>> +     dma_async_device_unregister(&dmadev->ddev);
>> +
>> +     clk_disable_unprepare(dmadev->clk);
>
> and your irq is enabled and you can still receive interrupts and schedule
> tasklets :(

So I have to free the interrupts and also kill the tasklets here.
I will also disable the channel in case of an ongoing transfer, in order
to leave the DMA controller in a good state.

Thanks for your review :)

BR,
Cedric
M'boumba Cedric Madianga Oct. 14, 2015, 1:17 p.m. UTC | #7
Hi Daniel,

>>> +
>>> +static int stm32_dma_remove(struct platform_device *pdev)
>>> +{
>>> +       struct stm32_dma_device *dmadev = platform_get_drvdata(pdev);
>>> +
>>> +       of_dma_controller_free(pdev->dev.of_node);
>>> +
>>> +       dma_async_device_unregister(&dmadev->ddev);
>>> +
>>> +       clk_disable_unprepare(dmadev->clk);
>>
>>
>> What is the purpose of disabling/unpreparing the clock here?
>> stm32_dma_alloc_chan_resources() and stm32_dma_free_chan_resources() should
>> pair up and the clock should already be stopped.

stm32_dma_remove() could be called during an on-going transfer, e.g. on
module unload.
So in that case, it seems that disabling/unpreparing the clock is needed.

BR,
Cedric
Daniel Thompson Oct. 14, 2015, 1:29 p.m. UTC | #8
On 14/10/15 14:17, M'boumba Cedric Madianga wrote:
> Hi Daniel,
>
>>>> +
>>>> +static int stm32_dma_remove(struct platform_device *pdev)
>>>> +{
>>>> +       struct stm32_dma_device *dmadev = platform_get_drvdata(pdev);
>>>> +
>>>> +       of_dma_controller_free(pdev->dev.of_node);
>>>> +
>>>> +       dma_async_device_unregister(&dmadev->ddev);
>>>> +
>>>> +       clk_disable_unprepare(dmadev->clk);
>>>
>>>
>>> What is the purpose of disabling/unpreparing the clock here?
>>> stm32_dma_alloc_chan_resources() and stm32_dma_free_chan_resources() should
>>> pair up and the clock should already be stopped.
>
> stm32_dma_remove() could be called during an on-going transfer, e.g. on
> module unload.
> So in that case, it seems that disabling/unpreparing the clock is needed.

Really?

I think we need to be sure any on-going transfers are stopped.

There are multiple reasons for this, not least the risk of executing a 
callback that has been freed, but the one related to my point is that a 
single clk_disable_unprepare() will remain broken because if you don't 
know that the transfers have stopped then you don't know how many 
on-going transfers there are.


Daniel.
M'boumba Cedric Madianga Oct. 14, 2015, 1:41 p.m. UTC | #9
2015-10-14 15:29 GMT+02:00 Daniel Thompson <daniel.thompson@linaro.org>:
> On 14/10/15 14:17, M'boumba Cedric Madianga wrote:
>>
>> Hi Daniel,
>>
>>>>> +
>>>>> +static int stm32_dma_remove(struct platform_device *pdev)
>>>>> +{
>>>>> +       struct stm32_dma_device *dmadev = platform_get_drvdata(pdev);
>>>>> +
>>>>> +       of_dma_controller_free(pdev->dev.of_node);
>>>>> +
>>>>> +       dma_async_device_unregister(&dmadev->ddev);
>>>>> +
>>>>> +       clk_disable_unprepare(dmadev->clk);
>>>>
>>>>
>>>>
>>>> What is the purpose of disabling/unpreparing the clock here?
>>>> stm32_dma_alloc_chan_resources() and stm32_dma_free_chan_resources()
>>>> should
>>>> pair up and the clock should already be stopped.
>>
>>
>> stm32_dma_remove() could be called during an on-going transfer, e.g. on
>> module unload.
>> So in that case, it seems that disabling/unpreparing the clock is needed.
>
>
> Really?
>
> I think we need to be sure any on-going transfers are stopped.
>
> There are multiple reasons for this, not least the risk of executing a
> callback that has been freed, but the one related to my point is that a
> single clk_disable_unprepare() will remain broken because if you don't know
> that the transfers have stopped then you don't know how many on-going
> transfers there are.

In the next version, I am going to stop the DMA, free all interrupts
and kill the tasklets in stm32_dma_remove().
That way, all on-going transfers will be lost, as we don't wait for the
remaining transfers to end, so that this function executes as quickly as
possible.
But even with this improvement, I think I have to disable the clock here.
Vinod Koul Oct. 14, 2015, 2:14 p.m. UTC | #10
On Wed, Oct 14, 2015 at 03:07:16PM +0200, M'boumba Cedric Madianga wrote:
> >> +static inline uint32_t stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
> >
> > this and few other could be made more readable
> 
> OK, I could replace uint32_t with u32. Is that what you expect?
> For the others, I don't see how I could make them more readable.
> Could you please give me more details to help me? Thanks in advance.

Yes, that is one, and also splitting this up so that it fits within 80 chars
while not sacrificing readability...
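E.g. a possible reworked prototype; switching uint32_t to the kernel's u32
already brings the line under 80 chars:

static inline u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}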

> >> +     } else if ((status & STM32_DMA_FEI) && (sfcr & STM32_DMA_SFCR_FEIE)) {
> >> +             dev_err(chan2dev(chan), "DMA error: received FEI event\n");
> >> +             stm32_dma_irq_clear(chan, STM32_DMA_FEI);
> >> +             chan->status = DMA_ERROR;
> >> +             spin_unlock(&chan->vchan.lock);
> >> +             return IRQ_HANDLED;
> >
> > this is repeat of above apart from err print!!
> 
> Not only for the print: we also need it to know which bit to clear
> in the IRQ status.
> That way we are sure to handle each interrupt event.

You can print the error type based on the status, or print the whole status,
but handle it the same for all three cases.
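A rough sketch of that consolidation (simplified: the per-source interrupt
enable checks against scr/sfcr are omitted here for brevity):

	if (status & (STM32_DMA_TEI | STM32_DMA_FEI | STM32_DMA_DMEI)) {
		dev_err(chan2dev(chan), "DMA error: status 0x%08x\n", status);
		stm32_dma_irq_clear(chan, status & (STM32_DMA_TEI |
				    STM32_DMA_FEI | STM32_DMA_DMEI));
		chan->status = DMA_ERROR;
		spin_unlock(&chan->vchan.lock);
		return IRQ_HANDLED;
	}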

> >> +     enum dma_status status;
> >> +     unsigned long flags;
> >> +     unsigned int residue;
> >> +
> >> +     status = dma_cookie_status(c, cookie, state);
> >> +     if (status == DMA_COMPLETE)
> >> +             return status;
> >> +
> >> +     if (!state)
> >> +             return chan->status;
> > why channel status and not status from dma_cookie_status()?
> 
> If the status is different from DMA_COMPLETE it seems better to use the
> channel status. That way, we can report a potential bus error via
> DMA_ERROR.
> But if I return the status from dma_cookie_status(), I will always report
> DMA_IN_PROGRESS.

No, you are supposed to return the descriptor status and not channel status!
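I.e. the early return for a NULL state would report the cookie status
rather than chan->status, roughly like this (per-descriptor error reporting
would need separate plumbing):

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_COMPLETE || !state)
		return status;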
Daniel Thompson Oct. 14, 2015, 2:24 p.m. UTC | #11
On 14/10/15 14:41, M'boumba Cedric Madianga wrote:
> 2015-10-14 15:29 GMT+02:00 Daniel Thompson <daniel.thompson@linaro.org>:
>> On 14/10/15 14:17, M'boumba Cedric Madianga wrote:
>>>
>>> Hi Daniel,
>>>
>>>>>> +
>>>>>> +static int stm32_dma_remove(struct platform_device *pdev)
>>>>>> +{
>>>>>> +       struct stm32_dma_device *dmadev = platform_get_drvdata(pdev);
>>>>>> +
>>>>>> +       of_dma_controller_free(pdev->dev.of_node);
>>>>>> +
>>>>>> +       dma_async_device_unregister(&dmadev->ddev);
>>>>>> +
>>>>>> +       clk_disable_unprepare(dmadev->clk);
>>>>>
>>>>>
>>>>>
>>>>> What is the purpose of disabling/unpreparing the clock here?
>>>>> stm32_dma_alloc_chan_resources() and stm32_dma_free_chan_resources()
>>>>> should
>>>>> pair up and the clock should already be stopped.
>>>
>>>
>>> stm32_dma_remove() could be called during an on-going transfer during
>>> module unload.
>>> So in that case, it seems that disabling/unpreparing the clock is needed.
>>
>>
>> Really?
>>
>> I think we need to be sure any on-going transfers are stopped.
>>
>> There are multiple reasons for this, not least the risk of executing a
>> callback that has been freed, but the one related to my point is that a
>> single clk_disable_unprepare() will remain broken because if you don't know
>> that the transfers have stopped then you don't know how many on-going
>> transfers there are.
>
> In the next version, I am going to stop the DMA, free all interrupts
> and kill the tasklets in stm32_dma_remove().
> That way, all on-going transfers will be lost, as we don't wait for the
> remaining transfers to end, so that this function executes as quickly as
> possible.

Hmnnn...

The dmaengine framework will WARN_ONCE() if a dmaengine is removed 
whilst it is active and also works hard to ensure dmaengine modules are 
not removed whilst there are active drivers using the framework.

How do we get into this function whilst there are still active DMA 
channels?


> But even with this improvement, I think I have to disable the clock here.

As above, I think the dmaengine framework works to protect you from this 
sort of problem. However, even if I am wrong about that, unconditionally 
calling clk_disable_unprepare() cannot be used to reliably manage the 
clocks. You don't know what the counts are!


Daniel.
M'boumba Cedric Madianga Oct. 14, 2015, 3:26 p.m. UTC | #12
2015-10-14 16:24 GMT+02:00 Daniel Thompson <daniel.thompson@linaro.org>:
>
> Hmnnn...
>
> The dmaengine framework will WARN_ONCE() if a dmaengine is removed whilst
> it is active and also works hard to ensure dmaengine modules are not removed
> whilst there are active drivers using the framework.
>
> How do we get into this function whilst there are still active DMA
> channels?

For example, when a user tries "rmmod stm32-dma" in the uart console,
it will enter stm32_dma_remove() while there is potentially still active DMA.

>
>
>> But even with this improvement, I think I have to disable the clock here.
>
>
> As above, I think the dmaengine framework works to protect you from this
> sort of problem. However, even if I am wrong about that, unconditionally
> calling clk_disable_unprepare() cannot be used to reliably manage the
> clocks. You don't know what the counts are!

Ok got it. Thanks for the clarification.

Cedric
Daniel Thompson Oct. 14, 2015, 3:28 p.m. UTC | #13
On 14/10/15 16:26, M'boumba Cedric Madianga wrote:
> 2015-10-14 16:24 GMT+02:00 Daniel Thompson <daniel.thompson@linaro.org>:
>>
>> Hmnnn...
>>
>> The dmaengine framework will WARN_ONCE() if a dmaengine is removed whilst
>> it is active and also works hard to ensure dmaengine modules are not removed
>> whilst there are active drivers using the framework.
>>
>> How do we get into this function whilst there are still active DMA
>> channels?
>
> For example, when a user tries "rmmod stm32-dma" in the uart console,
> it will enter stm32_dma_remove() while there is potentially still active DMA.

Check dmaengine.c for yourself but I think in this case the dmaengine 
framework will hold references to the module and prevent the remove from 
taking place.


Daniel.
M'boumba Cedric Madianga Oct. 14, 2015, 3:41 p.m. UTC | #14
2015-10-14 17:28 GMT+02:00 Daniel Thompson <daniel.thompson@linaro.org>:
> On 14/10/15 16:26, M'boumba Cedric Madianga wrote:
>>
>> 2015-10-14 16:24 GMT+02:00 Daniel Thompson <daniel.thompson@linaro.org>:
>>>
>>>
>>> Hmnnn...
>>>
>>> The dmaengine framework will WARN_ONCE() if a dmaengine is removed
>>> whilst
>>> it is active and also works hard to ensure dmaengine modules are not
>>> removed
>>> whilst there are active drivers using the framework.
>>>
>>> How do we get into this function whilst there are still active DMA
>>> channels?
>>
>>
>> For example, when a user tries "rmmod stm32-dma" in the uart console,
>> it will enter stm32_dma_remove() while there is potentially still active
>> DMA.
>
>
> Check dmaengine.c for yourself but I think in this case the dmaengine
> framework will hold references to the module and prevent the remove from
> taking place.

Yes, I did.
As far as I understand, the dmaengine framework will print a warning
message but doesn't stop the remove operation if there are active
clients.

Cedric.
Vinod Koul Oct. 15, 2015, 4:07 a.m. UTC | #15
On Wed, Oct 14, 2015 at 05:41:26PM +0200, M'boumba Cedric Madianga wrote:
> 2015-10-14 17:28 GMT+02:00 Daniel Thompson <daniel.thompson@linaro.org>:
> > On 14/10/15 16:26, M'boumba Cedric Madianga wrote:
> >>
> >> 2015-10-14 16:24 GMT+02:00 Daniel Thompson <daniel.thompson@linaro.org>:
> >>>
> >>>
> >>> Hmnnn...
> >>>
> >>> The dmaengine framework will WARN_ONCE() if a dmaengine is removed
> >>> whilst
> >>> it is active and also works hard to ensure dmaengine modules are not
> >>> removed
> >>> whilst there are active drivers using the framework.
> >>>
> >>> How do we get into this function whilst there are still active DMA
> >>> channels?
> >>
> >>
> >> For example, when a user tries "rmmod stm32-dma" in the uart console,
> >> it will enter stm32_dma_remove() while there is potentially still active
> >> DMA.
> >
> >
> > Check dmaengine.c for yourself but I think in this case the dmaengine
> > framework will hold references to the module and prevent the remove from
> > taking place.
> 
> Yes, I did.
> As far as I understand, the dmaengine framework will print a warning
> message but doesn't stop the remove operation if there are active
> clients.

We hold a ref, so removal won't work; see dma_chan_get().
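For reference, the gist of dma_chan_get() in drivers/dma/dmaengine.c
(paraphrased, not the verbatim source): each client takes a reference on
the DMA driver's module, so rmmod fails while a channel is in use.

	/* paraphrased */
	static int dma_chan_get(struct dma_chan *chan)
	{
		struct module *owner = dma_chan_to_owner(chan);

		if (!try_module_get(owner))
			return -ENODEV;

		/* ... allocate channel resources, bump client_count ... */
		return 0;
	}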

Patch

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 5c931d4..a5dd649 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -431,6 +431,18 @@  config STE_DMA40
 	help
 	  Support for ST-Ericsson DMA40 controller
 
+config STM32_DMA
+	tristate "STMicroelectronics STM32 DMA support"
+	depends on ARCH_STM32
+	select DMA_ENGINE
+	select DMA_OF
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the on-chip DMA controller on STMicroelectronics
+	  STM32 MCUs.
+	  If you have a board based on such a MCU and wish to use DMA say Y or M
+	  here.
+
 config S3C24XX_DMAC
 	tristate "Samsung S3C24XX DMA support"
 	depends on ARCH_S3C24XX
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index ef9c099..2dd0a067 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -56,6 +56,7 @@  obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
 obj-$(CONFIG_RENESAS_DMA) += sh/
 obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
+obj-$(CONFIG_STM32_DMA) += stm32-dma.o
 obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
 obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
new file mode 100644
index 0000000..031bab7
--- /dev/null
+++ b/drivers/dma/stm32-dma.c
@@ -0,0 +1,1175 @@ 
+/*
+ * Driver for STM32 DMA controller
+ *
+ * Inspired by dma-jz4740.c and tegra20-apb-dma.c
+ *
+ * Copyright (C) M'boumba Cedric Madianga 2015
+ * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
+ *
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "virt-dma.h"
+
+#define STM32_DMA_LISR			0x0000 /* DMA Low Int Status Reg */
+#define STM32_DMA_HISR			0x0004 /* DMA High Int Status Reg */
+#define STM32_DMA_LIFCR			0x0008 /* DMA Low Int Flag Clear Reg */
+#define STM32_DMA_HIFCR			0x000c /* DMA High Int Flag Clear Reg */
+#define STM32_DMA_TCI			BIT(5) /* Transfer Complete Interrupt */
+#define STM32_DMA_HTI			BIT(4) /* Half Transfer Interrupt */
+#define STM32_DMA_TEI			BIT(3) /* Transfer Error Interrupt */
+#define STM32_DMA_DMEI			BIT(2) /* Direct Mode Error Interrupt */
+#define STM32_DMA_FEI			BIT(0) /* FIFO Error Interrupt */
+
+/* DMA Stream x Configuration Register */
+#define STM32_DMA_SCR(x)		(0x0010 + 0x18 * (x)) /* x = 0..7 */
+#define STM32_DMA_SCR_REQ(n)		((n & 0x7) << 25)
+#define STM32_DMA_SCR_MBURST_MASK	GENMASK(24, 23)
+#define STM32_DMA_SCR_MBURST(n)	        ((n & 0x3) << 23)
+#define STM32_DMA_SCR_PBURST_MASK	GENMASK(22, 21)
+#define STM32_DMA_SCR_PBURST(n)	        ((n & 0x3) << 21)
+#define STM32_DMA_SCR_PL_MASK		GENMASK(17, 16)
+#define STM32_DMA_SCR_PL(n)		((n & 0x3) << 16)
+#define STM32_DMA_SCR_MSIZE_MASK	GENMASK(14, 13)
+#define STM32_DMA_SCR_MSIZE(n)		((n & 0x3) << 13)
+#define STM32_DMA_SCR_PSIZE_MASK	GENMASK(12, 11)
+#define STM32_DMA_SCR_PSIZE(n)		((n & 0x3) << 11)
+#define STM32_DMA_SCR_PSIZE_GET(n)	((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
+#define STM32_DMA_SCR_DIR_MASK		GENMASK(7, 6)
+#define STM32_DMA_SCR_DIR(n)		((n & 0x3) << 6)
+#define STM32_DMA_SCR_CT		BIT(19) /* Target in double buffer */
+#define STM32_DMA_SCR_DBM		BIT(18) /* Double Buffer Mode */
+#define STM32_DMA_SCR_PINCOS		BIT(15) /* Peripheral inc offset size */
+#define STM32_DMA_SCR_MINC		BIT(10) /* Memory increment mode */
+#define STM32_DMA_SCR_PINC		BIT(9) /* Peripheral increment mode */
+#define STM32_DMA_SCR_CIRC		BIT(8) /* Circular mode */
+#define STM32_DMA_SCR_PFCTRL		BIT(5) /* Peripheral Flow Controller */
+#define STM32_DMA_SCR_TCIE		BIT(4) /* Transfer Cplete Int Enable*/
+#define STM32_DMA_SCR_HTIE		BIT(3) /* Halft Transfer Int Enable*/
+#define STM32_DMA_SCR_TEIE		BIT(2) /* Transfer Error Int Enable */
+#define STM32_DMA_SCR_DMEIE		BIT(1) /* Direct Mode Err Int Enable */
+#define STM32_DMA_SCR_EN		BIT(0) /* Stream Enable */
+#define STM32_DMA_STREAM_CFG_MASK	(STM32_DMA_SCR_DMEIE \
+					| STM32_DMA_SCR_TEIE \
+					| STM32_DMA_SCR_HTIE \
+					| STM32_DMA_SCR_TCIE \
+					| STM32_DMA_SCR_PINC \
+					| STM32_DMA_SCR_MINC \
+					| STM32_DMA_SCR_PINCOS \
+					| STM32_DMA_SCR_PL_MASK)
+
+/* DMA Stream x number of data register */
+#define STM32_DMA_SNDTR(x)		(0x0014 + 0x18 * (x))
+
+/* DMA stream peripheral address register */
+#define STM32_DMA_SPAR(x)		(0x0018 + 0x18 * (x))
+
+/* DMA stream x memory 0 address register */
+#define STM32_DMA_SM0AR(x)		(0x001c + 0x18 * (x))
+
+/* DMA stream x memory 1 address register */
+#define STM32_DMA_SM1AR(x)		(0x0020 + 0x18 * (x))
+
+/* DMA stream x FIFO control register */
+#define STM32_DMA_SFCR(x)		(0x0024 + 0x18 * (x))
+#define STM32_DMA_SFCR_FTH_MASK		GENMASK(1, 0)
+#define STM32_DMA_SFCR_FTH(n)		(n & STM32_DMA_SFCR_FTH_MASK)
+#define STM32_DMA_SFCR_FEIE		BIT(7) /* FIFO error interrupt enable */
+#define STM32_DMA_SFCR_DMDIS		BIT(2) /* Direct mode disable */
+#define STM32_DMA_FIFO_CFG_MASK		(STM32_DMA_SFCR_FTH_MASK \
+					| STM32_DMA_SFCR_FEIE \
+					| STM32_DMA_SFCR_DMDIS)
+
+/* DMA direction */
+#define STM32_DMA_DEV_TO_MEM		0x00
+#define	STM32_DMA_MEM_TO_DEV		0x01
+#define	STM32_DMA_MEM_TO_MEM		0x02
+
+/* DMA priority level */
+#define STM32_DMA_PRIORITY_LOW		0x00
+#define STM32_DMA_PRIORITY_MEDIUM	0x01
+#define STM32_DMA_PRIORITY_HIGH		0x02
+#define STM32_DMA_PRIORITY_VERY_HIGH	0x03
+
+/* DMA FIFO threshold selection */
+#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL		0x00
+#define STM32_DMA_FIFO_THRESHOLD_HALFFULL		0x01
+#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL		0x02
+#define STM32_DMA_FIFO_THRESHOLD_FULL			0x03
+
+#define STM32_DMA_MAX_DATA_ITEMS	0xffff
+#define STM32_DMA_MAX_CHANNELS		0x08
+#define STM32_DMA_MAX_REQUEST_ID	0x08
+#define STM32_DMA_MAX_DATA_PARAM	0x04
+
+enum stm32_dma_width {
+	STM32_DMA_BYTE,
+	STM32_DMA_HALF_WORD,
+	STM32_DMA_WORD,
+};
+
+enum stm32_dma_burst_size {
+	STM32_DMA_BURST_SINGLE,
+	STM32_DMA_BURST_INCR4,
+	STM32_DMA_BURST_INCR8,
+	STM32_DMA_BURST_INCR16,
+};
+
+enum stm32_dma_channel_id {
+	STM32_DMA_CHANNEL0,
+	STM32_DMA_CHANNEL1,
+	STM32_DMA_CHANNEL2,
+	STM32_DMA_CHANNEL3,
+	STM32_DMA_CHANNEL4,
+	STM32_DMA_CHANNEL5,
+	STM32_DMA_CHANNEL6,
+	STM32_DMA_CHANNEL7,
+};
+
+enum stm32_dma_request_id {
+	STM32_DMA_REQUEST0,
+	STM32_DMA_REQUEST1,
+	STM32_DMA_REQUEST2,
+	STM32_DMA_REQUEST3,
+	STM32_DMA_REQUEST4,
+	STM32_DMA_REQUEST5,
+	STM32_DMA_REQUEST6,
+	STM32_DMA_REQUEST7,
+};
+
+struct stm32_dma_cfg {
+	enum stm32_dma_channel_id channel_id;
+	enum stm32_dma_request_id request_line;
+	u32 stream_config;
+	u32 fifo_config;
+};
+
+struct stm32_dma_chan_reg {
+	u32 dma_lisr;
+	u32 dma_hisr;
+	u32 dma_lifcr;
+	u32 dma_hifcr;
+	u32 dma_scr;
+	u32 dma_sndtr;
+	u32 dma_spar;
+	u32 dma_sm0ar;
+	u32 dma_sm1ar;
+	u32 dma_sfcr;
+};
+
+struct stm32_dma_sg_req {
+	unsigned int len;
+	struct stm32_dma_chan_reg chan_reg;
+};
+
+struct stm32_dma_desc {
+	struct virt_dma_desc vdesc;
+	bool cyclic;
+	unsigned int num_sgs;
+	struct stm32_dma_sg_req sg_req[];
+};
+
+struct stm32_dma_chan {
+	struct virt_dma_chan vchan;
+	bool config_init;
+	bool busy;
+	unsigned int id;
+	unsigned int irq;
+	struct stm32_dma_desc *desc;
+	unsigned int next_sg;
+	struct dma_slave_config	dma_sconfig;
+	enum dma_status	status;
+	struct stm32_dma_chan_reg chan_reg;
+};
+
+struct stm32_dma_device {
+	struct dma_device ddev;
+	void __iomem *base;
+	struct clk *clk;
+	struct reset_control *rst;
+	bool mem2mem;
+	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
+};
+
+static struct stm32_dma_device *stm32_dma_chan_get_dev(
+	struct stm32_dma_chan *chan)
+{
+	return container_of(chan->vchan.chan.device, struct stm32_dma_device,
+			    ddev);
+}
+
+static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct stm32_dma_chan, vchan.chan);
+}
+
+static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct stm32_dma_desc, vdesc);
+}
+
+static inline struct device *chan2dev(struct stm32_dma_chan *chan)
+{
+	return &chan->vchan.chan.dev->device;
+}
+
+static inline u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
+{
+	return readl_relaxed(dmadev->base + reg);
+}
+
+static inline void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg,
+				   u32 val)
+{
+	writel_relaxed(val, dmadev->base + reg);
+}
+
+static struct stm32_dma_desc *stm32_dma_alloc_desc(unsigned int num_sgs)
+{
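+	/* Called from the prep_* callbacks, which may run in atomic context */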
+	return kzalloc(sizeof(struct stm32_dma_desc) +
+		       sizeof(struct stm32_dma_sg_req) * num_sgs, GFP_ATOMIC);
+}
+
+static enum stm32_dma_width stm32_get_dma_width(struct stm32_dma_chan *chan,
+						enum dma_slave_buswidth width)
+{
+	switch (width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		return STM32_DMA_BYTE;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		return STM32_DMA_HALF_WORD;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		return STM32_DMA_WORD;
+	default:
+		dev_warn(chan2dev(chan),
+			 "DMA bus width not supported, defaulting to 32 bits\n");
+		return STM32_DMA_WORD;
+	}
+}
+
+static enum stm32_dma_burst_size stm32_get_dma_burst(
+		struct stm32_dma_chan *chan, u32 maxburst)
+{
+	switch (maxburst) {
+	case 0:
+	case 1:
+		return STM32_DMA_BURST_SINGLE;
+	case 4:
+		return STM32_DMA_BURST_INCR4;
+	case 8:
+		return STM32_DMA_BURST_INCR8;
+	case 16:
+		return STM32_DMA_BURST_INCR16;
+	default:
+		dev_warn(chan2dev(chan),
+			 "DMA burst size not supported, using single transfers\n");
+		return STM32_DMA_BURST_SINGLE;
+	}
+}
+
+static int stm32_dma_slave_config(struct dma_chan *c,
+				  struct dma_slave_config *config)
+{
+	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+
+	if (chan->busy) {
+		dev_err(chan2dev(chan), "Configuration not allowed\n");
+		return -EBUSY;
+	}
+
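+	/*
+	 * Only cache the configuration here; it is applied to the hardware
+	 * when a transfer is prepared.
+	 */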
+	memcpy(&chan->dma_sconfig, config, sizeof(*config));
+
+	chan->config_init = true;
+
+	return 0;
+}
+
+static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
+{
+	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
+	u32 flags, dma_isr;
+
+	/*
+	 * Read "flags" from the DMA_xISR register corresponding to the
+	 * selected DMA channel, at the correct bit offset inside that
+	 * register.
+	 *
+	 * If (ch % 4) is 2 or 3, the flags sit 16 bits higher in the
+	 * register, so shift the status down by 16 bits.
+	 * If (ch % 4) is 1 or 3, shift down by an additional 6 bits.
+	 */
+
+	if (chan->id & 4)
+		dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR);
+	else
+		dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR);
+
+	flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));
+
+	/* Keep only the 6 flag bits belonging to this stream */
+	return flags & 0x3f;
+}
+
+static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
+{
+	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
+	u32 dma_ifcr;
+
+	/*
+	 * Write "flags" to the DMA_xIFCR register corresponding to the selected
+	 * DMA channel at the correct bit offset inside that register.
+	 *
+	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
+	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
+	 */
+	dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));
+
+	if (chan->id & 4)
+		stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr);
+	else
+		stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr);
+}
+
+static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
+{
+	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
+	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+	u32 dma_scr;
+
+	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+	if (dma_scr & STM32_DMA_SCR_EN) {
+		dma_scr &= ~STM32_DMA_SCR_EN;
+		stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+
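+		/*
+		 * Clearing EN only requests the disable; the hardware keeps
+		 * it set until the ongoing transfer has drained, so poll
+		 * until it actually reads back as zero.
+		 */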
+		do {
+			dma_scr = stm32_dma_read(dmadev,
+						 STM32_DMA_SCR(chan->id));
+			dma_scr &= STM32_DMA_SCR_EN;
+			if (!dma_scr)
+				break;
+			if (time_after_eq(jiffies, timeout)) {
+				dev_err(chan2dev(chan), "%s: timeout!\n",
+					__func__);
+				chan->status = DMA_ERROR;
+				return -EBUSY;
+			}
+			cond_resched();
+		} while (1);
+	}
+
+	return 0;
+}
+
+static void stm32_dma_stop(struct stm32_dma_chan *chan)
+{
+	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
+	u32 dma_scr, dma_sfcr, status;
+	int ret;
+
+	/* Disable interrupts */
+	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+	dma_scr &= ~(STM32_DMA_SCR_DMEIE | STM32_DMA_SCR_TEIE |
+			STM32_DMA_SCR_HTIE | STM32_DMA_SCR_TCIE);
+	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+	dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
+	dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
+	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);
+
+	/* Disable DMA */
+	ret = stm32_dma_disable_chan(chan);
+	if (ret < 0)
+		return;
+
+	/* Clear interrupt status if it is there */
+	status = stm32_dma_irq_status(chan);
+	if (status) {
+		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
+			__func__, status);
+		stm32_dma_irq_clear(chan, status);
+	}
+
+	chan->busy = false;
+}
+
+static int stm32_dma_terminate_all(struct dma_chan *c)
+{
+	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+
+	if (chan->busy) {
+		stm32_dma_stop(chan);
+		chan->desc = NULL;
+	}
+
+	vchan_get_all_descriptors(&chan->vchan, &head);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+	vchan_dma_desc_free_list(&chan->vchan, &head);
+
+	return 0;
+}
+
+static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
+{
+	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
+
+	dev_dbg(chan2dev(chan), "SCR:   0x%08x\n",
+		stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)));
+	dev_dbg(chan2dev(chan), "NDTR:  0x%08x\n",
+		stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id)));
+	dev_dbg(chan2dev(chan), "SPAR:  0x%08x\n",
+		stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id)));
+	dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n",
+		stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id)));
+	dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n",
+		stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id)));
+	dev_dbg(chan2dev(chan), "SFCR:  0x%08x\n",
+		stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id)));
+}
+
+static int stm32_dma_start_transfer(struct stm32_dma_chan *chan)
+{
+	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
+	struct virt_dma_desc *vdesc;
+	struct stm32_dma_sg_req *sg_req;
+	struct stm32_dma_chan_reg *reg;
+	u32 status;
+	int ret;
+
+	ret = stm32_dma_disable_chan(chan);
+	if (ret < 0)
+		return ret;
+
+	if (!chan->desc) {
+		vdesc = vchan_next_desc(&chan->vchan);
+		if (!vdesc)
+			return 0;
+		chan->desc = to_stm32_dma_desc(vdesc);
+		chan->next_sg = 0;
+	}
+
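+	/* For cyclic transfers, wrap back to the first scatter element */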
+	if (chan->next_sg == chan->desc->num_sgs)
+		chan->next_sg = 0;
+
+	sg_req = &chan->desc->sg_req[chan->next_sg];
+	reg = &sg_req->chan_reg;
+
+	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
+	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
+	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
+	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
+	stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
+	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);
+
+	chan->next_sg++;
+
+	/* Clear interrupt status if it is there */
+	status = stm32_dma_irq_status(chan);
+	if (status)
+		stm32_dma_irq_clear(chan, status);
+
+	stm32_dma_dump_reg(chan);
+
+	/* Start DMA */
+	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id),
+			sg_req->chan_reg.dma_scr | STM32_DMA_SCR_EN);
+
+	chan->busy = true;
+	chan->status = DMA_IN_PROGRESS;
+
+	return 0;
+}
+
+static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
+{
+	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
+	struct stm32_dma_sg_req *sg_req;
+	u32 dma_scr;
+
+	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+
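+	/*
+	 * In double-buffer mode the CT bit reports which memory target the
+	 * hardware is currently using, so the other address register is
+	 * safe to reprogram with the next period.
+	 */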
+	if (dma_scr & STM32_DMA_SCR_DBM) {
+		if (chan->next_sg == chan->desc->num_sgs)
+			chan->next_sg = 0;
+
+		sg_req = &chan->desc->sg_req[chan->next_sg];
+
+		if (dma_scr & STM32_DMA_SCR_CT) {
+			stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id),
+					sg_req->chan_reg.dma_sm0ar);
+			dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
+				stm32_dma_read(dmadev,
+					       STM32_DMA_SM0AR(chan->id)));
+		} else {
+			stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id),
+					sg_req->chan_reg.dma_sm1ar);
+			dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
+				stm32_dma_read(dmadev,
+					       STM32_DMA_SM1AR(chan->id)));
+		}
+
+		chan->next_sg++;
+	}
+}
+
+static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
+{
+	if (chan->desc) {
+		if (chan->desc->cyclic) {
+			vchan_cyclic_callback(&chan->desc->vdesc);
+			stm32_dma_configure_next_sg(chan);
+		} else {
+			chan->busy = false;
+			if (chan->next_sg == chan->desc->num_sgs) {
+				list_del(&chan->desc->vdesc.node);
+				vchan_cookie_complete(&chan->desc->vdesc);
+				chan->desc = NULL;
+				chan->status = DMA_COMPLETE;
+			}
+			stm32_dma_start_transfer(chan);
+		}
+	}
+}
+
+static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
+{
+	struct stm32_dma_chan *chan = devid;
+	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
+	u32 status, scr, sfcr;
+
+	spin_lock(&chan->vchan.lock);
+
+	status = stm32_dma_irq_status(chan);
+	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+	sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
+
+	if ((status & STM32_DMA_HTI) && (scr & STM32_DMA_SCR_HTIE)) {
+		stm32_dma_irq_clear(chan, STM32_DMA_HTI);
+		vchan_cyclic_callback(&chan->desc->vdesc);
+		spin_unlock(&chan->vchan.lock);
+		return IRQ_HANDLED;
+	} else if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) {
+		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
+		stm32_dma_handle_chan_done(chan);
+		spin_unlock(&chan->vchan.lock);
+		return IRQ_HANDLED;
+	} else if ((status & STM32_DMA_TEI) && (scr & STM32_DMA_SCR_TEIE)) {
+		dev_err(chan2dev(chan), "DMA error: received TEI event\n");
+		stm32_dma_irq_clear(chan, STM32_DMA_TEI);
+		chan->status = DMA_ERROR;
+		spin_unlock(&chan->vchan.lock);
+		return IRQ_HANDLED;
+	} else if ((status & STM32_DMA_FEI) && (sfcr & STM32_DMA_SFCR_FEIE)) {
+		dev_err(chan2dev(chan), "DMA error: received FEI event\n");
+		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
+		chan->status = DMA_ERROR;
+		spin_unlock(&chan->vchan.lock);
+		return IRQ_HANDLED;
+	} else if ((status & STM32_DMA_DMEI) && (scr & STM32_DMA_SCR_DMEIE)) {
+		dev_err(chan2dev(chan), "DMA error: received DMEI event\n");
+		stm32_dma_irq_clear(chan, STM32_DMA_DMEI);
+		chan->status = DMA_ERROR;
+		spin_unlock(&chan->vchan.lock);
+		return IRQ_HANDLED;
+	}
+
+	spin_unlock(&chan->vchan.lock);
+
+	dev_err(chan2dev(chan),
+		"Interrupt already served status 0x%08x\n", status);
+
+	return IRQ_NONE;
+}
+
+static void stm32_dma_issue_pending(struct dma_chan *c)
+{
+	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	if (!chan->busy) {
+		if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
+			ret = stm32_dma_start_transfer(chan);
+			if (!ret && chan->desc && chan->desc->cyclic)
+				stm32_dma_configure_next_sg(chan);
+		}
+	}
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
+				    enum dma_transfer_direction direction,
+				    enum dma_slave_buswidth *buswidth)
+{
+	enum dma_slave_buswidth src_addr_width, dst_addr_width;
+	enum stm32_dma_width src_bus_width, dst_bus_width;
+	enum stm32_dma_burst_size src_burst_size, dst_burst_size;
+	u32 src_maxburst, dst_maxburst;
+	dma_addr_t src_addr, dst_addr;
+
+	src_addr_width = chan->dma_sconfig.src_addr_width;
+	dst_addr_width = chan->dma_sconfig.dst_addr_width;
+	src_maxburst = chan->dma_sconfig.src_maxburst;
+	dst_maxburst = chan->dma_sconfig.dst_maxburst;
+	src_addr = chan->dma_sconfig.src_addr;
+	dst_addr = chan->dma_sconfig.dst_addr;
+
+	switch (direction) {
+	case DMA_MEM_TO_DEV:
+		dst_bus_width = stm32_get_dma_width(chan, dst_addr_width);
+		dst_burst_size = stm32_get_dma_burst(chan, dst_maxburst);
+		if (!src_addr_width)
+			src_addr_width = dst_addr_width;
+		src_bus_width = stm32_get_dma_width(chan, src_addr_width);
+		src_burst_size = stm32_get_dma_burst(chan, src_maxburst);
+
+		chan->chan_reg.dma_scr = (chan->chan_reg.dma_scr &
+			~(STM32_DMA_SCR_DIR_MASK | STM32_DMA_SCR_PSIZE_MASK |
+			STM32_DMA_SCR_MSIZE_MASK | STM32_DMA_SCR_PBURST_MASK |
+			STM32_DMA_SCR_MBURST_MASK)) |
+			STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) |
+			STM32_DMA_SCR_PSIZE(dst_bus_width) |
+			STM32_DMA_SCR_MSIZE(src_bus_width) |
+			STM32_DMA_SCR_PBURST(dst_burst_size) |
+			STM32_DMA_SCR_MBURST(src_burst_size);
+
+		chan->chan_reg.dma_spar = dst_addr;
+		*buswidth = dst_addr_width;
+		return 0;
+
+	case DMA_DEV_TO_MEM:
+		src_bus_width = stm32_get_dma_width(chan, src_addr_width);
+		src_burst_size = stm32_get_dma_burst(chan, src_maxburst);
+		if (!dst_addr_width)
+			dst_addr_width = src_addr_width;
+		dst_bus_width = stm32_get_dma_width(chan, dst_addr_width);
+		dst_burst_size = stm32_get_dma_burst(chan, dst_maxburst);
+
+		chan->chan_reg.dma_scr = (chan->chan_reg.dma_scr &
+			~(STM32_DMA_SCR_DIR_MASK | STM32_DMA_SCR_PSIZE_MASK |
+			STM32_DMA_SCR_MSIZE_MASK | STM32_DMA_SCR_PBURST_MASK |
+			STM32_DMA_SCR_MBURST_MASK)) |
+			STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) |
+			STM32_DMA_SCR_PSIZE(src_bus_width) |
+			STM32_DMA_SCR_MSIZE(dst_bus_width) |
+			STM32_DMA_SCR_PBURST(src_burst_size) |
+			STM32_DMA_SCR_MBURST(dst_burst_size);
+		chan->chan_reg.dma_spar = src_addr;
+		*buswidth = src_addr_width;
+		return 0;
+
+	default:
+		dev_err(chan2dev(chan), "Dma direction is not supported\n");
+		return -EINVAL;
+	}
+}
+
+static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
+{
+	memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
+}
+
+static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
+	struct dma_chan *c, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+	struct stm32_dma_desc *desc;
+	struct scatterlist *sg;
+	enum dma_slave_buswidth buswidth;
+	unsigned int i, nb_data_items;
+	int ret;
+
+	if (!chan->config_init) {
+		dev_err(chan2dev(chan), "dma channel is not configured\n");
+		return NULL;
+	}
+
+	if (sg_len < 1) {
+		dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
+		return NULL;
+	}
+
+	desc = stm32_dma_alloc_desc(sg_len);
+	if (!desc)
+		return NULL;
+
+	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth);
+	if (ret < 0)
+		goto err;
+
+	/* Set peripheral flow controller */
+	if (chan->dma_sconfig.device_fc)
+		chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
+	else
+		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		desc->sg_req[i].len = sg_dma_len(sg);
+
+		nb_data_items = desc->sg_req[i].len / buswidth;
+		if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS) {
+			dev_err(chan2dev(chan),
+				"number of items not supported\n");
+			goto err;
+		}
+
+		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
+		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
+		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
+		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
+		desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
+		desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
+		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
+	}
+
+	desc->num_sgs = sg_len;
+	desc->cyclic = false;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+
+err:
+	kfree(desc);
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
+	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags)
+{
+	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+	struct stm32_dma_desc *desc;
+	enum dma_slave_buswidth buswidth;
+	unsigned int num_periods, nb_data_items, i;
+	int ret;
+
+	if (!buf_len || !period_len) {
+		dev_err(chan2dev(chan), "Invalid buffer/period len\n");
+		return NULL;
+	}
+
+	if (!chan->config_init) {
+		dev_err(chan2dev(chan), "dma channel is not configured\n");
+		return NULL;
+	}
+
+	if (buf_len % period_len) {
+		dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
+		return NULL;
+	}
+
+	/*
+	 * Additional requests can be queued as long as the DMA has not
+	 * been started; the driver then loops over all of them. Once the
+	 * DMA is running, new requests can only be queued after
+	 * terminating the transfer.
+	 */
+	if (chan->busy) {
+		dev_err(chan2dev(chan),
+			"Request not allowed when dma running\n");
+		return NULL;
+	}
+
+	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth);
+	if (ret < 0)
+		return NULL;
+
+	nb_data_items = period_len / buswidth;
+	if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS) {
+		dev_err(chan2dev(chan), "number of items not supported\n");
+		return NULL;
+	}
+
+	/* Use circular mode for a single period, double-buffer mode otherwise */
+	if (buf_len == period_len)
+		chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
+	else
+		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
+
+	/* Clear periph ctrl if client set it */
+	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
+
+	num_periods = buf_len / period_len;
+
+	desc = stm32_dma_alloc_desc(num_periods);
+	if (!desc)
+		return NULL;
+
+	for (i = 0; i < num_periods; i++) {
+		desc->sg_req[i].len = period_len;
+
+		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
+		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
+		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
+		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
+		desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
+		desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
+		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
+		buf_addr += period_len;
+	}
+
+	desc->num_sgs = num_periods;
+	desc->cyclic = true;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
+	struct dma_chan *c, dma_addr_t dest,
+	dma_addr_t src, size_t len, unsigned long flags)
+{
+	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+	unsigned int num_sgs;
+	struct stm32_dma_desc *desc;
+	size_t xfer_count, offset;
+	int i;
+
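+	/*
+	 * NDTR is a 16-bit counter and memcpy uses byte-wide transfers, so
+	 * split the copy into chunks of at most STM32_DMA_MAX_DATA_ITEMS
+	 * bytes.
+	 */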
+	num_sgs = DIV_ROUND_UP(len, STM32_DMA_MAX_DATA_ITEMS);
+	desc = stm32_dma_alloc_desc(num_sgs);
+	if (!desc)
+		return NULL;
+
+	for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
+		xfer_count = min_t(size_t, len - offset,
+				   STM32_DMA_MAX_DATA_ITEMS);
+
+		desc->sg_req[i].len = xfer_count;
+
+		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
+		desc->sg_req[i].chan_reg.dma_scr =
+			STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
+			STM32_DMA_SCR_MINC |
+			STM32_DMA_SCR_PINC |
+			STM32_DMA_SCR_TCIE |
+			STM32_DMA_SCR_TEIE;
+		desc->sg_req[i].chan_reg.dma_sfcr = STM32_DMA_SFCR_DMDIS |
+			STM32_DMA_SFCR_FTH(STM32_DMA_FIFO_THRESHOLD_FULL) |
+			STM32_DMA_SFCR_FEIE;
+		desc->sg_req[i].chan_reg.dma_spar = src + offset;
+		desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
+		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
+	}
+
+	desc->num_sgs = num_sgs;
+	desc->cyclic = false;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
+				     struct stm32_dma_desc *desc,
+				     unsigned int next_sg)
+{
+	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
+	u32 dma_scr, width, residue, count;
+	unsigned int i;
+
+	residue = 0;
+
+	for (i = next_sg; i < desc->num_sgs; i++)
+		residue += desc->sg_req[i].len;
+
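+	/*
+	 * If a scatter element is in flight, NDTR holds the number of data
+	 * items still to be transferred; PSIZE encodes the item size as a
+	 * power of two, so "count << width" converts items to bytes.
+	 */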
+	if (next_sg != 0) {
+		dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+		width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
+		count = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
+
+		residue += count << width;
+	}
+
+	return residue;
+}
+
+static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
+					   dma_cookie_t cookie,
+					   struct dma_tx_state *state)
+{
+	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+	struct virt_dma_desc *vdesc;
+	enum dma_status status;
+	unsigned long flags;
+	unsigned int residue;
+
+	status = dma_cookie_status(c, cookie, state);
+	if (status == DMA_COMPLETE)
+		return status;
+
+	if (!state)
+		return chan->status;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
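+	/*
+	 * Report the full length for a descriptor still sitting in the
+	 * issued list, and only the remaining part for the descriptor
+	 * currently being transferred.
+	 */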
+	vdesc = vchan_find_desc(&chan->vchan, cookie);
+	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) {
+		residue = stm32_dma_desc_residue(chan, chan->desc,
+						 chan->next_sg);
+	} else if (vdesc) {
+		residue = stm32_dma_desc_residue(chan,
+						 to_stm32_dma_desc(vdesc), 0);
+	} else {
+		residue = 0;
+	}
+
+	dma_set_residue(state, residue);
+
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	return chan->status;
+}
+
+static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
+{
+	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
+	int ret;
+
+	chan->config_init = false;
+	ret = clk_prepare_enable(dmadev->clk);
+	if (ret < 0) {
+		dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
+		return ret;
+	}
+
+	ret = stm32_dma_disable_chan(chan);
+	if (ret < 0)
+		clk_disable_unprepare(dmadev->clk);
+
+	return ret;
+}
+
+static void stm32_dma_free_chan_resources(struct dma_chan *c)
+{
+	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+	struct stm32_dma_device *dmadev = stm32_dma_chan_get_dev(chan);
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);
+
+	if (chan->busy) {
+		spin_lock_irqsave(&chan->vchan.lock, flags);
+		stm32_dma_stop(chan);
+		chan->desc = NULL;
+		spin_unlock_irqrestore(&chan->vchan.lock, flags);
+	}
+
+	clk_disable_unprepare(dmadev->clk);
+
+	vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+	kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
+}
+
+static void stm32_dma_set_config(struct stm32_dma_chan *chan,
+				 struct stm32_dma_cfg *cfg)
+{
+	stm32_dma_clear_reg(&chan->chan_reg);
+	chan->chan_reg.dma_scr = cfg->stream_config &
+		STM32_DMA_STREAM_CFG_MASK;
+	chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line);
+	chan->chan_reg.dma_sfcr = cfg->fifo_config &
+		STM32_DMA_FIFO_CFG_MASK;
+}
+
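+/*
+ * A client is expected to pass a four-cell specifier, e.g. (with purely
+ * illustrative values):
+ *
+ *	dmas = <&dma2 4 6 0x10400 0x3>;
+ *
+ * where the cells are, in order: channel id, request line, stream
+ * configuration (SCR bits) and FIFO configuration (SFCR bits).
+ */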
+static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
+					   struct of_dma *ofdma)
+{
+	struct stm32_dma_device *dmadev = ofdma->of_dma_data;
+	struct stm32_dma_cfg cfg;
+	struct stm32_dma_chan *chan;
+	struct dma_chan *c;
+
+	if (dma_spec->args_count != STM32_DMA_MAX_DATA_PARAM)
+		return NULL;
+
+	cfg.channel_id = dma_spec->args[0];
+	cfg.request_line = dma_spec->args[1];
+	cfg.stream_config = dma_spec->args[2];
+	cfg.fifo_config = dma_spec->args[3];
+
+	if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
+	    cfg.request_line >= STM32_DMA_MAX_REQUEST_ID)
+		return NULL;
+
+	chan = &dmadev->chan[cfg.channel_id];
+
+	c = dma_get_slave_channel(&chan->vchan.chan);
+	if (c)
+		stm32_dma_set_config(chan, &cfg);
+
+	return c;
+}
+
+static const struct of_device_id stm32_dma_of_match[] = {
+	{ .compatible = "st,stm32-dma", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, stm32_dma_of_match);
+
+static int stm32_dma_probe(struct platform_device *pdev)
+{
+	struct stm32_dma_chan *chan;
+	struct stm32_dma_device *dmadev;
+	struct dma_device *dd;
+	const struct of_device_id *match;
+	unsigned int i;
+	struct resource *res;
+	int ret;
+
+	match = of_match_device(stm32_dma_of_match, &pdev->dev);
+	if (!match) {
+		dev_err(&pdev->dev, "Error: No device match found\n");
+		return -ENODEV;
+	}
+
+	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
+	if (!dmadev)
+		return -ENOMEM;
+
+	dd = &dmadev->ddev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dmadev->base))
+		return PTR_ERR(dmadev->base);
+
+	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(dmadev->clk)) {
+		dev_err(&pdev->dev, "Error: Missing controller clock\n");
+		return PTR_ERR(dmadev->clk);
+	}
+
+	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
+						"st,mem2mem");
+
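+	/* The reset line is optional; pulse it when one is provided */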
+	dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
+	if (!IS_ERR(dmadev->rst)) {
+		reset_control_assert(dmadev->rst);
+		udelay(2);
+		reset_control_deassert(dmadev->rst);
+	}
+
+	dma_cap_set(DMA_SLAVE, dd->cap_mask);
+	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+	dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
+	dd->device_free_chan_resources = stm32_dma_free_chan_resources;
+	dd->device_tx_status = stm32_dma_tx_status;
+	dd->device_issue_pending = stm32_dma_issue_pending;
+	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
+	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
+	dd->device_config = stm32_dma_slave_config;
+	dd->device_terminate_all = stm32_dma_terminate_all;
+	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+	dd->dev = &pdev->dev;
+	INIT_LIST_HEAD(&dd->channels);
+
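+	/* Only advertise memcpy when the DT opts in via "st,mem2mem" */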
+	if (dmadev->mem2mem) {
+		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+		dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
+		dd->directions |= BIT(DMA_MEM_TO_MEM);
+	}
+
+	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
+		chan = &dmadev->chan[i];
+		chan->id = i;
+		chan->vchan.desc_free = stm32_dma_desc_free;
+		vchan_init(&chan->vchan, dd);
+	}
+
+	ret = dma_async_device_register(dd);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
+		chan = &dmadev->chan[i];
+		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+		if (!res) {
+			ret = -EINVAL;
+			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
+			goto err_unregister;
+		}
+		chan->irq = res->start;
+		ret = devm_request_irq(&pdev->dev, chan->irq,
+				       stm32_dma_chan_irq, 0,
+				       dev_name(chan2dev(chan)), chan);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"request_irq failed with err %d channel %d\n",
+				ret, i);
+			goto err_unregister;
+		}
+	}
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+					 stm32_dma_of_xlate, dmadev);
+	if (ret < 0) {
+		dev_err(&pdev->dev,
+			"STM32 DMA DMA OF registration failed %d\n", ret);
+		goto err_unregister;
+	}
+
+	platform_set_drvdata(pdev, dmadev);
+
+	dev_info(&pdev->dev, "STM32 DMA driver registered\n");
+
+	return 0;
+
+err_unregister:
+	dma_async_device_unregister(dd);
+
+	return ret;
+}
+
+static int stm32_dma_remove(struct platform_device *pdev)
+{
+	struct stm32_dma_device *dmadev = platform_get_drvdata(pdev);
+
+	of_dma_controller_free(pdev->dev.of_node);
+
+	dma_async_device_unregister(&dmadev->ddev);
+
+	clk_disable_unprepare(dmadev->clk);
+
+	return 0;
+}
+
+static struct platform_driver stm32_dma_driver = {
+	.probe = stm32_dma_probe,
+	.remove = stm32_dma_remove,
+	.driver = {
+		.name = "stm32-dma",
+		.of_match_table = stm32_dma_of_match,
+	},
+};
+module_platform_driver(stm32_dma_driver);
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
+MODULE_DESCRIPTION("STM32 DMA driver");
+MODULE_LICENSE("GPL v2");