diff mbox

[03/18] dmaengine: st_fdma: Add STMicroelectronics FDMA engine driver support

Message ID 1461236675-10176-4-git-send-email-peter.griffin@linaro.org (mailing list archive)
State New, archived
Headers show

Commit Message

Peter Griffin April 21, 2016, 11:04 a.m. UTC
This patch adds support for the Flexible Direct Memory Access (FDMA) core
driver. The FDMA is a slim core CPU with a dedicated firmware.
It is a general purpose DMA controller capable of supporting 16
independent DMA channels. Data moves may be from memory to memory
or between memory and paced, latency-critical real-time targets, and it
is found on all STi based chipsets.

Signed-off-by: Ludovic Barre <ludovic.barre@st.com>
Signed-off-by: Peter Griffin <peter.griffin@linaro.org>
---
 drivers/dma/Kconfig   |  12 +
 drivers/dma/Makefile  |   1 +
 drivers/dma/st_fdma.c | 967 ++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 980 insertions(+)
 create mode 100644 drivers/dma/st_fdma.c

Comments

Arnd Bergmann April 21, 2016, 11:24 a.m. UTC | #1
On Thursday 21 April 2016 12:04:20 Peter Griffin wrote:
> This patch adds support for the Flexible Direct Memory Access (FDMA) core
> driver. The FDMA is a slim core CPU with a dedicated firmware.
> It is a general purpose DMA controller capable of supporting 16
> independent DMA channels. Data moves may be from memory to memory
> or between memory and paced, latency-critical real-time targets, and it
> is found on all STi based chipsets.
> 
> Signed-off-by: Ludovic Barre <ludovic.barre@st.com>
> Signed-off-by: Peter Griffin <peter.griffin@linaro.org>
> ---
>  drivers/dma/Kconfig   |  12 +
>  drivers/dma/Makefile  |   1 +
>  drivers/dma/st_fdma.c | 967 ++++++++++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 980 insertions(+)
>  create mode 100644 drivers/dma/st_fdma.c
> 
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index d96d87c..5910c4f 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -527,6 +527,18 @@ config ZX_DMA
>  	help
>  	  Support the DMA engine for ZTE ZX296702 platform devices.
>  
> +config ST_FDMA
> +	tristate "ST FDMA dmaengine support"
> +	depends on ARCH_STI

Try to ensure that the driver builds on x86 (possibly adding
further dependencies if needed), then make this

	depends on ARCH_STI || COMPILE_TEST

to get better build coverage.
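
For example, the revised entry might look like this (a sketch only,
assuming no extra dependencies turn out to be needed for the
COMPILE_TEST build):

	config ST_FDMA
		tristate "ST FDMA dmaengine support"
		depends on ARCH_STI || COMPILE_TEST
		select DMA_ENGINE
		select FW_LOADER
		select DMA_VIRTUAL_CHANNELS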

> +static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
> +					 struct of_dma *ofdma)
> +{
> +	struct st_fdma_dev *fdev = ofdma->of_dma_data;
> +	struct st_fdma_cfg cfg;
> +
> +	if (dma_spec->args_count < 1)
> +		return NULL;
> +
> +	cfg.of_node = dma_spec->np;
> +	cfg.req_line = dma_spec->args[0];
> +	cfg.req_ctrl = 0;
> +	cfg.type = ST_FDMA_TYPE_FREE_RUN;
> +
> +	if (dma_spec->args_count > 1)
> +		cfg.req_ctrl = dma_spec->args[1] & REQ_CTRL_CFG_MASK;
> +
> +	if (dma_spec->args_count > 2)
> +		cfg.type = dma_spec->args[2];
> +
> +	dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n",
> +		cfg.req_line, cfg.type, cfg.req_ctrl);
> +
> +	return dma_request_channel(fdev->dma_device.cap_mask,
> +			st_fdma_filter_fn, &cfg);
> +}

No need to look at all DMA channels in the system here with
dma_request_channel(). Just call dma_get_any_slave_channel()
to get the first available channel for this engine, then
set the configuration right in that channel data while parsing
the DT properties.
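
Something along these lines perhaps (an untested sketch, reusing the
st_fdma_cfg parsing already in this patch):

	static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
						 struct of_dma *ofdma)
	{
		struct st_fdma_dev *fdev = ofdma->of_dma_data;
		struct dma_chan *chan;
		struct st_fdma_chan *fchan;

		if (dma_spec->args_count < 1)
			return NULL;

		/* only consider channels belonging to this engine */
		chan = dma_get_any_slave_channel(&fdev->dma_device);
		if (!chan)
			return NULL;

		/* set the configuration directly while parsing the DT args */
		fchan = to_st_fdma_chan(chan);
		fchan->cfg.of_node = dma_spec->np;
		fchan->cfg.req_line = dma_spec->args[0];
		fchan->cfg.req_ctrl = 0;
		fchan->cfg.type = ST_FDMA_TYPE_FREE_RUN;

		if (dma_spec->args_count > 1)
			fchan->cfg.req_ctrl = dma_spec->args[1] & REQ_CTRL_CFG_MASK;

		if (dma_spec->args_count > 2)
			fchan->cfg.type = dma_spec->args[2];

		return chan;
	}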

	Arnd
Appana Durga Kedareswara rao April 21, 2016, 11:26 a.m. UTC | #2
> -----Original Message-----
> From: dmaengine-owner@vger.kernel.org [mailto:dmaengine-
> owner@vger.kernel.org] On Behalf Of Peter Griffin
> Sent: Thursday, April 21, 2016 4:34 PM
> To: linux-arm-kernel@lists.infradead.org; linux-kernel@vger.kernel.org;
> srinivas.kandagatla@gmail.com; maxime.coquelin@st.com;
> patrice.chotard@st.com; vinod.koul@intel.com
> Cc: peter.griffin@linaro.org; lee.jones@linaro.org;
> dmaengine@vger.kernel.org; devicetree@vger.kernel.org; arnd@arndb.de;
> broonie@kernel.org; ludovic.barre@st.com
> Subject: [PATCH 03/18] dmaengine: st_fdma: Add STMicroelectronics FDMA
> engine driver support
> 
> This patch adds support for the Flexible Direct Memory Access (FDMA) core
> driver. The FDMA is a slim core CPU with a dedicated firmware.
> It is a general purpose DMA controller capable of supporting 16
> independent DMA channels. Data moves may be from memory to memory
> or between memory and paced, latency-critical real-time targets, and it
> is found on all STi based chipsets.
> 
> Signed-off-by: Ludovic Barre <ludovic.barre@st.com>
> Signed-off-by: Peter Griffin <peter.griffin@linaro.org>
> ---
>  drivers/dma/Kconfig   |  12 +
>  drivers/dma/Makefile  |   1 +
>  drivers/dma/st_fdma.c | 967 ++++++++++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 980 insertions(+)
>  create mode 100644 drivers/dma/st_fdma.c
> 
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index d96d87c..5910c4f 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -527,6 +527,18 @@ config ZX_DMA
>  	help
>  	  Support the DMA engine for ZTE ZX296702 platform devices.
> 
> +config ST_FDMA
> +	tristate "ST FDMA dmaengine support"
> +	depends on ARCH_STI
> +	select DMA_ENGINE
> +	select FW_LOADER
> +	select DMA_VIRTUAL_CHANNELS
> +	help
> +	  Enable support for ST FDMA controller.
> +	  It supports 16 independent DMA channels, accepts up to 32 DMA requests
> +
> +	  Say Y here if you have such a chipset.
> +	  If unsure, say N.
> 
>  # driver files
>  source "drivers/dma/bestcomm/Kconfig"
> diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
> index 6084127..b81ca99 100644
> --- a/drivers/dma/Makefile
> +++ b/drivers/dma/Makefile
> @@ -65,6 +65,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
>  obj-$(CONFIG_TI_EDMA) += edma.o
>  obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
>  obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
> +obj-$(CONFIG_ST_FDMA) += st_fdma.o
> 
>  obj-y += qcom/
>  obj-y += xilinx/
> diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
> new file mode 100644
> index 0000000..9bf0100
> --- /dev/null
> +++ b/drivers/dma/st_fdma.c
> @@ -0,0 +1,967 @@
> +/*
> + * st_fdma.c
> + *
> + * Copyright (C) 2014 STMicroelectronics
> + * Author: Ludovic Barre <Ludovic.barre@st.com>
> + * License terms:  GNU General Public License (GPL), version 2
> + */
> +#include <linux/init.h>
> +#include <linux/module.h>
> +#include <linux/slab.h>
> +#include <linux/of.h>
> +#include <linux/of_device.h>
> +#include <linux/of_dma.h>
> +#include <linux/platform_device.h>
> +#include <linux/interrupt.h>
> +#include <linux/clk.h>
> +#include <linux/dmaengine.h>
> +#include <linux/dmapool.h>
> +#include <linux/firmware.h>
> +#include <linux/elf.h>
> +#include <linux/atomic.h>
> +
> +#include "st_fdma.h"
> +#include "dmaengine.h"
> +#include "virt-dma.h"
> +
> +static char *fdma_clk_name[CLK_MAX_NUM] = {
> +	[CLK_SLIM]	= "fdma_slim",
> +	[CLK_HI]	= "fdma_hi",
> +	[CLK_LOW]	= "fdma_low",
> +	[CLK_IC]	= "fdma_ic",
> +};
> +
> +static int st_fdma_clk_get(struct st_fdma_dev *fdev)
> +{
> +	int i;
> +
> +	for (i = 0; i < CLK_MAX_NUM; i++) {
> +		fdev->clks[i] = devm_clk_get(fdev->dev, fdma_clk_name[i]);
> +		if (IS_ERR(fdev->clks[i])) {
> +			dev_err(fdev->dev,
> +				"failed to get clock: %s\n", fdma_clk_name[i]);
> +			return PTR_ERR(fdev->clks[i]);
> +		}
> +	}
> +
> +	if (i != CLK_MAX_NUM) {
> +		dev_err(fdev->dev, "not all clocks are defined\n");
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int st_fdma_clk_enable(struct st_fdma_dev *fdev)
> +{
> +	int i, ret;
> +
> +	for (i = 0; i < CLK_MAX_NUM; i++) {
> +		ret = clk_prepare_enable(fdev->clks[i]);
> +		if (ret < 0)

You should disable and unprepare the other clocks...
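
Something like this unwind would do it (an untested sketch against the
existing clks[] array):

	static int st_fdma_clk_enable(struct st_fdma_dev *fdev)
	{
		int i, ret;

		for (i = 0; i < CLK_MAX_NUM; i++) {
			ret = clk_prepare_enable(fdev->clks[i]);
			if (ret < 0)
				goto err;
		}

		return 0;

	err:
		/* disable and unprepare the clocks already enabled */
		while (--i >= 0)
			clk_disable_unprepare(fdev->clks[i]);

		return ret;
	}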

Kedar...

> +			return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +static void st_fdma_clk_disable(struct st_fdma_dev *fdev)
> +{
> +	int i;
> +
> +	for (i = 0; i < CLK_MAX_NUM; i++)
> +		clk_disable_unprepare(fdev->clks[i]);
> +}
> +
> +static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c)
> +{
> +	return container_of(c, struct st_fdma_chan, vchan.chan);
> +}
> +
> +static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd)
> +{
> +	return container_of(vd, struct st_fdma_desc, vdesc);
> +}
> +
> +static void st_fdma_enable(struct st_fdma_dev *fdev)
> +{
> +	unsigned long hw_id, hw_ver, fw_rev;
> +	u32 val;
> +
> +	/* disable CPU pipeline clock & reset cpu pipeline */
> +	val = FDMA_CLK_GATE_DIS | FDMA_CLK_GATE_RESET;
> +	fdma_write(fdev, val, CLK_GATE);
> +	/* disable SLIM core STBus sync */
> +	fdma_write(fdev, FDMA_STBUS_SYNC_DIS, STBUS_SYNC);
> +	/* enable cpu pipeline clock */
> +	fdma_write(fdev, !FDMA_CLK_GATE_DIS, CLK_GATE);
> +	/* clear int & cmd mailbox */
> +	fdma_write(fdev, ~0UL, INT_CLR);
> +	fdma_write(fdev, ~0UL, CMD_CLR);
> +	/* enable all channels cmd & int */
> +	fdma_write(fdev, ~0UL, INT_MASK);
> +	fdma_write(fdev, ~0UL, CMD_MASK);
> +	/* enable cpu */
> +	writel(FDMA_EN_RUN, fdev->io_base + FDMA_EN_OFST);
> +
> +	hw_id = fdma_read(fdev, ID);
> +	hw_ver = fdma_read(fdev, VER);
> +	fw_rev = fdma_read(fdev, REV_ID);
> +
> +	dev_info(fdev->dev, "fw rev:%ld.%ld on SLIM %ld.%ld\n",
> +		 FDMA_REV_ID_MAJ(fw_rev), FDMA_REV_ID_MIN(fw_rev),
> +		 hw_id, hw_ver);
> +}
> +
> +static int st_fdma_disable(struct st_fdma_dev *fdev)
> +{
> +	/* mask all (cmd & int) channels */
> +	fdma_write(fdev, 0UL, INT_MASK);
> +	fdma_write(fdev, 0UL, CMD_MASK);
> +	/* disable cpu pipeline clock */
> +	fdma_write(fdev, FDMA_CLK_GATE_DIS, CLK_GATE);
> +	writel(!FDMA_EN_RUN, fdev->io_base + FDMA_EN_OFST);
> +
> +	return readl(fdev->io_base + FDMA_EN_OFST);
> +}
> +
> +static int st_fdma_dreq_get(struct st_fdma_chan *fchan)
> +{
> +	struct st_fdma_dev *fdev = fchan->fdev;
> +	u32 req_line_cfg = fchan->cfg.req_line;
> +	u32 dreq_line;
> +	int try = 0;
> +
> +	/*
> +	 * dreq_mask is shared for n channels of fdma, so all accesses must be
> +	 * atomic. If the dreq_mask changes between ffz and set_bit,
> +	 * we retry.
> +	 */
> +	do {
> +		if (fdev->dreq_mask == ~0L) {
> +			dev_err(fdev->dev, "No req lines available\n");
> +			return -EINVAL;
> +		}
> +
> +		if (try || req_line_cfg >= ST_FDMA_NR_DREQS) {
> +			dev_err(fdev->dev, "Invalid or used req line\n");
> +			return -EINVAL;
> +		} else {
> +			dreq_line = req_line_cfg;
> +		}
> +
> +		try++;
> +	} while (test_and_set_bit(dreq_line, &fdev->dreq_mask));
> +
> +	dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n",
> +		dreq_line, fdev->dreq_mask);
> +
> +	return dreq_line;
> +}
> +
> +static void st_fdma_dreq_put(struct st_fdma_chan *fchan)
> +{
> +	struct st_fdma_dev *fdev = fchan->fdev;
> +
> +	dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line);
> +	clear_bit(fchan->dreq_line, &fdev->dreq_mask);
> +}
> +
> +static void st_fdma_xfer_desc(struct st_fdma_chan *fchan)
> +{
> +	struct virt_dma_desc *vdesc;
> +	unsigned long nbytes, ch_cmd, cmd;
> +
> +	vdesc = vchan_next_desc(&fchan->vchan);
> +	if (!vdesc)
> +		return;
> +
> +	fchan->fdesc = to_st_fdma_desc(vdesc);
> +	nbytes = fchan->fdesc->node[0].desc->nbytes;
> +	cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id);
> +	ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START;
> +
> +	/* start the channel for the descriptor */
> +	fnode_write(fchan, nbytes, CNTN);
> +	fchan_write(fchan, ch_cmd, CH_CMD);
> +	writel(cmd, fchan->fdev->io_base + FDMA_CMD_SET_OFST);
> +
> +	dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id);
> +}
> +
> +static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan,
> +				  unsigned long int_sta)
> +{
> +	unsigned long ch_sta, ch_err;
> +	int ch_id = fchan->vchan.chan.chan_id;
> +	struct st_fdma_dev *fdev = fchan->fdev;
> +
> +	ch_sta = fchan_read(fchan, CH_CMD);
> +	ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK;
> +	ch_sta &= FDMA_CH_CMD_STA_MASK;
> +
> +	if (int_sta & FDMA_INT_STA_ERR) {
> +		dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err);
> +		fchan->status = DMA_ERROR;
> +		return;
> +	}
> +
> +	switch (ch_sta) {
> +	case FDMA_CH_CMD_STA_PAUSED:
> +		fchan->status = DMA_PAUSED;
> +		break;
> +	case FDMA_CH_CMD_STA_RUNNING:
> +		fchan->status = DMA_IN_PROGRESS;
> +		break;
> +	}
> +}
> +
> +static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id)
> +{
> +	struct st_fdma_dev *fdev = dev_id;
> +	irqreturn_t ret = IRQ_NONE;
> +	struct st_fdma_chan *fchan = &fdev->chans[0];
> +	unsigned long int_sta, clr;
> +
> +	int_sta = fdma_read(fdev, INT_STA);
> +	clr = int_sta;
> +
> +	for (; int_sta != 0 ; int_sta >>= 2, fchan++) {
> +		if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR)))
> +			continue;
> +
> +		spin_lock(&fchan->vchan.lock);
> +		st_fdma_ch_sta_update(fchan, int_sta);
> +
> +		if (fchan->fdesc) {
> +			if (!fchan->fdesc->iscyclic) {
> +				list_del(&fchan->fdesc->vdesc.node);
> +				vchan_cookie_complete(&fchan->fdesc->vdesc);
> +				fchan->fdesc = NULL;
> +				fchan->status = DMA_COMPLETE;
> +			} else {
> +				vchan_cyclic_callback(&fchan->fdesc->vdesc);
> +			}
> +
> +			/* Start the next descriptor (if available) */
> +			if (!fchan->fdesc)
> +				st_fdma_xfer_desc(fchan);
> +		}
> +
> +		spin_unlock(&fchan->vchan.lock);
> +		ret = IRQ_HANDLED;
> +	}
> +
> +	fdma_write(fdev, clr, INT_CLR);
> +
> +	return ret;
> +}
> +
> +static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
> +					 struct of_dma *ofdma)
> +{
> +	struct st_fdma_dev *fdev = ofdma->of_dma_data;
> +	struct st_fdma_cfg cfg;
> +
> +	if (dma_spec->args_count < 1)
> +		return NULL;
> +
> +	cfg.of_node = dma_spec->np;
> +	cfg.req_line = dma_spec->args[0];
> +	cfg.req_ctrl = 0;
> +	cfg.type = ST_FDMA_TYPE_FREE_RUN;
> +
> +	if (dma_spec->args_count > 1)
> +		cfg.req_ctrl = dma_spec->args[1] & REQ_CTRL_CFG_MASK;
> +
> +	if (dma_spec->args_count > 2)
> +		cfg.type = dma_spec->args[2];
> +
> +	dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n",
> +		cfg.req_line, cfg.type, cfg.req_ctrl);
> +
> +	return dma_request_channel(fdev->dma_device.cap_mask,
> +			st_fdma_filter_fn, &cfg);
> +}
> +
> +static void st_fdma_free_desc(struct virt_dma_desc *vdesc)
> +{
> +	struct st_fdma_desc *fdesc;
> +	int i;
> +
> +	fdesc = to_st_fdma_desc(vdesc);
> +	for (i = 0; i < fdesc->n_nodes; i++)
> +			dma_pool_free(fdesc->fchan->node_pool,
> +				      fdesc->node[i].desc,
> +				      fdesc->node[i].pdesc);
> +	kfree(fdesc);
> +}
> +
> +static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
> +					       int sg_len)
> +{
> +	struct st_fdma_desc *fdesc;
> +	int i;
> +
> +	fdesc = kzalloc(sizeof(*fdesc) +
> +			sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT);
> +	if (!fdesc)
> +		return NULL;
> +
> +	fdesc->fchan = fchan;
> +	fdesc->n_nodes = sg_len;
> +	for (i = 0; i < sg_len; i++) {
> +		fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
> +				GFP_NOWAIT, &fdesc->node[i].pdesc);
> +		if (!fdesc->node[i].desc)
> +			goto err;
> +	}
> +	return fdesc;
> +
> +err:
> +	while (--i >= 0)
> +		dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
> +			      fdesc->node[i].pdesc);
> +	kfree(fdesc);
> +	return NULL;
> +}
> +
> +static int st_fdma_alloc_chan_res(struct dma_chan *chan)
> +{
> +	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
> +
> +	if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) {
> +		fchan->dreq_line = 0;
> +	} else {
> +		fchan->dreq_line = st_fdma_dreq_get(fchan);
> +		if (IS_ERR_VALUE(fchan->dreq_line))
> +			return -EINVAL;
> +	}
> +
> +	/* Create the dma pool for descriptor allocation */
> +	fchan->node_pool = dmam_pool_create(dev_name(&chan->dev->device),
> +					    fchan->fdev->dev,
> +					    sizeof(struct st_fdma_hw_node),
> +					    __alignof__(struct st_fdma_hw_node),
> +					    0);
> +
> +	if (!fchan->node_pool) {
> +		dev_err(fchan->fdev->dev, "unable to allocate desc pool\n");
> +		return -ENOMEM;
> +	}
> +
> +	dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n",
> +		fchan->vchan.chan.chan_id, fchan->cfg.type);
> +
> +	return 0;
> +}
> +
> +static void st_fdma_free_chan_res(struct dma_chan *chan)
> +{
> +	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
> +	unsigned long flags;
> +	LIST_HEAD(head);
> +
> +	dev_dbg(fchan->fdev->dev, "freeing chan:%d\n",
> +		fchan->vchan.chan.chan_id);
> +
> +	if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN)
> +		st_fdma_dreq_put(fchan);
> +
> +	spin_lock_irqsave(&fchan->vchan.lock, flags);
> +	fchan->fdesc = NULL;
> +	vchan_get_all_descriptors(&fchan->vchan, &head);
> +	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
> +
> +	dma_pool_destroy(fchan->node_pool);
> +	fchan->node_pool = NULL;
> +	memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg));
> +}
> +
> +static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy(
> +	struct dma_chan *chan,	dma_addr_t dst, dma_addr_t src,
> +	size_t len, unsigned long flags)
> +{
> +	struct st_fdma_chan *fchan;
> +	struct st_fdma_desc *fdesc;
> +	struct st_fdma_hw_node *hw_node;
> +
> +	if (!len)
> +		return NULL;
> +
> +	fchan = to_st_fdma_chan(chan);
> +
> +	if (!atomic_read(&fchan->fdev->fw_loaded)) {
> +		dev_err(fchan->fdev->dev, "%s: fdma fw not loaded\n", __func__);
> +		return NULL;
> +	}
> +
> +	/* We only require a single descriptor */
> +	fdesc = st_fdma_alloc_desc(fchan, 1);
> +	if (!fdesc) {
> +		dev_err(fchan->fdev->dev, "no memory for desc\n");
> +		return NULL;
> +	}
> +
> +	hw_node = fdesc->node[0].desc;
> +	hw_node->next = 0;
> +	hw_node->control = NODE_CTRL_REQ_MAP_FREE_RUN;
> +	hw_node->control |= NODE_CTRL_SRC_INCR;
> +	hw_node->control |= NODE_CTRL_DST_INCR;
> +	hw_node->control |= NODE_CTRL_INT_EON;
> +	hw_node->nbytes = len;
> +	hw_node->saddr = src;
> +	hw_node->daddr = dst;
> +	hw_node->generic.length = len;
> +	hw_node->generic.sstride = 0;
> +	hw_node->generic.dstride = 0;
> +
> +	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
> +}
> +
> +static int config_reqctrl(struct st_fdma_chan *fchan,
> +			  enum dma_transfer_direction direction)
> +{
> +	u32 maxburst = 0, addr = 0;
> +	enum dma_slave_buswidth width;
> +	int ch_id = fchan->vchan.chan.chan_id;
> +	struct st_fdma_dev *fdev = fchan->fdev;
> +
> +	if (direction == DMA_DEV_TO_MEM) {
> +		fchan->cfg.req_ctrl &= ~REQ_CTRL_WNR;
> +		maxburst = fchan->scfg.src_maxburst;
> +		width = fchan->scfg.src_addr_width;
> +		addr = fchan->scfg.src_addr;
> +	} else if (direction == DMA_MEM_TO_DEV) {
> +		fchan->cfg.req_ctrl |= REQ_CTRL_WNR;
> +		maxburst = fchan->scfg.dst_maxburst;
> +		width = fchan->scfg.dst_addr_width;
> +		addr = fchan->scfg.dst_addr;
> +	} else {
> +		return -EINVAL;
> +	}
> +
> +	fchan->cfg.req_ctrl &= ~REQ_CTRL_OPCODE_MASK;
> +	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
> +		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST1;
> +	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
> +		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST2;
> +	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
> +		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST4;
> +	else if (width == DMA_SLAVE_BUSWIDTH_8_BYTES)
> +		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST8;
> +	else
> +		return -EINVAL;
> +
> +	fchan->cfg.req_ctrl &= ~REQ_CTRL_NUM_OPS_MASK;
> +	fchan->cfg.req_ctrl |= REQ_CTRL_NUM_OPS(maxburst-1);
> +	dreq_write(fchan, fchan->cfg.req_ctrl, REQ_CTRL);
> +
> +	fchan->cfg.dev_addr = addr;
> +	fchan->cfg.dir = direction;
> +
> +	dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n",
> +		ch_id, addr, fchan->cfg.req_ctrl);
> +
> +	return 0;
> +}
> +
> +static void fill_hw_node(struct st_fdma_hw_node *hw_node,
> +			struct st_fdma_chan *fchan,
> +			enum dma_transfer_direction direction)
> +{
> +
> +	if (direction == DMA_MEM_TO_DEV) {
> +		hw_node->control |= NODE_CTRL_SRC_INCR;
> +		hw_node->control |= NODE_CTRL_DST_STATIC;
> +		hw_node->daddr = fchan->cfg.dev_addr;
> +	} else {
> +		hw_node->control |= NODE_CTRL_SRC_STATIC;
> +		hw_node->control |= NODE_CTRL_DST_INCR;
> +		hw_node->saddr = fchan->cfg.dev_addr;
> +	}
> +	hw_node->generic.sstride = 0;
> +	hw_node->generic.dstride = 0;
> +}
> +
> +static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic(
> +		struct dma_chan *chan, dma_addr_t buf_addr, size_t len,
> +		size_t period_len, enum dma_transfer_direction direction,
> +		unsigned long flags)
> +{
> +	struct st_fdma_chan *fchan;
> +	struct st_fdma_desc *fdesc;
> +	int sg_len, i;
> +
> +	if (!chan || !len || !period_len)
> +		return NULL;
> +
> +	fchan = to_st_fdma_chan(chan);
> +
> +	if (!atomic_read(&fchan->fdev->fw_loaded)) {
> +		dev_err(fchan->fdev->dev, "%s: fdma fw not loaded\n", __func__);
> +		return NULL;
> +	}
> +
> +	if (!is_slave_direction(direction)) {
> +		dev_err(fchan->fdev->dev, "bad direction?\n");
> +		return NULL;
> +	}
> +
> +	if (config_reqctrl(fchan, direction)) {
> +		dev_err(fchan->fdev->dev, "bad width or direction\n");
> +		return NULL;
> +	}
> +
> +	/* the buffer length must be a multiple of period_len */
> +	if (len % period_len != 0) {
> +		dev_err(fchan->fdev->dev, "len is not multiple of period\n");
> +		return NULL;
> +	}
> +
> +	sg_len = len / period_len;
> +	fdesc = st_fdma_alloc_desc(fchan, sg_len);
> +	if (!fdesc) {
> +		dev_err(fchan->fdev->dev, "no memory for desc\n");
> +		return NULL;
> +	}
> +
> +	fdesc->iscyclic = true;
> +
> +	for (i = 0; i < sg_len; i++) {
> +		struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;
> +
> +		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
> +
> +		hw_node->control = NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
> +		hw_node->control |= NODE_CTRL_INT_EON;
> +
> +
> +		fill_hw_node(hw_node, fchan, direction);
> +
> +		if (direction == DMA_MEM_TO_DEV)
> +			hw_node->saddr = buf_addr + (i * period_len);
> +		else
> +			hw_node->daddr = buf_addr + (i * period_len);
> +
> +		hw_node->nbytes = period_len;
> +		hw_node->generic.length = period_len;
> +	}
> +
> +	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
> +}
> +
> +static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg(
> +		struct dma_chan *chan, struct scatterlist *sgl,
> +		unsigned int sg_len, enum dma_transfer_direction direction,
> +		unsigned long flags, void *context)
> +{
> +	struct st_fdma_chan *fchan;
> +	struct st_fdma_desc *fdesc;
> +	struct st_fdma_hw_node *hw_node;
> +	struct scatterlist *sg;
> +	int i;
> +
> +	if (!chan || !sgl || !sg_len)
> +		return NULL;
> +
> +	fchan = to_st_fdma_chan(chan);
> +
> +	if (!atomic_read(&fchan->fdev->fw_loaded)) {
> +		dev_err(fchan->fdev->dev, "%s: fdma fw not loaded\n", __func__);
> +		return NULL;
> +	}
> +
> +	if (!is_slave_direction(direction)) {
> +		dev_err(fchan->fdev->dev, "bad direction?\n");
> +		return NULL;
> +	}
> +
> +	fdesc = st_fdma_alloc_desc(fchan, sg_len);
> +	if (!fdesc) {
> +		dev_err(fchan->fdev->dev, "no memory for desc\n");
> +		return NULL;
> +	}
> +
> +	fdesc->iscyclic = false;
> +
> +	for_each_sg(sgl, sg, sg_len, i) {
> +		hw_node = fdesc->node[i].desc;
> +
> +		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
> +		hw_node->control = NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
> +
> +		fill_hw_node(hw_node, fchan, direction);
> +
> +		if (direction == DMA_MEM_TO_DEV)
> +			hw_node->saddr = sg_dma_address(sg);
> +		else
> +			hw_node->daddr = sg_dma_address(sg);
> +
> +		hw_node->nbytes = sg_dma_len(sg);
> +		hw_node->generic.length = sg_dma_len(sg);
> +	}
> +
> +	/* interrupt at end of last node */
> +	hw_node->control |= NODE_CTRL_INT_EON;
> +
> +	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
> +}
> +
> +static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan,
> +				   struct virt_dma_desc *vdesc,
> +				   bool in_progress)
> +{
> +	struct st_fdma_desc *fdesc = fchan->fdesc;
> +	size_t residue = 0;
> +	dma_addr_t cur_addr = 0;
> +	int i;
> +
> +	if (in_progress) {
> +		cur_addr = fchan_read(fchan, CH_CMD);
> +		cur_addr &= FDMA_CH_CMD_DATA_MASK;
> +	}
> +
> +	for (i = fchan->fdesc->n_nodes - 1 ; i >= 0; i--) {
> +		if (cur_addr == fdesc->node[i].pdesc) {
> +			residue += fnode_read(fchan, CNTN);
> +			break;
> +		}
> +		residue += fdesc->node[i].desc->nbytes;
> +	}
> +
> +	return residue;
> +}
> +
> +static enum dma_status st_fdma_tx_status(struct dma_chan *chan,
> +					 dma_cookie_t cookie,
> +					 struct dma_tx_state *txstate)
> +{
> +	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
> +	struct virt_dma_desc *vd;
> +	enum dma_status ret;
> +	unsigned long flags;
> +
> +	ret = dma_cookie_status(chan, cookie, txstate);
> +	if (ret == DMA_COMPLETE)
> +		return ret;
> +
> +	if (!txstate)
> +		return fchan->status;
> +
> +	spin_lock_irqsave(&fchan->vchan.lock, flags);
> +	vd = vchan_find_desc(&fchan->vchan, cookie);
> +	if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie)
> +		txstate->residue = st_fdma_desc_residue(fchan, vd, true);
> +	else if (vd)
> +		txstate->residue = st_fdma_desc_residue(fchan, vd, false);
> +	else
> +		txstate->residue = 0;
> +
> +	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
> +
> +	return fchan->status;
> +}
> +
> +static void st_fdma_issue_pending(struct dma_chan *chan)
> +{
> +	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&fchan->vchan.lock, flags);
> +
> +	if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc)
> +		st_fdma_xfer_desc(fchan);
> +
> +	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
> +}
> +
> +static int st_fdma_pause(struct dma_chan *chan)
> +{
> +	unsigned long flags;
> +	LIST_HEAD(head);
> +	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
> +	int ch_id = fchan->vchan.chan.chan_id;
> +	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
> +
> +	dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id);
> +
> +	spin_lock_irqsave(&fchan->vchan.lock, flags);
> +	if (fchan->fdesc)
> +		fdma_write(fchan->fdev, cmd, CMD_SET);
> +	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
> +
> +	return 0;
> +}
> +
> +static int st_fdma_resume(struct dma_chan *chan)
> +{
> +	unsigned long flags;
> +	unsigned long val;
> +	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
> +	int ch_id = fchan->vchan.chan.chan_id;
> +
> +	dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id);
> +
> +	spin_lock_irqsave(&fchan->vchan.lock, flags);
> +	if (fchan->fdesc) {
> +		val = fchan_read(fchan, CH_CMD);
> +		val &= FDMA_CH_CMD_DATA_MASK;
> +		fchan_write(fchan, val, CH_CMD);
> +	}
> +	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
> +
> +	return 0;
> +}
> +
> +static int st_fdma_terminate_all(struct dma_chan *chan)
> +{
> +	unsigned long flags;
> +	LIST_HEAD(head);
> +	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
> +	int ch_id = fchan->vchan.chan.chan_id;
> +	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
> +
> +	dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id);
> +
> +	spin_lock_irqsave(&fchan->vchan.lock, flags);
> +	fdma_write(fchan->fdev, cmd, CMD_SET);
> +	fchan->fdesc = NULL;
> +	vchan_get_all_descriptors(&fchan->vchan, &head);
> +	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
> +	vchan_dma_desc_free_list(&fchan->vchan, &head);
> +
> +	return 0;
> +}
> +
> +static int st_fdma_slave_config(struct dma_chan *chan,
> +				struct dma_slave_config *slave_cfg)
> +{
> +	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
> +	memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg));
> +	return 0;
> +}
> +
> +static const struct st_fdma_ram fdma_mpe31_mem[] = {
> +	{ .name = "dmem", .offset = 0x10000, .size = 0x3000 },
> +	{ .name = "imem", .offset = 0x18000, .size = 0x8000 },
> +};
> +
> +static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = {
> +	.fdma_mem = fdma_mpe31_mem,
> +	.num_mem = ARRAY_SIZE(fdma_mpe31_mem),
> +	.name = "STiH407",
> +	.id = 0,
> +};
> +
> +static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = {
> +	.fdma_mem = fdma_mpe31_mem,
> +	.num_mem = ARRAY_SIZE(fdma_mpe31_mem),
> +	.name = "STiH407",
> +	.id = 1,
> +};
> +
> +static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = {
> +	.fdma_mem = fdma_mpe31_mem,
> +	.num_mem = ARRAY_SIZE(fdma_mpe31_mem),
> +	.name = "STiH407",
> +	.id = 2,
> +};
> +
> +static const struct of_device_id st_fdma_match[] = {
> +	{ .compatible = "st,stih407-fdma-mpe31-11"
> +	  , .data = &fdma_mpe31_stih407_11 },
> +	{ .compatible = "st,stih407-fdma-mpe31-12"
> +	  , .data = &fdma_mpe31_stih407_12 },
> +	{ .compatible = "st,stih407-fdma-mpe31-13"
> +	  , .data = &fdma_mpe31_stih407_13 },
> +	{},
> +};
> +MODULE_DEVICE_TABLE(of, st_fdma_match);
> +
> +static int st_fdma_parse_dt(struct platform_device *pdev,
> +			const struct st_fdma_driverdata *drvdata,
> +			struct st_fdma_dev *fdev)
> +{
> +	struct device_node *np = pdev->dev.of_node;
> +	int ret;
> +
> +	if (!np)
> +		goto err;
> +
> +	ret = of_property_read_u32(np, "dma-channels", &fdev->nr_channels);
> +	if (ret)
> +		goto err;
> +
> +	snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf",
> +		drvdata->name, drvdata->id);
> +
> +err:
> +	return ret;
> +}
> +#define FDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
> +				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
> +				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
> +				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
> +
> +static int st_fdma_probe(struct platform_device *pdev)
> +{
> +	struct st_fdma_dev *fdev;
> +	const struct of_device_id *match;
> +	struct device_node *np = pdev->dev.of_node;
> +	const struct st_fdma_driverdata *drvdata;
> +	int irq, ret, i;
> +
> +	match = of_match_device((st_fdma_match), &pdev->dev);
> +	if (!match || !match->data) {
> +		dev_err(&pdev->dev, "No device match found\n");
> +		return -ENODEV;
> +	}
> +
> +	drvdata = match->data;
> +
> +	fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
> +	if (!fdev)
> +		return -ENOMEM;
> +
> +	ret = st_fdma_parse_dt(pdev, drvdata, fdev);
> +	if (ret) {
> +		dev_err(&pdev->dev, "unable to find platform data\n");
> +		goto err;
> +	}
> +
> +	fdev->chans = devm_kzalloc(&pdev->dev,
> +				   fdev->nr_channels
> +				   * sizeof(struct st_fdma_chan), GFP_KERNEL);
> +	if (!fdev->chans)
> +		return -ENOMEM;
> +
> +	fdev->dev = &pdev->dev;
> +	fdev->drvdata = drvdata;
> +	platform_set_drvdata(pdev, fdev);
> +
> +	fdev->io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +	fdev->io_base = devm_ioremap_resource(&pdev->dev, fdev->io_res);
> +	if (IS_ERR(fdev->io_base))
> +		return PTR_ERR(fdev->io_base);
> +
> +	irq = platform_get_irq(pdev, 0);
> +	if (irq < 0) {
> +		dev_err(&pdev->dev, "Failed to get irq resource\n");
> +		return -EINVAL;
> +	}
> +
> +	ret = devm_request_irq(&pdev->dev, irq, st_fdma_irq_handler, 0,
> +			       dev_name(&pdev->dev), fdev);
> +	if (ret) {
> +		dev_err(&pdev->dev, "Failed to request irq\n");
> +		goto err;
> +	}
> +
> +	ret = st_fdma_clk_get(fdev);
> +	if (ret)
> +		goto err;
> +
> +	ret = st_fdma_clk_enable(fdev);
> +	if (ret) {
> +		dev_err(&pdev->dev, "Failed to enable clocks\n");
> +		goto err_clk;
> +	}
> +
> +	/* Initialise list of FDMA channels */
> +	INIT_LIST_HEAD(&fdev->dma_device.channels);
> +	for (i = 0; i < fdev->nr_channels; i++) {
> +		struct st_fdma_chan *fchan = &fdev->chans[i];
> +
> +		fchan->fdev = fdev;
> +		fchan->vchan.desc_free = st_fdma_free_desc;
> +		vchan_init(&fchan->vchan, &fdev->dma_device);
> +	}
> +
> +	/* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */
> +	fdev->dreq_mask = BIT(0) | BIT(31);
> +
> +	dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask);
> +	dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask);
> +	dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask);
> +
> +	fdev->dma_device.dev = &pdev->dev;
> +	fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res;
> +	fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res;
> +	fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic;
> +	fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg;
> +	fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy;
> +	fdev->dma_device.device_tx_status = st_fdma_tx_status;
> +	fdev->dma_device.device_issue_pending = st_fdma_issue_pending;
> +	fdev->dma_device.device_terminate_all = st_fdma_terminate_all;
> +	fdev->dma_device.device_config = st_fdma_slave_config;
> +	fdev->dma_device.device_pause = st_fdma_pause;
> +	fdev->dma_device.device_resume = st_fdma_resume;
> +
> +	fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS;
> +	fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS;
> +	fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
> +	fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
> +
> +	ret = dma_async_device_register(&fdev->dma_device);
> +	if (ret) {
> +		dev_err(&pdev->dev, "Failed to register DMA device\n");
> +		goto err_clk;
> +	}
> +
> +	ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev);
> +	if (ret) {
> +		dev_err(&pdev->dev, "Failed to register controller\n");
> +		goto err_dma_dev;
> +	}
> +
> +	dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", irq);
> +
> +	return 0;
> +
> +err_dma_dev:
> +	dma_async_device_unregister(&fdev->dma_device);
> +err_clk:
> +	st_fdma_clk_disable(fdev);
> +err:
> +	return ret;
> +}
> +
> +static int st_fdma_remove(struct platform_device *pdev)
> +{
> +	struct st_fdma_dev *fdev = platform_get_drvdata(pdev);
> +
> +	st_fdma_clk_disable(fdev);
> +
> +	return 0;
> +}
> +
> +static struct platform_driver st_fdma_platform_driver = {
> +	.driver = {
> +		.name = "st-fdma",
> +		.of_match_table = st_fdma_match,
> +	},
> +	.probe = st_fdma_probe,
> +	.remove = st_fdma_remove,
> +};
> +module_platform_driver(st_fdma_platform_driver);
> +
> +bool st_fdma_filter_fn(struct dma_chan *chan, void *param)
> +{
> +	struct st_fdma_cfg *config = param;
> +	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
> +
> +	if (!param)
> +		return false;
> +
> +	if (fchan->fdev->dma_device.dev->of_node != config->of_node)
> +		return false;
> +
> +	fchan->cfg = *config;
> +
> +	return true;
> +}
> +
> +MODULE_LICENSE("GPL v2");
> +MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
> +MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
> --
> 1.9.1
> 
> --
> To unsubscribe from this list: send the line "unsubscribe dmaengine" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
Peter Griffin April 21, 2016, 2:58 p.m. UTC | #3
Hi Appana,

Thanks for the review.

On Thu, 21 Apr 2016, Appana Durga Kedareswara Rao wrote:

> 
> 
> > -----Original Message-----
> > From: dmaengine-owner@vger.kernel.org [mailto:dmaengine-
> > owner@vger.kernel.org] On Behalf Of Peter Griffin
> > Sent: Thursday, April 21, 2016 4:34 PM
> > To: linux-arm-kernel@lists.infradead.org; linux-kernel@vger.kernel.org;
> > srinivas.kandagatla@gmail.com; maxime.coquelin@st.com;
> > patrice.chotard@st.com; vinod.koul@intel.com
> > Cc: peter.griffin@linaro.org; lee.jones@linaro.org;
> > dmaengine@vger.kernel.org; devicetree@vger.kernel.org; arnd@arndb.de;
> > broonie@kernel.org; ludovic.barre@st.com
> > Subject: [PATCH 03/18] dmaengine: st_fdma: Add STMicroelectronics FDMA
> > engine driver support
> > 
> > This patch adds support for the Flexible Direct Memory Access (FDMA) core
> > driver. The FDMA is a slim core CPU with a dedicated firmware.
> > It is a general purpose DMA controller capable of supporting 16
> > independent DMA channels. Data moves may be from memory to memory
> > or between memory and paced, latency-critical real-time targets, and it
> > is found on all STi based chipsets.
> > 
> > Signed-off-by: Ludovic Barre <ludovic.barre@st.com>
> > Signed-off-by: Peter Griffin <peter.griffin@linaro.org>
> > ---
> >  drivers/dma/Kconfig   |  12 +
> >  drivers/dma/Makefile  |   1 +
> >  drivers/dma/st_fdma.c | 967 ++++++++++++++++++++++++++++++++++++++++++++++++++
> >  3 files changed, 980 insertions(+)
> >  create mode 100644 drivers/dma/st_fdma.c
> > 
> > diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> > index d96d87c..5910c4f 100644
> > --- a/drivers/dma/Kconfig
> > +++ b/drivers/dma/Kconfig
> > @@ -527,6 +527,18 @@ config ZX_DMA
> >  	help
> >  	  Support the DMA engine for ZTE ZX296702 platform devices.
> > 
> > +config ST_FDMA
> > +	tristate "ST FDMA dmaengine support"
> > +	depends on ARCH_STI
> > +	select DMA_ENGINE
> > +	select FW_LOADER
> > +	select DMA_VIRTUAL_CHANNELS
> > +	help
> > +	  Enable support for ST FDMA controller.
> > +	  It supports 16 independent DMA channels, accepts up to 32 DMA requests
> > +
> > +	  Say Y here if you have such a chipset.
> > +	  If unsure, say N.
> > 
> >  # driver files
> >  source "drivers/dma/bestcomm/Kconfig"
> > diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
> > index 6084127..b81ca99 100644
> > --- a/drivers/dma/Makefile
> > +++ b/drivers/dma/Makefile
> > @@ -65,6 +65,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
> >  obj-$(CONFIG_TI_EDMA) += edma.o
> >  obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
> >  obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
> > +obj-$(CONFIG_ST_FDMA) += st_fdma.o
> > 
> >  obj-y += qcom/
> >  obj-y += xilinx/
> > diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
> > new file mode 100644
> > index 0000000..9bf0100
> > --- /dev/null
> > +++ b/drivers/dma/st_fdma.c
> > @@ -0,0 +1,967 @@
> > +/*
> > + * st_fdma.c
> > + *
> > + * Copyright (C) 2014 STMicroelectronics
> > + * Author: Ludovic Barre <Ludovic.barre@st.com>
> > + * License terms:  GNU General Public License (GPL), version 2
> > + */
> > +#include <linux/init.h>
> > +#include <linux/module.h>
> > +#include <linux/slab.h>
> > +#include <linux/of.h>
> > +#include <linux/of_device.h>
> > +#include <linux/of_dma.h>
> > +#include <linux/platform_device.h>
> > +#include <linux/interrupt.h>
> > +#include <linux/clk.h>
> > +#include <linux/dmaengine.h>
> > +#include <linux/dmapool.h>
> > +#include <linux/firmware.h>
> > +#include <linux/elf.h>
> > +#include <linux/atomic.h>
> > +
> > +#include "st_fdma.h"
> > +#include "dmaengine.h"
> > +#include "virt-dma.h"
> > +
> > +static char *fdma_clk_name[CLK_MAX_NUM] = {
> > +	[CLK_SLIM]	= "fdma_slim",
> > +	[CLK_HI]	= "fdma_hi",
> > +	[CLK_LOW]	= "fdma_low",
> > +	[CLK_IC]	= "fdma_ic",
> > +};
> > +
> > +static int st_fdma_clk_get(struct st_fdma_dev *fdev)
> > +{
> > +	int i;
> > +
> > +	for (i = 0; i < CLK_MAX_NUM; i++) {
> > +		fdev->clks[i] = devm_clk_get(fdev->dev, fdma_clk_name[i]);
> > +		if (IS_ERR(fdev->clks[i])) {
> > +			dev_err(fdev->dev,
> > +				"failed to get clock: %s\n", fdma_clk_name[i]);
> > +			return PTR_ERR(fdev->clks[i]);
> > +		}
> > +	}
> > +
> > +	if (i != CLK_MAX_NUM) {
> > +		dev_err(fdev->dev, "not all clocks are defined\n");
> > +		return -EINVAL;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +static int st_fdma_clk_enable(struct st_fdma_dev *fdev)
> > +{
> > +	int i, ret;
> > +
> > +	for (i = 0; i < CLK_MAX_NUM; i++) {
> > +		ret = clk_prepare_enable(fdev->clks[i]);
> > +		if (ret < 0)
> 
> You should disable and unprepare the other clocks...

Good point, will fix in v4.

regards,

Peter
Lee Jones April 25, 2016, 9:04 a.m. UTC | #4
On Thu, 21 Apr 2016, Appana Durga Kedareswara Rao wrote:
> > -----Original Message-----
> > From: dmaengine-owner@vger.kernel.org [mailto:dmaengine-
> > owner@vger.kernel.org] On Behalf Of Peter Griffin
> > Sent: Thursday, April 21, 2016 4:34 PM
> > To: linux-arm-kernel@lists.infradead.org; linux-kernel@vger.kernel.org;
> > srinivas.kandagatla@gmail.com; maxime.coquelin@st.com;
> > patrice.chotard@st.com; vinod.koul@intel.com
> > Cc: peter.griffin@linaro.org; lee.jones@linaro.org;
> > dmaengine@vger.kernel.org; devicetree@vger.kernel.org; arnd@arndb.de;
> > broonie@kernel.org; ludovic.barre@st.com
> > Subject: [PATCH 03/18] dmaengine: st_fdma: Add STMicroelectronics FDMA
> > engine driver support

What is this text?  If you wish to contribute to upstream mailing
lists, please fix your email client.  Also, please read:

  Documentation/email-clients.txt

> > This patch adds support for the Flexible Direct Memory Access (FDMA) core
> > driver. The FDMA is a slim core CPU with a dedicated firmware.
> > It is a general purpose DMA controller capable of supporting 16
> > independent DMA channels. Data moves may be from memory to memory
> > or between memory and paced, latency-critical real-time targets, and it
> > is found on all STi based chipsets.
> > 
> > Signed-off-by: Ludovic Barre <ludovic.barre@st.com>
> > Signed-off-by: Peter Griffin <peter.griffin@linaro.org>
> > ---
> >  drivers/dma/Kconfig   |  12 +
> >  drivers/dma/Makefile  |   1 +
> >  drivers/dma/st_fdma.c | 967 ++++++++++++++++++++++++++++++++++++++++++++++++++
> >  3 files changed, 980 insertions(+)
> >  create mode 100644 drivers/dma/st_fdma.c

[...]
> > +static int st_fdma_clk_enable(struct st_fdma_dev *fdev)
> > +{
> > +	int i, ret;
> > +
> > +	for (i = 0; i < CLK_MAX_NUM; i++) {
> > +		ret = clk_prepare_enable(fdev->clks[i]);
> > +		if (ret < 0)
> 
> You should disable and unprepare the other clocks...

Replying to a 1000 line email and not snipping (especially for a
single one-line comment) is considered bad etiquette.  Please remove
all unrelated text in future.
Vinod Koul April 26, 2016, 4:56 p.m. UTC | #5
On Thu, Apr 21, 2016 at 12:04:20PM +0100, Peter Griffin wrote:

> +	if (!atomic_read(&fchan->fdev->fw_loaded)) {
> +		dev_err(fchan->fdev->dev, "%s: fdma fw not loaded\n", __func__);
> +		return NULL;
> +	}

so who is loading the fw and setting fw_loaded? It is not set in this patch.

> +	if (direction == DMA_DEV_TO_MEM) {
> +		fchan->cfg.req_ctrl &= ~REQ_CTRL_WNR;
> +		maxburst = fchan->scfg.src_maxburst;
> +		width = fchan->scfg.src_addr_width;
> +		addr = fchan->scfg.src_addr;
> +	} else if (direction == DMA_MEM_TO_DEV) {
> +		fchan->cfg.req_ctrl |= REQ_CTRL_WNR;
> +		maxburst = fchan->scfg.dst_maxburst;
> +		width = fchan->scfg.dst_addr_width;
> +		addr = fchan->scfg.dst_addr;
> +	} else {
> +		return -EINVAL;
> +	}

switch please
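
i.e. something like this (sketch):

	switch (direction) {
	case DMA_DEV_TO_MEM:
		fchan->cfg.req_ctrl &= ~REQ_CTRL_WNR;
		maxburst = fchan->scfg.src_maxburst;
		width = fchan->scfg.src_addr_width;
		addr = fchan->scfg.src_addr;
		break;
	case DMA_MEM_TO_DEV:
		fchan->cfg.req_ctrl |= REQ_CTRL_WNR;
		maxburst = fchan->scfg.dst_maxburst;
		width = fchan->scfg.dst_addr_width;
		addr = fchan->scfg.dst_addr;
		break;
	default:
		return -EINVAL;
	}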

> +
> +	fchan->cfg.req_ctrl &= ~REQ_CTRL_OPCODE_MASK;
> +	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
> +		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST1;
> +	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
> +		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST2;
> +	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
> +		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST4;
> +	else if (width == DMA_SLAVE_BUSWIDTH_8_BYTES)
> +		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST8;
> +	else
> +		return -EINVAL;

here as well
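
e.g. (sketch):

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST1;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST4;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST8;
		break;
	default:
		return -EINVAL;
	}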

> +static void fill_hw_node(struct st_fdma_hw_node *hw_node,
> +			struct st_fdma_chan *fchan,
> +			enum dma_transfer_direction direction)
> +{
> +
> +	if (direction == DMA_MEM_TO_DEV) {
> +		hw_node->control |= NODE_CTRL_SRC_INCR;
> +		hw_node->control |= NODE_CTRL_DST_STATIC;
> +		hw_node->daddr = fchan->cfg.dev_addr;
> +	} else {
> +		hw_node->control |= NODE_CTRL_SRC_STATIC;
> +		hw_node->control |= NODE_CTRL_DST_INCR;
> +		hw_node->saddr = fchan->cfg.dev_addr;
> +	}

Empty line here and at other places too. The code looks very
compressed and a bit harder to read overall.

> +	fdesc = st_fdma_alloc_desc(fchan, sg_len);
> +	if (!fdesc) {
> +		dev_err(fchan->fdev->dev, "no memory for desc\n");
> +		return NULL;
> +	}
> +
> +	fdesc->iscyclic = false;
> +
> +	for_each_sg(sgl, sg, sg_len, i) {
> +		hw_node = fdesc->node[i].desc;
> +
> +		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
> +		hw_node->control = NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
> +
> +		fill_hw_node(hw_node, fchan, direction);
> +
> +		if (direction == DMA_MEM_TO_DEV)
> +			hw_node->saddr = sg_dma_address(sg);
> +		else
> +			hw_node->daddr = sg_dma_address(sg);
> +
> +		hw_node->nbytes = sg_dma_len(sg);
> +		hw_node->generic.length = sg_dma_len(sg);
> +	}
> +
> +	/* interrupt at end of last node */
> +	hw_node->control |= NODE_CTRL_INT_EON;
> +
> +	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);

bunch of this seems similar to cyclic case, can you create common setup
routine for these, anyway cyclic is a special case of slave_sg

> +
> +	ret = dma_cookie_status(chan, cookie, txstate);
> +	if (ret == DMA_COMPLETE)
> +		return ret;
> +
> +	if (!txstate)
> +		return fchan->status;

why channel status, query is for descriptor

> +static int st_fdma_remove(struct platform_device *pdev)
> +{
> +	struct st_fdma_dev *fdev = platform_get_drvdata(pdev);
> +
> +	st_fdma_clk_disable(fdev);

and your irq is still enabled and tasklets can be scheduled!!
Peter Griffin April 27, 2016, 12:59 p.m. UTC | #6
Hi Vinod,

Thanks for reviewing.

On Tue, 26 Apr 2016, Vinod Koul wrote:

> On Thu, Apr 21, 2016 at 12:04:20PM +0100, Peter Griffin wrote:
> 
> > +	if (!atomic_read(&fchan->fdev->fw_loaded)) {
> > +		dev_err(fchan->fdev->dev, "%s: fdma fw not loaded\n", __func__);
> > +		return NULL;
> > +	}
> 
> so who is loading the fw and setting fw_loaded? It is not set in this patch.

This shouldn't be in this patch. It should have been added as part of the
"dmaengine: st_fdma: Add xp70 firmware loading mechanism" patch.

> 
> > +	if (direction == DMA_DEV_TO_MEM) {
> > +		fchan->cfg.req_ctrl &= ~REQ_CTRL_WNR;
> > +		maxburst = fchan->scfg.src_maxburst;
> > +		width = fchan->scfg.src_addr_width;
> > +		addr = fchan->scfg.src_addr;
> > +	} else if (direction == DMA_MEM_TO_DEV) {
> > +		fchan->cfg.req_ctrl |= REQ_CTRL_WNR;
> > +		maxburst = fchan->scfg.dst_maxburst;
> > +		width = fchan->scfg.dst_addr_width;
> > +		addr = fchan->scfg.dst_addr;
> > +	} else {
> > +		return -EINVAL;
> > +	}
> 
> switch please

Ok, will fix in v4
> 
> > +
> > +	fchan->cfg.req_ctrl &= ~REQ_CTRL_OPCODE_MASK;
> > +	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
> > +		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST1;
> > +	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
> > +		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST2;
> > +	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
> > +		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST4;
> > +	else if (width == DMA_SLAVE_BUSWIDTH_8_BYTES)
> > +		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST8;
> > +	else
> > +		return -EINVAL;
> 
> here as well

Ok, will fix in v4.
> 
> > +static void fill_hw_node(struct st_fdma_hw_node *hw_node,
> > +			struct st_fdma_chan *fchan,
> > +			enum dma_transfer_direction direction)
> > +{
> > +
> > +	if (direction == DMA_MEM_TO_DEV) {
> > +		hw_node->control |= NODE_CTRL_SRC_INCR;
> > +		hw_node->control |= NODE_CTRL_DST_STATIC;
> > +		hw_node->daddr = fchan->cfg.dev_addr;
> > +	} else {
> > +		hw_node->control |= NODE_CTRL_SRC_STATIC;
> > +		hw_node->control |= NODE_CTRL_DST_INCR;
> > +		hw_node->saddr = fchan->cfg.dev_addr;
> > +	}
> 
> Empty line here and at other places too. The code looks very
> compressed and a bit harder to read overall.

Ok, will fix in v4.

> 
> > +	fdesc = st_fdma_alloc_desc(fchan, sg_len);
> > +	if (!fdesc) {
> > +		dev_err(fchan->fdev->dev, "no memory for desc\n");
> > +		return NULL;
> > +	}
> > +
> > +	fdesc->iscyclic = false;
> > +
> > +	for_each_sg(sgl, sg, sg_len, i) {
> > +		hw_node = fdesc->node[i].desc;
> > +
> > +		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
> > +		hw_node->control = NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
> > +
> > +		fill_hw_node(hw_node, fchan, direction);
> > +
> > +		if (direction == DMA_MEM_TO_DEV)
> > +			hw_node->saddr = sg_dma_address(sg);
> > +		else
> > +			hw_node->daddr = sg_dma_address(sg);
> > +
> > +		hw_node->nbytes = sg_dma_len(sg);
> > +		hw_node->generic.length = sg_dma_len(sg);
> > +	}
> > +
> > +	/* interrupt at end of last node */
> > +	hw_node->control |= NODE_CTRL_INT_EON;
> > +
> > +	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
> 
> bunch of this seems similar to cyclic case, can you create common setup
> routine for these, anyway cyclic is a special case of slave_sg

In v4 I've made a st_fdma_prep_common() which abstracts out all the common
checks at the beginning of the *_prep*() functions.
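
Roughly like this (a sketch only; st_fdma_prep_common() here is the
hypothetical v4 helper, built from the checks already in this patch):

	static int st_fdma_prep_common(struct st_fdma_chan *fchan, size_t len)
	{
		if (!fchan || !len)
			return -EINVAL;

		if (!atomic_read(&fchan->fdev->fw_loaded)) {
			dev_err(fchan->fdev->dev, "%s: fdma fw not loaded\n",
				__func__);
			return -EINVAL;
		}

		/* slave_sg and cyclic additionally check is_slave_direction() */
		return 0;
	}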

In v3 fill_hw_node() is already (from one of your previous reviews)
abstracting out all the common parts from these loops. So everything
that is now left is actually differences between the two setups.

Is that Ok?

> 
> > +
> > +	ret = dma_cookie_status(chan, cookie, txstate);
> > +	if (ret == DMA_COMPLETE)
> > +		return ret;
> > +
> > +	if (!txstate)
> > +		return fchan->status;
> 
> why channel status, query is for descriptor

Ok, will fix in v4.

> 
> > +static int st_fdma_remove(struct platform_device *pdev)
> > +{
> > +	struct st_fdma_dev *fdev = platform_get_drvdata(pdev);
> > +
> > +	st_fdma_clk_disable(fdev);
> 
> and your irq is still enabled and tasklets can be scheduled!!
> 
Eeek! Very good point. Also looking at some other drivers we
should be doing an of_dma_controller_free() and
dma_async_device_unregister().

So something like this: -

  st_fdma_disable(); /*disables irqs*/
  of_dma_controller_free(pdev->dev.of_node);
  dma_async_device_unregister(&priv->slave);
  st_fdma_clk_disable(fdev);

regards,

Peter.
Vinod Koul May 2, 2016, 9:30 a.m. UTC | #7
On Wed, Apr 27, 2016 at 01:59:23PM +0100, Peter Griffin wrote:
> > bunch of this seems similar to cyclic case, can you create common setup
> > routine for these, anyway cyclic is a special case of slave_sg
> 
> In v4 I've made a st_fdma_prep_common() which abstracts out all the common
> checks at the beginning of the *_prep*() functions.
> 
> In v3 fill_fw_node() is already (from one of your previous reviews)
> abstracting out all the common parts from these loops. So everything
> that is now left is actually differences between the two setups.
> 
> Is that Ok?

Sounds better

> > and your irq is still enabled and tasklets can be scheduled!!
> > 
> Eeek! Very good point. Also looking at some other drivers we
> > should be doing an of_dma_controller_free() and
> dma_async_device_unregister().
> 
> So something like this: -
> 
>   st_fdma_disable(); /*disables irqs*/
>   of_dma_controller_free(pdev->dev.of_node);
>   dma_async_device_unregister(&priv->slave);
>   st_fdma_clk_disable(fdev);

you can call devm_free_irq() explicitly, that will synchronize the irq and
free it up. Also you should kill the tasklet afterwards.
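
So the teardown might look something like this (an untested sketch; it
assumes the irq number gets stashed in st_fdma_dev at probe time):

	static int st_fdma_remove(struct platform_device *pdev)
	{
		struct st_fdma_dev *fdev = platform_get_drvdata(pdev);

		/* fdev->irq assumed saved from platform_get_irq() in probe */
		devm_free_irq(&pdev->dev, fdev->irq, fdev);
		st_fdma_disable(fdev);
		of_dma_controller_free(pdev->dev.of_node);
		dma_async_device_unregister(&fdev->dma_device);
		st_fdma_clk_disable(fdev);

		return 0;
	}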
Peter Griffin May 9, 2016, 5:30 p.m. UTC | #8
Hi Vinod,

On Mon, 02 May 2016, Vinod Koul wrote:

> On Wed, Apr 27, 2016 at 01:59:23PM +0100, Peter Griffin wrote:
> > > bunch of this seems similar to cyclic case, can you create common setup
> > > routine for these, anyway cyclic is a special case of slave_sg
> > 
> > In v4 I've made a st_fdma_prep_common() which abstracts out all the common
> > checks at the beginning of the *_prep*() functions.
> > 
> > In v3 fill_fw_node() is already (from one of your previous reviews)
> > abstracting out all the common parts from these loops. So everything
> > that is now left is actually differences between the two setups.
> > 
> > Is that Ok?
> 
> Sounds better
> 
> > > and your irq is still enabled and tasklets can be scheduled!!
> > > 
> > Eeek! Very good point. Also looking at some other drivers we
> > should be doing an of_dma_controller_free() and
> > dma_async_device_unregister().
> > 
> > So something like this: -
> > 
> >   st_fdma_disable(); /*disables irqs*/
> >   of_dma_controller_free(pdev->dev.of_node);
> >   dma_async_device_unregister(&priv->slave);
> >   st_fdma_clk_disable(fdev);
> 
> you can call devm_free_irq() explicitly, that will synchronize the irq and
> free it up.

Ok will do/

> Also you should kill the tasklet afterwards.
> 
We aren't using a tasklet in this driver.

Peter.
diff mbox

Patch

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d96d87c..5910c4f 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -527,6 +527,18 @@  config ZX_DMA
 	help
 	  Support the DMA engine for ZTE ZX296702 platform devices.
 
+config ST_FDMA
+	tristate "ST FDMA dmaengine support"
+	depends on ARCH_STI
+	select DMA_ENGINE
+	select FW_LOADER
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for ST FDMA controller.
+	  It supports 16 independent DMA channels, accepts up to 32 DMA requests
+
+	  Say Y here if you have such a chipset.
+	  If unsure, say N.
 
 # driver files
 source "drivers/dma/bestcomm/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 6084127..b81ca99 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -65,6 +65,7 @@  obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
 obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+obj-$(CONFIG_ST_FDMA) += st_fdma.o
 
 obj-y += qcom/
 obj-y += xilinx/
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
new file mode 100644
index 0000000..9bf0100
--- /dev/null
+++ b/drivers/dma/st_fdma.c
@@ -0,0 +1,967 @@ 
+/*
+ * st_fdma.c
+ *
+ * Copyright (C) 2014 STMicroelectronics
+ * Author: Ludovic Barre <Ludovic.barre@st.com>
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/firmware.h>
+#include <linux/elf.h>
+#include <linux/atomic.h>
+
+#include "st_fdma.h"
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+static char *fdma_clk_name[CLK_MAX_NUM] = {
+	[CLK_SLIM]	= "fdma_slim",
+	[CLK_HI]	= "fdma_hi",
+	[CLK_LOW]	= "fdma_low",
+	[CLK_IC]	= "fdma_ic",
+};
+
+static int st_fdma_clk_get(struct st_fdma_dev *fdev)
+{
+	int i;
+
+	for (i = 0; i < CLK_MAX_NUM; i++) {
+		fdev->clks[i] = devm_clk_get(fdev->dev, fdma_clk_name[i]);
+		if (IS_ERR(fdev->clks[i])) {
+			dev_err(fdev->dev,
+				"failed to get clock: %s\n", fdma_clk_name[i]);
+			return PTR_ERR(fdev->clks[i]);
+		}
+	}
+
+	if (i != CLK_MAX_NUM) {
+		dev_err(fdev->dev, "not all clocks are defined\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int st_fdma_clk_enable(struct st_fdma_dev *fdev)
+{
+	int i, ret;
+
+	for (i = 0; i < CLK_MAX_NUM; i++) {
+		ret = clk_prepare_enable(fdev->clks[i]);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void st_fdma_clk_disable(struct st_fdma_dev *fdev)
+{
+	int i;
+
+	for (i = 0; i < CLK_MAX_NUM; i++)
+		clk_disable_unprepare(fdev->clks[i]);
+}
+
+static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct st_fdma_chan, vchan.chan);
+}
+
+static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct st_fdma_desc, vdesc);
+}
+
+static void st_fdma_enable(struct st_fdma_dev *fdev)
+{
+	unsigned long hw_id, hw_ver, fw_rev;
+	u32 val;
+
+	/* disable CPU pipeline clock & reset cpu pipeline */
+	val = FDMA_CLK_GATE_DIS | FDMA_CLK_GATE_RESET;
+	fdma_write(fdev, val, CLK_GATE);
+	/* disable SLIM core STBus sync */
+	fdma_write(fdev, FDMA_STBUS_SYNC_DIS, STBUS_SYNC);
+	/* enable cpu pipeline clock */
+	fdma_write(fdev, !FDMA_CLK_GATE_DIS, CLK_GATE);
+	/* clear int & cmd mailbox */
+	fdma_write(fdev, ~0UL, INT_CLR);
+	fdma_write(fdev, ~0UL, CMD_CLR);
+	/* enable all channels cmd & int */
+	fdma_write(fdev, ~0UL, INT_MASK);
+	fdma_write(fdev, ~0UL, CMD_MASK);
+	/* enable cpu */
+	writel(FDMA_EN_RUN, fdev->io_base + FDMA_EN_OFST);
+
+	hw_id = fdma_read(fdev, ID);
+	hw_ver = fdma_read(fdev, VER);
+	fw_rev = fdma_read(fdev, REV_ID);
+
+	dev_info(fdev->dev, "fw rev:%ld.%ld on SLIM %ld.%ld\n",
+		 FDMA_REV_ID_MAJ(fw_rev), FDMA_REV_ID_MIN(fw_rev),
+		 hw_id, hw_ver);
+}
+
+static int st_fdma_disable(struct st_fdma_dev *fdev)
+{
+	/* mask all (cmd & int) channels */
+	fdma_write(fdev, 0UL, INT_MASK);
+	fdma_write(fdev, 0UL, CMD_MASK);
+	/* disable cpu pipeline clock */
+	fdma_write(fdev, FDMA_CLK_GATE_DIS, CLK_GATE);
+	writel(!FDMA_EN_RUN, fdev->io_base + FDMA_EN_OFST);
+
+	return readl(fdev->io_base + FDMA_EN_OFST);
+}
+
+static int st_fdma_dreq_get(struct st_fdma_chan *fchan)
+{
+	struct st_fdma_dev *fdev = fchan->fdev;
+	u32 req_line_cfg = fchan->cfg.req_line;
+	u32 dreq_line;
+	int try = 0;
+
+	/*
+	 * dreq_mask is shared for n channels of fdma, so all accesses must be
+	 * atomic. If the dreq_mask changes between ffz and set_bit,
+	 * we retry.
+	 */
+	do {
+		if (fdev->dreq_mask == ~0L) {
+			dev_err(fdev->dev, "No req lines available\n");
+			return -EINVAL;
+		}
+
+		if (try || req_line_cfg >= ST_FDMA_NR_DREQS) {
+			dev_err(fdev->dev, "Invalid or used req line\n");
+			return -EINVAL;
+		} else {
+			dreq_line = req_line_cfg;
+		}
+
+		try++;
+	} while (test_and_set_bit(dreq_line, &fdev->dreq_mask));
+
+	dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n",
+		dreq_line, fdev->dreq_mask);
+
+	return dreq_line;
+}
+
+static void st_fdma_dreq_put(struct st_fdma_chan *fchan)
+{
+	struct st_fdma_dev *fdev = fchan->fdev;
+
+	dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line);
+	clear_bit(fchan->dreq_line, &fdev->dreq_mask);
+}
+
+static void st_fdma_xfer_desc(struct st_fdma_chan *fchan)
+{
+	struct virt_dma_desc *vdesc;
+	unsigned long nbytes, ch_cmd, cmd;
+
+	vdesc = vchan_next_desc(&fchan->vchan);
+	if (!vdesc)
+		return;
+
+	fchan->fdesc = to_st_fdma_desc(vdesc);
+	nbytes = fchan->fdesc->node[0].desc->nbytes;
+	cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id);
+	ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START;
+
+	/* start the channel for the descriptor */
+	fnode_write(fchan, nbytes, CNTN);
+	fchan_write(fchan, ch_cmd, CH_CMD);
+	writel(cmd, fchan->fdev->io_base + FDMA_CMD_SET_OFST);
+
+	dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id);
+}
+
+static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan,
+				  unsigned long int_sta)
+{
+	unsigned long ch_sta, ch_err;
+	int ch_id = fchan->vchan.chan.chan_id;
+	struct st_fdma_dev *fdev = fchan->fdev;
+
+	ch_sta = fchan_read(fchan, CH_CMD);
+	ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK;
+	ch_sta &= FDMA_CH_CMD_STA_MASK;
+
+	if (int_sta & FDMA_INT_STA_ERR) {
+		dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err);
+		fchan->status = DMA_ERROR;
+		return;
+	}
+
+	switch (ch_sta) {
+	case FDMA_CH_CMD_STA_PAUSED:
+		fchan->status = DMA_PAUSED;
+		break;
+	case FDMA_CH_CMD_STA_RUNNING:
+		fchan->status = DMA_IN_PROGRESS;
+		break;
+	}
+}
+
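+/*
+ * All channels share one interrupt. INT_STA packs two status bits per
+ * channel (completion and error), hence the walk over the channels while
+ * shifting the status word right two bits at a time.
+ */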
+static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id)
+{
+	struct st_fdma_dev *fdev = dev_id;
+	irqreturn_t ret = IRQ_NONE;
+	struct st_fdma_chan *fchan = &fdev->chans[0];
+	unsigned long int_sta, clr;
+
+	int_sta = fdma_read(fdev, INT_STA);
+	clr = int_sta;
+
+	for (; int_sta != 0 ; int_sta >>= 2, fchan++) {
+		if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR)))
+			continue;
+
+		spin_lock(&fchan->vchan.lock);
+		st_fdma_ch_sta_update(fchan, int_sta);
+
+		if (fchan->fdesc) {
+			if (!fchan->fdesc->iscyclic) {
+				list_del(&fchan->fdesc->vdesc.node);
+				vchan_cookie_complete(&fchan->fdesc->vdesc);
+				fchan->fdesc = NULL;
+				fchan->status = DMA_COMPLETE;
+			} else {
+				vchan_cyclic_callback(&fchan->fdesc->vdesc);
+			}
+
+			/* Start the next descriptor (if available) */
+			if (!fchan->fdesc)
+				st_fdma_xfer_desc(fchan);
+		}
+
+		spin_unlock(&fchan->vchan.lock);
+		ret = IRQ_HANDLED;
+	}
+
+	fdma_write(fdev, clr, INT_CLR);
+
+	return ret;
+}
+
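+/*
+ * dma-spec cells: cell 0 is the request line; optional cell 1 carries
+ * the request control bits and optional cell 2 the channel type. Missing
+ * cells default to req_ctrl 0 and a free running channel.
+ */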
+static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
+					 struct of_dma *ofdma)
+{
+	struct st_fdma_dev *fdev = ofdma->of_dma_data;
+	struct st_fdma_cfg cfg;
+
+	if (dma_spec->args_count < 1)
+		return NULL;
+
+	cfg.of_node = dma_spec->np;
+	cfg.req_line = dma_spec->args[0];
+	cfg.req_ctrl = 0;
+	cfg.type = ST_FDMA_TYPE_FREE_RUN;
+
+	if (dma_spec->args_count > 1)
+		cfg.req_ctrl = dma_spec->args[1] & REQ_CTRL_CFG_MASK;
+
+	if (dma_spec->args_count > 2)
+		cfg.type = dma_spec->args[2];
+
+	dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n",
+		cfg.req_line, cfg.type, cfg.req_ctrl);
+
+	return dma_request_channel(fdev->dma_device.cap_mask,
+			st_fdma_filter_fn, &cfg);
+}
+
+static void st_fdma_free_desc(struct virt_dma_desc *vdesc)
+{
+	struct st_fdma_desc *fdesc;
+	int i;
+
+	fdesc = to_st_fdma_desc(vdesc);
+	for (i = 0; i < fdesc->n_nodes; i++)
+			dma_pool_free(fdesc->fchan->node_pool,
+				      fdesc->node[i].desc,
+				      fdesc->node[i].pdesc);
+	kfree(fdesc);
+}
+
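+/*
+ * Allocate a software descriptor holding sg_len hardware nodes. Each
+ * node comes from the per-channel DMA pool so that the FDMA can fetch
+ * it by its DMA address (pdesc).
+ */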
+static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
+					       int sg_len)
+{
+	struct st_fdma_desc *fdesc;
+	int i;
+
+	fdesc = kzalloc(sizeof(*fdesc) +
+			sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT);
+	if (!fdesc)
+		return NULL;
+
+	fdesc->fchan = fchan;
+	fdesc->n_nodes = sg_len;
+	for (i = 0; i < sg_len; i++) {
+		fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
+				GFP_NOWAIT, &fdesc->node[i].pdesc);
+		if (!fdesc->node[i].desc)
+			goto err;
+	}
+	return fdesc;
+
+err:
+	while (--i >= 0)
+		dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
+			      fdesc->node[i].pdesc);
+	kfree(fdesc);
+	return NULL;
+}
+
+static int st_fdma_alloc_chan_res(struct dma_chan *chan)
+{
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+
+	if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) {
+		fchan->dreq_line = 0;
+	} else {
+		int dreq = st_fdma_dreq_get(fchan);
+
+		if (dreq < 0)
+			return dreq;
+
+		fchan->dreq_line = dreq;
+	}
+
+	/* Create the dma pool for descriptor allocation */
+	fchan->node_pool = dma_pool_create(dev_name(&chan->dev->device),
+					   fchan->fdev->dev,
+					   sizeof(struct st_fdma_hw_node),
+					   __alignof__(struct st_fdma_hw_node),
+					   0);
+
+	if (!fchan->node_pool) {
+		dev_err(fchan->fdev->dev, "unable to allocate desc pool\n");
+		return -ENOMEM;
+	}
+
+	dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n",
+		fchan->vchan.chan.chan_id, fchan->cfg.type);
+
+	return 0;
+}
+
+static void st_fdma_free_chan_res(struct dma_chan *chan)
+{
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	dev_dbg(fchan->fdev->dev, "freeing chan:%d\n",
+		fchan->vchan.chan.chan_id);
+
+	if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN)
+		st_fdma_dreq_put(fchan);
+
+	spin_lock_irqsave(&fchan->vchan.lock, flags);
+	fchan->fdesc = NULL;
+	vchan_get_all_descriptors(&fchan->vchan, &head);
+	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+	dma_pool_destroy(fchan->node_pool);
+	fchan->node_pool = NULL;
+	memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg));
+}
+
+static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy(
+	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+	size_t len, unsigned long flags)
+{
+	struct st_fdma_chan *fchan;
+	struct st_fdma_desc *fdesc;
+	struct st_fdma_hw_node *hw_node;
+
+	if (!len)
+		return NULL;
+
+	fchan = to_st_fdma_chan(chan);
+
+	if (!atomic_read(&fchan->fdev->fw_loaded)) {
+		dev_err(fchan->fdev->dev, "%s: fdma fw not loaded\n", __func__);
+		return NULL;
+	}
+
+	/* We only require a single descriptor */
+	fdesc = st_fdma_alloc_desc(fchan, 1);
+	if (!fdesc) {
+		dev_err(fchan->fdev->dev, "no memory for desc\n");
+		return NULL;
+	}
+
+	hw_node = fdesc->node[0].desc;
+	hw_node->next = 0;
+	hw_node->control = NODE_CTRL_REQ_MAP_FREE_RUN;
+	hw_node->control |= NODE_CTRL_SRC_INCR;
+	hw_node->control |= NODE_CTRL_DST_INCR;
+	hw_node->control |= NODE_CTRL_INT_EON;
+	hw_node->nbytes = len;
+	hw_node->saddr = src;
+	hw_node->daddr = dst;
+	hw_node->generic.length = len;
+	hw_node->generic.sstride = 0;
+	hw_node->generic.dstride = 0;
+
+	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
+}
+
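+/*
+ * Program the request control register for a paced transfer: the
+ * direction selects read vs write and which side of the slave config
+ * (address, bus width, maxburst) applies; the bus width chooses the
+ * load/store opcode and maxburst the number of operations per request.
+ */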
+static int config_reqctrl(struct st_fdma_chan *fchan,
+			  enum dma_transfer_direction direction)
+{
+	u32 maxburst = 0, addr = 0;
+	enum dma_slave_buswidth width;
+	int ch_id = fchan->vchan.chan.chan_id;
+	struct st_fdma_dev *fdev = fchan->fdev;
+
+	if (direction == DMA_DEV_TO_MEM) {
+		fchan->cfg.req_ctrl &= ~REQ_CTRL_WNR;
+		maxburst = fchan->scfg.src_maxburst;
+		width = fchan->scfg.src_addr_width;
+		addr = fchan->scfg.src_addr;
+	} else if (direction == DMA_MEM_TO_DEV) {
+		fchan->cfg.req_ctrl |= REQ_CTRL_WNR;
+		maxburst = fchan->scfg.dst_maxburst;
+		width = fchan->scfg.dst_addr_width;
+		addr = fchan->scfg.dst_addr;
+	} else {
+		return -EINVAL;
+	}
+
+	fchan->cfg.req_ctrl &= ~REQ_CTRL_OPCODE_MASK;
+	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST1;
+	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
+		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST2;
+	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
+		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST4;
+	else if (width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+		fchan->cfg.req_ctrl |= REQ_CTRL_OPCODE_LD_ST8;
+	else
+		return -EINVAL;
+
+	fchan->cfg.req_ctrl &= ~REQ_CTRL_NUM_OPS_MASK;
+	fchan->cfg.req_ctrl |= REQ_CTRL_NUM_OPS(maxburst - 1);
+	dreq_write(fchan, fchan->cfg.req_ctrl, REQ_CTRL);
+
+	fchan->cfg.dev_addr = addr;
+	fchan->cfg.dir = direction;
+
+	dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n",
+		ch_id, addr, fchan->cfg.req_ctrl);
+
+	return 0;
+}
+
+static void fill_hw_node(struct st_fdma_hw_node *hw_node,
+			struct st_fdma_chan *fchan,
+			enum dma_transfer_direction direction)
+{
+	if (direction == DMA_MEM_TO_DEV) {
+		hw_node->control |= NODE_CTRL_SRC_INCR;
+		hw_node->control |= NODE_CTRL_DST_STATIC;
+		hw_node->daddr = fchan->cfg.dev_addr;
+	} else {
+		hw_node->control |= NODE_CTRL_SRC_STATIC;
+		hw_node->control |= NODE_CTRL_DST_INCR;
+		hw_node->saddr = fchan->cfg.dev_addr;
+	}
+	hw_node->generic.sstride = 0;
+	hw_node->generic.dstride = 0;
+}
+
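+/*
+ * Cyclic transfers build one hardware node per period and link the last
+ * node back to the first, so the transfer loops until terminated. Every
+ * node raises an end-of-node interrupt to drive the period callback.
+ */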
+static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t buf_addr, size_t len,
+		size_t period_len, enum dma_transfer_direction direction,
+		unsigned long flags)
+{
+	struct st_fdma_chan *fchan;
+	struct st_fdma_desc *fdesc;
+	int sg_len, i;
+
+	if (!chan || !len || !period_len)
+		return NULL;
+
+	fchan = to_st_fdma_chan(chan);
+
+	if (!atomic_read(&fchan->fdev->fw_loaded)) {
+		dev_err(fchan->fdev->dev, "%s: fdma fw not loaded\n", __func__);
+		return NULL;
+	}
+
+	if (!is_slave_direction(direction)) {
+		dev_err(fchan->fdev->dev, "bad direction?\n");
+		return NULL;
+	}
+
+	if (config_reqctrl(fchan, direction)) {
+		dev_err(fchan->fdev->dev, "bad width or direction\n");
+		return NULL;
+	}
+
+	/* the buffer length must be a multiple of period_len */
+	if (len % period_len != 0) {
+		dev_err(fchan->fdev->dev, "len is not multiple of period\n");
+		return NULL;
+	}
+
+	sg_len = len / period_len;
+	fdesc = st_fdma_alloc_desc(fchan, sg_len);
+	if (!fdesc) {
+		dev_err(fchan->fdev->dev, "no memory for desc\n");
+		return NULL;
+	}
+
+	fdesc->iscyclic = true;
+
+	for (i = 0; i < sg_len; i++) {
+		struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;
+
+		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
+
+		hw_node->control = NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
+		hw_node->control |= NODE_CTRL_INT_EON;
+
+		fill_hw_node(hw_node, fchan, direction);
+
+		if (direction == DMA_MEM_TO_DEV)
+			hw_node->saddr = buf_addr + (i * period_len);
+		else
+			hw_node->daddr = buf_addr + (i * period_len);
+
+		hw_node->nbytes = period_len;
+		hw_node->generic.length = period_len;
+	}
+
+	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
+}
+
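+/*
+ * Scatter-gather transfers also chain one node per segment, but only
+ * the last node carries the end-of-node interrupt flag, so a single
+ * completion interrupt fires once the whole list has been transferred.
+ */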
+static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct st_fdma_chan *fchan;
+	struct st_fdma_desc *fdesc;
+	struct st_fdma_hw_node *hw_node;
+	struct scatterlist *sg;
+	int i;
+
+	if (!chan || !sgl || !sg_len)
+		return NULL;
+
+	fchan = to_st_fdma_chan(chan);
+
+	if (!atomic_read(&fchan->fdev->fw_loaded)) {
+		dev_err(fchan->fdev->dev, "%s: fdma fw not loaded\n", __func__);
+		return NULL;
+	}
+
+	if (!is_slave_direction(direction)) {
+		dev_err(fchan->fdev->dev, "bad direction?\n");
+		return NULL;
+	}
+
+	fdesc = st_fdma_alloc_desc(fchan, sg_len);
+	if (!fdesc) {
+		dev_err(fchan->fdev->dev, "no memory for desc\n");
+		return NULL;
+	}
+
+	fdesc->iscyclic = false;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		hw_node = fdesc->node[i].desc;
+
+		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
+		hw_node->control = NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
+
+		fill_hw_node(hw_node, fchan, direction);
+
+		if (direction == DMA_MEM_TO_DEV)
+			hw_node->saddr = sg_dma_address(sg);
+		else
+			hw_node->daddr = sg_dma_address(sg);
+
+		hw_node->nbytes = sg_dma_len(sg);
+		hw_node->generic.length = sg_dma_len(sg);
+	}
+
+	/* interrupt at end of last node */
+	hw_node->control |= NODE_CTRL_INT_EON;
+
+	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
+}
+
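+/*
+ * Compute the residue by walking the node list backwards: nodes after
+ * the one currently in flight contribute their full size, and the
+ * current node contributes whatever is left in the hardware byte
+ * counter (CNTN). Called with the vchan lock held.
+ */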
+static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan,
+				   struct virt_dma_desc *vdesc,
+				   bool in_progress)
+{
+	struct st_fdma_desc *fdesc = fchan->fdesc;
+	size_t residue = 0;
+	dma_addr_t cur_addr = 0;
+	int i;
+
+	if (in_progress) {
+		cur_addr = fchan_read(fchan, CH_CMD);
+		cur_addr &= FDMA_CH_CMD_DATA_MASK;
+	}
+
+	for (i = fdesc->n_nodes - 1; i >= 0; i--) {
+		if (cur_addr == fdesc->node[i].pdesc) {
+			residue += fnode_read(fchan, CNTN);
+			break;
+		}
+		residue += fdesc->node[i].desc->nbytes;
+	}
+
+	return residue;
+}
+
+static enum dma_status st_fdma_tx_status(struct dma_chan *chan,
+					 dma_cookie_t cookie,
+					 struct dma_tx_state *txstate)
+{
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+	struct virt_dma_desc *vd;
+	enum dma_status ret;
+	unsigned long flags;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_COMPLETE)
+		return ret;
+
+	if (!txstate)
+		return fchan->status;
+
+	spin_lock_irqsave(&fchan->vchan.lock, flags);
+	vd = vchan_find_desc(&fchan->vchan, cookie);
+	if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie)
+		txstate->residue = st_fdma_desc_residue(fchan, vd, true);
+	else if (vd)
+		txstate->residue = st_fdma_desc_residue(fchan, vd, false);
+	else
+		txstate->residue = 0;
+
+	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+	return fchan->status;
+}
+
+static void st_fdma_issue_pending(struct dma_chan *chan)
+{
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fchan->vchan.lock, flags);
+
+	if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc)
+		st_fdma_xfer_desc(fchan);
+
+	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+}
+
+static int st_fdma_pause(struct dma_chan *chan)
+{
+	unsigned long flags;
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+	int ch_id = fchan->vchan.chan.chan_id;
+	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
+
+	dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id);
+
+	spin_lock_irqsave(&fchan->vchan.lock, flags);
+	if (fchan->fdesc)
+		fdma_write(fchan->fdev, cmd, CMD_SET);
+	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+	return 0;
+}
+
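+/*
+ * Resume restarts the channel from the node pointer still latched in
+ * CH_CMD: masking with FDMA_CH_CMD_DATA_MASK clears the status bits and
+ * leaves only the descriptor address to write back.
+ */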
+static int st_fdma_resume(struct dma_chan *chan)
+{
+	unsigned long flags;
+	unsigned long val;
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+	int ch_id = fchan->vchan.chan.chan_id;
+
+	dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id);
+
+	spin_lock_irqsave(&fchan->vchan.lock, flags);
+	if (fchan->fdesc) {
+		val = fchan_read(fchan, CH_CMD);
+		val &= FDMA_CH_CMD_DATA_MASK;
+		fchan_write(fchan, val, CH_CMD);
+	}
+	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+	return 0;
+}
+
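+/*
+ * Termination is implemented by pausing the channel and then discarding
+ * every queued descriptor, including the one in flight.
+ */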
+static int st_fdma_terminate_all(struct dma_chan *chan)
+{
+	unsigned long flags;
+	LIST_HEAD(head);
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+	int ch_id = fchan->vchan.chan.chan_id;
+	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
+
+	dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id);
+
+	spin_lock_irqsave(&fchan->vchan.lock, flags);
+	fdma_write(fchan->fdev, cmd, CMD_SET);
+	fchan->fdesc = NULL;
+	vchan_get_all_descriptors(&fchan->vchan, &head);
+	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+	vchan_dma_desc_free_list(&fchan->vchan, &head);
+
+	return 0;
+}
+
+static int st_fdma_slave_config(struct dma_chan *chan,
+				struct dma_slave_config *slave_cfg)
+{
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+
+	memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg));
+
+	return 0;
+}
+
+static const struct st_fdma_ram fdma_mpe31_mem[] = {
+	{ .name = "dmem", .offset = 0x10000, .size = 0x3000 },
+	{ .name = "imem", .offset = 0x18000, .size = 0x8000 },
+};
+
+static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = {
+	.fdma_mem = fdma_mpe31_mem,
+	.num_mem = ARRAY_SIZE(fdma_mpe31_mem),
+	.name = "STiH407",
+	.id = 0,
+};
+
+static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = {
+	.fdma_mem = fdma_mpe31_mem,
+	.num_mem = ARRAY_SIZE(fdma_mpe31_mem),
+	.name = "STiH407",
+	.id = 1,
+};
+
+static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = {
+	.fdma_mem = fdma_mpe31_mem,
+	.num_mem = ARRAY_SIZE(fdma_mpe31_mem),
+	.name = "STiH407",
+	.id = 2,
+};
+
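+/*
+ * Illustrative DT sketch (hypothetical node, see the DT binding for the
+ * normative description); the driver only parses "dma-channels" here,
+ * and the firmware name is derived from the matched compatible:
+ *
+ *	fdma0: dma-controller {
+ *		compatible = "st,stih407-fdma-mpe31-11";
+ *		dma-channels = <16>;
+ *		#dma-cells = <3>;
+ *	};
+ */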
+static const struct of_device_id st_fdma_match[] = {
+	{ .compatible = "st,stih407-fdma-mpe31-11"
+	  , .data = &fdma_mpe31_stih407_11 },
+	{ .compatible = "st,stih407-fdma-mpe31-12"
+	  , .data = &fdma_mpe31_stih407_12 },
+	{ .compatible = "st,stih407-fdma-mpe31-13"
+	  , .data = &fdma_mpe31_stih407_13 },
+	{},
+};
+MODULE_DEVICE_TABLE(of, st_fdma_match);
+
+static int st_fdma_parse_dt(struct platform_device *pdev,
+			const struct st_fdma_driverdata *drvdata,
+			struct st_fdma_dev *fdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	int ret;
+
+	if (!np)
+		return -EINVAL;
+
+	ret = of_property_read_u32(np, "dma-channels", &fdev->nr_channels);
+	if (ret)
+		return ret;
+
+	snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf",
+		drvdata->name, drvdata->id);
+
+	return 0;
+}
+
+#define FDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static int st_fdma_probe(struct platform_device *pdev)
+{
+	struct st_fdma_dev *fdev;
+	const struct of_device_id *match;
+	struct device_node *np = pdev->dev.of_node;
+	const struct st_fdma_driverdata *drvdata;
+	int irq, ret, i;
+
+	match = of_match_device(st_fdma_match, &pdev->dev);
+	if (!match || !match->data) {
+		dev_err(&pdev->dev, "No device match found\n");
+		return -ENODEV;
+	}
+
+	drvdata = match->data;
+
+	fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
+	if (!fdev)
+		return -ENOMEM;
+
+	ret = st_fdma_parse_dt(pdev, drvdata, fdev);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to find platform data\n");
+		goto err;
+	}
+
+	fdev->chans = devm_kcalloc(&pdev->dev, fdev->nr_channels,
+				   sizeof(struct st_fdma_chan), GFP_KERNEL);
+	if (!fdev->chans)
+		return -ENOMEM;
+
+	fdev->dev = &pdev->dev;
+	fdev->drvdata = drvdata;
+	platform_set_drvdata(pdev, fdev);
+
+	fdev->io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	fdev->io_base = devm_ioremap_resource(&pdev->dev, fdev->io_res);
+	if (IS_ERR(fdev->io_base))
+		return PTR_ERR(fdev->io_base);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Failed to get irq resource\n");
+		return -EINVAL;
+	}
+
+	ret = devm_request_irq(&pdev->dev, irq, st_fdma_irq_handler, 0,
+			       dev_name(&pdev->dev), fdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to request irq\n");
+		goto err;
+	}
+
+	ret = st_fdma_clk_get(fdev);
+	if (ret)
+		goto err;
+
+	ret = st_fdma_clk_enable(fdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to enable clocks\n");
+		goto err_clk;
+	}
+
+	/* Initialise list of FDMA channels */
+	INIT_LIST_HEAD(&fdev->dma_device.channels);
+	for (i = 0; i < fdev->nr_channels; i++) {
+		struct st_fdma_chan *fchan = &fdev->chans[i];
+
+		fchan->fdev = fdev;
+		fchan->vchan.desc_free = st_fdma_free_desc;
+		vchan_init(&fchan->vchan, &fdev->dma_device);
+	}
+
+	/* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */
+	fdev->dreq_mask = BIT(0) | BIT(31);
+
+	dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask);
+	dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask);
+
+	fdev->dma_device.dev = &pdev->dev;
+	fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res;
+	fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res;
+	fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic;
+	fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg;
+	fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy;
+	fdev->dma_device.device_tx_status = st_fdma_tx_status;
+	fdev->dma_device.device_issue_pending = st_fdma_issue_pending;
+	fdev->dma_device.device_terminate_all = st_fdma_terminate_all;
+	fdev->dma_device.device_config = st_fdma_slave_config;
+	fdev->dma_device.device_pause = st_fdma_pause;
+	fdev->dma_device.device_resume = st_fdma_resume;
+
+	fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS;
+	fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS;
+	fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+	ret = dma_async_device_register(&fdev->dma_device);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register DMA device\n");
+		goto err_clk;
+	}
+
+	ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register controller\n");
+		goto err_dma_dev;
+	}
+
+	dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", irq);
+
+	return 0;
+
+err_dma_dev:
+	dma_async_device_unregister(&fdev->dma_device);
+err_clk:
+	st_fdma_clk_disable(fdev);
+err:
+	return ret;
+}
+
+static int st_fdma_remove(struct platform_device *pdev)
+{
+	struct st_fdma_dev *fdev = platform_get_drvdata(pdev);
+
+	st_fdma_clk_disable(fdev);
+
+	return 0;
+}
+
+static struct platform_driver st_fdma_platform_driver = {
+	.driver = {
+		.name = "st-fdma",
+		.of_match_table = st_fdma_match,
+	},
+	.probe = st_fdma_probe,
+	.remove = st_fdma_remove,
+};
+module_platform_driver(st_fdma_platform_driver);
+
+bool st_fdma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct st_fdma_cfg *config = param;
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+
+	if (!param)
+		return false;
+
+	if (fchan->fdev->dma_device.dev->of_node != config->of_node)
+		return false;
+
+	fchan->cfg = *config;
+
+	return true;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
+MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");