
[1/4] dma: fsl-qdma: add qDMA Command queue mode driver

Message ID 20171130074836.1381-1-wen.he_1@nxp.com (mailing list archive)
State Changes Requested

Commit Message

Wen He Nov. 30, 2017, 7:48 a.m. UTC
This patch adds qDMA command queue support.
Both qDMA source data and destination data can be either
contiguous or non-contiguous, using one or more scatter/gather tables.

Signed-off-by: Wen He <wen.he_1@nxp.com>
---
 drivers/dma/Kconfig    |   12 +
 drivers/dma/Makefile   |    1 +
 drivers/dma/fsl-qdma.c | 1126 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 1139 insertions(+)
 create mode 100644 drivers/dma/fsl-qdma.c

Comments

Vinod Koul Dec. 11, 2017, 3:51 a.m. UTC | #1
On Thu, Nov 30, 2017 at 03:48:33PM +0800, Wen He wrote:
> This patch adds qDMA command queue support.
> Both qDMA source data and destination data can be either
> contiguous or non-contiguous, using one or more scatter/gather tables.

The subsystem name is dmaengine so please use that. If in doubt, use git
log on that subsystem to find the tag
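
For reference, the requested tag would make the subject line read
something like (following the "dmaengine: <driver>:" convention used in
that tree):

    dmaengine: fsl-qdma: add qDMA command queue mode driver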

> 
> Signed-off-by: Wen He <wen.he_1@nxp.com>
> ---
>  drivers/dma/Kconfig    |   12 +
>  drivers/dma/Makefile   |    1 +
>  drivers/dma/fsl-qdma.c | 1126 ++++++++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 1139 insertions(+)
>  create mode 100644 drivers/dma/fsl-qdma.c
> 
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index 27df3e2837fd..22e203c68e94 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -215,6 +215,18 @@ config FSL_EDMA
>  	  multiplexing capability for DMA request sources(slot).
>  	  This module can be found on Freescale Vybrid and LS-1 SoCs.
>  
> +config FSL_QDMA
> +       tristate "Freescale qDMA engine support"
> +       select DMA_ENGINE
> +       select DMA_VIRTUAL_CHANNELS
> +       select DMA_ENGINE_RAID
> +       select ASYNC_TX_ENABLE_CHANNEL_SWITCH
> +       help
> +         Support the Freescale qDMA engine with command queue and legacy mode.
> +         Channel virtualization is supported through enqueuing of DMA jobs to,
> +         or dequeuing DMA jobs from, different work queues.
> +         This module can be found on Freescale LS SoCs.
> +
>  config FSL_RAID
>          tristate "Freescale RAID engine Support"
>          depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
> diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
> index b9dca8a0e142..7a49b7b6124a 100644
> --- a/drivers/dma/Makefile
> +++ b/drivers/dma/Makefile
> @@ -32,6 +32,7 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
>  obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
>  obj-$(CONFIG_FSL_DMA) += fsldma.o
>  obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
> +obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
>  obj-$(CONFIG_FSL_RAID) += fsl_raid.o
>  obj-$(CONFIG_HSU_DMA) += hsu/
>  obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
> diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
> new file mode 100644
> index 000000000000..43ed48a73ed6
> --- /dev/null
> +++ b/drivers/dma/fsl-qdma.c
> @@ -0,0 +1,1126 @@
> +/*
> + * drivers/dma/fsl-qdma.c
> + *
> + * Copyright 2014-2015 Freescale Semiconductor, Inc.

we are in 2017

> + *
> + * Driver for the Freescale qDMA engine with software command queue mode.
> + * Channel virtualization is supported through enqueuing of DMA jobs to,
> + * or dequeuing DMA jobs from, different work queues.
> + * This module can be found on Freescale LS SoCs.
> + *
> + * This program is free software; you can redistribute  it and/or modify it
> + * under  the terms of  the GNU General  Public License as published by the
> + * Free Software Foundation;  either version 2 of the  License, or (at your
> + * option) any later version.

Please consider using SPDX tags for this
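
A minimal sketch of what is being asked for, assuming GPL-2.0+ is the
intended license (it matches the "version 2 ... or any later version"
wording above); the SPDX line goes on the first line of the .c file:

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017 NXP
 *
 * Driver for the Freescale qDMA engine with software command queue mode.
 */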

> + */
> +
> +#include <asm/cacheflush.h>
> +#include <linux/clk.h>
> +#include <linux/delay.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/dmapool.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/module.h>
> +#include <linux/of.h>
> +#include <linux/of_address.h>
> +#include <linux/of_device.h>
> +#include <linux/of_dma.h>
> +#include <linux/of_irq.h>
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>

Do you need all these?

> +
> +#include "virt-dma.h"
> +
> +#define FSL_QDMA_DMR			0x0
> +#define FSL_QDMA_DSR			0x4
> +#define FSL_QDMA_DEIER			0xe00
> +#define FSL_QDMA_DEDR			0xe04
> +#define FSL_QDMA_DECFDW0R		0xe10
> +#define FSL_QDMA_DECFDW1R		0xe14
> +#define FSL_QDMA_DECFDW2R		0xe18
> +#define FSL_QDMA_DECFDW3R		0xe1c
> +#define FSL_QDMA_DECFQIDR		0xe30
> +#define FSL_QDMA_DECBR			0xe34

BIT() or GENMASK() please for register definitions, here and other places in
the driver
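
A sketch of the requested conversion, using a few of the masks defined
later in this patch (the BIT()/GENMASK() forms below are equivalent to
the hex values they replace):

#define FSL_QDMA_DMR_DQD	BIT(30)		/* was 0x40000000 */
#define FSL_QDMA_DSR_DB		BIT(31)		/* was 0x80000000 */
#define FSL_QDMA_BCQIER_CQTIE	BIT(15)		/* was 0x8000 */
#define FSL_QDMA_CQIDR_CQT	GENMASK(31, 24)	/* was 0xff000000 */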

> +
> +#define FSL_QDMA_BCQMR(x)		(0xc0 + 0x100 * (x))
> +#define FSL_QDMA_BCQSR(x)		(0xc4 + 0x100 * (x))
> +#define FSL_QDMA_BCQEDPA_SADDR(x)	(0xc8 + 0x100 * (x))
> +#define FSL_QDMA_BCQDPA_SADDR(x)	(0xcc + 0x100 * (x))
> +#define FSL_QDMA_BCQEEPA_SADDR(x)	(0xd0 + 0x100 * (x))
> +#define FSL_QDMA_BCQEPA_SADDR(x)	(0xd4 + 0x100 * (x))
> +#define FSL_QDMA_BCQIER(x)		(0xe0 + 0x100 * (x))
> +#define FSL_QDMA_BCQIDR(x)		(0xe4 + 0x100 * (x))
> +
> +#define FSL_QDMA_SQDPAR			0x80c
> +#define FSL_QDMA_SQEPAR			0x814
> +#define FSL_QDMA_BSQMR			0x800
> +#define FSL_QDMA_BSQSR			0x804
> +#define FSL_QDMA_BSQICR			0x828
> +#define FSL_QDMA_CQMR			0xa00
> +#define FSL_QDMA_CQDSCR1		0xa08
> +#define FSL_QDMA_CQDSCR2                0xa0c
> +#define FSL_QDMA_CQIER			0xa10
> +#define FSL_QDMA_CQEDR			0xa14
> +#define FSL_QDMA_SQCCMR			0xa20
> +
> +#define FSL_QDMA_SQICR_ICEN
> +
> +#define FSL_QDMA_CQIDR_CQT		0xff000000
> +#define FSL_QDMA_CQIDR_SQPE		0x800000
> +#define FSL_QDMA_CQIDR_SQT		0x8000
> +
> +#define FSL_QDMA_BCQIER_CQTIE		0x8000
> +#define FSL_QDMA_BCQIER_CQPEIE		0x800000
> +#define FSL_QDMA_BSQICR_ICEN		0x80000000
> +#define FSL_QDMA_BSQICR_ICST(x)		((x) << 16)
> +#define FSL_QDMA_CQIER_MEIE		0x80000000
> +#define FSL_QDMA_CQIER_TEIE		0x1
> +#define FSL_QDMA_SQCCMR_ENTER_WM	0x200000
> +
> +#define FSL_QDMA_QUEUE_MAX		8
> +
> +#define FSL_QDMA_BCQMR_EN		0x80000000
> +#define FSL_QDMA_BCQMR_EI		0x40000000
> +#define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
> +#define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
> +
> +#define FSL_QDMA_BCQSR_QF		0x10000
> +#define FSL_QDMA_BCQSR_XOFF		0x1
> +
> +#define FSL_QDMA_BSQMR_EN		0x80000000
> +#define FSL_QDMA_BSQMR_DI		0x40000000
> +#define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
> +
> +#define FSL_QDMA_BSQSR_QE		0x20000
> +
> +#define FSL_QDMA_DMR_DQD		0x40000000
> +#define FSL_QDMA_DSR_DB			0x80000000
> +
> +#define FSL_QDMA_BASE_BUFFER_SIZE	96
> +#define FSL_QDMA_EXPECT_SG_ENTRY_NUM	16
> +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN	64
> +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
> +#define FSL_QDMA_QUEUE_NUM_MAX		8
> +
> +#define FSL_QDMA_CMD_RWTTYPE		0x4
> +#define FSL_QDMA_CMD_LWC                0x2
> +
> +#define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
> +#define FSL_QDMA_CMD_NS_OFFSET		27
> +#define FSL_QDMA_CMD_DQOS_OFFSET	24
> +#define FSL_QDMA_CMD_WTHROTL_OFFSET	20
> +#define FSL_QDMA_CMD_DSEN_OFFSET	19
> +#define FSL_QDMA_CMD_LWC_OFFSET		16
> +
> +#define FSL_QDMA_E_SG_TABLE		1
> +#define FSL_QDMA_E_DATA_BUFFER		0
> +#define FSL_QDMA_F_LAST_ENTRY		1
> +
> +u64 pre_addr, pre_queue;
> +
> +/* qDMA Command Descriptor Fotmats */
> +
> +/* Compound Command Descriptor Fotmat */
> +struct fsl_qdma_ccdf {
> +	__le32 status; /* ser, status */
> +	__le32 cfg;	/* format, offset */
> +	union {
> +		struct {
> +			__le32 addr_lo;	/* low 32-bits of 40-bit address */
> +			u8 addr_hi;	/* high 8-bits of 40-bit address */
> +			u8 __reserved1[2];
> +			u8 cfg8b_w1; /* dd, queue*/
> +		} __packed;
> +		__le64 data;
> +	};
> +} __packed;
> +
> +#define QDMA_CCDF_STATUS		20
> +#define QDMA_CCDF_OFFSET		20
> +#define QDMA_CCDF_MASK			GENMASK(28, 20)
> +#define QDMA_CCDF_FOTMAT		BIT(29)
> +#define QDMA_CCDF_SER			BIT(30)
> +
> +static inline u64 qdma_ccdf_addr_get64(const struct fsl_qdma_ccdf *ccdf)
> +{
> +	return le64_to_cpu(ccdf->data) & 0xffffffffffLLU;
> +}
> +static inline u64 qdma_ccdf_get_queue(const struct fsl_qdma_ccdf *ccdf)
> +{
> +	return ccdf->cfg8b_w1 & 0xff;
> +}
> +static inline void qdma_ccdf_addr_set64(struct fsl_qdma_ccdf *ccdf, u64 addr)
> +{
> +	ccdf->addr_hi = upper_32_bits(addr);
> +	ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
> +}
> +static inline int qdma_ccdf_get_offset(const struct fsl_qdma_ccdf *ccdf)
> +{
> +	return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
> +}
> +static inline void qdma_ccdf_set_format(struct fsl_qdma_ccdf *ccdf, int offset)
> +{
> +	ccdf->cfg = cpu_to_le32(QDMA_CCDF_FOTMAT | offset);
> +}
> +static inline int qdma_ccdf_get_status(const struct fsl_qdma_ccdf *ccdf)
> +{
> +	return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
> +}
> +static inline void qdma_ccdf_set_ser(struct fsl_qdma_ccdf *ccdf, int status)
> +{
> +	ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
> +}
> +/* qDMA Compound S/G Format */
> +struct fsl_qdma_csgf {
> +	__le32 offset; /* offset */
> +	__le32 cfg;	/* E bit, F bit, length */
> +	union {
> +		struct {
> +			__le32 addr_lo;	/* low 32-bits of 40-bit address */
> +			u8 addr_hi;	/* high 8-bits of 40-bit address */
> +			u8 __reserved1[3];
> +		};
> +		__le64 data;
> +	};

this looks terribly similar to fsl_qdma_ccdf; if you have a union then why not
add this to it too?

> +} __packed;
> +
> +#define QDMA_SG_FIN			BIT(30)
> +#define QDMA_SG_EXT			BIT(31)
> +#define QDMA_SG_LEN_MASK		GENMASK(29, 0)

empty line here please

> +static inline u64 qdma_csgf_addr_get64(const struct fsl_qdma_csgf *sg)
> +{
> +	return be64_to_cpu(sg->data) & 0xffffffffffLLU;
> +}
> +static inline void qdma_csgf_addr_set64(struct fsl_qdma_csgf *sg, u64 addr)
> +{
> +	sg->addr_hi = upper_32_bits(addr);
> +	sg->addr_lo = cpu_to_le32(lower_32_bits(addr));
> +}
> +static inline void qdma_csgf_set_len(struct fsl_qdma_csgf *csgf, int len)
> +{
> +	csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
> +}
> +static inline void qdma_csgf_set_f(struct fsl_qdma_csgf *csgf, int len)
> +{
> +	csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
> +}
> +static inline void qdma_csgf_set_e(struct fsl_qdma_csgf *csgf, int len)
> +{
> +	csgf->cfg = cpu_to_le32(QDMA_SG_EXT | (len & QDMA_SG_LEN_MASK));
> +}
> +
> +/* qDMA Source Descriptor Format */
> +struct fsl_qdma_sdf {
> +	__le32 rev3;
> +	__le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
> +	__le32 rev5;
> +	__le32 cmd;
> +} __packed;
> +
> +/*qDMA Destination Descriptor Format*/
> +struct fsl_qdma_ddf {
> +	__le32 rev1;
> +	__le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
> +	__le32 rev3;
> +	__le32 cmd;
> +} __packed;
> +
> +struct fsl_qdma_chan {
> +	struct virt_dma_chan		vchan;
> +	struct virt_dma_desc		vdesc;
> +	enum dma_status			status;
> +	u32				slave_id;
> +	struct fsl_qdma_engine		*qdma;
> +	struct fsl_qdma_queue		*queue;
> +	struct list_head		qcomp;
> +};
> +
> +struct fsl_qdma_queue {
> +	struct fsl_qdma_ccdf	*virt_head;
> +	struct fsl_qdma_ccdf	*virt_tail;
> +	struct list_head	comp_used;
> +	struct list_head	comp_free;
> +	struct dma_pool		*comp_pool;
> +	struct dma_pool		*sg_pool;
> +	spinlock_t		queue_lock;
> +	dma_addr_t		bus_addr;
> +	u32                     n_cq;
> +	u32			id;
> +	struct fsl_qdma_ccdf	*cq;
> +};
> +
> +struct fsl_qdma_sg {
> +	dma_addr_t		bus_addr;
> +	void			*virt_addr;
> +};
> +
> +struct fsl_qdma_comp {
> +	dma_addr_t              bus_addr;
> +	void			*virt_addr;
> +	struct fsl_qdma_chan	*qchan;
> +	struct fsl_qdma_sg	*sg_block;
> +	struct virt_dma_desc    vdesc;
> +	struct list_head	list;
> +	u32			sg_block_src;
> +	u32			sg_block_dst;
> +};
> +
> +struct fsl_qdma_engine {
> +	struct dma_device	dma_dev;
> +	void __iomem		*ctrl_base;
> +	void __iomem            *status_base;
> +	void __iomem		*block_base;
> +	u32			n_chans;
> +	u32			n_queues;
> +	struct mutex            fsl_qdma_mutex;
> +	int			error_irq;
> +	int			queue_irq;
> +	bool			big_endian;
> +	struct fsl_qdma_queue	*queue;
> +	struct fsl_qdma_queue	*status;
> +	struct fsl_qdma_chan	chans[];
> +
> +};
> +
> +static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
> +{
> +	if (qdma->big_endian)
> +		return ioread32be(addr);
> +	else
> +		return ioread32(addr);
> +}

IIRC there are macros which take care of this, can you check other FSL
drivers please
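
For reference, fsldma.h selects big- or little-endian accessors behind
its DMA_IN()/DMA_OUT() macros based on a feature flag; an illustrative
(assumed, not authoritative) equivalent for this driver could be:

#define QDMA_IN(qdma, addr)						\
	((qdma)->big_endian ? ioread32be(addr) : ioread32(addr))
#define QDMA_OUT(qdma, addr, val)					\
	((qdma)->big_endian ? iowrite32be((val), (addr))		\
			    : iowrite32((val), (addr)))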

> +
> +static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
> +						void __iomem *addr)
> +{
> +	if (qdma->big_endian)
> +		iowrite32be(val, addr);
> +	else
> +		iowrite32(val, addr);
> +}
> +
> +static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
> +{
> +	return container_of(chan, struct fsl_qdma_chan, vchan.chan);
> +}
> +
> +static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
> +{
> +	return container_of(vd, struct fsl_qdma_comp, vdesc);
> +}
> +
> +static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
> +{
> +	/*
> +	 * In QDMA mode, We don't need to do anything.
> +	 */
> +	return 0;
> +}

this is not mandatory, so can be removed

> +
> +static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
> +{
> +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
> +	unsigned long flags;
> +	LIST_HEAD(head);
> +
> +	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
> +	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
> +	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
> +
> +	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
> +}
> +
> +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
> +					dma_addr_t dst, dma_addr_t src, u32 len)
> +{
> +	struct fsl_qdma_ccdf *ccdf;
> +	struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest;
> +	struct fsl_qdma_sdf *sdf;
> +	struct fsl_qdma_ddf *ddf;
> +
> +	ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
> +	csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
> +	csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
> +	csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
> +	sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
> +	ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
> +
> +	memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
> +	/* Head Command Descriptor(Frame Descriptor) */
> +	qdma_ccdf_addr_set64(ccdf, fsl_comp->bus_addr + 16);
> +	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
> +	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
> +	/* Status notification is enqueued to status queue. */
> +	/* Compound Command Descriptor(Frame List Table) */
> +	qdma_csgf_addr_set64(csgf_desc, fsl_comp->bus_addr + 64);
> +	/* It must be 32 as Compound S/G Descriptor */
> +	qdma_csgf_set_len(csgf_desc, 32);
> +	qdma_csgf_addr_set64(csgf_src, src);
> +	qdma_csgf_set_len(csgf_src, len);
> +	qdma_csgf_addr_set64(csgf_dest, dst);
> +	qdma_csgf_set_len(csgf_dest, len);
> +	/* This entry is the last entry. */
> +	qdma_csgf_set_f(csgf_dest, len);
> +	/* Descriptor Buffer */
> +	sdf->cmd = cpu_to_le32(
> +			FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
> +	ddf->cmd = cpu_to_le32(
> +			FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
> +	ddf->cmd |= cpu_to_le32(
> +			FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
> +}
> +
> +/*
> + * Prei-request full command descriptor for enqueue.

prei?

> + */
> +static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
> +{
> +	struct fsl_qdma_comp *comp_temp;
> +	int i;
> +
> +	for (i = 0; i < queue->n_cq; i++) {
> +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
> +		if (!comp_temp)
> +			return -1;
> +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
> +						      GFP_NOWAIT,
> +						      &comp_temp->bus_addr);
> +		if (!comp_temp->virt_addr)
> +			return -1;

eeek -1?
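
A sketch of the corrected error path, returning a real errno (and, while
here, freeing the just-allocated comp_temp on the second failure):

		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
		if (!comp_temp)
			return -ENOMEM;
		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
						      GFP_NOWAIT,
						      &comp_temp->bus_addr);
		if (!comp_temp->virt_addr) {
			kfree(comp_temp);
			return -ENOMEM;
		}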

> +		list_add_tail(&comp_temp->list, &queue->comp_free);
> +	}
> +	return 0;
> +}
> +
> +/*
> + * Request a command descriptor for enqueue.
> + */
> +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
> +					struct fsl_qdma_chan *fsl_chan,
> +					unsigned int dst_nents,
> +					unsigned int src_nents)
> +{
> +	struct fsl_qdma_comp *comp_temp;
> +	struct fsl_qdma_sg *sg_block;
> +	struct fsl_qdma_queue *queue = fsl_chan->queue;
> +	unsigned long flags;
> +	unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i;
> +
> +	spin_lock_irqsave(&queue->queue_lock, flags);
> +	if (list_empty(&queue->comp_free)) {
> +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
> +		if (!comp_temp)
> +			return NULL;
> +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
> +						      GFP_NOWAIT,
> +						      &comp_temp->bus_addr);
> +		if (!comp_temp->virt_addr)
> +			return NULL;
> +	} else {
> +		comp_temp = list_first_entry(&queue->comp_free,
> +					     struct fsl_qdma_comp,
> +					     list);
> +		list_del(&comp_temp->list);
> +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> +	}
> +
> +	if (dst_nents != 0)
> +		dst_sg_entry_block = dst_nents /
> +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;

would this be the same as DIV_ROUND_UP()?
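
For comparison, the two forms differ when the count is an exact multiple
of (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1); a worked sketch with 15 usable
entries per block (16 - 1):

dst_sg_entry_block = dst_nents / 15 + 1;	  /* dst_nents = 30 -> 3 */
dst_sg_entry_block = DIV_ROUND_UP(dst_nents, 15); /* dst_nents = 30 -> 2 */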


> +	else
> +		dst_sg_entry_block = 0;
> +
> +	if (src_nents != 0)
> +		src_sg_entry_block = src_nents /
> +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
> +	else
> +		src_sg_entry_block = 0;
> +
> +	sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
> +	if (sg_entry_total) {
> +		sg_block = kzalloc(sizeof(*sg_block) *
> +					      sg_entry_total,
> +					      GFP_KERNEL);
> +		if (!sg_block)
> +			return NULL;

okay, you are leaking here; the pool allocation needs to be rolled back

> +		comp_temp->sg_block = sg_block;
> +		for (i = 0; i < sg_entry_total; i++) {
> +			sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
> +							GFP_NOWAIT,
> +							&sg_block->bus_addr);
> +			memset(sg_block->virt_addr, 0,
> +					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
> +			sg_block++;
> +		}
> +	}
> +
> +	comp_temp->sg_block_src = src_sg_entry_block;
> +	comp_temp->sg_block_dst = dst_sg_entry_block;
> +	comp_temp->qchan = fsl_chan;
> +
> +	return comp_temp;
> +}
> +
> +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
> +					struct platform_device *pdev,
> +					unsigned int queue_num)
> +{
> +	struct device_node *np = pdev->dev.of_node;
> +	struct fsl_qdma_queue *queue_head, *queue_temp;
> +	int ret, len, i;
> +	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
> +
> +	if (queue_num > FSL_QDMA_QUEUE_MAX)
> +		queue_num = FSL_QDMA_QUEUE_MAX;
> +	len = sizeof(*queue_head) * queue_num;
> +	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
> +	if (!queue_head)
> +		return NULL;
> +
> +	ret = of_property_read_u32_array(np, "queue-sizes", queue_size,
> +								queue_num);

why not device_property_read_u32_array()?
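
The suggested call, which works for both DT and ACPI (signature as in
linux/property.h), would be roughly:

	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
					     queue_size, queue_num);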

> +	if (ret) {
> +		dev_err(&pdev->dev, "Can't get queue-sizes.\n");
> +		return NULL;
> +	}
> +
> +	for (i = 0; i < queue_num; i++) {
> +		if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
> +			|| queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
> +			dev_err(&pdev->dev, "Get wrong queue-sizes.\n");
> +			return NULL;
> +		}
> +		queue_temp = queue_head + i;
> +		queue_temp->cq = dma_alloc_coherent(&pdev->dev,
> +						sizeof(struct fsl_qdma_ccdf) *
> +						queue_size[i],
> +						&queue_temp->bus_addr,
> +						GFP_KERNEL);
> +		if (!queue_temp->cq)
> +			return NULL;
> +		queue_temp->n_cq = queue_size[i];
> +		queue_temp->id = i;
> +		queue_temp->virt_head = queue_temp->cq;
> +		queue_temp->virt_tail = queue_temp->cq;
> +		/*
> +		 * The dma pool for queue command buffer
> +		 */
> +		queue_temp->comp_pool = dma_pool_create("comp_pool",
> +						&pdev->dev,
> +						FSL_QDMA_BASE_BUFFER_SIZE,
> +						16, 0);
> +		if (!queue_temp->comp_pool) {
> +			dma_free_coherent(&pdev->dev,
> +						sizeof(struct fsl_qdma_ccdf) *
> +						queue_size[i],
> +						queue_temp->cq,
> +						queue_temp->bus_addr);
> +			return NULL;
> +		}
> +		/*
> +		 * The dma pool for queue command buffer
> +		 */
> +		queue_temp->sg_pool = dma_pool_create("sg_pool",
> +					&pdev->dev,
> +					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16,
> +					64, 0);
> +		if (!queue_temp->sg_pool) {
> +			dma_free_coherent(&pdev->dev,
> +						sizeof(struct fsl_qdma_ccdf) *
> +						queue_size[i],
> +						queue_temp->cq,
> +						queue_temp->bus_addr);
> +			dma_pool_destroy(queue_temp->comp_pool);
> +			return NULL;
> +		}
> +		/*
> +		 * List for queue command buffer
> +		 */
> +		INIT_LIST_HEAD(&queue_temp->comp_used);
> +		INIT_LIST_HEAD(&queue_temp->comp_free);
> +		spin_lock_init(&queue_temp->queue_lock);
> +	}
> +
> +	return queue_head;
> +}
> +
> +static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
> +						struct platform_device *pdev)
> +{
> +	struct device_node *np = pdev->dev.of_node;
> +	struct fsl_qdma_queue *status_head;
> +	unsigned int status_size;
> +	int ret;
> +
> +	ret = of_property_read_u32(np, "status-sizes", &status_size);
> +	if (ret) {
> +		dev_err(&pdev->dev, "Can't get status-sizes.\n");
> +		return NULL;
> +	}
> +	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
> +			|| status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
> +		dev_err(&pdev->dev, "Get wrong status_size.\n");
> +		return NULL;
> +	}
> +	status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
> +								GFP_KERNEL);
> +	if (!status_head)
> +		return NULL;
> +
> +	/*
> +	 * Buffer for queue command
> +	 */
> +	status_head->cq = dma_alloc_coherent(&pdev->dev,
> +						sizeof(struct fsl_qdma_ccdf) *
> +						status_size,
> +						&status_head->bus_addr,
> +						GFP_KERNEL);
> +	if (!status_head->cq)
> +		return NULL;
> +	status_head->n_cq = status_size;
> +	status_head->virt_head = status_head->cq;
> +	status_head->virt_tail = status_head->cq;
> +	status_head->comp_pool = NULL;
> +
> +	return status_head;
> +}
> +
> +static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
> +{
> +	void __iomem *ctrl = fsl_qdma->ctrl_base;
> +	void __iomem *block = fsl_qdma->block_base;
> +	int i, count = 5;
> +	u32 reg;
> +
> +	/* Disable the command queue and wait for idle state. */
> +	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
> +	reg |= FSL_QDMA_DMR_DQD;
> +	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
> +	for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
> +		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
> +
> +	while (1) {
> +		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
> +		if (!(reg & FSL_QDMA_DSR_DB))
> +			break;
> +		if (count-- < 0)
> +			return -EBUSY;
> +		udelay(100);
> +	}
> +
> +	/* Disable status queue. */
> +	qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
> +
> +	/*
> +	 * Clear the command queue interrupt detect register for all queues.
> +	 */
> +	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
> +
> +	return 0;
> +}
> +
> +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
> +{
> +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
> +	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
> +	struct fsl_qdma_queue *temp_queue;
> +	struct fsl_qdma_comp *fsl_comp;
> +	struct fsl_qdma_ccdf *status_addr;
> +	struct fsl_qdma_csgf *csgf_src;
> +	void __iomem *block = fsl_qdma->block_base;
> +	u32 reg, i;
> +	bool duplicate, duplicate_handle;
> +
> +	while (1) {
> +		duplicate = 0;
> +		duplicate_handle = 0;
> +		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
> +		if (reg & FSL_QDMA_BSQSR_QE)
> +			return 0;
> +		status_addr = fsl_status->virt_head;
> +		if (qdma_ccdf_get_queue(status_addr) == pre_queue &&
> +			qdma_ccdf_addr_get64(status_addr) == pre_addr)
> +			duplicate = 1;
> +		i = qdma_ccdf_get_queue(status_addr);
> +		pre_queue = qdma_ccdf_get_queue(status_addr);
> +		pre_addr = qdma_ccdf_addr_get64(status_addr);
> +		temp_queue = fsl_queue + i;
> +		spin_lock(&temp_queue->queue_lock);
> +		if (list_empty(&temp_queue->comp_used)) {
> +			if (duplicate)
> +				duplicate_handle = 1;
> +			else {
> +				spin_unlock(&temp_queue->queue_lock);
> +				return -1;
> +			}
> +		} else {
> +			fsl_comp = list_first_entry(&temp_queue->comp_used,
> +							struct fsl_qdma_comp,
> +							list);
> +			csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr
> +							   + 2;
> +			if (fsl_comp->bus_addr + 16 != pre_addr) {
> +				if (duplicate)
> +					duplicate_handle = 1;
> +				else {
> +					spin_unlock(&temp_queue->queue_lock);
> +					return -1;
> +				}
> +			}
> +		}
> +
> +			if (duplicate_handle) {
> +			reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
> +			reg |= FSL_QDMA_BSQMR_DI;
> +			qdma_ccdf_addr_set64(status_addr, 0x0);
> +			fsl_status->virt_head++;
> +			if (fsl_status->virt_head == fsl_status->cq
> +						   + fsl_status->n_cq)
> +				fsl_status->virt_head = fsl_status->cq;
> +			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
> +			spin_unlock(&temp_queue->queue_lock);
> +			continue;
> +		}
> +		list_del(&fsl_comp->list);
> +
> +		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
> +		reg |= FSL_QDMA_BSQMR_DI;
> +		qdma_ccdf_addr_set64(status_addr, 0x0);
> +		fsl_status->virt_head++;
> +		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
> +			fsl_status->virt_head = fsl_status->cq;
> +		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
> +		spin_unlock(&temp_queue->queue_lock);
> +
> +		spin_lock(&fsl_comp->qchan->vchan.lock);
> +		vchan_cookie_complete(&fsl_comp->vdesc);
> +		fsl_comp->qchan->status = DMA_COMPLETE;
> +		spin_unlock(&fsl_comp->qchan->vchan.lock);
> +	}
> +	return 0;
> +}
> +
> +static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
> +{
> +	struct fsl_qdma_engine *fsl_qdma = dev_id;
> +	unsigned int intr;
> +	void __iomem *status = fsl_qdma->status_base;
> +
> +	intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
> +
> +	if (intr)
> +		dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
> +
> +	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);

Why are we writing all 1s to this?
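
If, as is common for such error-detect registers, FSL_QDMA_DEDR is
write-1-to-clear, a comment would answer this; a sketch (the W1C
behaviour is an assumption to confirm against the reference manual):

	/* DEDR is write-1-to-clear: ack every latched error bit at once. */
	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);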

> +	return IRQ_HANDLED;
> +}
> +
> +static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
> +{
> +	struct fsl_qdma_engine *fsl_qdma = dev_id;
> +	unsigned int intr, reg;
> +	void __iomem *block = fsl_qdma->block_base;
> +	void __iomem *ctrl = fsl_qdma->ctrl_base;
> +
> +	intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
> +
> +	if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
> +		intr = fsl_qdma_queue_transfer_complete(fsl_qdma);
> +
> +	if (intr != 0) {
> +		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
> +		reg |= FSL_QDMA_DMR_DQD;
> +		qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
> +		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
> +		dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
> +	}
> +
> +	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
> +
> +	return IRQ_HANDLED;
> +}
> +
> +static int
> +fsl_qdma_irq_init(struct platform_device *pdev,
> +		  struct fsl_qdma_engine *fsl_qdma)
> +{
> +	int ret;
> +
> +	fsl_qdma->error_irq = platform_get_irq_byname(pdev,
> +							"qdma-error");
> +	if (fsl_qdma->error_irq < 0) {
> +		dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
> +		return fsl_qdma->error_irq;
> +	}
> +
> +	fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");
> +	if (fsl_qdma->queue_irq < 0) {
> +		dev_err(&pdev->dev, "Can't get qdma queue irq.\n");
> +		return fsl_qdma->queue_irq;
> +	}
> +
> +	ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
> +			fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
> +	if (ret) {
> +		dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
> +		return  ret;
> +	}
> +	ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,
> +			fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);
> +	if (ret) {
> +		dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");
> +		return  ret;
> +	}
> +
> +	return 0;
> +}
> +
> +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
> +{
> +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
> +	struct fsl_qdma_queue *temp;
> +	void __iomem *ctrl = fsl_qdma->ctrl_base;
> +	void __iomem *status = fsl_qdma->status_base;
> +	void __iomem *block = fsl_qdma->block_base;
> +	int i, ret;
> +	u32 reg;
> +
> +	/* Try to halt the qDMA engine first. */
> +	ret = fsl_qdma_halt(fsl_qdma);
> +	if (ret) {
> +		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
> +		return ret;
> +	}
> +
> +	/*
> +	 * Clear the command queue interrupt detect register for all queues.
> +	 */
> +	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
> +
> +	for (i = 0; i < fsl_qdma->n_queues; i++) {
> +		temp = fsl_queue + i;
> +		/*
> +		 * Initialize Command Queue registers to point to the first
> +		 * command descriptor in memory.
> +		 * Dequeue Pointer Address Registers
> +		 * Enqueue Pointer Address Registers
> +		 */
> +		qdma_writel(fsl_qdma, temp->bus_addr,
> +				block + FSL_QDMA_BCQDPA_SADDR(i));
> +		qdma_writel(fsl_qdma, temp->bus_addr,
> +				block + FSL_QDMA_BCQEPA_SADDR(i));
> +
> +		/* Initialize the queue mode. */
> +		reg = FSL_QDMA_BCQMR_EN;
> +		reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq)-4);
> +		reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq)-6);
> +		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
> +	}
> +
> +	/*
> +	 * Workaround for erratum: ERR010812.
> +	 * We must enable XOFF to avoid the enqueue rejection occurs.
> +	 * Setting SQCCMR ENTER_WM to 0x20.
> +	 */
> +	qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
> +			      block + FSL_QDMA_SQCCMR);
> +	/*
> +	 * Initialize status queue registers to point to the first
> +	 * command descriptor in memory.
> +	 * Dequeue Pointer Address Registers
> +	 * Enqueue Pointer Address Registers
> +	 */
> +	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
> +					block + FSL_QDMA_SQEPAR);
> +	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
> +					block + FSL_QDMA_SQDPAR);
> +	/* Initialize status queue interrupt. */
> +	qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
> +			      block + FSL_QDMA_BCQIER(0));
> +	qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)
> +						   | 0x8000,
> +			      block + FSL_QDMA_BSQICR);
> +	qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,
> +			      block + FSL_QDMA_CQIER);
> +	/* Initialize controller interrupt register. */
> +	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
> +	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
> +
> +	/* Initialize the status queue mode. */
> +	reg = FSL_QDMA_BSQMR_EN;
> +	reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq)-6);
> +	qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
> +
> +	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
> +	reg &= ~FSL_QDMA_DMR_DQD;
> +	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
> +
> +	return 0;
> +}
> +
> +static struct dma_async_tx_descriptor *
> +fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
> +		dma_addr_t src, size_t len, unsigned long flags)
> +{
> +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
> +	struct fsl_qdma_comp *fsl_comp;
> +
> +	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0);
> +	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
> +
> +	return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
> +}
> +
> +static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
> +{
> +	void __iomem *block = fsl_chan->qdma->block_base;
> +	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
> +	struct fsl_qdma_comp *fsl_comp;
> +	struct virt_dma_desc *vdesc;
> +	u32 reg;
> +
> +	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
> +	if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
> +		return;
> +	vdesc = vchan_next_desc(&fsl_chan->vchan);
> +	if (!vdesc)
> +		return;
> +	list_del(&vdesc->node);
> +	fsl_comp = to_fsl_qdma_comp(vdesc);
> +
> +	memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
> +	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
> +		fsl_queue->virt_head = fsl_queue->cq;
> +
> +	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
> +	barrier();
> +	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
> +	reg |= FSL_QDMA_BCQMR_EI;
> +	qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
> +	fsl_chan->status = DMA_IN_PROGRESS;
> +}
> +
> +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
> +		dma_cookie_t cookie, struct dma_tx_state *txstate)
> +{
> +	return dma_cookie_status(chan, cookie, txstate);
> +}
> +
> +static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
> +{
> +	struct fsl_qdma_comp *fsl_comp;
> +	struct fsl_qdma_queue *fsl_queue;
> +	struct fsl_qdma_sg *sg_block;
> +	unsigned long flags;
> +	unsigned int i;
> +
> +	fsl_comp = to_fsl_qdma_comp(vdesc);
> +	fsl_queue = fsl_comp->qchan->queue;
> +
> +	if (fsl_comp->sg_block) {
> +		for (i = 0; i < fsl_comp->sg_block_src +
> +				fsl_comp->sg_block_dst; i++) {
> +			sg_block = fsl_comp->sg_block + i;
> +			dma_pool_free(fsl_queue->sg_pool,
> +				      sg_block->virt_addr,
> +				      sg_block->bus_addr);
> +		}
> +		kfree(fsl_comp->sg_block);
> +	}
> +
> +	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
> +	list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
> +	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
> +}
> +
> +static void fsl_qdma_issue_pending(struct dma_chan *chan)
> +{
> +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
> +	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
> +	spin_lock(&fsl_chan->vchan.lock);
> +	if (vchan_issue_pending(&fsl_chan->vchan))
> +		fsl_qdma_enqueue_desc(fsl_chan);
> +	spin_unlock(&fsl_chan->vchan.lock);
> +	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
> +}

No residue, terminate callback?
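
A sketch of the terminate callback being asked about, modelled on other
virt-dma users (illustrative, not the author's code; residue reporting
would additionally need device_tx_status to compute the bytes left):

static int fsl_qdma_terminate_all(struct dma_chan *chan)
{
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}

/* wired up in probe: */
fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;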

> +
> +static int fsl_qdma_probe(struct platform_device *pdev)
> +{
> +	struct device_node *np = pdev->dev.of_node;
> +	struct fsl_qdma_engine *fsl_qdma;
> +	struct fsl_qdma_chan *fsl_chan;
> +	struct resource *res;
> +	unsigned int len, chans, queues;
> +	int ret, i;
> +
> +	ret = of_property_read_u32(np, "channels", &chans);
> +	if (ret) {
> +		dev_err(&pdev->dev, "Can't get channels.\n");
> +		return ret;
> +	}
> +
> +	len = sizeof(*fsl_qdma) + sizeof(*fsl_chan) * chans;
> +	fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
> +	if (!fsl_qdma)
> +		return -ENOMEM;
> +
> +	ret = of_property_read_u32(np, "queues", &queues);
> +	if (ret) {
> +		dev_err(&pdev->dev, "Can't get queues.\n");
> +		return ret;
> +	}
> +
> +	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues);
> +	if (!fsl_qdma->queue)
> +		return -ENOMEM;
> +
> +	fsl_qdma->status = fsl_qdma_prep_status_queue(pdev);
> +	if (!fsl_qdma->status)
> +		return -ENOMEM;
> +
> +	fsl_qdma->n_chans = chans;
> +	fsl_qdma->n_queues = queues;
> +	mutex_init(&fsl_qdma->fsl_qdma_mutex);
> +
> +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +	fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
> +	if (IS_ERR(fsl_qdma->ctrl_base))
> +		return PTR_ERR(fsl_qdma->ctrl_base);
> +
> +	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
> +	fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
> +	if (IS_ERR(fsl_qdma->status_base))
> +		return PTR_ERR(fsl_qdma->status_base);
> +
> +	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
> +	fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
> +	if (IS_ERR(fsl_qdma->block_base))
> +		return PTR_ERR(fsl_qdma->block_base);
> +
> +	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
> +	if (ret)
> +		return ret;
> +
> +	fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
> +	INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
> +	for (i = 0; i < fsl_qdma->n_chans; i++) {
> +		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
> +
> +		fsl_chan->qdma = fsl_qdma;
> +		fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues;
> +		fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
> +		INIT_LIST_HEAD(&fsl_chan->qcomp);
> +		vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
> +	}
> +	for (i = 0; i < fsl_qdma->n_queues; i++)
> +		fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i);
> +
> +	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
> +
> +	fsl_qdma->dma_dev.dev = &pdev->dev;
> +	fsl_qdma->dma_dev.device_alloc_chan_resources
> +		= fsl_qdma_alloc_chan_resources;
> +	fsl_qdma->dma_dev.device_free_chan_resources
> +		= fsl_qdma_free_chan_resources;
> +	fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
> +	fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
> +	fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
> +
> +	dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
> +
> +	platform_set_drvdata(pdev, fsl_qdma);
> +
> +	ret = dma_async_device_register(&fsl_qdma->dma_dev);
> +	if (ret) {
> +		dev_err(&pdev->dev, "Can't register Freescale qDMA engine.\n");
> +		return ret;
> +	}
> +
> +	ret = fsl_qdma_reg_init(fsl_qdma);
> +	if (ret) {
> +		dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
> +		return ret;
> +	}
> +
> +
> +	return 0;
> +}
> +
> +static int fsl_qdma_remove(struct platform_device *pdev)
> +{
> +	struct device_node *np = pdev->dev.of_node;
> +	struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
> +	struct fsl_qdma_queue *queue_temp;
> +	struct fsl_qdma_queue *status = fsl_qdma->status;
> +	struct fsl_qdma_comp *comp_temp, *_comp_temp;
> +	int i;
> +
> +	of_dma_controller_free(np);
> +	dma_async_device_unregister(&fsl_qdma->dma_dev);
> +
> +	/* Free descriptor areas */
> +	for (i = 0; i < fsl_qdma->n_queues; i++) {
> +		queue_temp = fsl_qdma->queue + i;
> +		list_for_each_entry_safe(comp_temp, _comp_temp,
> +					&queue_temp->comp_used,	list) {
> +			dma_pool_free(queue_temp->comp_pool,
> +					comp_temp->virt_addr,
> +					comp_temp->bus_addr);
> +			list_del(&comp_temp->list);
> +			kfree(comp_temp);
> +		}
> +		list_for_each_entry_safe(comp_temp, _comp_temp,
> +					&queue_temp->comp_free, list) {
> +			dma_pool_free(queue_temp->comp_pool,
> +					comp_temp->virt_addr,
> +					comp_temp->bus_addr);
> +			list_del(&comp_temp->list);
> +			kfree(comp_temp);
> +		}
> +		dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
> +					queue_temp->n_cq, queue_temp->cq,
> +					queue_temp->bus_addr);
> +		dma_pool_destroy(queue_temp->comp_pool);
> +	}
> +
> +	dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
> +				status->n_cq, status->cq, status->bus_addr);

at this point you can still get an irq and tasklets can still be scheduled;
how do you ensure they are stopped?
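
One way to quiesce ahead of the frees above, sketched with this driver's
own helpers (an assumption about sufficient ordering, not a tested fix):

	/* Halt the engine and mask queue/error interrupts so no new
	 * IRQ fires while descriptors are being freed. */
	fsl_qdma_halt(fsl_qdma);
	qdma_writel(fsl_qdma, 0, fsl_qdma->block_base + FSL_QDMA_BCQIER(0));
	qdma_writel(fsl_qdma, 0, fsl_qdma->status_base + FSL_QDMA_DEIER);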

> +	return 0;
> +}
> +
> +static const struct of_device_id fsl_qdma_dt_ids[] = {
> +	{ .compatible = "fsl,ls1021a-qdma", },
> +	{ /* sentinel */ }
> +};
> +MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
> +
> +static struct platform_driver fsl_qdma_driver = {
> +	.driver		= {
> +		.name	= "fsl-qdma",
> +		.owner  = THIS_MODULE,

this is not required

> +		.of_match_table = fsl_qdma_dt_ids,
> +	},
> +	.probe          = fsl_qdma_probe,
> +	.remove		= fsl_qdma_remove,
> +};
> +
> +static int __init fsl_qdma_init(void)
> +{
> +	return platform_driver_register(&fsl_qdma_driver);
> +}
> +subsys_initcall(fsl_qdma_init);
> +
> +static void __exit fsl_qdma_exit(void)
> +{
> +	platform_driver_unregister(&fsl_qdma_driver);
> +}
> +module_exit(fsl_qdma_exit);

I think this can be removed with module_platform_driver()
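
The suggested simplification is a one-liner, though note it registers at
module_init() time rather than subsys_initcall(), so init ordering
changes slightly:

module_platform_driver(fsl_qdma_driver);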
Wen He Dec. 19, 2017, 6:28 a.m. UTC | #2
Hi Vinod,

> -----Original Message-----
> From: Vinod Koul [mailto:vinod.koul@intel.com]
> Sent: December 11, 2017 11:52
> To: Wen He <wen.he_1@nxp.com>
> Cc: Leo Li <leoyang.li@nxp.com>; dmaengine@vger.kernel.org
> Subject: Re: [PATCH 1/4] dma: fsl-qdma: add qDMA Command queue mode
> driver
>

> On Thu, Nov 30, 2017 at 03:48:33PM +0800, Wen He wrote:
> > This patch adds qDMA command queue support.
> > Both qDMA source data and destination data can be either contiguous or
> > non-contiguous, using one or more scatter/gather tables.
>
> The subsystem name is dmaengine so please use that. If in doubt, use git log
> on that subsystem to find the tag

> > +/*
> > + * drivers/dma/fsl-qdma.c
> > + *
> > + * Copyright 2014-2015 Freescale Semiconductor, Inc.
>
> we are in 2017

Okay.

> > + *
> > + * Driver for the Freescale qDMA engine with software command queue mode.
> > + * Channel virtualization is supported through enqueuing of DMA jobs to,
> > + * or dequeuing DMA jobs from, different work queues.
> > + * This module can be found on Freescale LS SoCs.
> > + *
> > + * This program is free software; you can redistribute  it and/or modify it
> > + * under  the terms of  the GNU General  Public License as published by the
> > + * Free Software Foundation;  either version 2 of the  License, or (at your
> > + * option) any later version.
>
> Please consider using SPDX tags for this

Got it, Thanks.

> > + */
> > +
> > +#include <asm/cacheflush.h>
> > +#include <linux/clk.h>
> > +#include <linux/delay.h>
> > +#include <linux/dma-mapping.h>
> > +#include <linux/dmapool.h>
> > +#include <linux/init.h>
> > +#include <linux/interrupt.h>
> > +#include <linux/module.h>
> > +#include <linux/of.h>
> > +#include <linux/of_address.h>
> > +#include <linux/of_device.h>
> > +#include <linux/of_dma.h>
> > +#include <linux/of_irq.h>
> > +#include <linux/slab.h>
> > +#include <linux/spinlock.h>
>
> Do you need all these?

We don't need all of these. Is this ok?

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

> > +
> > +#include "virt-dma.h"
> > +
> > +#define FSL_QDMA_DMR			0x0
> > +#define FSL_QDMA_DSR			0x4
> > +#define FSL_QDMA_DEIER			0xe00
> > +#define FSL_QDMA_DEDR			0xe04
> > +#define FSL_QDMA_DECFDW0R		0xe10
> > +#define FSL_QDMA_DECFDW1R		0xe14
> > +#define FSL_QDMA_DECFDW2R		0xe18
> > +#define FSL_QDMA_DECFDW3R		0xe1c
> > +#define FSL_QDMA_DECFQIDR		0xe30
> > +#define FSL_QDMA_DECBR			0xe34
>
> BIT() or GENMASK() please for register definitions, here and other places in
> the driver

Okay. Does this mean I should define these with BIT() or GENMASK() here?

> > +/* qDMA Compound S/G Format */
> > +struct fsl_qdma_csgf {
> > +	__le32 offset; /* offset */
> > +	__le32 cfg;	/* E bit, F bit, length */
> > +	union {
> > +		struct {
> > +			__le32 addr_lo;	/* low 32-bits of 40-bit address */
> > +			u8 addr_hi;	/* high 8-bits of 40-bit address */
> > +			u8 __reserved1[3];
> > +		};
> > +		__le64 data;
> > +	};
>
> this looks terribly similar to fsl_qdma_ccdf; if you have a union then why not
> add this to it too?

This struct describes two different DMA message formats: the Compound Command
Descriptor Format and the qDMA Compound S/G Format. Adding this to the union
as well could be ambiguous.

> > +} __packed;
> > +
> > +#define QDMA_SG_FIN			BIT(30)
> > +#define QDMA_SG_EXT			BIT(31)
> > +#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
>
> empty line here please

Okay, I am so sorry.

> > +static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
> > +{
> > +	if (qdma->big_endian)
> > +		return ioread32be(addr);
> > +	else
> > +		return ioread32(addr);
> > +}
>
> IIRC there are macros which take care of this, can you check other FSL
> drivers please

Okay, but I searched the FSL drivers and found none. Do you know where they are?

> > +static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
> > +{
> > +	/*
> > +	 * In QDMA mode, We don't need to do anything.
> > +	 */
> > +	return 0;
> > +}
>
> this is not mandatory, so can be removed

Okay.

> > +/*
> > + * Prei-request full command descriptor for enqueue.
>
> prei?

It should be "pre".

> > + */
> > +static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
> > +{
> > +	struct fsl_qdma_comp *comp_temp;
> > +	int i;
> > +
> > +	for (i = 0; i < queue->n_cq; i++) {
> > +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
> > +		if (!comp_temp)
> > +			return -1;
> > +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
> > +						      GFP_NOWAIT,
> > +						      &comp_temp->bus_addr);
> > +		if (!comp_temp->virt_addr)
> > +			return -1;
>
> eeek -1?

Okay, this return should be -ENOMEM.

> > +		list_add_tail(&comp_temp->list, &queue->comp_free);

> > +	}

> > +	return 0;

> > +}

> > +

> > +/*

> > + * Request a command descriptor for enqueue.

> > + */

> > +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(

> > +					struct fsl_qdma_chan *fsl_chan,

> > +					unsigned int dst_nents,

> > +					unsigned int src_nents)

> > +{

> > +	struct fsl_qdma_comp *comp_temp;

> > +	struct fsl_qdma_sg *sg_block;

> > +	struct fsl_qdma_queue *queue = fsl_chan->queue;

> > +	unsigned long flags;

> > +	unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i;

> > +

> > +	spin_lock_irqsave(&queue->queue_lock, flags);

> > +	if (list_empty(&queue->comp_free)) {

> > +		spin_unlock_irqrestore(&queue->queue_lock, flags);

> > +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);

> > +		if (!comp_temp)

> > +			return NULL;

> > +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,

> > +						      GFP_NOWAIT,

> > +						      &comp_temp->bus_addr);

> > +		if (!comp_temp->virt_addr)

> > +			return NULL;

> > +	} else {

> > +		comp_temp = list_first_entry(&queue->comp_free,

> > +					     struct fsl_qdma_comp,

> > +					     list);

> > +		list_del(&comp_temp->list);

> > +		spin_unlock_irqrestore(&queue->queue_lock, flags);

> > +	}

> > +

> > +	if (dst_nents != 0)

> > +		dst_sg_entry_block = dst_nents /

> > +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;

> 

> would this be same as DIV_ROUND_UP()

> 

> 


It's not the same. DIV_ROUND_UP is defined as:
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
Actually I need n / (d - 1) + 1, but DIV_ROUND_UP computes (n + d - 1) / d.
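For illustration, assuming FSL_QDMA_EXPECT_SG_ENTRY_NUM were 16 (a made-up
value; each block then carries 15 data entries plus one link entry) and
dst_nents = 30:

	DIV_ROUND_UP(30, 16)  = (30 + 16 - 1) / 16 = 2
	DIV_ROUND_UP(30, 15)  = (30 + 15 - 1) / 15 = 2
	30 / (16 - 1) + 1     = 2 + 1              = 3

so the expression in the driver always reserves one more block than a plain
round-up would.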

> > +	else

> > +		dst_sg_entry_block = 0;

> > +

> > +	if (src_nents != 0)

> > +		src_sg_entry_block = src_nents /

> > +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;

> > +	else

> > +		src_sg_entry_block = 0;

> > +

> > +	sg_entry_total = dst_sg_entry_block + src_sg_entry_block;

> > +	if (sg_entry_total) {

> > +		sg_block = kzalloc(sizeof(*sg_block) *

> > +					      sg_entry_total,

> > +					      GFP_KERNEL);

> > +		if (!sg_block)

> > +			return NULL;

> 

> okay you are leaking here the pool allocation need to be rolled back

> 


Okay, I got it. Is that ok?

if (!sg_block) {
	dma_pool_free(queue->comp_pool,
		      comp_temp->virt_addr,
		      comp_temp->bus_addr);
	return NULL;
}

> > +		comp_temp->sg_block = sg_block;

> > +		for (i = 0; i < sg_entry_total; i++) {

> > +			sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,

> > +							GFP_NOWAIT,

> > +							&sg_block->bus_addr);

> > +			memset(sg_block->virt_addr, 0,

> > +					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);

> > +			sg_block++;

> > +		}

> > +	}

> > +

> > +	comp_temp->sg_block_src = src_sg_entry_block;

> > +	comp_temp->sg_block_dst = dst_sg_entry_block;

> > +	comp_temp->qchan = fsl_chan;

> > +

> > +	return comp_temp;

> > +}

> > +

> > +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(

> > +					struct platform_device *pdev,

> > +					unsigned int queue_num)

> > +{

> > +	struct device_node *np = pdev->dev.of_node;

> > +	struct fsl_qdma_queue *queue_head, *queue_temp;

> > +	int ret, len, i;

> > +	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];

> > +

> > +	if (queue_num > FSL_QDMA_QUEUE_MAX)

> > +		queue_num = FSL_QDMA_QUEUE_MAX;

> > +	len = sizeof(*queue_head) * queue_num;

> > +	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);

> > +	if (!queue_head)

> > +		return NULL;

> > +

> > +	ret = of_property_read_u32_array(np, "queue-sizes", queue_size,

> > +								queue_num);

> 

> why not device_property_read_u32_array()

> 


Okay, thanks. It's a good idea.
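For example (untested):

	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
					     queue_size, queue_num);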

> > +	if (ret) {

> > +		dev_err(&pdev->dev, "Can't get queue-sizes.\n");

> > +		return NULL;

> > +	}

> > +

> > +	for (i = 0; i < queue_num; i++) {

> > +		if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX

> > +			|| queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {

> > +			dev_err(&pdev->dev, "Get wrong queue-sizes.\n");

> > +			return NULL;

> > +		}

> > +		queue_temp = queue_head + i;

> > +		queue_temp->cq = dma_alloc_coherent(&pdev->dev,

> > +						sizeof(struct fsl_qdma_ccdf) *

> > +						queue_size[i],

> > +						&queue_temp->bus_addr,

> > +						GFP_KERNEL);

> > +		if (!queue_temp->cq)

> > +			return NULL;

> > +		queue_temp->n_cq = queue_size[i];

> > +		queue_temp->id = i;

> > +		queue_temp->virt_head = queue_temp->cq;

> > +		queue_temp->virt_tail = queue_temp->cq;

> > +		/*

> > +		 * The dma pool for queue command buffer

> > +		 */

> > +		queue_temp->comp_pool = dma_pool_create("comp_pool",

> > +						&pdev->dev,

> > +						FSL_QDMA_BASE_BUFFER_SIZE,

> > +						16, 0);

> > +		if (!queue_temp->comp_pool) {

> > +			dma_free_coherent(&pdev->dev,

> > +						sizeof(struct fsl_qdma_ccdf) *

> > +						queue_size[i],

> > +						queue_temp->cq,

> > +						queue_temp->bus_addr);

> > +			return NULL;

> > +		}

> > +		/*

> > +		 * The dma pool for queue command buffer

> > +		 */

> > +		queue_temp->sg_pool = dma_pool_create("sg_pool",

> > +					&pdev->dev,

> > +					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16,

> > +					64, 0);

> > +		if (!queue_temp->sg_pool) {

> > +			dma_free_coherent(&pdev->dev,

> > +						sizeof(struct fsl_qdma_ccdf) *

> > +						queue_size[i],

> > +						queue_temp->cq,

> > +						queue_temp->bus_addr);

> > +			dma_pool_destroy(queue_temp->comp_pool);

> > +			return NULL;

> > +		}

> > +		/*

> > +		 * List for queue command buffer

> > +		 */

> > +		INIT_LIST_HEAD(&queue_temp->comp_used);

> > +		INIT_LIST_HEAD(&queue_temp->comp_free);

> > +		spin_lock_init(&queue_temp->queue_lock);

> > +	}

> > +

> > +	return queue_head;

> > +}

> > +

> > +static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(

> > +						struct platform_device *pdev)

> > +{

> > +	struct device_node *np = pdev->dev.of_node;

> > +	struct fsl_qdma_queue *status_head;

> > +	unsigned int status_size;

> > +	int ret;

> > +

> > +	ret = of_property_read_u32(np, "status-sizes", &status_size);

> > +	if (ret) {

> > +		dev_err(&pdev->dev, "Can't get status-sizes.\n");

> > +		return NULL;

> > +	}

> > +	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX

> > +			|| status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {

> > +		dev_err(&pdev->dev, "Get wrong status_size.\n");

> > +		return NULL;

> > +	}

> > +	status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),

> > +								GFP_KERNEL);

> > +	if (!status_head)

> > +		return NULL;

> > +

> > +	/*

> > +	 * Buffer for queue command

> > +	 */

> > +	status_head->cq = dma_alloc_coherent(&pdev->dev,

> > +						sizeof(struct fsl_qdma_ccdf) *

> > +						status_size,

> > +						&status_head->bus_addr,

> > +						GFP_KERNEL);

> > +	if (!status_head->cq)

> > +		return NULL;

> > +	status_head->n_cq = status_size;

> > +	status_head->virt_head = status_head->cq;

> > +	status_head->virt_tail = status_head->cq;

> > +	status_head->comp_pool = NULL;

> > +

> > +	return status_head;

> > +}

> > +

> > +static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma) {

> > +	void __iomem *ctrl = fsl_qdma->ctrl_base;

> > +	void __iomem *block = fsl_qdma->block_base;

> > +	int i, count = 5;

> > +	u32 reg;

> > +

> > +	/* Disable the command queue and wait for idle state. */

> > +	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);

> > +	reg |= FSL_QDMA_DMR_DQD;

> > +	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);

> > +	for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)

> > +		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));

> > +

> > +	while (1) {

> > +		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);

> > +		if (!(reg & FSL_QDMA_DSR_DB))

> > +			break;

> > +		if (count-- < 0)

> > +			return -EBUSY;

> > +		udelay(100);

> > +	}

> > +

> > +	/* Disable status queue. */

> > +	qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);

> > +

> > +	/*

> > +	 * Clear the command queue interrupt detect register for all queues.

> > +	 */

> > +	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));

> > +

> > +	return 0;

> > +}

> > +

> > +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)

> > +{

> > +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;

> > +	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;

> > +	struct fsl_qdma_queue *temp_queue;

> > +	struct fsl_qdma_comp *fsl_comp;

> > +	struct fsl_qdma_ccdf *status_addr;

> > +	struct fsl_qdma_csgf *csgf_src;

> > +	void __iomem *block = fsl_qdma->block_base;

> > +	u32 reg, i;

> > +	bool duplicate, duplicate_handle;

> > +

> > +	while (1) {

> > +		duplicate = 0;

> > +		duplicate_handle = 0;

> > +		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);

> > +		if (reg & FSL_QDMA_BSQSR_QE)

> > +			return 0;

> > +		status_addr = fsl_status->virt_head;

> > +		if (qdma_ccdf_get_queue(status_addr) == pre_queue &&

> > +			qdma_ccdf_addr_get64(status_addr) == pre_addr)

> > +			duplicate = 1;

> > +		i = qdma_ccdf_get_queue(status_addr);

> > +		pre_queue = qdma_ccdf_get_queue(status_addr);

> > +		pre_addr = qdma_ccdf_addr_get64(status_addr);

> > +		temp_queue = fsl_queue + i;

> > +		spin_lock(&temp_queue->queue_lock);

> > +		if (list_empty(&temp_queue->comp_used)) {

> > +			if (duplicate)

> > +				duplicate_handle = 1;

> > +			else {

> > +				spin_unlock(&temp_queue->queue_lock);

> > +				return -1;

> > +			}

> > +		} else {

> > +			fsl_comp = list_first_entry(&temp_queue->comp_used,

> > +							struct fsl_qdma_comp,

> > +							list);

> > +			csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr

> > +							   + 2;

> > +			if (fsl_comp->bus_addr + 16 != pre_addr) {

> > +				if (duplicate)

> > +					duplicate_handle = 1;

> > +				else {

> > +					spin_unlock(&temp_queue->queue_lock);

> > +					return -1;

> > +				}

> > +			}

> > +		}

> > +

> > +			if (duplicate_handle) {

> > +			reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);

> > +			reg |= FSL_QDMA_BSQMR_DI;

> > +			qdma_ccdf_addr_set64(status_addr, 0x0);

> > +			fsl_status->virt_head++;

> > +			if (fsl_status->virt_head == fsl_status->cq

> > +						   + fsl_status->n_cq)

> > +				fsl_status->virt_head = fsl_status->cq;

> > +			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);

> > +			spin_unlock(&temp_queue->queue_lock);

> > +			continue;

> > +		}

> > +		list_del(&fsl_comp->list);

> > +

> > +		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);

> > +		reg |= FSL_QDMA_BSQMR_DI;

> > +		qdma_ccdf_addr_set64(status_addr, 0x0);

> > +		fsl_status->virt_head++;

> > +		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)

> > +			fsl_status->virt_head = fsl_status->cq;

> > +		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);

> > +		spin_unlock(&temp_queue->queue_lock);

> > +

> > +		spin_lock(&fsl_comp->qchan->vchan.lock);

> > +		vchan_cookie_complete(&fsl_comp->vdesc);

> > +		fsl_comp->qchan->status = DMA_COMPLETE;

> > +		spin_unlock(&fsl_comp->qchan->vchan.lock);

> > +	}

> > +	return 0;

> > +}

> > +

> > +static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id) {

> > +	struct fsl_qdma_engine *fsl_qdma = dev_id;

> > +	unsigned int intr;

> > +	void __iomem *status = fsl_qdma->status_base;

> > +

> > +	intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);

> > +

> > +	if (intr)

> > +		dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");

> > +

> > +	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);

> 

> Why are we writing all 1s to this?

> 


Because the SoC reference manual describes this register as
write-1-to-clear: writing 1 clears the error condition and the interrupt.

Original description:

The DMA error detect register indicates if an error condition was
detected that should generate a system error interrupt. 
The error capture registers may hold additional information regarding
the error. Write 1 to clear the error condition and the interrupt, if enabled.
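So the idiom is read the latched bits, then write ones back to acknowledge
them. Illustration only (not a change to the patch):

	intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
	if (intr)
		dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
	/* DEDR is W1C: writing a 1 to a bit clears that latched error */
	qdma_writel(fsl_qdma, intr, status + FSL_QDMA_DEDR);

Writing 0xffffffff, as the patch does, simply clears every possible bit in
one go.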

> > +	return IRQ_HANDLED;

> > +}

> > +

> > +static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id) {

> > +	struct fsl_qdma_engine *fsl_qdma = dev_id;

> > +	unsigned int intr, reg;

> > +	void __iomem *block = fsl_qdma->block_base;

> > +	void __iomem *ctrl = fsl_qdma->ctrl_base;

> > +

> > +	intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));

> > +

> > +	if ((intr & FSL_QDMA_CQIDR_SQT) != 0)

> > +		intr = fsl_qdma_queue_transfer_complete(fsl_qdma);

> > +

> > +	if (intr != 0) {

> > +		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);

> > +		reg |= FSL_QDMA_DMR_DQD;

> > +		qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);

> > +		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));

> > +		dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");

> > +	}

> > +

> > +	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));

> > +

> > +	return IRQ_HANDLED;

> > +}

> > +

> > +static int

> > +fsl_qdma_irq_init(struct platform_device *pdev,

> > +		  struct fsl_qdma_engine *fsl_qdma) {

> > +	int ret;

> > +

> > +	fsl_qdma->error_irq = platform_get_irq_byname(pdev,

> > +							"qdma-error");

> > +	if (fsl_qdma->error_irq < 0) {

> > +		dev_err(&pdev->dev, "Can't get qdma controller irq.\n");

> > +		return fsl_qdma->error_irq;

> > +	}

> > +

> > +	fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");

> > +	if (fsl_qdma->queue_irq < 0) {

> > +		dev_err(&pdev->dev, "Can't get qdma queue irq.\n");

> > +		return fsl_qdma->queue_irq;

> > +	}

> > +

> > +	ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,

> > +			fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);

> > +	if (ret) {

> > +		dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");

> > +		return  ret;

> > +	}

> > +	ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,

> > +			fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);

> > +	if (ret) {

> > +		dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");

> > +		return  ret;

> > +	}

> > +

> > +	return 0;

> > +}

> > +

> > +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma) {

> > +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;

> > +	struct fsl_qdma_queue *temp;

> > +	void __iomem *ctrl = fsl_qdma->ctrl_base;

> > +	void __iomem *status = fsl_qdma->status_base;

> > +	void __iomem *block = fsl_qdma->block_base;

> > +	int i, ret;

> > +	u32 reg;

> > +

> > +	/* Try to halt the qDMA engine first. */

> > +	ret = fsl_qdma_halt(fsl_qdma);

> > +	if (ret) {

> > +		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");

> > +		return ret;

> > +	}

> > +

> > +	/*

> > +	 * Clear the command queue interrupt detect register for all queues.

> > +	 */

> > +	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));

> > +

> > +	for (i = 0; i < fsl_qdma->n_queues; i++) {

> > +		temp = fsl_queue + i;

> > +		/*

> > +		 * Initialize Command Queue registers to point to the first

> > +		 * command descriptor in memory.

> > +		 * Dequeue Pointer Address Registers

> > +		 * Enqueue Pointer Address Registers

> > +		 */

> > +		qdma_writel(fsl_qdma, temp->bus_addr,

> > +				block + FSL_QDMA_BCQDPA_SADDR(i));

> > +		qdma_writel(fsl_qdma, temp->bus_addr,

> > +				block + FSL_QDMA_BCQEPA_SADDR(i));

> > +

> > +		/* Initialize the queue mode. */

> > +		reg = FSL_QDMA_BCQMR_EN;

> > +		reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq)-4);

> > +		reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq)-6);

> > +		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));

> > +	}

> > +

> > +	/*

> > +	 * Workaround for erratum: ERR010812.

> > +	 * We must enable XOFF to avoid the enqueue rejection occurs.

> > +	 * Setting SQCCMR ENTER_WM to 0x20.

> > +	 */

> > +	qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,

> > +			      block + FSL_QDMA_SQCCMR);

> > +	/*

> > +	 * Initialize status queue registers to point to the first

> > +	 * command descriptor in memory.

> > +	 * Dequeue Pointer Address Registers

> > +	 * Enqueue Pointer Address Registers

> > +	 */

> > +	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,

> > +					block + FSL_QDMA_SQEPAR);

> > +	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,

> > +					block + FSL_QDMA_SQDPAR);

> > +	/* Initialize status queue interrupt. */

> > +	qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,

> > +			      block + FSL_QDMA_BCQIER(0));

> > +	qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)

> > +						   | 0x8000,

> > +			      block + FSL_QDMA_BSQICR);

> > +	qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,

> > +			      block + FSL_QDMA_CQIER);

> > +	/* Initialize controller interrupt register. */

> > +	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);

> > +	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);

> > +

> > +	/* Initialize the status queue mode. */

> > +	reg = FSL_QDMA_BSQMR_EN;

> > +	reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq)-6);

> > +	qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);

> > +

> > +	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);

> > +	reg &= ~FSL_QDMA_DMR_DQD;

> > +	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);

> > +

> > +	return 0;

> > +}

> > +

> > +static struct dma_async_tx_descriptor *

> > +fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,

> > +		dma_addr_t src, size_t len, unsigned long flags) {

> > +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

> > +	struct fsl_qdma_comp *fsl_comp;

> > +

> > +	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0);

> > +	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);

> > +

> > +	return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags); }

> > +

> > +static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan) {

> > +	void __iomem *block = fsl_chan->qdma->block_base;

> > +	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

> > +	struct fsl_qdma_comp *fsl_comp;

> > +	struct virt_dma_desc *vdesc;

> > +	u32 reg;

> > +

> > +	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));

> > +	if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))

> > +		return;

> > +	vdesc = vchan_next_desc(&fsl_chan->vchan);

> > +	if (!vdesc)

> > +		return;

> > +	list_del(&vdesc->node);

> > +	fsl_comp = to_fsl_qdma_comp(vdesc);

> > +

> > +	memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);

> > +	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)

> > +		fsl_queue->virt_head = fsl_queue->cq;

> > +

> > +	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);

> > +	barrier();

> > +	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));

> > +	reg |= FSL_QDMA_BCQMR_EI;

> > +	qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));

> > +	fsl_chan->status = DMA_IN_PROGRESS;

> > +}

> > +

> > +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,

> > +		dma_cookie_t cookie, struct dma_tx_state *txstate) {

> > +	return dma_cookie_status(chan, cookie, txstate); }

> > +

> > +static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc) {

> > +	struct fsl_qdma_comp *fsl_comp;

> > +	struct fsl_qdma_queue *fsl_queue;

> > +	struct fsl_qdma_sg *sg_block;

> > +	unsigned long flags;

> > +	unsigned int i;

> > +

> > +	fsl_comp = to_fsl_qdma_comp(vdesc);

> > +	fsl_queue = fsl_comp->qchan->queue;

> > +

> > +	if (fsl_comp->sg_block) {

> > +		for (i = 0; i < fsl_comp->sg_block_src +

> > +				fsl_comp->sg_block_dst; i++) {

> > +			sg_block = fsl_comp->sg_block + i;

> > +			dma_pool_free(fsl_queue->sg_pool,

> > +				      sg_block->virt_addr,

> > +				      sg_block->bus_addr);

> > +		}

> > +		kfree(fsl_comp->sg_block);

> > +	}

> > +

> > +	spin_lock_irqsave(&fsl_queue->queue_lock, flags);

> > +	list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);

> > +	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags); }

> > +

> > +static void fsl_qdma_issue_pending(struct dma_chan *chan) {

> > +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

> > +	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

> > +	unsigned long flags;

> > +

> > +	spin_lock_irqsave(&fsl_queue->queue_lock, flags);

> > +	spin_lock(&fsl_chan->vchan.lock);

> > +	if (vchan_issue_pending(&fsl_chan->vchan))

> > +		fsl_qdma_enqueue_desc(fsl_chan);

> > +	spin_unlock(&fsl_chan->vchan.lock);

> > +	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags); }

> 

> No reside, terminate callback?

> 


Yes, for now there is no residue reporting and no terminate callback; the dmatest runs do not exercise them.
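If one is wanted, a minimal terminate callback on top of the vchan helpers
could look something like this (sketch only, untested):

static int fsl_qdma_terminate_all(struct dma_chan *chan)
{
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}

wired up in probe with fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;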

> > +

> > +static int fsl_qdma_probe(struct platform_device *pdev) {

> > +	struct device_node *np = pdev->dev.of_node;

> > +	struct fsl_qdma_engine *fsl_qdma;

> > +	struct fsl_qdma_chan *fsl_chan;

> > +	struct resource *res;

> > +	unsigned int len, chans, queues;

> > +	int ret, i;

> > +

> > +	ret = of_property_read_u32(np, "channels", &chans);

> > +	if (ret) {

> > +		dev_err(&pdev->dev, "Can't get channels.\n");

> > +		return ret;

> > +	}

> > +

> > +	len = sizeof(*fsl_qdma) + sizeof(*fsl_chan) * chans;

> > +	fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);

> > +	if (!fsl_qdma)

> > +		return -ENOMEM;

> > +

> > +	ret = of_property_read_u32(np, "queues", &queues);

> > +	if (ret) {

> > +		dev_err(&pdev->dev, "Can't get queues.\n");

> > +		return ret;

> > +	}

> > +

> > +	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues);

> > +	if (!fsl_qdma->queue)

> > +		return -ENOMEM;

> > +

> > +	fsl_qdma->status = fsl_qdma_prep_status_queue(pdev);

> > +	if (!fsl_qdma->status)

> > +		return -ENOMEM;

> > +

> > +	fsl_qdma->n_chans = chans;

> > +	fsl_qdma->n_queues = queues;

> > +	mutex_init(&fsl_qdma->fsl_qdma_mutex);

> > +

> > +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

> > +	fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);

> > +	if (IS_ERR(fsl_qdma->ctrl_base))

> > +		return PTR_ERR(fsl_qdma->ctrl_base);

> > +

> > +	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

> > +	fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);

> > +	if (IS_ERR(fsl_qdma->status_base))

> > +		return PTR_ERR(fsl_qdma->status_base);

> > +

> > +	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);

> > +	fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);

> > +	if (IS_ERR(fsl_qdma->block_base))

> > +		return PTR_ERR(fsl_qdma->block_base);

> > +

> > +	ret = fsl_qdma_irq_init(pdev, fsl_qdma);

> > +	if (ret)

> > +		return ret;

> > +

> > +	fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");

> > +	INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);

> > +	for (i = 0; i < fsl_qdma->n_chans; i++) {

> > +		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

> > +

> > +		fsl_chan->qdma = fsl_qdma;

> > +		fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues;

> > +		fsl_chan->vchan.desc_free = fsl_qdma_free_desc;

> > +		INIT_LIST_HEAD(&fsl_chan->qcomp);

> > +		vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);

> > +	}

> > +	for (i = 0; i < fsl_qdma->n_queues; i++)

> > +		fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i);

> > +

> > +	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);

> > +

> > +	fsl_qdma->dma_dev.dev = &pdev->dev;

> > +	fsl_qdma->dma_dev.device_alloc_chan_resources

> > +		= fsl_qdma_alloc_chan_resources;

> > +	fsl_qdma->dma_dev.device_free_chan_resources

> > +		= fsl_qdma_free_chan_resources;

> > +	fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;

> > +	fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;

> > +	fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;

> > +

> > +	dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));

> > +

> > +	platform_set_drvdata(pdev, fsl_qdma);

> > +

> > +	ret = dma_async_device_register(&fsl_qdma->dma_dev);

> > +	if (ret) {

> > +		dev_err(&pdev->dev, "Can't register Freescale qDMA engine.\n");

> > +		return ret;

> > +	}

> > +

> > +	ret = fsl_qdma_reg_init(fsl_qdma);

> > +	if (ret) {

> > +		dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");

> > +		return ret;

> > +	}

> > +

> > +

> > +	return 0;

> > +}

> > +

> > +static int fsl_qdma_remove(struct platform_device *pdev) {

> > +	struct device_node *np = pdev->dev.of_node;

> > +	struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);

> > +	struct fsl_qdma_queue *queue_temp;

> > +	struct fsl_qdma_queue *status = fsl_qdma->status;

> > +	struct fsl_qdma_comp *comp_temp, *_comp_temp;

> > +	int i;

> > +

> > +	of_dma_controller_free(np);

> > +	dma_async_device_unregister(&fsl_qdma->dma_dev);

> > +

> > +	/* Free descriptor areas */

> > +	for (i = 0; i < fsl_qdma->n_queues; i++) {

> > +		queue_temp = fsl_qdma->queue + i;

> > +		list_for_each_entry_safe(comp_temp, _comp_temp,

> > +					&queue_temp->comp_used,	list) {

> > +			dma_pool_free(queue_temp->comp_pool,

> > +					comp_temp->virt_addr,

> > +					comp_temp->bus_addr);

> > +			list_del(&comp_temp->list);

> > +			kfree(comp_temp);

> > +		}

> > +		list_for_each_entry_safe(comp_temp, _comp_temp,

> > +					&queue_temp->comp_free, list) {

> > +			dma_pool_free(queue_temp->comp_pool,

> > +					comp_temp->virt_addr,

> > +					comp_temp->bus_addr);

> > +			list_del(&comp_temp->list);

> > +			kfree(comp_temp);

> > +		}

> > +		dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *

> > +					queue_temp->n_cq, queue_temp->cq,

> > +					queue_temp->bus_addr);

> > +		dma_pool_destroy(queue_temp->comp_pool);

> > +	}

> > +

> > +	dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *

> > +				status->n_cq, status->cq, status->bus_addr);

> 

> at this point you can still get irq and tasklets can be still scheduled, how do

> you ensure they are stopped

> 


Maybe I missed that. I will add the two functions below and call them in fsl_qdma_remove() in the next version.
Is that ok?

static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
{
        struct fsl_qdma_chan *chan, *_chan;

        list_for_each_entry_safe(chan, _chan,
                                &dmadev->channels, vchan.chan.device_node) {
                list_del(&chan->vchan.chan.device_node);
                tasklet_kill(&chan->vchan.task);
        }
}

static void fsl_qdma_irq_exit(
                struct platform_device *pdev, struct fsl_qdma_engine *fsl_qdma)
{
        if (fsl_qdma->queue_irq == fsl_qdma->error_irq) {
                devm_free_irq(&pdev->dev, fsl_qdma->queue_irq, fsl_qdma);
        } else {
                devm_free_irq(&pdev->dev, fsl_qdma->queue_irq, fsl_qdma);
                devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
        }
}
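With those, fsl_qdma_remove() would start roughly like this (sketch; the
existing memory teardown stays below this point):

	fsl_qdma_irq_exit(pdev, fsl_qdma);
	fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_qdma->dma_dev);

so interrupts are quiesced and the vchan tasklets are killed before any
descriptor memory is freed.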

> > +	return 0;

> > +}

> > +

> > +static const struct of_device_id fsl_qdma_dt_ids[] = {

> > +	{ .compatible = "fsl,ls1021a-qdma", },

> > +	{ /* sentinel */ }

> > +};

> > +MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);

> > +

> > +static struct platform_driver fsl_qdma_driver = {

> > +	.driver		= {

> > +		.name	= "fsl-qdma",

> > +		.owner  = THIS_MODULE,

> 

> this is not required

> 


Got it.

> > +		.of_match_table = fsl_qdma_dt_ids,

> > +	},

> > +	.probe          = fsl_qdma_probe,

> > +	.remove		= fsl_qdma_remove,

> > +};

> > +

> > +static int __init fsl_qdma_init(void) {

> > +	return platform_driver_register(&fsl_qdma_driver);

> > +}

> > +subsys_initcall(fsl_qdma_init);

> > +

> > +static void __exit fsl_qdma_exit(void) {

> > +	platform_driver_unregister(&fsl_qdma_driver);

> > +}

> > +module_exit(fsl_qdma_exit);

> 

> i think this can be removed with module_platform_driver()

> 


Got it, learned a lot. Thanks.

--
Best Regards,
Wen
Vinod Koul Dec. 19, 2017, 8:49 a.m. UTC | #3
On Tue, Dec 19, 2017 at 06:28:01AM +0000, Wen He wrote:

> > > +#include <asm/cacheflush.h>
> > > +#include <linux/clk.h>
> > > +#include <linux/delay.h>
> > > +#include <linux/dma-mapping.h>
> > > +#include <linux/dmapool.h>
> > > +#include <linux/init.h>
> > > +#include <linux/interrupt.h>
> > > +#include <linux/module.h>
> > > +#include <linux/of.h>
> > > +#include <linux/of_address.h>
> > > +#include <linux/of_device.h>
> > > +#include <linux/of_dma.h>
> > > +#include <linux/of_irq.h>
> > > +#include <linux/slab.h>
> > > +#include <linux/spinlock.h>
> > 
> > Do you need all these?
> > 
> 
> Don't need all these, Is that ok?

Okay please remove the ones not needed

> > BIT() or GENMASK() pls for register definations, here and other places in the
> > driver
> > 
> 
> Okay, does this means, I should be defined BIT() or GENMASK() to here?

Yes please

> > this looks terribly similar to fsl_qdma_ccdf, if you have union then why not add
> > this too to it?
> > 
> 
> This struct defined description two different dma message format that Compound Command Descriptor Format and qDMA Compound S/G Format,
> if add this too to union, it could be ambiguous.

at lower level do we care about the format, we fill and submit to HW

> > > +static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem
> > > +*addr) {
> > > +	if (qdma->big_endian)
> > > +		return ioread32be(addr);
> > > +	else
> > > +		return ioread32(addr);
> > > +}
> > 
> > IIRC there are macros which which take care of this, can you check other FSL
> > drivers please
> > 
> 
> Okay, but I search the FSL drivers and no found. Do you know where are them ?

See DMA_IN/OUT in dma/fsldma.h
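(Roughly, from memory, so check the header:

#define DMA_IN(fsl_chan, addr, width)					\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			in_be##width(addr) : in_le##width(addr))

#define DMA_OUT(fsl_chan, addr, val, width)				\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			out_be##width(addr, val) : out_le##width(addr, val))

the endian choice is keyed off a feature flag rather than open-coded if/else.)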

> > > +	sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
> > > +	if (sg_entry_total) {
> > > +		sg_block = kzalloc(sizeof(*sg_block) *
> > > +					      sg_entry_total,
> > > +					      GFP_KERNEL);
> > > +		if (!sg_block)
> > > +			return NULL;
> > 
> > okay you are leaking here the pool allocation need to be rolled back
> > 
> 
> okay, I got it, Is that ok?
> 
> if (!sg_block) {
> 	dma_pool_free(queue->comp_pool,
>     comp_temp->virt_addr,
>     comp_temp->bus_addr);
>     return NULL;

Looks okay

> > > +
> > > +	dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
> > > +				status->n_cq, status->cq, status->bus_addr);
> > 
> > at this point you can still get irq and tasklets can be still scheduled, how do
> > you ensure they are stopped
> > 
> 
> May be I missed, will and below to funtions call In fsl_qdma_remove() in next version. 
> Is that ok?

sure
Wen He Dec. 19, 2017, 11:19 a.m. UTC | #4
Hi Vinod,

> -----Original Message-----
> From: Vinod Koul [mailto:vinod.koul@intel.com]
> Sent: 2017年12月19日 16:50
> To: Wen He <wen.he_1@nxp.com>
> Cc: Leo Li <leoyang.li@nxp.com>; dmaengine@vger.kernel.org; Jiafei Pan
> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>
> Subject: Re: [PATCH 1/4] dma: fsl-qdma: add qDMA Command queue mode
> driver
>
> On Tue, Dec 19, 2017 at 06:28:01AM +0000, Wen He wrote:
>
> > > > +#include <asm/cacheflush.h>
> > > > +#include <linux/clk.h>
> > > > +#include <linux/delay.h>
> > > > +#include <linux/dma-mapping.h>
> > > > +#include <linux/dmapool.h>
> > > > +#include <linux/init.h>
> > > > +#include <linux/interrupt.h>
> > > > +#include <linux/module.h>
> > > > +#include <linux/of.h>
> > > > +#include <linux/of_address.h>
> > > > +#include <linux/of_device.h>
> > > > +#include <linux/of_dma.h>
> > > > +#include <linux/of_irq.h>
> > > > +#include <linux/slab.h>
> > > > +#include <linux/spinlock.h>
> > >
> > > Do you need all these?
> > >
> >
> > Don't need all these, Is that ok?
>
> Okay please remove the ones not needed
>
> > > BIT() or GENMASK() pls for register definations, here and other
> > > places in the driver
> > >
> >
> > Okay, does this means, I should be defined BIT() or GENMASK() to here?
>
> Yes please
>
> > > this looks terribly similar to fsl_qdma_ccdf, if you have union then
> > > why not add this too to it?
> > >
> >
> > This struct defined description two different dma message format that
> > Compound Command Descriptor Format and qDMA Compound S/G Format,
> > if add this too to union, it could be ambiguous.
>
> at lower level do we care about the format, we fill and submit to HW
>

Okay, Is that ok?

Will and below using fsl_qdma_desc_format instead fsl_qdma_ccfg and delete fsl_qdma_csgf in next version.
Keep and fixed all of description call function, the desc pointer defined by the fsl_qdma_descs_format.

struct fsl_qdma_desc_format *ccdf, *csgf;

then fill and submit to HW.

> > > > +static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem
> > > > +*addr) {
> > > > +	if (qdma->big_endian)
> > > > +		return ioread32be(addr);
> > > > +	else
> > > > +		return ioread32(addr);
> > > > +}
> > >
> > > IIRC there are macros which which take care of this, can you check
> > > other FSL drivers please
> > >
> >
> > Okay, but I search the FSL drivers and no found. Do you know where are
> > them ?
>
> See DMA_IN/OUT in dma/fsldma.h
>

Okay, but DMA_IN/OUT need struct fsldma_chan.
May be I can refer to it that Implement a similar macro, will and below to instead it in next version.

#define QDMA_IN(fsl_qdma_engine, addr)                                  \
                (((fsl_qdma_engine)->big_endian & FSL_DMA_BIG_ENDIAN) ? \
                        ioread32be(addr) : ioread32(addr))

#define QDMA_OUT(fsl_qdma_engine, addr, val)                            \
                (((fsl_qdma_engine)->big_endian & FSL_DMA_BIG_ENDIAN) ? \
                        iowrite32be(val, addr) : iowrite32(val, addr))

> > > > +	sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
> > > > +	if (sg_entry_total) {
> > > > +		sg_block = kzalloc(sizeof(*sg_block) *
> > > > +					      sg_entry_total,
> > > > +					      GFP_KERNEL);
> > > > +		if (!sg_block)
> > > > +			return NULL;
> > >
> > > okay you are leaking here the pool allocation need to be rolled back
> > >
> >
> > okay, I got it, Is that ok?
> >
> > if (!sg_block) {
> > 	dma_pool_free(queue->comp_pool,
> >     comp_temp->virt_addr,
> >     comp_temp->bus_addr);
> >     return NULL;
>
> Looks okay
>
> > > > +
> > > > +	dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
> > > > +				status->n_cq, status->cq, status->bus_addr);
> > >
> > > at this point you can still get irq and tasklets can be still
> > > scheduled, how do you ensure they are stopped
> > >
> >
> > May be I missed, will and below to funtions call In fsl_qdma_remove() in
> > next version.
> > Is that ok?
>
> sure
> --
> ~Vinod

--
Best Regards,
Wen
Vinod Koul Dec. 19, 2017, 4:44 p.m. UTC | #5
On Tue, Dec 19, 2017 at 11:19:52AM +0000, Wen He wrote:

> > > This struct defined description two different dma message format that
> > > Compound Command Descriptor Format and qDMA Compound S/G Format,
> > if add this too to union, it could be ambiguous.
> > 
> > at lower level do we care about the format, we fill and submit to HW
> > 
> 
> Okay, Is that ok? 
> 
> Will and below using fsl_qdma_desc_format instead fsl_qdma_ccfg and delete fsl_qdma_csgf in next version. 
> Keep and fixed all of description call function, the desc pointer defined by the fsl_qdma_descs_format.
> 
> struct fsl_qdma_desc_format *ccdf, *csgf;
> 
> then fill and submit to HW.

Looks okay for now, will check the resulting code and let you know

> > > > IIRC there are macros which which take care of this, can you check
> > > > other FSL drivers please
> > > >
> > >
> > > Okay, but I search the FSL drivers and no found. Do you know where are
> > them ?
> > 
> > See DMA_IN/OUT in dma/fsldma.h
> > 
> 
> Okay, but DMA_IN/OUT need struct fsldma_chan.
> May be I can refer to it that Implement a similar macro, will and below to instead it in next version.
> 
> #define QDMA_IN(fsl_qdma_engine, addr)                                  \
>                 (((fsl_qdma_engine)->big_endian & FSL_DMA_BIG_ENDIAN) ?         \
>                         ioread32be(addr) : ioread32(addr))
> 
> #define QDMA_OUT(fsl_qdma_engine, addr, val)                            \
>                 (((fsl_qdma_engine)->big_endian & FSL_DMA_BIG_ENDIAN) ?         \
>                         iowrite32be(val, addr) : iowrite32(val, addr))

Ah that would be duplicate. How about making it common and abstracting this
as FSL_DMA_IN/OUT and using in both drivers?
Wen He Dec. 20, 2017, 9:13 a.m. UTC | #6
> -----Original Message-----
> From: Vinod Koul [mailto:vinod.koul@intel.com]
> Sent: 2017年12月20日 0:45
> To: Wen He <wen.he_1@nxp.com>
> Cc: Leo Li <leoyang.li@nxp.com>; dmaengine@vger.kernel.org; Jiafei Pan
> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>
> Subject: Re: [PATCH 1/4] dma: fsl-qdma: add qDMA Command queue mode
> driver
>
> On Tue, Dec 19, 2017 at 11:19:52AM +0000, Wen He wrote:
>
> > > > This struct defined description two different dma message format
> > > > that Compound Command Descriptor Format and qDMA Compound S/G
> > > > Format,
> > > if add this too to union, it could be ambiguous.
> > >
> > > at lower level do we care about the format, we fill and submit to HW
> > >
> >
> > Okay, Is that ok?
> >
> > Will and below using fsl_qdma_desc_format instead fsl_qdma_ccfg and delete fsl_qdma_csgf in next version.
> > Keep and fixed all of description call function, the desc pointer defined by the fsl_qdma_descs_format.
> >
> > struct fsl_qdma_desc_format *ccdf, *csgf;
> >
> > then fill and submit to HW.
>
> Looks okay for now, will check the resulting code and let you know
>
> > > > > IIRC there are macros which which take care of this, can you
> > > > > check other FSL drivers please
> > > > >
> > > >
> > > > Okay, but I search the FSL drivers and no found. Do you know where are
> > > > them ?
> > >
> > > See DMA_IN/OUT in dma/fsldma.h
> > >
> >
> > Okay, but DMA_IN/OUT need struct fsldma_chan.
> > May be I can refer to it that Implement a similar macro, will and below to instead it in next version.
> >
> > #define QDMA_IN(fsl_qdma_engine, addr)                                  \
> >                 (((fsl_qdma_engine)->big_endian & FSL_DMA_BIG_ENDIAN) ? \
> >                         ioread32be(addr) : ioread32(addr))
> >
> > #define QDMA_OUT(fsl_qdma_engine, addr, val)                            \
> >                 (((fsl_qdma_engine)->big_endian & FSL_DMA_BIG_ENDIAN) ? \
> >                         iowrite32be(val, addr) : iowrite32(val, addr))
>
> Ah that would be duplicate. How about making it common and abstracting this
> as FSL_DMA_IN/OUT and using in both drivers?

OK, but for now we are on rush to let this merged. Could we apply current version fist,
then I will discuss with author of that macro to get it done? Thank you.

> --
> ~Vinod
--
Best Regards,
Wen
Vinod Koul Dec. 20, 2017, 4:16 p.m. UTC | #7
On Wed, Dec 20, 2017 at 09:13:02AM +0000, Wen He wrote:
> > > Okay, but DMA_IN/OUT need struct fsldma_chan.
> > > May be I can refer to it that Implement a similar macro, will and below to
> > instead it in next version.
> > >
> > > #define QDMA_IN(fsl_qdma_engine, addr)
> > \
> > >                 (((fsl_qdma_engine)->big_endian &
> > FSL_DMA_BIG_ENDIAN) ?         \
> > >                         ioread32be(addr) : ioread32(addr))
> > >
> > > #define QDMA_OUT(fsl_qdma_engine, addr, val)
> > \
> > >                 (((fsl_qdma_engine)->big_endian &
> > FSL_DMA_BIG_ENDIAN) ?         \
> > >                         iowrite32be(val, addr) : iowrite32(val, addr))
> > 
> > Ah that would be duplicate. How about making it common and abstracting this
> > as FSL_DMA_IN/OUT and using in both drivers?
> 
> OK, but for now we are on rush to let this merged. Could we apply current version fist, 
> then I will discuss with author of that macro to get it done? Thank you.

Am really sorry but that is not how upstreaming works. We dont do this and
we certainly don't rush.
Wen He Dec. 21, 2017, 6:37 a.m. UTC | #8
> -----Original Message-----

> From: dmaengine-owner@vger.kernel.org

> [mailto:dmaengine-owner@vger.kernel.org] On Behalf Of Vinod Koul

> Sent: 2017年12月21日 0:17

> To: Wen He <wen.he_1@nxp.com>

> Cc: Leo Li <leoyang.li@nxp.com>; dmaengine@vger.kernel.org; Jiafei Pan

> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>

> Subject: Re: [PATCH 1/4] dma: fsl-qdma: add qDMA Command queue mode

> driver

> 

> On Wed, Dec 20, 2017 at 09:13:02AM +0000, Wen He wrote:

> > > > Okay, but DMA_IN/OUT need struct fsldma_chan.

> > > > May be I can refer to it that Implement a similar macro, will and

> > > > below to

> > > instead it in next version.

> > > >

> > > > #define QDMA_IN(fsl_qdma_engine, addr)                                  \

> > > >                 (((fsl_qdma_engine)->big_endian & FSL_DMA_BIG_ENDIAN) ? \

> > > >                         ioread32be(addr) : ioread32(addr))

> > > >

> > > > #define QDMA_OUT(fsl_qdma_engine, addr, val)                            \

> > > >                 (((fsl_qdma_engine)->big_endian & FSL_DMA_BIG_ENDIAN) ? \

> > > >                         iowrite32be(val, addr) : iowrite32(val, addr))

> > >

> > > Ah that would be duplicate. How about making it common and

> > > abstracting this as FSL_DMA_IN/OUT and using in both drivers?

> >

> > OK, but for now we are on rush to let this merged. Could we apply

> > current version fist, then I will discuss with author of that macro to get it

> done? Thank you.

> 

> Am really sorry but that is not how upstreaming works. We dont do this and

> we certainly don't rush.

> 


Okay, got it. After discussing with the author, I know fsldma.c is the early Freescale PowerPC SoC driver where DMA_IN/OUT is used.
So I will define FSL_DMA_IN/OUT below to replace DMA_IN/OUT in fsldma.h and use it in both drivers.
Is that ok?

#ifdef CONFIG_PPC
/* read helpers return u32; write helpers take (val, addr) to match
 * the FSL_DMA_IN/FSL_DMA_OUT call sites below */
static u32 ioread32(const u32 __iomem *addr)
{
        return in_le32(addr);
}

static void iowrite32(u32 val, u32 __iomem *addr)
{
        out_le32(addr, val);
}

static u32 ioread32be(const u32 __iomem *addr)
{
        return in_be32(addr);
}

static void iowrite32be(u32 val, u32 __iomem *addr)
{
        out_be32(addr, val);
}
#endif

#define FSL_DMA_IN(fsl_dma, addr, width)                                \
                (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ?            \
                        ioread##width##be(addr) : ioread##width(addr))

#define FSL_DMA_OUT(fsl_dma, addr, val, width)                          \
                (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ?            \
                        iowrite##width##be(val, addr) : iowrite##width(val, addr))
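
With these in place, the qDMA accessors could collapse to something like the
following (sketch; assumes fsl_qdma_engine also gets a feature word carrying
FSL_DMA_BIG_ENDIAN, set from the "big-endian" DT property):

static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
{
        return FSL_DMA_IN(qdma, addr, 32);
}

static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
                        void __iomem *addr)
{
        FSL_DMA_OUT(qdma, addr, val, 32);
}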

Best Regards,
Wen

> --

> ~Vinod

Appana Durga Kedareswara Rao Dec. 21, 2017, 1:59 p.m. UTC | #9
Hi,

	Thanks for the patch.. .
<Snip>
>> > > Ah that would be duplicate. How about making it common and
>> > > abstracting this as FSL_DMA_IN/OUT and using in both drivers?
>> >
>> > OK, but for now we are on rush to let this merged. Could we apply
>> > current version fist, then I will discuss with author of that macro
>> > to get it done? Thank you.
>>
>> Am really sorry but that is not how upstreaming works. We dont do this
>> and we certainly don't rush.
>>
>
>Okay, got it. through discuss with author, I know the fsldma.c is early freescale
>powerpc Socs driver and DMA_IN/OUT used.
>So I want will and below defined FSL_DMA_IN/OUT instead DMA_IN/OUT in
>fsldma.h and using in both drivers.
>Is that ok?
>
>#ifdef CONFIG_PPC
>static void ioread32(const u32 __iomem *addr) {
>        return in_le32(addr);
>}
>
>static void iowrite32(u32 __iomem *addr, u32 val) {
>        out_le32(addr, val);
>}
>
>static void ioread32be(const u32 __iomem *addr) {
>        return in_be32(addr);
>}
>
>static void iowrite32be(u32 __iomem *addr, u32 val) {
>        out_be32(addr, val);
>}
>#endif
>
>#define FSL_DMA_IN(fsl_dma, addr, width)                                \
>                (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ?            \
>                        ioread##width##be(addr) : ioread##width(addr))
>
>#define FSL_DMA_OUT(fsl_dma, addr, val, width)                          \
>                (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ?            \
>                        iowrite##width##be(val, addr) : iowrite##width(val, addr))
>

Not sure whether it is useful or not.
To differentiate b/w big-endian and little endian you can read a register and based on that
You can differentiate sub sequent writes/reads in the driver.

For your reference
https://elixir.free-electrons.com/linux/latest/source/drivers/net/can/xilinx_can.c#L1156

Regards,
Kedar.

>Best Regards,
>Wen
>
>> --
>> ~Vinod
Wen He Dec. 22, 2017, 2:34 a.m. UTC | #10
Hi Kedar,

> -----Original Message-----
> From: Appana Durga Kedareswara Rao [mailto:appanad@xilinx.com]
> Sent: 2017年12月21日 22:00
> To: Wen He <wen.he_1@nxp.com>; Vinod Koul <vinod.koul@intel.com>
> Cc: Leo Li <leoyang.li@nxp.com>; dmaengine@vger.kernel.org; Jiafei Pan
> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>
> Subject: RE: [PATCH 1/4] dma: fsl-qdma: add qDMA Command queue mode
> driver
>
> Hi,
>
> 	Thanks for the patch.. .
> <Snip>
> >> > > Ah that would be duplicate. How about making it common and
> >> > > abstracting this as FSL_DMA_IN/OUT and using in both drivers?
> >> >
> >> > OK, but for now we are on rush to let this merged. Could we apply
> >> > current version fist, then I will discuss with author of that macro
> >> > to get it done? Thank you.
> >>
> >> Am really sorry but that is not how upstreaming works. We dont do
> >> this and we certainly don't rush.
> >>
> >
> >Okay, got it. through discuss with author, I know the fsldma.c is early
> >freescale powerpc Socs driver and DMA_IN/OUT used.
> >So I want will and below defined FSL_DMA_IN/OUT instead DMA_IN/OUT in
> >fsldma.h and using in both drivers.
> >Is that ok?
> >
> >#ifdef CONFIG_PPC
> >static void ioread32(const u32 __iomem *addr) {
> >        return in_le32(addr);
> >}
> >
> >static void iowrite32(u32 __iomem *addr, u32 val) {
> >        out_le32(addr, val);
> >}
> >
> >static void ioread32be(const u32 __iomem *addr) {
> >        return in_be32(addr);
> >}
> >
> >static void iowrite32be(u32 __iomem *addr, u32 val) {
> >        out_be32(addr, val);
> >}
> >#endif
> >
> >#define FSL_DMA_IN(fsl_dma, addr, width)                                \
> >                (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ?            \
> >                        ioread##width##be(addr) : ioread##width(addr))
> >
> >#define FSL_DMA_OUT(fsl_dma, addr, val, width)                          \
> >                (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ?            \
> >                        iowrite##width##be(val, addr) : iowrite##width(val, addr))
> >
>
> Not sure whether it is useful or not.
> To differentiate b/w big-endian and little endian you can read a register and
> based on that You can differentiate sub sequent writes/reads in the driver.
>
> For your reference
> https://elixir.free-electrons.com/linux/latest/source/drivers/net/can/xilinx_can.c#L1156

This endian mode is not CPU endian mode, is IP(qdma) Module endian mode.
For now NXP QorIQ Layerscape Processors including 13 socs, same IP on different soc has may be different endian-mode.

fsl_qdma->feature = of_property_read_bool(np, "big-endian");

In above, IP endian-mode defined in dts file, fsl_qdma_probe() need read dts node be used for getting IP endian mode.

So, Maybe I can't follow your reference.

For your reference
https://elixir.free-electrons.com/linux/latest/source/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi

Regards,
Wen

> Regards,
> Kedar.
>
> >Best Regards,
> >Wen
> >
> >> --
> >> ~Vinod
Appana Durga Kedareswara Rao Dec. 22, 2017, 4:56 a.m. UTC | #11
Hi Wen,

<Snip>
>> Not sure whether it is useful or not.
>> To differentiate b/w big-endian and little-endian you can read a
>> register, and based on that you can differentiate subsequent
>> writes/reads in the driver.
>>
>> For your reference
>> https://elixir.free-electrons.com/linux/latest/source/drivers/net/can/xilinx_can.c#L1156
>>
>
> This endian mode is not the CPU endian mode; it is the IP (qDMA) module
> endian mode. For now the NXP QorIQ Layerscape Processors include 13
> SoCs, and the same IP on different SoCs may have a different endian
> mode.
>
> fsl_qdma->feature = of_property_read_bool(np, "big-endian");
>
> In the above, the IP endian mode is defined in the dts file, and
> fsl_qdma_probe() needs to read the dts node to get the IP endian mode.
>
> So, maybe I can't follow your reference.
>
> For your reference
> https://elixir.free-electrons.com/linux/latest/source/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi

Got it, thanks for the clarification...

Regards,
Kedar.
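The upshot of the thread: the qDMA block's byte order is a fixed property of the IP instance, declared in the device tree, not something to probe from a register at run time. A minimal user-space model of the resulting accessor pattern (mirroring qdma_readl()/qdma_writel() in the patch below; the simulated register and the little-endian-host assumption are stand-ins for real MMIO):

/*
 * Hedged sketch: models how a single device-tree "big-endian" flag
 * selects byte-swapped register accessors.  The "register" is a plain
 * variable here; in the driver this is ioread32()/ioread32be() on real
 * MMIO, and a little-endian host is assumed (so the big-endian
 * accessor byte-swaps).
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct qdma_model {
	int big_endian;		/* of_property_read_bool(np, "big-endian") */
	uint32_t reg;		/* stand-in for one 32-bit device register */
};

static uint32_t swap32(uint32_t v)
{
	return (v << 24) | ((v & 0xff00) << 8) |
	       ((v >> 8) & 0xff00) | (v >> 24);
}

static uint32_t qdma_readl_model(const struct qdma_model *q)
{
	return q->big_endian ? swap32(q->reg) : q->reg;
}

static void qdma_writel_model(struct qdma_model *q, uint32_t val)
{
	q->reg = q->big_endian ? swap32(val) : val;
}

int main(void)
{
	struct qdma_model le = { .big_endian = 0 }, be = { .big_endian = 1 };

	qdma_writel_model(&le, 0x12345678);
	qdma_writel_model(&be, 0x12345678);
	printf("raw bytes: LE %08" PRIx32 ", BE %08" PRIx32 "\n", le.reg, be.reg);
	printf("read back: LE %08" PRIx32 ", BE %08" PRIx32 "\n",
	       qdma_readl_model(&le), qdma_readl_model(&be));
	return 0;
}

Running it prints raw bytes 78563412 for the big-endian instance but identical read-back values for both, which is the invariant the helper pair exists to preserve.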

Patch

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 27df3e2837fd..22e203c68e94 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -215,6 +215,18 @@ config FSL_EDMA
 	  multiplexing capability for DMA request sources(slot).
 	  This module can be found on Freescale Vybrid and LS-1 SoCs.
 
+config FSL_QDMA
+       tristate "Freescale qDMA engine support"
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       select DMA_ENGINE_RAID
+       select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+       help
+         Support the Freescale qDMA engine with command queue and legacy mode.
+         Channel virtualization is supported through enqueuing of DMA jobs to,
+         or dequeuing DMA jobs from, different work queues.
+         This module can be found on Freescale LS SoCs.
+
 config FSL_RAID
         tristate "Freescale RAID engine Support"
         depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index b9dca8a0e142..7a49b7b6124a 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
 obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
new file mode 100644
index 000000000000..43ed48a73ed6
--- /dev/null
+++ b/drivers/dma/fsl-qdma.c
@@ -0,0 +1,1163 @@
+/*
+ * drivers/dma/fsl-qdma.c
+ *
+ * Copyright 2014-2015 Freescale Semiconductor, Inc.
+ *
+ * Driver for the Freescale qDMA engine with software command queue mode.
+ * Channel virtualization is supported through enqueuing of DMA jobs to,
+ * or dequeuing DMA jobs from, different work queues.
+ * This module can be found on Freescale LS SoCs.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+
+#define FSL_QDMA_DMR			0x0
+#define FSL_QDMA_DSR			0x4
+#define FSL_QDMA_DEIER			0xe00
+#define FSL_QDMA_DEDR			0xe04
+#define FSL_QDMA_DECFDW0R		0xe10
+#define FSL_QDMA_DECFDW1R		0xe14
+#define FSL_QDMA_DECFDW2R		0xe18
+#define FSL_QDMA_DECFDW3R		0xe1c
+#define FSL_QDMA_DECFQIDR		0xe30
+#define FSL_QDMA_DECBR			0xe34
+
+#define FSL_QDMA_BCQMR(x)		(0xc0 + 0x100 * (x))
+#define FSL_QDMA_BCQSR(x)		(0xc4 + 0x100 * (x))
+#define FSL_QDMA_BCQEDPA_SADDR(x)	(0xc8 + 0x100 * (x))
+#define FSL_QDMA_BCQDPA_SADDR(x)	(0xcc + 0x100 * (x))
+#define FSL_QDMA_BCQEEPA_SADDR(x)	(0xd0 + 0x100 * (x))
+#define FSL_QDMA_BCQEPA_SADDR(x)	(0xd4 + 0x100 * (x))
+#define FSL_QDMA_BCQIER(x)		(0xe0 + 0x100 * (x))
+#define FSL_QDMA_BCQIDR(x)		(0xe4 + 0x100 * (x))
+
+#define FSL_QDMA_SQDPAR			0x80c
+#define FSL_QDMA_SQEPAR			0x814
+#define FSL_QDMA_BSQMR			0x800
+#define FSL_QDMA_BSQSR			0x804
+#define FSL_QDMA_BSQICR			0x828
+#define FSL_QDMA_CQMR			0xa00
+#define FSL_QDMA_CQDSCR1		0xa08
+#define FSL_QDMA_CQDSCR2                0xa0c
+#define FSL_QDMA_CQIER			0xa10
+#define FSL_QDMA_CQEDR			0xa14
+#define FSL_QDMA_SQCCMR			0xa20
+
+#define FSL_QDMA_SQICR_ICEN
+
+#define FSL_QDMA_CQIDR_CQT		0xff000000
+#define FSL_QDMA_CQIDR_SQPE		0x800000
+#define FSL_QDMA_CQIDR_SQT		0x8000
+
+#define FSL_QDMA_BCQIER_CQTIE		0x8000
+#define FSL_QDMA_BCQIER_CQPEIE		0x800000
+#define FSL_QDMA_BSQICR_ICEN		0x80000000
+#define FSL_QDMA_BSQICR_ICST(x)		((x) << 16)
+#define FSL_QDMA_CQIER_MEIE		0x80000000
+#define FSL_QDMA_CQIER_TEIE		0x1
+#define FSL_QDMA_SQCCMR_ENTER_WM	0x200000
+
+#define FSL_QDMA_QUEUE_MAX		8
+
+#define FSL_QDMA_BCQMR_EN		0x80000000
+#define FSL_QDMA_BCQMR_EI		0x40000000
+#define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
+#define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
+
+#define FSL_QDMA_BCQSR_QF		0x10000
+#define FSL_QDMA_BCQSR_XOFF		0x1
+
+#define FSL_QDMA_BSQMR_EN		0x80000000
+#define FSL_QDMA_BSQMR_DI		0x40000000
+#define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
+
+#define FSL_QDMA_BSQSR_QE		0x20000
+
+#define FSL_QDMA_DMR_DQD		0x40000000
+#define FSL_QDMA_DSR_DB			0x80000000
+
+#define FSL_QDMA_BASE_BUFFER_SIZE	96
+#define FSL_QDMA_EXPECT_SG_ENTRY_NUM	16
+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN	64
+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
+#define FSL_QDMA_QUEUE_NUM_MAX		8
+
+#define FSL_QDMA_CMD_RWTTYPE		0x4
+#define FSL_QDMA_CMD_LWC                0x2
+
+#define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
+#define FSL_QDMA_CMD_NS_OFFSET		27
+#define FSL_QDMA_CMD_DQOS_OFFSET	24
+#define FSL_QDMA_CMD_WTHROTL_OFFSET	20
+#define FSL_QDMA_CMD_DSEN_OFFSET	19
+#define FSL_QDMA_CMD_LWC_OFFSET		16
+
+#define FSL_QDMA_E_SG_TABLE		1
+#define FSL_QDMA_E_DATA_BUFFER		0
+#define FSL_QDMA_F_LAST_ENTRY		1
+
static u64 pre_addr, pre_queue;
+
+/* qDMA Command Descriptor Formats */
+
+/* Compound Command Descriptor Format */
+struct fsl_qdma_ccdf {
+	__le32 status; /* ser, status */
+	__le32 cfg;	/* format, offset */
+	union {
+		struct {
+			__le32 addr_lo;	/* low 32-bits of 40-bit address */
+			u8 addr_hi;	/* high 8-bits of 40-bit address */
+			u8 __reserved1[2];
+			u8 cfg8b_w1; /* dd, queue*/
+		} __packed;
+		__le64 data;
+	};
+} __packed;
+
+#define QDMA_CCDF_STATUS		20
+#define QDMA_CCDF_OFFSET		20
+#define QDMA_CCDF_MASK			GENMASK(28, 20)
+#define QDMA_CCDF_FORMAT		BIT(29)
+#define QDMA_CCDF_SER			BIT(30)
+
+static inline u64 qdma_ccdf_addr_get64(const struct fsl_qdma_ccdf *ccdf)
+{
+	return le64_to_cpu(ccdf->data) & 0xffffffffffLLU;
+}
+static inline u64 qdma_ccdf_get_queue(const struct fsl_qdma_ccdf *ccdf)
+{
+	return ccdf->cfg8b_w1 & 0xff;
+}
+static inline void qdma_ccdf_addr_set64(struct fsl_qdma_ccdf *ccdf, u64 addr)
+{
+	ccdf->addr_hi = upper_32_bits(addr);
+	ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
+}
+static inline int qdma_ccdf_get_offset(const struct fsl_qdma_ccdf *ccdf)
+{
+	return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
+}
+static inline void qdma_ccdf_set_format(struct fsl_qdma_ccdf *ccdf, int offset)
+{
+	ccdf->cfg = cpu_to_le32(QDMA_CCDF_FORMAT | offset);
+}
+static inline int qdma_ccdf_get_status(const struct fsl_qdma_ccdf *ccdf)
+{
+	return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
+}
+static inline void qdma_ccdf_set_ser(struct fsl_qdma_ccdf *ccdf, int status)
+{
+	ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
+}
+/* qDMA Compound S/G Format */
+struct fsl_qdma_csgf {
+	__le32 offset; /* offset */
+	__le32 cfg;	/* E bit, F bit, length */
+	union {
+		struct {
+			__le32 addr_lo;	/* low 32-bits of 40-bit address */
+			u8 addr_hi;	/* high 8-bits of 40-bit address */
+			u8 __reserved1[3];
+		};
+		__le64 data;
+	};
+} __packed;
+
+#define QDMA_SG_FIN			BIT(30)
+#define QDMA_SG_EXT			BIT(31)
+#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
+static inline u64 qdma_csgf_addr_get64(const struct fsl_qdma_csgf *sg)
+{
+	return le64_to_cpu(sg->data) & 0xffffffffffLLU;
+}
+static inline void qdma_csgf_addr_set64(struct fsl_qdma_csgf *sg, u64 addr)
+{
+	sg->addr_hi = upper_32_bits(addr);
+	sg->addr_lo = cpu_to_le32(lower_32_bits(addr));
+}
+static inline void qdma_csgf_set_len(struct fsl_qdma_csgf *csgf, int len)
+{
+	csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
+}
+static inline void qdma_csgf_set_f(struct fsl_qdma_csgf *csgf, int len)
+{
+	csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
+}
+static inline void qdma_csgf_set_e(struct fsl_qdma_csgf *csgf, int len)
+{
+	csgf->cfg = cpu_to_le32(QDMA_SG_EXT | (len & QDMA_SG_LEN_MASK));
+}
+
+/* qDMA Source Descriptor Format */
+struct fsl_qdma_sdf {
+	__le32 rev3;
+	__le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
+	__le32 rev5;
+	__le32 cmd;
+} __packed;
+
+/* qDMA Destination Descriptor Format */
+struct fsl_qdma_ddf {
+	__le32 rev1;
+	__le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
+	__le32 rev3;
+	__le32 cmd;
+} __packed;
+
+struct fsl_qdma_chan {
+	struct virt_dma_chan		vchan;
+	struct virt_dma_desc		vdesc;
+	enum dma_status			status;
+	u32				slave_id;
+	struct fsl_qdma_engine		*qdma;
+	struct fsl_qdma_queue		*queue;
+	struct list_head		qcomp;
+};
+
+struct fsl_qdma_queue {
+	struct fsl_qdma_ccdf	*virt_head;
+	struct fsl_qdma_ccdf	*virt_tail;
+	struct list_head	comp_used;
+	struct list_head	comp_free;
+	struct dma_pool		*comp_pool;
+	struct dma_pool		*sg_pool;
+	spinlock_t		queue_lock;
+	dma_addr_t		bus_addr;
+	u32                     n_cq;
+	u32			id;
+	struct fsl_qdma_ccdf	*cq;
+};
+
+struct fsl_qdma_sg {
+	dma_addr_t		bus_addr;
+	void			*virt_addr;
+};
+
+struct fsl_qdma_comp {
+	dma_addr_t              bus_addr;
+	void			*virt_addr;
+	struct fsl_qdma_chan	*qchan;
+	struct fsl_qdma_sg	*sg_block;
+	struct virt_dma_desc    vdesc;
+	struct list_head	list;
+	u32			sg_block_src;
+	u32			sg_block_dst;
+};
+
+struct fsl_qdma_engine {
+	struct dma_device	dma_dev;
+	void __iomem		*ctrl_base;
+	void __iomem            *status_base;
+	void __iomem		*block_base;
+	u32			n_chans;
+	u32			n_queues;
+	struct mutex            fsl_qdma_mutex;
+	int			error_irq;
+	int			queue_irq;
+	bool			big_endian;
+	struct fsl_qdma_queue	*queue;
+	struct fsl_qdma_queue	*status;
+	struct fsl_qdma_chan	chans[];
+
+};
+
+static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
+{
+	if (qdma->big_endian)
+		return ioread32be(addr);
+	else
+		return ioread32(addr);
+}
+
+static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
+						void __iomem *addr)
+{
+	if (qdma->big_endian)
+		iowrite32be(val, addr);
+	else
+		iowrite32(val, addr);
+}
+
+static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct fsl_qdma_chan, vchan.chan);
+}
+
+static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct fsl_qdma_comp, vdesc);
+}
+
+static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
+{
+	/*
+	 * In qDMA mode, we don't need to do anything.
+	 */
+	return 0;
+}
+
+static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
+{
+	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+}
+
+static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
+					dma_addr_t dst, dma_addr_t src, u32 len)
+{
+	struct fsl_qdma_ccdf *ccdf;
+	struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest;
+	struct fsl_qdma_sdf *sdf;
+	struct fsl_qdma_ddf *ddf;
+
+	ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
+	csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
+	csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
+	csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
+	sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
+	ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
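+	/*
+	 * The 96-byte pre-allocated buffer holds six 16-byte entries:
+	 * entry 0 is the frame descriptor (CCDF), entries 1-3 form the
+	 * frame list table (descriptor, source and destination S/G
+	 * entries), and entries 4-5 are the source and destination
+	 * descriptors.  Hence bus_addr + 16 below points at the frame
+	 * list and bus_addr + 64 at the descriptor buffer.
+	 */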
+
+	memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
+	/* Head Command Descriptor(Frame Descriptor) */
+	qdma_ccdf_addr_set64(ccdf, fsl_comp->bus_addr + 16);
+	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
+	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
+	/* Status notification is enqueued to status queue. */
+	/* Compound Command Descriptor(Frame List Table) */
+	qdma_csgf_addr_set64(csgf_desc, fsl_comp->bus_addr + 64);
+	/* The descriptor buffer (SDF + DDF) is always 32 bytes long. */
+	qdma_csgf_set_len(csgf_desc, 32);
+	qdma_csgf_addr_set64(csgf_src, src);
+	qdma_csgf_set_len(csgf_src, len);
+	qdma_csgf_addr_set64(csgf_dest, dst);
+	qdma_csgf_set_len(csgf_dest, len);
+	/* This entry is the last entry. */
+	qdma_csgf_set_f(csgf_dest, len);
+	/* Descriptor Buffer */
+	sdf->cmd = cpu_to_le32(
+			FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	ddf->cmd = cpu_to_le32(
+			FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	ddf->cmd |= cpu_to_le32(
+			FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
+}
+
+/*
+ * Pre-request command descriptors for the enqueue ring.
+ */
+static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
+{
+	struct fsl_qdma_comp *comp_temp;
+	int i;
+
+	for (i = 0; i < queue->n_cq; i++) {
+		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
+		if (!comp_temp)
+			return -ENOMEM;
+		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
+						      GFP_NOWAIT,
+						      &comp_temp->bus_addr);
+		if (!comp_temp->virt_addr)
+			return -ENOMEM;
+		list_add_tail(&comp_temp->list, &queue->comp_free);
+	}
+	return 0;
+}
+
+/*
+ * Request a command descriptor for enqueue.
+ */
+static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
+					struct fsl_qdma_chan *fsl_chan,
+					unsigned int dst_nents,
+					unsigned int src_nents)
+{
+	struct fsl_qdma_comp *comp_temp;
+	struct fsl_qdma_sg *sg_block;
+	struct fsl_qdma_queue *queue = fsl_chan->queue;
+	unsigned long flags;
+	unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i;
+
+	spin_lock_irqsave(&queue->queue_lock, flags);
+	if (list_empty(&queue->comp_free)) {
+		spin_unlock_irqrestore(&queue->queue_lock, flags);
+		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
+		if (!comp_temp)
+			return NULL;
+		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
+						      GFP_NOWAIT,
+						      &comp_temp->bus_addr);
+		if (!comp_temp->virt_addr)
+			return NULL;
+	} else {
+		comp_temp = list_first_entry(&queue->comp_free,
+					     struct fsl_qdma_comp,
+					     list);
+		list_del(&comp_temp->list);
+		spin_unlock_irqrestore(&queue->queue_lock, flags);
+	}
+
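+	/*
+	 * Each scatter block holds FSL_QDMA_EXPECT_SG_ENTRY_NUM (16)
+	 * entries, one of which is reserved (presumably the extension
+	 * entry chaining to the next block), so a block carries at
+	 * most 15 data entries.
+	 */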
+	if (dst_nents != 0)
+		dst_sg_entry_block = dst_nents /
+					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
+	else
+		dst_sg_entry_block = 0;
+
+	if (src_nents != 0)
+		src_sg_entry_block = src_nents /
+					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
+	else
+		src_sg_entry_block = 0;
+
+	sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
+	if (sg_entry_total) {
+		sg_block = kzalloc(sizeof(*sg_block) *
+					      sg_entry_total,
+					      GFP_KERNEL);
+		if (!sg_block)
+			return NULL;
+		comp_temp->sg_block = sg_block;
+		for (i = 0; i < sg_entry_total; i++) {
+			sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
+							GFP_NOWAIT,
+							&sg_block->bus_addr);
+			if (!sg_block->virt_addr)
+				return NULL;
+			memset(sg_block->virt_addr, 0,
+					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
+			sg_block++;
+		}
+	}
+
+	comp_temp->sg_block_src = src_sg_entry_block;
+	comp_temp->sg_block_dst = dst_sg_entry_block;
+	comp_temp->qchan = fsl_chan;
+
+	return comp_temp;
+}
+
+static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
+					struct platform_device *pdev,
+					unsigned int queue_num)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_queue *queue_head, *queue_temp;
+	int ret, len, i;
+	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
+
+	if (queue_num > FSL_QDMA_QUEUE_MAX)
+		queue_num = FSL_QDMA_QUEUE_MAX;
+	len = sizeof(*queue_head) * queue_num;
+	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	if (!queue_head)
+		return NULL;
+
+	ret = of_property_read_u32_array(np, "queue-sizes", queue_size,
+								queue_num);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get queue-sizes.\n");
+		return NULL;
+	}
+
+	for (i = 0; i < queue_num; i++) {
+		if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
+			|| queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+			dev_err(&pdev->dev, "Get wrong queue-sizes.\n");
+			return NULL;
+		}
+		queue_temp = queue_head + i;
+		queue_temp->cq = dma_alloc_coherent(&pdev->dev,
+						sizeof(struct fsl_qdma_ccdf) *
+						queue_size[i],
+						&queue_temp->bus_addr,
+						GFP_KERNEL);
+		if (!queue_temp->cq)
+			return NULL;
+		queue_temp->n_cq = queue_size[i];
+		queue_temp->id = i;
+		queue_temp->virt_head = queue_temp->cq;
+		queue_temp->virt_tail = queue_temp->cq;
+		/*
+		 * The dma pool for queue command buffer
+		 */
+		queue_temp->comp_pool = dma_pool_create("comp_pool",
+						&pdev->dev,
+						FSL_QDMA_BASE_BUFFER_SIZE,
+						16, 0);
+		if (!queue_temp->comp_pool) {
+			dma_free_coherent(&pdev->dev,
+						sizeof(struct fsl_qdma_ccdf) *
+						queue_size[i],
+						queue_temp->cq,
+						queue_temp->bus_addr);
+			return NULL;
+		}
+		/*
+		 * The dma pool for queue command buffer
+		 */
+		queue_temp->sg_pool = dma_pool_create("sg_pool",
+					&pdev->dev,
+					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16,
+					64, 0);
+		if (!queue_temp->sg_pool) {
+			dma_free_coherent(&pdev->dev,
+						sizeof(struct fsl_qdma_ccdf) *
+						queue_size[i],
+						queue_temp->cq,
+						queue_temp->bus_addr);
+			dma_pool_destroy(queue_temp->comp_pool);
+			return NULL;
+		}
+		/*
+		 * List for queue command buffer
+		 */
+		INIT_LIST_HEAD(&queue_temp->comp_used);
+		INIT_LIST_HEAD(&queue_temp->comp_free);
+		spin_lock_init(&queue_temp->queue_lock);
+	}
+
+	return queue_head;
+}
+
+static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
+						struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_queue *status_head;
+	unsigned int status_size;
+	int ret;
+
+	ret = of_property_read_u32(np, "status-sizes", &status_size);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get status-sizes.\n");
+		return NULL;
+	}
+	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
+			|| status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+		dev_err(&pdev->dev, "Get wrong status_size.\n");
+		return NULL;
+	}
+	status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
+								GFP_KERNEL);
+	if (!status_head)
+		return NULL;
+
+	/*
+	 * Buffer for queue command
+	 */
+	status_head->cq = dma_alloc_coherent(&pdev->dev,
+						sizeof(struct fsl_qdma_ccdf) *
+						status_size,
+						&status_head->bus_addr,
+						GFP_KERNEL);
+	if (!status_head->cq)
+		return NULL;
+	status_head->n_cq = status_size;
+	status_head->virt_head = status_head->cq;
+	status_head->virt_tail = status_head->cq;
+	status_head->comp_pool = NULL;
+
+	return status_head;
+}
+
+static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
+{
+	void __iomem *ctrl = fsl_qdma->ctrl_base;
+	void __iomem *block = fsl_qdma->block_base;
+	int i, count = 5;
+	u32 reg;
+
+	/* Disable the command queue and wait for idle state. */
+	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+	reg |= FSL_QDMA_DMR_DQD;
+	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+	for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
+		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
+
+	while (1) {
+		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
+		if (!(reg & FSL_QDMA_DSR_DB))
+			break;
+		if (count-- < 0)
+			return -EBUSY;
+		udelay(100);
+	}
+
+	/* Disable status queue. */
+	qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
+
+	/*
+	 * Clear the command queue interrupt detect register for all queues.
+	 */
+	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
+
+	return 0;
+}
+
+static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
+{
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
+	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
+	struct fsl_qdma_queue *temp_queue;
+	struct fsl_qdma_comp *fsl_comp;
+	struct fsl_qdma_ccdf *status_addr;
+	struct fsl_qdma_csgf *csgf_src;
+	void __iomem *block = fsl_qdma->block_base;
+	u32 reg, i;
+	bool duplicate, duplicate_handle;
+
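+	/*
+	 * pre_queue/pre_addr remember the status entry consumed on the
+	 * previous iteration; when the hardware reports the same
+	 * queue/address pair again, the entry is only acknowledged
+	 * (dequeue pointer advanced) rather than completed twice.
+	 */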
+	while (1) {
+		duplicate = 0;
+		duplicate_handle = 0;
+		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
+		if (reg & FSL_QDMA_BSQSR_QE)
+			return 0;
+		status_addr = fsl_status->virt_head;
+		if (qdma_ccdf_get_queue(status_addr) == pre_queue &&
+			qdma_ccdf_addr_get64(status_addr) == pre_addr)
+			duplicate = 1;
+		i = qdma_ccdf_get_queue(status_addr);
+		pre_queue = qdma_ccdf_get_queue(status_addr);
+		pre_addr = qdma_ccdf_addr_get64(status_addr);
+		temp_queue = fsl_queue + i;
+		spin_lock(&temp_queue->queue_lock);
+		if (list_empty(&temp_queue->comp_used)) {
+			if (duplicate)
+				duplicate_handle = 1;
+			else {
+				spin_unlock(&temp_queue->queue_lock);
+				return -1;
+			}
+		} else {
+			fsl_comp = list_first_entry(&temp_queue->comp_used,
+							struct fsl_qdma_comp,
+							list);
+			csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr
+							   + 2;
+			if (fsl_comp->bus_addr + 16 != pre_addr) {
+				if (duplicate)
+					duplicate_handle = 1;
+				else {
+					spin_unlock(&temp_queue->queue_lock);
+					return -1;
+				}
+			}
+		}
+
+		if (duplicate_handle) {
+			reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
+			reg |= FSL_QDMA_BSQMR_DI;
+			qdma_ccdf_addr_set64(status_addr, 0x0);
+			fsl_status->virt_head++;
+			if (fsl_status->virt_head == fsl_status->cq
+						   + fsl_status->n_cq)
+				fsl_status->virt_head = fsl_status->cq;
+			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+			spin_unlock(&temp_queue->queue_lock);
+			continue;
+		}
+		list_del(&fsl_comp->list);
+
+		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
+		reg |= FSL_QDMA_BSQMR_DI;
+		qdma_ccdf_addr_set64(status_addr, 0x0);
+		fsl_status->virt_head++;
+		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
+			fsl_status->virt_head = fsl_status->cq;
+		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+		spin_unlock(&temp_queue->queue_lock);
+
+		spin_lock(&fsl_comp->qchan->vchan.lock);
+		vchan_cookie_complete(&fsl_comp->vdesc);
+		fsl_comp->qchan->status = DMA_COMPLETE;
+		spin_unlock(&fsl_comp->qchan->vchan.lock);
+	}
+	return 0;
+}
+
+static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
+{
+	struct fsl_qdma_engine *fsl_qdma = dev_id;
+	unsigned int intr;
+	void __iomem *status = fsl_qdma->status_base;
+
+	intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
+
+	if (intr)
+		dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
+
+	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
+{
+	struct fsl_qdma_engine *fsl_qdma = dev_id;
+	unsigned int intr, reg;
+	void __iomem *block = fsl_qdma->block_base;
+	void __iomem *ctrl = fsl_qdma->ctrl_base;
+
+	intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
+
+	if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
+		intr = fsl_qdma_queue_transfer_complete(fsl_qdma);
+
+	if (intr != 0) {
+		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+		reg |= FSL_QDMA_DMR_DQD;
+		qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
+		dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
+	}
+
+	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
+
+	return IRQ_HANDLED;
+}
+
+static int
+fsl_qdma_irq_init(struct platform_device *pdev,
+		  struct fsl_qdma_engine *fsl_qdma)
+{
+	int ret;
+
+	fsl_qdma->error_irq = platform_get_irq_byname(pdev,
+							"qdma-error");
+	if (fsl_qdma->error_irq < 0) {
+		dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
+		return fsl_qdma->error_irq;
+	}
+
+	fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");
+	if (fsl_qdma->queue_irq < 0) {
+		dev_err(&pdev->dev, "Can't get qdma queue irq.\n");
+		return fsl_qdma->queue_irq;
+	}
+
+	ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
+			fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
+		return  ret;
+	}
+	ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,
+			fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");
+		return  ret;
+	}
+
+	return 0;
+}
+
+static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
+{
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
+	struct fsl_qdma_queue *temp;
+	void __iomem *ctrl = fsl_qdma->ctrl_base;
+	void __iomem *status = fsl_qdma->status_base;
+	void __iomem *block = fsl_qdma->block_base;
+	int i, ret;
+	u32 reg;
+
+	/* Try to halt the qDMA engine first. */
+	ret = fsl_qdma_halt(fsl_qdma);
+	if (ret) {
+		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!\n");
+		return ret;
+	}
+
+	/*
+	 * Clear the command queue interrupt detect register for all queues.
+	 */
+	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
+
+	for (i = 0; i < fsl_qdma->n_queues; i++) {
+		temp = fsl_queue + i;
+		/*
+		 * Initialize Command Queue registers to point to the first
+		 * command descriptor in memory.
+		 * Dequeue Pointer Address Registers
+		 * Enqueue Pointer Address Registers
+		 */
+		qdma_writel(fsl_qdma, temp->bus_addr,
+				block + FSL_QDMA_BCQDPA_SADDR(i));
+		qdma_writel(fsl_qdma, temp->bus_addr,
+				block + FSL_QDMA_BCQEPA_SADDR(i));
+
+		/* Initialize the queue mode. */
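+		/*
+		 * n_cq was validated to lie in [64, 16384], so
+		 * ilog2(temp->n_cq) is 6..14: the CQ_SIZE field
+		 * encodes log2(size) - 6 and CD_THLD is scaled the
+		 * same way as log2(size) - 4.
+		 */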
+		reg = FSL_QDMA_BCQMR_EN;
+		reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
+		reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
+		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
+	}
+
+	/*
+	 * Workaround for erratum: ERR010812.
+	 * We must enable XOFF to avoid enqueue rejections.
+	 * Setting SQCCMR ENTER_WM to 0x20.
+	 */
+	qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
+			      block + FSL_QDMA_SQCCMR);
+	/*
+	 * Initialize status queue registers to point to the first
+	 * command descriptor in memory.
+	 * Dequeue Pointer Address Registers
+	 * Enqueue Pointer Address Registers
+	 */
+	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
+					block + FSL_QDMA_SQEPAR);
+	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
+					block + FSL_QDMA_SQDPAR);
+	/* Initialize status queue interrupt. */
+	qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
+			      block + FSL_QDMA_BCQIER(0));
+	qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)
+						   | 0x8000,
+			      block + FSL_QDMA_BSQICR);
+	qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,
+			      block + FSL_QDMA_CQIER);
+	/* Initialize controller interrupt register. */
+	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
+	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
+
+	/* Initialize the status queue mode. */
+	reg = FSL_QDMA_BSQMR_EN;
+	reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq) - 6);
+	qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+
+	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+	reg &= ~FSL_QDMA_DMR_DQD;
+	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+
+	return 0;
+}
+
+static struct dma_async_tx_descriptor *
+fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+		dma_addr_t src, size_t len, unsigned long flags)
+{
+	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+	struct fsl_qdma_comp *fsl_comp;
+
+	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0);
+	if (!fsl_comp)
+		return NULL;
+	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
+
+	return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
+}
+
+static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+{
+	void __iomem *block = fsl_chan->qdma->block_base;
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_comp *fsl_comp;
+	struct virt_dma_desc *vdesc;
+	u32 reg;
+
+	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
+	if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
+		return;
+	vdesc = vchan_next_desc(&fsl_chan->vchan);
+	if (!vdesc)
+		return;
+	list_del(&vdesc->node);
+	fsl_comp = to_fsl_qdma_comp(vdesc);
+
+	memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
+	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
+		fsl_queue->virt_head = fsl_queue->cq;
+
+	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
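+	/* Ensure the descriptor copy is visible before ringing the doorbell. */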
+	barrier();
+	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
+	reg |= FSL_QDMA_BCQMR_EI;
+	qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+	fsl_chan->status = DMA_IN_PROGRESS;
+}
+
+static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(chan, cookie, txstate);
+}
+
+static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
+{
+	struct fsl_qdma_comp *fsl_comp;
+	struct fsl_qdma_queue *fsl_queue;
+	struct fsl_qdma_sg *sg_block;
+	unsigned long flags;
+	unsigned int i;
+
+	fsl_comp = to_fsl_qdma_comp(vdesc);
+	fsl_queue = fsl_comp->qchan->queue;
+
+	if (fsl_comp->sg_block) {
+		for (i = 0; i < fsl_comp->sg_block_src +
+				fsl_comp->sg_block_dst; i++) {
+			sg_block = fsl_comp->sg_block + i;
+			dma_pool_free(fsl_queue->sg_pool,
+				      sg_block->virt_addr,
+				      sg_block->bus_addr);
+		}
+		kfree(fsl_comp->sg_block);
+	}
+
+	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
+	list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
+	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
+}
+
+static void fsl_qdma_issue_pending(struct dma_chan *chan)
+{
+	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
+	spin_lock(&fsl_chan->vchan.lock);
+	if (vchan_issue_pending(&fsl_chan->vchan))
+		fsl_qdma_enqueue_desc(fsl_chan);
+	spin_unlock(&fsl_chan->vchan.lock);
+	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
+}
+
+static int fsl_qdma_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_engine *fsl_qdma;
+	struct fsl_qdma_chan *fsl_chan;
+	struct resource *res;
+	unsigned int len, chans, queues;
+	int ret, i;
+
+	ret = of_property_read_u32(np, "channels", &chans);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get channels.\n");
+		return ret;
+	}
+
+	len = sizeof(*fsl_qdma) + sizeof(*fsl_chan) * chans;
+	fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	if (!fsl_qdma)
+		return -ENOMEM;
+
+	ret = of_property_read_u32(np, "queues", &queues);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get queues.\n");
+		return ret;
+	}
+
+	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues);
+	if (!fsl_qdma->queue)
+		return -ENOMEM;
+
+	fsl_qdma->status = fsl_qdma_prep_status_queue(pdev);
+	if (!fsl_qdma->status)
+		return -ENOMEM;
+
+	fsl_qdma->n_chans = chans;
+	fsl_qdma->n_queues = queues;
+	mutex_init(&fsl_qdma->fsl_qdma_mutex);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->ctrl_base))
+		return PTR_ERR(fsl_qdma->ctrl_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->status_base))
+		return PTR_ERR(fsl_qdma->status_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->block_base))
+		return PTR_ERR(fsl_qdma->block_base);
+
+	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+	if (ret)
+		return ret;
+
+	fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
+	INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
+	for (i = 0; i < fsl_qdma->n_chans; i++) {
+		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+
+		fsl_chan->qdma = fsl_qdma;
+		fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues;
+		fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
+		INIT_LIST_HEAD(&fsl_chan->qcomp);
+		vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
+	}
+	for (i = 0; i < fsl_qdma->n_queues; i++) {
+		ret = fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i);
+		if (ret)
+			return ret;
+	}
+
+	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
+
+	fsl_qdma->dma_dev.dev = &pdev->dev;
+	fsl_qdma->dma_dev.device_alloc_chan_resources
+		= fsl_qdma_alloc_chan_resources;
+	fsl_qdma->dma_dev.device_free_chan_resources
+		= fsl_qdma_free_chan_resources;
+	fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
+	fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
+	fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
+
+	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+	if (ret) {
+		dev_err(&pdev->dev, "dma_set_mask failure.\n");
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, fsl_qdma);
+
+	ret = dma_async_device_register(&fsl_qdma->dma_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register Freescale qDMA engine.\n");
+		return ret;
+	}
+
+	ret = fsl_qdma_reg_init(fsl_qdma);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int fsl_qdma_remove(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
+	struct fsl_qdma_queue *queue_temp;
+	struct fsl_qdma_queue *status = fsl_qdma->status;
+	struct fsl_qdma_comp *comp_temp, *_comp_temp;
+	int i;
+
+	of_dma_controller_free(np);
+	dma_async_device_unregister(&fsl_qdma->dma_dev);
+
+	/* Free descriptor areas */
+	for (i = 0; i < fsl_qdma->n_queues; i++) {
+		queue_temp = fsl_qdma->queue + i;
+		list_for_each_entry_safe(comp_temp, _comp_temp,
+					&queue_temp->comp_used,	list) {
+			dma_pool_free(queue_temp->comp_pool,
+					comp_temp->virt_addr,
+					comp_temp->bus_addr);
+			list_del(&comp_temp->list);
+			kfree(comp_temp);
+		}
+		list_for_each_entry_safe(comp_temp, _comp_temp,
+					&queue_temp->comp_free, list) {
+			dma_pool_free(queue_temp->comp_pool,
+					comp_temp->virt_addr,
+					comp_temp->bus_addr);
+			list_del(&comp_temp->list);
+			kfree(comp_temp);
+		}
+		dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
+					queue_temp->n_cq, queue_temp->cq,
+					queue_temp->bus_addr);
+		dma_pool_destroy(queue_temp->comp_pool);
+	}
+
+	dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
+				status->n_cq, status->cq, status->bus_addr);
+	return 0;
+}
+
+static const struct of_device_id fsl_qdma_dt_ids[] = {
+	{ .compatible = "fsl,ls1021a-qdma", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
+
+static struct platform_driver fsl_qdma_driver = {
+	.driver		= {
+		.name	= "fsl-qdma",
+		.owner  = THIS_MODULE,
+		.of_match_table = fsl_qdma_dt_ids,
+	},
+	.probe          = fsl_qdma_probe,
+	.remove		= fsl_qdma_remove,
+};
+
+static int __init fsl_qdma_init(void)
+{
+	return platform_driver_register(&fsl_qdma_driver);
+}
+subsys_initcall(fsl_qdma_init);
+
+static void __exit fsl_qdma_exit(void)
+{
+	platform_driver_unregister(&fsl_qdma_driver);
+}
+module_exit(fsl_qdma_exit);
+
+MODULE_ALIAS("platform:fsl-qdma");
+MODULE_DESCRIPTION("Freescale qDMA engine driver");
+MODULE_LICENSE("GPL v2");
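
For context on how this driver is exercised: it only advertises DMA_MEMCPY, so a client goes through the generic dmaengine API and lands in fsl_qdma_prep_memcpy() and fsl_qdma_issue_pending() above. A minimal, illustrative client-module sketch follows; the module name, buffer length, and flags are assumptions for the example, not part of the patch:

/*
 * qdma_memcpy_demo.c - hedged sketch of a dmaengine memcpy client.
 * Uses only the generic client API; nothing here is fsl-qdma-specific.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/string.h>

#define DEMO_LEN	4096

static int __init qdma_memcpy_demo_init(void)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_addr_t src_phys, dst_phys;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;
	void *src = NULL, *dst = NULL;
	int ret = -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL); /* any memcpy channel */
	if (!chan)
		return -ENODEV;

	src = dma_alloc_coherent(chan->device->dev, DEMO_LEN, &src_phys,
				 GFP_KERNEL);
	dst = dma_alloc_coherent(chan->device->dev, DEMO_LEN, &dst_phys,
				 GFP_KERNEL);
	if (!src || !dst)
		goto out;
	memset(src, 0xa5, DEMO_LEN);
	memset(dst, 0, DEMO_LEN);

	/* Raw prep hook, the same way dmatest drives memcpy channels. */
	tx = chan->device->device_prep_dma_memcpy(chan, dst_phys, src_phys,
						  DEMO_LEN,
						  DMA_PREP_INTERRUPT |
						  DMA_CTRL_ACK);
	if (!tx) {
		ret = -EIO;
		goto out;
	}
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);	/* -> fsl_qdma_issue_pending() */

	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) {
		ret = -ETIMEDOUT;
		goto out;
	}
	pr_info("qdma demo: copy %s\n",
		memcmp(src, dst, DEMO_LEN) ? "FAILED" : "ok");
	ret = 0;
out:
	if (src)
		dma_free_coherent(chan->device->dev, DEMO_LEN, src, src_phys);
	if (dst)
		dma_free_coherent(chan->device->dev, DEMO_LEN, dst, dst_phys);
	dma_release_channel(chan);
	return ret;
}
module_init(qdma_memcpy_demo_init);

static void __exit qdma_memcpy_demo_exit(void)
{
}
module_exit(qdma_memcpy_demo_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Illustrative qDMA memcpy client (sketch)");

Any channel advertising DMA_MEMCPY satisfies the request, so on a system with several engines the filter callback passed to dma_request_channel() would be the place to pin the test to this driver.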