diff mbox

[v5,2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs

Message ID 20180525111920.24498-2-wen.he_1@nxp.com (mailing list archive)
State Changes Requested
Headers show

Commit Message

Wen He May 25, 2018, 11:19 a.m. UTC
NXP Queue DMA controller (qDMA) on Layerscape SoCs supports channel
virtualization by allowing DMA jobs to be enqueued into different
command queues.

Note that this module depends on NXP DPAA.

Signed-off-by: Wen He <wen.he_1@nxp.com>
Signed-off-by: Jiaheng Fan <jiaheng.fan@nxp.com>
---
change in v5:
	- Fixed the issues includes:
		* add error handler which every function
		* replace word to bit definition
		* move global variable to struct definition
		* add some comments to context

change in v4:
	- Fixed the issues that Vinod point out in the mail list.

change in v3:
	- Add 'depends on ARM || ARM64' in file 'drivers/dma/Kconfig'

change in v2:
	- Replace GPL V2 License details by SPDX tags
	- Replace Freescale by NXP
	- Reduce and optimize header file references
	- Replace big_endian by feature in struct fsl_qdma_engine
	- Replace struct fsl_qdma_format by struct fsl_qdma_ccdf
	  and struct fsl_qdma_csgf
	- Remove empty line
	- Replace 'if..else' by macro 'FSL_QDMA_IN/OUT' in function
	  qdma_readl() and qdma_writel()
	- Remove function fsl_qdma_alloc_chan_resources()
	- Replace 'prei' by 'pre'
	- Replace '-1' by '-ENOMEM' in function fsl_qdma_pre_request_enqueue_desc()
	- Fix dma pool allocation need to rolled back in function
	  fsl_qdma_request_enqueue_desc()
	- Replace function of_property_read_u32_array() by device_property_read_u32_array()
	- Add functions fsl_qdma_cleanup_vchan() and fsl_qdma_irq_exit() to ensure
	  irq and tasklets stopped
	- Replace dts node element 'channels' by 'dma-channels'
	- Replace function platform_driver_register() by module_platform_driver()

 drivers/dma/Kconfig    |   13 +
 drivers/dma/Makefile   |    1 +
 drivers/dma/fsl-qdma.c | 1101 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 1115 insertions(+), 0 deletions(-)
 create mode 100644 drivers/dma/fsl-qdma.c

Comments

Vinod Koul May 29, 2018, 7:07 a.m. UTC | #1
On 25-05-18, 19:19, Wen He wrote:

> +/**
> + * struct fsl_qdma_format - This is the struct holding describing compound
> + *			    descriptor format with qDMA.
> + * @status:		    This field which describes command status and
> + *			    enqueue status notification.
> + * @cfg:		    This field which describes frame offset and frame
> + *			    format.
> + * @addr_lo:		    This field which indicating the start of the buffer
> + *			    holding the compound descriptor of the lower 32-bits
> + *			    address in memory 40-bit address.
> + * @addr_hi:		    This field's the same as above field, but point high
> + *			    8-bits in memory 40-bit address.
> + * @__reserved1:	    Reserved field.
> + * @cfg8b_w1:		    This field which describes compound descriptor
> + *			    command queue origin produced by qDMA and dynamic

you may remove 'This field which describes'... in above lines, give reader no
information :)

> + *			    debug field.
> + * @data		    Pointer to the memory 40-bit address, describes DMA
> + *			    source informaion and DMA destination information.

typo informaion

> +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
> +					struct platform_device *pdev,
> +					unsigned int queue_num)
> +{
> +	struct fsl_qdma_queue *queue_head, *queue_temp;
> +	int ret, len, i;
> +	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
> +
> +	if (queue_num > FSL_QDMA_QUEUE_MAX)
> +		queue_num = FSL_QDMA_QUEUE_MAX;
> +	len = sizeof(*queue_head) * queue_num;
> +	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
> +	if (!queue_head)
> +		return NULL;
> +
> +	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
> +					queue_size, queue_num);
> +	if (ret) {
> +		dev_err(&pdev->dev, "Can't get queue-sizes.\n");
> +		return NULL;
> +	}
> +
> +	for (i = 0; i < queue_num; i++) {
> +		if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
> +			|| queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
> +			dev_err(&pdev->dev, "Get wrong queue-sizes.\n");
> +			return NULL;

the indents here are bad for reading..

> +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
> +{
> +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
> +	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
> +	struct fsl_qdma_queue *temp_queue;
> +	struct fsl_qdma_comp *fsl_comp;
> +	struct fsl_qdma_format *status_addr;
> +	struct fsl_qdma_format *csgf_src;
> +	struct fsl_pre_status pre;
> +	void __iomem *block = fsl_qdma->block_base;
> +	u32 reg, i;
> +	bool duplicate, duplicate_handle;
> +
> +	memset(&pre, 0, sizeof(struct fsl_pre_status));
> +
> +	while (1) {
> +		duplicate = 0;
> +		duplicate_handle = 0;
> +		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
> +		if (reg & FSL_QDMA_BSQSR_QE)
> +			return 0;
> +		status_addr = fsl_status->virt_head;
> +		if (qdma_ccdf_get_queue(status_addr) == pre.queue &&
> +			qdma_ccdf_addr_get64(status_addr) == pre.addr)
> +			duplicate = 1;
> +		i = qdma_ccdf_get_queue(status_addr);
> +		pre.queue = qdma_ccdf_get_queue(status_addr);
> +		pre.addr = qdma_ccdf_addr_get64(status_addr);
> +		temp_queue = fsl_queue + i;
> +		spin_lock(&temp_queue->queue_lock);
> +		if (list_empty(&temp_queue->comp_used)) {
> +			if (duplicate)
> +				duplicate_handle = 1;

code style mandates braces for this as else has braces..

> +			else {
> +				spin_unlock(&temp_queue->queue_lock);
> +				return -EAGAIN;
> +			}
> +		} else {
> +			fsl_comp = list_first_entry(&temp_queue->comp_used,
> +							struct fsl_qdma_comp,
> +							list);
> +			csgf_src = fsl_comp->virt_addr + 2;
> +			if (fsl_comp->bus_addr + 16 != pre.addr) {
> +				if (duplicate)
> +					duplicate_handle = 1;

here as well

> +static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
> +{
> +	struct fsl_qdma_engine *fsl_qdma = dev_id;
> +	unsigned int intr;
> +	void __iomem *status = fsl_qdma->status_base;
> +
> +	intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
> +
> +	if (intr)
> +		dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
> +
> +	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);

why unconditional write, was expecting that you would write if intr is non null

> +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
> +{
> +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
> +	struct fsl_qdma_queue *temp;
> +	void __iomem *ctrl = fsl_qdma->ctrl_base;
> +	void __iomem *status = fsl_qdma->status_base;
> +	void __iomem *block = fsl_qdma->block_base;
> +	int i, ret;
> +	u32 reg;
> +
> +	/* Try to halt the qDMA engine first. */
> +	ret = fsl_qdma_halt(fsl_qdma);
> +	if (ret) {
> +		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
> +		return ret;
> +	}
> +
> +	/*
> +	 * Clear the command queue interrupt detect register for all queues.
> +	 */
> +	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));

bunch of writes with 0xffffffff, can you explain why? Also helps to make a
macro for this

> +
> +	for (i = 0; i < fsl_qdma->n_queues; i++) {
> +		temp = fsl_queue + i;
> +		/*
> +		 * Initialize Command Queue registers to point to the first
> +		 * command descriptor in memory.
> +		 * Dequeue Pointer Address Registers
> +		 * Enqueue Pointer Address Registers
> +		 */
> +		qdma_writel(fsl_qdma, temp->bus_addr,
> +				block + FSL_QDMA_BCQDPA_SADDR(i));
> +		qdma_writel(fsl_qdma, temp->bus_addr,
> +				block + FSL_QDMA_BCQEPA_SADDR(i));
> +
> +		/* Initialize the queue mode. */
> +		reg = FSL_QDMA_BCQMR_EN;
> +		reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq)-4);
> +		reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq)-6);

space around - in the above two lines

> +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
> +		dma_cookie_t cookie, struct dma_tx_state *txstate)
> +{
> +	enum dma_status ret;
> +
> +	ret = dma_cookie_status(chan, cookie, txstate);
> +	if (ret == DMA_COMPLETE || !txstate)
> +		return ret;
> +
> +	return ret;

hmmm, this seems same as return dma_cookie_status() so why should we have rest
of the code

> +static void fsl_qdma_issue_pending(struct dma_chan *chan)
> +{
> +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
> +	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
> +	spin_lock(&fsl_chan->vchan.lock);
> +	if (vchan_issue_pending(&fsl_chan->vchan))
> +		fsl_qdma_enqueue_desc(fsl_chan);
> +	spin_unlock(&fsl_chan->vchan.lock);
> +	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);

why do we need two locks, and since you are doing vchan why should you add your
own lock on top

...

Overall the patch has some code style issues which keep catching my eye, can
you please check them. Also would help to run checkpatch with --strict and
--codespell option to catch typos and alignment issue. Please beware checkpatch
is a guide and NOT a rulebook so use your discretion :)
Wen He May 29, 2018, 9:59 a.m. UTC | #2
> -----Original Message-----

> From: dmaengine-owner@vger.kernel.org

> [mailto:dmaengine-owner@vger.kernel.org] On Behalf Of Vinod

> Sent: 2018年5月29日 15:07

> To: Wen He <wen.he_1@nxp.com>

> Cc: dmaengine@vger.kernel.org; robh+dt@kernel.org;

> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan

> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>

> Subject: Re: [v5 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for

> Layerscape SoCs

> 

> On 25-05-18, 19:19, Wen He wrote:

> 

> > +/**

> > + * struct fsl_qdma_format - This is the struct holding describing compound

> > + *			    descriptor format with qDMA.

> > + * @status:		    This field which describes command status and

> > + *			    enqueue status notification.

> > + * @cfg:		    This field which describes frame offset and frame

> > + *			    format.

> > + * @addr_lo:		    This field which indicating the start of the buffer

> > + *			    holding the compound descriptor of the lower 32-bits

> > + *			    address in memory 40-bit address.

> > + * @addr_hi:		    This field's the same as above field, but point

> high

> > + *			    8-bits in memory 40-bit address.

> > + * @__reserved1:	    Reserved field.

> > + * @cfg8b_w1:		    This field which describes compound descriptor

> > + *			    command queue origin produced by qDMA and

> dynamic

> 

> you may remove 'This field which describes'... in above lines, give reader no

> information :)

> 


OK, so I remove 'this field which describes' but keep the second half, right?

> > + *			    debug field.

> > + * @data		    Pointer to the memory 40-bit address, describes

> DMA

> > + *			    source informaion and DMA destination information.

> 

> typo informaion

> 


I am sorry, that was a careless typo.

> > +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(

> > +					struct platform_device *pdev,

> > +					unsigned int queue_num)

> > +{

> > +	struct fsl_qdma_queue *queue_head, *queue_temp;

> > +	int ret, len, i;

> > +	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];

> > +

> > +	if (queue_num > FSL_QDMA_QUEUE_MAX)

> > +		queue_num = FSL_QDMA_QUEUE_MAX;

> > +	len = sizeof(*queue_head) * queue_num;

> > +	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);

> > +	if (!queue_head)

> > +		return NULL;

> > +

> > +	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",

> > +					queue_size, queue_num);

> > +	if (ret) {

> > +		dev_err(&pdev->dev, "Can't get queue-sizes.\n");

> > +		return NULL;

> > +	}

> > +

> > +	for (i = 0; i < queue_num; i++) {

> > +		if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX

> > +			|| queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {

> > +			dev_err(&pdev->dev, "Get wrong queue-sizes.\n");

> > +			return NULL;

> 

> the indents here are bad for reading..

> 


So should I add an empty line here?

> > +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine

> > +*fsl_qdma) {

> > +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;

> > +	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;

> > +	struct fsl_qdma_queue *temp_queue;

> > +	struct fsl_qdma_comp *fsl_comp;

> > +	struct fsl_qdma_format *status_addr;

> > +	struct fsl_qdma_format *csgf_src;

> > +	struct fsl_pre_status pre;

> > +	void __iomem *block = fsl_qdma->block_base;

> > +	u32 reg, i;

> > +	bool duplicate, duplicate_handle;

> > +

> > +	memset(&pre, 0, sizeof(struct fsl_pre_status));

> > +

> > +	while (1) {

> > +		duplicate = 0;

> > +		duplicate_handle = 0;

> > +		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);

> > +		if (reg & FSL_QDMA_BSQSR_QE)

> > +			return 0;

> > +		status_addr = fsl_status->virt_head;

> > +		if (qdma_ccdf_get_queue(status_addr) == pre.queue &&

> > +			qdma_ccdf_addr_get64(status_addr) == pre.addr)

> > +			duplicate = 1;

> > +		i = qdma_ccdf_get_queue(status_addr);

> > +		pre.queue = qdma_ccdf_get_queue(status_addr);

> > +		pre.addr = qdma_ccdf_addr_get64(status_addr);

> > +		temp_queue = fsl_queue + i;

> > +		spin_lock(&temp_queue->queue_lock);

> > +		if (list_empty(&temp_queue->comp_used)) {

> > +			if (duplicate)

> > +				duplicate_handle = 1;

> 

> code style mandates braces for this as else has braces..

> 


Thanks, it's good comments.

> > +			else {

> > +				spin_unlock(&temp_queue->queue_lock);

> > +				return -EAGAIN;

> > +			}

> > +		} else {

> > +			fsl_comp = list_first_entry(&temp_queue->comp_used,

> > +							struct fsl_qdma_comp,

> > +							list);

> > +			csgf_src = fsl_comp->virt_addr + 2;

> > +			if (fsl_comp->bus_addr + 16 != pre.addr) {

> > +				if (duplicate)

> > +					duplicate_handle = 1;

> 

> here as well

> 


sure

> > +static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id) {

> > +	struct fsl_qdma_engine *fsl_qdma = dev_id;

> > +	unsigned int intr;

> > +	void __iomem *status = fsl_qdma->status_base;

> > +

> > +	intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);

> > +

> > +	if (intr)

> > +		dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");

> > +

> > +	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);

> 

> why unconditional write, was expecting that you would write if intr is non null

> 

> > +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma) {

> > +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;

> > +	struct fsl_qdma_queue *temp;

> > +	void __iomem *ctrl = fsl_qdma->ctrl_base;

> > +	void __iomem *status = fsl_qdma->status_base;

> > +	void __iomem *block = fsl_qdma->block_base;

> > +	int i, ret;

> > +	u32 reg;

> > +

> > +	/* Try to halt the qDMA engine first. */

> > +	ret = fsl_qdma_halt(fsl_qdma);

> > +	if (ret) {

> > +		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");

> > +		return ret;

> > +	}

> > +

> > +	/*

> > +	 * Clear the command queue interrupt detect register for all queues.

> > +	 */

> > +	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));

> 

> bunch of writes with 0xffffffff, can you explain why? Also helps to make a

> macro for this

> 


Maybe I missed that I should define the value as a macro and add a comment.
Right?

> > +

> > +	for (i = 0; i < fsl_qdma->n_queues; i++) {

> > +		temp = fsl_queue + i;

> > +		/*

> > +		 * Initialize Command Queue registers to point to the first

> > +		 * command descriptor in memory.

> > +		 * Dequeue Pointer Address Registers

> > +		 * Enqueue Pointer Address Registers

> > +		 */

> > +		qdma_writel(fsl_qdma, temp->bus_addr,

> > +				block + FSL_QDMA_BCQDPA_SADDR(i));

> > +		qdma_writel(fsl_qdma, temp->bus_addr,

> > +				block + FSL_QDMA_BCQEPA_SADDR(i));

> > +

> > +		/* Initialize the queue mode. */

> > +		reg = FSL_QDMA_BCQMR_EN;

> > +		reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq)-4);

> > +		reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq)-6);

> 

> space around - in the above two lines

> 


Sure, thanks. 

> > +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,

> > +		dma_cookie_t cookie, struct dma_tx_state *txstate) {

> > +	enum dma_status ret;

> > +

> > +	ret = dma_cookie_status(chan, cookie, txstate);

> > +	if (ret == DMA_COMPLETE || !txstate)

> > +		return ret;

> > +

> > +	return ret;

> 

> hmmm, this seems same as return dma_cookie_status() so why should we

> have rest of the code

> 


Right, this code only fills in the struct dma_device member device_tx_status.

> > +static void fsl_qdma_issue_pending(struct dma_chan *chan) {

> > +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

> > +	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

> > +	unsigned long flags;

> > +

> > +	spin_lock_irqsave(&fsl_queue->queue_lock, flags);

> > +	spin_lock(&fsl_chan->vchan.lock);

> > +	if (vchan_issue_pending(&fsl_chan->vchan))

> > +		fsl_qdma_enqueue_desc(fsl_chan);

> > +	spin_unlock(&fsl_chan->vchan.lock);

> > +	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);

> 

> why do we need two locks, and since you are doing vchan why should you add

> your own lock on top

> 


Yes, we need two locks.
As you know, the qDMA supports multiple virtualized blocks for multi-core use,
so we need to guard against multi-core access issues.

Best Regards,
Wen
> ...

> 

> Overall the patch has some code style issues which keep catching my eye, can

> you please check them. Also would help to run checkpatch with --strict and

> --codespell option to catch typos and alignment issue. Please beware

> checkpatch is a guide and NOT a rulebook so use your discretion :)

> 

> --

> ~Vinod

> --

> To unsubscribe from this list: send the line "unsubscribe dmaengine" in the

> body of a message to majordomo@vger.kernel.org More majordomo info at

> https://emea01.safelinks.protection.outlook.com/?url=http%3A%2F%2Fvger.

> kernel.org%2Fmajordomo-info.html&data=02%7C01%7Cwen.he_1%40nxp.co

> m%7C15adc08d93884118988108d5c532d7ef%7C686ea1d3bc2b4c6fa92cd99

> c5c301635%7C0%7C0%7C636631744524104561&sdata=0ucY71mrKgPEe8e3

> IhON91zaUeOzTgFVbzRnFPwKwQo%3D&reserved=0
Vinod Koul May 29, 2018, 10:19 a.m. UTC | #3
On 29-05-18, 09:59, Wen He wrote:
> > On 25-05-18, 19:19, Wen He wrote:
> > 
> > > +/**
> > > + * struct fsl_qdma_format - This is the struct holding describing compound
> > > + *			    descriptor format with qDMA.
> > > + * @status:		    This field which describes command status and
> > > + *			    enqueue status notification.
> > > + * @cfg:		    This field which describes frame offset and frame
> > > + *			    format.
> > > + * @addr_lo:		    This field which indicating the start of the buffer
> > > + *			    holding the compound descriptor of the lower 32-bits
> > > + *			    address in memory 40-bit address.
> > > + * @addr_hi:		    This field's the same as above field, but point
> > high
> > > + *			    8-bits in memory 40-bit address.
> > > + * @__reserved1:	    Reserved field.
> > > + * @cfg8b_w1:		    This field which describes compound descriptor
> > > + *			    command queue origin produced by qDMA and
> > dynamic
> > 
> > you may remove 'This field which describes'... in above lines, give reader no
> > information :)
> > 
> 
> Ok, so I remove 'this filed which describes' but keep the second half, right?

right

> > > +	for (i = 0; i < queue_num; i++) {
> > > +		if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
> > > +			|| queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
> > > +			dev_err(&pdev->dev, "Get wrong queue-sizes.\n");
> > > +			return NULL;
> > 
> > the indents here are bad for reading..
> > 
> 
> So I add a empty line in here?

No, indent of trailing condition is same as code block, that causes confusion,
they should have different indent

> > > +	/*
> > > +	 * Clear the command queue interrupt detect register for all queues.
> > > +	 */
> > > +	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
> > 
> > bunch of writes with 0xffffffff, can you explain why? Also helps to make a
> > macro for this
> > 
> 
> Maybe I missed that I should defined the value to the macro and add comment.
> Right?

that would help, but why are you writing 0xffffffff at all these places?

> > > +static void fsl_qdma_issue_pending(struct dma_chan *chan) {
> > > +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
> > > +	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
> > > +	unsigned long flags;
> > > +
> > > +	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
> > > +	spin_lock(&fsl_chan->vchan.lock);
> > > +	if (vchan_issue_pending(&fsl_chan->vchan))
> > > +		fsl_qdma_enqueue_desc(fsl_chan);
> > > +	spin_unlock(&fsl_chan->vchan.lock);
> > > +	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
> > 
> > why do we need two locks, and since you are doing vchan why should you add
> > your own lock on top
> > 
> 
> Yes, we need two locks.
> As you know, the QDMA support multiple virtualized blocks for multi-core support.
> so we need to make sure that muliti-core access issues.

but why cant you use vchan lock for all?
Wen He May 29, 2018, 10:38 a.m. UTC | #4
> -----Original Message-----

> From: Vinod [mailto:vkoul@kernel.org]

> Sent: 2018年5月29日 18:20

> To: Wen He <wen.he_1@nxp.com>

> Cc: dmaengine@vger.kernel.org; robh+dt@kernel.org;

> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan

> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>

> Subject: Re: [v5 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for

> Layerscape SoCs

> 

> On 29-05-18, 09:59, Wen He wrote:

> > > On 25-05-18, 19:19, Wen He wrote:

> > >

> > > > +/**

> > > > + * struct fsl_qdma_format - This is the struct holding describing

> compound

> > > > + *			    descriptor format with qDMA.

> > > > + * @status:		    This field which describes command status and

> > > > + *			    enqueue status notification.

> > > > + * @cfg:		    This field which describes frame offset and

> frame

> > > > + *			    format.

> > > > + * @addr_lo:		    This field which indicating the start of the

> buffer

> > > > + *			    holding the compound descriptor of the lower 32-bits

> > > > + *			    address in memory 40-bit address.

> > > > + * @addr_hi:		    This field's the same as above field, but

> point

> > > high

> > > > + *			    8-bits in memory 40-bit address.

> > > > + * @__reserved1:	    Reserved field.

> > > > + * @cfg8b_w1:		    This field which describes compound

> descriptor

> > > > + *			    command queue origin produced by qDMA and

> > > dynamic

> > >

> > > you may remove 'This field which describes'... in above lines, give

> > > reader no information :)

> > >

> >

> > Ok, so I remove 'this filed which describes' but keep the second half, right?

> 

> right

> 

> > > > +	for (i = 0; i < queue_num; i++) {

> > > > +		if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX

> > > > +			|| queue_size[i] <

> FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {

> > > > +			dev_err(&pdev->dev, "Get wrong queue-sizes.\n");

> > > > +			return NULL;

> > >

> > > the indents here are bad for reading..

> > >

> >

> > So I add a empty line in here?

> 

> No, indent of trailing condition is same as code block, that causes confusion,

> they should have different indent

> 


Got it.

> > > > +	/*

> > > > +	 * Clear the command queue interrupt detect register for all

> queues.

> > > > +	 */

> > > > +	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));

> > >

> > > bunch of writes with 0xffffffff, can you explain why? Also helps to

> > > make a macro for this

> > >

> >

> > Maybe I missed that I should defined the value to the macro and add

> comment.

> > Right?

> 

> that would help, but why are you writing 0xffffffff at all these places?

> 


This value is from the datasheet.
Should I add a comment here explaining it?

> > > > +static void fsl_qdma_issue_pending(struct dma_chan *chan) {

> > > > +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

> > > > +	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

> > > > +	unsigned long flags;

> > > > +

> > > > +	spin_lock_irqsave(&fsl_queue->queue_lock, flags);

> > > > +	spin_lock(&fsl_chan->vchan.lock);

> > > > +	if (vchan_issue_pending(&fsl_chan->vchan))

> > > > +		fsl_qdma_enqueue_desc(fsl_chan);

> > > > +	spin_unlock(&fsl_chan->vchan.lock);

> > > > +	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);

> > >

> > > why do we need two locks, and since you are doing vchan why should

> > > you add your own lock on top

> > >

> >

> > Yes, we need two locks.

> > As you know, the QDMA support multiple virtualized blocks for multi-core

> support.

> > so we need to make sure that muliti-core access issues.

> 

> but why cant you use vchan lock for all?

> 


We can't use only the vchan lock for everything; otherwise the enqueue action would be interrupted.

Best Regards,
Wen

> --

> ~Vinod
Vinod Koul May 30, 2018, 10:27 a.m. UTC | #5
On 29-05-18, 10:38, Wen He wrote:

> > > > > +	/*
> > > > > +	 * Clear the command queue interrupt detect register for all
> > queues.
> > > > > +	 */
> > > > > +	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
> > > >
> > > > bunch of writes with 0xffffffff, can you explain why? Also helps to
> > > > make a macro for this
> > > >
> > >
> > > Maybe I missed that I should defined the value to the macro and add
> > comment.
> > > Right?
> > 
> > that would help, but why are you writing 0xffffffff at all these places?
> 
> This value is from the datasheet.
> Should I write comments to here?

Yes that would help explaining what this does..

> > > > > +static void fsl_qdma_issue_pending(struct dma_chan *chan) {
> > > > > +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
> > > > > +	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
> > > > > +	unsigned long flags;
> > > > > +
> > > > > +	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
> > > > > +	spin_lock(&fsl_chan->vchan.lock);
> > > > > +	if (vchan_issue_pending(&fsl_chan->vchan))
> > > > > +		fsl_qdma_enqueue_desc(fsl_chan);
> > > > > +	spin_unlock(&fsl_chan->vchan.lock);
> > > > > +	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
> > > >
> > > > why do we need two locks, and since you are doing vchan why should
> > > > you add your own lock on top
> > > >
> > >
> > > Yes, we need two locks.
> > > As you know, the QDMA support multiple virtualized blocks for multi-core
> > support.
> > > so we need to make sure that muliti-core access issues.
> > 
> > but why cant you use vchan lock for all?
> > 
> 
> We can't only use vchan lock for all. otherwise enqueue action will be interrupted.

I think it is possible to use only vchan lock
Leo Li May 30, 2018, 6:51 p.m. UTC | #6
On Fri, May 25, 2018 at 6:19 AM, Wen He <wen.he_1@nxp.com> wrote:
> NXP Queue DMA controller(qDMA) on Layerscape SoCs supports channel
> virtuallization by allowing DMA jobs to be enqueued into different
> command queues.
>
> Note that this module depends on NXP DPAA.
>
> Signed-off-by: Wen He <wen.he_1@nxp.com>
> Signed-off-by: Jiaheng Fan <jiaheng.fan@nxp.com>
> ---
> change in v5:
>         - Fixed the issues includes:
>                 * add error handler which every function
>                 * replace word to bit definition
>                 * move global variable to struct definition
>                 * add some comments to context
>
> change in v4:
>         - Fixed the issues that Vinod point out in the mail list.
>
> change in v3:
>         - Add 'depends on ARM || ARM64' in file 'drivers/dma/Kconfig'
>
> change in v2:
>         - Replace GPL V2 License details by SPDX tags
>         - Replace Freescale by NXP
>         - Reduce and optimize header file references
>         - Replace big_endian by feature in struct fsl_qdma_engine
>         - Replace struct fsl_qdma_format by struct fsl_qdma_ccdf
>           and struct fsl_qdma_csgf
>         - Remove empty line
>         - Replace 'if..else' by macro 'FSL_QDMA_IN/OUT' in function
>           qdma_readl() and qdma_writel()
>         - Remove function fsl_qdma_alloc_chan_resources()
>         - Replace 'prei' by 'pre'
>         - Replace '-1' by '-ENOMEM' in function fsl_qdma_pre_request_enqueue_desc()
>         - Fix dma pool allocation need to rolled back in function
>           fsl_qdma_request_enqueue_desc()
>         - Replace function of_property_read_u32_array() by device_property_read_u32_array()
>         - Add functions fsl_qdma_cleanup_vchan() and fsl_qdma_irq_exit() to ensure
>           irq and tasklets stopped
>         - Replace dts node element 'channels' by 'dma-channels'
>         - Replace function platform_driver_register() by module_platform_driver()
>
>  drivers/dma/Kconfig    |   13 +
>  drivers/dma/Makefile   |    1 +
>  drivers/dma/fsl-qdma.c | 1101 ++++++++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 1115 insertions(+), 0 deletions(-)
>  create mode 100644 drivers/dma/fsl-qdma.c
>
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index 6d61cd0..99aff33 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -225,6 +225,19 @@ config FSL_EDMA
>           multiplexing capability for DMA request sources(slot).
>           This module can be found on Freescale Vybrid and LS-1 SoCs.
>
> +config FSL_QDMA
> +       tristate "NXP Layerscape qDMA engine support"
> +       depends on ARM || ARM64
> +       select DMA_ENGINE
> +       select DMA_VIRTUAL_CHANNELS
> +       select DMA_ENGINE_RAID
> +       select ASYNC_TX_ENABLE_CHANNEL_SWITCH
> +       help
> +         Support the NXP Layerscape qDMA engine with command queue and legacy mode.
> +         Channel virtualization is supported through enqueuing of DMA jobs to,
> +         or dequeuing DMA jobs from, different work queues.
> +         This module can be found on NXP Layerscape SoCs.
> +
>  config FSL_RAID
>          tristate "Freescale RAID engine Support"
>          depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
> diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
> index 0f62a4d..93db0fc 100644
> --- a/drivers/dma/Makefile
> +++ b/drivers/dma/Makefile
> @@ -33,6 +33,7 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
>  obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
>  obj-$(CONFIG_FSL_DMA) += fsldma.o
>  obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
> +obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
>  obj-$(CONFIG_FSL_RAID) += fsl_raid.o
>  obj-$(CONFIG_HSU_DMA) += hsu/
>  obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
> diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
> new file mode 100644
> index 0000000..81df812
> --- /dev/null
> +++ b/drivers/dma/fsl-qdma.c
> @@ -0,0 +1,1101 @@

Not a critical issue, but the format of the file header looks a little
bit weird to me.  Would you mind clean it up?

> +// SPDX-License-Identifier: GPL-2.0
> +// Copyright 2018 NXP

I know the SPDX tag is recommended to be the 1st line, but copyright
normally goes below the file description not above.

> +

No newline needed.

> +/*
> + * Driver for NXP Layerscape Queue Direct Memory Access Controller
> + *
> + * Author:
> + *  Wen He <wen.he_1@nxp.com>
> + *  Jiaheng Fan <jiaheng.fan@nxp.com>
> + *

No newline needed.

> + */
> +


Regards,
Leo
--
To unsubscribe from this list: send the line "unsubscribe dmaengine" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Wen He May 31, 2018, 1:58 a.m. UTC | #7
DQoNCj4gLS0tLS1PcmlnaW5hbCBNZXNzYWdlLS0tLS0NCj4gRnJvbTogVmlub2QgS291bCBbbWFp
bHRvOnZpbm9kLmtvdWxAbGluYXJvLm9yZ10NCj4gU2VudDogMjAxOMTqNdTCMzDI1SAxODoyOA0K
PiBUbzogV2VuIEhlIDx3ZW4uaGVfMUBueHAuY29tPg0KPiBDYzogZG1hZW5naW5lQHZnZXIua2Vy
bmVsLm9yZzsgcm9iaCtkdEBrZXJuZWwub3JnOw0KPiBkZXZpY2V0cmVlQHZnZXIua2VybmVsLm9y
ZzsgTGVvIExpIDxsZW95YW5nLmxpQG54cC5jb20+OyBKaWFmZWkgUGFuDQo+IDxqaWFmZWkucGFu
QG54cC5jb20+OyBKaWFoZW5nIEZhbiA8amlhaGVuZy5mYW5AbnhwLmNvbT4NCj4gU3ViamVjdDog
UmU6IFt2NSAyLzZdIGRtYWVuZ2luZTogZnNsLXFkbWE6IEFkZCBxRE1BIGNvbnRyb2xsZXIgZHJp
dmVyIGZvcg0KPiBMYXllcnNjYXBlIFNvQ3MNCj4gDQo+IE9uIDI5LTA1LTE4LCAxMDozOCwgV2Vu
IEhlIHdyb3RlOg0KPiANCj4gPiA+ID4gPiA+ICsJLyoNCj4gPiA+ID4gPiA+ICsJICogQ2xlYXIg
dGhlIGNvbW1hbmQgcXVldWUgaW50ZXJydXB0IGRldGVjdCByZWdpc3RlciBmb3IgYWxsDQo+ID4g
PiBxdWV1ZXMuDQo+ID4gPiA+ID4gPiArCSAqLw0KPiA+ID4gPiA+ID4gKwlxZG1hX3dyaXRlbChm
c2xfcWRtYSwgMHhmZmZmZmZmZiwgYmxvY2sgKw0KPiA+ID4gPiA+ID4gK0ZTTF9RRE1BX0JDUUlE
UigwKSk7DQo+ID4gPiA+ID4NCj4gPiA+ID4gPiBidW5jaCBvZiB3cml0ZXMgd2l0aCAweGZmZmZm
ZmZmLCBjYW4geW91IGV4cGxhaW4gd2h5PyBBbHNvIGhlbHBzDQo+ID4gPiA+ID4gdG8gbWFrZSBh
IG1hY3JvIGZvciB0aGlzDQo+ID4gPiA+ID4NCj4gPiA+ID4NCj4gPiA+ID4gTWF5YmUgSSBtaXNz
ZWQgdGhhdCBJIHNob3VsZCBkZWZpbmVkIHRoZSB2YWx1ZSB0byB0aGUgbWFjcm8gYW5kDQo+ID4g
PiA+IGFkZA0KPiA+ID4gY29tbWVudC4NCj4gPiA+ID4gUmlnaHQ/DQo+ID4gPg0KPiA+ID4gdGhh
dCB3b3VsZCBoZWxwLCBidXQgd2h5IGFyZSB5b3Ugd3JpdGluZyAweGZmZmZmZmZmIGF0IGFsbCB0
aGVzZSBwbGFjZXM/DQo+ID4NCj4gPiBUaGlzIHZhbHVlIGlzIGZyb20gdGhlIGRhdGFzaGVldC4N
Cj4gPiBTaG91bGQgSSB3cml0ZSBjb21tZW50cyB0byBoZXJlPw0KPiANCj4gWWVzIHRoYXQgd291
bGQgaGVscCBleHBsYWluaW5nIHdoYXQgdGhpcyBkb2VzLi4NCj4gDQoNCkdvdCBpdC4gVGhhbmtz
DQoNCj4gPiA+ID4gPiA+ICtzdGF0aWMgdm9pZCBmc2xfcWRtYV9pc3N1ZV9wZW5kaW5nKHN0cnVj
dCBkbWFfY2hhbiAqY2hhbikgew0KPiA+ID4gPiA+ID4gKwlzdHJ1Y3QgZnNsX3FkbWFfY2hhbiAq
ZnNsX2NoYW4gPSB0b19mc2xfcWRtYV9jaGFuKGNoYW4pOw0KPiA+ID4gPiA+ID4gKwlzdHJ1Y3Qg
ZnNsX3FkbWFfcXVldWUgKmZzbF9xdWV1ZSA9IGZzbF9jaGFuLT5xdWV1ZTsNCj4gPiA+ID4gPiA+
ICsJdW5zaWduZWQgbG9uZyBmbGFnczsNCj4gPiA+ID4gPiA+ICsNCj4gPiA+ID4gPiA+ICsJc3Bp
bl9sb2NrX2lycXNhdmUoJmZzbF9xdWV1ZS0+cXVldWVfbG9jaywgZmxhZ3MpOw0KPiA+ID4gPiA+
ID4gKwlzcGluX2xvY2soJmZzbF9jaGFuLT52Y2hhbi5sb2NrKTsNCj4gPiA+ID4gPiA+ICsJaWYg
KHZjaGFuX2lzc3VlX3BlbmRpbmcoJmZzbF9jaGFuLT52Y2hhbikpDQo+ID4gPiA+ID4gPiArCQlm
c2xfcWRtYV9lbnF1ZXVlX2Rlc2MoZnNsX2NoYW4pOw0KPiA+ID4gPiA+ID4gKwlzcGluX3VubG9j
aygmZnNsX2NoYW4tPnZjaGFuLmxvY2spOw0KPiA+ID4gPiA+ID4gKwlzcGluX3VubG9ja19pcnFy
ZXN0b3JlKCZmc2xfcXVldWUtPnF1ZXVlX2xvY2ssIGZsYWdzKTsNCj4gPiA+ID4gPg0KPiA+ID4g
PiA+IHdoeSBkbyB3ZSBuZWVkIHR3byBsb2NrcywgYW5kIHNpbmNlIHlvdSBhcmUgZG9pbmcgdmNo
YW4gd2h5DQo+ID4gPiA+ID4gc2hvdWxkIHlvdSBhZGQgeW91ciBvd24gbG9jayBvbiB0b3ANCj4g
PiA+ID4gPg0KPiA+ID4gPg0KPiA+ID4gPiBZZXMsIHdlIG5lZWQgdHdvIGxvY2tzLg0KPiA+ID4g
PiBBcyB5b3Uga25vdywgdGhlIFFETUEgc3VwcG9ydCBtdWx0aXBsZSB2aXJ0dWFsaXplZCBibG9j
a3MgZm9yDQo+ID4gPiA+IG11bHRpLWNvcmUNCj4gPiA+IHN1cHBvcnQuDQo+ID4gPiA+IHNvIHdl
IG5lZWQgdG8gbWFrZSBzdXJlIHRoYXQgbXVsaXRpLWNvcmUgYWNjZXNzIGlzc3Vlcy4NCj4gPiA+
DQo+ID4gPiBidXQgd2h5IGNhbnQgeW91IHVzZSB2Y2hhbiBsb2NrIGZvciBhbGw/DQo+ID4gPg0K
PiA+DQo+ID4gV2UgY2FuJ3Qgb25seSB1c2UgdmNoYW4gbG9jayBmb3IgYWxsLiBvdGhlcndpc2Ug
ZW5xdWV1ZSBhY3Rpb24gd2lsbCBiZQ0KPiBpbnRlcnJ1cHRlZC4NCj4gDQo+IEkgdGhpbmsgaXQg
aXMgcG9zc2libGUgdG8gdXNlIG9ubHkgdmNoYW4gbG9jaw0KPiANCg0KSSB0cmllZCB0aGF0IGlm
IEkgdXNlIG9ubHkgdmNoYW4gbG9jayB0aGVuIHFkbWEgd2lsbCBiZSBjYW4ndCB3b3JrLg0KRG8g
eW91IGhhdmUgYSBvdGhlciBnb29kIGlkZWE/DQoNCkJlc3QgUmVnYXJkcywNCldlbg0KDQo+IC0t
DQo+IH5WaW5vZA0K
--
To unsubscribe from this list: send the line "unsubscribe dmaengine" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Wen He June 4, 2018, 9:53 a.m. UTC | #8
DQoNCj4gLS0tLS1PcmlnaW5hbCBNZXNzYWdlLS0tLS0NCj4gRnJvbTogZG1hZW5naW5lLW93bmVy
QHZnZXIua2VybmVsLm9yZw0KPiBbbWFpbHRvOmRtYWVuZ2luZS1vd25lckB2Z2VyLmtlcm5lbC5v
cmddIE9uIEJlaGFsZiBPZiBMaSBZYW5nDQo+IFNlbnQ6IDIwMTjlubQ15pyIMzHml6UgMjo1MQ0K
PiBUbzogV2VuIEhlIDx3ZW4uaGVfMUBueHAuY29tPg0KPiBDYzogdmtvdWxAa2VybmVsLm9yZzsg
ZG1hZW5naW5lQHZnZXIua2VybmVsLm9yZzsgUm9iIEhlcnJpbmcNCj4gPHJvYmgrZHRAa2VybmVs
Lm9yZz47IG9wZW4gbGlzdDpPUEVOIEZJUk1XQVJFIEFORCBGTEFUVEVORUQgREVWSUNFDQo+IFRS
RUUgQklORElOR1MgPGRldmljZXRyZWVAdmdlci5rZXJuZWwub3JnPjsgSmlhZmVpIFBhbg0KPiA8
amlhZmVpLnBhbkBueHAuY29tPjsgSmlhaGVuZyBGYW4gPGppYWhlbmcuZmFuQG54cC5jb20+DQo+
IFN1YmplY3Q6IFJlOiBbdjUgMi82XSBkbWFlbmdpbmU6IGZzbC1xZG1hOiBBZGQgcURNQSBjb250
cm9sbGVyIGRyaXZlciBmb3INCj4gTGF5ZXJzY2FwZSBTb0NzDQo+IA0KPiBPbiBGcmksIE1heSAy
NSwgMjAxOCBhdCA2OjE5IEFNLCBXZW4gSGUgPHdlbi5oZV8xQG54cC5jb20+IHdyb3RlOg0KPiA+
IE5YUCBRdWV1ZSBETUEgY29udHJvbGxlcihxRE1BKSBvbiBMYXllcnNjYXBlIFNvQ3Mgc3VwcG9y
dHMgY2hhbm5lbA0KPiA+IHZpcnR1YWxsaXphdGlvbiBieSBhbGxvd2luZyBETUEgam9icyB0byBi
ZSBlbnF1ZXVlZCBpbnRvIGRpZmZlcmVudA0KPiA+IGNvbW1hbmQgcXVldWVzLg0KPiA+DQo+ID4g
Tm90ZSB0aGF0IHRoaXMgbW9kdWxlIGRlcGVuZHMgb24gTlhQIERQQUEuDQo+ID4NCj4gPiBTaWdu
ZWQtb2ZmLWJ5OiBXZW4gSGUgPHdlbi5oZV8xQG54cC5jb20+DQo+ID4gU2lnbmVkLW9mZi1ieTog
SmlhaGVuZyBGYW4gPGppYWhlbmcuZmFuQG54cC5jb20+DQo+ID4gLS0tDQo+ID4gY2hhbmdlIGlu
IHY1Og0KPiA+ICAgICAgICAgLSBGaXhlZCB0aGUgaXNzdWVzIGluY2x1ZGVzOg0KPiA+ICAgICAg
ICAgICAgICAgICAqIGFkZCBlcnJvciBoYW5kbGVyIHdoaWNoIGV2ZXJ5IGZ1bmN0aW9uDQo+ID4g
ICAgICAgICAgICAgICAgICogcmVwbGFjZSB3b3JkIHRvIGJpdCBkZWZpbml0aW9uDQo+ID4gICAg
ICAgICAgICAgICAgICogbW92ZSBnbG9iYWwgdmFyaWFibGUgdG8gc3RydWN0IGRlZmluaXRpb24N
Cj4gPiAgICAgICAgICAgICAgICAgKiBhZGQgc29tZSBjb21tZW50cyB0byBjb250ZXh0DQo+ID4N
Cj4gPiBjaGFuZ2UgaW4gdjQ6DQo+ID4gICAgICAgICAtIEZpeGVkIHRoZSBpc3N1ZXMgdGhhdCBW
aW5vZCBwb2ludCBvdXQgaW4gdGhlIG1haWwgbGlzdC4NCj4gPg0KPiA+IGNoYW5nZSBpbiB2MzoN
Cj4gPiAgICAgICAgIC0gQWRkICdkZXBlbmRzIG9uIEFSTSB8fCBBUk02NCcgaW4gZmlsZSAnZHJp
dmVycy9kbWEvS2NvbmZpZycNCj4gPg0KPiA+IGNoYW5nZSBpbiB2MjoNCj4gPiAgICAgICAgIC0g
UmVwbGFjZSBHUEwgVjIgTGljZW5zZSBkZXRhaWxzIGJ5IFNQRFggdGFncw0KPiA+ICAgICAgICAg
LSBSZXBsYWNlIEZyZWVzY2FsZSBieSBOWFANCj4gPiAgICAgICAgIC0gUmVkdWNlIGFuZCBvcHRp
bWl6ZSBoZWFkZXIgZmlsZSByZWZlcmVuY2VzDQo+ID4gICAgICAgICAtIFJlcGxhY2UgYmlnX2Vu
ZGlhbiBieSBmZWF0dXJlIGluIHN0cnVjdCBmc2xfcWRtYV9lbmdpbmUNCj4gPiAgICAgICAgIC0g
UmVwbGFjZSBzdHJ1Y3QgZnNsX3FkbWFfZm9ybWF0IGJ5IHN0cnVjdCBmc2xfcWRtYV9jY2RmDQo+
ID4gICAgICAgICAgIGFuZCBzdHJ1Y3QgZnNsX3FkbWFfY3NnZg0KPiA+ICAgICAgICAgLSBSZW1v
dmUgZW1wdHkgbGluZQ0KPiA+ICAgICAgICAgLSBSZXBsYWNlICdpZi4uZWxzZScgYnkgbWFjcm8g
J0ZTTF9RRE1BX0lOL09VVCcgaW4gZnVuY3Rpb24NCj4gPiAgICAgICAgICAgcWRtYV9yZWFkbCgp
IGFuZCBxZG1hX3dyaXRlbCgpDQo+ID4gICAgICAgICAtIFJlbW92ZSBmdW5jdGlvbiBmc2xfcWRt
YV9hbGxvY19jaGFuX3Jlc291cmNlcygpDQo+ID4gICAgICAgICAtIFJlcGxhY2UgJ3ByZWknIGJ5
ICdwcmUnDQo+ID4gICAgICAgICAtIFJlcGxhY2UgJy0xJyBieSAnLUVOT01FTScgaW4gZnVuY3Rp
b24NCj4gZnNsX3FkbWFfcHJlX3JlcXVlc3RfZW5xdWV1ZV9kZXNjKCkNCj4gPiAgICAgICAgIC0g
Rml4IGRtYSBwb29sIGFsbG9jYXRpb24gbmVlZCB0byByb2xsZWQgYmFjayBpbiBmdW5jdGlvbg0K
PiA+ICAgICAgICAgICBmc2xfcWRtYV9yZXF1ZXN0X2VucXVldWVfZGVzYygpDQo+ID4gICAgICAg
ICAtIFJlcGxhY2UgZnVuY3Rpb24gb2ZfcHJvcGVydHlfcmVhZF91MzJfYXJyYXkoKSBieQ0KPiBk
ZXZpY2VfcHJvcGVydHlfcmVhZF91MzJfYXJyYXkoKQ0KPiA+ICAgICAgICAgLSBBZGQgZnVuY3Rp
b25zIGZzbF9xZG1hX2NsZWFudXBfdmNoYW4oKSBhbmQNCj4gZnNsX3FkbWFfaXJxX2V4aXQoKSB0
byBlbnN1cmUNCj4gPiAgICAgICAgICAgaXJxIGFuZCB0YXNrbGV0cyBzdG9wcGVkDQo+ID4gICAg
ICAgICAtIFJlcGxhY2UgZHRzIG5vZGUgZWxlbWVudCAnY2hhbm5lbHMnIGJ5ICdkbWEtY2hhbm5l
bHMnDQo+ID4gICAgICAgICAtIFJlcGxhY2UgZnVuY3Rpb24gcGxhdGZvcm1fZHJpdmVyX3JlZ2lz
dGVyKCkgYnkNCj4gPiBtb2R1bGVfcGxhdGZvcm1fZHJpdmVyKCkNCj4gPg0KPiA+ICBkcml2ZXJz
L2RtYS9LY29uZmlnICAgIHwgICAxMyArDQo+ID4gIGRyaXZlcnMvZG1hL01ha2VmaWxlICAgfCAg
ICAxICsNCj4gPiAgZHJpdmVycy9kbWEvZnNsLXFkbWEuYyB8IDExMDENCj4gPiArKysrKysrKysr
KysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysNCj4gPiAgMyBmaWxlcyBjaGFu
Z2VkLCAxMTE1IGluc2VydGlvbnMoKyksIDAgZGVsZXRpb25zKC0pICBjcmVhdGUgbW9kZQ0KPiA+
IDEwMDY0NCBkcml2ZXJzL2RtYS9mc2wtcWRtYS5jDQo+ID4NCj4gPiBkaWZmIC0tZ2l0IGEvZHJp
dmVycy9kbWEvS2NvbmZpZyBiL2RyaXZlcnMvZG1hL0tjb25maWcgaW5kZXgNCj4gPiA2ZDYxY2Qw
Li45OWFmZjMzIDEwMDY0NA0KPiA+IC0tLSBhL2RyaXZlcnMvZG1hL0tjb25maWcNCj4gPiArKysg
Yi9kcml2ZXJzL2RtYS9LY29uZmlnDQo+ID4gQEAgLTIyNSw2ICsyMjUsMTkgQEAgY29uZmlnIEZT
TF9FRE1BDQo+ID4gICAgICAgICAgIG11bHRpcGxleGluZyBjYXBhYmlsaXR5IGZvciBETUEgcmVx
dWVzdCBzb3VyY2VzKHNsb3QpLg0KPiA+ICAgICAgICAgICBUaGlzIG1vZHVsZSBjYW4gYmUgZm91
bmQgb24gRnJlZXNjYWxlIFZ5YnJpZCBhbmQgTFMtMSBTb0NzLg0KPiA+DQo+ID4gK2NvbmZpZyBG
U0xfUURNQQ0KPiA+ICsgICAgICAgdHJpc3RhdGUgIk5YUCBMYXllcnNjYXBlIHFETUEgZW5naW5l
IHN1cHBvcnQiDQo+ID4gKyAgICAgICBkZXBlbmRzIG9uIEFSTSB8fCBBUk02NA0KPiA+ICsgICAg
ICAgc2VsZWN0IERNQV9FTkdJTkUNCj4gPiArICAgICAgIHNlbGVjdCBETUFfVklSVFVBTF9DSEFO
TkVMUw0KPiA+ICsgICAgICAgc2VsZWN0IERNQV9FTkdJTkVfUkFJRA0KPiA+ICsgICAgICAgc2Vs
ZWN0IEFTWU5DX1RYX0VOQUJMRV9DSEFOTkVMX1NXSVRDSA0KPiA+ICsgICAgICAgaGVscA0KPiA+
ICsgICAgICAgICBTdXBwb3J0IHRoZSBOWFAgTGF5ZXJzY2FwZSBxRE1BIGVuZ2luZSB3aXRoIGNv
bW1hbmQNCj4gcXVldWUgYW5kIGxlZ2FjeSBtb2RlLg0KPiA+ICsgICAgICAgICBDaGFubmVsIHZp
cnR1YWxpemF0aW9uIGlzIHN1cHBvcnRlZCB0aHJvdWdoIGVucXVldWluZyBvZiBETUENCj4gam9i
cyB0bywNCj4gPiArICAgICAgICAgb3IgZGVxdWV1aW5nIERNQSBqb2JzIGZyb20sIGRpZmZlcmVu
dCB3b3JrIHF1ZXVlcy4NCj4gPiArICAgICAgICAgVGhpcyBtb2R1bGUgY2FuIGJlIGZvdW5kIG9u
IE5YUCBMYXllcnNjYXBlIFNvQ3MuDQo+ID4gKw0KPiA+ICBjb25maWcgRlNMX1JBSUQNCj4gPiAg
ICAgICAgICB0cmlzdGF0ZSAiRnJlZXNjYWxlIFJBSUQgZW5naW5lIFN1cHBvcnQiDQo+ID4gICAg
ICAgICAgZGVwZW5kcyBvbiBGU0xfU09DDQo+ICYmICFBU1lOQ19UWF9FTkFCTEVfQ0hBTk5FTF9T
V0lUQ0ggZGlmZg0KPiA+IC0tZ2l0IGEvZHJpdmVycy9kbWEvTWFrZWZpbGUgYi9kcml2ZXJzL2Rt
YS9NYWtlZmlsZSBpbmRleA0KPiA+IDBmNjJhNGQuLjkzZGIwZmMgMTAwNjQ0DQo+ID4gLS0tIGEv
ZHJpdmVycy9kbWEvTWFrZWZpbGUNCj4gPiArKysgYi9kcml2ZXJzL2RtYS9NYWtlZmlsZQ0KPiA+
IEBAIC0zMyw2ICszMyw3IEBAIG9iai0kKENPTkZJR19EV19ETUFDX0NPUkUpICs9IGR3Lw0KPiA+
ICBvYmotJChDT05GSUdfRVA5M1hYX0RNQSkgKz0gZXA5M3h4X2RtYS5vDQo+ID4gIG9iai0kKENP
TkZJR19GU0xfRE1BKSArPSBmc2xkbWEubw0KPiA+ICBvYmotJChDT05GSUdfRlNMX0VETUEpICs9
IGZzbC1lZG1hLm8NCj4gPiArb2JqLSQoQ09ORklHX0ZTTF9RRE1BKSArPSBmc2wtcWRtYS5vDQo+
ID4gIG9iai0kKENPTkZJR19GU0xfUkFJRCkgKz0gZnNsX3JhaWQubw0KPiA+ICBvYmotJChDT05G
SUdfSFNVX0RNQSkgKz0gaHN1Lw0KPiA+ICBvYmotJChDT05GSUdfSU1HX01EQ19ETUEpICs9IGlt
Zy1tZGMtZG1hLm8gZGlmZiAtLWdpdA0KPiA+IGEvZHJpdmVycy9kbWEvZnNsLXFkbWEuYyBiL2Ry
aXZlcnMvZG1hL2ZzbC1xZG1hLmMgbmV3IGZpbGUgbW9kZSAxMDA2NDQNCj4gPiBpbmRleCAwMDAw
MDAwLi44MWRmODEyDQo+ID4gLS0tIC9kZXYvbnVsbA0KPiA+ICsrKyBiL2RyaXZlcnMvZG1hL2Zz
bC1xZG1hLmMNCj4gPiBAQCAtMCwwICsxLDExMDEgQEANCj4gDQo+IE5vdCBhIGNyaXRpY2FsIGlz
c3VlLCBidXQgdGhlIGZvcm1hdCBvZiB0aGUgZmlsZSBoZWFkZXIgbG9va3MgYSBsaXR0bGUgYml0
IHdlaXJkIHRvDQo+IG1lLiAgV291bGQgeW91IG1pbmQgY2xlYW4gaXQgdXA/DQo+IA0KDQpHb3Qg
aXQgLCB5b3UgYXJlIHJpZ2h0LCBUaGFua3MuDQoNCj4gPiArLy8gU1BEWC1MaWNlbnNlLUlkZW50
aWZpZXI6IEdQTC0yLjANCj4gPiArLy8gQ29weXJpZ2h0IDIwMTggTlhQDQo+IA0KPiBJIGtub3cg
dGhlIFNQRFggdGFnIGlzIHJlY29tbWVuZGVkIHRvIGJlIHRoZSAxc3QgbGluZSwgYnV0IGNvcHly
aWdodCBub3JtYWxseQ0KPiBnb2VzIGJlbG93IHRoZSBmaWxlIGRlc2NyaXB0aW9uIG5vdCBhYm92
ZS4NCj4gDQo+ID4gKw0KPiANCj4gTm8gbmV3bGluZSBuZWVkZWQuDQo+IA0KPiA+ICsvKg0KPiA+
ICsgKiBEcml2ZXIgZm9yIE5YUCBMYXllcnNjYXBlIFF1ZXVlIERpcmVjdCBNZW1vcnkgQWNjZXNz
IENvbnRyb2xsZXINCj4gPiArICoNCj4gPiArICogQXV0aG9yOg0KPiA+ICsgKiAgV2VuIEhlIDx3
ZW4uaGVfMUBueHAuY29tPg0KPiA+ICsgKiAgSmlhaGVuZyBGYW4gPGppYWhlbmcuZmFuQG54cC5j
b20+DQo+ID4gKyAqDQo+IA0KPiBObyBuZXdsaW5lIG5lZWRlZC4NCj4gDQo+ID4gKyAqLw0KPiA+
ICsNCj4gDQo+IA0KPiBSZWdhcmRzLA0KPiBMZW8NCg0KQWxsIHJpZ2h0LCBidXQgVmlub2QgcmVj
b21tZW5kIHRoaXMga2luZCBvZiB3cml0aW5nLg0KTWF5IGJlIHdlIG5lZWQgdG8gZGlzY3VzcyB3
aXRoIFZpbm9kLg0KDQpCZXN0IFJlZ2FyZHMsDQpXZW4NCg0KPiAtLQ0KPiBUbyB1bnN1YnNjcmli
ZSBmcm9tIHRoaXMgbGlzdDogc2VuZCB0aGUgbGluZSAidW5zdWJzY3JpYmUgZG1hZW5naW5lIiBp
biB0aGUNCj4gYm9keSBvZiBhIG1lc3NhZ2UgdG8gbWFqb3Jkb21vQHZnZXIua2VybmVsLm9yZyBN
b3JlIG1ham9yZG9tbyBpbmZvIGF0DQo+IGh0dHBzOi8vZW1lYTAxLnNhZmVsaW5rcy5wcm90ZWN0
aW9uLm91dGxvb2suY29tLz91cmw9aHR0cCUzQSUyRiUyRnZnZXIuDQo+IGtlcm5lbC5vcmclMkZt
YWpvcmRvbW8taW5mby5odG1sJmRhdGE9MDIlN0MwMSU3Q3dlbi5oZV8xJTQwbnhwLmNvDQo+IG0l
N0NjMWYzMzEyZjRkMTk0Y2I0Y2QzNzA4ZDVjNjVlNTYyZSU3QzY4NmVhMWQzYmMyYjRjNmZhOTJj
ZDk5DQo+IGM1YzMwMTYzNSU3QzAlN0MwJTdDNjM2NjMzMDMwODM2MjY1ODAxJnNkYXRhPWp0ZWhF
JTJGbEh4YWpxelA3Vg0KPiAzZ1JCYkN2RVF2bzNqTUJCcWxXT0Z4cDU1dUElM0QmcmVzZXJ2ZWQ9
MA0K
--
To unsubscribe from this list: send the line "unsubscribe dmaengine" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Vinod Koul June 5, 2018, 4:28 p.m. UTC | #9
On 31-05-18, 01:58, Wen He wrote:
> > > > > > > +static void fsl_qdma_issue_pending(struct dma_chan *chan) {
> > > > > > > +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
> > > > > > > +	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
> > > > > > > +	unsigned long flags;
> > > > > > > +
> > > > > > > +	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
> > > > > > > +	spin_lock(&fsl_chan->vchan.lock);
> > > > > > > +	if (vchan_issue_pending(&fsl_chan->vchan))
> > > > > > > +		fsl_qdma_enqueue_desc(fsl_chan);
> > > > > > > +	spin_unlock(&fsl_chan->vchan.lock);
> > > > > > > +	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
> > > > > >
> > > > > > why do we need two locks, and since you are doing vchan why
> > > > > > should you add your own lock on top
> > > > > >
> > > > >
> > > > > Yes, we need two locks.
> > > > > As you know, the QDMA support multiple virtualized blocks for
> > > > > multi-core
> > > > support.
> > > > > so we need to make sure that muliti-core access issues.
> > > >
> > > > but why cant you use vchan lock for all?
> > > >
> > >
> > > We can't use only the vchan lock for everything; otherwise the enqueue action would be
> > interrupted.
> > 
> > I think it is possible to use only vchan lock
> 
> I tried using only the vchan lock, but then the qdma can't work.
> Do you have another idea?

can you explain the scenario...
Vinod Koul June 5, 2018, 4:30 p.m. UTC | #10
On 04-06-18, 09:53, Wen He wrote:

> 
> > > +// SPDX-License-Identifier: GPL-2.0
> > > +// Copyright 2018 NXP
> > 
> > I know the SPDX tag is recommended to be the 1st line, but copyright normally
> > goes below the file description not above.

I do not think that is specified anywhere.. can you point it?

> 
> All right, but Vinod recommend this kind of writing.
> May be we need to discuss with Vinod.
Leo Li June 5, 2018, 5:24 p.m. UTC | #11
On Tue, Jun 5, 2018 at 11:30 AM, Vinod <vkoul@kernel.org> wrote:
> On 04-06-18, 09:53, Wen He wrote:
>
>>
>> > > +// SPDX-License-Identifier: GPL-2.0
>> > > +// Copyright 2018 NXP
>> >
>> > I know the SPDX tag is recommended to be the 1st line, but copyright normally
>> > goes below the file description not above.
>
> I do not think that is specified anywhere.. can you point it?

No.  It is not documented.  But the majority of the source files in
Linux are using this header format like below, so I assume it is
something like a convention.

/*
 * {filename --} file description
 *
 * Copyright ...
 *
 * License ...
 */

The Documentation/process/license-rules.rst only mentioned about the
requirement for SPDX tags.  So I think we should keep the format for
other part of the header.

Regards,
Leo
--
To unsubscribe from this list: send the line "unsubscribe dmaengine" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Wen He June 11, 2018, 8:14 a.m. UTC | #12
> -----Original Message-----

> From: dmaengine-owner@vger.kernel.org

> [mailto:dmaengine-owner@vger.kernel.org] On Behalf Of Vinod

> Sent: 2018年6月6日 0:29

> To: Wen He <wen.he_1@nxp.com>

> Cc: dmaengine@vger.kernel.org; robh+dt@kernel.org;

> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan

> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>

> Subject: Re: [v5 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for

> Layerscape SoCs

> 

> On 31-05-18, 01:58, Wen He wrote:

> > > > > > > > +static void fsl_qdma_issue_pending(struct dma_chan *chan) {

> > > > > > > > +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

> > > > > > > > +	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

> > > > > > > > +	unsigned long flags;

> > > > > > > > +

> > > > > > > > +	spin_lock_irqsave(&fsl_queue->queue_lock, flags);

> > > > > > > > +	spin_lock(&fsl_chan->vchan.lock);

> > > > > > > > +	if (vchan_issue_pending(&fsl_chan->vchan))

> > > > > > > > +		fsl_qdma_enqueue_desc(fsl_chan);

> > > > > > > > +	spin_unlock(&fsl_chan->vchan.lock);

> > > > > > > > +	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);

> > > > > > >

> > > > > > > why do we need two locks, and since you are doing vchan why

> > > > > > > should you add your own lock on top

> > > > > > >

> > > > > >

> > > > > > Yes, we need two locks.

> > > > > > As you know, the QDMA support multiple virtualized blocks for

> > > > > > multi-core

> > > > > support.

> > > > > > so we need to make sure that muliti-core access issues.

> > > > >

> > > > > but why cant you use vchan lock for all?

> > > > >

> > > >

> > > > We can't only use vchan lock for all. otherwise enqueue action

> > > > will be

> > > interrupted.

> > >

> > > I think it is possible to use only vchan lock

> >

> > I tried that if I use only vchan lock then qdma will be can't work.

> > Do you have a other good idea?

> 

> can you explain the scenario...

> 

All right.
When a DMA client starts a transfer, it calls dma_async_issue_pending(), which invokes the
device_issue_pending hook. The function fsl_qdma_issue_pending() is used to fill the device_issue_pending field.

The function fsl_qdma_issue_pending() calls fsl_qdma_enqueue_desc().

The function fsl_qdma_enqueue_desc() consists of 3 steps:
1. Peek at the next descriptor to be processed.
2. If the next descriptor exists, insert it into a linked list (used to retrieve it when that descriptor's transfer completes).
3. If the next descriptor exists, write it to the qdma hardware.

In the above steps we access both struct fsl_qdma_chan and struct fsl_qdma_queue, so we need two locks to protect them.

Best Regards,
Wen

> --

> ~Vinod

> --

> To unsubscribe from this list: send the line "unsubscribe dmaengine" in the

> body of a message to majordomo@vger.kernel.org More majordomo info at

> https://emea01.safelinks.protection.outlook.com/?url=http%3A%2F%2Fvger.

> kernel.org%2Fmajordomo-info.html&data=02%7C01%7Cwen.he_1%40nxp.co

> m%7C0f7cc533d6bf4de067d008d5cb01742c%7C686ea1d3bc2b4c6fa92cd99

> c5c301635%7C0%7C0%7C636638129466650321&sdata=zrc0Pf%2Bq0pqixqm

> LC5jhjLvvV5MiSUM68XtanJxgMbQ%3D&reserved=0
Vinod Koul June 12, 2018, 4 a.m. UTC | #13
On 05-06-18, 12:24, Li Yang wrote:
> On Tue, Jun 5, 2018 at 11:30 AM, Vinod <vkoul@kernel.org> wrote:
> > On 04-06-18, 09:53, Wen He wrote:
> >
> >>
> >> > > +// SPDX-License-Identifier: GPL-2.0
> >> > > +// Copyright 2018 NXP
> >> >
> >> > I know the SPDX tag is recommended to be the 1st line, but copyright normally
> >> > goes below the file description not above.
> >
> > I do not think that is specified anywhere.. can you point it?
> 
> No.  It is not documented.  But the majority of the source files in
> Linux are using this header format like below, so I assume it is
> something like a convention.
> 
> /*
>  * {filename --} file description
>  *
>  * Copyright ...
>  *
>  * License ...
>  */
> 
> The Documentation/process/license-rules.rst only mentioned about the
> requirement for SPDX tags.  So I think we should keep the format for
> other part of the header.

And Linus said this https://lkml.org/lkml/2017/11/25/133
Leo Li June 12, 2018, 4:32 p.m. UTC | #14
On Mon, Jun 11, 2018 at 11:00 PM, Vinod <vkoul@kernel.org> wrote:
> On 05-06-18, 12:24, Li Yang wrote:
>> On Tue, Jun 5, 2018 at 11:30 AM, Vinod <vkoul@kernel.org> wrote:
>> > On 04-06-18, 09:53, Wen He wrote:
>> >
>> >>
>> >> > > +// SPDX-License-Identifier: GPL-2.0
>> >> > > +// Copyright 2018 NXP
>> >> >
>> >> > I know the SPDX tag is recommended to be the 1st line, but copyright normally
>> >> > goes below the file description not above.
>> >
>> > I do not think that is specified anywhere.. can you point it?
>>
>> No.  It is not documented.  But the majority of the source files in
>> Linux are using this header format like below, so I assume it is
>> something like a convention.
>>
>> /*
>>  * {filename --} file description
>>  *
>>  * Copyright ...
>>  *
>>  * License ...
>>  */
>>
>> The Documentation/process/license-rules.rst only mentioned about the
>> requirement for SPDX tags.  So I think we should keep the format for
>> other part of the header.
>
> And Linus said this https://lkml.org/lkml/2017/11/25/133

Thanks for the information.  It is new to me.  So we can probably keep
the original template and just change the `*`-based comment to a
`//`-based comment, like:

// SPDX-License-Identifier: ...
//
// {filename --} file description
//
// Copyright ....

Regards,
Leo
--
To unsubscribe from this list: send the line "unsubscribe dmaengine" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Wen He June 14, 2018, 2:15 a.m. UTC | #15
> -----Original Message-----

> From: dmaengine-owner@vger.kernel.org

> [mailto:dmaengine-owner@vger.kernel.org] On Behalf Of Wen He

> Sent: 2018年6月11日 16:15

> To: Vinod <vkoul@kernel.org>

> Cc: dmaengine@vger.kernel.org; robh+dt@kernel.org;

> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan

> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>

> Subject: RE: [v5 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for

> Layerscape SoCs

> 

> 

> 

> > -----Original Message-----

> > From: dmaengine-owner@vger.kernel.org

> > [mailto:dmaengine-owner@vger.kernel.org] On Behalf Of Vinod

> > Sent: 2018年6月6日 0:29

> > To: Wen He <wen.he_1@nxp.com>

> > Cc: dmaengine@vger.kernel.org; robh+dt@kernel.org;

> > devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan

> > <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>

> > Subject: Re: [v5 2/6] dmaengine: fsl-qdma: Add qDMA controller driver

> > for Layerscape SoCs

> >

> > On 31-05-18, 01:58, Wen He wrote:

> > > > > > > > > +static void fsl_qdma_issue_pending(struct dma_chan *chan) {

> > > > > > > > > +	struct fsl_qdma_chan *fsl_chan =

> to_fsl_qdma_chan(chan);

> > > > > > > > > +	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

> > > > > > > > > +	unsigned long flags;

> > > > > > > > > +

> > > > > > > > > +	spin_lock_irqsave(&fsl_queue->queue_lock, flags);

> > > > > > > > > +	spin_lock(&fsl_chan->vchan.lock);

> > > > > > > > > +	if (vchan_issue_pending(&fsl_chan->vchan))

> > > > > > > > > +		fsl_qdma_enqueue_desc(fsl_chan);

> > > > > > > > > +	spin_unlock(&fsl_chan->vchan.lock);

> > > > > > > > > +	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);

> > > > > > > >

> > > > > > > > why do we need two locks, and since you are doing vchan

> > > > > > > > why should you add your own lock on top

> > > > > > > >

> > > > > > >

> > > > > > > Yes, we need two locks.

> > > > > > > As you know, the QDMA support multiple virtualized blocks

> > > > > > > for multi-core

> > > > > > support.

> > > > > > > so we need to make sure that muliti-core access issues.

> > > > > >

> > > > > > but why cant you use vchan lock for all?

> > > > > >

> > > > >

> > > > > We can't only use vchan lock for all. otherwise enqueue action

> > > > > will be

> > > > interrupted.

> > > >

> > > > I think it is possible to use only vchan lock

> > >

> > > I tried that if I use only vchan lock then qdma will be can't work.

> > > Do you have a other good idea?

> >

> > can you explain the scenario...

> >

> All right.

> When DMA client start transmit, will be call function

> dma_async_issue_pending(), the dma_async_issue_pending() call the hook

> pointer device_issue_pending. The function fsl_qdma_issue_pending() is used

> to fill the device_issue_pending field.

> 

> The function fsl_qdma_issue_pending() call the function

> fsl_qdma_enqueue_desc().

> 

> The function fsl_qdma_enqueue_desc() includes 3 steps.

> 1. peek at the next descriptor to be processed.

> 2. if next descriptor exist, then insert to linked list(used to get it when this

> descriptor transfer complete).

> 3. if next descriptor exist, then writing to qdma.

> 

> In above steps, we will use struct fsl_qdma_chan and struct fsl_qdma_queue,

> so we need two locks to protected it.

> 

> Best Regards,

> Wen

> 


Hi Vinod,

Do you have any other comments besides what we already discussed?
Can I submit the next version of the patch?

Looking forward to your reply.

Best Regards,
Wen

> > --

> > ~Vinod

> > --

> > To unsubscribe from this list: send the line "unsubscribe dmaengine"

> > in the body of a message to majordomo@vger.kernel.org More majordomo

> > info at

> https://emea01.safelinks.protection.outlook.com/?url=http%3A%2F%2Fvger.

> >

> kernel.org%2Fmajordomo-info.html&data=02%7C01%7Cwen.he_1%40nxp.co

> >

> m%7C0f7cc533d6bf4de067d008d5cb01742c%7C686ea1d3bc2b4c6fa92cd99

> >

> c5c301635%7C0%7C0%7C636638129466650321&sdata=zrc0Pf%2Bq0pqixqm

> > LC5jhjLvvV5MiSUM68XtanJxgMbQ%3D&reserved=0

> ㈤旃??迆??瑬+-遍荻w疄藳笔鈓鏵i猷妛豝n噐■?侂h櫒璀?Ⅷ瓽珴

> 閔?(殠娸"濟?m?飦赇z罐枈帼f"穐殘坢
Vinod Koul June 14, 2018, 7:26 a.m. UTC | #16
On 14-06-18, 02:15, Wen He wrote:
> 
> Hi Vinod,
> 
> Do you have any other comments besides what we already discussed?
> Can I submit the next version of the patch?
> 
> Looking forward to your reply.

Nope pls go ahead and submit
diff mbox

Patch

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6d61cd0..99aff33 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -225,6 +225,19 @@  config FSL_EDMA
 	  multiplexing capability for DMA request sources(slot).
 	  This module can be found on Freescale Vybrid and LS-1 SoCs.
 
+config FSL_QDMA
+       tristate "NXP Layerscape qDMA engine support"
+       depends on ARM || ARM64
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       select DMA_ENGINE_RAID
+       select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+       help
+         Support the NXP Layerscape qDMA engine with command queue and legacy mode.
+         Channel virtualization is supported through enqueuing of DMA jobs to,
+         or dequeuing DMA jobs from, different work queues.
+         This module can be found on NXP Layerscape SoCs.
+
 config FSL_RAID
         tristate "Freescale RAID engine Support"
         depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0f62a4d..93db0fc 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -33,6 +33,7 @@  obj-$(CONFIG_DW_DMAC_CORE) += dw/
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
 obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
new file mode 100644
index 0000000..81df812
--- /dev/null
+++ b/drivers/dma/fsl-qdma.c
@@ -0,0 +1,1101 @@ 
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2018 NXP
+
+/*
+ * Driver for NXP Layerscape Queue Direct Memory Access Controller
+ *
+ * Author:
+ *  Wen He <wen.he_1@nxp.com>
+ *  Jiaheng Fan <jiaheng.fan@nxp.com>
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_dma.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+#include "fsldma.h"
+
+/* Register related definition */
+#define FSL_QDMA_DMR			0x0
+#define FSL_QDMA_DSR			0x4
+#define FSL_QDMA_DEIER			0xe00
+#define FSL_QDMA_DEDR			0xe04
+#define FSL_QDMA_DECFDW0R		0xe10
+#define FSL_QDMA_DECFDW1R		0xe14
+#define FSL_QDMA_DECFDW2R		0xe18
+#define FSL_QDMA_DECFDW3R		0xe1c
+#define FSL_QDMA_DECFQIDR		0xe30
+#define FSL_QDMA_DECBR			0xe34
+
+#define FSL_QDMA_BCQMR(x)		(0xc0 + 0x100 * (x))
+#define FSL_QDMA_BCQSR(x)		(0xc4 + 0x100 * (x))
+#define FSL_QDMA_BCQEDPA_SADDR(x)	(0xc8 + 0x100 * (x))
+#define FSL_QDMA_BCQDPA_SADDR(x)	(0xcc + 0x100 * (x))
+#define FSL_QDMA_BCQEEPA_SADDR(x)	(0xd0 + 0x100 * (x))
+#define FSL_QDMA_BCQEPA_SADDR(x)	(0xd4 + 0x100 * (x))
+#define FSL_QDMA_BCQIER(x)		(0xe0 + 0x100 * (x))
+#define FSL_QDMA_BCQIDR(x)		(0xe4 + 0x100 * (x))
+
+#define FSL_QDMA_SQDPAR			0x80c
+#define FSL_QDMA_SQEPAR			0x814
+#define FSL_QDMA_BSQMR			0x800
+#define FSL_QDMA_BSQSR			0x804
+#define FSL_QDMA_BSQICR			0x828
+#define FSL_QDMA_CQMR			0xa00
+#define FSL_QDMA_CQDSCR1		0xa08
+#define FSL_QDMA_CQDSCR2                0xa0c
+#define FSL_QDMA_CQIER			0xa10
+#define FSL_QDMA_CQEDR			0xa14
+#define FSL_QDMA_SQCCMR			0xa20
+
+/* Registers for bit and genmask */
+#define FSL_QDMA_CQIDR_SQT		BIT(15)
+#define QDMA_CCDF_FOTMAT		BIT(29)
+#define QDMA_CCDF_SER			BIT(30)
+#define QDMA_SG_FIN			BIT(30)
+#define QDMA_SG_EXT			BIT(31)
+#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
+#define QDMA_CCDF_MASK			GENMASK(28, 20)
+
+#define FSL_QDMA_BCQIER_CQTIE		BIT(15)
+#define FSL_QDMA_BCQIER_CQPEIE		BIT(23)
+#define FSL_QDMA_BSQICR_ICEN		BIT(31)
+
+#define FSL_QDMA_BSQICR_ICST(x)		((x) << 16)
+#define FSL_QDMA_CQIER_MEIE		BIT(31)
+#define FSL_QDMA_CQIER_TEIE		BIT(0)
+#define FSL_QDMA_SQCCMR_ENTER_WM	BIT(21)
+
+#define FSL_QDMA_BCQMR_EN		BIT(31)
+#define FSL_QDMA_BCQMR_EI		BIT(30)
+#define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
+#define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
+
+#define FSL_QDMA_BCQSR_QF		BIT(16)
+#define FSL_QDMA_BCQSR_XOFF		BIT(0)
+
+#define FSL_QDMA_BSQMR_EN		BIT(31)
+#define FSL_QDMA_BSQMR_DI		BIT(30)
+#define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
+
+#define FSL_QDMA_BSQSR_QE		BIT(17)
+
+#define FSL_QDMA_DMR_DQD		BIT(30)
+#define FSL_QDMA_DSR_DB		BIT(31)
+
+/* Size related definition */
+#define FSL_QDMA_QUEUE_MAX		8
+#define FSL_QDMA_BASE_BUFFER_SIZE	96
+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN	64
+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
+#define FSL_QDMA_QUEUE_NUM_MAX		8
+
+/* Field definition for CMD */
+#define FSL_QDMA_CMD_RWTTYPE		0x4
+#define FSL_QDMA_CMD_LWC                0x2
+#define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
+#define FSL_QDMA_CMD_NS_OFFSET		27
+#define FSL_QDMA_CMD_DQOS_OFFSET	24
+#define FSL_QDMA_CMD_WTHROTL_OFFSET	20
+#define FSL_QDMA_CMD_DSEN_OFFSET	19
+#define FSL_QDMA_CMD_LWC_OFFSET		16
+
+#define FSL_QDMA_E_SG_TABLE		1
+#define FSL_QDMA_E_DATA_BUFFER		0
+#define FSL_QDMA_F_LAST_ENTRY		1
+
+/* Field definition for Descriptor offset */
+#define QDMA_CCDF_STATUS		20
+#define QDMA_CCDF_OFFSET		20
+
+/**
+ * struct fsl_qdma_format - This is the struct holding describing compound
+ *			    descriptor format with qDMA.
+ * @status:		    This field which describes command status and
+ *			    enqueue status notification.
+ * @cfg:		    This field which describes frame offset and frame
+ *			    format.
+ * @addr_lo:		    This field which indicating the start of the buffer
+ *			    holding the compound descriptor of the lower 32-bits
+ *			    address in memory 40-bit address.
+ * @addr_hi:		    This field's the same as above field, but point high
+ *			    8-bits in memory 40-bit address.
+ * @__reserved1:	    Reserved field.
+ * @cfg8b_w1:		    This field which describes compound descriptor
+ *			    command queue origin produced by qDMA and dynamic
+ *			    debug field.
+ * @data:		    Pointer to the memory 40-bit address, describes DMA
+ *			    source information and DMA destination information.
+ */
+struct fsl_qdma_format {
+	__le32 status;
+	__le32 cfg;
+	union {
+		struct {
+			__le32 addr_lo;
+			u8 addr_hi;
+			u8 __reserved1[2];
+			u8 cfg8b_w1;
+		} __packed;
+		__le64 data;
+	};
+} __packed;
+
+/*
+ * qDMA status notification pre information.
+ *
+ * Remembers the (queue, address) pair of the previously processed
+ * status-queue entry so that duplicated notifications can be detected.
+ */
+struct fsl_pre_status {
+	u64 queue;	/* command queue id of the previous notification */
+	u64 addr;	/* 40-bit descriptor address of the previous one */
+};
+
+/* Per-channel context wrapped around a virt-dma channel. */
+struct fsl_qdma_chan {
+	struct virt_dma_chan		vchan;
+	struct virt_dma_desc		vdesc;
+	enum dma_status			status;	/* last known transfer state */
+	u32				slave_id;
+	struct fsl_qdma_engine		*qdma;	/* owning engine */
+	struct fsl_qdma_queue		*queue;	/* queue this channel enqueues to */
+	struct list_head		qcomp;
+};
+
+/* One command (or status) queue: circular ring plus descriptor bookkeeping. */
+struct fsl_qdma_queue {
+	struct fsl_qdma_format	*virt_head;	/* next ring slot to fill */
+	struct fsl_qdma_format	*virt_tail;
+	struct list_head	comp_used;	/* descriptors in flight */
+	struct list_head	comp_free;	/* recyclable descriptors */
+	struct dma_pool		*comp_pool;	/* pool of compound desc buffers */
+	spinlock_t		queue_lock;
+	dma_addr_t		bus_addr;	/* DMA address of @cq */
+	u32                     n_cq;		/* ring size in entries */
+	u32			id;
+	struct fsl_qdma_format	*cq;		/* circular ring (coherent memory) */
+};
+
+/* One DMA job: a virt-dma descriptor plus its hardware command buffer. */
+struct fsl_qdma_comp {
+	dma_addr_t              bus_addr;	/* DMA address of @virt_addr */
+	struct fsl_qdma_format	*virt_addr;	/* compound descriptor buffer */
+	struct fsl_qdma_chan	*qchan;		/* owning channel */
+	struct virt_dma_desc    vdesc;
+	struct list_head	list;		/* on comp_free or comp_used */
+};
+
+/* One qDMA hardware instance: register blocks, queues and channels. */
+struct fsl_qdma_engine {
+	struct dma_device	dma_dev;
+	void __iomem		*ctrl_base;	/* controller registers */
+	void __iomem            *status_base;	/* error/status registers */
+	void __iomem		*block_base;	/* queue block registers */
+	u32			n_chans;
+	u32			n_queues;
+	struct mutex            fsl_qdma_mutex;
+	int			error_irq;
+	int			queue_irq;
+	bool			feature;	/* true when DT has "big-endian" */
+	struct fsl_qdma_queue	*queue;
+	struct fsl_qdma_queue	*status;	/* single status (completion) queue */
+	struct fsl_qdma_chan	chans[];	/* flexible array, n_chans entries */
+
+};
+
+/* Extract the 40-bit compound-descriptor address from @ccdf. */
+static inline u64
+qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
+{
+	return le64_to_cpu(ccdf->data) & (U64_MAX >> 24);
+}
+
+/* Store a 40-bit address into a descriptor (low 32 + high 8 bits). */
+static inline void
+qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
+{
+	ccdf->addr_hi = upper_32_bits(addr);
+	ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
+}
+
+/* Command queue id a status notification originated from (cfg8b_w1). */
+static inline u64
+qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
+{
+	return ccdf->cfg8b_w1 & U8_MAX;
+}
+
+/* Frame offset field of the compound descriptor. */
+static inline int
+qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
+{
+	return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
+}
+
+/* Mark @ccdf as compound format and set its frame offset. */
+static inline void
+qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
+{
+	ccdf->cfg = cpu_to_le32(QDMA_CCDF_FOTMAT | offset);
+}
+
+/* Command status field of the descriptor. */
+static inline int
+qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
+{
+	return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
+}
+
+/* Request a status notification (SER bit) with the given status bits. */
+static inline void
+qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
+{
+	ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
+}
+
+/* Set the scatter/gather entry length (30-bit field). */
+static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
+{
+	csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
+}
+
+/* Set length and the Final bit: last entry of the S/G table. */
+static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
+{
+	csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
+}
+
+/* Set length and the Extension bit: entry points to another S/G table. */
+static inline void qdma_csgf_set_e(struct fsl_qdma_format *csgf, int len)
+{
+	csgf->cfg = cpu_to_le32(QDMA_SG_EXT | (len & QDMA_SG_LEN_MASK));
+}
+
+/* 32-bit register read honouring the engine's endianness (FSL_DMA_IN). */
+static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
+{
+	return FSL_DMA_IN(qdma, addr, 32);
+}
+
+/* 32-bit register write honouring the engine's endianness (FSL_DMA_OUT). */
+static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
+						void __iomem *addr)
+{
+	FSL_DMA_OUT(qdma, addr, val, 32);
+}
+
+/* Convert a generic dma_chan back to its containing qDMA channel. */
+static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct fsl_qdma_chan, vchan.chan);
+}
+
+/* Convert a virt-dma descriptor back to its containing qDMA descriptor. */
+static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct fsl_qdma_comp, vdesc);
+}
+
+/* Release every descriptor a client still holds on this channel. */
+static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
+{
+	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	/* Detach all descriptors under the lock, free them outside it. */
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+}
+
+/*
+ * Build the descriptor chain for one memcpy inside @fsl_comp's
+ * pre-allocated FSL_QDMA_BASE_BUFFER_SIZE buffer, laid out as six
+ * consecutive 16-byte fsl_qdma_format slots: frame descriptor, frame
+ * list (descriptor/source/destination entries) and the source and
+ * destination command words.
+ */
+static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
+					dma_addr_t dst, dma_addr_t src, u32 len)
+{
+	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
+	struct fsl_qdma_format *sdf, *ddf;
+
+	ccdf = fsl_comp->virt_addr;
+	csgf_desc = fsl_comp->virt_addr + 1;
+	csgf_src = fsl_comp->virt_addr + 2;
+	csgf_dest = fsl_comp->virt_addr + 3;
+	sdf = fsl_comp->virt_addr + 4;
+	ddf = fsl_comp->virt_addr + 5;
+
+	memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
+	/* Head Command Descriptor(Frame Descriptor); it points at slot 1. */
+	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
+	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
+	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
+
+	/* Status notification is enqueued to status queue. */
+	/* Compound Command Descriptor(Frame List Table); points at slot 4. */
+	qdma_desc_addr_set64(csgf_desc, fsl_comp->bus_addr + 64);
+	/* It must be 32 as Compound S/G Descriptor */
+	qdma_csgf_set_len(csgf_desc, 32);
+	qdma_desc_addr_set64(csgf_src, src);
+	qdma_csgf_set_len(csgf_src, len);
+	qdma_desc_addr_set64(csgf_dest, dst);
+	qdma_csgf_set_len(csgf_dest, len);
+	/* This entry is the last entry. */
+	qdma_csgf_set_f(csgf_dest, len);
+	/* Descriptor Buffer: read/write transaction type for source/dest. */
+	sdf->data = cpu_to_le64(
+			FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	ddf->data = cpu_to_le64(
+			FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	ddf->data |= cpu_to_le64(
+			FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
+}
+
+/*
+ * Pre-allocate one command descriptor per circular-queue entry and park
+ * them on the queue's free list, so the enqueue fast path normally does
+ * not have to allocate.
+ *
+ * Returns 0 on success or -ENOMEM.  On failure every descriptor that was
+ * already placed on the free list is released again, leaving the queue
+ * exactly as it was found.  (The previous error path leaked the
+ * partially-built descriptor when dma_pool_alloc() failed, and skipped
+ * cleanup entirely when the failure happened at index 1.)
+ */
+static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
+{
+	struct fsl_qdma_comp *comp_temp, *_comp_temp;
+	int i;
+
+	for (i = 0; i < queue->n_cq; i++) {
+		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
+		if (!comp_temp)
+			goto err;
+
+		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
+						      GFP_KERNEL,
+						      &comp_temp->bus_addr);
+		if (!comp_temp->virt_addr) {
+			/* Not on the list yet: must be freed by hand. */
+			kfree(comp_temp);
+			goto err;
+		}
+
+		list_add_tail(&comp_temp->list, &queue->comp_free);
+	}
+	return 0;
+
+err:
+	/* Roll back everything that made it onto the free list. */
+	list_for_each_entry_safe(comp_temp, _comp_temp,
+				 &queue->comp_free, list) {
+		dma_pool_free(queue->comp_pool,
+			      comp_temp->virt_addr,
+			      comp_temp->bus_addr);
+		list_del(&comp_temp->list);
+		kfree(comp_temp);
+	}
+	return -ENOMEM;
+}
+
+/*
+ * Request a command descriptor for enqueue.
+ *
+ * Takes a pre-allocated descriptor from the queue's free list when one
+ * is available, otherwise falls back to allocating a fresh one.  The
+ * dst_nents/src_nents arguments are currently unused (a plain memcpy
+ * needs only one source and one destination entry).
+ *
+ * Returns NULL on allocation failure.
+ */
+static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
+					struct fsl_qdma_chan *fsl_chan,
+					unsigned int dst_nents,
+					unsigned int src_nents)
+{
+	struct fsl_qdma_comp *comp_temp;
+	struct fsl_qdma_queue *queue = fsl_chan->queue;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->queue_lock, flags);
+	if (list_empty(&queue->comp_free)) {
+		/* Free list exhausted: drop the lock and allocate fresh. */
+		spin_unlock_irqrestore(&queue->queue_lock, flags);
+		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
+		if (!comp_temp)
+			return NULL;
+
+		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
+						      GFP_KERNEL,
+						      &comp_temp->bus_addr);
+		if (!comp_temp->virt_addr) {
+			kfree(comp_temp);
+			return NULL;
+		}
+
+	} else {
+		comp_temp = list_first_entry(&queue->comp_free,
+					     struct fsl_qdma_comp,
+					     list);
+		list_del(&comp_temp->list);
+		spin_unlock_irqrestore(&queue->queue_lock, flags);
+	}
+
+	comp_temp->qchan = fsl_chan;
+
+	return comp_temp;
+}
+
+/*
+ * Allocate per-queue resources: the circular command descriptor area
+ * (coherent DMA memory), the dma_pool used for compound descriptors,
+ * and the free/used bookkeeping lists.
+ *
+ * Returns an array of queue_num initialized queues, or NULL on failure.
+ * On failure everything allocated so far is released again.  (The
+ * previous error handling leaked the coherent rings of already-built
+ * queues on the early-return paths and unwound with inconsistent loop
+ * indices.)
+ */
+static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
+					struct platform_device *pdev,
+					unsigned int queue_num)
+{
+	struct fsl_qdma_queue *queue_head, *queue_temp;
+	int ret, len, i;
+	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
+
+	if (queue_num > FSL_QDMA_QUEUE_MAX)
+		queue_num = FSL_QDMA_QUEUE_MAX;
+	len = sizeof(*queue_head) * queue_num;
+	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	if (!queue_head)
+		return NULL;
+
+	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
+					queue_size, queue_num);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get queue-sizes.\n");
+		return NULL;
+	}
+
+	for (i = 0; i < queue_num; i++) {
+		if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
+			|| queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+			dev_err(&pdev->dev, "Get wrong queue-sizes.\n");
+			goto err;
+		}
+		queue_temp = queue_head + i;
+
+		/* Circular descriptor ring for this command queue. */
+		queue_temp->cq = dma_alloc_coherent(&pdev->dev,
+						sizeof(struct fsl_qdma_format) *
+						queue_size[i],
+						&queue_temp->bus_addr,
+						GFP_KERNEL);
+		if (!queue_temp->cq)
+			goto err;
+
+		queue_temp->n_cq = queue_size[i];
+		queue_temp->id = i;
+		queue_temp->virt_head = queue_temp->cq;
+		queue_temp->virt_tail = queue_temp->cq;
+
+		/*
+		 * Pool of FSL_QDMA_BASE_BUFFER_SIZE command buffers used
+		 * for the compound descriptors enqueued on this queue.
+		 */
+		queue_temp->comp_pool = dma_pool_create("comp_pool",
+						&pdev->dev,
+						FSL_QDMA_BASE_BUFFER_SIZE,
+						16, 0);
+		if (!queue_temp->comp_pool) {
+			dev_err(&pdev->dev,
+				"unable to allocate channel %d descriptor pool\n",
+				queue_temp->id);
+			dma_free_coherent(&pdev->dev,
+					sizeof(struct fsl_qdma_format) *
+					queue_size[i],
+					queue_temp->cq,
+					queue_temp->bus_addr);
+			goto err;
+		}
+
+		/* Bookkeeping for in-flight and recyclable descriptors. */
+		INIT_LIST_HEAD(&queue_temp->comp_used);
+		INIT_LIST_HEAD(&queue_temp->comp_free);
+		spin_lock_init(&queue_temp->queue_lock);
+	}
+
+	return queue_head;
+
+err:
+	/* Unwind every queue that was fully initialized before index i. */
+	while (--i >= 0) {
+		queue_temp = queue_head + i;
+		dma_pool_destroy(queue_temp->comp_pool);
+		dma_free_coherent(&pdev->dev,
+				sizeof(struct fsl_qdma_format) *
+				queue_size[i],
+				queue_temp->cq,
+				queue_temp->bus_addr);
+	}
+	devm_kfree(&pdev->dev, queue_head);
+	return NULL;
+}
+
+/*
+ * Allocate the single status (completion) queue: a coherent descriptor
+ * ring sized by the "status-sizes" device-tree property.  Returns NULL
+ * on any failure; the struct itself is devm-managed.
+ */
+static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
+						struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_queue *status_head;
+	unsigned int status_size;
+	int ret;
+
+	ret = of_property_read_u32(np, "status-sizes", &status_size);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get status-sizes.\n");
+		return NULL;
+	}
+	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
+			|| status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+		dev_err(&pdev->dev, "Get wrong status_size.\n");
+		return NULL;
+	}
+	status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
+								GFP_KERNEL);
+	if (!status_head)
+		return NULL;
+
+	/*
+	 * Buffer for queue command
+	 */
+	status_head->cq = dma_alloc_coherent(&pdev->dev,
+						sizeof(struct fsl_qdma_format) *
+						status_size,
+						&status_head->bus_addr,
+						GFP_KERNEL);
+	if (!status_head->cq) {
+		devm_kfree(&pdev->dev, status_head);
+		return NULL;
+	}
+
+	status_head->n_cq = status_size;
+	status_head->virt_head = status_head->cq;
+	status_head->virt_tail = status_head->cq;
+	status_head->comp_pool = NULL;	/* status queue needs no comp pool */
+
+	return status_head;
+}
+
+/*
+ * Quiesce the engine: set DMR[DQD] to stop dequeuing, zero every command
+ * queue mode register and the status queue mode register, then poll
+ * DSR[DB] (busy) for up to a few hundred microseconds.
+ *
+ * Returns 0 when idle, -EBUSY if the engine never went idle.
+ */
+static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
+{
+	void __iomem *ctrl = fsl_qdma->ctrl_base;
+	void __iomem *block = fsl_qdma->block_base;
+	int i, count = 5;
+	u32 reg;
+
+	/* Disable the command queue and wait for idle state. */
+	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+	reg |= FSL_QDMA_DMR_DQD;
+	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+	for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
+		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
+
+	while (1) {
+		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
+		if (!(reg & FSL_QDMA_DSR_DB))
+			break;
+		if (count-- < 0)
+			return -EBUSY;
+		udelay(100);
+	}
+
+	/* Disable status queue. */
+	qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
+
+	/*
+	 * Clear the command queue interrupt detect register for all queues.
+	 */
+	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
+
+	return 0;
+}
+
+/*
+ * Drain the status (completion) queue.
+ *
+ * For each status entry, locate the matching in-flight descriptor on the
+ * originating command queue, pop it and complete its cookie.  An entry
+ * whose (queue, address) pair equals the previous entry's is treated as
+ * a hardware-duplicated notification: the status ring is advanced but no
+ * descriptor is completed.
+ *
+ * Returns 0 when the status queue is empty, -EAGAIN when an entry cannot
+ * be matched and is not a duplicate.
+ *
+ * (Rewrite of the original: the mis-indented duplicate-handling block
+ * and its copy of the ring-advance code are folded into one shared path,
+ * and the unused csgf_src variable is gone.)
+ */
+static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
+{
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
+	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
+	struct fsl_qdma_queue *temp_queue;
+	struct fsl_qdma_comp *fsl_comp = NULL;
+	struct fsl_qdma_format *status_addr;
+	struct fsl_pre_status pre;
+	void __iomem *block = fsl_qdma->block_base;
+	u32 reg, i;
+	bool duplicate, dequeued;
+
+	memset(&pre, 0, sizeof(struct fsl_pre_status));
+
+	while (1) {
+		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
+		if (reg & FSL_QDMA_BSQSR_QE)
+			return 0;
+
+		status_addr = fsl_status->virt_head;
+
+		/* Same (queue, addr) as last time: duplicated notification. */
+		duplicate = qdma_ccdf_get_queue(status_addr) == pre.queue &&
+			    qdma_ccdf_addr_get64(status_addr) == pre.addr;
+		i = qdma_ccdf_get_queue(status_addr);
+		pre.queue = qdma_ccdf_get_queue(status_addr);
+		pre.addr = qdma_ccdf_addr_get64(status_addr);
+		temp_queue = fsl_queue + i;
+		dequeued = false;
+
+		spin_lock(&temp_queue->queue_lock);
+		if (list_empty(&temp_queue->comp_used)) {
+			if (!duplicate) {
+				spin_unlock(&temp_queue->queue_lock);
+				return -EAGAIN;
+			}
+		} else {
+			fsl_comp = list_first_entry(&temp_queue->comp_used,
+						    struct fsl_qdma_comp,
+						    list);
+			if (fsl_comp->bus_addr + 16 != pre.addr) {
+				if (!duplicate) {
+					spin_unlock(&temp_queue->queue_lock);
+					return -EAGAIN;
+				}
+			} else {
+				list_del(&fsl_comp->list);
+				dequeued = true;
+			}
+		}
+
+		/* Pop the status entry and advance the status ring. */
+		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
+		reg |= FSL_QDMA_BSQMR_DI;
+		qdma_desc_addr_set64(status_addr, 0x0);
+		fsl_status->virt_head++;
+		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
+			fsl_status->virt_head = fsl_status->cq;
+		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+		spin_unlock(&temp_queue->queue_lock);
+
+		if (!dequeued)
+			continue;
+
+		spin_lock(&fsl_comp->qchan->vchan.lock);
+		vchan_cookie_complete(&fsl_comp->vdesc);
+		fsl_comp->qchan->status = DMA_COMPLETE;
+		spin_unlock(&fsl_comp->qchan->vchan.lock);
+	}
+
+	return 0;
+}
+
+/*
+ * "qdma-error" interrupt handler: report and clear any latched DMA
+ * transaction error.  Returns IRQ_NONE when no error bit is set so the
+ * genirq core can detect spurious interrupts (the original always
+ * returned IRQ_HANDLED).
+ */
+static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
+{
+	struct fsl_qdma_engine *fsl_qdma = dev_id;
+	unsigned int intr;
+	void __iomem *status = fsl_qdma->status_base;
+
+	intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
+
+	/* Nothing latched in the error detect register: not our interrupt. */
+	if (!intr)
+		return IRQ_NONE;
+
+	dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
+
+	/* Write-1-to-clear every latched error condition. */
+	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * "qdma-queue" interrupt handler: reap completed transfers from the
+ * status queue; on failure, quiesce the engine (set DMR[DQD], mask the
+ * queue interrupts) rather than spin on a broken status queue.
+ */
+static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
+{
+	struct fsl_qdma_engine *fsl_qdma = dev_id;
+	unsigned int intr, reg;
+	void __iomem *block = fsl_qdma->block_base;
+	void __iomem *ctrl = fsl_qdma->ctrl_base;
+
+	intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
+
+	/* Status-queue threshold interrupt: drain completions. */
+	if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
+		intr = fsl_qdma_queue_transfer_complete(fsl_qdma);
+
+	/*
+	 * Here 'intr' holds the (negative) return code of the drain
+	 * stuffed into an unsigned variable; any non-zero value means
+	 * the status queue is inconsistent, so stop the engine.
+	 */
+	if (intr != 0) {
+		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+		reg |= FSL_QDMA_DMR_DQD;
+		qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
+		dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
+	}
+
+	/* Acknowledge all queue interrupt detect bits (write-1-to-clear). */
+	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Look up and install the "qdma-error" and "qdma-queue" interrupt
+ * handlers.  Both are devm-managed; fsl_qdma_irq_exit() releases them
+ * early on remove.  Returns 0 or a negative errno.
+ */
+static int
+fsl_qdma_irq_init(struct platform_device *pdev,
+		  struct fsl_qdma_engine *fsl_qdma)
+{
+	int ret;
+
+	fsl_qdma->error_irq = platform_get_irq_byname(pdev,
+							"qdma-error");
+	if (fsl_qdma->error_irq < 0) {
+		dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
+		return fsl_qdma->error_irq;
+	}
+
+	fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");
+	if (fsl_qdma->queue_irq < 0) {
+		dev_err(&pdev->dev, "Can't get qdma queue irq.\n");
+		return fsl_qdma->queue_irq;
+	}
+
+	ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
+			fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
+		return  ret;
+	}
+	ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,
+			fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");
+		return  ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Release both interrupt handlers before tearing the engine down.  The
+ * queue IRQ is always freed; the error IRQ is freed only when it is a
+ * distinct line (the original if/else freed queue_irq in both branches).
+ */
+static void fsl_qdma_irq_exit(
+		struct platform_device *pdev, struct fsl_qdma_engine *fsl_qdma)
+{
+	devm_free_irq(&pdev->dev, fsl_qdma->queue_irq, fsl_qdma);
+	if (fsl_qdma->error_irq != fsl_qdma->queue_irq)
+		devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
+}
+
+/*
+ * Bring the engine into its initial operating state: halt it, program
+ * every command queue's enqueue/dequeue pointers and mode, set up the
+ * status queue and its interrupt coalescing, then clear DMR[DQD] to
+ * start dequeuing.  Returns 0 or the error from fsl_qdma_halt().
+ */
+static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
+{
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
+	struct fsl_qdma_queue *temp;
+	void __iomem *ctrl = fsl_qdma->ctrl_base;
+	void __iomem *status = fsl_qdma->status_base;
+	void __iomem *block = fsl_qdma->block_base;
+	int i, ret;
+	u32 reg;
+
+	/* Try to halt the qDMA engine first. */
+	ret = fsl_qdma_halt(fsl_qdma);
+	if (ret) {
+		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
+		return ret;
+	}
+
+	/*
+	 * Clear the command queue interrupt detect register for all queues.
+	 */
+	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
+
+	for (i = 0; i < fsl_qdma->n_queues; i++) {
+		temp = fsl_queue + i;
+		/*
+		 * Initialize Command Queue registers to point to the first
+		 * command descriptor in memory.
+		 * Dequeue Pointer Address Registers
+		 * Enqueue Pointer Address Registers
+		 */
+		qdma_writel(fsl_qdma, temp->bus_addr,
+				block + FSL_QDMA_BCQDPA_SADDR(i));
+		qdma_writel(fsl_qdma, temp->bus_addr,
+				block + FSL_QDMA_BCQEPA_SADDR(i));
+
+		/* Initialize the queue mode. */
+		reg = FSL_QDMA_BCQMR_EN;
+		reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq)-4);
+		reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq)-6);
+		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
+	}
+
+	/*
+	 * Workaround for erratum: ERR010812.
+	 * We must enable XOFF to avoid the enqueue rejection occurs.
+	 * Setting SQCCMR ENTER_WM to 0x20.
+	 */
+	qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
+			      block + FSL_QDMA_SQCCMR);
+	/*
+	 * Initialize status queue registers to point to the first
+	 * command descriptor in memory.
+	 * Dequeue Pointer Address Registers
+	 * Enqueue Pointer Address Registers
+	 */
+	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
+					block + FSL_QDMA_SQEPAR);
+	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
+					block + FSL_QDMA_SQDPAR);
+	/* Initialize status queue interrupt. */
+	qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
+			      block + FSL_QDMA_BCQIER(0));
+	qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)
+						   | 0x8000,
+			      block + FSL_QDMA_BSQICR);
+	qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,
+			      block + FSL_QDMA_CQIER);
+	/* Initialize controller interrupt register. */
+	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
+	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
+
+	/* Initialize the status queue mode. */
+	reg = FSL_QDMA_BSQMR_EN;
+	reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq)-6);
+	qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+
+	/* Un-halt: clear the dequeue-disable bit to start the engine. */
+	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+	reg &= ~FSL_QDMA_DMR_DQD;
+	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+
+	return 0;
+}
+
+/*
+ * dmaengine device_prep_dma_memcpy callback: build one compound
+ * descriptor for a dst<-src copy of @len bytes and hand it to virt-dma.
+ *
+ * Returns NULL when no descriptor can be allocated (the original
+ * dereferenced the NULL return of fsl_qdma_request_enqueue_desc()).
+ */
+static struct dma_async_tx_descriptor *
+fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+		dma_addr_t src, size_t len, unsigned long flags)
+{
+	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+	struct fsl_qdma_comp *fsl_comp;
+
+	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0);
+	if (!fsl_comp)
+		return NULL;
+
+	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
+
+	return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
+}
+
+/*
+ * Copy the next pending descriptor into the hardware command ring and
+ * kick the engine (BCQMR[EI]).  Called with the queue lock held by
+ * fsl_qdma_issue_pending().  Bails out silently when the queue is full
+ * (QF) or flow-controlled (XOFF), or when nothing is pending.
+ */
+static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+{
+	void __iomem *block = fsl_chan->qdma->block_base;
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_comp *fsl_comp;
+	struct virt_dma_desc *vdesc;
+	u32 reg;
+
+	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
+	if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
+		return;
+	vdesc = vchan_next_desc(&fsl_chan->vchan);
+	if (!vdesc)
+		return;
+	list_del(&vdesc->node);
+	fsl_comp = to_fsl_qdma_comp(vdesc);
+
+	/* Copy the frame descriptor into the ring slot and wrap if needed. */
+	memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr,
+					sizeof(struct fsl_qdma_format));
+	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
+		fsl_queue->virt_head = fsl_queue->cq;
+
+	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
+	/* Ensure the ring write is ordered before the enqueue doorbell. */
+	barrier();
+	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
+	reg |= FSL_QDMA_BCQMR_EI;
+	qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+	fsl_chan->status = DMA_IN_PROGRESS;
+}
+
+/*
+ * dmaengine device_tx_status callback.  No residue reporting is
+ * implemented, so the generic cookie state is all we can return (the
+ * original had a dead branch that returned 'ret' on both paths).
+ */
+static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(chan, cookie, txstate);
+}
+
+/*
+ * virt-dma desc_free callback: recycle the compound descriptor onto its
+ * queue's free list instead of freeing it, so it can be reused later.
+ */
+static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
+{
+	struct fsl_qdma_comp *fsl_comp;
+	struct fsl_qdma_queue *fsl_queue;
+	unsigned long flags;
+
+	fsl_comp = to_fsl_qdma_comp(vdesc);
+	fsl_queue = fsl_comp->qchan->queue;
+
+	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
+	list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
+	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
+}
+
+/*
+ * dmaengine device_issue_pending callback: move submitted descriptors to
+ * the hardware queue.  Lock order is queue_lock then vchan.lock;
+ * fsl_qdma_enqueue_desc() relies on the queue lock being held here.
+ */
+static void fsl_qdma_issue_pending(struct dma_chan *chan)
+{
+	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
+	spin_lock(&fsl_chan->vchan.lock);
+	if (vchan_issue_pending(&fsl_chan->vchan))
+		fsl_qdma_enqueue_desc(fsl_chan);
+	spin_unlock(&fsl_chan->vchan.lock);
+	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
+}
+
+/*
+ * Probe: read the DT topology, allocate queues/channels, map the three
+ * register regions, install interrupts and register with the dmaengine
+ * core before programming the hardware.
+ *
+ * Fixes over the original: the outer shadowed 'fsl_chan' is gone, the
+ * return values of fsl_qdma_pre_request_enqueue_desc() and
+ * dma_set_mask() are checked, and the dma device is unregistered again
+ * when fsl_qdma_reg_init() fails.
+ */
+static int fsl_qdma_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_engine *fsl_qdma;
+	struct resource *res;
+	unsigned int len, chans, queues;
+	int ret, i;
+
+	ret = of_property_read_u32(np, "dma-channels", &chans);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get dma-channels.\n");
+		return ret;
+	}
+
+	/* One engine struct plus a trailing array of 'chans' channels. */
+	len = sizeof(*fsl_qdma) + sizeof(fsl_qdma->chans[0]) * chans;
+	fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	if (!fsl_qdma)
+		return -ENOMEM;
+
+	ret = of_property_read_u32(np, "fsl,queues", &queues);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get queues.\n");
+		return ret;
+	}
+
+	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues);
+	if (!fsl_qdma->queue)
+		return -ENOMEM;
+
+	fsl_qdma->status = fsl_qdma_prep_status_queue(pdev);
+	if (!fsl_qdma->status)
+		return -ENOMEM;
+
+	fsl_qdma->n_chans = chans;
+	fsl_qdma->n_queues = queues;
+	mutex_init(&fsl_qdma->fsl_qdma_mutex);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->ctrl_base))
+		return PTR_ERR(fsl_qdma->ctrl_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->status_base))
+		return PTR_ERR(fsl_qdma->status_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->block_base))
+		return PTR_ERR(fsl_qdma->block_base);
+
+	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+	if (ret)
+		return ret;
+
+	fsl_qdma->feature = of_property_read_bool(np, "big-endian");
+	INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
+	for (i = 0; i < fsl_qdma->n_chans; i++) {
+		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+
+		fsl_chan->qdma = fsl_qdma;
+		/* Distribute the channels round-robin over the queues. */
+		fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues;
+		fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
+		INIT_LIST_HEAD(&fsl_chan->qcomp);
+		vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
+	}
+
+	/* Pre-populate every queue's free descriptor list. */
+	for (i = 0; i < fsl_qdma->n_queues; i++) {
+		ret = fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i);
+		if (ret)
+			return ret;
+	}
+
+	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
+
+	fsl_qdma->dma_dev.dev = &pdev->dev;
+	fsl_qdma->dma_dev.device_free_chan_resources
+		= fsl_qdma_free_chan_resources;
+	fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
+	fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
+	fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
+
+	/* The engine generates 40-bit bus addresses. */
+	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+	if (ret) {
+		dev_err(&pdev->dev, "Can't set dma mask.\n");
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, fsl_qdma);
+
+	ret = dma_async_device_register(&fsl_qdma->dma_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
+		return ret;
+	}
+
+	ret = fsl_qdma_reg_init(fsl_qdma);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+		/* Don't leave a half-initialized engine registered. */
+		dma_async_device_unregister(&fsl_qdma->dma_dev);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Detach every channel from the dma_device and stop its vchan tasklet. */
+static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
+{
+	struct fsl_qdma_chan *chan, *_chan;
+
+	list_for_each_entry_safe(chan, _chan,
+				&dmadev->channels, vchan.chan.device_node) {
+		list_del(&chan->vchan.chan.device_node);
+		tasklet_kill(&chan->vchan.task);
+	}
+}
+
+/*
+ * Remove: tear down in reverse order of probe — interrupts first so no
+ * handler can run, then the vchan tasklets, unregister the dma device,
+ * and finally release all descriptor memory (in-flight and free lists,
+ * per-queue rings and the status ring).
+ */
+static int fsl_qdma_remove(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
+	struct fsl_qdma_queue *queue_temp;
+	struct fsl_qdma_queue *status = fsl_qdma->status;
+	struct fsl_qdma_comp *comp_temp, *_comp_temp;
+	int i;
+
+	fsl_qdma_irq_exit(pdev, fsl_qdma);
+	fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
+	of_dma_controller_free(np);
+	dma_async_device_unregister(&fsl_qdma->dma_dev);
+
+	/* Free descriptor areas */
+	for (i = 0; i < fsl_qdma->n_queues; i++) {
+		queue_temp = fsl_qdma->queue + i;
+		list_for_each_entry_safe(comp_temp, _comp_temp,
+					&queue_temp->comp_used,	list) {
+			dma_pool_free(queue_temp->comp_pool,
+					comp_temp->virt_addr,
+					comp_temp->bus_addr);
+			list_del(&comp_temp->list);
+			kfree(comp_temp);
+		}
+		list_for_each_entry_safe(comp_temp, _comp_temp,
+					&queue_temp->comp_free, list) {
+			dma_pool_free(queue_temp->comp_pool,
+					comp_temp->virt_addr,
+					comp_temp->bus_addr);
+			list_del(&comp_temp->list);
+			kfree(comp_temp);
+		}
+		dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
+					queue_temp->n_cq, queue_temp->cq,
+					queue_temp->bus_addr);
+		dma_pool_destroy(queue_temp->comp_pool);
+	}
+
+	/* The status queue has no comp pool, only its coherent ring. */
+	dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
+				status->n_cq, status->cq, status->bus_addr);
+	return 0;
+}
+
+/* Device-tree match table: LS1021A and compatible qDMA blocks. */
+static const struct of_device_id fsl_qdma_dt_ids[] = {
+	{ .compatible = "fsl,ls1021a-qdma", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
+
+static struct platform_driver fsl_qdma_driver = {
+	.driver		= {
+		.name	= "fsl-qdma",
+		.of_match_table = fsl_qdma_dt_ids,
+	},
+	.probe          = fsl_qdma_probe,
+	.remove		= fsl_qdma_remove,
+};
+
+module_platform_driver(fsl_qdma_driver);
+
+MODULE_ALIAS("platform:fsl-qdma");
+MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
+MODULE_LICENSE("GPL v2");