
[2/2] dma: xilinx: dpdma: Add the Xilinx DisplayPort DMA engine driver

Message ID 1515204848-3493-2-git-send-email-hyun.kwon@xilinx.com (mailing list archive)
State Changes Requested

Commit Message

Hyun Kwon Jan. 6, 2018, 2:14 a.m. UTC
The ZynqMP includes the DisplayPort subsystem with own DMA engine
called DPDMA. The DPDMA IP comes with 6 individual channels
(4 for display, 2 for audio). This driver implements DMAENGINE API
which can be used for audio (ALSA) and display (DRM) drivers.

Signed-off-by: Hyun Kwon <hyun.kwon@xilinx.com>
Signed-off-by: Tejas Upadhyay <tejasu@xilinx.com>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
---
 MAINTAINERS                       |    7 +
 drivers/dma/Kconfig               |   19 +
 drivers/dma/xilinx/Makefile       |    1 +
 drivers/dma/xilinx/xilinx_dpdma.c | 2309 +++++++++++++++++++++++++++++++++++++
 4 files changed, 2336 insertions(+)
 create mode 100644 drivers/dma/xilinx/xilinx_dpdma.c

Comments

Vinod Koul Jan. 12, 2018, 2:13 p.m. UTC | #1
On Fri, Jan 05, 2018 at 06:14:08PM -0800, Hyun Kwon wrote:
> The ZynqMP includes the DisplayPort subsystem with own DMA engine
> called DPDMA. The DPDMA IP comes with 6 individual channels
> (4 for display, 2 for audio). This driver implements DMAENGINE API
> which can be used for audio (ALSA) and display (DRM) drivers.

The subsystem name is dmaengine.

> Signed-off-by: Hyun Kwon <hyun.kwon@xilinx.com>
> Signed-off-by: Tejas Upadhyay <tejasu@xilinx.com>
> Signed-off-by: Michal Simek <michal.simek@xilinx.com>
> ---
>  MAINTAINERS                       |    7 +
>  drivers/dma/Kconfig               |   19 +
>  drivers/dma/xilinx/Makefile       |    1 +
>  drivers/dma/xilinx/xilinx_dpdma.c | 2309 +++++++++++++++++++++++++++++++++++++

That is a very long file for review! Pls split it up to reasonable logical
chunks

> +config XILINX_DPDMA_DEBUG_FS
> +        bool "Xilinx DPDMA debugfs"
> +        depends on DEBUG_FS && XILINX_DPDMA
> +        help
> +          Enable the debugfs code for DPDMA driver. The debugfs code
> +          enables debugging or testing related features. It exposes some
> +          low level controls to the user space to help testing automation,
> +          as well as can enable additional diagnostic or statistical
> +          information.

why should this be a compile option

> + * Xilinx ZynqMP DPDMA Engine driver
> + *
> + *  Copyright (C) 2015 - 2018 Xilinx, Inc.
> + *
> + *  Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
> + *
> + * SPDX-License-Identifier: GPL-2.0

this should be first line in file with c99 style,

// SPDX-License-Identifier: GPL-2.0

> + */
> +
> +#include <linux/bitops.h>
> +#include <linux/clk.h>
> +#include <linux/debugfs.h>
> +#include <linux/delay.h>
> +#include <linux/device.h>
> +#include <linux/dmaengine.h>
> +#include <linux/dmapool.h>
> +#include <linux/gfp.h>
> +#include <linux/interrupt.h>
> +#include <linux/irqreturn.h>
> +#include <linux/module.h>
> +#include <linux/of.h>
> +#include <linux/of_dma.h>
> +#include <linux/platform_device.h>
> +#include <linux/sched.h>
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>
> +#include <linux/types.h>
> +#include <linux/uaccess.h>
> +#include <linux/wait.h>

do you need all these headers?

> +
> +#include "../dmaengine.h"
> +
> +/* DPDMA registers */
> +#define XILINX_DPDMA_ERR_CTRL				0x0
> +#define XILINX_DPDMA_ISR				0x4
> +#define XILINX_DPDMA_IMR				0x8
> +#define XILINX_DPDMA_IEN				0xc
> +#define XILINX_DPDMA_IDS				0x10
> +#define XILINX_DPDMA_INTR_DESC_DONE_MASK		(0x3f << 0)

GENMASK() for this and others

> +#define XILINX_DPDMA_INTR_DESC_DONE_SHIFT		0

you can define a common shift using ffs
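
For reference, a minimal sketch of what that suggestion amounts to, using the
bit positions implied by the open-coded masks in this patch (not part of the
submission itself):

#define XILINX_DPDMA_INTR_DESC_DONE_MASK	GENMASK(5, 0)
#define XILINX_DPDMA_INTR_NO_OSTAND_MASK	GENMASK(11, 6)
#define XILINX_DPDMA_INTR_AXI_ERR_MASK		GENMASK(17, 12)
#define XILINX_DPDMA_INTR_DESC_ERR_MASK		GENMASK(23, 18)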

> + * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
> + * @control: control configuration field
> + * @desc_id: descriptor ID
> + * @xfer_size: transfer size
> + * @hsize_stride: horizontal size and stride
> + * @timestamp_lsb: LSB of time stamp
> + * @timestamp_msb: MSB of time stamp
> + * @addr_ext: upper 16 bit of 48 bit address (next_desc and src_addr)
> + * @next_desc: next descriptor 32 bit address
> + * @src_addr: payload source address (lower 32 bit of 1st 4KB page)
> + * @addr_ext_23: upper 16 bit of 48 bit address (src_addr2 and src_addr3)
> + * @addr_ext_45: upper 16 bit of 48 bit address (src_addr4 and src_addr5)
> + * @src_addr2: payload source address (lower 32 bit of 2nd 4KB page)
> + * @src_addr3: payload source address (lower 32 bit of 3rd 4KB page)
> + * @src_addr4: payload source address (lower 32 bit of 4th 4KB page)
> + * @src_addr5: payload source address (lower 32 bit of 5th 4KB page)

what does it mean (lower 32 bit of Nth 4KB page)

> +struct xilinx_dpdma_sw_desc {
> +	struct xilinx_dpdma_hw_desc hw;
> +	struct list_head node;
> +	dma_addr_t phys;

dma_addr_t is not a physical addr

> +struct xilinx_dpdma_device {
> +	struct dma_device common;
> +	void __iomem *reg;
> +	struct device *dev;
> +
> +	struct clk *axi_clk;
> +	struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];
> +
> +	bool ext_addr;
> +	void (*desc_addr)(struct xilinx_dpdma_sw_desc *sw_desc,
> +			  struct xilinx_dpdma_sw_desc *prev,
> +			  dma_addr_t dma_addr[], unsigned int num_src_addr);
> +};
> +
> +#ifdef CONFIG_XILINX_DPDMA_DEBUG_FS

this can be CONFIG_DEBUGFS, also am skipping this part to focus on driver.
Pls split this to a separate patch

> +static int xilinx_dpdma_config(struct dma_chan *dchan,
> +			       struct dma_slave_config *config)
> +{
> +	if (config->direction != DMA_MEM_TO_DEV)
> +		return -EINVAL;

?? Why are the config values not used, how else do you configure the
channel?

> +
> +	return 0;
> +}
> +
> +static int xilinx_dpdma_pause(struct dma_chan *dchan)
> +{
> +	xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));
> +
> +	return 0;
> +}
> +
> +static int xilinx_dpdma_resume(struct dma_chan *dchan)
> +{
> +	xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));
> +
> +	return 0;
> +}
> +
> +static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
> +{
> +	return xilinx_dpdma_chan_terminate_all(to_xilinx_chan(dchan));
> +}
> +
> +static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
> +{
> +	xilinx_dpdma_chan_synchronize(to_xilinx_chan(dchan));
> +}

why have these wrappers which do not do anything?

> +
> +	chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE + XILINX_DPDMA_CH_OFFSET *
> +		    chan->id;
> +	chan->status = IDLE;
> +
> +	spin_lock_init(&chan->lock);
> +	INIT_LIST_HEAD(&chan->done_list);
> +	init_waitqueue_head(&chan->wait_to_stop);
> +
> +	tasklet_init(&chan->done_task, xilinx_dpdma_chan_done_task,
> +		     (unsigned long)chan);
> +	tasklet_init(&chan->err_task, xilinx_dpdma_chan_err_task,
> +		     (unsigned long)chan);

Have you checked the vchan code, i dont see that used. It will help you in
descriptor management and reduce code from driver
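
For reference, a rough sketch of the virt-dma shape being suggested, reusing
the names from this patch; the helpers are the ones in drivers/dma/virt-dma.h,
which then own the descriptor lists, cookie handling and the completion
tasklet:

#include <linux/slab.h>

#include "../virt-dma.h"

struct xilinx_dpdma_tx_desc {
	struct virt_dma_desc vdesc;	/* replaces async_tx and node */
	struct list_head descriptors;	/* sw descriptor chain, as before */
};

struct xilinx_dpdma_chan {
	struct virt_dma_chan vchan;	/* replaces common, lock, done_list, done_task */
	struct xilinx_dpdma_tx_desc *active_desc;
};

static void xilinx_dpdma_desc_free(struct virt_dma_desc *vdesc)
{
	/* called by the vchan core once a descriptor is fully completed;
	 * the sw descriptor chain would be returned to the dma_pool here too
	 */
	kfree(container_of(vdesc, struct xilinx_dpdma_tx_desc, vdesc));
}

static void xilinx_dpdma_chan_init(struct xilinx_dpdma_chan *chan,
				   struct dma_device *ddev)
{
	chan->vchan.desc_free = xilinx_dpdma_desc_free;
	vchan_init(&chan->vchan, ddev);	/* sets up the lists and the tasklet */
}

static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{
	/* completion path, called with chan->vchan.lock held; the core then
	 * assigns completion and runs the client callback from its tasklet
	 */
	if (chan->active_desc)
		vchan_cookie_complete(&chan->active_desc->vdesc);
}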

> +static int xilinx_dpdma_remove(struct platform_device *pdev)
> +{
> +	struct xilinx_dpdma_device *xdev;
> +	unsigned int i;
> +
> +	xdev = platform_get_drvdata(pdev);
> +
> +	xilinx_dpdma_disable_intr(xdev);
> +	of_dma_controller_free(pdev->dev.of_node);
> +	dma_async_device_unregister(&xdev->common);
> +	clk_disable_unprepare(xdev->axi_clk);
> +
> +	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
> +		if (xdev->chan[i])
> +			xilinx_dpdma_chan_remove(xdev->chan[i]);

At this point your irq is still enabled and can fire, and can schedule
tasklet.. Are you sure you are okay with that?

> +MODULE_AUTHOR("Xilinx, Inc.");
> +MODULE_DESCRIPTION("Xilinx DPDMA driver");
> +MODULE_LICENSE("GPL v2");

No MODULE_ALIAS()?

I haven't reviewed in details though as it is too large patch. Pls use vchan
and split things up for better review.
Hyun Kwon Jan. 23, 2018, 5:03 p.m. UTC | #2
Hi Vinod,

Thanks for the review.

> -----Original Message-----
> From: dmaengine-owner@vger.kernel.org [mailto:dmaengine-
> owner@vger.kernel.org] On Behalf Of Vinod Koul
> Sent: Friday, January 12, 2018 6:14 AM
> To: Hyun Kwon <hyunk@xilinx.com>
> Cc: dmaengine@vger.kernel.org; devicetree@vger.kernel.org; Michal Simek
> <michal.simek@xilinx.com>; Tejas Upadhyay <TEJASU@xilinx.com>
> Subject: Re: [PATCH 2/2] dma: xilinx: dpdma: Add the Xilinx DisplayPort DMA
> engine driver
>
> That is a very long file for review! Pls split it up to reasonable logical
> chunks

Sure. Will split.

> why should this be a compile option

This debugfs code is for testing / regression, and we don't want to enable it
for regular users.

> this should be first line in file with c99 style,
>
> // SPDX-License-Identifier: GPL-2.0

Agreed. Will change.

> do you need all these headers?

I directly included all headers that are used in this driver. Some of them can
be removed from indirect inclusions, and I'm fine with that. Please let me know
if that's your preference.

> GENMASK() for this and others

Agreed. Will fix.

> you can define a common shift using ffs

I guess you mean to replace, (value & MASK) << SHIFT, with
(value & MASK) << ffs(MASK). I'll change to that way. Let me know otherwise.

> what does it mean (lower 32 bit of Nth 4KB page)

Each descriptor can point up to 5 - 48bit address payloads. src_addr* fields
contain lower 32bit of 48bit address. Remaining upper 16bit is programmed in
addr_ext* fields.

> dma_addr_t is not a physical addr

Agreed. Will rename.

> this can be CONFIG_DEBUGFS, also am skipping this part to focus on driver.
> Pls split this to a separate patch

The intention was to enable debugfs code of this module individually. I'll
split into a separate patch, so it can be reviewed better.

> ?? Why are the config values not used, how else do you configure the
> channel?

There's not much configuration to be done. Channels are pretty much identical
with fixed configurations.

> why have these wrappers which do not do anything?

It's just my personal preference to split into different code partitions, and
each section is partitioned / grouped with some comment line. :-)
Ex, a partition for struct dma_chan, and another one for
struct xilinx_dpdma_chan. It gives me sort of abstracted view. But it may be
just me, and it comes with extra indirections. I'll remove unnecessary wrappers.

> Have you checked the vchan code, i dont see that used. It will help you in
> descriptor management and reduce code from driver

Thanks for the pointer. I'll take a look.

> At this point your irq is still enabled and can fire, and can schedule
> tasklet.. Are you sure you are okay with that?

Ok. You mean that an interrupt can occur right before
xilinx_dpdma_disable_intr(), and the interrupt may be handled in the middle or
after removing this driver. I'll switch to request_irq() from
devm_request_irq(), and remove the irq when removing the driver.

> No MODULE_ALIAS()?

Is it required? Could you please elaborate, to help my understanding?

> I haven't reviewed in details though as it is too large patch. Pls use vchan
> and split things up for better review.

Sure will do.

Thanks,
-hyun
Vinod Koul Jan. 24, 2018, 2:40 p.m. UTC | #3
On Tue, Jan 23, 2018 at 05:03:19PM +0000, Hyun Kwon wrote:

> > why should this be a compile option
> > 
> 
> This debugfs code is for testing / regression, and we don't want to enable it
> for regular users.

Right and that is why you have CONFIG_DEBUGFS, why not use that?
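
In other words, something along these lines (sketch only), with no
driver-specific Kconfig symbol:

#ifdef CONFIG_DEBUG_FS
static int xilinx_dpdma_debugfs_init(struct device *dev)
{
	/* create the "dpdma" directory and "testcase" file as in this patch */
	return 0;
}
#else
static int xilinx_dpdma_debugfs_init(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */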

> > do you need all these headers?
> > 
> 
> I directly included all headers that are used in this driver. Some of them can
> be removed from indirect inclusions, and I'm fine with that. Please let me know
> if that's your preference.

Yes pls remove the ones that are needed

> > > +#define XILINX_DPDMA_INTR_DESC_DONE_SHIFT		0
> > 
> > you can define a common shift using ffs
> > 
> 
> I guess you mean to replace, (value & MASK) << SHIFT, with
> (value & MASK) << ffs(MASK). I'll change to that way. Let me know otherwise.

yes and you may use the ones in bitfield.h
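
A small sketch of the bitfield.h variant, assuming the field layouts from this
patch; FIELD_PREP()/FIELD_GET() derive the shift from the mask, so the *_SHIFT
macros can go away:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define XILINX_DPDMA_DESC_ID_MASK		GENMASK(15, 0)
#define XILINX_DPDMA_INTR_DESC_DONE_MASK	GENMASK(5, 0)

static u32 dpdma_pack_desc_id(u32 cookie)
{
	/* place the cookie in the descriptor ID field, no explicit shift */
	return FIELD_PREP(XILINX_DPDMA_DESC_ID_MASK, cookie);
}

static u32 dpdma_desc_done_bits(u32 status)
{
	/* pull the per-channel DESC_DONE bits out of the status word */
	return FIELD_GET(XILINX_DPDMA_INTR_DESC_DONE_MASK, status);
}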

> > what does it mean (lower 32 bit of Nth 4KB page)
> > 
> 
> Each descriptor can point up to 5 - 48bit address payloads. src_addr* fields
> contain lower 32bit of 48bit address. Remaining upper 16bit is programmed in
> addr_ext* fields.

pls document this
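
Put differently, for a single 48 bit payload address (illustration only, not
the submitted code; the extension bits for the first source address are
assumed to sit in bits 31:16 of addr_ext):

static void dpdma_desc_set_src_addr0(struct xilinx_dpdma_hw_desc *hw,
				     dma_addr_t addr)
{
	hw->src_addr  = lower_32_bits(addr);			/* address bits 31:0 */
	hw->addr_ext |= (upper_32_bits(addr) & 0xffff) << 16;	/* address bits 47:32 */
}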

> > > +static int xilinx_dpdma_config(struct dma_chan *dchan,
> > > +			       struct dma_slave_config *config)
> > > +{
> > > +	if (config->direction != DMA_MEM_TO_DEV)
> > > +		return -EINVAL;
> > 
> > ?? Why are the config values not used, how else do you configure the
> > channel?
> > 
> 
> There's not much configuration to be done. Channels are pretty much identical
> with fixed configurations.

hmmm, you do support DMA_SLAVE right?

> > why have these wrappers which do not do anything?
> > 
> 
> It's just my personal preference to split into different code partitions, and
> each section is partitioned / grouped with some comment line. :-)
> Ex, a partition for struct dma_chan, and another one for
> struct xilinx_dpdma_chan. It gives me sort of abstracted view. But it may be
> just me, and it comes with extra indirections. I'll remove unnecessary wrappers.

wrapper without any logic dont help

> > > +	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
> > > +		if (xdev->chan[i])
> > > +			xilinx_dpdma_chan_remove(xdev->chan[i]);
> > 
> > At this point your irq is still enabled and can fire, and can schedule
> > tasklet.. Are you sure you are okay with that?
> > 
> 
> Ok. You mean that an interrupt can occur right before
> xilinx_dpdma_disable_intr(), and the interrupt may be handled  in the middle or
> after removing this driver. I'll switch to request_irq() from
> devm_request_irq(), and remove the irq when removing the driver.

yes and ensure tasklets are quiesced
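
One possible remove() ordering that follows from this (hedged sketch; the
xdev->irq field and a matching request_irq() in probe are assumptions, not
what the posted patch does):

static int xilinx_dpdma_remove(struct platform_device *pdev)
{
	struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev);
	unsigned int i;

	xilinx_dpdma_disable_intr(xdev);
	free_irq(xdev->irq, xdev);	/* no new interrupts from this point on */

	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++) {
		if (!xdev->chan[i])
			continue;
		tasklet_kill(&xdev->chan[i]->done_task);
		tasklet_kill(&xdev->chan[i]->err_task);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
		if (xdev->chan[i])
			xilinx_dpdma_chan_remove(xdev->chan[i]);

	clk_disable_unprepare(xdev->axi_clk);

	return 0;
}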

> > > +MODULE_AUTHOR("Xilinx, Inc.");
> > > +MODULE_DESCRIPTION("Xilinx DPDMA driver");
> > > +MODULE_LICENSE("GPL v2");
> > 
> > No MODULE_ALIAS()?
> 
> Is it required? Could you please elaborate, to help my understanding? 

did you try builing as modules and testing this?
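
For illustration, two common ways a platform driver module gets autoloaded;
the compatible string below is assumed, not taken from the posted bindings:

static const struct of_device_id xilinx_dpdma_of_match[] = {
	{ .compatible = "xlnx,zynqmp-dpdma" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);

/* or, for non-OF platform matching, next to the other MODULE_* macros */
MODULE_ALIAS("platform:xilinx-dpdma");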
Hyun Kwon Jan. 24, 2018, 5:45 p.m. UTC | #4
> -----Original Message-----
> From: Vinod Koul [mailto:vinod.koul@intel.com]
> Sent: Wednesday, January 24, 2018 6:41 AM
> To: Hyun Kwon <hyunk@xilinx.com>
> Cc: dmaengine@vger.kernel.org; devicetree@vger.kernel.org; Michal Simek
> <michal.simek@xilinx.com>; Tejas Upadhyay <TEJASU@xilinx.com>
> Subject: Re: [PATCH 2/2] dma: xilinx: dpdma: Add the Xilinx DisplayPort
> DMA engine driver
>
> On Tue, Jan 23, 2018 at 05:03:19PM +0000, Hyun Kwon wrote:
>
> > > why should this be a compile option
> > >
> >
> > This debugfs code is for testing / regression, and we don't want to enable it
> > for regular users.
>
> Right and that is why you have CONFIG_DEBUGFS, why not use that?
>
> > > do you need all these headers?
> > >
> >
> > I directly included all headers that are used in this driver. Some of them can
> > be removed from indirect inclusions, and I'm fine with that. Please let me know
> > if that's your preference.
>
> Yes pls remove the ones that are needed
>
> > > > +#define XILINX_DPDMA_INTR_DESC_DONE_SHIFT		0
> > >
> > > you can define a common shift using ffs
> > >
> >
> > I guess you mean to replace, (value & MASK) << SHIFT, with
> > (value & MASK) << ffs(MASK). I'll change to that way. Let me know otherwise.
>
> yes and you may use the ones in bitfield.h
>
> > > what does it mean (lower 32 bit of Nth 4KB page)
> > >
> >
> > Each descriptor can point up to 5 - 48bit address payloads. src_addr* fields
> > contain lower 32bit of 48bit address. Remaining upper 16bit is programmed in
> > addr_ext* fields.
>
> pls document this
>
> > > > +static int xilinx_dpdma_config(struct dma_chan *dchan,
> > > > +			       struct dma_slave_config *config)
> > > > +{
> > > > +	if (config->direction != DMA_MEM_TO_DEV)
> > > > +		return -EINVAL;
> > >
> > > ?? Why are the config values not used, how else do you configure the
> > > channel?
> > >
> >
> > There's not much configuration to be done. Channels are pretty much identical
> > with fixed configurations.
>
> hmmm, you do support DMA_SLAVE right?
>

Yes, and it's single direction, DMA_MEM_TO_DEV.

> > > why have these wrappers which do not do anything?
> > >
> >
> > It's just my personal preference to split into different code partitions, and
> > each section is partitioned / grouped with some comment line. :-)
> > Ex, a partition for struct dma_chan, and another one for
> > struct xilinx_dpdma_chan. It gives me sort of abstracted view. But it may be
> > just me, and it comes with extra indirections. I'll remove unnecessary wrappers.
>
> wrapper without any logic dont help
>
> > > > +	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
> > > > +		if (xdev->chan[i])
> > > > +			xilinx_dpdma_chan_remove(xdev->chan[i]);
> > >
> > > At this point your irq is still enabled and can fire, and can schedule
> > > tasklet.. Are you sure you are okay with that?
> > >
> >
> > Ok. You mean that an interrupt can occur right before
> > xilinx_dpdma_disable_intr(), and the interrupt may be handled in the middle or
> > after removing this driver. I'll switch to request_irq() from
> > devm_request_irq(), and remove the irq when removing the driver.
>
> yes and ensure tasklets are quiesced
>
> > > > +MODULE_AUTHOR("Xilinx, Inc.");
> > > > +MODULE_DESCRIPTION("Xilinx DPDMA driver");
> > > > +MODULE_LICENSE("GPL v2");
> > >
> > > No MODULE_ALIAS()?
> >
> > Is it required? Could you please elaborate, to help my understanding?
>
> did you try builing as modules and testing this?

I'm testing it as a loadable kernel module.

I'll address rest of your comments.

Thanks,
-hyun

>
> --
> ~Vinod
Vinod Koul Jan. 29, 2018, 4:19 a.m. UTC | #5
On Wed, Jan 24, 2018 at 05:45:00PM +0000, Hyun Kwon wrote:

> > > > > +static int xilinx_dpdma_config(struct dma_chan *dchan,
> > > > > +			       struct dma_slave_config *config)
> > > > > +{
> > > > > +	if (config->direction != DMA_MEM_TO_DEV)
> > > > > +		return -EINVAL;
> > > >
> > > > ?? Why are the config values not used, how else do you configure the
> > > > channel?
> > > >
> > >
> > > There's not much configuration to be done. Channels are pretty much
> > identical
> > > with fixed configurations.
> > 
> > hmmm, you do support DMA_SLAVE right?
> > 
> 
> Yes, and it's single direction, DMA_MEM_TO_DEV.

Okay how do you find the DMA data width and the burst to apply when you DMA
to device? Are these not programmable?

I think you need to go back and check that part. Moreover a client may send
you parameters and program the Device FIFO, you not honoring those will
result in bad data received on Device, am sure you don't want that
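
To make the point concrete, a hedged sketch of both sides of that contract;
the chan->config member is an assumption, and the field names are the standard
dma_slave_config ones:

static int xilinx_dpdma_config(struct dma_chan *dchan,
			       struct dma_slave_config *config)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);

	if (config->direction != DMA_MEM_TO_DEV)
		return -EINVAL;

	/* remember what the client asked for and apply it when starting */
	chan->config = *config;	/* dst_addr_width, dst_maxburst, dst_addr, ... */

	return 0;
}

/* client side, e.g. in the DisplayPort DRM or ALSA driver */
static int client_setup(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
	};

	return dmaengine_slave_config(chan, &cfg);
}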

Patch

diff --git a/MAINTAINERS b/MAINTAINERS
index a6e86e2..2f1a16f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -15005,6 +15005,13 @@  F:	Documentation/devicetree/bindings/media/xilinx/
 F:	drivers/media/platform/xilinx/
 F:	include/uapi/linux/xilinx-v4l2-controls.h
 
+Xilinx ZynqMP DPDMA DRIVER
+M:	Hyun Kwon <hyun.kwon@xilinx.com>
+L:	dmaengine@vger.kernel.org
+S:	Supported
+F:	Documentation/devicetree/bindings/dma/xilinx/xilinx_dpdma.txt
+F:	drivers/dma/xilinx_dpdma.c
+
 XILLYBUS DRIVER
 M:	Eli Billauer <eli.billauer@gmail.com>
 L:	linux-kernel@vger.kernel.org
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 27df3e2..5120238 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -614,6 +614,25 @@  config XILINX_DMA
 	  AXI DMA engine provides high-bandwidth one dimensional direct
 	  memory access between memory and AXI4-Stream target peripherals.
 
+config XILINX_DPDMA
+	tristate "Xilinx DPDMA Engine"
+	select DMA_ENGINE
+	help
+	  Enable support for Xilinx ZynqMP DisplayPort DMA. Choose this option
+	  if you have a Xilinx ZynqMP SoC with DisplayPort subsystem
+	  DMA engines. The driver provides the dmaengine functionalities
+	  for the DisplayPort DMA engines.
+
+config XILINX_DPDMA_DEBUG_FS
+        bool "Xilinx DPDMA debugfs"
+        depends on DEBUG_FS && XILINX_DPDMA
+        help
+          Enable the debugfs code for DPDMA driver. The debugfs code
+          enables debugging or testing related features. It exposes some
+          low level controls to the user space to help testing automation,
+          as well as can enable additional diagnostic or statistical
+          information.
+
 config XILINX_ZYNQMP_DMA
 	tristate "Xilinx ZynqMP DMA Engine"
 	depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
index 9e91f8f..679c4b8 100644
--- a/drivers/dma/xilinx/Makefile
+++ b/drivers/dma/xilinx/Makefile
@@ -1,2 +1,3 @@ 
 obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
+obj-$(CONFIG_XILINX_DPDMA) += xilinx_dpdma.o
 obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
new file mode 100644
index 0000000..8a5646e
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -0,0 +1,2309 @@ 
+/*
+ * Xilinx ZynqMP DPDMA Engine driver
+ *
+ *  Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ *  Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "../dmaengine.h"
+
+/* DPDMA registers */
+#define XILINX_DPDMA_ERR_CTRL				0x0
+#define XILINX_DPDMA_ISR				0x4
+#define XILINX_DPDMA_IMR				0x8
+#define XILINX_DPDMA_IEN				0xc
+#define XILINX_DPDMA_IDS				0x10
+#define XILINX_DPDMA_INTR_DESC_DONE_MASK		(0x3f << 0)
+#define XILINX_DPDMA_INTR_DESC_DONE_SHIFT		0
+#define XILINX_DPDMA_INTR_NO_OSTAND_MASK		(0x3f << 6)
+#define XILINX_DPDMA_INTR_NO_OSTAND_SHIFT		6
+#define XILINX_DPDMA_INTR_AXI_ERR_MASK			(0x3f << 12)
+#define XILINX_DPDMA_INTR_AXI_ERR_SHIFT			12
+#define XILINX_DPDMA_INTR_DESC_ERR_MASK			(0x3f << 18)
+#define XILINX_DPDMA_INTR_DESC_ERR_SHIFT		16
+#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL		BIT(24)
+#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL		BIT(25)
+#define XILINX_DPDMA_INTR_AXI_4K_CROSS			BIT(26)
+#define XILINX_DPDMA_INTR_VSYNC				BIT(27)
+#define XILINX_DPDMA_INTR_CHAN_ERR_MASK			0x41000
+#define XILINX_DPDMA_INTR_CHAN_ERR			0xfff000
+#define XILINX_DPDMA_INTR_GLOBAL_ERR			0x7000000
+#define XILINX_DPDMA_INTR_ERR_ALL			0x7fff000
+#define XILINX_DPDMA_INTR_CHAN_MASK			0x41041
+#define XILINX_DPDMA_INTR_GLOBAL_MASK			0xf000000
+#define XILINX_DPDMA_INTR_ALL				0xfffffff
+#define XILINX_DPDMA_EISR				0x14
+#define XILINX_DPDMA_EIMR				0x18
+#define XILINX_DPDMA_EIEN				0x1c
+#define XILINX_DPDMA_EIDS				0x20
+#define XILINX_DPDMA_EINTR_INV_APB			BIT(0)
+#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK		(0x3f << 1)
+#define XILINX_DPDMA_EINTR_RD_AXI_ERR_SHIFT		1
+#define XILINX_DPDMA_EINTR_PRE_ERR_MASK			(0x3f << 7)
+#define XILINX_DPDMA_EINTR_PRE_ERR_SHIFT		7
+#define XILINX_DPDMA_EINTR_CRC_ERR_MASK			(0x3f << 13)
+#define XILINX_DPDMA_EINTR_CRC_ERR_SHIFT		13
+#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK		(0x3f << 19)
+#define XILINX_DPDMA_EINTR_WR_AXI_ERR_SHIFT		19
+#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK		(0x3f << 25)
+#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_SHIFT		25
+#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL		BIT(32)
+#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK		0x2082082
+#define XILINX_DPDMA_EINTR_CHAN_ERR			0x7ffffffe
+#define XILINX_DPDMA_EINTR_GLOBAL_ERR			0x80000001
+#define XILINX_DPDMA_EINTR_ALL				0xffffffff
+#define XILINX_DPDMA_CNTL				0x100
+#define XILINX_DPDMA_GBL				0x104
+#define XILINX_DPDMA_GBL_TRIG_SHIFT			0
+#define XILINX_DPDMA_GBL_RETRIG_SHIFT			6
+#define XILINX_DPDMA_ALC0_CNTL				0x108
+#define XILINX_DPDMA_ALC0_STATUS			0x10c
+#define XILINX_DPDMA_ALC0_MAX				0x110
+#define XILINX_DPDMA_ALC0_MIN				0x114
+#define XILINX_DPDMA_ALC0_ACC				0x118
+#define XILINX_DPDMA_ALC0_ACC_TRAN			0x11c
+#define XILINX_DPDMA_ALC1_CNTL				0x120
+#define XILINX_DPDMA_ALC1_STATUS			0x124
+#define XILINX_DPDMA_ALC1_MAX				0x128
+#define XILINX_DPDMA_ALC1_MIN				0x12c
+#define XILINX_DPDMA_ALC1_ACC				0x130
+#define XILINX_DPDMA_ALC1_ACC_TRAN			0x134
+
+/* Channel register */
+#define XILINX_DPDMA_CH_BASE				0x200
+#define XILINX_DPDMA_CH_OFFSET				0x100
+#define XILINX_DPDMA_CH_DESC_START_ADDRE		0x0
+#define XILINX_DPDMA_CH_DESC_START_ADDR			0x4
+#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE			0x8
+#define XILINX_DPDMA_CH_DESC_NEXT_ADDR			0xc
+#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE			0x10
+#define XILINX_DPDMA_CH_PYLD_CUR_ADDR			0x14
+#define XILINX_DPDMA_CH_CNTL				0x18
+#define XILINX_DPDMA_CH_CNTL_ENABLE			BIT(0)
+#define XILINX_DPDMA_CH_CNTL_PAUSE			BIT(1)
+#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_SHIFT		2
+#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_SHIFT		6
+#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_SHIFT		10
+#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS		11
+#define XILINX_DPDMA_CH_STATUS				0x1c
+#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK		(0xf << 21)
+#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_SHIFT		21
+#define XILINX_DPDMA_CH_VDO				0x20
+#define XILINX_DPDMA_CH_PYLD_SZ				0x24
+#define XILINX_DPDMA_CH_DESC_ID				0x28
+
+/* DPDMA descriptor fields */
+#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE		(0xa5)
+#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR		BIT(8)
+#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE		BIT(9)
+#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE		BIT(10)
+#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE		BIT(18)
+#define XILINX_DPDMA_DESC_CONTROL_LAST			BIT(19)
+#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC		BIT(20)
+#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME		BIT(21)
+#define XILINX_DPDMA_DESC_ID_MASK			(0xffff << 0)
+#define XILINX_DPDMA_DESC_ID_SHIFT			(0)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK	(0x3ffff << 0)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT	(0)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK	(0x3fff << 18)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT	(18)
+#define XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK		(0xfff)
+#define XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT		(16)
+
+#define XILINX_DPDMA_ALIGN_BYTES			256
+
+#define XILINX_DPDMA_NUM_CHAN				6
+#define XILINX_DPDMA_PAGE_MASK				((1 << 12) - 1)
+#define XILINX_DPDMA_PAGE_SHIFT				12
+
+/**
+ * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
+ * @control: control configuration field
+ * @desc_id: descriptor ID
+ * @xfer_size: transfer size
+ * @hsize_stride: horizontal size and stride
+ * @timestamp_lsb: LSB of time stamp
+ * @timestamp_msb: MSB of time stamp
+ * @addr_ext: upper 16 bit of 48 bit address (next_desc and src_addr)
+ * @next_desc: next descriptor 32 bit address
+ * @src_addr: payload source address (lower 32 bit of 1st 4KB page)
+ * @addr_ext_23: upper 16 bit of 48 bit address (src_addr2 and src_addr3)
+ * @addr_ext_45: upper 16 bit of 48 bit address (src_addr4 and src_addr5)
+ * @src_addr2: payload source address (lower 32 bit of 2nd 4KB page)
+ * @src_addr3: payload source address (lower 32 bit of 3rd 4KB page)
+ * @src_addr4: payload source address (lower 32 bit of 4th 4KB page)
+ * @src_addr5: payload source address (lower 32 bit of 5th 4KB page)
+ * @crc: descriptor CRC
+ */
+struct xilinx_dpdma_hw_desc {
+	u32 control;
+	u32 desc_id;
+	u32 xfer_size;
+	u32 hsize_stride;
+	u32 timestamp_lsb;
+	u32 timestamp_msb;
+	u32 addr_ext;
+	u32 next_desc;
+	u32 src_addr;
+	u32 addr_ext_23;
+	u32 addr_ext_45;
+	u32 src_addr2;
+	u32 src_addr3;
+	u32 src_addr4;
+	u32 src_addr5;
+	u32 crc;
+} __aligned(XILINX_DPDMA_ALIGN_BYTES);
+
+/**
+ * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
+ * @hw: DPDMA hardware descriptor
+ * @node: list node for software descriptors
+ * @phys: physical address of the software descriptor
+ */
+struct xilinx_dpdma_sw_desc {
+	struct xilinx_dpdma_hw_desc hw;
+	struct list_head node;
+	dma_addr_t phys;
+};
+
+/**
+ * enum xilinx_dpdma_tx_desc_status - DPDMA tx descriptor status
+ * @PREPARED: descriptor is prepared for transaction
+ * @ACTIVE: transaction is (being) done successfully
+ * @ERRORED: descriptor generates some errors
+ */
+enum xilinx_dpdma_tx_desc_status {
+	PREPARED,
+	ACTIVE,
+	ERRORED
+};
+
+/**
+ * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
+ * @async_tx: DMA async transaction descriptor
+ * @descriptors: list of software descriptors
+ * @node: list node for transaction descriptors
+ * @status: tx descriptor status
+ * @done_cnt: number of complete notification to deliver
+ */
+struct xilinx_dpdma_tx_desc {
+	struct dma_async_tx_descriptor async_tx;
+	struct list_head descriptors;
+	struct list_head node;
+	enum xilinx_dpdma_tx_desc_status status;
+	unsigned int done_cnt;
+};
+
+/**
+ * enum xilinx_dpdma_chan_id - DPDMA channel ID
+ * @VIDEO0: video 1st channel
+ * @VIDEO1: video 2nd channel for multi plane yuv formats
+ * @VIDEO2: video 3rd channel for multi plane yuv formats
+ * @GRAPHICS: graphics channel
+ * @AUDIO0: 1st audio channel
+ * @AUDIO1: 2nd audio channel
+ */
+enum xilinx_dpdma_chan_id {
+	VIDEO0,
+	VIDEO1,
+	VIDEO2,
+	GRAPHICS,
+	AUDIO0,
+	AUDIO1
+};
+
+/**
+ * enum xilinx_dpdma_chan_status - DPDMA channel status
+ * @IDLE: idle state
+ * @STREAMING: actively streaming state
+ */
+enum xilinx_dpdma_chan_status {
+	IDLE,
+	STREAMING
+};
+
+/*
+ * DPDMA descriptor placement
+ * --------------------------
+ * DPDMA descriptor life time is described with following placements:
+ *
+ * allocated_desc -> submitted_desc -> pending_desc -> active_desc -> done_list
+ *
+ * Transition is triggered as following:
+ *
+ * -> allocated_desc : a descriptor allocation
+ * allocated_desc -> submitted_desc: a descriptor submission
+ * submitted_desc -> pending_desc: request to issue pending a descriptor
+ * pending_desc -> active_desc: VSYNC intr when a desc is scheduled to DPDMA
+ * active_desc -> done_list: VSYNC intr when DPDMA switches to a new desc
+ */
+
+/**
+ * struct xilinx_dpdma_chan - DPDMA channel
+ * @common: generic dma channel structure
+ * @reg: register base address
+ * @id: channel ID
+ * @wait_to_stop: queue to wait for outstanding transactions before stopping
+ * @status: channel status
+ * @first_frame: flag for the first frame of stream
+ * @video_group: flag if multi-channel operation is needed for video channels
+ * @lock: lock to access struct xilinx_dpdma_chan
+ * @desc_pool: descriptor allocation pool
+ * @done_task: done IRQ bottom half handler
+ * @err_task: error IRQ bottom half handler
+ * @allocated_desc: allocated descriptor
+ * @submitted_desc: submitted descriptor
+ * @pending_desc: pending descriptor to be scheduled in next period
+ * @active_desc: descriptor that the DPDMA channel is active on
+ * @done_list: done descriptor list
+ * @xdev: DPDMA device
+ */
+struct xilinx_dpdma_chan {
+	struct dma_chan common;
+	void __iomem *reg;
+	enum xilinx_dpdma_chan_id id;
+
+	wait_queue_head_t wait_to_stop;
+	enum xilinx_dpdma_chan_status status;
+	bool first_frame;
+	bool video_group;
+
+	spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */
+	struct dma_pool *desc_pool;
+	struct tasklet_struct done_task;
+	struct tasklet_struct err_task;
+
+	struct xilinx_dpdma_tx_desc *allocated_desc;
+	struct xilinx_dpdma_tx_desc *submitted_desc;
+	struct xilinx_dpdma_tx_desc *pending_desc;
+	struct xilinx_dpdma_tx_desc *active_desc;
+	struct list_head done_list;
+
+	struct xilinx_dpdma_device *xdev;
+};
+
+/**
+ * struct xilinx_dpdma_device - DPDMA device
+ * @common: generic dma device structure
+ * @reg: register base address
+ * @dev: generic device structure
+ * @axi_clk: axi clock
+ * @chan: DPDMA channels
+ * @ext_addr: flag for 64 bit system (48 bit addressing)
+ * @desc_addr: descriptor addressing callback (32 bit vs 64 bit)
+ */
+struct xilinx_dpdma_device {
+	struct dma_device common;
+	void __iomem *reg;
+	struct device *dev;
+
+	struct clk *axi_clk;
+	struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];
+
+	bool ext_addr;
+	void (*desc_addr)(struct xilinx_dpdma_sw_desc *sw_desc,
+			  struct xilinx_dpdma_sw_desc *prev,
+			  dma_addr_t dma_addr[], unsigned int num_src_addr);
+};
+
+#ifdef CONFIG_XILINX_DPDMA_DEBUG_FS
+#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE	32
+#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR	"65535"
+#define IN_RANGE(x, min, max) ({		\
+		typeof(x) _x = (x);		\
+		_x >= (min) && _x <= (max); })
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+enum xilinx_dpdma_testcases {
+	DPDMA_TC_INTR_DONE,
+	DPDMA_TC_NONE
+};
+
+struct xilinx_dpdma_debugfs {
+	enum xilinx_dpdma_testcases testcase;
+	u16 xilinx_dpdma_intr_done_count;
+	enum xilinx_dpdma_chan_id chan_id;
+};
+
+static struct xilinx_dpdma_debugfs dpdma_debugfs;
+struct xilinx_dpdma_debugfs_request {
+	const char *req;
+	enum xilinx_dpdma_testcases tc;
+	ssize_t (*read_handler)(char **kern_buff);
+	ssize_t (*write_handler)(char **cmd);
+};
+
+static void xilinx_dpdma_debugfs_intr_done_count_incr(int chan_id)
+{
+	if (chan_id == dpdma_debugfs.chan_id)
+		dpdma_debugfs.xilinx_dpdma_intr_done_count++;
+}
+
+static s64 xilinx_dpdma_debugfs_argument_value(char *arg)
+{
+	s64 value;
+
+	if (!arg)
+		return -1;
+
+	if (!kstrtos64(arg, 0, &value))
+		return value;
+
+	return -1;
+}
+
+static ssize_t
+xilinx_dpdma_debugfs_desc_done_intr_write(char **dpdma_test_arg)
+{
+	char *arg;
+	char *arg_chan_id;
+	s64 id;
+
+	arg = strsep(dpdma_test_arg, " ");
+	if (strncasecmp(arg, "start", 5) != 0)
+		return -EINVAL;
+
+	arg_chan_id = strsep(dpdma_test_arg, " ");
+	id = xilinx_dpdma_debugfs_argument_value(arg_chan_id);
+
+	if (id < 0 || !IN_RANGE(id, VIDEO0, AUDIO1))
+		return -EINVAL;
+
+	dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
+	dpdma_debugfs.xilinx_dpdma_intr_done_count = 0;
+	dpdma_debugfs.chan_id = id;
+
+	return 0;
+}
+
+static ssize_t xilinx_dpdma_debugfs_desc_done_intr_read(char **kern_buff)
+{
+	size_t out_str_len;
+
+	dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+	out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
+	out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
+			    out_str_len);
+	snprintf(*kern_buff, out_str_len, "%d",
+		 dpdma_debugfs.xilinx_dpdma_intr_done_count);
+
+	return 0;
+}
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
+	{"DESCRIPTOR_DONE_INTR", DPDMA_TC_INTR_DONE,
+			xilinx_dpdma_debugfs_desc_done_intr_read,
+			xilinx_dpdma_debugfs_desc_done_intr_write},
+};
+
+static ssize_t xilinx_dpdma_debugfs_write(struct file *f, const char __user
+					       *buf, size_t size, loff_t *pos)
+{
+	char *kern_buff, *kern_buff_start;
+	char *dpdma_test_req;
+	int ret;
+	int i;
+
+	if (*pos != 0 || size <= 0)
+		return -EINVAL;
+
+	/* Supporting single instance of test as of now*/
+	if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
+		return -EBUSY;
+
+	kern_buff = kzalloc(size, GFP_KERNEL);
+	if (!kern_buff)
+		return -ENOMEM;
+	kern_buff_start = kern_buff;
+
+	ret = strncpy_from_user(kern_buff, buf, size);
+	if (ret < 0) {
+		kfree(kern_buff_start);
+		return ret;
+	}
+
+	/* Read the testcase name from a user request */
+	dpdma_test_req = strsep(&kern_buff, " ");
+
+	for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
+		if (!strcasecmp(dpdma_test_req, dpdma_debugfs_reqs[i].req)) {
+			if (!dpdma_debugfs_reqs[i].write_handler(&kern_buff)) {
+				kfree(kern_buff_start);
+				return size;
+			}
+			break;
+		}
+	}
+	kfree(kern_buff_start);
+	return -EINVAL;
+}
+
+static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
+					 size_t size, loff_t *pos)
+{
+	char *kern_buff = NULL;
+	size_t kern_buff_len, out_str_len;
+	enum xilinx_dpdma_testcases tc;
+	int ret;
+
+	if (size <= 0)
+		return -EINVAL;
+
+	if (*pos != 0)
+		return 0;
+
+	kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
+	if (!kern_buff) {
+		dpdma_debugfs.testcase = DPDMA_TC_NONE;
+		return -ENOMEM;
+	}
+
+	tc = dpdma_debugfs.testcase;
+	if (tc == DPDMA_TC_NONE) {
+		out_str_len = strlen("No testcase executed");
+		out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
+				    out_str_len);
+		snprintf(kern_buff, out_str_len, "%s", "No testcase executed");
+	} else {
+		ret = dpdma_debugfs_reqs[tc].read_handler(&kern_buff);
+		if (ret) {
+			kfree(kern_buff);
+			return ret;
+		}
+	}
+
+	kern_buff_len = strlen(kern_buff);
+	size = min(size, kern_buff_len);
+
+	ret = copy_to_user(buf, kern_buff, size);
+
+	kfree(kern_buff);
+	if (ret)
+		return ret;
+
+	*pos = size + 1;
+	return size;
+}
+
+static const struct file_operations fops_xilinx_dpdma_dbgfs = {
+	.owner = THIS_MODULE,
+	.read = xilinx_dpdma_debugfs_read,
+	.write = xilinx_dpdma_debugfs_write,
+};
+
+static int xilinx_dpdma_debugfs_init(struct device *dev)
+{
+	int err;
+	struct dentry *xilinx_dpdma_debugfs_dir, *xilinx_dpdma_debugfs_file;
+
+	dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+	xilinx_dpdma_debugfs_dir = debugfs_create_dir("dpdma", NULL);
+	if (!xilinx_dpdma_debugfs_dir) {
+		dev_err(dev, "debugfs_create_dir failed\n");
+		return -ENODEV;
+	}
+
+	xilinx_dpdma_debugfs_file =
+		debugfs_create_file("testcase", 0444,
+				    xilinx_dpdma_debugfs_dir, NULL,
+				    &fops_xilinx_dpdma_dbgfs);
+	if (!xilinx_dpdma_debugfs_file) {
+		dev_err(dev, "debugfs_create_file testcase failed\n");
+		err = -ENODEV;
+		goto err_dbgfs;
+	}
+	return 0;
+
+err_dbgfs:
+	debugfs_remove_recursive(xilinx_dpdma_debugfs_dir);
+	xilinx_dpdma_debugfs_dir = NULL;
+	return err;
+}
+
+#else
+static int xilinx_dpdma_debugfs_init(struct device *dev)
+{
+	return 0;
+}
+
+static void xilinx_dpdma_debugfs_intr_done_count_incr(int chan_id)
+{
+}
+#endif /* CONFIG_XILINX_DPDMA_DEBUG_FS */
+
+#define to_dpdma_tx_desc(tx) \
+	container_of(tx, struct xilinx_dpdma_tx_desc, async_tx)
+
+#define to_xilinx_chan(chan) \
+	container_of(chan, struct xilinx_dpdma_chan, common)
+
+/* IO operations */
+
+static inline u32 dpdma_read(void __iomem *base, u32 offset)
+{
+	return ioread32(base + offset);
+}
+
+static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
+{
+	iowrite32(val, base + offset);
+}
+
+static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
+{
+	dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
+}
+
+static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
+{
+	dpdma_write(base, offset, dpdma_read(base, offset) | set);
+}
+
+/* Xilinx DPDMA descriptor operations */
+
+/**
+ * xilinx_dpdma_sw_desc_next_32 - Set 32 bit address of a next sw descriptor
+ * @sw_desc: current software descriptor
+ * @next: next descriptor
+ *
+ * Update the current sw descriptor @sw_desc with 32 bit address of the next
+ * descriptor @next.
+ */
+static inline void
+xilinx_dpdma_sw_desc_next_32(struct xilinx_dpdma_sw_desc *sw_desc,
+			     struct xilinx_dpdma_sw_desc *next)
+{
+	sw_desc->hw.next_desc = next->phys;
+}
+
+/**
+ * xilinx_dpdma_sw_desc_addr_32 - Update the sw descriptor with 32 bit address
+ * @sw_desc: software descriptor
+ * @prev: previous descriptor
+ * @dma_addr: array of dma addresses
+ * @num_src_addr: number of addresses in @dma_addr
+ *
+ * Update the descriptor @sw_desc with 32 bit address.
+ */
+static void xilinx_dpdma_sw_desc_addr_32(struct xilinx_dpdma_sw_desc *sw_desc,
+					 struct xilinx_dpdma_sw_desc *prev,
+					 dma_addr_t dma_addr[],
+					 unsigned int num_src_addr)
+{
+	struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
+	unsigned int i;
+
+	hw_desc->src_addr = dma_addr[0];
+
+	if (prev)
+		xilinx_dpdma_sw_desc_next_32(prev, sw_desc);
+
+	for (i = 1; i < num_src_addr; i++) {
+		u32 *addr = &hw_desc->src_addr2;
+		u32 frag_addr;
+
+		frag_addr = dma_addr[i];
+		addr[i - 1] = frag_addr;
+	}
+}
+
+/**
+ * xilinx_dpdma_sw_desc_next_64 - Set 64 bit address of a next sw descriptor
+ * @sw_desc: current software descriptor
+ * @next: next descriptor
+ *
+ * Update the current sw descriptor @sw_desc with 64 bit address of the next
+ * descriptor @next.
+ */
+static inline void
+xilinx_dpdma_sw_desc_next_64(struct xilinx_dpdma_sw_desc *sw_desc,
+			     struct xilinx_dpdma_sw_desc *next)
+{
+	sw_desc->hw.next_desc = lower_32_bits(next->phys);
+	sw_desc->hw.addr_ext |= upper_32_bits(next->phys) &
+				XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
+}
+
+/**
+ * xilinx_dpdma_sw_desc_addr_64 - Update the sw descriptor with 64 bit address
+ * @sw_desc: software descriptor
+ * @prev: previous descriptor
+ * @dma_addr: array of dma addresses
+ * @num_src_addr: number of addresses in @dma_addr
+ *
+ * Update the descriptor @sw_desc with 64 bit address.
+ */
+static void xilinx_dpdma_sw_desc_addr_64(struct xilinx_dpdma_sw_desc *sw_desc,
+					 struct xilinx_dpdma_sw_desc *prev,
+					 dma_addr_t dma_addr[],
+					 unsigned int num_src_addr)
+{
+	struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
+	unsigned int i;
+	u32 src_addr_extn;
+
+	hw_desc->src_addr = lower_32_bits(dma_addr[0]);
+	src_addr_extn = upper_32_bits(dma_addr[0]) &
+			XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
+	hw_desc->addr_ext |= (src_addr_extn <<
+			      XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT);
+
+	if (prev)
+		xilinx_dpdma_sw_desc_next_64(prev, sw_desc);
+
+	for (i = 1; i < num_src_addr; i++) {
+		u32 *addr = &hw_desc->src_addr2;
+		u32 *addr_ext = &hw_desc->addr_ext_23;
+		u64 frag_addr;
+
+		frag_addr = dma_addr[i];
+		addr[i] = (u32)frag_addr;
+
+		frag_addr >>= 32;
+		frag_addr &= XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
+		frag_addr <<= XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT * (i % 2);
+		addr_ext[i / 2] = frag_addr;
+	}
+}
+
+/* Xilinx DPDMA channel descriptor operations */
+
+/**
+ * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
+ * @chan: DPDMA channel
+ *
+ * Allocate a software descriptor from the channel's descriptor pool.
+ *
+ * Return: a software descriptor or NULL.
+ */
+static struct xilinx_dpdma_sw_desc *
+xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
+{
+	struct xilinx_dpdma_sw_desc *sw_desc;
+	dma_addr_t phys;
+
+	sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
+	if (!sw_desc)
+		return NULL;
+
+	sw_desc->phys = phys;
+
+	return sw_desc;
+}
+
+/**
+ * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
+ * @chan: DPDMA channel
+ * @sw_desc: software descriptor to free
+ *
+ * Free a software descriptor from the channel's descriptor pool.
+ */
+static void
+xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
+			       struct xilinx_dpdma_sw_desc *sw_desc)
+{
+	dma_pool_free(chan->desc_pool, sw_desc, sw_desc->phys);
+}
+
+/**
+ * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
+ * @chan: DPDMA channel
+ * @tx_desc: tx descriptor to dump
+ *
+ * Dump contents of a tx descriptor
+ */
+static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
+					   struct xilinx_dpdma_tx_desc *tx_desc)
+{
+	struct xilinx_dpdma_sw_desc *sw_desc;
+	struct device *dev = chan->xdev->dev;
+	unsigned int i = 0;
+
+	dev_dbg(dev, "------- TX descriptor dump start -------\n");
+	dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);
+
+	list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
+		struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
+
+		dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
+		dev_dbg(dev, "descriptor phys: %pad\n", &sw_desc->phys);
+		dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
+		dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
+		dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
+		dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
+		dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
+		dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
+		dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
+		dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
+		dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
+		dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
+		dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
+		dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
+		dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
+		dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
+		dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
+		dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
+	}
+
+	dev_dbg(dev, "------- TX descriptor dump end -------\n");
+}
+
+/**
+ * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
+ * @chan: DPDMA channel
+ *
+ * Allocate a tx descriptor.
+ *
+ * Return: a tx descriptor or NULL.
+ */
+static struct xilinx_dpdma_tx_desc *
+xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
+{
+	struct xilinx_dpdma_tx_desc *tx_desc;
+
+	tx_desc = kzalloc(sizeof(*tx_desc), GFP_KERNEL);
+	if (!tx_desc)
+		return NULL;
+
+	INIT_LIST_HEAD(&tx_desc->descriptors);
+	tx_desc->status = PREPARED;
+
+	return tx_desc;
+}
+
+/**
+ * xilinx_dpdma_chan_free_tx_desc - Free a transaction descriptor
+ * @chan: DPDMA channel
+ * @tx_desc: tx descriptor
+ *
+ * Free the tx descriptor @tx_desc including its software descriptors.
+ */
+static void
+xilinx_dpdma_chan_free_tx_desc(struct xilinx_dpdma_chan *chan,
+			       struct xilinx_dpdma_tx_desc *tx_desc)
+{
+	struct xilinx_dpdma_sw_desc *sw_desc, *next;
+
+	if (!tx_desc)
+		return;
+
+	list_for_each_entry_safe(sw_desc, next, &tx_desc->descriptors, node) {
+		list_del(&sw_desc->node);
+		xilinx_dpdma_chan_free_sw_desc(chan, sw_desc);
+	}
+
+	kfree(tx_desc);
+}
+
+/**
+ * xilinx_dpdma_chan_submit_tx_desc - Submit a transaction descriptor
+ * @chan: DPDMA channel
+ * @tx_desc: tx descriptor
+ *
+ * Submit the tx descriptor @tx_desc to the channel @chan.
+ *
+ * Return: a cookie assigned to the tx descriptor
+ */
+static dma_cookie_t
+xilinx_dpdma_chan_submit_tx_desc(struct xilinx_dpdma_chan *chan,
+				 struct xilinx_dpdma_tx_desc *tx_desc)
+{
+	struct xilinx_dpdma_sw_desc *sw_desc;
+	dma_cookie_t cookie;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	if (chan->submitted_desc) {
+		cookie = chan->submitted_desc->async_tx.cookie;
+		goto out_unlock;
+	}
+
+	cookie = dma_cookie_assign(&tx_desc->async_tx);
+
+	/* Assign the cookie to descriptors in this transaction */
+	/* Only 16 bit will be used, but it should be enough */
+	list_for_each_entry(sw_desc, &tx_desc->descriptors, node)
+		sw_desc->hw.desc_id = cookie;
+
+	if (tx_desc != chan->allocated_desc)
+		dev_err(chan->xdev->dev, "desc != allocated_desc\n");
+	else
+		chan->allocated_desc = NULL;
+	chan->submitted_desc = tx_desc;
+
+	if (chan->id == VIDEO1 || chan->id == VIDEO2) {
+		chan->video_group = true;
+		chan->xdev->chan[VIDEO0]->video_group = true;
+	}
+
+out_unlock:
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return cookie;
+}
+
+/**
+ * xilinx_dpdma_chan_free_desc_list - Free a descriptor list
+ * @chan: DPDMA channel
+ * @list: tx descriptor list
+ *
+ * Free tx descriptors in the list @list.
+ */
+static void xilinx_dpdma_chan_free_desc_list(struct xilinx_dpdma_chan *chan,
+					     struct list_head *list)
+{
+	struct xilinx_dpdma_tx_desc *tx_desc, *next;
+
+	list_for_each_entry_safe(tx_desc, next, list, node) {
+		list_del(&tx_desc->node);
+		xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+	}
+}
+
+/**
+ * xilinx_dpdma_chan_free_all_desc - Free all descriptors of the channel
+ * @chan: DPDMA channel
+ *
+ * Free all descriptors associated with the channel. The channel must be
+ * disabled before this function is called; otherwise, outstanding
+ * transactions may leave the system in an inconsistent state.
+ */
+static void xilinx_dpdma_chan_free_all_desc(struct xilinx_dpdma_chan *chan)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	dev_dbg(chan->xdev->dev, "chan->status = %s\n",
+		chan->status == STREAMING ? "STREAMING" : "IDLE");
+
+	xilinx_dpdma_chan_free_tx_desc(chan, chan->allocated_desc);
+	chan->allocated_desc = NULL;
+	xilinx_dpdma_chan_free_tx_desc(chan, chan->submitted_desc);
+	chan->submitted_desc = NULL;
+	xilinx_dpdma_chan_free_tx_desc(chan, chan->pending_desc);
+	chan->pending_desc = NULL;
+	xilinx_dpdma_chan_free_tx_desc(chan, chan->active_desc);
+	chan->active_desc = NULL;
+	xilinx_dpdma_chan_free_desc_list(chan, &chan->done_list);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_cleanup_desc - Clean up descriptors
+ * @chan: DPDMA channel
+ *
+ * Trigger the completion callbacks of descriptors whose transactions have
+ * finished. Free descriptors which are no longer in use.
+ */
+static void xilinx_dpdma_chan_cleanup_desc(struct xilinx_dpdma_chan *chan)
+{
+	struct xilinx_dpdma_tx_desc *desc;
+	dma_async_tx_callback callback;
+	void *callback_param;
+	unsigned long flags;
+	unsigned int cnt, i;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	while (!list_empty(&chan->done_list)) {
+		desc = list_first_entry(&chan->done_list,
+					struct xilinx_dpdma_tx_desc, node);
+		list_del(&desc->node);
+
+		cnt = desc->done_cnt;
+		desc->done_cnt = 0;
+		callback = desc->async_tx.callback;
+		callback_param = desc->async_tx.callback_param;
+		if (callback) {
+			spin_unlock_irqrestore(&chan->lock, flags);
+			for (i = 0; i < cnt; i++)
+				callback(callback_param);
+			spin_lock_irqsave(&chan->lock, flags);
+		}
+
+		xilinx_dpdma_chan_free_tx_desc(chan, desc);
+	}
+
+	if (chan->active_desc) {
+		cnt = chan->active_desc->done_cnt;
+		chan->active_desc->done_cnt = 0;
+		callback = chan->active_desc->async_tx.callback;
+		callback_param = chan->active_desc->async_tx.callback_param;
+		if (callback) {
+			spin_unlock_irqrestore(&chan->lock, flags);
+			for (i = 0; i < cnt; i++)
+				callback(callback_param);
+			spin_lock_irqsave(&chan->lock, flags);
+		}
+	}
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_desc_active - Set the descriptor as active
+ * @chan: DPDMA channel
+ *
+ * Make the pending descriptor @chan->pending_desc the active descriptor. This
+ * function should be called when the channel starts operating on the pending
+ * descriptor.
+ */
+static void xilinx_dpdma_chan_desc_active(struct xilinx_dpdma_chan *chan)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	if (!chan->pending_desc)
+		goto out_unlock;
+
+	if (chan->active_desc)
+		list_add_tail(&chan->active_desc->node, &chan->done_list);
+
+	chan->active_desc = chan->pending_desc;
+	chan->pending_desc = NULL;
+
+out_unlock:
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_desc_done_intr - Mark the current descriptor as 'done'
+ * @chan: DPDMA channel
+ *
+ * Mark the current active descriptor @chan->active_desc as 'done'. This
+ * function should be called to mark completion of the currently active
+ * descriptor.
+ */
+static void xilinx_dpdma_chan_desc_done_intr(struct xilinx_dpdma_chan *chan)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	xilinx_dpdma_debugfs_intr_done_count_incr(chan->id);
+
+	if (!chan->active_desc) {
+		dev_dbg(chan->xdev->dev, "done intr with no active desc\n");
+		goto out_unlock;
+	}
+
+	chan->active_desc->done_cnt++;
+	if (chan->active_desc->status ==  PREPARED) {
+		dma_cookie_complete(&chan->active_desc->async_tx);
+		chan->active_desc->status = ACTIVE;
+	}
+
+out_unlock:
+	spin_unlock_irqrestore(&chan->lock, flags);
+	tasklet_schedule(&chan->done_task);
+}
+
+/**
+ * xilinx_dpdma_chan_prep_slave_sg - Prepare a scatter-gather dma descriptor
+ * @chan: DPDMA channel
+ * @sgl: scatter-gather list
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * for the given scatter-gather transaction.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_slave_sg(struct xilinx_dpdma_chan *chan,
+				struct scatterlist *sgl)
+{
+	struct xilinx_dpdma_tx_desc *tx_desc;
+	struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
+	struct scatterlist *iter = sgl;
+	u32 line_size = 0;
+
+	if (chan->allocated_desc)
+		return &chan->allocated_desc->async_tx;
+
+	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+	if (!tx_desc)
+		return NULL;
+
+	while (!sg_is_chain(iter))
+		line_size += sg_dma_len(iter++);
+
+	while (sgl) {
+		struct xilinx_dpdma_hw_desc *hw_desc;
+		dma_addr_t dma_addr[4];
+		unsigned int num_pages = 0;
+
+		sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+		if (!sw_desc)
+			goto error;
+
+		while (!sg_is_chain(sgl) && !sg_is_last(sgl)) {
+			dma_addr[num_pages] = sg_dma_address(sgl++);
+			if (!IS_ALIGNED(dma_addr[num_pages++],
+					XILINX_DPDMA_ALIGN_BYTES)) {
+				dev_err(chan->xdev->dev,
+					"buffer should be aligned at %d B\n",
+					XILINX_DPDMA_ALIGN_BYTES);
+				goto error;
+			}
+		}
+
+		chan->xdev->desc_addr(sw_desc, last, dma_addr, num_pages);
+		hw_desc = &sw_desc->hw;
+		hw_desc->xfer_size = line_size;
+		hw_desc->hsize_stride =
+			line_size << XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
+		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
+		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_FRAG_MODE;
+		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
+
+		list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+		last = sw_desc;
+		if (sg_is_last(sgl))
+			break;
+		sgl = sg_chain_ptr(sgl);
+	}
+
+	sw_desc = list_first_entry(&tx_desc->descriptors,
+				   struct xilinx_dpdma_sw_desc, node);
+	if (chan->xdev->ext_addr)
+		xilinx_dpdma_sw_desc_next_64(last, sw_desc);
+	else
+		xilinx_dpdma_sw_desc_next_32(last, sw_desc);
+	last->hw.control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+	last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+	chan->allocated_desc = tx_desc;
+
+	return &tx_desc->async_tx;
+
+error:
+	xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+
+	return NULL;
+}
+
+/**
+ * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor
+ * @chan: DPDMA channel
+ * @buf_addr: buffer address
+ * @buf_len: buffer length
+ * @period_len: length of one period
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * for the given cyclic transaction.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
+			      dma_addr_t buf_addr, size_t buf_len,
+			      size_t period_len)
+{
+	struct xilinx_dpdma_tx_desc *tx_desc;
+	struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
+	unsigned int periods = buf_len / period_len;
+	unsigned int i;
+
+	if (chan->allocated_desc)
+		return &chan->allocated_desc->async_tx;
+
+	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+	if (!tx_desc)
+		return NULL;
+
+	for (i = 0; i < periods; i++) {
+		struct xilinx_dpdma_hw_desc *hw_desc;
+
+		if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
+			dev_err(chan->xdev->dev,
+				"buffer should be aligned at %d B\n",
+				XILINX_DPDMA_ALIGN_BYTES);
+			goto error;
+		}
+
+		sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+		if (!sw_desc)
+			goto error;
+
+		chan->xdev->desc_addr(sw_desc, last, &buf_addr, 1);
+		hw_desc = &sw_desc->hw;
+		hw_desc->xfer_size = period_len;
+		hw_desc->hsize_stride =
+			period_len <<
+			XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
+		hw_desc->hsize_stride |=
+			period_len <<
+			XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT;
+		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
+		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
+		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+
+		list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+
+		buf_addr += period_len;
+		last = sw_desc;
+	}
+
+	sw_desc = list_first_entry(&tx_desc->descriptors,
+				   struct xilinx_dpdma_sw_desc, node);
+	if (chan->xdev->ext_addr)
+		xilinx_dpdma_sw_desc_next_64(last, sw_desc);
+	else
+		xilinx_dpdma_sw_desc_next_32(last, sw_desc);
+	last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+	chan->allocated_desc = tx_desc;
+
+	return &tx_desc->async_tx;
+
+error:
+	xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+
+	return NULL;
+}
+
+/**
+ * xilinx_dpdma_chan_prep_interleaved - Prepare an interleaved dma descriptor
+ * @chan: DPDMA channel
+ * @xt: dma interleaved template
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * based on @xt.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_interleaved(struct xilinx_dpdma_chan *chan,
+				   struct dma_interleaved_template *xt)
+{
+	struct xilinx_dpdma_tx_desc *tx_desc;
+	struct xilinx_dpdma_sw_desc *sw_desc;
+	struct xilinx_dpdma_hw_desc *hw_desc;
+	size_t hsize = xt->sgl[0].size;
+	size_t stride = hsize + xt->sgl[0].icg;
+
+	if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
+		dev_err(chan->xdev->dev, "buffer should be aligned at %d B\n",
+			XILINX_DPDMA_ALIGN_BYTES);
+		return NULL;
+	}
+
+	if (chan->allocated_desc)
+		return &chan->allocated_desc->async_tx;
+
+	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+	if (!tx_desc)
+		return NULL;
+
+	sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+	if (!sw_desc)
+		goto error;
+
+	chan->xdev->desc_addr(sw_desc, sw_desc, &xt->src_start, 1);
+	hw_desc = &sw_desc->hw;
+	hw_desc->xfer_size = hsize * xt->numf;
+	hw_desc->hsize_stride = hsize <<
+				XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
+	hw_desc->hsize_stride |= (stride / 16) <<
+				 XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT;
+	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
+	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
+	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+	list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+	chan->allocated_desc = tx_desc;
+
+	return &tx_desc->async_tx;
+
+error:
+	xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+
+	return NULL;
+}
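
As a reference for reviewers, here is a minimal sketch of how a display (DRM)
client might fill the interleaved template consumed by the prep function
above: sgl[0].size is the number of bytes per line (hsize), sgl[0].icg pads
each line up to the framebuffer stride, and numf is the line count. The
function and variable names are illustrative only and not part of this patch.

#include <linux/dmaengine.h>
#include <linux/slab.h>

/*
 * Illustrative only: describe one frame of a linear framebuffer for the
 * DPDMA interleaved prep callback. fb_paddr must satisfy the driver's
 * XILINX_DPDMA_ALIGN_BYTES alignment requirement, and pitch must be at
 * least width * cpp.
 */
static struct dma_async_tx_descriptor *
example_prep_frame(struct dma_chan *chan, dma_addr_t fb_paddr,
		   unsigned int width, unsigned int height,
		   unsigned int cpp, unsigned int pitch)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;

	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->dir = DMA_MEM_TO_DEV;
	xt->src_start = fb_paddr;
	xt->src_sgl = true;
	xt->frame_size = 1;			  /* one chunk per line */
	xt->numf = height;			  /* number of lines */
	xt->sgl[0].size = width * cpp;		  /* bytes per line (hsize) */
	xt->sgl[0].icg = pitch - xt->sgl[0].size; /* gap up to the stride */

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);	/* the prep callback copies what it needs */

	return tx;
}
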
+
+/* Xilinx DPDMA channel operations */
+
+/**
+ * xilinx_dpdma_chan_enable - Enable the channel
+ * @chan: DPDMA channel
+ *
+ * Enable the channel and its interrupts. Set the QoS values for video class.
+ */
+static inline void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
+{
+	u32 reg;
+
+	reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
+	reg |= XILINX_DPDMA_INTR_GLOBAL_MASK;
+	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
+	reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
+	reg |= XILINX_DPDMA_INTR_GLOBAL_ERR;
+	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
+
+	reg = XILINX_DPDMA_CH_CNTL_ENABLE;
+	reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
+	       XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_SHIFT;
+	reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
+	       XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_SHIFT;
+	reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
+	       XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_SHIFT;
+	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
+}
+
+/**
+ * xilinx_dpdma_chan_disable - Disable the channel
+ * @chan: DPDMA channel
+ *
+ * Disable the channel and its interrupts.
+ */
+static inline void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
+{
+	u32 reg;
+
+	reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
+	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
+	reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
+	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
+
+	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
+}
+
+/**
+ * xilinx_dpdma_chan_pause - Pause the channel
+ * @chan: DPDMA channel
+ *
+ * Pause the channel.
+ */
+static inline void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
+{
+	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
+}
+
+/**
+ * xilinx_dpdma_chan_unpause - Unpause the channel
+ * @chan: DPDMA channel
+ *
+ * Unpause the channel.
+ */
+static inline void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
+{
+	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
+}
+
+static u32
+xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
+{
+	struct xilinx_dpdma_device *xdev = chan->xdev;
+	u32 i = 0, ret = 0;
+
+	for (i = VIDEO0; i < GRAPHICS; i++) {
+		if (xdev->chan[i]->video_group &&
+		    xdev->chan[i]->status != STREAMING)
+			return 0;
+
+		if (xdev->chan[i]->video_group)
+			ret |= BIT(i);
+	}
+
+	return ret;
+}
+
+/**
+ * xilinx_dpdma_chan_issue_pending - Issue the pending descriptor
+ * @chan: DPDMA channel
+ *
+ * Move the submitted descriptor @chan->submitted_desc to pending and trigger
+ * the channel. If the channel is already streaming, it is re-triggered with
+ * the pending descriptor.
+ */
+static void xilinx_dpdma_chan_issue_pending(struct xilinx_dpdma_chan *chan)
+{
+	struct xilinx_dpdma_device *xdev = chan->xdev;
+	struct xilinx_dpdma_sw_desc *sw_desc;
+	unsigned long flags;
+	u32 reg, channels;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	if (!chan->submitted_desc || chan->pending_desc)
+		goto out_unlock;
+
+	chan->pending_desc = chan->submitted_desc;
+	chan->submitted_desc = NULL;
+
+	sw_desc = list_first_entry(&chan->pending_desc->descriptors,
+				   struct xilinx_dpdma_sw_desc, node);
+	dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
+		    (u32)sw_desc->phys);
+	if (xdev->ext_addr)
+		dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
+			    ((u64)sw_desc->phys >> 32) &
+			    XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK);
+
+	if (chan->first_frame) {
+		chan->first_frame = false;
+		if (chan->video_group) {
+			channels = xilinx_dpdma_chan_video_group_ready(chan);
+			if (!channels)
+				goto out_unlock;
+			reg = channels << XILINX_DPDMA_GBL_TRIG_SHIFT;
+		} else {
+			reg = 1 << (XILINX_DPDMA_GBL_TRIG_SHIFT + chan->id);
+		}
+	} else {
+		if (chan->video_group) {
+			channels = xilinx_dpdma_chan_video_group_ready(chan);
+			if (!channels)
+				goto out_unlock;
+			reg = channels << XILINX_DPDMA_GBL_RETRIG_SHIFT;
+		} else {
+			reg = 1 << (XILINX_DPDMA_GBL_RETRIG_SHIFT + chan->id);
+		}
+	}
+
+	dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
+
+out_unlock:
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_start - Start the channel
+ * @chan: DPDMA channel
+ *
+ * Start the channel by enabling interrupts and triggering the channel.
+ * If the channel is already streaming or there is no submitted descriptor,
+ * this function does nothing.
+ */
+static void xilinx_dpdma_chan_start(struct xilinx_dpdma_chan *chan)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	if (!chan->submitted_desc || chan->status == STREAMING)
+		goto out_unlock;
+
+	xilinx_dpdma_chan_unpause(chan);
+	xilinx_dpdma_chan_enable(chan);
+	chan->first_frame = true;
+	chan->status = STREAMING;
+
+out_unlock:
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_ostand - Number of outstanding transactions
+ * @chan: DPDMA channel
+ *
+ * Read and return the number of outstanding transactions from the channel
+ * status register.
+ *
+ * Return: Number of outstanding transactions from the status register.
+ */
+static inline u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
+{
+	return dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS) >>
+	       XILINX_DPDMA_CH_STATUS_OTRAN_CNT_SHIFT &
+	       XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK;
+}
+
+/**
+ * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
+ * @chan: DPDMA channel
+ *
+ * Notify waiters of the 'no outstanding transaction' event so that they can
+ * stop the channel safely. This function should be called when the 'no
+ * outstanding transaction' interrupt is generated. The interrupt is disabled
+ * here and should be re-enabled once the event is handled. If the channel
+ * status register still shows outstanding transactions, the interrupt
+ * remains enabled.
+ *
+ * Return: 0 on success, or -EWOULDBLOCK if there are still outstanding
+ * transactions.
+ */
+static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
+{
+	u32 cnt;
+
+	cnt = xilinx_dpdma_chan_ostand(chan);
+	if (cnt) {
+		dev_dbg(chan->xdev->dev, "%d outstanding transactions\n", cnt);
+		return -EWOULDBLOCK;
+	}
+
+	/* Disable 'no outstanding' interrupt */
+	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
+		    1 << (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
+	wake_up(&chan->wait_to_stop);
+
+	return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding intr
+ * @chan: DPDMA channel
+ *
+ * Wait for the 'no outstanding transaction' interrupt. This function can
+ * sleep for up to 50 ms.
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout, or the error code from
+ * wait_event_interruptible_timeout().
+ */
+static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
+{
+	int ret;
+
+	/* Wait up to 50 ms for the 'no outstanding transaction' interrupt */
+	ret = wait_event_interruptible_timeout(chan->wait_to_stop,
+					       !xilinx_dpdma_chan_ostand(chan),
+					       msecs_to_jiffies(50));
+	if (ret > 0) {
+		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
+			    1 <<
+			    (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
+		return 0;
+	}
+
+	dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
+		xilinx_dpdma_chan_ostand(chan));
+
+	if (ret == 0)
+		return -ETIMEDOUT;
+
+	return ret;
+}
+
+/**
+ * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
+ * @chan: DPDMA channel
+ *
+ * Poll the outstanding transaction status, and return when there is no
+ * outstanding transaction. This function can be used in interrupt context or
+ * wherever atomicity is required. The calling thread may busy-wait for more
+ * than 50 ms.
+ *
+ * Return: 0 on success, or -ETIMEDOUT.
+ */
+static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
+{
+	u32 cnt, loop = 50000;
+
+	/* Poll at least for 50ms (20 fps). */
+	do {
+		cnt = xilinx_dpdma_chan_ostand(chan);
+		udelay(1);
+	} while (loop-- > 0 && cnt);
+
+	if (!cnt) {
+		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
+			    1 <<
+			    (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
+		return 0;
+	}
+
+	dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
+		xilinx_dpdma_chan_ostand(chan));
+
+	return -ETIMEDOUT;
+}
+
+/**
+ * xilinx_dpdma_chan_stop - Stop the channel
+ * @chan: DPDMA channel
+ * @poll: whether to poll or sleep-wait for no outstanding transactions
+ *
+ * Stop the channel with the following sequence: 1. pause the channel,
+ * 2. poll or wait (depending on @poll) until there is no outstanding
+ * transaction, 3. disable the channel.
+ *
+ * Return: 0 on success, or an error code from
+ * xilinx_dpdma_chan_poll_no_ostand() or xilinx_dpdma_chan_wait_no_ostand().
+ */
+static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan, bool poll)
+{
+	unsigned long flags;
+	int ret;
+
+	xilinx_dpdma_chan_pause(chan);
+	if (poll)
+		ret = xilinx_dpdma_chan_poll_no_ostand(chan);
+	else
+		ret = xilinx_dpdma_chan_wait_no_ostand(chan);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	xilinx_dpdma_chan_disable(chan);
+	chan->status = IDLE;
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_alloc_resources - Allocate resources for the channel
+ * @chan: DPDMA channel
+ *
+ * Allocate a descriptor pool for the channel.
+ *
+ * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
+ */
+static int xilinx_dpdma_chan_alloc_resources(struct xilinx_dpdma_chan *chan)
+{
+	chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
+				chan->xdev->dev,
+				sizeof(struct xilinx_dpdma_sw_desc),
+				__alignof__(struct xilinx_dpdma_sw_desc), 0);
+	if (!chan->desc_pool) {
+		dev_err(chan->xdev->dev,
+			"failed to allocate a descriptor pool\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_free_resources - Free all resources for the channel
+ * @chan: DPDMA channel
+ *
+ * Free all descriptors and the descriptor pool for the channel.
+ */
+static void xilinx_dpdma_chan_free_resources(struct xilinx_dpdma_chan *chan)
+{
+	xilinx_dpdma_chan_free_all_desc(chan);
+	dma_pool_destroy(chan->desc_pool);
+	chan->desc_pool = NULL;
+}
+
+/**
+ * xilinx_dpdma_chan_terminate_all - Terminate the channel and descriptors
+ * @chan: DPDMA channel
+ *
+ * Stop the channel and free all associated descriptors. Poll for no
+ * outstanding transactions, as this function can be called from an atomic
+ * context.
+ *
+ * Return: 0 on success, or the error code from xilinx_dpdma_chan_stop().
+ */
+static int xilinx_dpdma_chan_terminate_all(struct xilinx_dpdma_chan *chan)
+{
+	struct xilinx_dpdma_device *xdev = chan->xdev;
+	int ret;
+	unsigned int i;
+
+	if (chan->video_group) {
+		for (i = VIDEO0; i < GRAPHICS; i++) {
+			if (xdev->chan[i]->video_group &&
+			    xdev->chan[i]->status == STREAMING) {
+				xilinx_dpdma_chan_pause(xdev->chan[i]);
+				xdev->chan[i]->video_group = false;
+			}
+		}
+	}
+
+	ret = xilinx_dpdma_chan_stop(chan, true);
+	if (ret)
+		return ret;
+
+	xilinx_dpdma_chan_free_all_desc(chan);
+
+	return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_synchronize - Synchronize all outgoing transfers
+ * @chan: DPDMA channel
+ *
+ * Stop the channel and free all associated descriptors. Since this function
+ * cannot be called from an atomic context, sleep while waiting for the 'no
+ * outstanding transaction' interrupt, then kill all related tasklets.
+ *
+ * Return: 0 on success, or the error code from xilinx_dpdma_chan_stop().
+ */
+static int xilinx_dpdma_chan_synchronize(struct xilinx_dpdma_chan *chan)
+{
+	struct xilinx_dpdma_device *xdev = chan->xdev;
+	int ret;
+	unsigned int i;
+
+	if (chan->video_group) {
+		for (i = VIDEO0; i < GRAPHICS; i++) {
+			if (xdev->chan[i]->video_group &&
+			    xdev->chan[i]->status == STREAMING) {
+				xilinx_dpdma_chan_pause(xdev->chan[i]);
+				xdev->chan[i]->video_group = false;
+			}
+		}
+	}
+
+	ret = xilinx_dpdma_chan_stop(chan, false);
+	if (ret)
+		return ret;
+
+	tasklet_kill(&chan->err_task);
+	tasklet_kill(&chan->done_task);
+	xilinx_dpdma_chan_free_all_desc(chan);
+
+	return 0;
+}
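
Note for reviewers (illustrative, not part of the patch): the terminate_all /
synchronize split above maps onto the standard dmaengine teardown pattern,
where dmaengine_terminate_async() may be called from atomic context and
dmaengine_synchronize() must be called from a sleepable context before the
channel is reused or released. A minimal client stop path would look like:

#include <linux/dmaengine.h>

/* Illustrative client stop path; "chan" was obtained via dma_request_chan(). */
static void example_stop_stream(struct dma_chan *chan)
{
	/*
	 * Atomic-safe: ends up in xilinx_dpdma_chan_terminate_all(), which
	 * pauses the channel, polls for no outstanding transactions and
	 * frees the descriptors.
	 */
	dmaengine_terminate_async(chan);

	/*
	 * Sleepable context only: ends up in xilinx_dpdma_chan_synchronize(),
	 * which sleep-waits for the 'no outstanding transaction' interrupt
	 * and kills the done/error tasklets.
	 */
	dmaengine_synchronize(chan);
}
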
+
+/**
+ * xilinx_dpdma_chan_err - Detect any channel error
+ * @chan: DPDMA channel
+ * @isr: masked Interrupt Status Register
+ * @eisr: Error Interrupt Status Register
+ *
+ * Return: true if any channel error occurs, or false otherwise.
+ */
+static bool
+xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
+{
+	if (!chan)
+		return false;
+
+	if (chan->status == STREAMING &&
+	    ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
+	    (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
+		return true;
+
+	return false;
+}
+
+/**
+ * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
+ * @chan: DPDMA channel
+ *
+ * This function is called when any channel error or any global error occurs.
+ * The function disables the channel that was paused by the error and
+ * determines whether the currently active descriptor can be rescheduled,
+ * depending on the descriptor status.
+ */
+static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
+{
+	struct xilinx_dpdma_device *xdev = chan->xdev;
+	struct device *dev = xdev->dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	dev_dbg(dev, "cur desc addr = 0x%04x%08x\n",
+		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
+		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
+	dev_dbg(dev, "cur payload addr = 0x%04x%08x\n",
+		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
+		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));
+
+	xilinx_dpdma_chan_disable(chan);
+	chan->status = IDLE;
+
+	if (!chan->active_desc)
+		goto out_unlock;
+
+	xilinx_dpdma_chan_dump_tx_desc(chan, chan->active_desc);
+
+	switch (chan->active_desc->status) {
+	case ERRORED:
+		dev_dbg(dev, "repeated error on desc\n");
+	case ACTIVE:
+	case PREPARED:
+		/* Reschedule if there's no new descriptor */
+		if (!chan->submitted_desc)
+			chan->submitted_desc = chan->active_desc;
+		chan->active_desc->status = ERRORED;
+		break;
+	}
+	chan->active_desc = NULL;
+
+out_unlock:
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/* DMA tx descriptor */
+
+static dma_cookie_t xilinx_dpdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(tx->chan);
+	struct xilinx_dpdma_tx_desc *tx_desc = to_dpdma_tx_desc(tx);
+
+	return xilinx_dpdma_chan_submit_tx_desc(chan, tx_desc);
+}
+
+/* DMA channel operations */
+
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+			   unsigned int sg_len,
+			   enum dma_transfer_direction direction,
+			   unsigned long flags, void *context)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+	struct dma_async_tx_descriptor *async_tx;
+
+	if (direction != DMA_MEM_TO_DEV)
+		return NULL;
+
+	if (!sgl || sg_len < 2)
+		return NULL;
+
+	async_tx = xilinx_dpdma_chan_prep_slave_sg(chan, sgl);
+	if (!async_tx)
+		return NULL;
+
+	dma_async_tx_descriptor_init(async_tx, dchan);
+	async_tx->tx_submit = xilinx_dpdma_tx_submit;
+	async_tx->flags = flags;
+	async_tx_ack(async_tx);
+
+	return async_tx;
+}
+
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
+			     size_t buf_len, size_t period_len,
+			     enum dma_transfer_direction direction,
+			     unsigned long flags)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+	struct dma_async_tx_descriptor *async_tx;
+
+	if (direction != DMA_MEM_TO_DEV)
+		return NULL;
+
+	if (buf_len % period_len)
+		return NULL;
+
+	async_tx = xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
+						 period_len);
+	if (!async_tx)
+		return NULL;
+
+	dma_async_tx_descriptor_init(async_tx, dchan);
+	async_tx->tx_submit = xilinx_dpdma_tx_submit;
+	async_tx->flags = flags;
+	async_tx_ack(async_tx);
+
+	return async_tx;
+}
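
For the audio channels, a client would typically use the cyclic API roughly
as sketched below: buf_len must be an exact multiple of period_len (checked
above) and the buffer must meet the driver's alignment requirement. The
function and parameter names are illustrative only, not part of this patch.

#include <linux/dmaengine.h>

/* Illustrative cyclic (audio) setup. */
static int example_start_audio(struct dma_chan *chan, dma_addr_t buf_addr,
			       size_t buf_len, size_t period_len,
			       dma_async_tx_callback period_done, void *arg)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_cyclic(chan, buf_addr, buf_len, period_len,
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	tx->callback = period_done;	/* called as periods complete */
	tx->callback_param = arg;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	return 0;
}
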
+
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
+				  struct dma_interleaved_template *xt,
+				  unsigned long flags)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+	struct dma_async_tx_descriptor *async_tx;
+
+	if (xt->dir != DMA_MEM_TO_DEV)
+		return NULL;
+
+	if (!xt->numf || !xt->sgl[0].size)
+		return NULL;
+
+	async_tx = xilinx_dpdma_chan_prep_interleaved(chan, xt);
+	if (!async_tx)
+		return NULL;
+
+	dma_async_tx_descriptor_init(async_tx, dchan);
+	async_tx->tx_submit = xilinx_dpdma_tx_submit;
+	async_tx->flags = flags;
+	async_tx_ack(async_tx);
+
+	return async_tx;
+}
+
+static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+	dma_cookie_init(dchan);
+
+	return xilinx_dpdma_chan_alloc_resources(chan);
+}
+
+static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+	xilinx_dpdma_chan_free_resources(chan);
+}
+
+static enum dma_status xilinx_dpdma_tx_status(struct dma_chan *dchan,
+					      dma_cookie_t cookie,
+					      struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(dchan, cookie, txstate);
+}
+
+static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+	xilinx_dpdma_chan_start(chan);
+	xilinx_dpdma_chan_issue_pending(chan);
+}
+
+static int xilinx_dpdma_config(struct dma_chan *dchan,
+			       struct dma_slave_config *config)
+{
+	if (config->direction != DMA_MEM_TO_DEV)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int xilinx_dpdma_pause(struct dma_chan *dchan)
+{
+	xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));
+
+	return 0;
+}
+
+static int xilinx_dpdma_resume(struct dma_chan *dchan)
+{
+	xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));
+
+	return 0;
+}
+
+static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
+{
+	return xilinx_dpdma_chan_terminate_all(to_xilinx_chan(dchan));
+}
+
+static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
+{
+	xilinx_dpdma_chan_synchronize(to_xilinx_chan(dchan));
+}
+
+/* Xilinx DPDMA device operations */
+
+/**
+ * xilinx_dpdma_err - Detect any global error
+ * @isr: Interrupt Status Register
+ * @eisr: Error Interrupt Status Register
+ *
+ * Return: true if any global error occurs, or false otherwise.
+ */
+static bool xilinx_dpdma_err(u32 isr, u32 eisr)
+{
+	if ((isr & XILINX_DPDMA_INTR_GLOBAL_ERR) ||
+	    (eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR))
+		return true;
+
+	return false;
+}
+
+/**
+ * xilinx_dpdma_handle_err_intr - Handle DPDMA error interrupt
+ * @xdev: DPDMA device
+ * @isr: masked Interrupt Status Register
+ * @eisr: Error Interrupt Status Register
+ *
+ * Handle errors based on @isr and @eisr. This function disables the
+ * corresponding error interrupts; they should be re-enabled once handling is
+ * done.
+ */
+static void xilinx_dpdma_handle_err_intr(struct xilinx_dpdma_device *xdev,
+					 u32 isr, u32 eisr)
+{
+	bool err = xilinx_dpdma_err(isr, eisr);
+	unsigned int i;
+
+	dev_dbg_ratelimited(xdev->dev,
+			    "error intr: isr = 0x%08x, eisr = 0x%08x\n",
+			    isr, eisr);
+
+	/* Disable channel error interrupts until errors are handled. */
+	dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
+		    isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
+	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
+		    eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);
+
+	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
+		if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
+			tasklet_schedule(&xdev->chan[i]->err_task);
+}
+
+/**
+ * xilinx_dpdma_handle_vsync_intr - Handle the VSYNC interrupt
+ * @xdev: DPDMA device
+ *
+ * Handle the VSYNC event. At this point, the current frame becomes active,
+ * which means the DPDMA actually starts fetching, and the next frame can be
+ * scheduled.
+ */
+static void xilinx_dpdma_handle_vsync_intr(struct xilinx_dpdma_device *xdev)
+{
+	unsigned int i;
+
+	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++) {
+		if (xdev->chan[i] &&
+		    xdev->chan[i]->status == STREAMING) {
+			xilinx_dpdma_chan_desc_active(xdev->chan[i]);
+			xilinx_dpdma_chan_issue_pending(xdev->chan[i]);
+		}
+	}
+}
+
+/**
+ * xilinx_dpdma_enable_intr - Enable interrupts
+ * @xdev: DPDMA device
+ *
+ * Enable interrupts.
+ */
+static void xilinx_dpdma_enable_intr(struct xilinx_dpdma_device *xdev)
+{
+	dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
+	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
+}
+
+/**
+ * xilinx_dpdma_disable_intr - Disable interrupts
+ * @xdev: DPDMA device
+ *
+ * Disable interrupts.
+ */
+static void xilinx_dpdma_disable_intr(struct xilinx_dpdma_device *xdev)
+{
+	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
+	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
+}
+
+/* Interrupt handling operations */
+
+/**
+ * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
+ * @data: tasklet data to be cast to the DPDMA channel structure
+ *
+ * Per channel error handling tasklet. This function waits until there is no
+ * outstanding transaction, then triggers error handling. After error
+ * handling, it re-enables the channel error interrupts and restarts the
+ * channel if needed.
+ */
+static void xilinx_dpdma_chan_err_task(unsigned long data)
+{
+	struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
+	struct xilinx_dpdma_device *xdev = chan->xdev;
+
+	/* Proceed error handling even when polling fails. */
+	xilinx_dpdma_chan_poll_no_ostand(chan);
+
+	xilinx_dpdma_chan_handle_err(chan);
+
+	dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
+		    XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
+	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
+		    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
+
+	xilinx_dpdma_chan_start(chan);
+	xilinx_dpdma_chan_issue_pending(chan);
+}
+
+/**
+ * xilinx_dpdma_chan_done_task - Per channel tasklet for done interrupt handling
+ * @data: tasklet data to be cast to the DPDMA channel structure
+ *
+ * Per channel done interrupt handling tasklet.
+ */
+static void xilinx_dpdma_chan_done_task(unsigned long data)
+{
+	struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
+
+	xilinx_dpdma_chan_cleanup_desc(chan);
+}
+
+static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
+{
+	struct xilinx_dpdma_device *xdev = data;
+	u32 status, error, i;
+	unsigned long masked;
+
+	status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
+	error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
+	if (!status && !error)
+		return IRQ_NONE;
+
+	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
+	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);
+
+	if (status & XILINX_DPDMA_INTR_VSYNC)
+		xilinx_dpdma_handle_vsync_intr(xdev);
+
+	masked = (status & XILINX_DPDMA_INTR_DESC_DONE_MASK) >>
+		 XILINX_DPDMA_INTR_DESC_DONE_SHIFT;
+	if (masked)
+		for_each_set_bit(i, &masked, XILINX_DPDMA_NUM_CHAN)
+			xilinx_dpdma_chan_desc_done_intr(xdev->chan[i]);
+
+	masked = (status & XILINX_DPDMA_INTR_NO_OSTAND_MASK) >>
+		 XILINX_DPDMA_INTR_NO_OSTAND_SHIFT;
+	if (masked)
+		for_each_set_bit(i, &masked, XILINX_DPDMA_NUM_CHAN)
+			xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
+
+	masked = status & XILINX_DPDMA_INTR_ERR_ALL;
+	if (masked || error)
+		xilinx_dpdma_handle_err_intr(xdev, masked, error);
+
+	return IRQ_HANDLED;
+}
+
+/* Initialization operations */
+
+static struct xilinx_dpdma_chan *
+xilinx_dpdma_chan_probe(struct device_node *node,
+			struct xilinx_dpdma_device *xdev)
+{
+	struct xilinx_dpdma_chan *chan;
+
+	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return ERR_PTR(-ENOMEM);
+
+	if (of_device_is_compatible(node, "xlnx,video0")) {
+		chan->id = VIDEO0;
+	} else if (of_device_is_compatible(node, "xlnx,video1")) {
+		chan->id = VIDEO1;
+	} else if (of_device_is_compatible(node, "xlnx,video2")) {
+		chan->id = VIDEO2;
+	} else if (of_device_is_compatible(node, "xlnx,graphics")) {
+		chan->id = GRAPHICS;
+	} else if (of_device_is_compatible(node, "xlnx,audio0")) {
+		chan->id = AUDIO0;
+	} else if (of_device_is_compatible(node, "xlnx,audio1")) {
+		chan->id = AUDIO1;
+	} else {
+		dev_err(xdev->dev, "invalid channel compatible string in DT\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE + XILINX_DPDMA_CH_OFFSET *
+		    chan->id;
+	chan->status = IDLE;
+
+	spin_lock_init(&chan->lock);
+	INIT_LIST_HEAD(&chan->done_list);
+	init_waitqueue_head(&chan->wait_to_stop);
+
+	tasklet_init(&chan->done_task, xilinx_dpdma_chan_done_task,
+		     (unsigned long)chan);
+	tasklet_init(&chan->err_task, xilinx_dpdma_chan_err_task,
+		     (unsigned long)chan);
+
+	chan->common.device = &xdev->common;
+	chan->xdev = xdev;
+
+	list_add_tail(&chan->common.device_node, &xdev->common.channels);
+	xdev->chan[chan->id] = chan;
+
+	return chan;
+}
+
+static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
+{
+	tasklet_kill(&chan->err_task);
+	tasklet_kill(&chan->done_task);
+	list_del(&chan->common.device_node);
+}
+
+static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+					    struct of_dma *ofdma)
+{
+	struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
+	uint32_t chan_id = dma_spec->args[0];
+
+	if (chan_id >= XILINX_DPDMA_NUM_CHAN)
+		return NULL;
+
+	if (!xdev->chan[chan_id])
+		return NULL;
+
+	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
+}
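
On the consumer side, for reference: the translation above takes a single DT
cell selecting the channel ID (0 to 5), so a client simply requests its
channel through the standard helper. The "vid0" name below is an assumed
client binding, not something this patch defines.

#include <linux/device.h>
#include <linux/dmaengine.h>

/*
 * Illustrative: request the DPDMA channel referenced by the client's
 * "dmas"/"dma-names" properties (channel name assumed here).
 */
static struct dma_chan *example_get_video_chan(struct device *dev)
{
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "vid0");
	if (IS_ERR(chan))
		dev_err(dev, "failed to get DPDMA channel: %ld\n",
			PTR_ERR(chan));

	return chan;
}
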
+
+static int xilinx_dpdma_probe(struct platform_device *pdev)
+{
+	struct xilinx_dpdma_device *xdev;
+	struct xilinx_dpdma_chan *chan;
+	struct dma_device *ddev;
+	struct resource *res;
+	struct device_node *node, *child;
+	u32 i;
+	int irq, ret;
+
+	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+	if (!xdev)
+		return -ENOMEM;
+
+	xdev->dev = &pdev->dev;
+	platform_set_drvdata(pdev, xdev);
+	ddev = &xdev->common;
+	ddev->dev = &pdev->dev;
+	node = xdev->dev->of_node;
+
+	xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
+	if (IS_ERR(xdev->axi_clk))
+		return PTR_ERR(xdev->axi_clk);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	xdev->reg = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(xdev->reg))
+		return PTR_ERR(xdev->reg);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(xdev->dev, "failed to get platform irq\n");
+		return irq;
+	}
+
+	ret = devm_request_irq(xdev->dev, irq, xilinx_dpdma_irq_handler,
+			       IRQF_SHARED, dev_name(xdev->dev), xdev);
+	if (ret) {
+		dev_err(xdev->dev, "failed to request IRQ\n");
+		return ret;
+	}
+
+	INIT_LIST_HEAD(&xdev->common.channels);
+	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+	dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
+	dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
+	ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);
+
+	ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
+	ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
+	ddev->device_prep_slave_sg = xilinx_dpdma_prep_slave_sg;
+	ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
+	ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
+	ddev->device_tx_status = xilinx_dpdma_tx_status;
+	ddev->device_issue_pending = xilinx_dpdma_issue_pending;
+	ddev->device_config = xilinx_dpdma_config;
+	ddev->device_pause = xilinx_dpdma_pause;
+	ddev->device_resume = xilinx_dpdma_resume;
+	ddev->device_terminate_all = xilinx_dpdma_terminate_all;
+	ddev->device_synchronize = xilinx_dpdma_synchronize;
+	ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
+	ddev->directions = BIT(DMA_MEM_TO_DEV);
+	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+	for_each_child_of_node(node, child) {
+		chan = xilinx_dpdma_chan_probe(child, xdev);
+		if (IS_ERR(chan)) {
+			dev_err(xdev->dev, "failed to probe a channel\n");
+			of_node_put(child);
+			ret = PTR_ERR(chan);
+			goto error;
+		}
+	}
+
+	xdev->ext_addr = sizeof(dma_addr_t) > 4;
+	if (xdev->ext_addr)
+		xdev->desc_addr = xilinx_dpdma_sw_desc_addr_64;
+	else
+		xdev->desc_addr = xilinx_dpdma_sw_desc_addr_32;
+
+	ret = clk_prepare_enable(xdev->axi_clk);
+	if (ret) {
+		dev_err(xdev->dev, "failed to enable the axi clock\n");
+		goto error;
+	}
+
+	ret = dma_async_device_register(ddev);
+	if (ret) {
+		dev_err(xdev->dev, "failed to register the dma device\n");
+		goto error_dma_async;
+	}
+
+	ret = of_dma_controller_register(xdev->dev->of_node,
+					 of_dma_xilinx_xlate, ddev);
+	if (ret) {
+		dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
+		goto error_of_dma;
+	}
+
+	xilinx_dpdma_enable_intr(xdev);
+
+	xilinx_dpdma_debugfs_init(&pdev->dev);
+
+	dev_info(&pdev->dev, "Xilinx DPDMA engine probed\n");
+
+	return 0;
+
+error_of_dma:
+	dma_async_device_unregister(ddev);
+error_dma_async:
+	clk_disable_unprepare(xdev->axi_clk);
+error:
+	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
+		if (xdev->chan[i])
+			xilinx_dpdma_chan_remove(xdev->chan[i]);
+
+	return ret;
+}
+
+static int xilinx_dpdma_remove(struct platform_device *pdev)
+{
+	struct xilinx_dpdma_device *xdev;
+	unsigned int i;
+
+	xdev = platform_get_drvdata(pdev);
+
+	xilinx_dpdma_disable_intr(xdev);
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&xdev->common);
+	clk_disable_unprepare(xdev->axi_clk);
+
+	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
+		if (xdev->chan[i])
+			xilinx_dpdma_chan_remove(xdev->chan[i]);
+
+	return 0;
+}
+
+static const struct of_device_id xilinx_dpdma_of_match[] = {
+	{ .compatible = "xlnx,dpdma",},
+	{ /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);
+
+static struct platform_driver xilinx_dpdma_driver = {
+	.probe			= xilinx_dpdma_probe,
+	.remove			= xilinx_dpdma_remove,
+	.driver			= {
+		.name		= "xilinx-dpdma",
+		.of_match_table	= xilinx_dpdma_of_match,
+	},
+};
+
+module_platform_driver(xilinx_dpdma_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx DPDMA driver");
+MODULE_LICENSE("GPL v2");