[2/2] dma: Add Xilinx AXI Central Direct Memory Access Engine driver support

Message ID: 1396274044-6524-2-git-send-email-sthokal@xilinx.com (mailing list archive)
State: Superseded
Delegated to: Vinod Koul

Commit Message

Srikanth Thokala March 31, 2014, 1:54 p.m. UTC
This is the driver for the AXI Central Direct Memory Access (AXI
CDMA) core, which is a soft Xilinx IP core that provides high-bandwidth
Direct Memory Access (DMA) between a memory-mapped source address and a
memory-mapped destination address.

This module works on Zynq (ARM-based SoC) and MicroBlaze platforms.

Signed-off-by: Srikanth Thokala <sthokal@xilinx.com>
---
NOTE:
- This driver patch is created on top of an earlier series:
  1/2 - "dma: Add Xilinx Video DMA DT Binding Documentation"
  2/2 - "dma: Add Xilinx AXI Video Direct Memory Access Engine driver support"
- Rebased on v3.14.0-rc8
---
 drivers/dma/Kconfig              |   12 +
 drivers/dma/xilinx/Makefile      |    1 +
 drivers/dma/xilinx/xilinx_cdma.c |  998 ++++++++++++++++++++++++++++++++++++++
 include/linux/amba/xilinx_dma.h  |   15 +-
 4 files changed, 1025 insertions(+), 1 deletion(-)
 create mode 100644 drivers/dma/xilinx/xilinx_cdma.c

Comments

Srikanth Thokala April 7, 2014, 2:52 p.m. UTC | #1
Hi,

Kindly review this driver and please let me know if you have any comments.

Thanks
Srikanth

On Mon, Mar 31, 2014 at 7:24 PM, Srikanth Thokala <sthokal@xilinx.com> wrote:
> This is the driver for the AXI Central Direct Memory Access (AXI
> CDMA) core, which is a soft Xilinx IP core that provides high-bandwidth
> Direct Memory Access (DMA) between a memory-mapped source address and a
> memory-mapped destination address.
>
> This module works on Zynq (ARM-based SoC) and MicroBlaze platforms.
>
> Signed-off-by: Srikanth Thokala <sthokal@xilinx.com>
> ---
> NOTE:
> - This driver patch is created on top of an earlier series:
>   1/2 - "dma: Add Xilinx Video DMA DT Binding Documentation"
>   2/2 - "dma: Add Xilinx AXI Video Direct Memory Access Engine driver support"
> - Rebased on v3.14.0-rc8
> ---
>  drivers/dma/Kconfig              |   12 +
>  drivers/dma/xilinx/Makefile      |    1 +
>  drivers/dma/xilinx/xilinx_cdma.c |  998 ++++++++++++++++++++++++++++++++++++++
>  include/linux/amba/xilinx_dma.h  |   15 +-
>  4 files changed, 1025 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/dma/xilinx/xilinx_cdma.c
>
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index 44b312e..e2c5cf2 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -365,6 +365,18 @@ config XILINX_VDMA
>           channels, Memory Mapped to Stream (MM2S) and Stream to
>           Memory Mapped (S2MM) for the data transfers.
>
> +config XILINX_CDMA
> +       tristate "Xilinx AXI CDMA Engine"
> +       depends on (ARCH_ZYNQ || MICROBLAZE)
> +       select DMA_ENGINE
> +       help
> +         Enable support for Xilinx AXI CDMA Soft IP.
> +
> +         The AXI CDMA is a soft IP which provides high-bandwidth
> +         Direct Memory Access (DMA) between a memory-mapped source
> +         address and a memory-mapped destination address using the
> +         AXI4 protocol.
> +
>  config DMA_ENGINE
>         bool
>
> diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
> index 3c4e9f2..e1dee77 100644
> --- a/drivers/dma/xilinx/Makefile
> +++ b/drivers/dma/xilinx/Makefile
> @@ -1 +1,2 @@
>  obj-$(CONFIG_XILINX_VDMA) += xilinx_vdma.o
> +obj-$(CONFIG_XILINX_CDMA) += xilinx_cdma.o
> diff --git a/drivers/dma/xilinx/xilinx_cdma.c b/drivers/dma/xilinx/xilinx_cdma.c
> new file mode 100644
> index 0000000..2ae7c77
> --- /dev/null
> +++ b/drivers/dma/xilinx/xilinx_cdma.c
> @@ -0,0 +1,998 @@
> +/*
> + * DMA driver for Xilinx Central DMA Engine
> + *
> + * Copyright (C) 2010 - 2014 Xilinx, Inc. All rights reserved.
> + *
> + * Based on the Freescale DMA driver.
> + *
> + * Description:
> + *  The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
> + *  Access (DMA) between a memory-mapped source address and a memory-mapped
> + *  destination address.
> + *
> + * This program is free software: you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation, either version 2 of the License, or
> + * (at your option) any later version.
> + */
> +
> +#include <linux/amba/xilinx_dma.h>
> +#include <linux/bitops.h>
> +#include <linux/dmapool.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/io.h>
> +#include <linux/module.h>
> +#include <linux/of_address.h>
> +#include <linux/of_dma.h>
> +#include <linux/of_irq.h>
> +#include <linux/of_platform.h>
> +#include <linux/slab.h>
> +
> +#include "../dmaengine.h"
> +
> +/* Register Offsets */
> +#define XILINX_CDMA_CONTROL_OFFSET     0x00
> +#define XILINX_CDMA_STATUS_OFFSET      0x04
> +#define XILINX_CDMA_CDESC_OFFSET       0x08
> +#define XILINX_CDMA_TDESC_OFFSET       0x10
> +#define XILINX_CDMA_SRCADDR_OFFSET     0x18
> +#define XILINX_CDMA_DSTADDR_OFFSET     0x20
> +#define XILINX_CDMA_BTT_OFFSET         0x28
> +
> +/* General register bits definitions */
> +#define XILINX_CDMA_CR_RESET           BIT(2)
> +#define XILINX_CDMA_CR_SGMODE          BIT(3)
> +
> +#define XILINX_CDMA_SR_IDLE            BIT(1)
> +
> +#define XILINX_CDMA_XR_IRQ_IOC_MASK    BIT(12)
> +#define XILINX_CDMA_XR_IRQ_DELAY_MASK  BIT(13)
> +#define XILINX_CDMA_XR_IRQ_ERROR_MASK  BIT(14)
> +#define XILINX_CDMA_XR_IRQ_ALL_MASK    GENMASK(14, 12)
> +
> +#define XILINX_CDMA_XR_DELAY_MASK      GENMASK(31, 24)
> +#define XILINX_CDMA_XR_COALESCE_MASK   GENMASK(23, 16)
> +
> +#define XILINX_CDMA_DELAY_MAX          GENMASK(7, 0)
> +#define XILINX_CDMA_DELAY_SHIFT                24
> +
> +#define XILINX_CDMA_COALESCE_MAX       GENMASK(7, 0)
> +#define XILINX_CDMA_COALESCE_SHIFT     16
> +
> +/* Delay loop counter to prevent hardware failure */
> +#define XILINX_CDMA_RESET_LOOP         1000000
> +
> +/* Maximum transfer length */
> +#define XILINX_CDMA_MAX_TRANS_LEN      GENMASK(22, 0)
> +
> +/**
> + * struct xilinx_cdma_desc_hw - Hardware Descriptor
> + * @next_desc: Next Descriptor Pointer @0x00
> + * @pad1: Reserved @0x04
> + * @src_addr: Source address @0x08
> + * @pad2: Reserved @0x0C
> + * @dest_addr: Destination address @0x10
> + * @pad3: Reserved @0x14
> + * @control: Control field @0x18
> + * @status: Status field @0x1C
> + */
> +struct xilinx_cdma_desc_hw {
> +       u32 next_desc;
> +       u32 pad1;
> +       u32 src_addr;
> +       u32 pad2;
> +       u32 dest_addr;
> +       u32 pad3;
> +       u32 control;
> +       u32 status;
> +} __aligned(64);
> +
> +/**
> + * struct xilinx_cdma_tx_segment - Descriptor segment
> + * @hw: Hardware descriptor
> + * @node: Node in the descriptor segments list
> + * @phys: Physical address of segment
> + */
> +struct xilinx_cdma_tx_segment {
> +       struct xilinx_cdma_desc_hw hw;
> +       struct list_head node;
> +       dma_addr_t phys;
> +} __aligned(64);
> +
> +/**
> + * struct xilinx_cdma_tx_descriptor - Per Transaction structure
> + * @async_tx: Async transaction descriptor
> + * @segments: TX segments list
> + * @node: Node in the channel descriptors list
> + */
> +struct xilinx_cdma_tx_descriptor {
> +       struct dma_async_tx_descriptor async_tx;
> +       struct list_head segments;
> +       struct list_head node;
> +};
> +
> +/**
> + * struct xilinx_cdma_chan - Driver specific cdma channel structure
> + * @xdev: Driver specific device structure
> + * @lock: Descriptor operation lock
> + * @done_list: Complete descriptors
> + * @pending_list: Descriptors waiting
> + * @active_desc: Active descriptor
> + * @allocated_desc: Allocated descriptor
> + * @common: DMA common channel
> + * @desc_pool: Descriptors pool
> + * @dev: The dma device
> + * @irq: Channel IRQ
> + * @has_sg: Support scatter transfers
> + * @err: Channel has errors
> + * @tasklet: Cleanup work after irq
> + */
> +struct xilinx_cdma_chan {
> +       struct xilinx_cdma_device *xdev;
> +       spinlock_t lock;
> +       struct list_head done_list;
> +       struct list_head pending_list;
> +       struct xilinx_cdma_tx_descriptor *active_desc;
> +       struct xilinx_cdma_tx_descriptor *allocated_desc;
> +       struct dma_chan common;
> +       struct dma_pool *desc_pool;
> +       struct device *dev;
> +       int irq;
> +       bool has_sg;
> +       int err;
> +       struct tasklet_struct tasklet;
> +};
> +
> +/**
> + * struct xilinx_cdma_device - CDMA device structure
> + * @regs: I/O mapped base address
> + * @dev: Device Structure
> + * @common: DMA device structure
> + * @chan: Driver specific cdma channel
> + * @has_sg: Specifies whether Scatter-Gather is present or not
> + */
> +struct xilinx_cdma_device {
> +       void __iomem *regs;
> +       struct device *dev;
> +       struct dma_device common;
> +       struct xilinx_cdma_chan *chan;
> +       bool has_sg;
> +};
> +
> +/* Macros */
> +#define to_xilinx_chan(chan) \
> +       container_of(chan, struct xilinx_cdma_chan, common)
> +#define to_cdma_tx_descriptor(tx) \
> +       container_of(tx, struct xilinx_cdma_tx_descriptor, async_tx)
> +
> +/* IO accessors */
> +static inline void cdma_write(struct xilinx_cdma_chan *chan, u32 reg, u32 val)
> +{
> +       writel(val, chan->xdev->regs + reg);
> +}
> +
> +static inline u32 cdma_read(struct xilinx_cdma_chan *chan, u32 reg)
> +{
> +       return readl(chan->xdev->regs + reg);
> +}
> +
> +static inline void cdma_ctrl_clr(struct xilinx_cdma_chan *chan, u32 reg,
> +                                u32 clr)
> +{
> +       cdma_write(chan, reg, cdma_read(chan, reg) & ~clr);
> +}
> +
> +static inline void cdma_ctrl_set(struct xilinx_cdma_chan *chan, u32 reg,
> +                                u32 set)
> +{
> +       cdma_write(chan, reg, cdma_read(chan, reg) | set);
> +}
> +
> +/* -----------------------------------------------------------------------------
> + * Descriptors and segments alloc and free
> + */
> +
> +/**
> + * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
> + * @chan: Driver specific cdma channel
> + *
> + * Return: The allocated segment on success and NULL on failure.
> + */
> +static struct xilinx_cdma_tx_segment *
> +xilinx_cdma_alloc_tx_segment(struct xilinx_cdma_chan *chan)
> +{
> +       struct xilinx_cdma_tx_segment *segment;
> +       dma_addr_t phys;
> +
> +       segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
> +       if (!segment)
> +               return NULL;
> +
> +       memset(segment, 0, sizeof(*segment));
> +       segment->phys = phys;
> +
> +       return segment;
> +}
> +
> +/**
> + * xilinx_cdma_free_tx_segment - Free transaction segment
> + * @chan: Driver specific cdma channel
> + * @segment: cdma transaction segment
> + */
> +static void xilinx_cdma_free_tx_segment(struct xilinx_cdma_chan *chan,
> +                                       struct xilinx_cdma_tx_segment *segment)
> +{
> +       dma_pool_free(chan->desc_pool, segment, segment->phys);
> +}
> +
> +/**
> + * xilinx_cdma_alloc_tx_descriptor - Allocate transaction descriptor
> + * @chan: Driver specific cdma channel
> + *
> + * Return: The allocated descriptor on success and NULL on failure.
> + */
> +static struct xilinx_cdma_tx_descriptor *
> +xilinx_cdma_alloc_tx_descriptor(struct xilinx_cdma_chan *chan)
> +{
> +       struct xilinx_cdma_tx_descriptor *desc;
> +       unsigned long flags;
> +
> +       if (chan->allocated_desc)
> +               return chan->allocated_desc;
> +
> +       desc = kzalloc(sizeof(*desc), GFP_KERNEL);
> +       if (!desc)
> +               return NULL;
> +
> +       spin_lock_irqsave(&chan->lock, flags);
> +       chan->allocated_desc = desc;
> +       spin_unlock_irqrestore(&chan->lock, flags);
> +
> +       INIT_LIST_HEAD(&desc->segments);
> +
> +       return desc;
> +}
> +
> +/**
> + * xilinx_cdma_free_tx_descriptor - Free transaction descriptor
> + * @chan: Driver specific cdma channel
> + * @desc: cdma transaction descriptor
> + */
> +static void
> +xilinx_cdma_free_tx_descriptor(struct xilinx_cdma_chan *chan,
> +                              struct xilinx_cdma_tx_descriptor *desc)
> +{
> +       struct xilinx_cdma_tx_segment *segment, *next;
> +
> +       if (!desc)
> +               return;
> +
> +       list_for_each_entry_safe(segment, next, &desc->segments, node) {
> +               list_del(&segment->node);
> +               xilinx_cdma_free_tx_segment(chan, segment);
> +       }
> +
> +       kfree(desc);
> +}
> +
> +/**
> + * xilinx_cdma_alloc_chan_resources - Allocate channel resources
> + * @dchan: DMA channel
> + *
> + * Return: '0' on success and failure value on error
> + */
> +static int xilinx_cdma_alloc_chan_resources(struct dma_chan *dchan)
> +{
> +       struct xilinx_cdma_chan *chan = to_xilinx_chan(dchan);
> +
> +       /* Has this channel already been allocated? */
> +       if (chan->desc_pool)
> +               return 0;
> +
> +       /*
> +        * We need the descriptor to be aligned to 64 bytes
> +        * to meet the Xilinx DMA specification requirement.
> +        */
> +       chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
> +                               chan->dev,
> +                               sizeof(struct xilinx_cdma_tx_segment),
> +                               __alignof__(struct xilinx_cdma_tx_segment), 0);
> +       if (!chan->desc_pool) {
> +               dev_err(chan->dev,
> +                       "unable to allocate channel descriptor pool\n");
> +               return -ENOMEM;
> +       }
> +
> +       dma_cookie_init(dchan);
> +       return 0;
> +}
> +
> +/**
> + * xilinx_cdma_free_desc_list - Free descriptors list
> + * @chan: Driver specific cdma channel
> + * @list: List to parse and delete the descriptor
> + */
> +static void xilinx_cdma_free_desc_list(struct xilinx_cdma_chan *chan,
> +                                      struct list_head *list)
> +{
> +       struct xilinx_cdma_tx_descriptor *desc, *next;
> +
> +       list_for_each_entry_safe(desc, next, list, node) {
> +               list_del(&desc->node);
> +               xilinx_cdma_free_tx_descriptor(chan, desc);
> +       }
> +}
> +
> +/**
> + * xilinx_cdma_free_chan_resources - Free channel resources
> + * @dchan: DMA channel
> + */
> +static void xilinx_cdma_free_chan_resources(struct dma_chan *dchan)
> +{
> +       struct xilinx_cdma_chan *chan = to_xilinx_chan(dchan);
> +       unsigned long flags;
> +
> +       spin_lock_irqsave(&chan->lock, flags);
> +       xilinx_cdma_free_desc_list(chan, &chan->done_list);
> +       xilinx_cdma_free_desc_list(chan, &chan->pending_list);
> +       spin_unlock_irqrestore(&chan->lock, flags);
> +
> +       dma_pool_destroy(chan->desc_pool);
> +       chan->desc_pool = NULL;
> +}
> +
> +/**
> + * xilinx_cdma_chan_desc_cleanup - Clean channel descriptors
> + * @chan: Driver specific cdma channel
> + */
> +static void xilinx_cdma_chan_desc_cleanup(struct xilinx_cdma_chan *chan)
> +{
> +       struct xilinx_cdma_tx_descriptor *desc, *next;
> +       unsigned long flags;
> +
> +       spin_lock_irqsave(&chan->lock, flags);
> +
> +       list_for_each_entry_safe(desc, next, &chan->done_list, node) {
> +               dma_async_tx_callback callback;
> +               void *callback_param;
> +
> +               /* Remove from the list of running transactions */
> +               list_del(&desc->node);
> +
> +               /* Run the link descriptor callback function */
> +               callback = desc->async_tx.callback;
> +               callback_param = desc->async_tx.callback_param;
> +               if (callback) {
> +                       spin_unlock_irqrestore(&chan->lock, flags);
> +                       callback(callback_param);
> +                       spin_lock_irqsave(&chan->lock, flags);
> +               }
> +
> +               /* Run any dependencies, then free the descriptor */
> +               dma_run_dependencies(&desc->async_tx);
> +               xilinx_cdma_free_tx_descriptor(chan, desc);
> +       }
> +
> +       spin_unlock_irqrestore(&chan->lock, flags);
> +}
> +
> +/**
> + * xilinx_cdma_tx_status - Get CDMA transaction status
> + * @dchan: DMA channel
> + * @cookie: Transaction identifier
> + * @txstate: Transaction state
> + *
> + * Return: DMA transaction status
> + */
> +static enum dma_status xilinx_cdma_tx_status(struct dma_chan *dchan,
> +                                       dma_cookie_t cookie,
> +                                       struct dma_tx_state *txstate)
> +{
> +       return dma_cookie_status(dchan, cookie, txstate);
> +}
> +
> +/**
> + * cdma_is_idle - Check if cdma channel is idle
> + * @chan: Driver specific cdma channel
> + *
> + * Return: 'true' if idle, 'false' if not.
> + */
> +static bool cdma_is_idle(struct xilinx_cdma_chan *chan)
> +{
> +       return cdma_read(chan, XILINX_CDMA_STATUS_OFFSET) & XILINX_CDMA_SR_IDLE;
> +}
> +
> +/**
> + * xilinx_cdma_start_transfer - Starts cdma transfer
> + * @chan: Driver specific channel struct pointer
> + */
> +static void xilinx_cdma_start_transfer(struct xilinx_cdma_chan *chan)
> +{
> +       unsigned long flags;
> +       struct xilinx_cdma_tx_descriptor *desc;
> +       struct xilinx_cdma_tx_segment *head, *tail;
> +
> +       if (chan->err)
> +               return;
> +
> +       spin_lock_irqsave(&chan->lock, flags);
> +
> +       if (list_empty(&chan->pending_list))
> +               goto out_unlock;
> +
> +       /* If hardware is busy, cannot submit */
> +       if (!cdma_is_idle(chan)) {
> +               dev_dbg(chan->dev, "DMA controller still busy %x\n",
> +                       cdma_read(chan, XILINX_CDMA_STATUS_OFFSET));
> +               goto out_unlock;
> +       }
> +
> +       desc = list_first_entry(&chan->pending_list,
> +                               struct xilinx_cdma_tx_descriptor, node);
> +
> +       if (chan->has_sg) {
> +               head = list_first_entry(&desc->segments,
> +                                       struct xilinx_cdma_tx_segment, node);
> +               tail = list_entry(desc->segments.prev,
> +                                 struct xilinx_cdma_tx_segment, node);
> +
> +               cdma_write(chan, XILINX_CDMA_CDESC_OFFSET, head->phys);
> +
> +               /* Update tail ptr register which will start the transfer */
> +               cdma_write(chan, XILINX_CDMA_TDESC_OFFSET, tail->phys);
> +       } else {
> +               /* In simple mode */
> +               struct xilinx_cdma_tx_segment *segment;
> +               struct xilinx_cdma_desc_hw *hw;
> +
> +               segment = list_first_entry(&desc->segments,
> +                                          struct xilinx_cdma_tx_segment,
> +                                          node);
> +
> +               hw = &segment->hw;
> +
> +               cdma_write(chan, XILINX_CDMA_SRCADDR_OFFSET, hw->src_addr);
> +               cdma_write(chan, XILINX_CDMA_DSTADDR_OFFSET, hw->dest_addr);
> +
> +               /* Start the transfer */
> +               cdma_write(chan, XILINX_CDMA_BTT_OFFSET,
> +                          hw->control & XILINX_CDMA_MAX_TRANS_LEN);
> +       }
> +
> +       list_del(&desc->node);
> +       chan->active_desc = desc;
> +
> +out_unlock:
> +       spin_unlock_irqrestore(&chan->lock, flags);
> +}
> +
> +/**
> + * xilinx_cdma_issue_pending - Issue pending transactions
> + * @dchan: DMA channel
> + */
> +static void xilinx_cdma_issue_pending(struct dma_chan *dchan)
> +{
> +       struct xilinx_cdma_chan *chan = to_xilinx_chan(dchan);
> +
> +       xilinx_cdma_start_transfer(chan);
> +}
> +
> +/**
> + * xilinx_cdma_complete_descriptor - Mark the active descriptor as complete
> + * @chan: xilinx DMA channel
> + */
> +static void xilinx_cdma_complete_descriptor(struct xilinx_cdma_chan *chan)
> +{
> +       struct xilinx_cdma_tx_descriptor *desc;
> +       unsigned long flags;
> +
> +       spin_lock_irqsave(&chan->lock, flags);
> +
> +       desc = chan->active_desc;
> +       if (!desc) {
> +               dev_dbg(chan->dev, "no running descriptors\n");
> +               goto out_unlock;
> +       }
> +
> +       dma_cookie_complete(&desc->async_tx);
> +       list_add_tail(&desc->node, &chan->done_list);
> +
> +       chan->active_desc = NULL;
> +
> +out_unlock:
> +       spin_unlock_irqrestore(&chan->lock, flags);
> +}
> +
> +/**
> + * xilinx_cdma_chan_reset - Reset CDMA channel
> + * @chan: Driver specific CDMA channel
> + *
> + * Return: '0' on success and failure value on error
> + */
> +static int xilinx_cdma_chan_reset(struct xilinx_cdma_chan *chan)
> +{
> +       int loop = XILINX_CDMA_RESET_LOOP;
> +       u32 tmp;
> +
> +       cdma_ctrl_set(chan, XILINX_CDMA_CONTROL_OFFSET,
> +                     XILINX_CDMA_CR_RESET);
> +
> +       tmp = cdma_read(chan, XILINX_CDMA_CONTROL_OFFSET) &
> +             XILINX_CDMA_CR_RESET;
> +
> +       /* Wait for the hardware to finish reset */
> +       do {
> +               tmp = cdma_read(chan, XILINX_CDMA_CONTROL_OFFSET) &
> +                     XILINX_CDMA_CR_RESET;
> +       } while (loop-- && tmp);
> +
> +       if (!loop) {
> +               dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
> +                       cdma_read(chan, XILINX_CDMA_CONTROL_OFFSET),
> +                       cdma_read(chan, XILINX_CDMA_STATUS_OFFSET));
> +               return -EBUSY;
> +       }
> +
> +       /* Enable interrupts */
> +       cdma_ctrl_set(chan, XILINX_CDMA_CONTROL_OFFSET,
> +                     XILINX_CDMA_XR_IRQ_ALL_MASK);
> +
> +       /* Enable SG Mode */
> +       if (chan->has_sg)
> +               cdma_ctrl_set(chan, XILINX_CDMA_CONTROL_OFFSET,
> +                             XILINX_CDMA_CR_SGMODE);
> +
> +       return 0;
> +}
> +
> +/**
> + * xilinx_cdma_irq_handler - CDMA Interrupt handler
> + * @irq: IRQ number
> + * @data: Pointer to the Xilinx CDMA channel structure
> + *
> + * Return: IRQ_HANDLED/IRQ_NONE
> + */
> +static irqreturn_t xilinx_cdma_irq_handler(int irq, void *data)
> +{
> +       struct xilinx_cdma_chan *chan = data;
> +       u32 status;
> +
> +       status = cdma_read(chan, XILINX_CDMA_STATUS_OFFSET);
> +       if (!(status & XILINX_CDMA_XR_IRQ_ALL_MASK))
> +               return IRQ_NONE;
> +
> +       /* Ack the interrupts */
> +       cdma_write(chan, XILINX_CDMA_STATUS_OFFSET,
> +                  XILINX_CDMA_XR_IRQ_ALL_MASK);
> +
> +       if (status & XILINX_CDMA_XR_IRQ_ERROR_MASK) {
> +               dev_err(chan->dev,
> +                       "Channel %p has errors %x, cdr %x tdr %x\n",
> +                       chan,
> +                       cdma_read(chan, XILINX_CDMA_STATUS_OFFSET),
> +                       cdma_read(chan, XILINX_CDMA_CDESC_OFFSET),
> +                       cdma_read(chan, XILINX_CDMA_TDESC_OFFSET));
> +               chan->err = true;
> +       }
> +
> +       /*
> +        * Device takes too long to do the transfer when user requires
> +        * responsiveness
> +        */
> +       if (status & XILINX_CDMA_XR_IRQ_DELAY_MASK)
> +               dev_dbg(chan->dev, "Inter-packet latency too long\n");
> +
> +       if (status & XILINX_CDMA_XR_IRQ_IOC_MASK) {
> +               xilinx_cdma_complete_descriptor(chan);
> +               xilinx_cdma_start_transfer(chan);
> +       }
> +
> +       tasklet_schedule(&chan->tasklet);
> +       return IRQ_HANDLED;
> +}
> +
> +/**
> + * xilinx_cdma_do_tasklet - Schedule completion tasklet
> + * @data: Pointer to the Xilinx cdma channel structure
> + */
> +static void xilinx_cdma_do_tasklet(unsigned long data)
> +{
> +       struct xilinx_cdma_chan *chan = (struct xilinx_cdma_chan *)data;
> +
> +       xilinx_cdma_chan_desc_cleanup(chan);
> +}
> +
> +/**
> + * xilinx_cdma_tx_submit - Submit DMA transaction
> + * @tx: Async transaction descriptor
> + *
> + * Return: cookie value on success and failure value on error
> + */
> +static dma_cookie_t xilinx_cdma_tx_submit(struct dma_async_tx_descriptor *tx)
> +{
> +       struct xilinx_cdma_chan *chan = to_xilinx_chan(tx->chan);
> +       struct xilinx_cdma_tx_descriptor *desc = to_cdma_tx_descriptor(tx);
> +       dma_cookie_t cookie;
> +       unsigned long flags;
> +       int err;
> +
> +       if (chan->err) {
> +               /*
> +                * If reset fails, need to hard reset the system.
> +                * Channel is no longer functional
> +                */
> +               err = xilinx_cdma_chan_reset(chan);
> +               if (err < 0)
> +                       return err;
> +       }
> +
> +       spin_lock_irqsave(&chan->lock, flags);
> +
> +       cookie = dma_cookie_assign(tx);
> +
> +       /* Append the transaction to the pending transactions queue. */
> +       list_add_tail(&desc->node, &chan->pending_list);
> +
> +       /* Free the allocated desc */
> +       chan->allocated_desc = NULL;
> +
> +       spin_unlock_irqrestore(&chan->lock, flags);
> +
> +       return cookie;
> +}
> +
> +/**
> + * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
> + * @dchan: DMA channel
> + * @dma_dst: destination address
> + * @dma_src: source address
> + * @len: transfer length
> + * @flags: transfer ack flags
> + *
> + * Return: Async transaction descriptor on success and NULL on failure
> + */
> +static struct dma_async_tx_descriptor *
> +xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
> +                       dma_addr_t dma_src, size_t len, unsigned long flags)
> +{
> +       struct xilinx_cdma_chan *chan = to_xilinx_chan(dchan);
> +       struct xilinx_cdma_desc_hw *hw;
> +       struct xilinx_cdma_tx_descriptor *desc;
> +       struct xilinx_cdma_tx_segment *segment, *prev;
> +
> +       if (!len || len > XILINX_CDMA_MAX_TRANS_LEN)
> +               return NULL;
> +
> +       desc = xilinx_cdma_alloc_tx_descriptor(chan);
> +       if (!desc)
> +               return NULL;
> +
> +       dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
> +       desc->async_tx.tx_submit = xilinx_cdma_tx_submit;
> +       async_tx_ack(&desc->async_tx);
> +
> +       /* Allocate the link descriptor from DMA pool */
> +       segment = xilinx_cdma_alloc_tx_segment(chan);
> +       if (!segment)
> +               goto error;
> +
> +       hw = &segment->hw;
> +       hw->control = len;
> +       hw->src_addr = dma_src;
> +       hw->dest_addr = dma_dst;
> +
> +       /* Fill the previous next descriptor with current */
> +       prev = list_last_entry(&desc->segments,
> +                              struct xilinx_cdma_tx_segment, node);
> +       prev->hw.next_desc = segment->phys;
> +
> +       /* Insert the segment into the descriptor segments list. */
> +       list_add_tail(&segment->node, &desc->segments);
> +
> +       prev = segment;
> +
> +       /* Link the last hardware descriptor with the first. */
> +       segment = list_first_entry(&desc->segments,
> +                                  struct xilinx_cdma_tx_segment, node);
> +       prev->hw.next_desc = segment->phys;
> +
> +       return &desc->async_tx;
> +
> +error:
> +       xilinx_cdma_free_tx_descriptor(chan, desc);
> +       return NULL;
> +}
> +
> +/**
> + * xilinx_cdma_device_control - Configure DMA channel of the device
> + * @dchan: DMA Channel pointer
> + * @cmd: DMA control command
> + * @arg: Channel configuration
> + *
> + * Return: '0' on success and failure value on error
> + */
> +static int xilinx_cdma_device_control(struct dma_chan *dchan,
> +                                     enum dma_ctrl_cmd cmd, unsigned long arg)
> +{
> +       struct xilinx_cdma_chan *chan = to_xilinx_chan(dchan);
> +       unsigned long flags;
> +
> +       if (cmd != DMA_TERMINATE_ALL)
> +               return -ENXIO;
> +
> +       spin_lock_irqsave(&chan->lock, flags);
> +
> +       /* Remove and free all of the descriptors in the lists */
> +       xilinx_cdma_free_desc_list(chan, &chan->pending_list);
> +       xilinx_cdma_free_desc_list(chan, &chan->done_list);
> +
> +       spin_unlock_irqrestore(&chan->lock, flags);
> +
> +       return 0;
> +}
> +
> +/**
> + * xilinx_cdma_channel_set_config - Configure cdma channel
> + * @dchan: DMA channel
> + * @cfg: cdma device configuration pointer
> + *
> + * Return: '0' on success and failure value on error
> + */
> +int xilinx_cdma_channel_set_config(struct dma_chan *dchan,
> +                                       struct xilinx_cdma_config *cfg)
> +{
> +       struct xilinx_cdma_chan *chan = to_xilinx_chan(dchan);
> +       u32 reg = cdma_read(chan, XILINX_CDMA_CONTROL_OFFSET);
> +
> +       if (cfg->reset)
> +               return xilinx_cdma_chan_reset(chan);
> +
> +       if (cfg->coalesc <= XILINX_CDMA_COALESCE_MAX) {
> +               reg &= ~XILINX_CDMA_XR_COALESCE_MASK;
> +               reg |= cfg->coalesc << XILINX_CDMA_COALESCE_SHIFT;
> +       }
> +
> +       if (cfg->delay <= XILINX_CDMA_DELAY_MAX) {
> +               reg &= ~XILINX_CDMA_XR_DELAY_MASK;
> +               reg |= cfg->delay << XILINX_CDMA_DELAY_SHIFT;
> +       }
> +
> +       cdma_write(chan, XILINX_CDMA_CONTROL_OFFSET, reg);
> +
> +       return 0;
> +}
> +EXPORT_SYMBOL(xilinx_cdma_channel_set_config);
> +
> +/* -----------------------------------------------------------------------------
> + * Probe and remove
> + */
> +
> +/**
> + * xilinx_cdma_free_channel - Channel remove function
> + * @chan: Driver specific cdma channel
> + */
> +static void xilinx_cdma_free_channel(struct xilinx_cdma_chan *chan)
> +{
> +       /* Disable Interrupts */
> +       cdma_ctrl_clr(chan, XILINX_CDMA_CONTROL_OFFSET,
> +                     XILINX_CDMA_XR_IRQ_ALL_MASK);
> +
> +       if (chan->irq > 0)
> +               free_irq(chan->irq, chan);
> +
> +       tasklet_kill(&chan->tasklet);
> +
> +       list_del(&chan->common.device_node);
> +}
> +
> +/**
> + * xilinx_cdma_chan_probe - Per Channel Probing
> + * It gets channel features from the device tree entry and
> + * initializes special channel handling routines
> + *
> + * @xdev: Driver specific device structure
> + * @node: Device node
> + *
> + * Return: '0' on success and failure value on error
> + */
> +static int xilinx_cdma_chan_probe(struct xilinx_cdma_device *xdev,
> +                                 struct device_node *node)
> +{
> +       struct xilinx_cdma_chan *chan;
> +       bool has_dre;
> +       u32 value, width;
> +       int err;
> +
> +       /* Alloc channel */
> +       chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
> +       if (!chan)
> +               return -ENOMEM;
> +
> +       chan->dev = xdev->dev;
> +       chan->has_sg = xdev->has_sg;
> +       chan->xdev = xdev;
> +
> +       spin_lock_init(&chan->lock);
> +       INIT_LIST_HEAD(&chan->pending_list);
> +       INIT_LIST_HEAD(&chan->done_list);
> +
> +       /* Retrieve the channel properties from the device tree */
> +       has_dre = of_property_read_bool(node, "xlnx,include-dre");
> +
> +       err = of_property_read_u32(node, "xlnx,datawidth", &value);
> +       if (err) {
> +               dev_err(xdev->dev, "unable to read datawidth property");
> +               return err;
> +       }
> +       width = value >> 3; /* convert bits to bytes */
> +
> +       /* If data width is greater than 8 bytes, DRE is not in hw */
> +       if (width > 8)
> +               has_dre = false;
> +
> +       if (!has_dre)
> +               xdev->common.copy_align = fls(width - 1);
> +
> +       /* Request the interrupt */
> +       chan->irq = irq_of_parse_and_map(node, 0);
> +       err = request_irq(chan->irq, xilinx_cdma_irq_handler, IRQF_SHARED,
> +                         "xilinx-cdma-controller", chan);
> +       if (err) {
> +               dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
> +               return err;
> +       }
> +
> +       /* Initialize the tasklet */
> +       tasklet_init(&chan->tasklet, xilinx_cdma_do_tasklet,
> +                    (unsigned long)chan);
> +
> +       /*
> +        * Initialize the DMA channel and add it to the DMA engine channels
> +        * list.
> +        */
> +       chan->common.device = &xdev->common;
> +
> +       list_add_tail(&chan->common.device_node, &xdev->common.channels);
> +       xdev->chan = chan;
> +
> +       /* Initialize the channel */
> +       err = xilinx_cdma_chan_reset(chan);
> +       if (err) {
> +               dev_err(xdev->dev, "Reset channel failed\n");
> +               return err;
> +       }
> +
> +       return 0;
> +}
> +
> +/**
> + * of_dma_xilinx_xlate - Translation function
> + * @dma_spec: Pointer to DMA specifier as found in the device tree
> + * @ofdma: Pointer to DMA controller data
> + *
> + * Return: DMA channel pointer on success and NULL on error
> + */
> +static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
> +                                               struct of_dma *ofdma)
> +{
> +       struct xilinx_cdma_device *xdev = ofdma->of_dma_data;
> +
> +       return dma_get_slave_channel(&xdev->chan->common);
> +}
> +
> +/**
> + * xilinx_cdma_probe - Driver probe function
> + * @pdev: Pointer to the platform_device structure
> + *
> + * Return: '0' on success and failure value on error
> + */
> +static int xilinx_cdma_probe(struct platform_device *pdev)
> +{
> +       struct xilinx_cdma_device *xdev;
> +       struct device_node *child, *node;
> +       struct resource *res;
> +       int ret;
> +
> +       xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
> +       if (!xdev)
> +               return -ENOMEM;
> +
> +       xdev->dev = &(pdev->dev);
> +       INIT_LIST_HEAD(&xdev->common.channels);
> +
> +       node = pdev->dev.of_node;
> +
> +       /* Map the registers */
> +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +       xdev->regs = devm_ioremap_resource(&pdev->dev, res);
> +       if (IS_ERR(xdev->regs))
> +               return PTR_ERR(xdev->regs);
> +
> +       /* Check if SG is enabled */
> +       xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
> +
> +       dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
> +       xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
> +
> +       xdev->common.device_control = xilinx_cdma_device_control;
> +       xdev->common.device_issue_pending = xilinx_cdma_issue_pending;
> +       xdev->common.device_alloc_chan_resources =
> +                       xilinx_cdma_alloc_chan_resources;
> +       xdev->common.device_free_chan_resources =
> +                       xilinx_cdma_free_chan_resources;
> +       xdev->common.device_tx_status = xilinx_cdma_tx_status;
> +       xdev->common.dev = &pdev->dev;
> +
> +       platform_set_drvdata(pdev, xdev);
> +
> +       child = of_get_next_child(node, NULL);
> +       if (!child) {
> +               dev_err(&pdev->dev, "No channel found\n");
> +               return -ENODEV;
> +       }
> +
> +       ret = xilinx_cdma_chan_probe(xdev, child);
> +       if (ret) {
> +               dev_err(&pdev->dev, "Probing channel failed\n");
> +               goto free_chan_resources;
> +       }
> +
> +       dma_async_device_register(&xdev->common);
> +
> +       ret = of_dma_controller_register(node, of_dma_xilinx_xlate, xdev);
> +       if (ret) {
> +               dev_err(&pdev->dev, "Unable to register DMA to DT\n");
> +               dma_async_device_unregister(&xdev->common);
> +               goto free_chan_resources;
> +       }
> +
> +       dev_info(&pdev->dev, "Xilinx AXI CDMA Engine driver Probed!!\n");
> +
> +       return 0;
> +
> +free_chan_resources:
> +       xilinx_cdma_free_channel(xdev->chan);
> +
> +       return ret;
> +}
> +
> +/**
> + * xilinx_cdma_remove - Driver remove function
> + * @pdev: Pointer to the platform_device structure
> + *
> + * Return: Always '0'
> + */
> +static int xilinx_cdma_remove(struct platform_device *pdev)
> +{
> +       struct xilinx_cdma_device *xdev = platform_get_drvdata(pdev);
> +
> +       of_dma_controller_free(pdev->dev.of_node);
> +       dma_async_device_unregister(&xdev->common);
> +
> +       xilinx_cdma_free_channel(xdev->chan);
> +
> +       return 0;
> +}
> +
> +static const struct of_device_id xilinx_cdma_of_match[] = {
> +       { .compatible = "xlnx,axi-cdma-1.00.a", },
> +       {}
> +};
> +MODULE_DEVICE_TABLE(of, xilinx_cdma_of_match);
> +
> +static struct platform_driver xilinx_cdma_driver = {
> +       .driver = {
> +               .name = "xilinx-cdma",
> +               .owner = THIS_MODULE,
> +               .of_match_table = xilinx_cdma_of_match,
> +       },
> +       .probe = xilinx_cdma_probe,
> +       .remove = xilinx_cdma_remove,
> +};
> +
> +module_platform_driver(xilinx_cdma_driver);
> +
> +MODULE_AUTHOR("Xilinx, Inc.");
> +MODULE_DESCRIPTION("Xilinx CDMA driver");
> +MODULE_LICENSE("GPL v2");
> diff --git a/include/linux/amba/xilinx_dma.h b/include/linux/amba/xilinx_dma.h
> index 34b98f2..fb74ff7 100644
> --- a/include/linux/amba/xilinx_dma.h
> +++ b/include/linux/amba/xilinx_dma.h
> @@ -41,7 +41,20 @@ struct xilinx_vdma_config {
>         int ext_fsync;
>  };
>
> +/**
> + * struct xilinx_cdma_config - CDMA Configuration structure
> + * @coalesc: Interrupt coalescing threshold
> + * @delay: Delay counter
> + * @reset: Reset Channel
> + */
> +struct xilinx_cdma_config {
> +       int coalesc;
> +       int delay;
> +       int reset;
> +};
> +
>  int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
>                                         struct xilinx_vdma_config *cfg);
> -
> +int xilinx_cdma_channel_set_config(struct dma_chan *dchan,
> +                                       struct xilinx_cdma_config *cfg);
>  #endif
> --
> 1.7.9.5
>
Jonathan Corbet April 8, 2014, 2:44 p.m. UTC | #2
On Mon, 7 Apr 2014 20:22:54 +0530
Srikanth Thokala <sthokal@xilinx.com> wrote:

> Kindly review this driver and please let me know if you have any comments.

Here's some comments from a quick look at the patch; they do not qualify as
a proper review by any means.

> +/**
> + * struct xilinx_cdma_chan - Driver specific cdma channel structure
> + * @xdev: Driver specific device structure
> + * @lock: Descriptor operation lock
> + * @done_list: Complete descriptors
> + * @pending_list: Descriptors waiting
> + * @active_desc: Active descriptor
> + * @allocated_desc: Allocated descriptor
> + * @common: DMA common channel
> + * @desc_pool: Descriptors pool
> + * @dev: The dma device
> + * @irq: Channel IRQ
> + * @has_sg: Support scatter transfers
> + * @err: Channel has errors
> + * @tasklet: Cleanup work after irq
> + */
> +struct xilinx_cdma_chan {
> +       struct xilinx_cdma_device *xdev;
> +       spinlock_t lock;
> +       struct list_head done_list;
> +       struct list_head pending_list;
> +       struct xilinx_cdma_tx_descriptor *active_desc;
> +       struct xilinx_cdma_tx_descriptor *allocated_desc;
> +       struct dma_chan common;
> +       struct dma_pool *desc_pool;
> +       struct device *dev;
> +       int irq;
> +       bool has_sg;
> +       int err;
> +       struct tasklet_struct tasklet;
> +};

Have you thought about using a threaded IRQ handler instead of a tasklet?
The tasklet interface has its pitfalls and some reviewers frown on the
addition of more users.

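As a very rough illustration of that suggestion, the conversion might look
something like the sketch below; the split of work between the hard and
threaded halves, and the new "pending" field used to hand the status bits
to the thread, are assumptions, not part of the posted driver.

/*
 * Hedged sketch only: the hard half acks the interrupt and records the
 * status; the threaded half then does, in process context, the work the
 * IRQ handler and tasklet do today.
 */
static irqreturn_t xilinx_cdma_irq(int irq, void *data)
{
        struct xilinx_cdma_chan *chan = data;
        u32 status = cdma_read(chan, XILINX_CDMA_STATUS_OFFSET);

        if (!(status & XILINX_CDMA_XR_IRQ_ALL_MASK))
                return IRQ_NONE;

        /* Ack now; defer the descriptor handling to the thread */
        cdma_write(chan, XILINX_CDMA_STATUS_OFFSET,
                   status & XILINX_CDMA_XR_IRQ_ALL_MASK);
        chan->pending = status;                 /* assumed new field */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t xilinx_cdma_irq_thread(int irq, void *data)
{
        struct xilinx_cdma_chan *chan = data;

        if (chan->pending & XILINX_CDMA_XR_IRQ_ERROR_MASK)
                chan->err = true;

        if (chan->pending & XILINX_CDMA_XR_IRQ_IOC_MASK) {
                xilinx_cdma_complete_descriptor(chan);
                xilinx_cdma_start_transfer(chan);
        }

        xilinx_cdma_chan_desc_cleanup(chan);
        return IRQ_HANDLED;
}

/* ...and in xilinx_cdma_chan_probe(), replacing request_irq() and
 * tasklet_init():
 */
err = request_threaded_irq(chan->irq, xilinx_cdma_irq,
                           xilinx_cdma_irq_thread, IRQF_SHARED,
                           "xilinx-cdma-controller", chan);
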
[...]

> +/**
> + * xilinx_cdma_alloc_tx_descriptor - Allocate transaction descriptor
> + * @chan: Driver specific cdma channel
> + *
> + * Return: The allocated descriptor on success and NULL on failure.
> + */
> +static struct xilinx_cdma_tx_descriptor *
> +xilinx_cdma_alloc_tx_descriptor(struct xilinx_cdma_chan *chan)
> +{
> +       struct xilinx_cdma_tx_descriptor *desc;
> +       unsigned long flags;
> +
> +       if (chan->allocated_desc)
> +               return chan->allocated_desc;

This looks racy.  What happens if two threads hit here at once, or, in
general, some other thread does something with chan->allocated_desc?

> +       desc = kzalloc(sizeof(*desc), GFP_KERNEL);
> +       if (!desc)
> +               return NULL;
> +
> +       spin_lock_irqsave(&chan->lock, flags);
> +       chan->allocated_desc = desc;
> +       spin_unlock_irqrestore(&chan->lock, flags);
> +
> +       INIT_LIST_HEAD(&desc->segments);

Using the lock to protect a single assignment doesn't really buy you much;
it's what is going on outside of the locked region that is going to bite
you.

Also, as soon as you do that assignment, desc is visible to the rest of the
world.  Somebody else could try to use it before you get around to that
INIT_LIST_HEAD() call, with unpleasant results.  You really need a lock
around the whole test/allocate/initialize operation.

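One shape such a fix could take, as a sketch only and assuming nothing
else about the descriptor lifecycle changes: allocate with GFP_KERNEL
outside the lock (kzalloc may sleep there), then test and publish under
it, with the list head initialized before the descriptor becomes
visible:

static struct xilinx_cdma_tx_descriptor *
xilinx_cdma_alloc_tx_descriptor(struct xilinx_cdma_chan *chan)
{
        struct xilinx_cdma_tx_descriptor *desc, *old;
        unsigned long flags;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return NULL;
        INIT_LIST_HEAD(&desc->segments);

        spin_lock_irqsave(&chan->lock, flags);
        old = chan->allocated_desc;
        if (!old)
                chan->allocated_desc = desc;    /* published fully set up */
        spin_unlock_irqrestore(&chan->lock, flags);

        if (old) {
                /* Lost the race; drop ours and reuse the existing one */
                kfree(desc);
                return old;
        }

        return desc;
}
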
> +       return desc;
> +}
> +
> +/**
> + * xilinx_cdma_free_tx_descriptor - Free transaction descriptor
> + * @chan: Driver specific cdma channel
> + * @desc: cdma transaction descriptor
> + */
> +static void
> +xilinx_cdma_free_tx_descriptor(struct xilinx_cdma_chan *chan,
> +                              struct xilinx_cdma_tx_descriptor *desc)
> +{
> +       struct xilinx_cdma_tx_segment *segment, *next;
> +
> +       if (!desc)
> +               return;
> +
> +       list_for_each_entry_safe(segment, next, &desc->segments, node) {
> +               list_del(&segment->node);
> +               xilinx_cdma_free_tx_segment(chan, segment);
> +       }
> +
> +       kfree(desc);
> +}

What are the locking requirements for this function?  It looks from a
casual reading like some callers hold the spinlock while others do not.  It
would be good to sort out (and document!) the requirement here.

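kernel-doc does have a conventional slot for that; for example (the rule
stated below is a placeholder, the real requirement has to be decided
first):

/**
 * xilinx_cdma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific cdma channel
 * @desc: cdma transaction descriptor
 *
 * Context: Must be called with chan->lock held (placeholder; pick one
 *          convention and make every caller follow it).
 */
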
> +/**
> + * xilinx_cdma_free_chan_resources - Free channel resources
> + * @dchan: DMA channel
> + */
> +static void xilinx_cdma_free_chan_resources(struct dma_chan *dchan)
> +{
> +       struct xilinx_cdma_chan *chan = to_xilinx_chan(dchan);
> +       unsigned long flags;
> +
> +       spin_lock_irqsave(&chan->lock, flags);
> +       xilinx_cdma_free_desc_list(chan, &chan->done_list);
> +       xilinx_cdma_free_desc_list(chan, &chan->pending_list);
> +       spin_unlock_irqrestore(&chan->lock, flags);
> +
> +       dma_pool_destroy(chan->desc_pool);
> +       chan->desc_pool = NULL;

Why is this part done outside the lock?

> + */
> +static void xilinx_cdma_chan_desc_cleanup(struct xilinx_cdma_chan *chan)
> +{
> +       struct xilinx_cdma_tx_descriptor *desc, *next;
> +       unsigned long flags;
> +
> +       spin_lock_irqsave(&chan->lock, flags);
> +
> +       list_for_each_entry_safe(desc, next, &chan->done_list, node) {
> +               dma_async_tx_callback callback;
> +               void *callback_param;
> +
> +               /* Remove from the list of running transactions */
> +               list_del(&desc->node);
> +
> +               /* Run the link descriptor callback function */
> +               callback = desc->async_tx.callback;
> +               callback_param = desc->async_tx.callback_param;
> +               if (callback) {
> +                       spin_unlock_irqrestore(&chan->lock, flags);

What happens if somebody slips in here and makes changes to the list?

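A pattern several dmaengine drivers use to avoid that question entirely
is to detach the completed descriptors in one shot and then walk a
private list with the lock dropped; a sketch, assuming no other
semantics need to change:

static void xilinx_cdma_chan_desc_cleanup(struct xilinx_cdma_chan *chan)
{
        struct xilinx_cdma_tx_descriptor *desc, *next;
        unsigned long flags;
        LIST_HEAD(done);

        /* Move everything off done_list while holding the lock */
        spin_lock_irqsave(&chan->lock, flags);
        list_splice_tail_init(&chan->done_list, &done);
        spin_unlock_irqrestore(&chan->lock, flags);

        /* The private list can now be walked without the lock */
        list_for_each_entry_safe(desc, next, &done, node) {
                dma_async_tx_callback callback = desc->async_tx.callback;
                void *param = desc->async_tx.callback_param;

                list_del(&desc->node);
                if (callback)
                        callback(param);
                dma_run_dependencies(&desc->async_tx);
                xilinx_cdma_free_tx_descriptor(chan, desc);
        }
}
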
> +                       callback(callback_param);
> +                       spin_lock_irqsave(&chan->lock, flags);
> +               }
> +
> +               /* Run any dependencies, then free the descriptor */
> +               dma_run_dependencies(&desc->async_tx);
> +               xilinx_cdma_free_tx_descriptor(chan, desc);
> +       }
> +
> +       spin_unlock_irqrestore(&chan->lock, flags);
> +}

[...]

> +/**
> + * xilinx_cdma_do_tasklet - Schedule completion tasklet
> + * @data: Pointer to the Xilinx cdma channel structure
> + */

That comment is misleading; this is the actual tasklet function, it doesn't
schedule anything.

Again, though, consider threaded IRQ handlers instead.

> +static void xilinx_cdma_do_tasklet(unsigned long data)
> +{
> +       struct xilinx_cdma_chan *chan = (struct xilinx_cdma_chan *)data;
> +
> +       xilinx_cdma_chan_desc_cleanup(chan);
> +}


> +/**
> + * xilinx_cdma_free_channel - Channel remove function
> + * @chan: Driver specific cdma channel
> + */
> +static void xilinx_cdma_free_channel(struct xilinx_cdma_chan *chan)
> +{
> +       /* Disable Interrupts */
> +       cdma_ctrl_clr(chan, XILINX_CDMA_CONTROL_OFFSET,
> +                     XILINX_CDMA_XR_IRQ_ALL_MASK);
> +
> +       if (chan->irq > 0)
> +               free_irq(chan->irq, chan);
> +
> +       tasklet_kill(&chan->tasklet);
> +
> +       list_del(&chan->common.device_node);
> +}

What assurance do you have that there are no operations outstanding when
you do this?

> +/**
> + * xilinx_cdma_chan_probe - Per Channel Probing
> + * It gets channel features from the device tree entry and
> + * initializes special channel handling routines
> + *
> + * @xdev: Driver specific device structure
> + * @node: Device node
> + *
> + * Return: '0' on success and failure value on error
> + */
> +static int xilinx_cdma_chan_probe(struct xilinx_cdma_device *xdev,
> +                                 struct device_node *node)
> +{
> +       struct xilinx_cdma_chan *chan;
> +       bool has_dre;
> +       u32 value, width;
> +       int err;
> +
> +       /* Alloc channel */
> +       chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
> +       if (!chan)
> +               return -ENOMEM;
> +
> +       chan->dev = xdev->dev;
> +       chan->has_sg = xdev->has_sg;
> +       chan->xdev = xdev;
> +
> +       spin_lock_init(&chan->lock);
> +       INIT_LIST_HEAD(&chan->pending_list);
> +       INIT_LIST_HEAD(&chan->done_list);
> +
> +       /* Retrieve the channel properties from the device tree */
> +       has_dre = of_property_read_bool(node, "xlnx,include-dre");
> +
> +       err = of_property_read_u32(node, "xlnx,datawidth", &value);
> +       if (err) {
> +               dev_err(xdev->dev, "unable to read datawidth property");
> +               return err;
> +       }
> +       width = value >> 3; /* convert bits to bytes */
> +
> +       /* If data width is greater than 8 bytes, DRE is not in hw */
> +       if (width > 8)
> +               has_dre = false;
> +
> +       if (!has_dre)
> +               xdev->common.copy_align = fls(width - 1);
> +
> +       /* Request the interrupt */
> +       chan->irq = irq_of_parse_and_map(node, 0);
> +       err = request_irq(chan->irq, xilinx_cdma_irq_handler, IRQF_SHARED,
> +                         "xilinx-cdma-controller", chan);

Your interrupt handler could be called here.  Are you ready for that?

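With IRQF_SHARED in particular, the handler can run as soon as
request_irq() returns, whenever another device on the shared line fires;
a stale status bit would then make it call tasklet_schedule() on a
tasklet that has not been initialized yet.  A conservative reordering
(sketch only, nothing else changed):

        /* Make everything the handler touches ready before the IRQ
         * can possibly fire
         */
        tasklet_init(&chan->tasklet, xilinx_cdma_do_tasklet,
                     (unsigned long)chan);

        chan->irq = irq_of_parse_and_map(node, 0);
        err = request_irq(chan->irq, xilinx_cdma_irq_handler, IRQF_SHARED,
                          "xilinx-cdma-controller", chan);
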
> +       if (err) {
> +               dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
> +               return err;
> +       }
> +
> +       /* Initialize the tasklet */
> +       tasklet_init(&chan->tasklet, xilinx_cdma_do_tasklet,
> +                    (unsigned long)chan);
> +
> +       /*
> +        * Initialize the DMA channel and add it to the DMA engine channels
> +        * list.
> +        */
> +       chan->common.device = &xdev->common;
> +
> +       list_add_tail(&chan->common.device_node, &xdev->common.channels);
> +       xdev->chan = chan;
> +
> +       /* Initialize the channel */
> +       err = xilinx_cdma_chan_reset(chan);
> +       if (err) {
> +               dev_err(xdev->dev, "Reset channel failed\n");
> +               return err;
> +       }
> +
> +       return 0;
> +}

[...]

jon
Srikanth Thokala April 9, 2014, 12:29 p.m. UTC | #3
Hi Jonathan,

On Tue, Apr 8, 2014 at 8:14 PM, Jonathan Corbet <corbet@lwn.net> wrote:
> On Mon, 7 Apr 2014 20:22:54 +0530
> Srikanth Thokala <sthokal@xilinx.com> wrote:
>
>> Kindly review this driver and please let me know if you have any comments.
>
> Here's some comments from a quick look at the patch; they do not qualify as
> a proper review by any means.
>
>> +/**
>> + * struct xilinx_cdma_chan - Driver specific cdma channel structure
>> + * @xdev: Driver specific device structure
>> + * @lock: Descriptor operation lock
>> + * @done_list: Complete descriptors
>> + * @pending_list: Descriptors waiting
>> + * @active_desc: Active descriptor
>> + * @allocated_desc: Allocated descriptor
>> + * @common: DMA common channel
>> + * @desc_pool: Descriptors pool
>> + * @dev: The dma device
>> + * @irq: Channel IRQ
>> + * @has_sg: Support scatter transfers
>> + * @err: Channel has errors
>> + * @tasklet: Cleanup work after irq
>> + */
>> +struct xilinx_cdma_chan {
>> +       struct xilinx_cdma_device *xdev;
>> +       spinlock_t lock;
>> +       struct list_head done_list;
>> +       struct list_head pending_list;
>> +       struct xilinx_cdma_tx_descriptor *active_desc;
>> +       struct xilinx_cdma_tx_descriptor *allocated_desc;
>> +       struct dma_chan common;
>> +       struct dma_pool *desc_pool;
>> +       struct device *dev;
>> +       int irq;
>> +       bool has_sg;
>> +       int err;
>> +       struct tasklet_struct tasklet;
>> +};
>
> Have you thought about using a threaded IRQ handler instead of a tasklet?
> The tasklet interface has its pitfalls and some reviewers frown on the
> addition of more users.

Ok, I see. I will add it to my list and send this change as a patch on top of
this driver.  Thanks.

>
> [...]
>
>> +/**
>> + * xilinx_cdma_alloc_tx_descriptor - Allocate transaction descriptor
>> + * @chan: Driver specific cdma channel
>> + *
>> + * Return: The allocated descriptor on success and NULL on failure.
>> + */
>> +static struct xilinx_cdma_tx_descriptor *
>> +xilinx_cdma_alloc_tx_descriptor(struct xilinx_cdma_chan *chan)
>> +{
>> +       struct xilinx_cdma_tx_descriptor *desc;
>> +       unsigned long flags;
>> +
>> +       if (chan->allocated_desc)
>> +               return chan->allocated_desc;
>
> This looks racy.  What happens if two threads hit here at once, or, in
> general, some other thread does something with chan->allocated_desc?
>
>> +       desc = kzalloc(sizeof(*desc), GFP_KERNEL);
>> +       if (!desc)
>> +               return NULL;
>> +
>> +       spin_lock_irqsave(&chan->lock, flags);
>> +       chan->allocated_desc = desc;
>> +       spin_unlock_irqrestore(&chan->lock, flags);
>> +
>> +       INIT_LIST_HEAD(&desc->segments);
>
> Using the lock to protect a single assignment doesn't really buy you much;
> it's what is going on outside of the locked region that is going to bite
> you.
>
> Also, as soon as you do that assignment, desc is visible to the rest of the
> world.  Somebody else could try to use it before you get around to that
> INIT_LIST_HEAD() call, with unpleasant results.  You really need a lock
> around the whole test/allocate/initialize operation.

Ok.

>
>> +       return desc;
>> +}
>> +
>> +/**
>> + * xilinx_cdma_free_tx_descriptor - Free transaction descriptor
>> + * @chan: Driver specific cdma channel
>> + * @desc: cdma transaction descriptor
>> + */
>> +static void
>> +xilinx_cdma_free_tx_descriptor(struct xilinx_cdma_chan *chan,
>> +                              struct xilinx_cdma_tx_descriptor *desc)
>> +{
>> +       struct xilinx_cdma_tx_segment *segment, *next;
>> +
>> +       if (!desc)
>> +               return;
>> +
>> +       list_for_each_entry_safe(segment, next, &desc->segments, node) {
>> +               list_del(&segment->node);
>> +               xilinx_cdma_free_tx_segment(chan, segment);
>> +       }
>> +
>> +       kfree(desc);
>> +}
>
> What are the locking requirements for this function?  It looks from a
> casual reading like some callers hold the spinlock while others do not.  It
> would be good to sort out (and document!) the requirement here.

Ok sure, I will document them.

>
>> +/**
>> + * xilinx_cdma_free_chan_resources - Free channel resources
>> + * @dchan: DMA channel
>> + */
>> +static void xilinx_cdma_free_chan_resources(struct dma_chan *dchan)
>> +{
>> +       struct xilinx_cdma_chan *chan = to_xilinx_chan(dchan);
>> +       unsigned long flags;
>> +
>> +       spin_lock_irqsave(&chan->lock, flags);
>> +       xilinx_cdma_free_desc_list(chan, &chan->done_list);
>> +       xilinx_cdma_free_desc_list(chan, &chan->pending_list);
>> +       spin_unlock_irqrestore(&chan->lock, flags);
>> +
>> +       dma_pool_destroy(chan->desc_pool);
>> +       chan->desc_pool = NULL;
>
> Why is this part done outside the lock?

I feel it is not required, because we ensure no memory is used from the
pool (by freeing up all the descriptors) before calling dma_pool_destroy().

>
>> + */
>> +static void xilinx_cdma_chan_desc_cleanup(struct xilinx_cdma_chan *chan)
>> +{
>> +       struct xilinx_cdma_tx_descriptor *desc, *next;
>> +       unsigned long flags;
>> +
>> +       spin_lock_irqsave(&chan->lock, flags);
>> +
>> +       list_for_each_entry_safe(desc, next, &chan->done_list, node) {
>> +               dma_async_tx_callback callback;
>> +               void *callback_param;
>> +
>> +               /* Remove from the list of running transactions */
>> +               list_del(&desc->node);
>> +
>> +               /* Run the link descriptor callback function */
>> +               callback = desc->async_tx.callback;
>> +               callback_param = desc->async_tx.callback_param;
>> +               if (callback) {
>> +                       spin_unlock_irqrestore(&chan->lock, flags);
>
> What happens if somebody slips in here and makes changes to the list?

Each completed desc is always added to the tail of done_list, so I don't see
any issues here.

>
>> +                       callback(callback_param);
>> +                       spin_lock_irqsave(&chan->lock, flags);
>> +               }
>> +
>> +               /* Run any dependencies, then free the descriptor */
>> +               dma_run_dependencies(&desc->async_tx);
>> +               xilinx_cdma_free_tx_descriptor(chan, desc);
>> +       }
>> +
>> +       spin_unlock_irqrestore(&chan->lock, flags);
>> +}
>
> [...]
>
>> +/**
>> + * xilinx_cdma_do_tasklet - Schedule completion tasklet
>> + * @data: Pointer to the Xilinx cdma channel structure
>> + */
>
> That comment is misleading; this is the actual tasklet function, it doesn't
> schedule anything.

This function in turn calls the callback function registered by the
slave device driver.  The slave driver generally does
wait_for_completion() after submitting the transfer, and this callback
function actually triggers the completion the slave driver is waiting
on.  I think I should elaborate the description a bit.

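For reference, the client-side handshake being described looks roughly
like the following; this is a hypothetical consumer, not code from the
patch:

/* Hypothetical slave driver: submit one memcpy and wait for it */
static void my_dma_callback(void *param)
{
        complete(param);                /* wakes the waiter below */
}

        struct dma_async_tx_descriptor *tx;
        struct completion cmp;
        dma_cookie_t cookie;

        init_completion(&cmp);
        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                        DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;         /* inside the submitting function */
        tx->callback = my_dma_callback;
        tx->callback_param = &cmp;
        cookie = dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        wait_for_completion(&cmp);      /* released by the callback */
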
>
> Again, though, consider threaded IRQ handlers instead.
>
>> +static void xilinx_cdma_do_tasklet(unsigned long data)
>> +{
>> +       struct xilinx_cdma_chan *chan = (struct xilinx_cdma_chan *)data;
>> +
>> +       xilinx_cdma_chan_desc_cleanup(chan);
>> +}
>
>
>> +/**
>> + * xilinx_cdma_free_channel - Channel remove function
>> + * @chan: Driver specific cdma channel
>> + */
>> +static void xilinx_cdma_free_channel(struct xilinx_cdma_chan *chan)
>> +{
>> +       /* Disable Interrupts */
>> +       cdma_ctrl_clr(chan, XILINX_CDMA_CONTROL_OFFSET,
>> +                     XILINX_CDMA_XR_IRQ_ALL_MASK);
>> +
>> +       if (chan->irq > 0)
>> +               free_irq(chan->irq, chan);
>> +
>> +       tasklet_kill(&chan->tasklet);
>> +
>> +       list_del(&chan->common.device_node);
>> +}
>
> What assurance do you have that there are no operations outstanding when
> you do this?

This is called either from probe(), when registration fails, or from
remove(), before which the slave device driver should ensure it has
released the acquired channels.  Releasing a channel frees up all of
the channel resources.

>
>> +/**
>> + * xilinx_cdma_chan_probe - Per Channel Probing
>> + * It gets channel features from the device tree entry and
>> + * initializes special channel handling routines
>> + *
>> + * @xdev: Driver specific device structure
>> + * @node: Device node
>> + *
>> + * Return: '0' on success and failure value on error
>> + */
>> +static int xilinx_cdma_chan_probe(struct xilinx_cdma_device *xdev,
>> +                                 struct device_node *node)
>> +{
>> +       struct xilinx_cdma_chan *chan;
>> +       bool has_dre;
>> +       u32 value, width;
>> +       int err;
>> +
>> +       /* Alloc channel */
>> +       chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
>> +       if (!chan)
>> +               return -ENOMEM;
>> +
>> +       chan->dev = xdev->dev;
>> +       chan->has_sg = xdev->has_sg;
>> +       chan->xdev = xdev;
>> +
>> +       spin_lock_init(&chan->lock);
>> +       INIT_LIST_HEAD(&chan->pending_list);
>> +       INIT_LIST_HEAD(&chan->done_list);
>> +
>> +       /* Retrieve the channel properties from the device tree */
>> +       has_dre = of_property_read_bool(node, "xlnx,include-dre");
>> +
>> +       err = of_property_read_u32(node, "xlnx,datawidth", &value);
>> +       if (err) {
>> +               dev_err(xdev->dev, "unable to read datawidth property");
>> +               return err;
>> +       }
>> +       width = value >> 3; /* convert bits to bytes */
>> +
>> +       /* If data width is greater than 8 bytes, DRE is not in hw */
>> +       if (width > 8)
>> +               has_dre = false;
>> +
>> +       if (!has_dre)
>> +               xdev->common.copy_align = fls(width - 1);
>> +
>> +       /* Request the interrupt */
>> +       chan->irq = irq_of_parse_and_map(node, 0);
>> +       err = request_irq(chan->irq, xilinx_cdma_irq_handler, IRQF_SHARED,
>> +                         "xilinx-cdma-controller", chan);
>
> Your interrupt handler could be called here.  Are you ready for that?

Interrupts are only enabled after the channel is reset in
xilinx_cdma_chan_reset(), which is called later in this function.  So the
interrupt handler will never be called before that point.

Thanks for the review,
Srikanth

>
>> +       if (err) {
>> +               dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
>> +               return err;
>> +       }
>> +
>> +       /* Initialize the tasklet */
>> +       tasklet_init(&chan->tasklet, xilinx_cdma_do_tasklet,
>> +                    (unsigned long)chan);
>> +
>> +       /*
>> +        * Initialize the DMA channel and add it to the DMA engine channels
>> +        * list.
>> +        */
>> +       chan->common.device = &xdev->common;
>> +
>> +       list_add_tail(&chan->common.device_node, &xdev->common.channels);
>> +       xdev->chan = chan;
>> +
>> +       /* Initialize the channel */
>> +       err = xilinx_cdma_chan_reset(chan);
>> +       if (err) {
>> +               dev_err(xdev->dev, "Reset channel failed\n");
>> +               return err;
>> +       }
>> +
>> +       return 0;
>> +}
>
> [...]
>
> jon
diff mbox

Patch

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 44b312e..e2c5cf2 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -365,6 +365,18 @@  config XILINX_VDMA
 	  channels, Memory Mapped to Stream (MM2S) and Stream to
 	  Memory Mapped (S2MM) for the data transfers.
 
+config XILINX_CDMA
+	tristate "Xilinx AXI CDMA Engine"
+	depends on (ARCH_ZYNQ || MICROBLAZE)
+	select DMA_ENGINE
+	help
+	  Enable support for Xilinx AXI CDMA Soft IP.
+
+	  The AXI CDMA is a soft IP which provides high-bandwidth
+	  Direct Memory Access (DMA) between a memory-mapped source
+	  address and a memory-mapped destination address using the
+	  AXI4 protocol.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
index 3c4e9f2..e1dee77 100644
--- a/drivers/dma/xilinx/Makefile
+++ b/drivers/dma/xilinx/Makefile
@@ -1 +1,2 @@ 
 obj-$(CONFIG_XILINX_VDMA) += xilinx_vdma.o
+obj-$(CONFIG_XILINX_CDMA) += xilinx_cdma.o
diff --git a/drivers/dma/xilinx/xilinx_cdma.c b/drivers/dma/xilinx/xilinx_cdma.c
new file mode 100644
index 0000000..2ae7c77
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_cdma.c
@@ -0,0 +1,998 @@ 
+/*
+ * DMA driver for Xilinx Central DMA Engine
+ *
+ * Copyright (C) 2010 - 2014 Xilinx, Inc. All rights reserved.
+ *
+ * Based on the Freescale DMA driver.
+ *
+ * Description:
+ *  The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
+ *  Access (DMA) between a memory-mapped source address and a memory-mapped
+ *  destination address.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/amba/xilinx_dma.h>
+#include <linux/bitops.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+
+#include "../dmaengine.h"
+
+/* Register Offsets */
+#define XILINX_CDMA_CONTROL_OFFSET	0x00
+#define XILINX_CDMA_STATUS_OFFSET	0x04
+#define XILINX_CDMA_CDESC_OFFSET	0x08
+#define XILINX_CDMA_TDESC_OFFSET	0x10
+#define XILINX_CDMA_SRCADDR_OFFSET	0x18
+#define XILINX_CDMA_DSTADDR_OFFSET	0x20
+#define XILINX_CDMA_BTT_OFFSET		0x28
+
+/* General register bits definitions */
+#define XILINX_CDMA_CR_RESET		BIT(2)
+#define XILINX_CDMA_CR_SGMODE		BIT(3)
+
+#define XILINX_CDMA_SR_IDLE		BIT(1)
+
+#define XILINX_CDMA_XR_IRQ_IOC_MASK	BIT(12)
+#define XILINX_CDMA_XR_IRQ_DELAY_MASK	BIT(13)
+#define XILINX_CDMA_XR_IRQ_ERROR_MASK	BIT(14)
+#define XILINX_CDMA_XR_IRQ_ALL_MASK	GENMASK(14, 12)
+
+#define XILINX_CDMA_XR_DELAY_MASK	GENMASK(31, 24)
+#define XILINX_CDMA_XR_COALESCE_MASK	GENMASK(23, 16)
+
+#define XILINX_CDMA_DELAY_MAX		GENMASK(7, 0)
+#define XILINX_CDMA_DELAY_SHIFT		24
+
+#define XILINX_CDMA_COALESCE_MAX	GENMASK(7, 0)
+#define XILINX_CDMA_COALESCE_SHIFT	16
+
+/* Delay loop counter to prevent hardware failure */
+#define XILINX_CDMA_RESET_LOOP		1000000
+
+/* Maximum transfer length */
+#define XILINX_CDMA_MAX_TRANS_LEN	GENMASK(22, 0)
+
+/**
+ * struct xilinx_cdma_desc_hw - Hardware Descriptor
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @pad1: Reserved @0x04
+ * @src_addr: Source address @0x08
+ * @pad2: Reserved @0x0C
+ * @dest_addr: Destination address @0x10
+ * @pad3: Reserved @0x14
+ * @control: Control field @0x18
+ * @status: Status field @0x1C
+ */
+struct xilinx_cdma_desc_hw {
+	u32 next_desc;
+	u32 pad1;
+	u32 src_addr;
+	u32 pad2;
+	u32 dest_addr;
+	u32 pad3;
+	u32 control;
+	u32 status;
+} __aligned(64);
+
+/**
+ * struct xilinx_cdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_cdma_tx_segment {
+	struct xilinx_cdma_desc_hw hw;
+	struct list_head node;
+	dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_cdma_tx_descriptor - Per Transaction structure
+ * @async_tx: Async transaction descriptor
+ * @segments: TX segments list
+ * @node: Node in the channel descriptors list
+ */
+struct xilinx_cdma_tx_descriptor {
+	struct dma_async_tx_descriptor async_tx;
+	struct list_head segments;
+	struct list_head node;
+};
+
+/**
+ * struct xilinx_cdma_chan - Driver specific cdma channel structure
+ * @xdev: Driver specific device structure
+ * @lock: Descriptor operation lock
+ * @done_list: Complete descriptors
+ * @pending_list: Descriptors waiting
+ * @active_desc: Active descriptor
+ * @allocated_desc: Allocated descriptor
+ * @common: DMA common channel
+ * @desc_pool: Descriptors pool
+ * @dev: The dma device
+ * @irq: Channel IRQ
+ * @has_sg: Support scatter transfers
+ * @err: Channel has errors
+ * @tasklet: Cleanup work after irq
+ */
+struct xilinx_cdma_chan {
+	struct xilinx_cdma_device *xdev;
+	spinlock_t lock;
+	struct list_head done_list;
+	struct list_head pending_list;
+	struct xilinx_cdma_tx_descriptor *active_desc;
+	struct xilinx_cdma_tx_descriptor *allocated_desc;
+	struct dma_chan common;
+	struct dma_pool *desc_pool;
+	struct device *dev;
+	int irq;
+	bool has_sg;
+	int err;
+	struct tasklet_struct tasklet;
+};
+
+/**
+ * struct xilinx_cdma_device - CDMA device structure
+ * @regs: I/O mapped base address
+ * @dev: Device Structure
+ * @common: DMA device structure
+ * @chan: Driver specific cdma channel
+ * @has_sg: Specifies whether Scatter-Gather is present or not
+ */
+struct xilinx_cdma_device {
+	void __iomem *regs;
+	struct device *dev;
+	struct dma_device common;
+	struct xilinx_cdma_chan *chan;
+	bool has_sg;
+};
+
+/* Macros */
+#define to_xilinx_chan(chan) \
+	container_of(chan, struct xilinx_cdma_chan, common)
+#define to_cdma_tx_descriptor(tx) \
+	container_of(tx, struct xilinx_cdma_tx_descriptor, async_tx)
+
+/* IO accessors */
+static inline void cdma_write(struct xilinx_cdma_chan *chan, u32 reg, u32 val)
+{
+	writel(val, chan->xdev->regs + reg);
+}
+
+static inline u32 cdma_read(struct xilinx_cdma_chan *chan, u32 reg)
+{
+	return readl(chan->xdev->regs + reg);
+}
+
+static inline void cdma_ctrl_clr(struct xilinx_cdma_chan *chan, u32 reg,
+				 u32 clr)
+{
+	cdma_write(chan, reg, cdma_read(chan, reg) & ~clr);
+}
+
+static inline void cdma_ctrl_set(struct xilinx_cdma_chan *chan, u32 reg,
+				 u32 set)
+{
+	cdma_write(chan, reg, cdma_read(chan, reg) | set);
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors and segments alloc and free
+ */
+
+/**
+ * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific cdma channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_cdma_tx_segment *
+xilinx_cdma_alloc_tx_segment(struct xilinx_cdma_chan *chan)
+{
+	struct xilinx_cdma_tx_segment *segment;
+	dma_addr_t phys;
+
+	segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
+	if (!segment)
+		return NULL;
+
+	memset(segment, 0, sizeof(*segment));
+	segment->phys = phys;
+
+	return segment;
+}
+
+/**
+ * xilinx_cdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific cdma channel
+ * @segment: cdma transaction segment
+ */
+static void xilinx_cdma_free_tx_segment(struct xilinx_cdma_chan *chan,
+					struct xilinx_cdma_tx_segment *segment)
+{
+	dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_cdma_alloc_tx_descriptor - Allocate transaction descriptor
+ * @chan: Driver specific cdma channel
+ *
+ * Return: The allocated descriptor on success and NULL on failure.
+ */
+static struct xilinx_cdma_tx_descriptor *
+xilinx_cdma_alloc_tx_descriptor(struct xilinx_cdma_chan *chan)
+{
+	struct xilinx_cdma_tx_descriptor *desc;
+	unsigned long flags;
+
+	if (chan->allocated_desc)
+		return chan->allocated_desc;
+
+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return NULL;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	chan->allocated_desc = desc;
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	INIT_LIST_HEAD(&desc->segments);
+
+	return desc;
+}
+
+/**
+ * xilinx_cdma_free_tx_descriptor - Free transaction descriptor
+ * @chan: Driver specific cdma channel
+ * @desc: cdma transaction descriptor
+ */
+static void
+xilinx_cdma_free_tx_descriptor(struct xilinx_cdma_chan *chan,
+			       struct xilinx_cdma_tx_descriptor *desc)
+{
+	struct xilinx_cdma_tx_segment *segment, *next;
+
+	if (!desc)
+		return;
+
+	list_for_each_entry_safe(segment, next, &desc->segments, node) {
+		list_del(&segment->node);
+		xilinx_cdma_free_tx_segment(chan, segment);
+	}
+
+	kfree(desc);
+}
+
+/**
+ * xilinx_cdma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_cdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+	struct xilinx_cdma_chan *chan = to_xilinx_chan(dchan);
+
+	/* Has this channel already been allocated? */
+	if (chan->desc_pool)
+		return 0;
+
+	/*
+	 * Descriptors must be aligned to 64 bytes to meet the Xilinx DMA
+	 * specification requirement.
+	 */
+	chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
+				chan->dev,
+				sizeof(struct xilinx_cdma_tx_segment),
+				__alignof__(struct xilinx_cdma_tx_segment), 0);
+	if (!chan->desc_pool) {
+		dev_err(chan->dev,
+			"unable to allocate channel descriptor pool\n");
+		return -ENOMEM;
+	}
+
+	dma_cookie_init(dchan);
+	return 0;
+}
+
+/**
+ * xilinx_cdma_free_desc_list - Free descriptors list
+ * @chan: Driver specific cdma channel
+ * @list: List to parse and delete the descriptor
+ */
+static void xilinx_cdma_free_desc_list(struct xilinx_cdma_chan *chan,
+				       struct list_head *list)
+{
+	struct xilinx_cdma_tx_descriptor *desc, *next;
+
+	list_for_each_entry_safe(desc, next, list, node) {
+		list_del(&desc->node);
+		xilinx_cdma_free_tx_descriptor(chan, desc);
+	}
+}
+
+/**
+ * xilinx_cdma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel
+ */
+static void xilinx_cdma_free_chan_resources(struct dma_chan *dchan)
+{
+	struct xilinx_cdma_chan *chan = to_xilinx_chan(dchan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	xilinx_cdma_free_desc_list(chan, &chan->done_list);
+	xilinx_cdma_free_desc_list(chan, &chan->pending_list);
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	dma_pool_destroy(chan->desc_pool);
+	chan->desc_pool = NULL;
+}
+
+/**
+ * xilinx_cdma_chan_desc_cleanup - Clean channel descriptors
+ * @chan: Driver specific cdma channel
+ */
+static void xilinx_cdma_chan_desc_cleanup(struct xilinx_cdma_chan *chan)
+{
+	struct xilinx_cdma_tx_descriptor *desc, *next;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+		dma_async_tx_callback callback;
+		void *callback_param;
+
+		/* Remove from the list of running transactions */
+		list_del(&desc->node);
+
+		/* Run the link descriptor callback function */
+		callback = desc->async_tx.callback;
+		callback_param = desc->async_tx.callback_param;
+		if (callback) {
+			spin_unlock_irqrestore(&chan->lock, flags);
+			callback(callback_param);
+			spin_lock_irqsave(&chan->lock, flags);
+		}
+
+		/* Run any dependencies, then free the descriptor */
+		dma_run_dependencies(&desc->async_tx);
+		xilinx_cdma_free_tx_descriptor(chan, desc);
+	}
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_cdma_tx_status - Get CDMA transaction status
+ * @dchan: DMA channel
+ * @cookie: Transaction identifier
+ * @txstate: Transaction state
+ *
+ * Return: DMA transaction status
+ */
+static enum dma_status xilinx_cdma_tx_status(struct dma_chan *dchan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(dchan, cookie, txstate);
+}
+
+/**
+ * cdma_is_idle - Check if cdma channel is idle
+ * @chan: Driver specific cdma channel
+ *
+ * Return: 'true' if idle, 'false' if not.
+ */
+static bool cdma_is_idle(struct xilinx_cdma_chan *chan)
+{
+	return cdma_read(chan, XILINX_CDMA_STATUS_OFFSET) & XILINX_CDMA_SR_IDLE;
+}
+
+/**
+ * xilinx_cdma_start_transfer - Starts cdma transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_cdma_start_transfer(struct xilinx_cdma_chan *chan)
+{
+	unsigned long flags;
+	struct xilinx_cdma_tx_descriptor *desc;
+	struct xilinx_cdma_tx_segment *head, *tail;
+
+	if (chan->err)
+		return;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	if (list_empty(&chan->pending_list))
+		goto out_unlock;
+
+	/* If hardware is busy, cannot submit */
+	if (!cdma_is_idle(chan)) {
+		dev_dbg(chan->dev, "DMA controller still busy %x\n",
+			cdma_read(chan, XILINX_CDMA_STATUS_OFFSET));
+		goto out_unlock;
+	}
+
+	desc = list_first_entry(&chan->pending_list,
+				struct xilinx_cdma_tx_descriptor, node);
+
+	if (chan->has_sg) {
+		head = list_first_entry(&desc->segments,
+					struct xilinx_cdma_tx_segment, node);
+		tail = list_entry(desc->segments.prev,
+				  struct xilinx_cdma_tx_segment, node);
+
+		cdma_write(chan, XILINX_CDMA_CDESC_OFFSET, head->phys);
+
+		/* Update tail ptr register which will start the transfer */
+		cdma_write(chan, XILINX_CDMA_TDESC_OFFSET, tail->phys);
+	} else {
+		/* In simple mode */
+		struct xilinx_cdma_tx_segment *segment;
+		struct xilinx_cdma_desc_hw *hw;
+
+		segment = list_first_entry(&desc->segments,
+					   struct xilinx_cdma_tx_segment,
+					   node);
+
+		hw = &segment->hw;
+
+		cdma_write(chan, XILINX_CDMA_SRCADDR_OFFSET, hw->src_addr);
+		cdma_write(chan, XILINX_CDMA_DSTADDR_OFFSET, hw->dest_addr);
+
+		/* Start the transfer */
+		cdma_write(chan, XILINX_CDMA_BTT_OFFSET,
+			   hw->control & XILINX_CDMA_MAX_TRANS_LEN);
+	}
+
+	list_del(&desc->node);
+	chan->active_desc = desc;
+
+out_unlock:
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_cdma_issue_pending - Issue pending transactions
+ * @dchan: DMA channel
+ */
+static void xilinx_cdma_issue_pending(struct dma_chan *dchan)
+{
+	struct xilinx_cdma_chan *chan = to_xilinx_chan(dchan);
+
+	xilinx_cdma_start_transfer(chan);
+}
+
+/**
+ * xilinx_cdma_complete_descriptor - Mark the active descriptor as complete
+ * @chan: Driver specific cdma channel
+ */
+static void xilinx_cdma_complete_descriptor(struct xilinx_cdma_chan *chan)
+{
+	struct xilinx_cdma_tx_descriptor *desc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	desc = chan->active_desc;
+	if (!desc) {
+		dev_dbg(chan->dev, "no running descriptors\n");
+		goto out_unlock;
+	}
+
+	dma_cookie_complete(&desc->async_tx);
+	list_add_tail(&desc->node, &chan->done_list);
+
+	chan->active_desc = NULL;
+
+out_unlock:
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_cdma_chan_reset - Reset CDMA channel
+ * @chan: Driver specific CDMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_cdma_chan_reset(struct xilinx_cdma_chan *chan)
+{
+	int loop = XILINX_CDMA_RESET_LOOP;
+	u32 tmp;
+
+	cdma_ctrl_set(chan, XILINX_CDMA_CONTROL_OFFSET,
+		      XILINX_CDMA_CR_RESET);
+
+	/* Wait for the hardware to finish the reset */
+	do {
+		tmp = cdma_read(chan, XILINX_CDMA_CONTROL_OFFSET) &
+		      XILINX_CDMA_CR_RESET;
+	} while (loop-- && tmp);
+
+	/* Test the reset bit itself; the counter underflows to -1 on timeout */
+	if (tmp) {
+		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
+			cdma_read(chan, XILINX_CDMA_CONTROL_OFFSET),
+			cdma_read(chan, XILINX_CDMA_STATUS_OFFSET));
+		return -EBUSY;
+	}
+
+	/* Enable interrupts */
+	cdma_ctrl_set(chan, XILINX_CDMA_CONTROL_OFFSET,
+		      XILINX_CDMA_XR_IRQ_ALL_MASK);
+
+	/* Enable SG Mode */
+	if (chan->has_sg)
+		cdma_ctrl_set(chan, XILINX_CDMA_CONTROL_OFFSET,
+			      XILINX_CDMA_CR_SGMODE);
+
+	return 0;
+}
+
+/**
+ * xilinx_cdma_irq_handler - CDMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Xilinx CDMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t xilinx_cdma_irq_handler(int irq, void *data)
+{
+	struct xilinx_cdma_chan *chan = data;
+	u32 status;
+
+	status = cdma_read(chan, XILINX_CDMA_STATUS_OFFSET);
+	if (!(status & XILINX_CDMA_XR_IRQ_ALL_MASK))
+		return IRQ_NONE;
+
+	/* Ack the interrupts */
+	cdma_write(chan, XILINX_CDMA_STATUS_OFFSET,
+		   XILINX_CDMA_XR_IRQ_ALL_MASK);
+
+	if (status & XILINX_CDMA_XR_IRQ_ERROR_MASK) {
+		dev_err(chan->dev,
+			"Channel %p has errors %x, cdr %x tdr %x\n",
+			chan,
+			cdma_read(chan, XILINX_CDMA_STATUS_OFFSET),
+			cdma_read(chan, XILINX_CDMA_CDESC_OFFSET),
+			cdma_read(chan, XILINX_CDMA_TDESC_OFFSET));
+		chan->err = true;
+	}
+
+	/*
+	 * The delay timeout fired: the device took longer than the
+	 * programmed inter-packet delay to complete the transfer
+	 */
+	if (status & XILINX_CDMA_XR_IRQ_DELAY_MASK)
+		dev_dbg(chan->dev, "Inter-packet latency too long\n");
+
+	if (status & XILINX_CDMA_XR_IRQ_IOC_MASK) {
+		xilinx_cdma_complete_descriptor(chan);
+		xilinx_cdma_start_transfer(chan);
+	}
+
+	tasklet_schedule(&chan->tasklet);
+	return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_cdma_do_tasklet - Clean up completed descriptors and run callbacks
+ * @data: Pointer to the Xilinx cdma channel structure
+ */
+static void xilinx_cdma_do_tasklet(unsigned long data)
+{
+	struct xilinx_cdma_chan *chan = (struct xilinx_cdma_chan *)data;
+
+	xilinx_cdma_chan_desc_cleanup(chan);
+}
+
+/**
+ * xilinx_cdma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor
+ *
+ * Return: cookie value on success and failure value on error
+ */
+static dma_cookie_t xilinx_cdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct xilinx_cdma_chan *chan = to_xilinx_chan(tx->chan);
+	struct xilinx_cdma_tx_descriptor *desc = to_cdma_tx_descriptor(tx);
+	dma_cookie_t cookie;
+	unsigned long flags;
+	int err;
+
+	if (chan->err) {
+		/*
+		 * The channel encountered an error; try to reset it.  If
+		 * the reset fails the channel is no longer functional and
+		 * only a hard reset of the system will recover it.
+		 */
+		err = xilinx_cdma_chan_reset(chan);
+		if (err < 0)
+			return err;
+	}
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	/* Append the transaction to the pending transactions queue. */
+	list_add_tail(&desc->node, &chan->pending_list);
+
+	/* Descriptor now lives on the pending queue; drop staging pointer */
+	chan->allocated_desc = NULL;
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return cookie;
+}
+
+/**
+ * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
+ * @dchan: DMA channel
+ * @dma_dst: destination address
+ * @dma_src: source address
+ * @len: transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
+			dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct xilinx_cdma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_cdma_desc_hw *hw;
+	struct xilinx_cdma_tx_descriptor *desc;
+	struct xilinx_cdma_tx_segment *segment, *prev;
+
+	if (!len || len > XILINX_CDMA_MAX_TRANS_LEN)
+		return NULL;
+
+	desc = xilinx_cdma_alloc_tx_descriptor(chan);
+	if (!desc)
+		return NULL;
+
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+	desc->async_tx.tx_submit = xilinx_cdma_tx_submit;
+	async_tx_ack(&desc->async_tx);
+
+	/* Allocate the link descriptor from DMA pool */
+	segment = xilinx_cdma_alloc_tx_segment(chan);
+	if (!segment)
+		goto error;
+
+	hw = &segment->hw;
+	hw->control = len;
+	hw->src_addr = dma_src;
+	hw->dest_addr = dma_dst;
+
+	/*
+	 * Link the previous segment, if any, to the new one.  On the first
+	 * call the segments list is still empty and list_last_entry() would
+	 * return an invalid entry, so there is nothing to link.
+	 */
+	if (!list_empty(&desc->segments)) {
+		prev = list_last_entry(&desc->segments,
+				       struct xilinx_cdma_tx_segment, node);
+		prev->hw.next_desc = segment->phys;
+	}
+
+	/* Insert the segment into the descriptor segments list. */
+	list_add_tail(&segment->node, &desc->segments);
+
+	prev = segment;
+
+	/* Link the last hardware descriptor with the first. */
+	segment = list_first_entry(&desc->segments,
+				   struct xilinx_cdma_tx_segment, node);
+	prev->hw.next_desc = segment->phys;
+
+	return &desc->async_tx;
+
+error:
+	xilinx_cdma_free_tx_descriptor(chan, desc);
+	return NULL;
+}
+
+/**
+ * xilinx_cdma_device_control - Configure DMA channel of the device
+ * @dchan: DMA Channel pointer
+ * @cmd: DMA control command
+ * @arg: Channel configuration
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_cdma_device_control(struct dma_chan *dchan,
+				      enum dma_ctrl_cmd cmd, unsigned long arg)
+{
+	struct xilinx_cdma_chan *chan = to_xilinx_chan(dchan);
+	unsigned long flags;
+
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	/* Remove and free all of the descriptors in the lists */
+	xilinx_cdma_free_desc_list(chan, &chan->pending_list);
+	xilinx_cdma_free_desc_list(chan, &chan->done_list);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return 0;
+}
+
+/**
+ * xilinx_cdma_channel_set_config - Configure cdma channel
+ * @dchan: DMA channel
+ * @cfg: cdma device configuration pointer
+ *
+ * Return: '0' on success and failure value on error
+ */
+int xilinx_cdma_channel_set_config(struct dma_chan *dchan,
+					struct xilinx_cdma_config *cfg)
+{
+	struct xilinx_cdma_chan *chan = to_xilinx_chan(dchan);
+	u32 reg = cdma_read(chan, XILINX_CDMA_CONTROL_OFFSET);
+
+	if (cfg->reset)
+		return xilinx_cdma_chan_reset(chan);
+
+	if (cfg->coalesc <= XILINX_CDMA_COALESCE_MAX) {
+		reg &= ~XILINX_CDMA_XR_COALESCE_MASK;
+		reg |= cfg->coalesc << XILINX_CDMA_COALESCE_SHIFT;
+	}
+
+	if (cfg->delay <= XILINX_CDMA_DELAY_MAX) {
+		reg &= ~XILINX_CDMA_XR_DELAY_MASK;
+		reg |= cfg->delay << XILINX_CDMA_DELAY_SHIFT;
+	}
+
+	cdma_write(chan, XILINX_CDMA_CONTROL_OFFSET, reg);
+
+	return 0;
+}
+EXPORT_SYMBOL(xilinx_cdma_channel_set_config);
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+/**
+ * xilinx_cdma_free_channel - Channel remove function
+ * @chan: Driver specific cdma channel
+ */
+static void xilinx_cdma_free_channel(struct xilinx_cdma_chan *chan)
+{
+	/* Disable Interrupts */
+	cdma_ctrl_clr(chan, XILINX_CDMA_CONTROL_OFFSET,
+		      XILINX_CDMA_XR_IRQ_ALL_MASK);
+
+	if (chan->irq > 0)
+		free_irq(chan->irq, chan);
+
+	tasklet_kill(&chan->tasklet);
+
+	list_del(&chan->common.device_node);
+}
+
+/**
+ * xilinx_cdma_chan_probe - Per Channel Probing
+ * It gets the channel features from the device tree entry and
+ * initializes the special channel handling routines
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_cdma_chan_probe(struct xilinx_cdma_device *xdev,
+				  struct device_node *node)
+{
+	struct xilinx_cdma_chan *chan;
+	bool has_dre;
+	u32 value, width;
+	int err;
+
+	/* Alloc channel */
+	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return -ENOMEM;
+
+	chan->dev = xdev->dev;
+	chan->has_sg = xdev->has_sg;
+	chan->xdev = xdev;
+
+	spin_lock_init(&chan->lock);
+	INIT_LIST_HEAD(&chan->pending_list);
+	INIT_LIST_HEAD(&chan->done_list);
+
+	/* Retrieve the channel properties from the device tree */
+	has_dre = of_property_read_bool(node, "xlnx,include-dre");
+
+	err = of_property_read_u32(node, "xlnx,datawidth", &value);
+	if (err) {
+		dev_err(xdev->dev, "unable to read datawidth property\n");
+		return err;
+	}
+	width = value >> 3; /* convert bits to bytes */
+
+	/* If data width is greater than 8 bytes, DRE is not in hw */
+	if (width > 8)
+		has_dre = false;
+
+	if (!has_dre)
+		xdev->common.copy_align = fls(width - 1);
+
+	/* Map the channel interrupt and request it */
+	chan->irq = irq_of_parse_and_map(node, 0);
+	if (!chan->irq) {
+		dev_err(xdev->dev, "unable to map IRQ\n");
+		return -EINVAL;
+	}
+
+	err = request_irq(chan->irq, xilinx_cdma_irq_handler, IRQF_SHARED,
+			  "xilinx-cdma-controller", chan);
+	if (err) {
+		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
+		return err;
+	}
+
+	/* Initialize the tasklet */
+	tasklet_init(&chan->tasklet, xilinx_cdma_do_tasklet,
+		     (unsigned long)chan);
+
+	/*
+	 * Initialize the DMA channel and add it to the DMA engine channels
+	 * list.
+	 */
+	chan->common.device = &xdev->common;
+
+	list_add_tail(&chan->common.device_node, &xdev->common.channels);
+	xdev->chan = chan;
+
+	/* Initialize the channel */
+	err = xilinx_cdma_chan_reset(chan);
+	if (err) {
+		dev_err(xdev->dev, "Reset channel failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * of_dma_xilinx_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success and NULL on error
+ */
+static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+						struct of_dma *ofdma)
+{
+	struct xilinx_cdma_device *xdev = ofdma->of_dma_data;
+
+	return dma_get_slave_channel(&xdev->chan->common);
+}
+
+/**
+ * xilinx_cdma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_cdma_probe(struct platform_device *pdev)
+{
+	struct xilinx_cdma_device *xdev;
+	struct device_node *child, *node;
+	struct resource *res;
+	int ret;
+
+	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+	if (!xdev)
+		return -ENOMEM;
+
+	xdev->dev = &(pdev->dev);
+	INIT_LIST_HEAD(&xdev->common.channels);
+
+	node = pdev->dev.of_node;
+
+	/* Map the registers */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	xdev->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(xdev->regs))
+		return PTR_ERR(xdev->regs);
+
+	/* Check if SG is enabled */
+	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
+
+	dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
+	xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+
+	xdev->common.device_control = xilinx_cdma_device_control;
+	xdev->common.device_issue_pending = xilinx_cdma_issue_pending;
+	xdev->common.device_alloc_chan_resources =
+			xilinx_cdma_alloc_chan_resources;
+	xdev->common.device_free_chan_resources =
+			xilinx_cdma_free_chan_resources;
+	xdev->common.device_tx_status = xilinx_cdma_tx_status;
+	xdev->common.dev = &pdev->dev;
+
+	platform_set_drvdata(pdev, xdev);
+
+	child = of_get_next_child(node, NULL);
+	if (!child) {
+		dev_err(&pdev->dev, "No channel found\n");
+		return -EINVAL;
+	}
+
+	ret = xilinx_cdma_chan_probe(xdev, child);
+	if (ret) {
+		dev_err(&pdev->dev, "Probing channel failed\n");
+		goto free_chan_resources;
+	}
+
+	ret = dma_async_device_register(&xdev->common);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register DMA device\n");
+		goto free_chan_resources;
+	}
+
+	ret = of_dma_controller_register(node, of_dma_xilinx_xlate, xdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
+		dma_async_device_unregister(&xdev->common);
+		goto free_chan_resources;
+	}
+
+	dev_info(&pdev->dev, "Xilinx AXI CDMA Engine driver probed\n");
+
+	return 0;
+
+free_chan_resources:
+	xilinx_cdma_free_channel(xdev->chan);
+
+	return ret;
+}
+
+/**
+ * xilinx_cdma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always '0'
+ */
+static int xilinx_cdma_remove(struct platform_device *pdev)
+{
+	struct xilinx_cdma_device *xdev = platform_get_drvdata(pdev);
+
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&xdev->common);
+
+	xilinx_cdma_free_channel(xdev->chan);
+
+	return 0;
+}
+
+static const struct of_device_id xilinx_cdma_of_match[] = {
+	{ .compatible = "xlnx,axi-cdma-1.00.a", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, xilinx_cdma_of_match);
+
+static struct platform_driver xilinx_cdma_driver = {
+	.driver = {
+		.name = "xilinx-cdma",
+		.owner = THIS_MODULE,
+		.of_match_table = xilinx_cdma_of_match,
+	},
+	.probe = xilinx_cdma_probe,
+	.remove = xilinx_cdma_remove,
+};
+
+module_platform_driver(xilinx_cdma_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx CDMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/include/linux/amba/xilinx_dma.h b/include/linux/amba/xilinx_dma.h
index 34b98f2..fb74ff7 100644
--- a/include/linux/amba/xilinx_dma.h
+++ b/include/linux/amba/xilinx_dma.h
@@ -41,7 +41,20 @@  struct xilinx_vdma_config {
 	int ext_fsync;
 };
 
+/**
+ * struct xilinx_cdma_config - CDMA Configuration structure
+ * @coalesc: Interrupt coalescing threshold
+ * @delay: Delay counter
+ * @reset: Reset Channel
+ */
+struct xilinx_cdma_config {
+	int coalesc;
+	int delay;
+	int reset;
+};
+
 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
 					struct xilinx_vdma_config *cfg);
-
+int xilinx_cdma_channel_set_config(struct dma_chan *dchan,
+					struct xilinx_cdma_config *cfg);
 #endif
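
For completeness, a hedged sketch of how a client might drive the exported
configuration helper (hypothetical client code; chan would come from
dma_request_slave_channel() or similar):

	struct xilinx_cdma_config cfg = {
		.coalesc = 4,	/* raise the IOC interrupt every 4 transfers */
		.delay = 255,	/* maximum inter-packet delay timeout */
		.reset = 0,	/* do not reset the channel */
	};
	int ret = xilinx_cdma_channel_set_config(chan, &cfg);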