[v6,7/8] dmaengine: add a driver for Intel integrated DMA 64-bit

Message ID 1438009443-55317-8-git-send-email-andriy.shevchenko@linux.intel.com (mailing list archive)
State Not Applicable, archived

Commit Message

Andy Shevchenko July 27, 2015, 3:04 p.m. UTC
Intel integrated DMA (iDMA) 64-bit is a specific IP that is used as a part of
LPSS devices such as HSUART or SPI. The iDMA IP is attached for private
usage on each host controller independently.

While it has similarities with Synopsys DesignWare DMA, the following
distinctions don't allow the existing driver to be used:
- 64-bit mode with corresponding changes in Hardware Linked List data structure
- many slight differences in the channel registers

Moreover, this driver is based on the DMA virtual channels framework, which
helps to make the driver cleaner and easier to understand.

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
---
 drivers/dma/Kconfig  |   8 +
 drivers/dma/Makefile |   1 +
 drivers/dma/idma64.c | 710 +++++++++++++++++++++++++++++++++++++++++++++++++++
 drivers/dma/idma64.h | 233 +++++++++++++++++
 4 files changed, 952 insertions(+)
 create mode 100644 drivers/dma/idma64.c
 create mode 100644 drivers/dma/idma64.h
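
For context, the engine is consumed through the generic dmaengine slave API
rather than through any driver-specific interface. The sketch below is a
minimal, hypothetical illustration (not part of this patch) of how an LPSS
host controller driver might push a TX scatterlist through this engine; the
"tx" channel name, the device pointer and the FIFO address are assumptions
made for the example only.

/* Hypothetical consumer sketch -- not part of this patch. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_submit_tx(struct device *dev, struct scatterlist *sgl,
			     unsigned int sg_len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_addr,			/* device FIFO */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 8,	/* encoded later by convert_burst() */
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	/* The channel name/mapping comes from ACPI or platform data */
	chan = dma_request_slave_channel(dev, "tx");
	if (!chan)
		return -ENODEV;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto err;

	/* sgl must already be mapped with dma_map_sg() */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -ENOMEM;
		goto err;
	}

	cookie = dmaengine_submit(desc);	/* queue on the virtual channel */
	dma_async_issue_pending(chan);		/* lands in idma64_issue_pending() */

	/* Keep the channel until the transfer completes; release it later */
	return dma_submit_error(cookie);
err:
	dma_release_channel(chan);
	return ret;
}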

Comments

Lee Jones July 28, 2015, 7:48 a.m. UTC | #1
On Mon, 27 Jul 2015, Andy Shevchenko wrote:

> Intel integrated DMA (iDMA) 64-bit is a specific IP that is used as a part of
> LPSS devices such as HSUART or SPI. The iDMA IP is attached for private
> usage on each host controller independently.
> 
> While it has similarities with Synopsys DesignWare DMA, the following
> distinctions don't allow the existing driver to be used:
> - 64-bit mode with corresponding changes in Hardware Linked List data structure
> - many slight differences in the channel registers
> 
> Moreover, this driver is based on the DMA virtual channels framework, which
> helps to make the driver cleaner and easier to understand.
> 
> Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
> ---
>  drivers/dma/Kconfig  |   8 +
>  drivers/dma/Makefile |   1 +
>  drivers/dma/idma64.c | 710 +++++++++++++++++++++++++++++++++++++++++++++++++++
>  drivers/dma/idma64.h | 233 +++++++++++++++++
>  4 files changed, 952 insertions(+)
>  create mode 100644 drivers/dma/idma64.c
>  create mode 100644 drivers/dma/idma64.h

Applied, thanks.  Pull request to follow.

[...]
Lee Jones July 28, 2015, 7:53 a.m. UTC | #2
FAO Vinod,

On Tue, 28 Jul 2015, Lee Jones wrote:
> On Mon, 27 Jul 2015, Andy Shevchenko wrote:
> 
> > [...]
> 
> Applied, thanks.  Pull request to follow.

Will send out a pull request when I have Vinod's Ack.

[...]

Patches will still be applied to -next for soak testing though.
Andy Shevchenko July 28, 2015, 8:14 a.m. UTC | #3
On Tue, 2015-07-28 at 08:53 +0100, Lee Jones wrote:
> FAO Vinod,
> 
> On Tue, 28 Jul 2015, Lee Jones wrote:
> > On Mon, 27 Jul 2015, Andy Shevchenko wrote:
> > 
> > > [...]
> > 
> > Applied, thanks.  Pull request to follow.
> 
> Will send out a pull request when I have Vinod's Ack.

Vinod, could you have a look at this once again?

> 
> [...]
> 
> Patches will still be applied to -next for soak testing though.

Thanks!
Vinod Koul July 28, 2015, 8:43 a.m. UTC | #4
On Mon, Jul 27, 2015 at 06:04:02PM +0300, Andy Shevchenko wrote:
> Intel integrated DMA (iDMA) 64-bit is a specific IP that is used as a part of
> LPSS devices such as HSUART or SPI. The iDMA IP is attached for private
> usage on each host controller independently.
> 
> While it has similarities with Synopsys DesignWare DMA, the following
> distinctions don't allow the existing driver to be used:
> - 64-bit mode with corresponding changes in Hardware Linked List data structure
> - many slight differences in the channel registers
> 
> Moreover, this driver is based on the DMA virtual channels framework, which
> helps to make the driver cleaner and easier to understand.

Acked-by: Vinod Koul <vinod.koul@intel.com>
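
For reference, the platform device this driver binds to is created by the
LPSS code elsewhere in this series; the following is a rough, hypothetical
sketch of that hookup. The "idma64" name must match DRV_NAME for the probe
to bind, while the MMIO window and IRQ are made-up values, not taken from
this patch.

/* Hypothetical parent-side sketch -- the real wiring is in other patches. */
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static const struct resource example_idma64_resources[] = {
	DEFINE_RES_MEM(0xfe102800, 0x800),	/* assumed MMIO window */
	DEFINE_RES_IRQ(42),			/* assumed IRQ line */
};

static int example_register_idma64(struct device *parent)
{
	struct platform_device *pdev;

	/* "idma64" matches DRV_NAME, so idma64_platform_probe() binds */
	pdev = platform_device_register_resndata(parent, "idma64",
					PLATFORM_DEVID_AUTO,
					example_idma64_resources,
					ARRAY_SIZE(example_idma64_resources),
					NULL, 0);
	return PTR_ERR_OR_ZERO(pdev);
}
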
Patch

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 88d474b..bdbbe5b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -85,6 +85,14 @@ config INTEL_IOP_ADMA
 	help
 	  Enable support for the Intel(R) IOP Series RAID engines.
 
+config IDMA64
+	tristate "Intel integrated DMA 64-bit support"
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable DMA support for Intel Low Power Subsystem such as found on
+	  Intel Skylake PCH.
+
 source "drivers/dma/dw/Kconfig"
 
 config AT_HDMAC
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 6a4d6f2..56ff8c7 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
 obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
 obj-$(CONFIG_MV_XOR) += mv_xor.o
+obj-$(CONFIG_IDMA64) += idma64.o
 obj-$(CONFIG_DW_DMAC_CORE) += dw/
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
new file mode 100644
index 0000000..18c14e1
--- /dev/null
+++ b/drivers/dma/idma64.c
@@ -0,0 +1,710 @@ 
+/*
+ * Core driver for the Intel integrated DMA 64-bit
+ *
+ * Copyright (C) 2015 Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "idma64.h"
+
+/* Platform driver name */
+#define DRV_NAME		"idma64"
+
+/* For now we support only two channels */
+#define IDMA64_NR_CHAN		2
+
+/* ---------------------------------------------------------------------- */
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void idma64_off(struct idma64 *idma64)
+{
+	unsigned short count = 100;
+
+	dma_writel(idma64, CFG, 0);
+
+	channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
+	channel_clear_bit(idma64, MASK(BLOCK), idma64->all_chan_mask);
+	channel_clear_bit(idma64, MASK(SRC_TRAN), idma64->all_chan_mask);
+	channel_clear_bit(idma64, MASK(DST_TRAN), idma64->all_chan_mask);
+	channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
+
+	do {
+		cpu_relax();
+	} while (dma_readl(idma64, CFG) & IDMA64_CFG_DMA_EN && --count);
+}
+
+static void idma64_on(struct idma64 *idma64)
+{
+	dma_writel(idma64, CFG, IDMA64_CFG_DMA_EN);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void idma64_chan_init(struct idma64 *idma64, struct idma64_chan *idma64c)
+{
+	u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0);
+	u32 cfglo = 0;
+
+	/* Enforce FIFO drain when channel is suspended */
+	cfglo |= IDMA64C_CFGL_CH_DRAIN;
+
+	/* Set default burst alignment */
+	cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN;
+
+	channel_writel(idma64c, CFG_LO, cfglo);
+	channel_writel(idma64c, CFG_HI, cfghi);
+
+	/* Enable interrupts */
+	channel_set_bit(idma64, MASK(XFER), idma64c->mask);
+	channel_set_bit(idma64, MASK(ERROR), idma64c->mask);
+
+	/*
+	 * Ensure the controller is turned on.
+	 *
+	 * The iDMA is turned off in ->probe() and loses context during a
+	 * system suspend / resume cycle. That's why we have to enable it each
+	 * time we use it.
+	 */
+	idma64_on(idma64);
+}
+
+static void idma64_chan_stop(struct idma64 *idma64, struct idma64_chan *idma64c)
+{
+	channel_clear_bit(idma64, CH_EN, idma64c->mask);
+}
+
+static void idma64_chan_start(struct idma64 *idma64, struct idma64_chan *idma64c)
+{
+	struct idma64_desc *desc = idma64c->desc;
+	struct idma64_hw_desc *hw = &desc->hw[0];
+
+	channel_writeq(idma64c, SAR, 0);
+	channel_writeq(idma64c, DAR, 0);
+
+	channel_writel(idma64c, CTL_HI, IDMA64C_CTLH_BLOCK_TS(~0UL));
+	channel_writel(idma64c, CTL_LO, IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);
+
+	channel_writeq(idma64c, LLP, hw->llp);
+
+	channel_set_bit(idma64, CH_EN, idma64c->mask);
+}
+
+static void idma64_stop_transfer(struct idma64_chan *idma64c)
+{
+	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
+
+	idma64_chan_stop(idma64, idma64c);
+}
+
+static void idma64_start_transfer(struct idma64_chan *idma64c)
+{
+	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
+	struct virt_dma_desc *vdesc;
+
+	/* Get the next descriptor */
+	vdesc = vchan_next_desc(&idma64c->vchan);
+	if (!vdesc) {
+		idma64c->desc = NULL;
+		return;
+	}
+
+	list_del(&vdesc->node);
+	idma64c->desc = to_idma64_desc(vdesc);
+
+	/* Configure the channel */
+	idma64_chan_init(idma64, idma64c);
+
+	/* Start the channel with a new descriptor */
+	idma64_chan_start(idma64, idma64c);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
+		u32 status_err, u32 status_xfer)
+{
+	struct idma64_chan *idma64c = &idma64->chan[c];
+	struct idma64_desc *desc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	desc = idma64c->desc;
+	if (desc) {
+		if (status_err & (1 << c)) {
+			dma_writel(idma64, CLEAR(ERROR), idma64c->mask);
+			desc->status = DMA_ERROR;
+		} else if (status_xfer & (1 << c)) {
+			dma_writel(idma64, CLEAR(XFER), idma64c->mask);
+			desc->status = DMA_COMPLETE;
+			vchan_cookie_complete(&desc->vdesc);
+			idma64_start_transfer(idma64c);
+		}
+
+		/* idma64_start_transfer() updates idma64c->desc */
+		if (idma64c->desc == NULL || desc->status == DMA_ERROR)
+			idma64_stop_transfer(idma64c);
+	}
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+}
+
+static irqreturn_t idma64_irq(int irq, void *dev)
+{
+	struct idma64 *idma64 = dev;
+	u32 status = dma_readl(idma64, STATUS_INT);
+	u32 status_xfer;
+	u32 status_err;
+	unsigned short i;
+
+	dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
+
+	/* Check if we have any interrupt from the DMA controller */
+	if (!status)
+		return IRQ_NONE;
+
+	/* Disable interrupts */
+	channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
+	channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
+
+	status_xfer = dma_readl(idma64, RAW(XFER));
+	status_err = dma_readl(idma64, RAW(ERROR));
+
+	for (i = 0; i < idma64->dma.chancnt; i++)
+		idma64_chan_irq(idma64, i, status_err, status_xfer);
+
+	/* Re-enable interrupts */
+	channel_set_bit(idma64, MASK(XFER), idma64->all_chan_mask);
+	channel_set_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
+
+	return IRQ_HANDLED;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct idma64_desc *idma64_alloc_desc(unsigned int ndesc)
+{
+	struct idma64_desc *desc;
+
+	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+	if (!desc)
+		return NULL;
+
+	desc->hw = kcalloc(ndesc, sizeof(*desc->hw), GFP_NOWAIT);
+	if (!desc->hw) {
+		kfree(desc);
+		return NULL;
+	}
+
+	return desc;
+}
+
+static void idma64_desc_free(struct idma64_chan *idma64c,
+		struct idma64_desc *desc)
+{
+	struct idma64_hw_desc *hw;
+
+	if (desc->ndesc) {
+		unsigned int i = desc->ndesc;
+
+		do {
+			hw = &desc->hw[--i];
+			dma_pool_free(idma64c->pool, hw->lli, hw->llp);
+		} while (i);
+	}
+
+	kfree(desc->hw);
+	kfree(desc);
+}
+
+static void idma64_vdesc_free(struct virt_dma_desc *vdesc)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(vdesc->tx.chan);
+
+	idma64_desc_free(idma64c, to_idma64_desc(vdesc));
+}
+
+static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw,
+		struct dma_slave_config *config,
+		enum dma_transfer_direction direction, u64 llp)
+{
+	struct idma64_lli *lli = hw->lli;
+	u64 sar, dar;
+	u32 ctlhi = IDMA64C_CTLH_BLOCK_TS(hw->len);
+	u32 ctllo = IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN;
+	u32 src_width, dst_width;
+
+	if (direction == DMA_MEM_TO_DEV) {
+		sar = hw->phys;
+		dar = config->dst_addr;
+		ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC |
+			 IDMA64C_CTLL_FC_M2P;
+		src_width = min_t(u32, 2, __fls(sar | hw->len));
+		dst_width = __fls(config->dst_addr_width);
+	} else {	/* DMA_DEV_TO_MEM */
+		sar = config->src_addr;
+		dar = hw->phys;
+		ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX |
+			 IDMA64C_CTLL_FC_P2M;
+		src_width = __fls(config->src_addr_width);
+		dst_width = min_t(u32, 2, __fls(dar | hw->len));
+	}
+
+	lli->sar = sar;
+	lli->dar = dar;
+
+	lli->ctlhi = ctlhi;
+	lli->ctllo = ctllo |
+		     IDMA64C_CTLL_SRC_MSIZE(config->src_maxburst) |
+		     IDMA64C_CTLL_DST_MSIZE(config->dst_maxburst) |
+		     IDMA64C_CTLL_DST_WIDTH(dst_width) |
+		     IDMA64C_CTLL_SRC_WIDTH(src_width);
+
+	lli->llp = llp;
+	return hw->llp;
+}
+
+static void idma64_desc_fill(struct idma64_chan *idma64c,
+		struct idma64_desc *desc)
+{
+	struct dma_slave_config *config = &idma64c->config;
+	struct idma64_hw_desc *hw = &desc->hw[desc->ndesc - 1];
+	struct idma64_lli *lli = hw->lli;
+	u64 llp = 0;
+	unsigned int i = desc->ndesc;
+
+	/* Fill the hardware descriptors and link them to a list */
+	do {
+		hw = &desc->hw[--i];
+		llp = idma64_hw_desc_fill(hw, config, desc->direction, llp);
+		desc->length += hw->len;
+	} while (i);
+
+	/* Trigger interrupt after last block */
+	lli->ctllo |= IDMA64C_CTLL_INT_EN;
+}
+
+static struct dma_async_tx_descriptor *idma64_prep_slave_sg(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	struct idma64_desc *desc;
+	struct scatterlist *sg;
+	unsigned int i;
+
+	desc = idma64_alloc_desc(sg_len);
+	if (!desc)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		struct idma64_hw_desc *hw = &desc->hw[i];
+
+		/* Allocate DMA capable memory for hardware descriptor */
+		hw->lli = dma_pool_alloc(idma64c->pool, GFP_NOWAIT, &hw->llp);
+		if (!hw->lli) {
+			desc->ndesc = i;
+			idma64_desc_free(idma64c, desc);
+			return NULL;
+		}
+
+		hw->phys = sg_dma_address(sg);
+		hw->len = sg_dma_len(sg);
+	}
+
+	desc->ndesc = sg_len;
+	desc->direction = direction;
+	desc->status = DMA_IN_PROGRESS;
+
+	idma64_desc_fill(idma64c, desc);
+	return vchan_tx_prep(&idma64c->vchan, &desc->vdesc, flags);
+}
+
+static void idma64_issue_pending(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	if (vchan_issue_pending(&idma64c->vchan) && !idma64c->desc)
+		idma64_start_transfer(idma64c);
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+}
+
+static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
+{
+	struct idma64_desc *desc = idma64c->desc;
+	struct idma64_hw_desc *hw;
+	size_t bytes = desc->length;
+	u64 llp;
+	u32 ctlhi;
+	unsigned int i = 0;
+
+	llp = channel_readq(idma64c, LLP);
+	do {
+		hw = &desc->hw[i];
+	} while ((hw->llp != llp) && (++i < desc->ndesc));
+
+	if (!i)
+		return bytes;
+
+	do {
+		bytes -= desc->hw[--i].len;
+	} while (i);
+
+	ctlhi = channel_readl(idma64c, CTL_HI);
+	return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
+}
+
+static enum dma_status idma64_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	struct virt_dma_desc *vdesc;
+	enum dma_status status;
+	size_t bytes;
+	unsigned long flags;
+
+	status = dma_cookie_status(chan, cookie, state);
+	if (status == DMA_COMPLETE)
+		return status;
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	vdesc = vchan_find_desc(&idma64c->vchan, cookie);
+	if (idma64c->desc && cookie == idma64c->desc->vdesc.tx.cookie) {
+		bytes = idma64_active_desc_size(idma64c);
+		dma_set_residue(state, bytes);
+		status = idma64c->desc->status;
+	} else if (vdesc) {
+		bytes = to_idma64_desc(vdesc)->length;
+		dma_set_residue(state, bytes);
+	}
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+	return status;
+}
+
+static void convert_burst(u32 *maxburst)
+{
+	if (*maxburst)
+		*maxburst = __fls(*maxburst);
+	else
+		*maxburst = 0;
+}
+
+static int idma64_slave_config(struct dma_chan *chan,
+		struct dma_slave_config *config)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+	/* Check if chan will be configured for slave transfers */
+	if (!is_slave_direction(config->direction))
+		return -EINVAL;
+
+	memcpy(&idma64c->config, config, sizeof(idma64c->config));
+
+	convert_burst(&idma64c->config.src_maxburst);
+	convert_burst(&idma64c->config.dst_maxburst);
+
+	return 0;
+}
+
+static void idma64_chan_deactivate(struct idma64_chan *idma64c)
+{
+	unsigned short count = 100;
+	u32 cfglo;
+
+	cfglo = channel_readl(idma64c, CFG_LO);
+	channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP);
+	do {
+		udelay(1);
+		cfglo = channel_readl(idma64c, CFG_LO);
+	} while (!(cfglo & IDMA64C_CFGL_FIFO_EMPTY) && --count);
+}
+
+static void idma64_chan_activate(struct idma64_chan *idma64c)
+{
+	u32 cfglo;
+
+	cfglo = channel_readl(idma64c, CFG_LO);
+	channel_writel(idma64c, CFG_LO, cfglo & ~IDMA64C_CFGL_CH_SUSP);
+}
+
+static int idma64_pause(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) {
+		idma64_chan_deactivate(idma64c);
+		idma64c->desc->status = DMA_PAUSED;
+	}
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+	return 0;
+}
+
+static int idma64_resume(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	if (idma64c->desc && idma64c->desc->status == DMA_PAUSED) {
+		idma64c->desc->status = DMA_IN_PROGRESS;
+		idma64_chan_activate(idma64c);
+	}
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+	return 0;
+}
+
+static int idma64_terminate_all(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	idma64_chan_deactivate(idma64c);
+	idma64_stop_transfer(idma64c);
+	if (idma64c->desc) {
+		idma64_vdesc_free(&idma64c->desc->vdesc);
+		idma64c->desc = NULL;
+	}
+	vchan_get_all_descriptors(&idma64c->vchan, &head);
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&idma64c->vchan, &head);
+	return 0;
+}
+
+static int idma64_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+	/* Create a pool of consistent memory blocks for hardware descriptors */
+	idma64c->pool = dma_pool_create(dev_name(chan2dev(chan)),
+					chan->device->dev,
+					sizeof(struct idma64_lli), 8, 0);
+	if (!idma64c->pool) {
+		dev_err(chan2dev(chan), "No memory for descriptors\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void idma64_free_chan_resources(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+	vchan_free_chan_resources(to_virt_chan(chan));
+	dma_pool_destroy(idma64c->pool);
+	idma64c->pool = NULL;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define IDMA64_BUSWIDTHS				\
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+static int idma64_probe(struct idma64_chip *chip)
+{
+	struct idma64 *idma64;
+	unsigned short nr_chan = IDMA64_NR_CHAN;
+	unsigned short i;
+	int ret;
+
+	idma64 = devm_kzalloc(chip->dev, sizeof(*idma64), GFP_KERNEL);
+	if (!idma64)
+		return -ENOMEM;
+
+	idma64->regs = chip->regs;
+	chip->idma64 = idma64;
+
+	idma64->chan = devm_kcalloc(chip->dev, nr_chan, sizeof(*idma64->chan),
+				    GFP_KERNEL);
+	if (!idma64->chan)
+		return -ENOMEM;
+
+	idma64->all_chan_mask = (1 << nr_chan) - 1;
+
+	/* Turn off iDMA controller */
+	idma64_off(idma64);
+
+	ret = devm_request_irq(chip->dev, chip->irq, idma64_irq, IRQF_SHARED,
+			       dev_name(chip->dev), idma64);
+	if (ret)
+		return ret;
+
+	INIT_LIST_HEAD(&idma64->dma.channels);
+	for (i = 0; i < nr_chan; i++) {
+		struct idma64_chan *idma64c = &idma64->chan[i];
+
+		idma64c->vchan.desc_free = idma64_vdesc_free;
+		vchan_init(&idma64c->vchan, &idma64->dma);
+
+		idma64c->regs = idma64->regs + i * IDMA64_CH_LENGTH;
+		idma64c->mask = BIT(i);
+	}
+
+	dma_cap_set(DMA_SLAVE, idma64->dma.cap_mask);
+	dma_cap_set(DMA_PRIVATE, idma64->dma.cap_mask);
+
+	idma64->dma.device_alloc_chan_resources = idma64_alloc_chan_resources;
+	idma64->dma.device_free_chan_resources = idma64_free_chan_resources;
+
+	idma64->dma.device_prep_slave_sg = idma64_prep_slave_sg;
+
+	idma64->dma.device_issue_pending = idma64_issue_pending;
+	idma64->dma.device_tx_status = idma64_tx_status;
+
+	idma64->dma.device_config = idma64_slave_config;
+	idma64->dma.device_pause = idma64_pause;
+	idma64->dma.device_resume = idma64_resume;
+	idma64->dma.device_terminate_all = idma64_terminate_all;
+
+	idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS;
+	idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
+	idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+	idma64->dma.dev = chip->dev;
+
+	ret = dma_async_device_register(&idma64->dma);
+	if (ret)
+		return ret;
+
+	dev_info(chip->dev, "Found Intel integrated DMA 64-bit\n");
+	return 0;
+}
+
+static int idma64_remove(struct idma64_chip *chip)
+{
+	struct idma64 *idma64 = chip->idma64;
+	unsigned short i;
+
+	dma_async_device_unregister(&idma64->dma);
+
+	/*
+	 * Explicitly call devm_free_irq() to avoid side effects with the
+	 * scheduled tasklets.
+	 */
+	devm_free_irq(chip->dev, chip->irq, idma64);
+
+	for (i = 0; i < idma64->dma.chancnt; i++) {
+		struct idma64_chan *idma64c = &idma64->chan[i];
+
+		tasklet_kill(&idma64c->vchan.task);
+	}
+
+	return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int idma64_platform_probe(struct platform_device *pdev)
+{
+	struct idma64_chip *chip;
+	struct device *dev = &pdev->dev;
+	struct resource *mem;
+	int ret;
+
+	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->irq = platform_get_irq(pdev, 0);
+	if (chip->irq < 0)
+		return chip->irq;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	chip->regs = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(chip->regs))
+		return PTR_ERR(chip->regs);
+
+	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (ret)
+		return ret;
+
+	chip->dev = dev;
+
+	ret = idma64_probe(chip);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, chip);
+	return 0;
+}
+
+static int idma64_platform_remove(struct platform_device *pdev)
+{
+	struct idma64_chip *chip = platform_get_drvdata(pdev);
+
+	return idma64_remove(chip);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int idma64_pm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct idma64_chip *chip = platform_get_drvdata(pdev);
+
+	idma64_off(chip->idma64);
+	return 0;
+}
+
+static int idma64_pm_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct idma64_chip *chip = platform_get_drvdata(pdev);
+
+	idma64_on(chip->idma64);
+	return 0;
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops idma64_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(idma64_pm_suspend, idma64_pm_resume)
+};
+
+static struct platform_driver idma64_platform_driver = {
+	.probe		= idma64_platform_probe,
+	.remove		= idma64_platform_remove,
+	.driver = {
+		.name	= DRV_NAME,
+		.pm	= &idma64_dev_pm_ops,
+	},
+};
+
+module_platform_driver(idma64_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("iDMA64 core driver");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
new file mode 100644
index 0000000..a4d9968
--- /dev/null
+++ b/drivers/dma/idma64.h
@@ -0,0 +1,233 @@ 
+/*
+ * Driver for the Intel integrated DMA 64-bit
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DMA_IDMA64_H__
+#define __DMA_IDMA64_H__
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "virt-dma.h"
+
+/* Channel registers */
+
+#define IDMA64_CH_SAR		0x00	/* Source Address Register */
+#define IDMA64_CH_DAR		0x08	/* Destination Address Register */
+#define IDMA64_CH_LLP		0x10	/* Linked List Pointer */
+#define IDMA64_CH_CTL_LO	0x18	/* Control Register Low */
+#define IDMA64_CH_CTL_HI	0x1c	/* Control Register High */
+#define IDMA64_CH_SSTAT		0x20
+#define IDMA64_CH_DSTAT		0x28
+#define IDMA64_CH_SSTATAR	0x30
+#define IDMA64_CH_DSTATAR	0x38
+#define IDMA64_CH_CFG_LO	0x40	/* Configuration Register Low */
+#define IDMA64_CH_CFG_HI	0x44	/* Configuration Register High */
+#define IDMA64_CH_SGR		0x48
+#define IDMA64_CH_DSR		0x50
+
+#define IDMA64_CH_LENGTH	0x58
+
+/* Bitfields in CTL_LO */
+#define IDMA64C_CTLL_INT_EN		(1 << 0)	/* irqs enabled? */
+#define IDMA64C_CTLL_DST_WIDTH(x)	((x) << 1)	/* bytes per element */
+#define IDMA64C_CTLL_SRC_WIDTH(x)	((x) << 4)
+#define IDMA64C_CTLL_DST_INC		(0 << 8)	/* DAR update/not */
+#define IDMA64C_CTLL_DST_FIX		(1 << 8)
+#define IDMA64C_CTLL_SRC_INC		(0 << 10)	/* SAR update/not */
+#define IDMA64C_CTLL_SRC_FIX		(1 << 10)
+#define IDMA64C_CTLL_DST_MSIZE(x)	((x) << 11)	/* burst, #elements */
+#define IDMA64C_CTLL_SRC_MSIZE(x)	((x) << 14)
+#define IDMA64C_CTLL_FC_M2P		(1 << 20)	/* mem-to-periph */
+#define IDMA64C_CTLL_FC_P2M		(2 << 20)	/* periph-to-mem */
+#define IDMA64C_CTLL_LLP_D_EN		(1 << 27)	/* dest block chain */
+#define IDMA64C_CTLL_LLP_S_EN		(1 << 28)	/* src block chain */
+
+/* Bitfields in CTL_HI */
+#define IDMA64C_CTLH_BLOCK_TS(x)	((x) & ((1 << 17) - 1))
+#define IDMA64C_CTLH_DONE		(1 << 17)
+
+/* Bitfields in CFG_LO */
+#define IDMA64C_CFGL_DST_BURST_ALIGN	(1 << 0)	/* dst burst align */
+#define IDMA64C_CFGL_SRC_BURST_ALIGN	(1 << 1)	/* src burst align */
+#define IDMA64C_CFGL_CH_SUSP		(1 << 8)
+#define IDMA64C_CFGL_FIFO_EMPTY		(1 << 9)
+#define IDMA64C_CFGL_CH_DRAIN		(1 << 10)	/* drain FIFO */
+#define IDMA64C_CFGL_DST_OPT_BL		(1 << 20)	/* optimize dst burst length */
+#define IDMA64C_CFGL_SRC_OPT_BL		(1 << 21)	/* optimize src burst length */
+
+/* Bitfields in CFG_HI */
+#define IDMA64C_CFGH_SRC_PER(x)		((x) << 0)	/* src peripheral */
+#define IDMA64C_CFGH_DST_PER(x)		((x) << 4)	/* dst peripheral */
+#define IDMA64C_CFGH_RD_ISSUE_THD(x)	((x) << 8)
+#define IDMA64C_CFGH_RW_ISSUE_THD(x)	((x) << 18)
+
+/* Interrupt registers */
+
+#define IDMA64_INT_XFER		0x00
+#define IDMA64_INT_BLOCK	0x08
+#define IDMA64_INT_SRC_TRAN	0x10
+#define IDMA64_INT_DST_TRAN	0x18
+#define IDMA64_INT_ERROR	0x20
+
+#define IDMA64_RAW(x)		(0x2c0 + IDMA64_INT_##x)	/* r */
+#define IDMA64_STATUS(x)	(0x2e8 + IDMA64_INT_##x)	/* r (raw & mask) */
+#define IDMA64_MASK(x)		(0x310 + IDMA64_INT_##x)	/* rw (set = irq enabled) */
+#define IDMA64_CLEAR(x)		(0x338 + IDMA64_INT_##x)	/* w (ack, affects "raw") */
+
+/* Common registers */
+
+#define IDMA64_STATUS_INT	0x360	/* r */
+#define IDMA64_CFG		0x398
+#define IDMA64_CH_EN		0x3a0
+
+/* Bitfields in CFG */
+#define IDMA64_CFG_DMA_EN		(1 << 0)
+
+/* Hardware descriptor for Linked List transfers */
+struct idma64_lli {
+	u64		sar;
+	u64		dar;
+	u64		llp;
+	u32		ctllo;
+	u32		ctlhi;
+	u32		sstat;
+	u32		dstat;
+};
+
+struct idma64_hw_desc {
+	struct idma64_lli *lli;
+	dma_addr_t llp;
+	dma_addr_t phys;
+	unsigned int len;
+};
+
+struct idma64_desc {
+	struct virt_dma_desc vdesc;
+	enum dma_transfer_direction direction;
+	struct idma64_hw_desc *hw;
+	unsigned int ndesc;
+	size_t length;
+	enum dma_status status;
+};
+
+static inline struct idma64_desc *to_idma64_desc(struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct idma64_desc, vdesc);
+}
+
+struct idma64_chan {
+	struct virt_dma_chan vchan;
+
+	void __iomem *regs;
+
+	/* hardware configuration */
+	enum dma_transfer_direction direction;
+	unsigned int mask;
+	struct dma_slave_config config;
+
+	void *pool;
+	struct idma64_desc *desc;
+};
+
+static inline struct idma64_chan *to_idma64_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct idma64_chan, vchan.chan);
+}
+
+#define channel_set_bit(idma64, reg, mask)	\
+	dma_writel(idma64, reg, ((mask) << 8) | (mask))
+#define channel_clear_bit(idma64, reg, mask)	\
+	dma_writel(idma64, reg, ((mask) << 8) | 0)
+
+static inline u32 idma64c_readl(struct idma64_chan *idma64c, int offset)
+{
+	return readl(idma64c->regs + offset);
+}
+
+static inline void idma64c_writel(struct idma64_chan *idma64c, int offset,
+				  u32 value)
+{
+	writel(value, idma64c->regs + offset);
+}
+
+#define channel_readl(idma64c, reg)		\
+	idma64c_readl(idma64c, IDMA64_CH_##reg)
+#define channel_writel(idma64c, reg, value)	\
+	idma64c_writel(idma64c, IDMA64_CH_##reg, (value))
+
+static inline u64 idma64c_readq(struct idma64_chan *idma64c, int offset)
+{
+	u64 l, h;
+
+	l = idma64c_readl(idma64c, offset);
+	h = idma64c_readl(idma64c, offset + 4);
+
+	return l | (h << 32);
+}
+
+static inline void idma64c_writeq(struct idma64_chan *idma64c, int offset,
+				  u64 value)
+{
+	idma64c_writel(idma64c, offset, value);
+	idma64c_writel(idma64c, offset + 4, value >> 32);
+}
+
+#define channel_readq(idma64c, reg)		\
+	idma64c_readq(idma64c, IDMA64_CH_##reg)
+#define channel_writeq(idma64c, reg, value)	\
+	idma64c_writeq(idma64c, IDMA64_CH_##reg, (value))
+
+struct idma64 {
+	struct dma_device dma;
+
+	void __iomem *regs;
+
+	/* channels */
+	unsigned short all_chan_mask;
+	struct idma64_chan *chan;
+};
+
+static inline struct idma64 *to_idma64(struct dma_device *ddev)
+{
+	return container_of(ddev, struct idma64, dma);
+}
+
+static inline u32 idma64_readl(struct idma64 *idma64, int offset)
+{
+	return readl(idma64->regs + offset);
+}
+
+static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
+{
+	writel(value, idma64->regs + offset);
+}
+
+#define dma_readl(idma64, reg)			\
+	idma64_readl(idma64, IDMA64_##reg)
+#define dma_writel(idma64, reg, value)		\
+	idma64_writel(idma64, IDMA64_##reg, (value))
+
+/**
+ * struct idma64_chip - representation of iDMA 64-bit controller hardware
+ * @dev:		struct device of the DMA controller
+ * @irq:		irq line
+ * @regs:		memory mapped I/O space
+ * @idma64:		struct idma64 that is filled by idma64_probe()
+ */
+struct idma64_chip {
+	struct device	*dev;
+	int		irq;
+	void __iomem	*regs;
+	struct idma64	*idma64;
+};
+
+#endif /* __DMA_IDMA64_H__ */
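
A closing note on the register conventions above: the patch itself does not
spell it out, but channel_set_bit()/channel_clear_bit() follow the DesignWare
scheme in which the upper byte of a write to CH_EN or a MASK register acts as
a per-bit write enable for the lower byte -- hence ((mask) << 8) | (mask) to
set a bit and ((mask) << 8) | 0 to clear it. A standalone sketch of the
encoding (plain C, not driver code):

/* Illustration of the masked-write encoding used by CH_EN/MASK registers. */
#include <stdint.h>
#include <stdio.h>

/* Upper byte = per-bit write enables, lower byte = values being written. */
static uint32_t ch_set(uint32_t mask)   { return (mask << 8) | mask; }
static uint32_t ch_clear(uint32_t mask) { return (mask << 8) | 0; }

int main(void)
{
	uint32_t ch1 = 1u << 1;	/* channel 1, i.e. idma64c->mask = BIT(1) */

	/*
	 * Setting writes 0x0202: bit 9 says "bit 1 is being written" and
	 * bit 1 supplies the value 1. Clearing writes 0x0200: the same
	 * enable bit with a value of 0. Other channels stay untouched,
	 * so no read-modify-write cycle is needed.
	 */
	printf("set ch1:   %#06x\n", ch_set(ch1));	/* 0x0202 */
	printf("clear ch1: %#06x\n", ch_clear(ch1));	/* 0x0200 */
	return 0;
}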