[06/12] ioatdma: remove dma_v2.*

Message ID: 20150804232758.40926.19783.stgit@djiang5-desk3.ch.intel.com (mailing list archive)
State: Superseded

Commit Message

Dave Jiang Aug. 4, 2015, 11:27 p.m. UTC
Clean out dma_v2 and remove the ioat2_* calls, since we are moving
everything to just ioat.
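
The consolidation is a mechanical rename that drops the "2" from the
ring API and the channel entry points, e.g.:

    ioat2_ring_active()          -> ioat_ring_active()
    ioat2_get_ring_ent()         -> ioat_get_ring_ent()
    ioat2_check_space_lock()     -> ioat_check_space_lock()
    ioat2_alloc_chan_resources() -> ioat_alloc_chan_resources()
    ioat2_dma_prep_memcpy_lock() -> ioat_dma_prep_memcpy_lock()
    ioat2_cache / ioat2_ktype    -> ioat_cache / ioat_ktype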

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
 drivers/dma/ioat/Makefile |    2 
 drivers/dma/ioat/dca.c    |    1 
 drivers/dma/ioat/dma.c    |  667 +++++++++++++++++++++++++++++++++++++++++++
 drivers/dma/ioat/dma.h    |  119 ++++++++
 drivers/dma/ioat/dma_v2.c |  695 ---------------------------------------------
 drivers/dma/ioat/dma_v2.h |  143 ---------
 drivers/dma/ioat/dma_v3.c |   63 ++--
 drivers/dma/ioat/pci.c    |   15 -
 8 files changed, 816 insertions(+), 889 deletions(-)
 delete mode 100644 drivers/dma/ioat/dma_v2.c
 delete mode 100644 drivers/dma/ioat/dma_v2.h
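
For context (not part of this patch): the consolidated entry points back
the generic dmaengine callbacks wired up in ioat3_dma_probe() below, so a
minimal client sketch looks like the following. This is an illustration
only; the channel lookup and DMA mappings are assumed to exist elsewhere.

    #include <linux/dmaengine.h>

    static int ioat_memcpy_sketch(struct dma_chan *chan, dma_addr_t dst,
                                  dma_addr_t src, size_t len)
    {
            struct dma_async_tx_descriptor *tx;
            dma_cookie_t cookie;

            /* reaches ioat_dma_prep_memcpy_lock(), which returns with
             * prep_lock held to keep submission in order
             */
            tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                      DMA_PREP_INTERRUPT);
            if (!tx)
                    return -ENOMEM;

            /* drops prep_lock via ioat_tx_submit_unlock() */
            cookie = dmaengine_submit(tx);
            if (dma_submit_error(cookie))
                    return -EIO;

            /* flushes the ring tail to hardware via ioat_issue_pending() */
            dma_async_issue_pending(chan);
            return 0;
    }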



Patch

diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile
index 0ff7270..655df91 100644
--- a/drivers/dma/ioat/Makefile
+++ b/drivers/dma/ioat/Makefile
@@ -1,2 +1,2 @@ 
 obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
-ioatdma-y := pci.o dma.o dma_v2.o dma_v3.o dca.o
+ioatdma-y := pci.o dma.o dma_v3.o dca.o
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 540d94c..f2b9a42 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -31,7 +31,6 @@ 
 
 #include "dma.h"
 #include "registers.h"
-#include "dma_v2.h"
 
 /*
  * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 3cf2639..764cd8f 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -41,6 +41,19 @@  int ioat_pending_level = 4;
 module_param(ioat_pending_level, int, 0644);
 MODULE_PARM_DESC(ioat_pending_level,
 		 "high-water mark for pushing ioat descriptors (default: 4)");
+int ioat_ring_alloc_order = 8;
+module_param(ioat_ring_alloc_order, int, 0644);
+MODULE_PARM_DESC(ioat_ring_alloc_order,
+		 "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)");
+static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
+module_param(ioat_ring_max_alloc_order, int, 0644);
+MODULE_PARM_DESC(ioat_ring_max_alloc_order,
+		 "ioat+: upper limit for ring size (default: 16)");
+static char ioat_interrupt_style[32] = "msix";
+module_param_string(ioat_interrupt_style, ioat_interrupt_style,
+		    sizeof(ioat_interrupt_style), 0644);
+MODULE_PARM_DESC(ioat_interrupt_style,
+		 "set ioat interrupt style: msix (default), msi, intx");
 
 /**
  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
@@ -314,12 +327,6 @@  out:
 	return err;
 }
 
-static char ioat_interrupt_style[32] = "msix";
-module_param_string(ioat_interrupt_style, ioat_interrupt_style,
-		    sizeof(ioat_interrupt_style), 0644);
-MODULE_PARM_DESC(ioat_interrupt_style,
-		 "set ioat interrupt style: msix (default), msi, intx");
-
 /**
  * ioat_dma_setup_interrupts - setup interrupt handler
  * @ioat_dma: ioat dma device
@@ -577,3 +584,651 @@  void ioat_dma_remove(struct ioatdma_device *ioat_dma)
 
 	INIT_LIST_HEAD(&dma->channels);
 }
+
+void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
+{
+	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
+	ioat_chan->issued = ioat_chan->head;
+	writew(ioat_chan->dmacount,
+	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+	dev_dbg(to_dev(ioat_chan),
+		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail,
+		ioat_chan->issued, ioat_chan->dmacount);
+}
+
+void ioat_issue_pending(struct dma_chan *c)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+	if (ioat_ring_pending(ioat_chan)) {
+		spin_lock_bh(&ioat_chan->prep_lock);
+		__ioat_issue_pending(ioat_chan);
+		spin_unlock_bh(&ioat_chan->prep_lock);
+	}
+}
+
+/**
+ * ioat_update_pending - log pending descriptors
+ * @ioat_chan: ioat channel
+ *
+ * Check if the number of unsubmitted descriptors has exceeded the
+ * watermark.  Called with prep_lock held
+ */
+static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
+{
+	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
+		__ioat_issue_pending(ioat_chan);
+}
+
+static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
+{
+	struct ioat_ring_ent *desc;
+	struct ioat_dma_descriptor *hw;
+
+	if (ioat_ring_space(ioat_chan) < 1) {
+		dev_err(to_dev(ioat_chan),
+			"Unable to start null desc - ring full\n");
+		return;
+	}
+
+	dev_dbg(to_dev(ioat_chan),
+		"%s: head: %#x tail: %#x issued: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
+	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
+
+	hw = desc->hw;
+	hw->ctl = 0;
+	hw->ctl_f.null = 1;
+	hw->ctl_f.int_en = 1;
+	hw->ctl_f.compl_write = 1;
+	/* set size to non-zero value (channel returns error when size is 0) */
+	hw->size = NULL_DESC_BUFFER_SIZE;
+	hw->src_addr = 0;
+	hw->dst_addr = 0;
+	async_tx_ack(&desc->txd);
+	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
+	dump_desc_dbg(ioat_chan, desc);
+	/* make sure descriptors are written before we submit */
+	wmb();
+	ioat_chan->head += 1;
+	__ioat_issue_pending(ioat_chan);
+}
+
+static void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
+{
+	spin_lock_bh(&ioat_chan->prep_lock);
+	__ioat_start_null_desc(ioat_chan);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+}
+
+void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
+{
+	/* set the tail to be re-issued */
+	ioat_chan->issued = ioat_chan->tail;
+	ioat_chan->dmacount = 0;
+	set_bit(IOAT_COMPLETION_PENDING, &ioat_chan->state);
+	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	dev_dbg(to_dev(ioat_chan),
+		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail,
+		ioat_chan->issued, ioat_chan->dmacount);
+
+	if (ioat_ring_pending(ioat_chan)) {
+		struct ioat_ring_ent *desc;
+
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
+		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
+		__ioat_issue_pending(ioat_chan);
+	} else
+		__ioat_start_null_desc(ioat_chan);
+}
+
+int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
+{
+	unsigned long end = jiffies + tmo;
+	int err = 0;
+	u32 status;
+
+	status = ioat_chansts(ioat_chan);
+	if (is_ioat_active(status) || is_ioat_idle(status))
+		ioat_suspend(ioat_chan);
+	while (is_ioat_active(status) || is_ioat_idle(status)) {
+		if (tmo && time_after(jiffies, end)) {
+			err = -ETIMEDOUT;
+			break;
+		}
+		status = ioat_chansts(ioat_chan);
+		cpu_relax();
+	}
+
+	return err;
+}
+
+int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
+{
+	unsigned long end = jiffies + tmo;
+	int err = 0;
+
+	ioat_reset(ioat_chan);
+	while (ioat_reset_pending(ioat_chan)) {
+		if (end && time_after(jiffies, end)) {
+			err = -ETIMEDOUT;
+			break;
+		}
+		cpu_relax();
+	}
+
+	return err;
+}
+
+/**
+ * ioat_enumerate_channels - find and initialize the device's channels
+ * @ioat_dma: the ioat dma device to be enumerated
+ */
+int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
+{
+	struct ioatdma_chan *ioat_chan;
+	struct device *dev = &ioat_dma->pdev->dev;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	u8 xfercap_log;
+	int i;
+
+	INIT_LIST_HEAD(&dma->channels);
+	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
+	dma->chancnt &= 0x1f; /* bits [4:0] valid */
+	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
+		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
+			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
+		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
+	}
+	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
+	xfercap_log &= 0x1f; /* bits [4:0] valid */
+	if (xfercap_log == 0)
+		return 0;
+	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
+
+	for (i = 0; i < dma->chancnt; i++) {
+		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
+		if (!ioat_chan)
+			break;
+
+		ioat_init_channel(ioat_dma, ioat_chan, i);
+		ioat_chan->xfercap_log = xfercap_log;
+		spin_lock_init(&ioat_chan->prep_lock);
+		if (ioat_dma->reset_hw(ioat_chan)) {
+			i = 0;
+			break;
+		}
+	}
+	dma->chancnt = i;
+	return i;
+}
+
+static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
+{
+	struct dma_chan *c = tx->chan;
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	dma_cookie_t cookie;
+
+	cookie = dma_cookie_assign(tx);
+	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);
+
+	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	/* make descriptor updates visible before advancing ioat->head,
+	 * this is purposefully not smp_wmb() since we are also
+	 * publishing the descriptor updates to a dma device
+	 */
+	wmb();
+
+	ioat_chan->head += ioat_chan->produce;
+
+	ioat_update_pending(ioat_chan);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+
+	return cookie;
+}
+
+static struct ioat_ring_ent *
+ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
+{
+	struct ioat_dma_descriptor *hw;
+	struct ioat_ring_ent *desc;
+	struct ioatdma_device *ioat_dma;
+	dma_addr_t phys;
+
+	ioat_dma = to_ioatdma_device(chan->device);
+	hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
+	if (!hw)
+		return NULL;
+	memset(hw, 0, sizeof(*hw));
+
+	desc = kmem_cache_zalloc(ioat_cache, flags);
+	if (!desc) {
+		pci_pool_free(ioat_dma->dma_pool, hw, phys);
+		return NULL;
+	}
+
+	dma_async_tx_descriptor_init(&desc->txd, chan);
+	desc->txd.tx_submit = ioat_tx_submit_unlock;
+	desc->hw = hw;
+	desc->txd.phys = phys;
+	return desc;
+}
+
+static void
+ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
+{
+	struct ioatdma_device *ioat_dma;
+
+	ioat_dma = to_ioatdma_device(chan->device);
+	pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
+	kmem_cache_free(ioat_cache, desc);
+}
+
+static struct ioat_ring_ent **
+ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
+{
+	struct ioat_ring_ent **ring;
+	int descs = 1 << order;
+	int i;
+
+	if (order > ioat_get_max_alloc_order())
+		return NULL;
+
+	/* allocate the array to hold the software ring */
+	ring = kcalloc(descs, sizeof(*ring), flags);
+	if (!ring)
+		return NULL;
+	for (i = 0; i < descs; i++) {
+		ring[i] = ioat_alloc_ring_ent(c, flags);
+		if (!ring[i]) {
+			while (i--)
+				ioat_free_ring_ent(ring[i], c);
+			kfree(ring);
+			return NULL;
+		}
+		set_desc_id(ring[i], i);
+	}
+
+	/* link descs */
+	for (i = 0; i < descs-1; i++) {
+		struct ioat_ring_ent *next = ring[i+1];
+		struct ioat_dma_descriptor *hw = ring[i]->hw;
+
+		hw->next = next->txd.phys;
+	}
+	ring[i]->hw->next = ring[0]->txd.phys;
+
+	return ring;
+}
+
+/**
+ * ioat_free_chan_resources - release all the descriptors
+ * @chan: the channel to be cleaned
+ */
+void ioat_free_chan_resources(struct dma_chan *c)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct ioat_ring_ent *desc;
+	const u16 total_descs = 1 << ioat_chan->alloc_order;
+	int descs;
+	int i;
+
+	/* Before freeing channel resources first check
+	 * if they have been previously allocated for this channel.
+	 */
+	if (!ioat_chan->ring)
+		return;
+
+	ioat_stop(ioat_chan);
+	ioat_dma->reset_hw(ioat_chan);
+
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	descs = ioat_ring_space(ioat_chan);
+	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
+	for (i = 0; i < descs; i++) {
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
+		ioat_free_ring_ent(desc, c);
+	}
+
+	if (descs < total_descs)
+		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
+			total_descs - descs);
+
+	for (i = 0; i < total_descs - descs; i++) {
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
+		dump_desc_dbg(ioat_chan, desc);
+		ioat_free_ring_ent(desc, c);
+	}
+
+	kfree(ioat_chan->ring);
+	ioat_chan->ring = NULL;
+	ioat_chan->alloc_order = 0;
+	pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
+		      ioat_chan->completion_dma);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+
+	ioat_chan->last_completion = 0;
+	ioat_chan->completion_dma = 0;
+	ioat_chan->dmacount = 0;
+}
+
+/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
+ * @chan: channel to be initialized
+ */
+int ioat_alloc_chan_resources(struct dma_chan *c)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioat_ring_ent **ring;
+	u64 status;
+	int order;
+	int i = 0;
+	u32 chanerr;
+
+	/* have we already been set up? */
+	if (ioat_chan->ring)
+		return 1 << ioat_chan->alloc_order;
+
+	/* Setup register to interrupt and write completion status on error */
+	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
+
+	/* allocate a completion writeback area */
+	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
+	ioat_chan->completion =
+		pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
+			       GFP_KERNEL, &ioat_chan->completion_dma);
+	if (!ioat_chan->completion)
+		return -ENOMEM;
+
+	memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
+	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
+	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+	writel(((u64)ioat_chan->completion_dma) >> 32,
+	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
+	order = ioat_get_alloc_order();
+	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+	if (!ring)
+		return -ENOMEM;
+
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	ioat_chan->ring = ring;
+	ioat_chan->head = 0;
+	ioat_chan->issued = 0;
+	ioat_chan->tail = 0;
+	ioat_chan->alloc_order = order;
+	set_bit(IOAT_RUN, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+
+	ioat_start_null_desc(ioat_chan);
+
+	/* check that we got off the ground */
+	do {
+		udelay(1);
+		status = ioat_chansts(ioat_chan);
+	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
+
+	if (is_ioat_active(status) || is_ioat_idle(status))
+		return 1 << ioat_chan->alloc_order;
+
+	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+
+	dev_WARN(to_dev(ioat_chan),
+		"failed to start channel chanerr: %#x\n", chanerr);
+	ioat_free_chan_resources(c);
+	return -EFAULT;
+}
+
+bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
+{
+	/* reshape differs from normal ring allocation in that we want
+	 * to allocate a new software ring while only
+	 * extending/truncating the hardware ring
+	 */
+	struct dma_chan *c = &ioat_chan->dma_chan;
+	const u32 curr_size = ioat_ring_size(ioat_chan);
+	const u16 active = ioat_ring_active(ioat_chan);
+	const u32 new_size = 1 << order;
+	struct ioat_ring_ent **ring;
+	u16 i;
+
+	if (order > ioat_get_max_alloc_order())
+		return false;
+
+	/* double check that we have at least 1 free descriptor */
+	if (active == curr_size)
+		return false;
+
+	/* when shrinking, verify that we can hold the current active
+	 * set in the new ring
+	 */
+	if (active >= new_size)
+		return false;
+
+	/* allocate the array to hold the software ring */
+	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
+	if (!ring)
+		return false;
+
+	/* allocate/trim descriptors as needed */
+	if (new_size > curr_size) {
+		/* copy current descriptors to the new ring */
+		for (i = 0; i < curr_size; i++) {
+			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+
+			ring[new_idx] = ioat_chan->ring[curr_idx];
+			set_desc_id(ring[new_idx], new_idx);
+		}
+
+		/* add new descriptors to the ring */
+		for (i = curr_size; i < new_size; i++) {
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+
+			ring[new_idx] = ioat_alloc_ring_ent(c, GFP_NOWAIT);
+			if (!ring[new_idx]) {
+				while (i--) {
+					u16 new_idx = (ioat_chan->tail+i) &
+						       (new_size-1);
+
+					ioat_free_ring_ent(ring[new_idx], c);
+				}
+				kfree(ring);
+				return false;
+			}
+			set_desc_id(ring[new_idx], new_idx);
+		}
+
+		/* hw link new descriptors */
+		for (i = curr_size-1; i < new_size; i++) {
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+			struct ioat_ring_ent *next =
+				ring[(new_idx+1) & (new_size-1)];
+			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
+
+			hw->next = next->txd.phys;
+		}
+	} else {
+		struct ioat_dma_descriptor *hw;
+		struct ioat_ring_ent *next;
+
+		/* copy current descriptors to the new ring, dropping the
+		 * removed descriptors
+		 */
+		for (i = 0; i < new_size; i++) {
+			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+
+			ring[new_idx] = ioat_chan->ring[curr_idx];
+			set_desc_id(ring[new_idx], new_idx);
+		}
+
+		/* free deleted descriptors */
+		for (i = new_size; i < curr_size; i++) {
+			struct ioat_ring_ent *ent;
+
+			ent = ioat_get_ring_ent(ioat_chan, ioat_chan->tail+i);
+			ioat_free_ring_ent(ent, c);
+		}
+
+		/* fix up hardware ring */
+		hw = ring[(ioat_chan->tail+new_size-1) & (new_size-1)]->hw;
+		next = ring[(ioat_chan->tail+new_size) & (new_size-1)];
+		hw->next = next->txd.phys;
+	}
+
+	dev_dbg(to_dev(ioat_chan), "%s: allocated %d descriptors\n",
+		__func__, new_size);
+
+	kfree(ioat_chan->ring);
+	ioat_chan->ring = ring;
+	ioat_chan->alloc_order = order;
+
+	return true;
+}
+
+/**
+ * ioat_check_space_lock - verify space and grab ring producer lock
+ * @ioat_chan: ioat channel (ring) to operate on
+ * @num_descs: allocation length
+ */
+int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
+{
+	bool retry;
+
+ retry:
+	spin_lock_bh(&ioat_chan->prep_lock);
+	/* never allow the last descriptor to be consumed, we need at
+	 * least one free at all times to allow for on-the-fly ring
+	 * resizing.
+	 */
+	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
+		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
+			__func__, num_descs, ioat_chan->head,
+			ioat_chan->tail, ioat_chan->issued);
+		ioat_chan->produce = num_descs;
+		return 0;  /* with ioat->prep_lock held */
+	}
+	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+
+	/* is another cpu already trying to expand the ring? */
+	if (retry)
+		goto retry;
+
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	retry = reshape_ring(ioat_chan, ioat_chan->alloc_order + 1);
+	clear_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+
+	/* if we were able to expand the ring retry the allocation */
+	if (retry)
+		goto retry;
+
+	dev_dbg_ratelimited(to_dev(ioat_chan),
+			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
+			    __func__, num_descs, ioat_chan->head,
+			    ioat_chan->tail, ioat_chan->issued);
+
+	/* progress reclaim in the allocation failure case we may be
+	 * called under bh_disabled so we need to trigger the timer
+	 * event directly
+	 */
+	if (time_is_before_jiffies(ioat_chan->timer.expires)
+	    && timer_pending(&ioat_chan->timer)) {
+		struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+		ioat_dma->timer_fn((unsigned long)ioat_chan);
+	}
+
+	return -ENOMEM;
+}
+
+struct dma_async_tx_descriptor *
+ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
+			   dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioat_dma_descriptor *hw;
+	struct ioat_ring_ent *desc;
+	dma_addr_t dst = dma_dest;
+	dma_addr_t src = dma_src;
+	size_t total_len = len;
+	int num_descs, idx, i;
+
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+	if (likely(num_descs) &&
+	    ioat_check_space_lock(ioat_chan, num_descs) == 0)
+		idx = ioat_chan->head;
+	else
+		return NULL;
+	i = 0;
+	do {
+		size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log);
+
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		hw = desc->hw;
+
+		hw->size = copy;
+		hw->ctl = 0;
+		hw->src_addr = src;
+		hw->dst_addr = dst;
+
+		len -= copy;
+		dst += copy;
+		src += copy;
+		dump_desc_dbg(ioat_chan, desc);
+	} while (++i < num_descs);
+
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+	hw->ctl_f.compl_write = 1;
+	dump_desc_dbg(ioat_chan, desc);
+	/* we leave the channel locked to ensure in order submission */
+
+	return &desc->txd;
+}
+
+static ssize_t ring_size_show(struct dma_chan *c, char *page)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+	return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1);
+}
+static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
+
+static ssize_t ring_active_show(struct dma_chan *c, char *page)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+	/* ...taken outside the lock, no need to be precise */
+	return sprintf(page, "%d\n", ioat_ring_active(ioat_chan));
+}
+static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
+
+static struct attribute *ioat_attrs[] = {
+	&ring_size_attr.attr,
+	&ring_active_attr.attr,
+	&ioat_cap_attr.attr,
+	&ioat_version_attr.attr,
+	NULL,
+};
+
+struct kobj_type ioat_ktype = {
+	.sysfs_ops = &ioat_sysfs_ops,
+	.default_attrs = ioat_attrs,
+};
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 11bbcf2..2566ec6 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -18,13 +18,14 @@ 
 #define IOATDMA_H
 
 #include <linux/dmaengine.h>
-#include "hw.h"
-#include "registers.h"
 #include <linux/init.h>
 #include <linux/dmapool.h>
 #include <linux/cache.h>
 #include <linux/pci_ids.h>
-#include <net/tcp.h>
+#include <linux/circ_buf.h>
+#include <linux/interrupt.h>
+#include "registers.h"
+#include "hw.h"
 
 #define IOAT_DMA_VERSION  "4.00"
 
@@ -154,6 +155,41 @@  struct ioat_sed_ent {
 	unsigned int hw_pool;
 };
 
+/**
+ * struct ioat_ring_ent - wrapper around hardware descriptor
+ * @hw: hardware DMA descriptor (for memcpy)
+ * @fill: hardware fill descriptor
+ * @xor: hardware xor descriptor
+ * @xor_ex: hardware xor extension descriptor
+ * @pq: hardware pq descriptor
+ * @pq_ex: hardware pq extension descriptor
+ * @pqu: hardware pq update descriptor
+ * @raw: hardware raw (un-typed) descriptor
+ * @txd: the generic software descriptor for all engines
+ * @len: total transaction length for unmap
+ * @result: asynchronous result of validate operations
+ * @id: identifier for debug
+ */
+
+struct ioat_ring_ent {
+	union {
+		struct ioat_dma_descriptor *hw;
+		struct ioat_xor_descriptor *xor;
+		struct ioat_xor_ext_descriptor *xor_ex;
+		struct ioat_pq_descriptor *pq;
+		struct ioat_pq_ext_descriptor *pq_ex;
+		struct ioat_pq_update_descriptor *pqu;
+		struct ioat_raw_descriptor *raw;
+	};
+	size_t len;
+	struct dma_async_tx_descriptor txd;
+	enum sum_check_flags *result;
+	#ifdef DEBUG
+	int id;
+	#endif
+	struct ioat_sed_ent *sed;
+};
+
 static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
 {
 	return container_of(c, struct ioatdma_chan, dma_chan);
@@ -291,6 +327,60 @@  static inline bool is_ioat_bug(unsigned long err)
 	return !!err;
 }
 
+#define IOAT_MAX_ORDER 16
+#define ioat_get_alloc_order() \
+	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
+#define ioat_get_max_alloc_order() \
+	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
+
+static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
+{
+	return 1 << ioat_chan->alloc_order;
+}
+
+/* count of descriptors in flight with the engine */
+static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
+{
+	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
+			ioat_ring_size(ioat_chan));
+}
+
+/* count of descriptors pending submission to hardware */
+static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
+{
+	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
+			ioat_ring_size(ioat_chan));
+}
+
+static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
+{
+	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
+}
+
+static inline u16
+ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
+{
+	u16 num_descs = len >> ioat_chan->xfercap_log;
+
+	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
+	return num_descs;
+}
+
+static inline struct ioat_ring_ent *
+ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
+{
+	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
+}
+
+static inline void
+ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
+{
+	writel(addr & 0x00000000FFFFFFFF,
+	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+	writel(addr >> 32,
+	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+}
+
 int ioat_probe(struct ioatdma_device *ioat_dma);
 int ioat_register(struct ioatdma_device *ioat_dma);
 int ioat_dma_self_test(struct ioatdma_device *ioat_dma);
@@ -306,7 +396,30 @@  void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
 void ioat_kobject_del(struct ioatdma_device *ioat_dma);
 int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
 void ioat_stop(struct ioatdma_chan *ioat_chan);
+int ioat_dma_probe(struct ioatdma_device *ioat_dma, int dca);
+int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca);
+struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
+int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
+struct dma_async_tx_descriptor *
+ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
+			   dma_addr_t dma_src, size_t len, unsigned long flags);
+void ioat_issue_pending(struct dma_chan *chan);
+int ioat_alloc_chan_resources(struct dma_chan *c);
+void ioat_free_chan_resources(struct dma_chan *c);
+void __ioat_restart_chan(struct ioatdma_chan *ioat_chan);
+bool reshape_ring(struct ioatdma_chan *ioat, int order);
+void __ioat_issue_pending(struct ioatdma_chan *ioat_chan);
+void ioat_timer_event(unsigned long data);
+int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo);
+int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo);
+
 extern const struct sysfs_ops ioat_sysfs_ops;
 extern struct ioat_sysfs_entry ioat_version_attr;
 extern struct ioat_sysfs_entry ioat_cap_attr;
+extern int ioat_pending_level;
+extern int ioat_ring_alloc_order;
+extern struct kobj_type ioat_ktype;
+extern struct kmem_cache *ioat_cache;
+
 #endif /* IOATDMA_H */
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
deleted file mode 100644
index a907bed..0000000
--- a/drivers/dma/ioat/dma_v2.c
+++ /dev/null
@@ -1,695 +0,0 @@ 
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-/*
- * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
- * does asynchronous data movement and checksumming operations.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-#include <linux/prefetch.h>
-#include "dma.h"
-#include "dma_v2.h"
-#include "registers.h"
-#include "hw.h"
-
-#include "../dmaengine.h"
-
-int ioat_ring_alloc_order = 8;
-module_param(ioat_ring_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_alloc_order,
-		 "ioat2+: allocate 2^n descriptors per channel"
-		 " (default: 8 max: 16)");
-static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
-module_param(ioat_ring_max_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_max_alloc_order,
-		 "ioat2+: upper limit for ring size (default: 16)");
-
-void __ioat2_issue_pending(struct ioatdma_chan *ioat_chan)
-{
-	ioat_chan->dmacount += ioat2_ring_pending(ioat_chan);
-	ioat_chan->issued = ioat_chan->head;
-	writew(ioat_chan->dmacount,
-	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-	dev_dbg(to_dev(ioat_chan),
-		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
-		__func__, ioat_chan->head, ioat_chan->tail,
-		ioat_chan->issued, ioat_chan->dmacount);
-}
-
-void ioat2_issue_pending(struct dma_chan *c)
-{
-	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
-
-	if (ioat2_ring_pending(ioat_chan)) {
-		spin_lock_bh(&ioat_chan->prep_lock);
-		__ioat2_issue_pending(ioat_chan);
-		spin_unlock_bh(&ioat_chan->prep_lock);
-	}
-}
-
-/**
- * ioat2_update_pending - log pending descriptors
- * @ioat: ioat2+ channel
- *
- * Check if the number of unsubmitted descriptors has exceeded the
- * watermark.  Called with prep_lock held
- */
-static void ioat2_update_pending(struct ioatdma_chan *ioat_chan)
-{
-	if (ioat2_ring_pending(ioat_chan) > ioat_pending_level)
-		__ioat2_issue_pending(ioat_chan);
-}
-
-static void __ioat2_start_null_desc(struct ioatdma_chan *ioat_chan)
-{
-	struct ioat_ring_ent *desc;
-	struct ioat_dma_descriptor *hw;
-
-	if (ioat2_ring_space(ioat_chan) < 1) {
-		dev_err(to_dev(ioat_chan),
-			"Unable to start null desc - ring full\n");
-		return;
-	}
-
-	dev_dbg(to_dev(ioat_chan),
-		"%s: head: %#x tail: %#x issued: %#x\n",
-		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
-	desc = ioat2_get_ring_ent(ioat_chan, ioat_chan->head);
-
-	hw = desc->hw;
-	hw->ctl = 0;
-	hw->ctl_f.null = 1;
-	hw->ctl_f.int_en = 1;
-	hw->ctl_f.compl_write = 1;
-	/* set size to non-zero value (channel returns error when size is 0) */
-	hw->size = NULL_DESC_BUFFER_SIZE;
-	hw->src_addr = 0;
-	hw->dst_addr = 0;
-	async_tx_ack(&desc->txd);
-	ioat2_set_chainaddr(ioat_chan, desc->txd.phys);
-	dump_desc_dbg(ioat_chan, desc);
-	wmb();
-	ioat_chan->head += 1;
-	__ioat2_issue_pending(ioat_chan);
-}
-
-static void ioat2_start_null_desc(struct ioatdma_chan *ioat_chan)
-{
-	spin_lock_bh(&ioat_chan->prep_lock);
-	__ioat2_start_null_desc(ioat_chan);
-	spin_unlock_bh(&ioat_chan->prep_lock);
-}
-
-void __ioat2_restart_chan(struct ioatdma_chan *ioat_chan)
-{
-	/* set the tail to be re-issued */
-	ioat_chan->issued = ioat_chan->tail;
-	ioat_chan->dmacount = 0;
-	set_bit(IOAT_COMPLETION_PENDING, &ioat_chan->state);
-	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	dev_dbg(to_dev(ioat_chan),
-		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
-		__func__, ioat_chan->head, ioat_chan->tail,
-		ioat_chan->issued, ioat_chan->dmacount);
-
-	if (ioat2_ring_pending(ioat_chan)) {
-		struct ioat_ring_ent *desc;
-
-		desc = ioat2_get_ring_ent(ioat_chan, ioat_chan->tail);
-		ioat2_set_chainaddr(ioat_chan, desc->txd.phys);
-		__ioat2_issue_pending(ioat_chan);
-	} else
-		__ioat2_start_null_desc(ioat_chan);
-}
-
-int ioat2_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
-{
-	unsigned long end = jiffies + tmo;
-	int err = 0;
-	u32 status;
-
-	status = ioat_chansts(ioat_chan);
-	if (is_ioat_active(status) || is_ioat_idle(status))
-		ioat_suspend(ioat_chan);
-	while (is_ioat_active(status) || is_ioat_idle(status)) {
-		if (tmo && time_after(jiffies, end)) {
-			err = -ETIMEDOUT;
-			break;
-		}
-		status = ioat_chansts(ioat_chan);
-		cpu_relax();
-	}
-
-	return err;
-}
-
-int ioat2_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
-{
-	unsigned long end = jiffies + tmo;
-	int err = 0;
-
-	ioat_reset(ioat_chan);
-	while (ioat_reset_pending(ioat_chan)) {
-		if (end && time_after(jiffies, end)) {
-			err = -ETIMEDOUT;
-			break;
-		}
-		cpu_relax();
-	}
-
-	return err;
-}
-
-/**
- * ioat2_enumerate_channels - find and initialize the device's channels
- * @ioat_dma: the ioat dma device to be enumerated
- */
-int ioat2_enumerate_channels(struct ioatdma_device *ioat_dma)
-{
-	struct ioatdma_chan *ioat_chan;
-	struct device *dev = &ioat_dma->pdev->dev;
-	struct dma_device *dma = &ioat_dma->dma_dev;
-	u8 xfercap_log;
-	int i;
-
-	INIT_LIST_HEAD(&dma->channels);
-	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
-	dma->chancnt &= 0x1f; /* bits [4:0] valid */
-	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
-		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
-			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
-		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
-	}
-	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
-	xfercap_log &= 0x1f; /* bits [4:0] valid */
-	if (xfercap_log == 0)
-		return 0;
-	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
-
-	for (i = 0; i < dma->chancnt; i++) {
-		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
-		if (!ioat_chan)
-			break;
-
-		ioat_init_channel(ioat_dma, ioat_chan, i);
-		ioat_chan->xfercap_log = xfercap_log;
-		spin_lock_init(&ioat_chan->prep_lock);
-		if (ioat_dma->reset_hw(ioat_chan)) {
-			i = 0;
-			break;
-		}
-	}
-	dma->chancnt = i;
-	return i;
-}
-
-static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
-{
-	struct dma_chan *c = tx->chan;
-	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
-	dma_cookie_t cookie;
-
-	cookie = dma_cookie_assign(tx);
-	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);
-
-	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
-		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	/* make descriptor updates visible before advancing ioat->head,
-	 * this is purposefully not smp_wmb() since we are also
-	 * publishing the descriptor updates to a dma device
-	 */
-	wmb();
-
-	ioat_chan->head += ioat_chan->produce;
-
-	ioat2_update_pending(ioat_chan);
-	spin_unlock_bh(&ioat_chan->prep_lock);
-
-	return cookie;
-}
-
-static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
-{
-	struct ioat_dma_descriptor *hw;
-	struct ioat_ring_ent *desc;
-	struct ioatdma_device *ioat_dma;
-	dma_addr_t phys;
-
-	ioat_dma = to_ioatdma_device(chan->device);
-	hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
-	if (!hw)
-		return NULL;
-	memset(hw, 0, sizeof(*hw));
-
-	desc = kmem_cache_zalloc(ioat2_cache, flags);
-	if (!desc) {
-		pci_pool_free(ioat_dma->dma_pool, hw, phys);
-		return NULL;
-	}
-
-	dma_async_tx_descriptor_init(&desc->txd, chan);
-	desc->txd.tx_submit = ioat2_tx_submit_unlock;
-	desc->hw = hw;
-	desc->txd.phys = phys;
-	return desc;
-}
-
-static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
-{
-	struct ioatdma_device *ioat_dma;
-
-	ioat_dma = to_ioatdma_device(chan->device);
-	pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
-	kmem_cache_free(ioat2_cache, desc);
-}
-
-static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
-{
-	struct ioat_ring_ent **ring;
-	int descs = 1 << order;
-	int i;
-
-	if (order > ioat_get_max_alloc_order())
-		return NULL;
-
-	/* allocate the array to hold the software ring */
-	ring = kcalloc(descs, sizeof(*ring), flags);
-	if (!ring)
-		return NULL;
-	for (i = 0; i < descs; i++) {
-		ring[i] = ioat2_alloc_ring_ent(c, flags);
-		if (!ring[i]) {
-			while (i--)
-				ioat2_free_ring_ent(ring[i], c);
-			kfree(ring);
-			return NULL;
-		}
-		set_desc_id(ring[i], i);
-	}
-
-	/* link descs */
-	for (i = 0; i < descs-1; i++) {
-		struct ioat_ring_ent *next = ring[i+1];
-		struct ioat_dma_descriptor *hw = ring[i]->hw;
-
-		hw->next = next->txd.phys;
-	}
-	ring[i]->hw->next = ring[0]->txd.phys;
-
-	return ring;
-}
-
-void ioat2_free_chan_resources(struct dma_chan *c);
-
-/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
- * @chan: channel to be initialized
- */
-int ioat2_alloc_chan_resources(struct dma_chan *c)
-{
-	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
-	struct ioat_ring_ent **ring;
-	u64 status;
-	int order;
-	int i = 0;
-
-	/* have we already been set up? */
-	if (ioat_chan->ring)
-		return 1 << ioat_chan->alloc_order;
-
-	/* Setup register to interrupt and write completion status on error */
-	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
-
-	/* allocate a completion writeback area */
-	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
-	ioat_chan->completion =
-		pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
-			       GFP_KERNEL, &ioat_chan->completion_dma);
-	if (!ioat_chan->completion)
-		return -ENOMEM;
-
-	memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
-	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
-	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
-	writel(((u64)ioat_chan->completion_dma) >> 32,
-	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
-	order = ioat_get_alloc_order();
-	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
-	if (!ring)
-		return -ENOMEM;
-
-	spin_lock_bh(&ioat_chan->cleanup_lock);
-	spin_lock_bh(&ioat_chan->prep_lock);
-	ioat_chan->ring = ring;
-	ioat_chan->head = 0;
-	ioat_chan->issued = 0;
-	ioat_chan->tail = 0;
-	ioat_chan->alloc_order = order;
-	set_bit(IOAT_RUN, &ioat_chan->state);
-	spin_unlock_bh(&ioat_chan->prep_lock);
-	spin_unlock_bh(&ioat_chan->cleanup_lock);
-
-	ioat2_start_null_desc(ioat_chan);
-
-	/* check that we got off the ground */
-	do {
-		udelay(1);
-		status = ioat_chansts(ioat_chan);
-	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
-
-	if (is_ioat_active(status) || is_ioat_idle(status)) {
-		return 1 << ioat_chan->alloc_order;
-	} else {
-		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-
-		dev_WARN(to_dev(ioat_chan),
-			"failed to start channel chanerr: %#x\n", chanerr);
-		ioat2_free_chan_resources(c);
-		return -EFAULT;
-	}
-}
-
-bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
-{
-	/* reshape differs from normal ring allocation in that we want
-	 * to allocate a new software ring while only
-	 * extending/truncating the hardware ring
-	 */
-	struct dma_chan *c = &ioat_chan->dma_chan;
-	const u32 curr_size = ioat2_ring_size(ioat_chan);
-	const u16 active = ioat2_ring_active(ioat_chan);
-	const u32 new_size = 1 << order;
-	struct ioat_ring_ent **ring;
-	u16 i;
-
-	if (order > ioat_get_max_alloc_order())
-		return false;
-
-	/* double check that we have at least 1 free descriptor */
-	if (active == curr_size)
-		return false;
-
-	/* when shrinking, verify that we can hold the current active
-	 * set in the new ring
-	 */
-	if (active >= new_size)
-		return false;
-
-	/* allocate the array to hold the software ring */
-	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
-	if (!ring)
-		return false;
-
-	/* allocate/trim descriptors as needed */
-	if (new_size > curr_size) {
-		/* copy current descriptors to the new ring */
-		for (i = 0; i < curr_size; i++) {
-			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
-			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-
-			ring[new_idx] = ioat_chan->ring[curr_idx];
-			set_desc_id(ring[new_idx], new_idx);
-		}
-
-		/* add new descriptors to the ring */
-		for (i = curr_size; i < new_size; i++) {
-			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-
-			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
-			if (!ring[new_idx]) {
-				while (i--) {
-					u16 new_idx = (ioat_chan->tail+i) &
-						       (new_size-1);
-
-					ioat2_free_ring_ent(ring[new_idx], c);
-				}
-				kfree(ring);
-				return false;
-			}
-			set_desc_id(ring[new_idx], new_idx);
-		}
-
-		/* hw link new descriptors */
-		for (i = curr_size-1; i < new_size; i++) {
-			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
-			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
-
-			hw->next = next->txd.phys;
-		}
-	} else {
-		struct ioat_dma_descriptor *hw;
-		struct ioat_ring_ent *next;
-
-		/* copy current descriptors to the new ring, dropping the
-		 * removed descriptors
-		 */
-		for (i = 0; i < new_size; i++) {
-			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
-			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-
-			ring[new_idx] = ioat_chan->ring[curr_idx];
-			set_desc_id(ring[new_idx], new_idx);
-		}
-
-		/* free deleted descriptors */
-		for (i = new_size; i < curr_size; i++) {
-			struct ioat_ring_ent *ent;
-
-			ent = ioat2_get_ring_ent(ioat_chan, ioat_chan->tail+i);
-			ioat2_free_ring_ent(ent, c);
-		}
-
-		/* fix up hardware ring */
-		hw = ring[(ioat_chan->tail+new_size-1) & (new_size-1)]->hw;
-		next = ring[(ioat_chan->tail+new_size) & (new_size-1)];
-		hw->next = next->txd.phys;
-	}
-
-	dev_dbg(to_dev(ioat_chan), "%s: allocated %d descriptors\n",
-		__func__, new_size);
-
-	kfree(ioat_chan->ring);
-	ioat_chan->ring = ring;
-	ioat_chan->alloc_order = order;
-
-	return true;
-}
-
-/**
- * ioat2_check_space_lock - verify space and grab ring producer lock
- * @ioat: ioat2,3 channel (ring) to operate on
- * @num_descs: allocation length
- */
-int ioat2_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
-{
-	bool retry;
-
- retry:
-	spin_lock_bh(&ioat_chan->prep_lock);
-	/* never allow the last descriptor to be consumed, we need at
-	 * least one free at all times to allow for on-the-fly ring
-	 * resizing.
-	 */
-	if (likely(ioat2_ring_space(ioat_chan) > num_descs)) {
-		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
-			__func__, num_descs, ioat_chan->head,
-			ioat_chan->tail, ioat_chan->issued);
-		ioat_chan->produce = num_descs;
-		return 0;  /* with ioat->prep_lock held */
-	}
-	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
-	spin_unlock_bh(&ioat_chan->prep_lock);
-
-	/* is another cpu already trying to expand the ring? */
-	if (retry)
-		goto retry;
-
-	spin_lock_bh(&ioat_chan->cleanup_lock);
-	spin_lock_bh(&ioat_chan->prep_lock);
-	retry = reshape_ring(ioat_chan, ioat_chan->alloc_order + 1);
-	clear_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
-	spin_unlock_bh(&ioat_chan->prep_lock);
-	spin_unlock_bh(&ioat_chan->cleanup_lock);
-
-	/* if we were able to expand the ring retry the allocation */
-	if (retry)
-		goto retry;
-
-	if (printk_ratelimit())
-		dev_dbg(to_dev(ioat_chan),
-			"%s: ring full! num_descs: %d (%x:%x:%x)\n",
-			__func__, num_descs, ioat_chan->head,
-			ioat_chan->tail, ioat_chan->issued);
-
-	/* progress reclaim in the allocation failure case we may be
-	 * called under bh_disabled so we need to trigger the timer
-	 * event directly
-	 */
-	if (time_is_before_jiffies(ioat_chan->timer.expires)
-	    && timer_pending(&ioat_chan->timer)) {
-		struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
-
-		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-		ioat_dma->timer_fn((unsigned long)ioat_chan);
-	}
-
-	return -ENOMEM;
-}
-
-struct dma_async_tx_descriptor *
-ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
-			   dma_addr_t dma_src, size_t len, unsigned long flags)
-{
-	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
-	struct ioat_dma_descriptor *hw;
-	struct ioat_ring_ent *desc;
-	dma_addr_t dst = dma_dest;
-	dma_addr_t src = dma_src;
-	size_t total_len = len;
-	int num_descs, idx, i;
-
-	num_descs = ioat2_xferlen_to_descs(ioat_chan, len);
-	if (likely(num_descs) &&
-	    ioat2_check_space_lock(ioat_chan, num_descs) == 0)
-		idx = ioat_chan->head;
-	else
-		return NULL;
-	i = 0;
-	do {
-		size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log);
-
-		desc = ioat2_get_ring_ent(ioat_chan, idx + i);
-		hw = desc->hw;
-
-		hw->size = copy;
-		hw->ctl = 0;
-		hw->src_addr = src;
-		hw->dst_addr = dst;
-
-		len -= copy;
-		dst += copy;
-		src += copy;
-		dump_desc_dbg(ioat_chan, desc);
-	} while (++i < num_descs);
-
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-	hw->ctl_f.compl_write = 1;
-	dump_desc_dbg(ioat_chan, desc);
-	/* we leave the channel locked to ensure in order submission */
-
-	return &desc->txd;
-}
-
-/**
- * ioat2_free_chan_resources - release all the descriptors
- * @chan: the channel to be cleaned
- */
-void ioat2_free_chan_resources(struct dma_chan *c)
-{
-	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
-	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
-	struct ioat_ring_ent *desc;
-	const u16 total_descs = 1 << ioat_chan->alloc_order;
-	int descs;
-	int i;
-
-	/* Before freeing channel resources first check
-	 * if they have been previously allocated for this channel.
-	 */
-	if (!ioat_chan->ring)
-		return;
-
-	ioat_stop(ioat_chan);
-	ioat_dma->reset_hw(ioat_chan);
-
-	spin_lock_bh(&ioat_chan->cleanup_lock);
-	spin_lock_bh(&ioat_chan->prep_lock);
-	descs = ioat2_ring_space(ioat_chan);
-	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
-	for (i = 0; i < descs; i++) {
-		desc = ioat2_get_ring_ent(ioat_chan, ioat_chan->head + i);
-		ioat2_free_ring_ent(desc, c);
-	}
-
-	if (descs < total_descs)
-		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
-			total_descs - descs);
-
-	for (i = 0; i < total_descs - descs; i++) {
-		desc = ioat2_get_ring_ent(ioat_chan, ioat_chan->tail + i);
-		dump_desc_dbg(ioat_chan, desc);
-		ioat2_free_ring_ent(desc, c);
-	}
-
-	kfree(ioat_chan->ring);
-	ioat_chan->ring = NULL;
-	ioat_chan->alloc_order = 0;
-	pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
-		      ioat_chan->completion_dma);
-	spin_unlock_bh(&ioat_chan->prep_lock);
-	spin_unlock_bh(&ioat_chan->cleanup_lock);
-
-	ioat_chan->last_completion = 0;
-	ioat_chan->completion_dma = 0;
-	ioat_chan->dmacount = 0;
-}
-
-static ssize_t ring_size_show(struct dma_chan *c, char *page)
-{
-	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
-
-	return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1);
-}
-static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
-
-static ssize_t ring_active_show(struct dma_chan *c, char *page)
-{
-	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
-
-	/* ...taken outside the lock, no need to be precise */
-	return sprintf(page, "%d\n", ioat2_ring_active(ioat_chan));
-}
-static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
-
-static struct attribute *ioat2_attrs[] = {
-	&ring_size_attr.attr,
-	&ring_active_attr.attr,
-	&ioat_cap_attr.attr,
-	&ioat_version_attr.attr,
-	NULL,
-};
-
-struct kobj_type ioat2_ktype = {
-	.sysfs_ops = &ioat_sysfs_ops,
-	.default_attrs = ioat2_attrs,
-};
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
deleted file mode 100644
index 7d69ed3..0000000
--- a/drivers/dma/ioat/dma_v2.h
+++ /dev/null
@@ -1,143 +0,0 @@ 
-/*
- * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-#ifndef IOATDMA_V2_H
-#define IOATDMA_V2_H
-
-#include <linux/dmaengine.h>
-#include <linux/circ_buf.h>
-#include "dma.h"
-#include "hw.h"
-
-
-extern int ioat_pending_level;
-extern int ioat_ring_alloc_order;
-
-/*
- * workaround for IOAT ver.3.0 null descriptor issue
- * (channel returns error when size is 0)
- */
-#define NULL_DESC_BUFFER_SIZE 1
-
-#define IOAT_MAX_ORDER 16
-#define ioat_get_alloc_order() \
-	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
-#define ioat_get_max_alloc_order() \
-	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
-
-static inline u32 ioat2_ring_size(struct ioatdma_chan *ioat_chan)
-{
-	return 1 << ioat_chan->alloc_order;
-}
-
-/* count of descriptors in flight with the engine */
-static inline u16 ioat2_ring_active(struct ioatdma_chan *ioat_chan)
-{
-	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
-			ioat2_ring_size(ioat_chan));
-}
-
-/* count of descriptors pending submission to hardware */
-static inline u16 ioat2_ring_pending(struct ioatdma_chan *ioat_chan)
-{
-	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
-			ioat2_ring_size(ioat_chan));
-}
-
-static inline u32 ioat2_ring_space(struct ioatdma_chan *ioat_chan)
-{
-	return ioat2_ring_size(ioat_chan) - ioat2_ring_active(ioat_chan);
-}
-
-static inline u16
-ioat2_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
-{
-	u16 num_descs = len >> ioat_chan->xfercap_log;
-
-	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
-	return num_descs;
-}
-
-/**
- * struct ioat_ring_ent - wrapper around hardware descriptor
- * @hw: hardware DMA descriptor (for memcpy)
- * @fill: hardware fill descriptor
- * @xor: hardware xor descriptor
- * @xor_ex: hardware xor extension descriptor
- * @pq: hardware pq descriptor
- * @pq_ex: hardware pq extension descriptor
- * @pqu: hardware pq update descriptor
- * @raw: hardware raw (un-typed) descriptor
- * @txd: the generic software descriptor for all engines
- * @len: total transaction length for unmap
- * @result: asynchronous result of validate operations
- * @id: identifier for debug
- */
-
-struct ioat_ring_ent {
-	union {
-		struct ioat_dma_descriptor *hw;
-		struct ioat_xor_descriptor *xor;
-		struct ioat_xor_ext_descriptor *xor_ex;
-		struct ioat_pq_descriptor *pq;
-		struct ioat_pq_ext_descriptor *pq_ex;
-		struct ioat_pq_update_descriptor *pqu;
-		struct ioat_raw_descriptor *raw;
-	};
-	size_t len;
-	struct dma_async_tx_descriptor txd;
-	enum sum_check_flags *result;
-	#ifdef DEBUG
-	int id;
-	#endif
-	struct ioat_sed_ent *sed;
-};
-
-static inline struct ioat_ring_ent *
-ioat2_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
-{
-	return ioat_chan->ring[idx & (ioat2_ring_size(ioat_chan) - 1)];
-}
-
-static inline void
-ioat2_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
-{
-	writel(addr & 0x00000000FFFFFFFF,
-	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
-	writel(addr >> 32,
-	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
-}
-
-int ioat2_dma_probe(struct ioatdma_device *ioat_dma, int dca);
-int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca);
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-int ioat2_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
-int ioat2_enumerate_channels(struct ioatdma_device *ioat_dma);
-struct dma_async_tx_descriptor *
-ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
-			   dma_addr_t dma_src, size_t len, unsigned long flags);
-void ioat2_issue_pending(struct dma_chan *chan);
-int ioat2_alloc_chan_resources(struct dma_chan *c);
-void ioat2_free_chan_resources(struct dma_chan *c);
-void __ioat2_restart_chan(struct ioatdma_chan *ioat_chan);
-bool reshape_ring(struct ioatdma_chan *ioat, int order);
-void __ioat2_issue_pending(struct ioatdma_chan *ioat_chan);
-void ioat2_timer_event(unsigned long data);
-int ioat2_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo);
-int ioat2_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo);
-extern struct kobj_type ioat2_ktype;
-extern struct kmem_cache *ioat2_cache;
-#endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index b8bc906..673ced9 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -61,7 +61,6 @@ 
 #include "registers.h"
 #include "hw.h"
 #include "dma.h"
-#include "dma_v2.h"
 
 extern struct kmem_cache *ioat3_sed_cache;
 
@@ -390,13 +389,13 @@  static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
 	if (!phys_complete)
 		return;
 
-	active = ioat2_ring_active(ioat_chan);
+	active = ioat_ring_active(ioat_chan);
 	for (i = 0; i < active && !seen_current; i++) {
 		struct dma_async_tx_descriptor *tx;
 
 		smp_read_barrier_depends();
-		prefetch(ioat2_get_ring_ent(ioat_chan, idx + i + 1));
-		desc = ioat2_get_ring_ent(ioat_chan, idx + i);
+		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
 		dump_desc_dbg(ioat_chan, desc);
 
 		/* set err stat if we are using dwbes */
@@ -479,11 +478,11 @@  static void ioat3_restart_channel(struct ioatdma_chan *ioat_chan)
 {
 	u64 phys_complete;
 
-	ioat2_quiesce(ioat_chan, 0);
+	ioat_quiesce(ioat_chan, 0);
 	if (ioat3_cleanup_preamble(ioat_chan, &phys_complete))
 		__cleanup(ioat_chan, phys_complete);
 
-	__ioat2_restart_chan(ioat_chan);
+	__ioat_restart_chan(ioat_chan);
 }
 
 static void ioat3_eh(struct ioatdma_chan *ioat_chan)
@@ -507,7 +506,7 @@  static void ioat3_eh(struct ioatdma_chan *ioat_chan)
 	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
 		__func__, chanerr, chanerr_int);
 
-	desc = ioat2_get_ring_ent(ioat_chan, ioat_chan->tail);
+	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
 	hw = desc->hw;
 	dump_desc_dbg(ioat_chan, desc);
 
@@ -561,7 +560,7 @@  static void ioat3_eh(struct ioatdma_chan *ioat_chan)
 
 static void check_active(struct ioatdma_chan *ioat_chan)
 {
-	if (ioat2_ring_active(ioat_chan)) {
+	if (ioat_ring_active(ioat_chan)) {
 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
 		return;
 	}
@@ -625,7 +624,7 @@  static void ioat3_timer_event(unsigned long data)
 	}
 
 
-	if (ioat2_ring_active(ioat_chan))
+	if (ioat_ring_active(ioat_chan))
 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
 	else {
 		spin_lock_bh(&ioat_chan->prep_lock);
@@ -670,7 +669,7 @@  __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
 
 	BUG_ON(src_cnt < 2);
 
-	num_descs = ioat2_xferlen_to_descs(ioat_chan, len);
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
 	/* we need 2x the number of descriptors to cover greater than 5
 	 * sources
 	 */
@@ -686,7 +685,7 @@  __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
 	 * order.
 	 */
 	if (likely(num_descs) &&
-	    ioat2_check_space_lock(ioat_chan, num_descs+1) == 0)
+	    ioat_check_space_lock(ioat_chan, num_descs+1) == 0)
 		idx = ioat_chan->head;
 	else
 		return NULL;
@@ -697,14 +696,14 @@  __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
 					 len, 1 << ioat_chan->xfercap_log);
 		int s;
 
-		desc = ioat2_get_ring_ent(ioat_chan, idx + i);
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
 		xor = desc->xor;
 
 		/* save a branch by unconditionally retrieving the
 		 * extended descriptor xor_set_src() knows to not write
 		 * to it in the single descriptor case
 		 */
-		ext = ioat2_get_ring_ent(ioat_chan, idx + i + 1);
+		ext = ioat_get_ring_ent(ioat_chan, idx + i + 1);
 		xor_ex = ext->xor_ex;
 
 		descs[0] = (struct ioat_raw_descriptor *) xor;
@@ -730,7 +729,7 @@  __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
 	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
 
 	/* completion descriptor carries interrupt bit */
-	compl_desc = ioat2_get_ring_ent(ioat_chan, idx + i);
+	compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
 	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
 	hw = compl_desc->hw;
 	hw->ctl = 0;
@@ -854,7 +853,7 @@  __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 	 */
 	BUG_ON(src_cnt + dmaf_continue(flags) < 2);
 
-	num_descs = ioat2_xferlen_to_descs(ioat_chan, len);
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
 	/* we need 2x the number of descriptors to cover greater than 3
 	 * sources (we need 1 extra source in the q-only continuation
 	 * case and 3 extra sources in the p+q continuation case.
@@ -872,7 +871,7 @@  __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 	 * order.
 	 */
 	if (likely(num_descs) &&
-	    ioat2_check_space_lock(ioat_chan, num_descs + cb32) == 0)
+	    ioat_check_space_lock(ioat_chan, num_descs + cb32) == 0)
 		idx = ioat_chan->head;
 	else
 		return NULL;
@@ -882,14 +881,14 @@  __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 		size_t xfer_size = min_t(size_t, len,
 					 1 << ioat_chan->xfercap_log);
 
-		desc = ioat2_get_ring_ent(ioat_chan, idx + i);
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
 		pq = desc->pq;
 
 		/* save a branch by unconditionally retrieving the
 		 * extended descriptor pq_set_src() knows to not write
 		 * to it in the single descriptor case
 		 */
-		ext = ioat2_get_ring_ent(ioat_chan, idx + i + with_ext);
+		ext = ioat_get_ring_ent(ioat_chan, idx + i + with_ext);
 		pq_ex = ext->pq_ex;
 
 		descs[0] = (struct ioat_raw_descriptor *) pq;
@@ -936,7 +935,7 @@  __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 		compl_desc = desc;
 	} else {
 		/* completion descriptor carries interrupt bit */
-		compl_desc = ioat2_get_ring_ent(ioat_chan, idx + i);
+		compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
 		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
 		hw = compl_desc->hw;
 		hw->ctl = 0;
@@ -972,13 +971,13 @@  __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
 
 	dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
 
-	num_descs = ioat2_xferlen_to_descs(ioat_chan, len);
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
 
 	/*
 	 * 16 source pq is only available on cb3.3 and has no completion
 	 * write hw bug.
 	 */
-	if (num_descs && ioat2_check_space_lock(ioat_chan, num_descs) == 0)
+	if (num_descs && ioat_check_space_lock(ioat_chan, num_descs) == 0)
 		idx = ioat_chan->head;
 	else
 		return NULL;
@@ -990,7 +989,7 @@  __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
 		size_t xfer_size = min_t(size_t, len,
 					 1 << ioat_chan->xfercap_log);
 
-		desc = ioat2_get_ring_ent(ioat_chan, idx + i);
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
 		pq = desc->pq;
 
 		descs[0] = (struct ioat_raw_descriptor *) pq;
@@ -1177,8 +1176,8 @@  ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
 	struct ioat_ring_ent *desc;
 	struct ioat_dma_descriptor *hw;
 
-	if (ioat2_check_space_lock(ioat_chan, 1) == 0)
-		desc = ioat2_get_ring_ent(ioat_chan, ioat_chan->head);
+	if (ioat_check_space_lock(ioat_chan, 1) == 0)
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
 	else
 		return NULL;
 
@@ -1533,7 +1532,7 @@  static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan)
 	u16 dev_id;
 	int err;
 
-	ioat2_quiesce(ioat_chan, msecs_to_jiffies(100));
+	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));
 
 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -1561,7 +1560,7 @@  static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan)
 		}
 	}
 
-	err = ioat2_reset_sync(ioat_chan, msecs_to_jiffies(200));
+	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
 	if (!err)
 		err = ioat3_irq_reinit(ioat_dma);
 
@@ -1607,15 +1606,15 @@  int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 	bool is_raid_device = false;
 	int err;
 
-	ioat_dma->enumerate_channels = ioat2_enumerate_channels;
+	ioat_dma->enumerate_channels = ioat_enumerate_channels;
 	ioat_dma->reset_hw = ioat3_reset_hw;
 	ioat_dma->self_test = ioat3_dma_self_test;
 	ioat_dma->intr_quirk = ioat3_intr_quirk;
 	dma = &ioat_dma->dma_dev;
-	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
-	dma->device_issue_pending = ioat2_issue_pending;
-	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
-	dma->device_free_chan_resources = ioat2_free_chan_resources;
+	dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
+	dma->device_issue_pending = ioat_issue_pending;
+	dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
+	dma->device_free_chan_resources = ioat_free_chan_resources;
 
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
@@ -1705,7 +1704,7 @@  int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 	if (err)
 		return err;
 
-	ioat_kobject_add(ioat_dma, &ioat2_ktype);
+	ioat_kobject_add(ioat_dma, &ioat_ktype);
 
 	if (dca)
 		ioat_dma->dca = ioat3_dca_init(pdev, ioat_dma->reg_base);
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 2ba20c6..0cfa714 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -28,7 +28,6 @@ 
 #include <linux/dca.h>
 #include <linux/slab.h>
 #include "dma.h"
-#include "dma_v2.h"
 #include "registers.h"
 #include "hw.h"
 
@@ -114,7 +113,7 @@  static int ioat_dca_enabled = 1;
 module_param(ioat_dca_enabled, int, 0644);
 MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
 
-struct kmem_cache *ioat2_cache;
+struct kmem_cache *ioat_cache;
 struct kmem_cache *ioat3_sed_cache;
 
 #define DRV_NAME "ioatdma"
@@ -212,14 +211,14 @@  static int __init ioat_init_module(void)
 	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
 		DRV_NAME, IOAT_DMA_VERSION);
 
-	ioat2_cache = kmem_cache_create("ioat2", sizeof(struct ioat_ring_ent),
+	ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
 					0, SLAB_HWCACHE_ALIGN, NULL);
-	if (!ioat2_cache)
+	if (!ioat_cache)
 		return -ENOMEM;
 
 	ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
 	if (!ioat3_sed_cache)
-		goto err_ioat2_cache;
+		goto err_ioat_cache;
 
 	err = pci_register_driver(&ioat_pci_driver);
 	if (err)
@@ -230,8 +229,8 @@  static int __init ioat_init_module(void)
  err_ioat3_cache:
 	kmem_cache_destroy(ioat3_sed_cache);
 
- err_ioat2_cache:
-	kmem_cache_destroy(ioat2_cache);
+ err_ioat_cache:
+	kmem_cache_destroy(ioat_cache);
 
 	return err;
 }
@@ -240,6 +239,6 @@  module_init(ioat_init_module);
 static void __exit ioat_exit_module(void)
 {
 	pci_unregister_driver(&ioat_pci_driver);
-	kmem_cache_destroy(ioat2_cache);
+	kmem_cache_destroy(ioat_cache);
 }
 module_exit(ioat_exit_module);