diff mbox

[v2,RFC,2/8] arm: introduce a global dma_ops pointer

Message ID 1375300452-12545-2-git-send-email-stefano.stabellini@eu.citrix.com (mailing list archive)
State New, archived
Headers show

Commit Message

Stefano Stabellini July 31, 2013, 7:54 p.m. UTC
Initially set dma_ops to arm_dma_ops.
Use dma_ops instead of arm_dma_ops in dmabounce.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
CC: will.deacon@arm.com
CC: linux@arm.linux.org.uk
---
 arch/arm/common/dmabounce.c        |   10 +++++-----
 arch/arm/include/asm/dma-mapping.h |    3 ++-
 arch/arm/mm/dma-mapping.c          |    3 +++
 3 files changed, 10 insertions(+), 6 deletions(-)

Comments

Russell King - ARM Linux July 31, 2013, 8:01 p.m. UTC | #1
On Wed, Jul 31, 2013 at 08:54:06PM +0100, Stefano Stabellini wrote:
> Initially set dma_ops to arm_dma_ops.
> Use dma_ops instead of arm_dma_ops in dmabounce.
> 
> Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> CC: will.deacon@arm.com
> CC: linux@arm.linux.org.uk

If you're using swiotlb, there's little point in converting dmabounce
to be able to use it, because it's basically providing the same
functionality - dmabounce is there to do software buffer bouncing within
the DMA to move streaming buffers from DMA-inaccessible memory into
DMA-accessible memory.

Its original use was with older SoCs with buggy DMA (e.g., those which
can only address alternate 1MB chunks of memory for example) but also
got used in situations where alternative solutions would've been better
(like using swiotlb.)  I've been discouraging its use as it's suffered
from memory exhaustion problems (there are a number of threads and bug
reports, never resolved, about IXP4xx(?) platforms suffering
this problem due to the bouncing.)
Stefano Stabellini Aug. 2, 2013, 11:42 a.m. UTC | #2
On Wed, 31 Jul 2013, Russell King - ARM Linux wrote:
> On Wed, Jul 31, 2013 at 08:54:06PM +0100, Stefano Stabellini wrote:
> > Initially set dma_ops to arm_dma_ops.
> > Use dma_ops instead of arm_dma_ops in dmabounce.
> > 
> > Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> > CC: will.deacon@arm.com
> > CC: linux@arm.linux.org.uk
> 
> If you're using swiotlb, there's little point in converting dmabounce
> to be able to use it, because it's basically providing the same
> functionality - dmabounce is there to do software buffer bouncing within
> the DMA to move streaming buffers from DMA-inaccessible memory into
> DMA-accessible memory.
> 
> It's original use is with older SoCs with buggy DMA (eg, those which
> can only address alternate 1MB chunks of memory for example) but also
> got used in situations where alternative solutions would've been better
> (like using swiotlb.)  I've been discouraging its use as it's suffered
> from memory exhaustion problems (there's a number of threads and bug
> reports which were never solved about IXP4xx(?) platforms suffering
> this due to this bouncing.)

OK, I'll let dmabounce keep using arm_dma_ops directly (instead of
dma_ops).

Should I add "depends on !DMABOUNCE" to the SWIOTLB Kconfig entry too?
diff mbox

Patch

diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 1143c4d..b626122 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -325,7 +325,7 @@  static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
 		return DMA_ERROR_CODE;
 
 	if (ret == 0) {
-		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
+		dma_ops->sync_single_for_device(dev, dma_addr, size, dir);
 		return dma_addr;
 	}
 
@@ -353,7 +353,7 @@  static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t
 
 	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
 	if (!buf) {
-		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
+		dma_ops->sync_single_for_cpu(dev, dma_addr, size, dir);
 		return;
 	}
 
@@ -397,7 +397,7 @@  static void dmabounce_sync_for_cpu(struct device *dev,
 	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
 		return;
 
-	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+	dma_ops->sync_single_for_cpu(dev, handle, size, dir);
 }
 
 static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
@@ -437,7 +437,7 @@  static void dmabounce_sync_for_device(struct device *dev,
 	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
 		return;
 
-	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+	dma_ops->sync_single_for_device(dev, handle, size, dir);
 }
 
 static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
@@ -445,7 +445,7 @@  static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
 	if (dev->archdata.dmabounce)
 		return 0;
 
-	return arm_dma_ops.set_dma_mask(dev, dma_mask);
+	return dma_ops->set_dma_mask(dev, dma_mask);
 }
 
 static struct dma_map_ops dmabounce_ops = {
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index ad89e0f..f907f65 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -12,6 +12,7 @@ 
 #include <asm/memory.h>
 
 #define DMA_ERROR_CODE	(~0)
+extern struct dma_map_ops *dma_ops;
 extern struct dma_map_ops arm_dma_ops;
 extern struct dma_map_ops arm_coherent_dma_ops;
 
@@ -19,7 +20,7 @@  static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
-	return &arm_dma_ops;
+	return dma_ops;
 }
 
 static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7f9b179..870b12c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -141,6 +141,9 @@  struct dma_map_ops arm_dma_ops = {
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
+struct dma_map_ops *dma_ops = &arm_dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,