
[06/10] sparc64/iommu: implement DMA_ATTR_NON_CONSISTENT

Message ID: 20181208173702.15158-7-hch@lst.de
State: Not Applicable
Series: [01/10] dma-direct: provide a generic implementation of DMA_ATTR_NON_CONSISTENT

Commit Message

Christoph Hellwig Dec. 8, 2018, 5:36 p.m. UTC
Just allocate the memory and use map_page to map the memory.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/sparc/kernel/iommu.c | 33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)
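
Drivers reach this code through the generic DMA API rather than by calling
dma_4u_alloc() directly. Below is a minimal sketch of such a caller; the
helper names, device pointer and buffer size are hypothetical and not part
of the patch:

#include <linux/dma-mapping.h>

/* Hypothetical caller: allocate a non-consistent DMA buffer for @dev. */
static void *example_alloc_noncoherent(struct device *dev, size_t size,
				       dma_addr_t *handle)
{
	void *buf;

	/*
	 * With DMA_ATTR_NON_CONSISTENT, dma_4u_alloc() (as changed by
	 * this patch) skips the consistent IOPTE setup and simply maps
	 * the freshly allocated pages through dma_4u_map_page().
	 */
	buf = dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
			      DMA_ATTR_NON_CONSISTENT);
	if (!buf)
		return NULL;

	/*
	 * On platforms where such memory is not cache-coherent the
	 * driver is expected to sync explicitly around device accesses
	 * (e.g. dma_cache_sync() in this era's DMA-API documentation).
	 */
	return buf;
}

/* Free with the same attrs so dma_4u_free() takes the unmap path. */
static void example_free_noncoherent(struct device *dev, size_t size,
				     void *buf, dma_addr_t handle)
{
	dma_free_attrs(dev, size, buf, handle, DMA_ATTR_NON_CONSISTENT);
}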

Comments

David Miller Dec. 9, 2018, 4:58 a.m. UTC | #1
From: Christoph Hellwig <hch@lst.de>
Date: Sat,  8 Dec 2018 09:36:58 -0800

> Just allocate the memory and use map_page to map the memory.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Acked-by: David S. Miller <davem@davemloft.net>

Patch

diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 4bf0497e0704..4ce24c9dc691 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -699,14 +699,19 @@ static void *dma_4u_alloc(struct device *dev, size_t size,
 	first_page = (unsigned long) page_address(page);
 	memset((char *)first_page, 0, PAGE_SIZE << order);
 
+	if (attrs & DMA_ATTR_NON_CONSISTENT) {
+		*dma_addrp = dma_4u_map_page(dev, page, 0, size,
+					     DMA_BIDIRECTIONAL, 0);
+		if (*dma_addrp == DMA_MAPPING_ERROR)
+			goto out_free_page;
+		return page_address(page);
+	}
+
 	iommu = dev->archdata.iommu;
 
 	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
-
-	if (unlikely(iopte == NULL)) {
-		free_pages(first_page, order);
-		return NULL;
-	}
+	if (unlikely(iopte == NULL))
+		goto out_free_page;
 
 	*dma_addrp = (iommu->tbl.table_map_base +
 		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
@@ -722,18 +727,26 @@ static void *dma_4u_alloc(struct device *dev, size_t size,
 	}
 
 	return ret;
+
+out_free_page:
+	free_pages(first_page, order);
+	return NULL;
 }
 
 static void dma_4u_free(struct device *dev, size_t size, void *cpu,
 			dma_addr_t dvma, unsigned long attrs)
 {
-	struct iommu *iommu;
-	unsigned long order, npages;
+	unsigned long order;
 
-	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
-	iommu = dev->archdata.iommu;
+	if (attrs & DMA_ATTR_NON_CONSISTENT) {
+		dma_4u_unmap_page(dev, dvma, size, DMA_BIDIRECTIONAL, 0);
+	} else {
+		struct iommu *iommu = dev->archdata.iommu;
 
-	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
+		iommu_tbl_range_free(&iommu->tbl, dvma,
+				     IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT,
+				     IOMMU_ERROR_CODE);
+	}
 
 	order = get_order(size);
 	if (order < 10)