[V2,5/6] iommu: dma-iommu: add support for DMA_ATTR_LOW_ADDRESS

Message ID 20220511121544.5998-6-ajaykumar.rs@samsung.com (mailing list archive)
State New, archived
Series IOMMU-DMA - support DMA_ATTR_LOW_ADDRESS attribute

Commit Message

Ajay Kumar May 11, 2022, 12:15 p.m. UTC
From: Marek Szyprowski <m.szyprowski@samsung.com>

Implement support for the DMA_ATTR_LOW_ADDRESS DMA attribute. If it has
been set, call alloc_iova_first_fit() instead of alloc_iova_fast() to
allocate the new IOVA from the beginning of the address space.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Ajay Kumar <ajaykumar.rs@samsung.com>
---
 drivers/iommu/dma-iommu.c | 50 +++++++++++++++++++++++++++++----------
 1 file changed, 38 insertions(+), 12 deletions(-)
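
For reference, a minimal sketch of how a caller might request a low IOVA once this
series is applied. The helper, device and buffer below are made up for illustration;
only DMA_ATTR_LOW_ADDRESS (defined earlier in this series) and the standard
dma_map_page_attrs()/dma_mapping_error() calls are real:

#include <linux/dma-mapping.h>

/*
 * Hypothetical caller: map a buffer for a device that can only drive the
 * low IOVA bits, so the mapping must come from the bottom of the IOVA space.
 */
static dma_addr_t example_map_low(struct device *dev, struct page *page,
				  size_t size)
{
	dma_addr_t iova;

	iova = dma_map_page_attrs(dev, page, 0, size, DMA_TO_DEVICE,
				  DMA_ATTR_LOW_ADDRESS);
	if (dma_mapping_error(dev, iova))
		return DMA_MAPPING_ERROR;

	/* iova was allocated first-fit from the start of the IOVA space */
	return iova;
}

With the attribute set, iommu_dma_alloc_iova() below bypasses alloc_iova_fast()
and uses the first-fit allocator, so the allocation comes from the lowest
available address rather than from the fast (cached) path.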

Patch

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index cb235b40303c..553c5b863e19 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -601,6 +601,18 @@  static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
 }
 
 #define DMA_ALLOC_IOVA_COHERENT		BIT(0)
+#define DMA_ALLOC_IOVA_FIRST_FIT	BIT(1)
+
+static unsigned int dma_attrs_to_alloc_flags(unsigned long attrs, bool coherent)
+{
+	unsigned int flags = 0;
+
+	if (coherent)
+		flags |= DMA_ALLOC_IOVA_COHERENT;
+	if (attrs & DMA_ATTR_LOW_ADDRESS)
+		flags |= DMA_ALLOC_IOVA_FIRST_FIT;
+	return flags;
+}
 
 static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 		struct device *dev, size_t size, unsigned int flags)
@@ -625,13 +637,23 @@  static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
 
 	/* Try to get PCI devices a SAC address */
-	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
-		iova = alloc_iova_fast(iovad, iova_len,
-				       DMA_BIT_MASK(32) >> shift, false);
+	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev)) {
+		if (unlikely(flags & DMA_ALLOC_IOVA_FIRST_FIT))
+			iova = alloc_iova_first_fit(iovad, iova_len,
+						    DMA_BIT_MASK(32) >> shift);
+		else
+			iova = alloc_iova_fast(iovad, iova_len,
+					      DMA_BIT_MASK(32) >> shift, false);
+	}
 
-	if (iova == IOVA_BAD_ADDR)
-		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
-				       true);
+	if (iova == IOVA_BAD_ADDR) {
+		if (unlikely(flags & DMA_ALLOC_IOVA_FIRST_FIT))
+			iova = alloc_iova_first_fit(iovad, iova_len,
+						    dma_limit >> shift);
+		else
+			iova = alloc_iova_fast(iovad, iova_len,
+						dma_limit >> shift, true);
+	}
 
 	if (iova != IOVA_BAD_ADDR)
 		return (dma_addr_t)iova << shift;
@@ -779,6 +801,7 @@  static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
 	struct iova_domain *iovad = &cookie->iovad;
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+	unsigned int flags = dma_attrs_to_alloc_flags(attrs, true);
 	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 	struct page **pages;
 	dma_addr_t iova;
@@ -804,7 +827,7 @@  static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
 		return NULL;
 
 	size = iova_align(iovad, size);
-	iova = iommu_dma_alloc_iova(domain, dev, size, DMA_ALLOC_IOVA_COHERENT);
+	iova = iommu_dma_alloc_iova(domain, dev, size, flags);
 	if (iova == DMA_MAPPING_ERROR)
 		goto out_free_pages;
 
@@ -964,6 +987,7 @@  static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	phys_addr_t phys = page_to_phys(page) + offset;
 	bool coherent = dev_is_dma_coherent(dev);
 	int prot = dma_info_to_prot(dir, coherent, attrs);
+	unsigned int flags = dma_attrs_to_alloc_flags(attrs, false);
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
@@ -1005,7 +1029,7 @@  static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		arch_sync_dma_for_device(phys, size, dir);
 
-	iova = __iommu_dma_map(dev, phys, size, prot, 0);
+	iova = __iommu_dma_map(dev, phys, size, prot, flags);
 	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
 		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 	return iova;
@@ -1152,6 +1176,7 @@  static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	struct iova_domain *iovad = &cookie->iovad;
 	struct scatterlist *s, *prev = NULL;
 	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
+	unsigned int flags = dma_attrs_to_alloc_flags(attrs, false);
 	dma_addr_t iova;
 	size_t iova_len = 0;
 	unsigned long mask = dma_get_seg_boundary(dev);
@@ -1209,7 +1234,7 @@  static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		prev = s;
 	}
 
-	iova = iommu_dma_alloc_iova(domain, dev, iova_len, 0);
+	iova = iommu_dma_alloc_iova(domain, dev, iova_len, flags);
 	if (iova == DMA_MAPPING_ERROR) {
 		ret = -ENOMEM;
 		goto out_restore_sg;
@@ -1268,7 +1293,8 @@  static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	return __iommu_dma_map(dev, phys, size,
-			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO, 0);
+			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
+			dma_attrs_to_alloc_flags(attrs, false));
 }
 
 static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
@@ -1357,6 +1383,7 @@  static void *iommu_dma_alloc(struct device *dev, size_t size,
 {
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+	unsigned int flags = dma_attrs_to_alloc_flags(attrs, true);
 	struct page *page = NULL;
 	void *cpu_addr;
 
@@ -1377,8 +1404,7 @@  static void *iommu_dma_alloc(struct device *dev, size_t size,
 	if (!cpu_addr)
 		return NULL;
 
-	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
-			DMA_ALLOC_IOVA_COHERENT);
+	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot, flags);
 	if (*handle == DMA_MAPPING_ERROR) {
 		__iommu_dma_free(dev, size, cpu_addr);
 		return NULL;