iommu_sg_num_pages() is a helper that walks a scatterlist and counts
pages, taking segment boundaries and iommu_num_pages() into account.
Up-level it from amd-iommu so it can be used to determine the IOVA
range that dma_map_ops established at dma_map_sg() time. The intent is
to iommu_unmap() the IOVA range in advance of freeing the IOVA range.

Cc: Joerg Roedel <joro@8bytes.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
New patch in v8.

 drivers/iommu/amd_iommu.c |   30 ++----------------------------
 drivers/iommu/iommu.c     |   27 +++++++++++++++++++++++++++
 include/linux/iommu.h     |    2 ++
 3 files changed, 31 insertions(+), 28 deletions(-)

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2459,32 +2459,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
__unmap_single(dma_dom, dma_addr, size, dir);
}
-static int sg_num_pages(struct device *dev,
- struct scatterlist *sglist,
- int nelems)
-{
- unsigned long mask, boundary_size;
- struct scatterlist *s;
- int i, npages = 0;
-
- mask = dma_get_seg_boundary(dev);
- boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
- 1UL << (BITS_PER_LONG - PAGE_SHIFT);
-
- for_each_sg(sglist, s, nelems, i) {
- int p, n;
-
- s->dma_address = npages << PAGE_SHIFT;
- p = npages % boundary_size;
- n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
- if (p + n > boundary_size)
- npages += boundary_size - p;
- npages += n;
- }
-
- return npages;
-}
-
/*
* The exported map_sg function for dma_ops (handles scatter-gather
* lists).
@@ -2507,7 +2481,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
dma_dom = to_dma_ops_domain(domain);
dma_mask = *dev->dma_mask;
- npages = sg_num_pages(dev, sglist, nelems);
+ npages = iommu_sg_num_pages(dev, sglist, nelems);
address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
if (address == AMD_IOMMU_MAPPING_ERROR)
@@ -2585,7 +2559,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
startaddr = sg_dma_address(sglist) & PAGE_MASK;
dma_dom = to_dma_ops_domain(domain);
- npages = sg_num_pages(dev, sglist, nelems);
+ npages = iommu_sg_num_pages(dev, sglist, nelems);
__unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir);
}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -33,6 +33,7 @@
#include <linux/bitops.h>
#include <linux/property.h>
#include <trace/events/iommu.h>
+#include <linux/iommu-helper.h>
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
@@ -1631,6 +1632,32 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
+int iommu_sg_num_pages(struct device *dev, struct scatterlist *sglist,
+ int nelems)
+{
+ unsigned long mask, boundary_size;
+ struct scatterlist *s;
+ int i, npages = 0;
+
+ mask = dma_get_seg_boundary(dev);
+ boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT
+ : 1UL << (BITS_PER_LONG - PAGE_SHIFT);
+
+ for_each_sg(sglist, s, nelems, i) {
+ int p, n;
+
+ s->dma_address = npages << PAGE_SHIFT;
+ p = npages % boundary_size;
+ n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
+ if (p + n > boundary_size)
+ npages += boundary_size - p;
+ npages += n;
+ }
+
+ return npages;
+}
+EXPORT_SYMBOL_GPL(iommu_sg_num_pages);
+
size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
{
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -303,6 +303,8 @@ extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
unsigned long iova, size_t size);
+extern int iommu_sg_num_pages(struct device *dev, struct scatterlist *sglist,
+ int nelems);
extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg,unsigned int nents,
int prot);
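For reference, below is a minimal sketch (not part of this patch) of the
intended caller pattern: a teardown path that recovers the IOVA range
dma_map_sg() established and iommu_unmap()s it before returning the
range to the allocator. example_teardown_sg() and its 'domain' argument
are illustrative assumptions, and error handling is elided:

static void example_teardown_sg(struct device *dev,
				struct iommu_domain *domain,
				struct scatterlist *sglist, int nelems)
{
	unsigned long startaddr = sg_dma_address(sglist) & PAGE_MASK;
	int npages;

	/*
	 * Capture the mapped DMA address before calling the helper:
	 * iommu_sg_num_pages() rewrites each s->dma_address to its
	 * offset within the range as a side effect of its walk,
	 * mirroring the ordering that unmap_sg() above relies on.
	 */
	npages = iommu_sg_num_pages(dev, sglist, nelems);

	/* unmap the IOVA range in advance of freeing it */
	iommu_unmap(domain, startaddr, (size_t)npages << PAGE_SHIFT);
}

On the helper's boundary handling: for a device where
dma_get_seg_boundary() returns 0xffffffff (segments must not cross a
4GiB boundary) and 4K pages, boundary_size works out to 0x100000 pages.
When the mask is ULONG_MAX, mask + 1 overflows to 0 and the helper
falls back to 1UL << (BITS_PER_LONG - PAGE_SHIFT), i.e. an effectively
unlimited boundary.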