@@ -156,17 +156,20 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
}
} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page *page;

- page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size), gfp & __GFP_NOWARN);
+ page = dma_alloc_from_contiguous(dev, count, get_order(size),
+ gfp & __GFP_NOWARN);
+ if (!page)
+ page = alloc_pages(gfp, get_order(size));
if (!page)
return NULL;

*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
if (*handle == DMA_MAPPING_ERROR) {
- dma_release_from_contiguous(dev, page,
- size >> PAGE_SHIFT);
+ if (!dma_release_from_contiguous(dev, page, count))
+ __free_pages(page, get_order(size));
return NULL;
}
addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
@@ -178,8 +181,8 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
memset(addr, 0, size);
} else {
iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
- dma_release_from_contiguous(dev, page,
- size >> PAGE_SHIFT);
+ if (!dma_release_from_contiguous(dev, page, count))
+ __free_pages(page, get_order(size));
}
} else {
pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
@@ -201,6 +204,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, unsigned long attrs)
{
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
size_t iosize = size;

size = PAGE_ALIGN(size);
@@ -222,7 +226,8 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
struct page *page = vmalloc_to_page(cpu_addr);

iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
- dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+ if (!dma_release_from_contiguous(dev, page, count))
+ __free_pages(page, get_order(size));
dma_common_free_remap(cpu_addr, size, VM_USERMAP);
} else if (is_vmalloc_addr(cpu_addr)){
struct vm_struct *area = find_vm_area(cpu_addr);
The CMA allocation will skip allocations of single pages to save CMA
resources. This requires its callers to fall back to normal page
allocations when CMA declines a request, so this patch adds the
corresponding fallback routines.

Signed-off-by: Nicolin Chen <nicoleotsuka@gmail.com>
---
 arch/arm64/mm/dma-mapping.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)
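For reference, below is a minimal sketch of the alloc/free fallback pattern
the patch applies. It is not the exact kernel code: cma_alloc_fallback() and
cma_free_fallback() are hypothetical helper names used purely for
illustration, while dma_alloc_from_contiguous(), dma_release_from_contiguous(),
alloc_pages() and __free_pages() are the real interfaces the patch calls.

#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: try CMA first, then the normal page allocator. */
static struct page *cma_alloc_fallback(struct device *dev, size_t size,
				       gfp_t gfp)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page;

	/* CMA may return NULL, e.g. when it skips single-page requests. */
	page = dma_alloc_from_contiguous(dev, count, get_order(size),
					 gfp & __GFP_NOWARN);
	if (!page)
		page = alloc_pages(gfp, get_order(size));
	return page;
}

/* Hypothetical helper: hand pages back to CMA, or free them normally. */
static void cma_free_fallback(struct device *dev, struct page *page,
			      size_t size)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* dma_release_from_contiguous() returns false for non-CMA pages. */
	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}

The key point is that dma_release_from_contiguous() reports whether the pages
actually belonged to the CMA area, so the free path can mirror the allocation
fallback without tracking where each buffer came from.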