@@ -80,11 +80,8 @@ static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
- int ret;
-
- if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
- return ret;
-
+ if (!(attrs & DMA_ATTR_NON_CONSISTENT))
+ return dma_mmap_from_global_coherent(vma, cpu_addr, size);
return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
@@ -830,9 +830,6 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long pfn = dma_to_pfn(dev, dma_addr);
unsigned long off = vma->vm_pgoff;
- if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
-
if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
ret = remap_pfn_range(vma, vma->vm_start,
pfn + off,
@@ -246,9 +246,6 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
- if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
-
if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
/*
* DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
@@ -158,17 +158,12 @@ static inline int is_device_dma_capable(struct device *dev)
* These three functions are only for dma allocator.
* Don't use them in device drivers.
*/
-int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, size_t size, int *ret);
-
void *dma_alloc_from_global_coherent(size_t size, dma_addr_t *dma_handle);
void dma_release_from_global_coherent(size_t size, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
- size_t size, int *ret);
+ size_t size);
#else
-#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
-
static inline void *dma_alloc_from_global_coherent(size_t size,
dma_addr_t *dma_handle)
{
@@ -177,12 +172,10 @@ static inline void *dma_alloc_from_global_coherent(size_t size,
static inline void dma_release_from_global_coherent(size_t size, void *vaddr)
{
- return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
- void *cpu_addr, size_t size,
- int *ret)
+ void *cpu_addr, size_t size)
{
return 0;
}
@@ -197,60 +197,30 @@ void dma_release_from_global_coherent(size_t size, void *vaddr)
__dma_release_from_coherent(dma_coherent_default_memory, size, vaddr);
}
-static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
- struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+ struct vm_area_struct *vma, void *vaddr, size_t size)
{
- if (mem && vaddr >= mem->virt_base && vaddr + size <=
- (mem->virt_base + (mem->size << PAGE_SHIFT))) {
- unsigned long off = vma->vm_pgoff;
- int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
- int user_count = vma_pages(vma);
- int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- *ret = -ENXIO;
- if (off < count && user_count <= count - off) {
- unsigned long pfn = mem->pfn_base + start + off;
- *ret = remap_pfn_range(vma, vma->vm_start, pfn,
- user_count << PAGE_SHIFT,
- vma->vm_page_prot);
- }
- return 1;
- }
- return 0;
-}
+ unsigned long off = vma->vm_pgoff;
+ int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+ int user_count = vma_pages(vma);
+ int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-/**
- * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
- * @dev: device from which the memory was allocated
- * @vma: vm_area for the userspace memory
- * @vaddr: cpu address returned by dma_alloc_from_dev_coherent
- * @size: size of the memory buffer allocated
- * @ret: result from remap_pfn_range()
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
- *
- * Returns 1 if @vaddr belongs to the device coherent pool and the caller
- * should return @ret, or 0 if they should proceed with mapping memory from
- * generic areas.
- */
-int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
- void *vaddr, size_t size, int *ret)
-{
- struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
-
- return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+ if (WARN_ON_ONCE(!dma_in_coherent_range(mem, size, vaddr)))
+ return -ENXIO;
+ if (off >= count || user_count > count - off)
+ return -ENXIO;
+ return remap_pfn_range(vma, vma->vm_start, mem->pfn_base + start + off,
+ user_count << PAGE_SHIFT, vma->vm_page_prot);
}
-EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
- size_t size, int *ret)
+ size_t size)
{
if (!dma_coherent_default_memory)
return 0;
return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
- vaddr, size, ret);
+ vaddr, size);
}
/*
@@ -15,5 +15,7 @@ void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, size_t size,
dma_addr_t *dma_handle);
void __dma_release_from_coherent(struct dma_coherent_mem *mem, size_t size,
void *vaddr);
+int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+ struct vm_area_struct *vma, void *vaddr, size_t size);
#endif /* _DMA_INTERNAL_H */
@@ -158,13 +158,9 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
unsigned long pfn;
- int ret = -ENXIO;
vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
- if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
-
if (off >= count || user_count > count - off)
return -ENXIO;
@@ -201,6 +197,10 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
unsigned long attrs)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+ if (mem)
+ return __dma_mmap_from_coherent(mem, vma, cpu_addr, size);
if (!dma_is_direct(ops) && ops->mmap)
return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);

We handle allocation and freeing in common code, so we should handle
mmap the same way.  Also, all users of per-device coherent memory are
exclusive, that is, if we can't allocate from the per-device pool we
can't use the system memory either.  Unfold the current
dma_mmap_from_dev_coherent implementation and always use the per-device
pool if it exists.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/arm/mm/dma-mapping-nommu.c |  7 ++--
 arch/arm/mm/dma-mapping.c       |  3 --
 arch/arm64/mm/dma-mapping.c     |  3 --
 include/linux/dma-mapping.h     | 11 ++-----
 kernel/dma/coherent.c           | 58 ++++++++-------------------
 kernel/dma/internal.h           |  2 ++
 kernel/dma/mapping.c            |  8 ++---
 7 files changed, 24 insertions(+), 68 deletions(-)
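
For readers comparing the two interfaces, the following standalone sketch
(plain C with made-up stub names such as fake_pool, old_style_mmap and
new_style_mmap; not kernel code) contrasts the previous "return 1 and fill
an int *ret out-parameter" convention with the direct error-code return
used by __dma_mmap_from_coherent() after this patch:

#include <stdio.h>

/* Stub standing in for struct dma_coherent_mem; the field is illustrative. */
struct fake_pool {
	int present;
};

/*
 * Old convention: return 1 when the buffer belongs to the pool and store
 * the mapping result in *ret; return 0 so the caller falls through to the
 * generic mapping path.
 */
static int old_style_mmap(struct fake_pool *pool, int *ret)
{
	if (!pool || !pool->present)
		return 0;
	*ret = 0;		/* pretend the remap succeeded */
	return 1;
}

/*
 * New convention (mirroring dma_mmap_attrs() after this patch): if a
 * per-device pool exists it is used exclusively, and the helper returns
 * 0 or a negative error directly, so no out-parameter is needed.
 */
static int new_style_mmap(struct fake_pool *pool)
{
	if (!pool || !pool->present)
		return -1;	/* stand-in for -ENXIO */
	return 0;		/* pretend the remap succeeded */
}

int main(void)
{
	struct fake_pool pool = { .present = 1 };
	int ret = -1;

	if (old_style_mmap(&pool, &ret))
		printf("old style: pool hit, ret=%d\n", ret);
	printf("new style: ret=%d\n", new_style_mmap(&pool));
	return 0;
}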