Message ID | 20190422175942.18788-24-hch@lst.de (mailing list archive) |
---|---|
State | New, archived |
Series | [01/26] arm64/iommu: handle non-remapped addresses in ->mmap and ->get_sgtable |
On 22/04/2019 18:59, Christoph Hellwig wrote:
> For entirely dma coherent architectures there is no requirement to ever
> remap dma coherent allocation.  Move all the remap and pool code under
> IS_ENABLED() checks and drop the Kconfig dependency.

Reviewed-by: Robin Murphy <robin.murphy@arm.com>

> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/iommu/Kconfig     |  1 -
>  drivers/iommu/dma-iommu.c | 16 +++++++++-------
>  2 files changed, 9 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
> index bdc14baf2ee5..6f07f3b21816 100644
> --- a/drivers/iommu/Kconfig
> +++ b/drivers/iommu/Kconfig
> @@ -95,7 +95,6 @@ config IOMMU_DMA
>  	select IOMMU_API
>  	select IOMMU_IOVA
>  	select NEED_SG_DMA_LENGTH
> -	depends on DMA_DIRECT_REMAP
>
>  config FSL_PAMU
>  	bool "Freescale IOMMU support"
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index 8fc6098c1eeb..278a9a960107 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -923,10 +923,11 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
>  	struct page *page = NULL;
>
>  	/* Non-coherent atomic allocation? Easy */
> -	if (dma_free_from_pool(cpu_addr, alloc_size))
> +	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
> +	    dma_free_from_pool(cpu_addr, alloc_size))
>  		return;
>
> -	if (is_vmalloc_addr(cpu_addr)) {
> +	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
>  		/*
>  		 * If it the address is remapped, then it's either non-coherent
>  		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
> @@ -972,7 +973,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
>  	if (!page)
>  		return NULL;
>
> -	if (!coherent || PageHighMem(page)) {
> +	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
>  		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
>
>  		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
> @@ -1005,11 +1006,12 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
>
>  	gfp |= __GFP_ZERO;
>
> -	if (gfpflags_allow_blocking(gfp) &&
> +	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
>  	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
>  		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
>
> -	if (!gfpflags_allow_blocking(gfp) && !coherent)
> +	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
> +	    !gfpflags_allow_blocking(gfp) && !coherent)
>  		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
>  	else
>  		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
> @@ -1041,7 +1043,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
>  	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
>  		return -ENXIO;
>
> -	if (is_vmalloc_addr(cpu_addr)) {
> +	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
>  		struct page **pages = __iommu_dma_get_pages(cpu_addr);
>
>  		if (pages)
> @@ -1063,7 +1065,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
>  	struct page *page;
>  	int ret;
>
> -	if (is_vmalloc_addr(cpu_addr)) {
> +	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
>  		struct page **pages = __iommu_dma_get_pages(cpu_addr);
>
>  		if (pages) {
For entirely dma coherent architectures there is no requirement to ever
remap dma coherent allocation.  Move all the remap and pool code under
IS_ENABLED() checks and drop the Kconfig dependency.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/iommu/Kconfig     |  1 -
 drivers/iommu/dma-iommu.c | 16 +++++++++-------
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index bdc14baf2ee5..6f07f3b21816 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -95,7 +95,6 @@ config IOMMU_DMA
 	select IOMMU_API
 	select IOMMU_IOVA
 	select NEED_SG_DMA_LENGTH
-	depends on DMA_DIRECT_REMAP

 config FSL_PAMU
 	bool "Freescale IOMMU support"
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 8fc6098c1eeb..278a9a960107 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -923,10 +923,11 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 	struct page *page = NULL;

 	/* Non-coherent atomic allocation? Easy */
-	if (dma_free_from_pool(cpu_addr, alloc_size))
+	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    dma_free_from_pool(cpu_addr, alloc_size))
 		return;

-	if (is_vmalloc_addr(cpu_addr)) {
+	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
 		/*
 		 * If it the address is remapped, then it's either non-coherent
 		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
@@ -972,7 +973,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 	if (!page)
 		return NULL;

-	if (!coherent || PageHighMem(page)) {
+	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
 		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);

 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
@@ -1005,11 +1006,12 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,

 	gfp |= __GFP_ZERO;

-	if (gfpflags_allow_blocking(gfp) &&
+	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
 	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
 		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

-	if (!gfpflags_allow_blocking(gfp) && !coherent)
+	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    !gfpflags_allow_blocking(gfp) && !coherent)
 		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
 	else
 		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
@@ -1041,7 +1043,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
 		return -ENXIO;

-	if (is_vmalloc_addr(cpu_addr)) {
+	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
 		struct page **pages = __iommu_dma_get_pages(cpu_addr);

 		if (pages)
@@ -1063,7 +1065,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 	struct page *page;
 	int ret;

-	if (is_vmalloc_addr(cpu_addr)) {
+	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
 		struct page **pages = __iommu_dma_get_pages(cpu_addr);

 		if (pages) {
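Editor's note: the reason the hard `depends on DMA_DIRECT_REMAP` can simply be dropped is the IS_ENABLED() idiom the patch switches to. IS_ENABLED() expands to a compile-time constant 0 or 1, so when the option is off the guarded branch folds away and the compiler can discard the calls it contains, while the source still parses and type-checks. The standalone C sketch below illustrates only that idiom; it is not kernel code, and CONFIG_EXAMPLE_REMAP, try_free_from_pool() and example_free() are made-up stand-ins for CONFIG_DMA_DIRECT_REMAP, dma_free_from_pool() and __iommu_dma_free().

```c
/*
 * Minimal sketch of the IS_ENABLED()-style pattern, with a plain macro
 * standing in for the generated kernel config constant.  Hypothetical
 * names throughout; for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

/* Pretend the config option is disabled in this build. */
#define CONFIG_EXAMPLE_REMAP 0

static bool try_free_from_pool(void *addr)
{
	/* Stand-in for the atomic-pool free path. */
	printf("freed %p from the atomic pool\n", addr);
	return true;
}

static void example_free(void *addr)
{
	/*
	 * With CONFIG_EXAMPLE_REMAP defined to 0 this condition folds to
	 * "0 && ...": the branch is never taken and the compiler may drop
	 * the call entirely, so the code builds even in configurations
	 * that provide no pool.
	 */
	if (CONFIG_EXAMPLE_REMAP && try_free_from_pool(addr))
		return;

	printf("freed %p via the normal path\n", addr);
}

int main(void)
{
	int dummy;

	example_free(&dummy);
	return 0;
}
```

Changing the macro to 1 makes the pool branch live again, mirroring how the same source serves both DMA_DIRECT_REMAP=y and =n configurations once every use is guarded this way.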