| Message ID | 20190422175942.18788-12-hch@lst.de (mailing list archive) |
|---|---|
| State | New, archived |
| Series | [01/26] arm64/iommu: handle non-remapped addresses in ->mmap and ->get_sgtable |
On 22/04/2019 18:59, Christoph Hellwig wrote:
> From: Robin Murphy <robin.murphy@arm.com>
>
> Since we duplicate the find_vm_area() logic a few times in places where
> we only care about the pages, factor out a helper to abstract it.
>
> Signed-off-by: Robin Murphy <robin.murphy@arm.com>
> [hch: don't warn when not finding a region, as we'll rely on that later]

Yeah, I did think about that and the things which it might make a little
easier, but preserved it as-is for the sake of keeping my modifications
quick and simple. TBH I'm now feeling more inclined to drop the WARNs
entirely at this point, since it's not like there's ever been any general
guarantee that freeing the wrong thing shouldn't just crash, but that's
something we can easily come back to later if need be.

Robin.

> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/iommu/dma-iommu.c | 32 ++++++++++++++++++++------------
>  1 file changed, 20 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index b52c5d6be7b4..8e2d9733113e 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -525,6 +525,15 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
>          return pages;
>  }
>
> +static struct page **__iommu_dma_get_pages(void *cpu_addr)
> +{
> +        struct vm_struct *area = find_vm_area(cpu_addr);
> +
> +        if (!area || !area->pages)
> +                return NULL;
> +        return area->pages;
> +}
> +
>  /**
>   * iommu_dma_free - Free a buffer allocated by __iommu_dma_alloc()
>   * @dev: Device which owns this buffer
> @@ -1023,11 +1032,11 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
>                  dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
>                  dma_common_free_remap(cpu_addr, size, VM_USERMAP);
>          } else if (is_vmalloc_addr(cpu_addr)){
> -                struct vm_struct *area = find_vm_area(cpu_addr);
> +                struct page **pages = __iommu_dma_get_pages(cpu_addr);
>
> -                if (WARN_ON(!area || !area->pages))
> +                if (WARN_ON(!pages))
>                          return;
> -                __iommu_dma_free(dev, area->pages, iosize, &handle);
> +                __iommu_dma_free(dev, pages, iosize, &handle);
>                  dma_common_free_remap(cpu_addr, size, VM_USERMAP);
>          } else {
>                  __iommu_dma_unmap(dev, handle, iosize);
> @@ -1049,7 +1058,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
>  {
>          unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
>          unsigned long off = vma->vm_pgoff;
> -        struct vm_struct *area;
> +        struct page **pages;
>          int ret;
>
>          vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
> @@ -1074,11 +1083,10 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
>                  return __iommu_dma_mmap_pfn(vma, pfn, size);
>          }
>
> -        area = find_vm_area(cpu_addr);
> -        if (WARN_ON(!area || !area->pages))
> +        pages = __iommu_dma_get_pages(cpu_addr);
> +        if (WARN_ON_ONCE(!pages))
>                  return -ENXIO;
> -
> -        return __iommu_dma_mmap(area->pages, size, vma);
> +        return __iommu_dma_mmap(pages, size, vma);
>  }
>
>  static int __iommu_dma_get_sgtable_page(struct sg_table *sgt, struct page *page,
> @@ -1096,7 +1104,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
>                  unsigned long attrs)
>  {
>          unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> -        struct vm_struct *area = find_vm_area(cpu_addr);
> +        struct page **pages;
>
>          if (!is_vmalloc_addr(cpu_addr)) {
>                  struct page *page = virt_to_page(cpu_addr);
> @@ -1112,10 +1120,10 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
>                  return __iommu_dma_get_sgtable_page(sgt, page, size);
>          }
>
> -        if (WARN_ON(!area || !area->pages))
> +        pages = __iommu_dma_get_pages(cpu_addr);
> +        if (WARN_ON_ONCE(!pages))
>                  return -ENXIO;
> -        return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
> +        return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
>                          GFP_KERNEL);
>  }
>
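The "[hch: don't warn ...]" note above is the key behavioural point: with the WARN kept out of the helper itself, a NULL return can later be treated as an ordinary "this buffer was never remapped" signal rather than an error. A minimal sketch of such a hypothetical caller, using only functions that already exist in dma-iommu.c (the function name and the simplified iosize handling are illustrative, not code from this series):

/*
 * Hypothetical caller, not part of this series: because
 * __iommu_dma_get_pages() returns NULL instead of warning, the NULL
 * return can distinguish a remapped (vmalloc) buffer from one that
 * was never remapped, rather than being treated as an error.
 */
static void example_free_path(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t handle)
{
        size_t iosize = size;   /* simplified: real code aligns to the IOVA granule */
        struct page **pages = __iommu_dma_get_pages(cpu_addr);

        if (pages) {
                /* Remapped: free the page array and the vmalloc mapping. */
                __iommu_dma_free(dev, pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
                /* No pages array: just tear down the IOMMU mapping. */
                __iommu_dma_unmap(dev, handle, iosize);
        }
}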
On Mon, Apr 29, 2019 at 02:05:46PM +0100, Robin Murphy wrote:
> On 22/04/2019 18:59, Christoph Hellwig wrote:
>> From: Robin Murphy <robin.murphy@arm.com>
>>
>> Since we duplicate the find_vm_area() logic a few times in places where
>> we only care about the pages, factor out a helper to abstract it.
>>
>> Signed-off-by: Robin Murphy <robin.murphy@arm.com>
>> [hch: don't warn when not finding a region, as we'll rely on that later]
>
> Yeah, I did think about that and the things which it might make a little
> easier, but preserved it as-is for the sake of keeping my modifications
> quick and simple. TBH I'm now feeling more inclined to drop the WARNs
> entirely at this point, since it's not like there's ever been any general
> guarantee that freeing the wrong thing shouldn't just crash, but that's
> something we can easily come back to later if need be.

Ok, I've dropped the warnings.
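The revised patch is not quoted in this thread, but with the warnings dropped the callsites presumably reduce to a plain NULL check. A sketch of the assumed final shape of the mmap tail (not confirmed code from the series):

        /* e.g. in iommu_dma_mmap(), assuming the WARN_ON_ONCE is dropped: */
        pages = __iommu_dma_get_pages(cpu_addr);
        if (!pages)
                return -ENXIO;
        return __iommu_dma_mmap(pages, size, vma);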
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index b52c5d6be7b4..8e2d9733113e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -525,6 +525,15 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
         return pages;
 }

+static struct page **__iommu_dma_get_pages(void *cpu_addr)
+{
+        struct vm_struct *area = find_vm_area(cpu_addr);
+
+        if (!area || !area->pages)
+                return NULL;
+        return area->pages;
+}
+
 /**
  * iommu_dma_free - Free a buffer allocated by __iommu_dma_alloc()
  * @dev: Device which owns this buffer
@@ -1023,11 +1032,11 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
                 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
                 dma_common_free_remap(cpu_addr, size, VM_USERMAP);
         } else if (is_vmalloc_addr(cpu_addr)){
-                struct vm_struct *area = find_vm_area(cpu_addr);
+                struct page **pages = __iommu_dma_get_pages(cpu_addr);

-                if (WARN_ON(!area || !area->pages))
+                if (WARN_ON(!pages))
                         return;
-                __iommu_dma_free(dev, area->pages, iosize, &handle);
+                __iommu_dma_free(dev, pages, iosize, &handle);
                 dma_common_free_remap(cpu_addr, size, VM_USERMAP);
         } else {
                 __iommu_dma_unmap(dev, handle, iosize);
@@ -1049,7 +1058,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 {
         unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
         unsigned long off = vma->vm_pgoff;
-        struct vm_struct *area;
+        struct page **pages;
         int ret;

         vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
@@ -1074,11 +1083,10 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                 return __iommu_dma_mmap_pfn(vma, pfn, size);
         }

-        area = find_vm_area(cpu_addr);
-        if (WARN_ON(!area || !area->pages))
+        pages = __iommu_dma_get_pages(cpu_addr);
+        if (WARN_ON_ONCE(!pages))
                 return -ENXIO;
-
-        return __iommu_dma_mmap(area->pages, size, vma);
+        return __iommu_dma_mmap(pages, size, vma);
 }

 static int __iommu_dma_get_sgtable_page(struct sg_table *sgt, struct page *page,
@@ -1096,7 +1104,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                 unsigned long attrs)
 {
         unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-        struct vm_struct *area = find_vm_area(cpu_addr);
+        struct page **pages;

         if (!is_vmalloc_addr(cpu_addr)) {
                 struct page *page = virt_to_page(cpu_addr);
@@ -1112,10 +1120,10 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                 return __iommu_dma_get_sgtable_page(sgt, page, size);
         }

-        if (WARN_ON(!area || !area->pages))
+        pages = __iommu_dma_get_pages(cpu_addr);
+        if (WARN_ON_ONCE(!pages))
                 return -ENXIO;
-        return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
+        return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
                         GFP_KERNEL);
 }
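For context on why find_vm_area() can recover the pages at all: the allocation side stores the page array in the vm_struct when it remaps the buffer. A hypothetical round-trip demonstrating that invariant (example_remap() is illustrative only; dma_common_pages_remap() and __iommu_dma_get_pages() are the real functions as of this series):

/*
 * Hypothetical demonstration: dma_common_pages_remap() records the
 * page array in the vm_struct it creates, which is exactly what
 * __iommu_dma_get_pages() later looks up via find_vm_area().
 */
static void *example_remap(struct page **pages, size_t size, pgprot_t prot)
{
        void *vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
                        __builtin_return_address(0));

        /* The returned CPU address now maps back to the same pages array. */
        if (vaddr)
                WARN_ON(__iommu_dma_get_pages(vaddr) != pages);
        return vaddr;
}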