Message ID | 20220511121544.5998-3-ajaykumar.rs@samsung.com |
---|---|
State | New, archived |
Series | IOMMU-DMA - support DMA_ATTR_LOW_ADDRESS attribute |
On 2022-05-11 13:15, Ajay Kumar wrote:
> From: Marek Szyprowski <m.szyprowski@samsung.com>
>
> Zero is a valid DMA and IOVA address on many architectures, so adjust the
> IOVA management code to properly handle it. A new value IOVA_BAD_ADDR
> (~0UL) is introduced as a generic value for the error case. Adjust all
> callers of the alloc_iova_fast() function for the new return value.

And when does anything actually need this? In fact if you were to stop
iommu-dma from reserving IOVA 0 - which you don't - it would only show
how patch #3 is broken.

Also note that it's really nothing to do with architectures either way;
iommu-dma simply chooses to reserve IOVA 0 for its own convenience,
mostly because it can. Much the same way that 0 is typically a valid
CPU VA, but mapping something meaningful there is just asking for a
world of pain debugging NULL-dereference bugs.

Robin.
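For context, the "reserving IOVA 0" referred to above happens implicitly when
the DMA-IOMMU cookie is initialised: iommu_dma_init_domain() clamps the
domain's starting PFN to at least 1, so PFN 0 is simply never handed out. An
abridged sketch of that era's drivers/iommu/dma-iommu.c, with error handling
and unrelated setup dropped:

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);	/* never below PFN 1 */

	init_iova_domain(iovad, 1UL << order, base_pfn);	/* hence IOVA 0 stays reserved */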
Hi Robin,

On Mon, May 23, 2022 at 11:00 PM Robin Murphy <robin.murphy@arm.com> wrote:
>
> On 2022-05-11 13:15, Ajay Kumar wrote:
> > From: Marek Szyprowski <m.szyprowski@samsung.com>
> >
> > Zero is a valid DMA and IOVA address on many architectures, so adjust the
> > IOVA management code to properly handle it. A new value IOVA_BAD_ADDR
> > (~0UL) is introduced as a generic value for the error case. Adjust all
> > callers of the alloc_iova_fast() function for the new return value.
>
> And when does anything actually need this? In fact if you were to stop
> iommu-dma from reserving IOVA 0 - which you don't - it would only show
> how patch #3 is broken.

Right! Since the IOVA allocation happens from higher addresses to lower
addresses, hitting the IOVA == 0 case means the IOVA space is exhausted,
which is highly unlikely.

> Also note that it's really nothing to do with architectures either way;
> iommu-dma simply chooses to reserve IOVA 0 for its own convenience,
> mostly because it can. Much the same way that 0 is typically a valid
> CPU VA, but mapping something meaningful there is just asking for a
> world of pain debugging NULL-dereference bugs.
>
> Robin.

This makes sense, let me think about managing the PFN at the lowest address
in some other way.

Thanks,
Ajay Kumar
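A minimal toy model of the top-down behaviour Ajay describes (purely
illustrative, not the kernel allocator): because candidates are tried from
the top of the space downwards, slot 0 is only ever handed out once
everything above it is already in use.

	#define BAD_SLOT	(~0UL)

	/* hand out the highest free slot at or below 'limit'; reaching slot 0
	 * means the space is effectively exhausted */
	static unsigned long alloc_topdown(bool *used, unsigned long limit)
	{
		for (unsigned long slot = limit; ; slot--) {
			if (!used[slot]) {
				used[slot] = true;
				return slot;
			}
			if (slot == 0)
				return BAD_SLOT;
		}
	}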
Hi Robin,

On 23.05.2022 19:30, Robin Murphy wrote:
> On 2022-05-11 13:15, Ajay Kumar wrote:
>> From: Marek Szyprowski <m.szyprowski@samsung.com>
>>
>> Zero is a valid DMA and IOVA address on many architectures, so adjust the
>> IOVA management code to properly handle it. A new value IOVA_BAD_ADDR
>> (~0UL) is introduced as a generic value for the error case. Adjust all
>> callers of the alloc_iova_fast() function for the new return value.
>
> And when does anything actually need this? In fact if you were to stop
> iommu-dma from reserving IOVA 0 - which you don't - it would only show
> how patch #3 is broken.

I don't get why you say that patch #3 is broken. The main issue with
address 0 being reserved appears when one uses a first-fit allocation
algorithm. In that case the first allocation will be at address 0, which
causes some issues. This patch simply makes the whole iova-related code
capable of handling that case. It mimics similar code already used on ARM
32bit. There are drivers that rely on the way the IOVAs are allocated.
This is especially important for drivers for devices with limited
addressing capabilities, and such devices exist and can even sit behind an
IOMMU.

Let's focus on the s5p-mfc driver and the way one of the supported hardware
variants does DMA. The hardware has a very limited DMA window (256M) and
addresses all memory buffers as an offset from the firmware base. So far it
has been assumed that the first allocated buffer will be at the beginning
of the IOVA space. This worked fine on ARM 32bit, and supporting that case
is a goal of this patchset.

> Also note that it's really nothing to do with architectures either way;
> iommu-dma simply chooses to reserve IOVA 0 for its own convenience,
> mostly because it can. Much the same way that 0 is typically a valid
> CPU VA, but mapping something meaningful there is just asking for a
> world of pain debugging NULL-dereference bugs.

Right, it is not directly related to the architecture, but it is a much
more common case for some architectures, where the DMA (IOVA) address is
the physical address plus some offset. The commit message can be
rephrased, though.

I see no reason to forbid 0 as a valid IOVA. The DMA-mapping code also uses
DMA_MAPPING_ERROR as ~0. It looks like no one has ever needed to consider
this case so far, since the last-fit allocation algorithm is in use.

Best regards
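To make the constraint concrete, here is a hypothetical sketch (the names
and the check are illustrative, not the actual s5p-mfc code): every buffer
handed to this hardware variant must be expressible as an offset from the
firmware base that fits within the 256M window.

	#include <linux/types.h>
	#include <linux/errno.h>

	/* hypothetical illustration only - not the real s5p-mfc code */
	#define MFC_DMA_WINDOW	(256UL << 20)	/* 256M addressing window */

	static int mfc_check_dma_addr(dma_addr_t fw_base, dma_addr_t buf)
	{
		/* the device addresses everything as (buf - fw_base), so the
		 * buffer must sit inside [fw_base, fw_base + MFC_DMA_WINDOW) */
		if (buf < fw_base || buf - fw_base >= MFC_DMA_WINDOW)
			return -ERANGE;
		return 0;
	}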
On 2022-06-02 16:58, Marek Szyprowski wrote:
> Hi Robin,
>
> On 23.05.2022 19:30, Robin Murphy wrote:
>> On 2022-05-11 13:15, Ajay Kumar wrote:
>>> From: Marek Szyprowski <m.szyprowski@samsung.com>
>>>
>>> Zero is a valid DMA and IOVA address on many architectures, so adjust the
>>> IOVA management code to properly handle it. A new value IOVA_BAD_ADDR
>>> (~0UL) is introduced as a generic value for the error case. Adjust all
>>> callers of the alloc_iova_fast() function for the new return value.
>>
>> And when does anything actually need this? In fact if you were to stop
>> iommu-dma from reserving IOVA 0 - which you don't - it would only show
>> how patch #3 is broken.
>
> I don't get why you say that patch #3 is broken.

My mistake, in fact it's already just as broken either way - I forgot that
that's done implicitly via iovad->start_pfn rather than an actual
reserve_iova() entry. I see there is an initial calculation attempting to
honour start_pfn in patch #3, but it's always immediately overwritten. More
critically, the rb_first()/rb_next walk means the starting point can only
creep inevitably upwards as older allocations are freed, so it will end up
pathologically stuck at or above limit_pfn and unable to recover. At best
it's more "next-fit" than "first-fit", and TBH the fact that it could ever
even allocate anything at all in an empty domain makes me want to change
the definition of IOVA_ANCHOR ;)

> The main issue with address 0 being reserved appears when one uses a
> first-fit allocation algorithm. In that case the first allocation will be
> at address 0, which causes some issues. This patch simply makes the whole
> iova-related code capable of handling that case. It mimics similar code
> already used on ARM 32bit. There are drivers that rely on the way the
> IOVAs are allocated. This is especially important for drivers for devices
> with limited addressing capabilities, and such devices exist and can even
> sit behind an IOMMU.
>
> Let's focus on the s5p-mfc driver and the way one of the supported
> hardware variants does DMA. The hardware has a very limited DMA window
> (256M) and addresses all memory buffers as an offset from the firmware
> base. So far it has been assumed that the first allocated buffer will be
> at the beginning of the IOVA space. This worked fine on ARM 32bit, and
> supporting that case is a goal of this patchset.

I still understand the use-case; I object to a solution where PFN 0 is
always reserved yet sometimes allocated. Not to mention the slightly more
fundamental thing of the whole lot not actually working the way it's
supposed to.

I also remain opposed to baking in secret ABIs where allocators are
expected to honour a particular undocumented behaviour. FWIW I've had plans
for a while now to make the IOVA walk bidirectional to make the existing
retry case more efficient, at which point it's then almost trivial to
implement "bottom-up" allocation, which I'm thinking I might then force on
by default for CONFIG_ARM to minimise any further surprises for v2 of the
default domain conversion. However I'm increasingly less convinced that
it's really sufficient for your case, and am leaning back towards the
opinion that any driver with really specific constraints on how its DMA
addresses are laid out should not be passively relying on a generic
allocator to do exactly what it wants. A simple flag saying "try to
allocate DMA addresses bottom-up" doesn't actually mean "allocate this
thing on a 256MB-aligned address and everything subsequent within a 256MB
window above it", so let's not build a fragile house of cards on top of
pretending that it does.

>> Also note that it's really nothing to do with architectures either way;
>> iommu-dma simply chooses to reserve IOVA 0 for its own convenience,
>> mostly because it can. Much the same way that 0 is typically a valid
>> CPU VA, but mapping something meaningful there is just asking for a
>> world of pain debugging NULL-dereference bugs.
>
> Right, it is not directly related to the architecture, but it is a much
> more common case for some architectures, where the DMA (IOVA) address is
> the physical address plus some offset. The commit message can be
> rephrased, though.
>
> I see no reason to forbid 0 as a valid IOVA. The DMA-mapping code also
> uses DMA_MAPPING_ERROR as ~0. It looks like no one has ever needed to
> consider this case so far, since the last-fit allocation algorithm is in
> use.

Because it's the most convenient and efficient thing to do. Remember we
tried to use 0 for DMA_MAPPING_ERROR too, but that turned out to be a
usable physical RAM address on some systems. With a virtual address space,
on the other hand, we're free to do whatever we want; that's kind of the
point.

Thanks,
Robin.
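For reference, the anchor node mentioned above is the sentinel that caps the
allocation rbtree, and it already uses the same all-ones value as the
proposed IOVA_BAD_ADDR. Lightly abridged from drivers/iommu/iova.c:

	#define IOVA_ANCHOR	~0UL

	/* in init_iova_domain(): a permanent node at the very top of the
	 * space gives every allocation walk a fixed starting point */
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);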
Hi Robin,

On 06.06.2022 14:38, Robin Murphy wrote:
> On 2022-06-02 16:58, Marek Szyprowski wrote:
>> On 23.05.2022 19:30, Robin Murphy wrote:
>>> On 2022-05-11 13:15, Ajay Kumar wrote:
>>>> From: Marek Szyprowski <m.szyprowski@samsung.com>
>>>>
>>>> Zero is a valid DMA and IOVA address on many architectures, so adjust the
>>>> IOVA management code to properly handle it. A new value IOVA_BAD_ADDR
>>>> (~0UL) is introduced as a generic value for the error case. Adjust all
>>>> callers of the alloc_iova_fast() function for the new return value.
>>>
>>> And when does anything actually need this? In fact if you were to stop
>>> iommu-dma from reserving IOVA 0 - which you don't - it would only show
>>> how patch #3 is broken.
>>
>> I don't get why you say that patch #3 is broken.
>
> My mistake, in fact it's already just as broken either way - I forgot that
> that's done implicitly via iovad->start_pfn rather than an actual
> reserve_iova() entry. I see there is an initial calculation attempting to
> honour start_pfn in patch #3, but it's always immediately overwritten.
> More critically, the rb_first()/rb_next walk means the starting point can
> only creep inevitably upwards as older allocations are freed, so it will
> end up pathologically stuck at or above limit_pfn and unable to recover.
> At best it's more "next-fit" than "first-fit", and TBH the fact that it
> could ever even allocate anything at all in an empty domain makes me want
> to change the definition of IOVA_ANCHOR ;)
>
>> The main issue with address 0 being reserved appears when one uses a
>> first-fit allocation algorithm. In that case the first allocation will
>> be at address 0, which causes some issues. This patch simply makes the
>> whole iova-related code capable of handling that case. It mimics similar
>> code already used on ARM 32bit. There are drivers that rely on the way
>> the IOVAs are allocated. This is especially important for drivers for
>> devices with limited addressing capabilities, and such devices exist and
>> can even sit behind an IOMMU.
>>
>> Let's focus on the s5p-mfc driver and the way one of the supported
>> hardware variants does DMA. The hardware has a very limited DMA window
>> (256M) and addresses all memory buffers as an offset from the firmware
>> base. So far it has been assumed that the first allocated buffer will be
>> at the beginning of the IOVA space. This worked fine on ARM 32bit, and
>> supporting that case is a goal of this patchset.
>
> I still understand the use-case; I object to a solution where PFN 0 is
> always reserved yet sometimes allocated. Not to mention the slightly more
> fundamental thing of the whole lot not actually working the way it's
> supposed to.
>
> I also remain opposed to baking in secret ABIs where allocators are
> expected to honour a particular undocumented behaviour. FWIW I've had
> plans for a while now to make the IOVA walk bidirectional to make the
> existing retry case more efficient, at which point it's then almost
> trivial to implement "bottom-up" allocation, which I'm thinking I might
> then force on by default for CONFIG_ARM to minimise any further surprises
> for v2 of the default domain conversion. However I'm increasingly less
> convinced that it's really sufficient for your case, and am leaning back
> towards the opinion that any driver with really specific constraints on
> how its DMA addresses are laid out should not be passively relying on a
> generic allocator to do exactly what it wants. A simple flag saying "try
> to allocate DMA addresses bottom-up" doesn't actually mean "allocate this
> thing on a 256MB-aligned address and everything subsequent within a 256MB
> window above it", so let's not build a fragile house of cards on top of
> pretending that it does.

Okay, I see your point. I'm fine with adding more of the IOVA allocation
logic to the respective driver (s5p-mfc); however, I would still like to
use the dma-mapping framework and the helpers that depend on it (like
v4l2-videobuf2, dma-buf, and so on). Would it be fine for you to let device
drivers access the IOVA domains hidden by the DMA-IOMMU glue code, to mark
certain parts of the IOVA space as reserved, or to manually remap a buffer
(like the firmware) with specific address requirements?

Best regards
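A rough sketch of the kind of access being asked about here (hypothetical:
the iova_domain currently lives inside the private iommu_dma_cookie, so a
driver has no legitimate way to reach it, and the helper below does not
exist). Given such access, the existing reserve_iova() helper would be
enough to keep a region away from the allocator:

	/* hypothetical - some_new_helper() is not a real API */
	struct iommu_domain *dom = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = some_new_helper(dom);

	/* keep [base, base + size) out of the allocator's hands, e.g. so the
	 * firmware buffer can be placed there manually with iommu_map() */
	reserve_iova(iovad, iova_pfn(iovad, base),
		     iova_pfn(iovad, base + size - 1));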
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 1ca85d37eeab..16218d6a0703 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -605,7 +605,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 {
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
-	unsigned long shift, iova_len, iova = 0;
+	unsigned long shift, iova_len, iova = IOVA_BAD_ADDR;
 
 	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
 		cookie->msi_iova += size;
@@ -625,11 +625,13 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 		iova = alloc_iova_fast(iovad, iova_len,
 				       DMA_BIT_MASK(32) >> shift, false);
 
-	if (!iova)
+	if (iova == IOVA_BAD_ADDR)
 		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
 				       true);
 
-	return (dma_addr_t)iova << shift;
+	if (iova != IOVA_BAD_ADDR)
+		return (dma_addr_t)iova << shift;
+	return DMA_MAPPING_ERROR;
 }
 
 static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
@@ -688,7 +690,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 	size = iova_align(iovad, size + iova_off);
 
 	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
-	if (!iova)
+	if (iova == DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
 
 	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
@@ -799,7 +801,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
 
 	size = iova_align(iovad, size);
 	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
-	if (!iova)
+	if (iova == DMA_MAPPING_ERROR)
 		goto out_free_pages;
 
 	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
@@ -1204,7 +1206,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	}
 
 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
-	if (!iova) {
+	if (iova == DMA_MAPPING_ERROR) {
 		ret = -ENOMEM;
 		goto out_restore_sg;
 	}
@@ -1516,7 +1518,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 		return NULL;
 
 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
-	if (!iova)
+	if (iova == DMA_MAPPING_ERROR)
 		goto out_free_page;
 
 	if (iommu_map(domain, iova, msi_addr, size, prot))
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index db77aa675145..ae0fe0a6714e 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -429,6 +429,8 @@ EXPORT_SYMBOL_GPL(free_iova);
  * This function tries to satisfy an iova allocation from the rcache,
  * and falls back to regular allocation on failure. If regular allocation
  * fails too and the flush_rcache flag is set then the rcache will be flushed.
+ * Returns a pfn the allocated iova starts at or IOVA_BAD_ADDR in the case
+ * of a failure.
 */
 unsigned long
 alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
@@ -447,7 +449,7 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
 		size = roundup_pow_of_two(size);
 
 	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
-	if (iova_pfn)
+	if (iova_pfn != IOVA_BAD_ADDR)
 		return iova_pfn;
 
 retry:
@@ -456,7 +458,7 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
 		unsigned int cpu;
 
 		if (!flush_rcache)
-			return 0;
+			return IOVA_BAD_ADDR;
 
 		/* Try replenishing IOVAs by flushing rcache. */
 		flush_rcache = false;
@@ -831,7 +833,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
 				       unsigned long limit_pfn)
 {
 	struct iova_cpu_rcache *cpu_rcache;
-	unsigned long iova_pfn = 0;
+	unsigned long iova_pfn = IOVA_BAD_ADDR;
 	bool has_pfn = false;
 	unsigned long flags;
 
@@ -858,6 +860,9 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
 
 	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
 
+	if (!iova_pfn)
+		return IOVA_BAD_ADDR;
+
 	return iova_pfn;
 }
 
@@ -873,7 +878,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
 	unsigned int log_size = order_base_2(size);
 
 	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches)
-		return 0;
+		return IOVA_BAD_ADDR;
 
 	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
 }
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 320a70e40233..46b5b10c532b 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -21,6 +21,7 @@ struct iova {
 	unsigned long	pfn_lo;	/* Lowest allocated pfn */
 };
 
+#define IOVA_BAD_ADDR	(~0UL)
 
 struct iova_rcache;
 