Message ID | 20210618152059.1194210-5-jean-philippe@linaro.org (mailing list archive)
---|---
State | Not Applicable, archived
Series | Add support for ACPI VIOT
On 2021-06-18 16:20, Jean-Philippe Brucker wrote:
> Passing a 64-bit address width to iommu_setup_dma_ops() is valid on
> virtual platforms, but isn't currently possible. The overflow check in
> iommu_dma_init_domain() prevents this even when @dma_base isn't 0. Pass
> a limit address instead of a size, so callers don't have to fake a size
> to work around the check.
>
> The base and limit parameters are being phased out, because:
> * they are redundant for x86 callers. dma-iommu already reserves the
> first page, and the upper limit is already in domain->geometry.
> * they can now be obtained from dev->dma_range_map on Arm.
> But removing them on Arm isn't completely straightforward so is left for
> future work. As an intermediate step, simplify the x86 callers by
> passing dummy limits.

Reviewed-by: Robin Murphy <robin.murphy@arm.com>

> Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
> ---
[...]
Hi Jean,

On 6/18/21 5:20 PM, Jean-Philippe Brucker wrote:
> Passing a 64-bit address width to iommu_setup_dma_ops() is valid on
> virtual platforms, but isn't currently possible. The overflow check in
> iommu_dma_init_domain() prevents this even when @dma_base isn't 0. Pass
> a limit address instead of a size, so callers don't have to fake a size
> to work around the check.
>
> The base and limit parameters are being phased out, because:
> * they are redundant for x86 callers. dma-iommu already reserves the
> first page, and the upper limit is already in domain->geometry.
> * they can now be obtained from dev->dma_range_map on Arm.
> But removing them on Arm isn't completely straightforward so is left for
> future work. As an intermediate step, simplify the x86 callers by
> passing dummy limits.
>
> Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>

Reviewed-by: Eric Auger <eric.auger@redhat.com>

Eric
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 6e75a2d689b4..758ca4694257 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -19,7 +19,7 @@ int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
 void iommu_put_dma_cookie(struct iommu_domain *domain);
 
 /* Setup call for arch DMA mapping code */
-void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
 
 /* The DMA API isn't _quite_ the whole story, though... */
 /*
@@ -50,7 +50,7 @@ struct msi_msg;
 struct device;
 
 static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
-		u64 size)
+		u64 dma_limit)
 {
 }
 
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 4bf1dd3eb041..6719f9efea09 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -50,7 +50,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 
 	dev->dma_coherent = coherent;
 	if (iommu)
-		iommu_setup_dma_ops(dev, dma_base, size);
+		iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
 
 #ifdef CONFIG_XEN
 	if (xen_swiotlb_detect())
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 3ac42bbdefc6..216323fb27ef 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1713,7 +1713,7 @@ static void amd_iommu_probe_finalize(struct device *dev)
 	/* Domains are initialized for this device - have a look what we ended up with */
 	domain = iommu_get_domain_for_dev(dev);
 	if (domain->type == IOMMU_DOMAIN_DMA)
-		iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
+		iommu_setup_dma_ops(dev, 0, U64_MAX);
 	else
 		set_dma_ops(dev, NULL);
 }
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 7bcdd1205535..c62e19bed302 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -319,16 +319,16 @@ static bool dev_is_untrusted(struct device *dev)
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
  * @base: IOVA at which the mappable address space starts
- * @size: Size of IOVA space
+ * @limit: Last address of the IOVA space
  * @dev: Device the domain is being initialised for
  *
- * @base and @size should be exact multiples of IOMMU page granularity to
+ * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
  * avoid rounding surprises. If necessary, we reserve the page at address 0
  * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
  * any change which could make prior IOVAs invalid will fail.
  */
 static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
-		u64 size, struct device *dev)
+		dma_addr_t limit, struct device *dev)
 {
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	unsigned long order, base_pfn;
@@ -346,7 +346,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	/* Check the domain allows at least some access to the device... */
 	if (domain->geometry.force_aperture) {
 		if (base > domain->geometry.aperture_end ||
-		    base + size <= domain->geometry.aperture_start) {
+		    limit < domain->geometry.aperture_start) {
 			pr_warn("specified DMA range outside IOMMU capability\n");
 			return -EFAULT;
 		}
@@ -1308,7 +1308,7 @@ static const struct dma_map_ops iommu_dma_ops = {
  * The IOMMU core code allocates the default DMA domain, which the underlying
  * IOMMU driver needs to support via the dma-iommu layer.
  */
-void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 
@@ -1320,7 +1320,7 @@ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
 	 * underlying IOMMU driver needs to support via the dma-iommu layer.
 	 */
 	if (domain->type == IOMMU_DOMAIN_DMA) {
-		if (iommu_dma_init_domain(domain, dma_base, size, dev))
+		if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
 			goto out_err;
 		dev->dma_ops = &iommu_dma_ops;
 	}
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index be35284a2016..2f7213f0e7a1 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -5165,13 +5165,10 @@ static void intel_iommu_release_device(struct device *dev)
 
 static void intel_iommu_probe_finalize(struct device *dev)
 {
-	dma_addr_t base = IOVA_START_PFN << VTD_PAGE_SHIFT;
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 
 	if (domain && domain->type == IOMMU_DOMAIN_DMA)
-		iommu_setup_dma_ops(dev, base,
-				    __DOMAIN_MAX_ADDR(dmar_domain->gaw) - base);
+		iommu_setup_dma_ops(dev, 0, U64_MAX);
 	else
 		set_dma_ops(dev, NULL);
 }
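To see why the dma-iommu.c aperture hunk matters: with a {base, size} pair
describing a full 64-bit IOVA space, base + size wraps around to 0, so the
old "base + size <= aperture_start" test rejects a perfectly valid range,
while an inclusive limit cannot overflow. A standalone sketch of that
arithmetic (plain C, not kernel code; the base and aperture values are
invented for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t aperture_start = 0;		/* domain allows IOVA 0 upwards */
	uint64_t base = 0x1000;			/* mappable space starts here */
	uint64_t size = UINT64_MAX - base + 1;	/* 2^64 - base: full 64-bit width */
	uint64_t limit = UINT64_MAX;		/* same range as an inclusive limit */

	/* Old check: base + size wraps to 0, so this wrongly rejects */
	if (base + size <= aperture_start)
		printf("old check: rejected, base + size wrapped to %#llx\n",
		       (unsigned long long)(base + size));

	/* New check: comparing the inclusive limit cannot overflow */
	if (limit < aperture_start)
		printf("new check: rejected\n");
	else
		printf("new check: accepted, limit = %#llx\n",
		       (unsigned long long)limit);
	return 0;
}

Compiled as-is, the old check fires because base + size wraps to 0, which is
exactly the spurious -EFAULT the patch removes.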
Passing a 64-bit address width to iommu_setup_dma_ops() is valid on
virtual platforms, but isn't currently possible. The overflow check in
iommu_dma_init_domain() prevents this even when @dma_base isn't 0. Pass
a limit address instead of a size, so callers don't have to fake a size
to work around the check.

The base and limit parameters are being phased out, because:
* they are redundant for x86 callers. dma-iommu already reserves the
  first page, and the upper limit is already in domain->geometry.
* they can now be obtained from dev->dma_range_map on Arm.
But removing them on Arm isn't completely straightforward so is left for
future work. As an intermediate step, simplify the x86 callers by
passing dummy limits.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
 include/linux/dma-iommu.h   |  4 ++--
 arch/arm64/mm/dma-mapping.c |  2 +-
 drivers/iommu/amd/iommu.c   |  2 +-
 drivers/iommu/dma-iommu.c   | 12 ++++++------
 drivers/iommu/intel/iommu.c |  5 +----
 5 files changed, 11 insertions(+), 14 deletions(-)
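For the caller side, a hedged sketch of the size-to-limit conversion the
arm64 hunk now performs, and of the dummy range the x86 callers pass
(plain C outside the kernel; limit_from_base_size() is a hypothetical
helper for illustration, not a kernel API):

#include <assert.h>
#include <stdint.h>

#define U64_MAX UINT64_MAX	/* stand-in for the kernel's U64_MAX */

/* Hypothetical helper mirroring the arm64 call site: the inclusive last
 * address of a window starting at base and spanning size bytes. */
static uint64_t limit_from_base_size(uint64_t base, uint64_t size)
{
	return base + size - 1;
}

int main(void)
{
	/* A 4 GiB window starting at 2^32 ends at 0x1ffffffff. */
	assert(limit_from_base_size(1ULL << 32, 1ULL << 32) == 0x1ffffffffULL);

	/* A full 64-bit space has no representable u64 size (2^64 does
	 * not fit), but its inclusive limit U64_MAX does fit, which is
	 * the point of the interface change. */
	assert(limit_from_base_size(0x1000, U64_MAX - 0x1000 + 1) == U64_MAX);

	/* x86-style callers simply pass the dummy full range, as in
	 * iommu_setup_dma_ops(dev, 0, U64_MAX); the real bounds live in
	 * domain->geometry. */
	return 0;
}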