Message ID | 6-v1-21cc72fcfb22+a7a-iommu_all_defdom_jgg@nvidia.com
---|---
State | New, archived
Series | iommu: Make default_domain's mandatory
On 2023-05-01 19:02, Jason Gunthorpe wrote:
> What exynos calls exynos_iommu_detach_device is actually putting the iommu
> into identity mode.
> 
> Move to the new core support for ARM_DMA_USE_IOMMU by defining
> ops->identity_domain.
> 
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---
>  drivers/iommu/exynos-iommu.c | 64 ++++++++++++++++++------------------
>  1 file changed, 32 insertions(+), 32 deletions(-)
> 
> diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
> index c275fe71c4db32..6ff7901103948a 100644
> --- a/drivers/iommu/exynos-iommu.c
> +++ b/drivers/iommu/exynos-iommu.c
> @@ -24,6 +24,7 @@
>  
>  typedef u32 sysmmu_iova_t;
>  typedef u32 sysmmu_pte_t;
> +static struct iommu_domain exynos_identity_domain;
>  
>  /* We do not consider super section mapping (16MB) */
>  #define SECT_ORDER 20
> @@ -829,7 +830,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
>                  struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
>  
>                  mutex_lock(&owner->rpm_lock);
> -                if (data->domain) {
> +                if (&data->domain->domain != &exynos_identity_domain) {
>                          dev_dbg(data->sysmmu, "saving state\n");
>                          __sysmmu_disable(data);
>                  }
> @@ -847,7 +848,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
>                  struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
>  
>                  mutex_lock(&owner->rpm_lock);
> -                if (data->domain) {
> +                if (&data->domain->domain != &exynos_identity_domain) {
>                          dev_dbg(data->sysmmu, "restoring state\n");
>                          __sysmmu_enable(data);
>                  }
> @@ -980,17 +981,22 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
>          kfree(domain);
>  }
>  
> -static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
> -                                       struct device *dev)
> +static int exynos_iommu_identity_attach(struct iommu_domain *identity_domain,
> +                                        struct device *dev)
>  {
> -        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
>          struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
> -        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
> +        struct exynos_iommu_domain *domain;
> +        phys_addr_t pagetable;
>          struct sysmmu_drvdata *data, *next;
>          unsigned long flags;
>  
> -        if (!has_sysmmu(dev) || owner->domain != iommu_domain)
> -                return;
> +        if (!owner)
> +                return -ENODEV;

That can't be true - devices can't be attached without having already
dereferenced their group, which means they've been through probe_device
successfully.

> +        if (owner->domain == identity_domain)
> +                return 0;
> +
> +        domain = to_exynos_domain(owner->domain);
> +        pagetable = virt_to_phys(domain->pgtable);

Identity domains by definition shouldn't have a pagetable? I don't think
virt_to_phys(NULL) can be assumed to be valid or safe in general.

>  
>          mutex_lock(&owner->rpm_lock);
>  
> @@ -1009,15 +1015,25 @@ static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
>                  list_del_init(&data->domain_node);
>                  spin_unlock(&data->lock);
>          }

This iterates the whole domain->clients list, which may include other
devices from other groups belonging to other IOMMU instances. I think
that's technically an issue already given that we support cross-instance
domain attach here, which the DRM drivers rely on. I can't quite work out
off-hand if this is liable to make it any worse or not :/

> -        owner->domain = NULL;
> +        owner->domain = identity_domain;
>          spin_unlock_irqrestore(&domain->lock, flags);
>  
>          mutex_unlock(&owner->rpm_lock);
>  
>          dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
>                  &pagetable);

This no longer makes much sense.

> +        return 0;
>  }
>  
> +static struct iommu_domain_ops exynos_identity_ops = {
> +        .attach_dev = exynos_iommu_identity_attach,
> +};
> +
> +static struct iommu_domain exynos_identity_domain = {
> +        .type = IOMMU_DOMAIN_IDENTITY,
> +        .ops = &exynos_identity_ops,
> +};
> +
>  static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
>                                        struct device *dev)
>  {
> @@ -1026,12 +1042,11 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
>          struct sysmmu_drvdata *data;
>          phys_addr_t pagetable = virt_to_phys(domain->pgtable);
>          unsigned long flags;
> +        int err;
>  
> -        if (!has_sysmmu(dev))
> -                return -ENODEV;
> -
> -        if (owner->domain)
> -                exynos_iommu_detach_device(owner->domain, dev);
> +        err = exynos_iommu_identity_attach(&exynos_identity_domain, dev);
> +        if (err)
> +                return err;
>  
>          mutex_lock(&owner->rpm_lock);
>  
> @@ -1407,26 +1422,12 @@ static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
>          return &data->iommu;
>  }
>  
> -static void exynos_iommu_set_platform_dma(struct device *dev)
> -{
> -        struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
> -
> -        if (owner->domain) {
> -                struct iommu_group *group = iommu_group_get(dev);
> -
> -                if (group) {
> -                        exynos_iommu_detach_device(owner->domain, dev);
> -                        iommu_group_put(group);
> -                }
> -        }
> -}
> -
>  static void exynos_iommu_release_device(struct device *dev)
>  {
>          struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
>          struct sysmmu_drvdata *data;
>  
> -        exynos_iommu_set_platform_dma(dev);
> +        WARN_ON(exynos_iommu_identity_attach(&exynos_identity_domain, dev));
>  
>          list_for_each_entry(data, &owner->controllers, owner_node)
>                  device_link_del(data->link);
> @@ -1457,6 +1458,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
>  
>                  INIT_LIST_HEAD(&owner->controllers);
>                  mutex_init(&owner->rpm_lock);
> +                owner->domain = &exynos_identity_domain;

I think strictly this would be more of a probe_device thing than an
of_xlate thing, but it's not super-important.

Thanks,
Robin.

>                  dev_iommu_priv_set(dev, owner);
>          }
>  
> @@ -1471,11 +1473,9 @@ static int exynos_iommu_of_xlate(struct device *dev,
>  }
>  
>  static const struct iommu_ops exynos_iommu_ops = {
> +        .identity_domain = &exynos_identity_domain,
>          .domain_alloc = exynos_iommu_domain_alloc,
>          .device_group = generic_device_group,
> -#ifdef CONFIG_ARM
> -        .set_platform_dma_ops = exynos_iommu_set_platform_dma,
> -#endif
>          .probe_device = exynos_iommu_probe_device,
>          .release_device = exynos_iommu_release_device,
>          .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
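[Editor's note] For readers coming to the thread from the cover letter, the mechanism the patch relies on can be distilled as below. This is a minimal sketch pieced together only from the hunks quoted above, not a complete driver: the SysMMU disable path and error handling are elided.

/*
 * One statically allocated IDENTITY domain is advertised to the core;
 * "detach" then becomes "attach the identity domain".
 */
static int exynos_iommu_identity_attach(struct iommu_domain *identity_domain,
                                        struct device *dev)
{
        struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);

        if (owner->domain == identity_domain)
                return 0;       /* already in bypass */

        /* ...disable the device's SysMMUs and drop the paging domain... */
        owner->domain = identity_domain;
        return 0;
}

static struct iommu_domain_ops exynos_identity_ops = {
        .attach_dev = exynos_iommu_identity_attach,
};

static struct iommu_domain exynos_identity_domain = {
        .type = IOMMU_DOMAIN_IDENTITY,
        .ops = &exynos_identity_ops,
};

static const struct iommu_ops exynos_iommu_ops = {
        /* replaces the CONFIG_ARM set_platform_dma_ops hook */
        .identity_domain = &exynos_identity_domain,
        /* ... */
};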
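[Editor's note] To make Robin's domain->clients concern concrete: the loop he quotes clears state for every sysmmu_drvdata attached to the domain, not only the SysMMUs serving the device being switched to identity. A hypothetical narrowing, walking the device's own controller list instead, might look like the sketch below. This is not code from the thread and is untested; the helper name is invented for illustration and the locking is copied unaudited from the existing path.

static void exynos_iommu_forget_domain(struct exynos_iommu_owner *owner,
                                       struct exynos_iommu_domain *domain)
{
        struct sysmmu_drvdata *data;
        unsigned long flags;

        spin_lock_irqsave(&domain->lock, flags);
        /* only this master's SysMMUs, not every client of the domain */
        list_for_each_entry(data, &owner->controllers, owner_node) {
                spin_lock(&data->lock);
                data->pgtable = 0;
                data->domain = NULL;
                list_del_init(&data->domain_node);
                spin_unlock(&data->lock);
        }
        spin_unlock_irqrestore(&domain->lock, flags);
}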
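[Editor's note] On the closing of_xlate remark, one hypothetical reading (again not code from the thread) is to record the per-device starting state in probe_device, which runs once per master after all of its of_xlate() calls have completed:

static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
        struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
        struct sysmmu_drvdata *data;

        if (!owner)
                return ERR_PTR(-ENODEV);

        /* hypothetical home for the initialisation the patch adds to of_xlate */
        owner->domain = &exynos_identity_domain;

        /* ...existing device_link setup for each entry on owner->controllers... */

        data = list_first_entry(&owner->controllers, struct sysmmu_drvdata,
                                owner_node);
        return &data->iommu;
}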